Dataset schema (column name, type, value/length range):

    max_stars_repo_path    string, length 3 to 269
    max_stars_repo_name    string, length 4 to 119
    max_stars_count        int64, 0 to 191k
    id                     string, length 1 to 7
    content                string, length 6 to 1.05M
    score                  float64, 0.23 to 5.13
    int_score              int64, 0 to 5
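Each record below lists these seven fields in order (file path, repository, star count, id, file content, score, integer score). As a rough sketch only, assuming the table is published as a Hugging Face-style dataset (the dataset id "user/python-code-scores" and the "train" split are placeholder assumptions, not identifiers taken from this dump), the columns could be inspected like this:

    # Hypothetical sketch: "user/python-code-scores" is a placeholder dataset id.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-scores", split="train")

    # Keep only files whose integer quality score is at least 3 (scale is 0-5).
    high_quality = ds.filter(lambda row: row["int_score"] >= 3)

    for row in high_quality.select(range(3)):
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])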
beet/contrib/render.py
Arcensoth/beet
46
12100
<filename>beet/contrib/render.py
"""Plugin that invokes the built-in template renderer."""


__all__ = [
    "RenderOptions",
    "render",
]


from typing import Dict, List

from pydantic import BaseModel

from beet import Context, configurable


class RenderOptions(BaseModel):
    resource_pack: Dict[str, List[str]] = {}
    data_pack: Dict[str, List[str]] = {}


def beet_default(ctx: Context):
    ctx.require(render)


@configurable(validator=RenderOptions)
def render(ctx: Context, opts: RenderOptions):
    """Plugin that processes the data pack and the resource pack with Jinja."""
    for groups, pack in zip([opts.resource_pack, opts.data_pack], ctx.packs):
        for group, patterns in groups.items():
            try:
                proxy = getattr(pack, group)
                file_paths = proxy.match(*patterns)
            except:
                raise ValueError(f"Invalid render group {group!r}.") from None
            else:
                for path in file_paths:
                    with ctx.override(render_path=path, render_group=group):
                        ctx.template.render_file(proxy[path])
2.3125
2
customtkinter/customtkinter_progressbar.py
thisSELFmySELF/CustomTkinter
1
12101
import sys import tkinter from .customtkinter_tk import CTk from .customtkinter_frame import CTkFrame from .appearance_mode_tracker import AppearanceModeTracker from .customtkinter_color_manager import CTkColorManager class CTkProgressBar(tkinter.Frame): """ tkinter custom progressbar, always horizontal, values are from 0 to 1 """ def __init__(self, *args, variable=None, bg_color=None, border_color="CTkColorManager", fg_color="CTkColorManager", progress_color="CTkColorManager", width=160, height=10, border_width=0, **kwargs): super().__init__(*args, **kwargs) # overwrite configure methods of master when master is tkinter widget, so that bg changes get applied on child CTk widget too if isinstance(self.master, (tkinter.Tk, tkinter.Frame)) and not isinstance(self.master, (CTk, CTkFrame)): master_old_configure = self.master.config def new_configure(*args, **kwargs): if "bg" in kwargs: self.configure(bg_color=kwargs["bg"]) elif "background" in kwargs: self.configure(bg_color=kwargs["background"]) # args[0] is dict when attribute gets changed by widget[<attribut>] syntax elif len(args) > 0 and type(args[0]) == dict: if "bg" in args[0]: self.configure(bg_color=args[0]["bg"]) elif "background" in args[0]: self.configure(bg_color=args[0]["background"]) master_old_configure(*args, **kwargs) self.master.config = new_configure self.master.configure = new_configure AppearanceModeTracker.add(self.change_appearance_mode, self) self.appearance_mode = AppearanceModeTracker.get_mode() # 0: "Light" 1: "Dark" self.bg_color = self.detect_color_of_master() if bg_color is None else bg_color self.border_color = CTkColorManager.PROGRESS_BG if border_color == "CTkColorManager" else border_color self.fg_color = CTkColorManager.PROGRESS_BG if fg_color == "CTkColorManager" else fg_color self.progress_color = CTkColorManager.MAIN if progress_color == "CTkColorManager" else progress_color self.variable = variable self.variable_callback_blocked = False self.variabel_callback_name = None self.width = width self.height = self.calc_optimal_height(height) self.border_width = round(border_width) self.value = 0.5 self.configure(width=self.width, height=self.height) self.canvas = tkinter.Canvas(master=self, highlightthicknes=0, width=self.width, height=self.height) self.canvas.place(x=0, y=0) # Each time an item is resized due to pack position mode, the binding Configure is called on the widget self.bind('<Configure>', self.update_dimensions) self.draw() # initial draw if self.variable is not None: self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback) self.variable_callback_blocked = True self.set(self.variable.get(), from_variable_callback=True) self.variable_callback_blocked = False def destroy(self): AppearanceModeTracker.remove(self.change_appearance_mode) if self.variable is not None: self.variable.trace_remove("write", self.variabel_callback_name) super().destroy() def detect_color_of_master(self): if isinstance(self.master, CTkFrame): return self.master.fg_color else: return self.master.cget("bg") @staticmethod def calc_optimal_height(user_height): if sys.platform == "darwin": return user_height # on macOS just use given value (canvas has Antialiasing) else: # make sure the value is always with uneven for better rendering of the ovals if user_height == 0: return 0 elif user_height % 2 == 0: return user_height + 1 else: return user_height def update_dimensions(self, event): # only redraw if dimensions changed (for performance) if self.width != event.width or self.height != event.height: 
self.width = event.width self.height = event.height self.draw() def draw(self, no_color_updates=False): # decide the drawing method if sys.platform == "darwin": # on macOS draw button with polygons (positions are more accurate, macOS has Antialiasing) self.draw_with_polygon_shapes() else: # on Windows and other draw with ovals (corner_radius can be optimised to look better than with polygons) self.draw_with_ovals_and_rects() if no_color_updates is False: self.canvas.configure(bg=CTkColorManager.single_color(self.bg_color, self.appearance_mode)) self.canvas.itemconfig("border_parts", fill=CTkColorManager.single_color(self.border_color, self.appearance_mode)) self.canvas.itemconfig("inner_parts", fill=CTkColorManager.single_color(self.fg_color, self.appearance_mode)) self.canvas.itemconfig("progress_parts", fill=CTkColorManager.single_color(self.progress_color, self.appearance_mode)) def draw_with_polygon_shapes(self): """ draw the progress bar parts with just three polygons that have a rounded border """ coordinate_shift = -1 width_reduced = -1 # create border button parts (only if border exists) if self.border_width > 0: if not self.canvas.find_withtag("border_parts"): self.canvas.create_line((0, 0, 0, 0), tags=("border_line_1", "border_parts")) self.canvas.coords("border_line_1", (self.height / 2, self.height / 2, self.width - self.height / 2 + coordinate_shift, self.height / 2)) self.canvas.itemconfig("border_line_1", capstyle=tkinter.ROUND, width=self.height + width_reduced) self.canvas.lower("border_parts") # create inner button parts if not self.canvas.find_withtag("inner_parts"): self.canvas.create_line((0, 0, 0, 0), tags=("inner_line_1", "inner_parts")) self.canvas.coords("inner_line_1", (self.height / 2, self.height / 2, self.width - self.height / 2 + coordinate_shift, self.height / 2)) self.canvas.itemconfig("inner_line_1", capstyle=tkinter.ROUND, width=self.height - self.border_width * 2 + width_reduced) # progress parts if not self.canvas.find_withtag("progress_parts"): self.canvas.create_line((0, 0, 0, 0), tags=("progress_line_1", "progress_parts")) self.canvas.coords("progress_line_1", (self.height / 2, self.height / 2, self.height / 2 + (self.width + coordinate_shift - self.height) * self.value, self.height / 2)) self.canvas.itemconfig("progress_line_1", capstyle=tkinter.ROUND, width=self.height - self.border_width * 2 + width_reduced) def draw_with_ovals_and_rects(self): """ draw the progress bar parts with ovals and rectangles """ if sys.platform == "darwin": oval_bottom_right_shift = 0 rect_bottom_right_shift = 0 else: # ovals and rects are always rendered too large on Windows and need to be made smaller by -1 oval_bottom_right_shift = -1 rect_bottom_right_shift = -0 # frame_border if self.border_width > 0: if not self.canvas.find_withtag("border_parts"): self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_1", "border_parts"), width=0) self.canvas.create_rectangle((0, 0, 0, 0), tags=("border_rect_1", "border_parts"), width=0) self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_2", "border_parts"), width=0) self.canvas.coords("border_oval_1", (0, 0, self.height + oval_bottom_right_shift, self.height + oval_bottom_right_shift)) self.canvas.coords("border_rect_1", (self.height/2, 0, self.width-(self.height/2) + rect_bottom_right_shift, self.height + rect_bottom_right_shift)) self.canvas.coords("border_oval_2", (self.width-self.height, 0, self.width + oval_bottom_right_shift, self.height + oval_bottom_right_shift)) # foreground if not 
self.canvas.find_withtag("inner_parts"): self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_1", "inner_parts"), width=0) self.canvas.create_rectangle((0, 0, 0, 0), tags=("inner_rect_1", "inner_parts"), width=0) self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_2", "inner_parts"), width=0) self.canvas.coords("inner_oval_1", (self.border_width, self.border_width, self.height-self.border_width + oval_bottom_right_shift, self.height-self.border_width + oval_bottom_right_shift)) self.canvas.coords("inner_rect_1", (self.height/2, self.border_width, self.width-(self.height/2 + rect_bottom_right_shift), self.height-self.border_width + rect_bottom_right_shift)) self.canvas.coords("inner_oval_2", (self.width-self.height+self.border_width, self.border_width, self.width-self.border_width + oval_bottom_right_shift, self.height-self.border_width + oval_bottom_right_shift)) # progress parts if not self.canvas.find_withtag("progress_parts"): self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_1", "progress_parts"), width=0) self.canvas.create_rectangle((0, 0, 0, 0), tags=("progress_rect_1", "progress_parts"), width=0) self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_2", "progress_parts"), width=0) self.canvas.coords("progress_oval_1", (self.border_width, self.border_width, self.height - self.border_width + oval_bottom_right_shift, self.height - self.border_width + oval_bottom_right_shift)) self.canvas.coords("progress_rect_1", (self.height / 2, self.border_width, self.height / 2 + (self.width - self.height) * self.value + rect_bottom_right_shift, self.height - self.border_width + rect_bottom_right_shift)) self.canvas.coords("progress_oval_2", (self.height / 2 + (self.width - self.height) * self.value - self.height / 2 + self.border_width, self.border_width, self.height / 2 + (self.width - self.height) * self.value + self.height / 2 - self.border_width + oval_bottom_right_shift, self.height - self.border_width + oval_bottom_right_shift)) def configure(self, *args, **kwargs): require_redraw = False # some attribute changes require a call of self.draw() at the end if "bg_color" in kwargs: self.bg_color = kwargs["bg_color"] del kwargs["bg_color"] require_redraw = True if "fg_color" in kwargs: self.fg_color = kwargs["fg_color"] del kwargs["fg_color"] require_redraw = True if "border_color" in kwargs: self.border_color = kwargs["border_color"] del kwargs["border_color"] require_redraw = True if "progress_color" in kwargs: self.progress_color = kwargs["progress_color"] del kwargs["progress_color"] require_redraw = True if "border_width" in kwargs: self.border_width = kwargs["border_width"] del kwargs["border_width"] require_redraw = True if "variable" in kwargs: if self.variable is not None: self.variable.trace_remove("write", self.variabel_callback_name) self.variable = kwargs["variable"] if self.variable is not None and self.variable != "": self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback) self.set(self.variable.get(), from_variable_callback=True) else: self.variable = None del kwargs["variable"] super().configure(*args, **kwargs) if require_redraw is True: self.draw() def variable_callback(self, var_name, index, mode): if not self.variable_callback_blocked: self.set(self.variable.get(), from_variable_callback=True) def set(self, value, from_variable_callback=False): self.value = value if self.value > 1: self.value = 1 elif self.value < 0: self.value = 0 self.draw(no_color_updates=True) if self.variable is not None and not from_variable_callback: 
self.variable_callback_blocked = True self.variable.set(round(self.value) if isinstance(self.variable, tkinter.IntVar) else self.value) self.variable_callback_blocked = False def change_appearance_mode(self, mode_string): if mode_string.lower() == "dark": self.appearance_mode = 1 elif mode_string.lower() == "light": self.appearance_mode = 0 if isinstance(self.master, CTkFrame): self.bg_color = self.master.fg_color else: self.bg_color = self.master.cget("bg") self.draw()
2.5625
3
backtest/tests/test_strategy.py
Christakou/backtest
0
12102
import pytest

from backtest.strategy import BuyAndHoldEqualAllocation


@pytest.fixture
def strategy():
    symbols = ('AAPL', 'GOOG')
    strategy = BuyAndHoldEqualAllocation(relevant_symbols=symbols)
    return strategy


def test_strategy_execute(strategy):
    strategy.execute()
    assert len(strategy.holdings) > 0
    assert len(strategy.trades) > 0


def test_holdings_at(strategy):
    strategy.execute()
    assert (strategy._holdings_at('2018-05-05') == {})
    assert (strategy._holdings_at('2021-05-06') == {'AAPL': 7466})
    assert (strategy._holdings_at('2021-05-07') == {'AAPL': 3862, 'GOOG': 209})
    assert (strategy._holdings_at('2021-05-08') == {'AAPL': 3862, 'GOOG': 209})
2.71875
3
onnx/backend/test/case/node/constant.py
stillmatic/onnx
0
12103
# SPDX-License-Identifier: Apache-2.0

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np

import onnx
from ..base import Base
from . import expect


class Constant(Base):

    @staticmethod
    def export():  # type: () -> None
        values = np.random.randn(5, 5).astype(np.float32)
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['values'],
            value=onnx.helper.make_tensor(
                name='const_tensor',
                data_type=onnx.TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        )

        expect(node, inputs=[], outputs=[values],
               name='test_constant')
1.96875
2
nvd3/multiChart.py
areski/python-nvd3
442
12104
<filename>nvd3/multiChart.py<gh_stars>100-1000 #!/usr/bin/python # -*- coding: utf-8 -*- """ Python-nvd3 is a Python wrapper for NVD3 graph library. NVD3 is an attempt to build re-usable charts and chart components for d3.js without taking away the power that d3.js gives you. Project location : https://github.com/areski/python-nvd3 """ from .NVD3Chart import NVD3Chart, TemplateMixin class multiChart(TemplateMixin, NVD3Chart): """ A multiChart is a type of chart which combines several plots of the same or different types. Python example:: from nvd3 import multiChart type = "multiChart" chart = multiChart(name=type, x_is_date=False, x_axis_format="AM_PM") xdata = [1,2,3,4,5,6] ydata = [115.5,160.5,108,145.5,84,70.5] ydata2 = [48624,42944,43439,24194,38440,31651] kwargs1 = {'color': 'black'} kwargs2 = {'color': 'red'} extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}} chart.add_serie(y=ydata, x=xdata, type='line', yaxis=1, name='visits', extra=extra_serie, **kwargs1) extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}} chart.add_serie(y=ydata2, x=xdata, type='bar', yaxis=2,name='spend', extra=extra_serie, **kwargs2) chart.buildhtml() Javascript rendered to: .. raw:: html <div id="multichart"><svg style="height:450px;"></svg></div> <script> data_multichart=[{"color": "black", "type": "line", "values": [{"y": 115.5, "x": 1}, {"y": 160.5, "x": 2}, {"y": 108, "x": 3}, {"y ": 145.5, "x": 4}, {"y": 84, "x": 5}, {"y": 70.5, "x": 6}], "key": "visits", "yAxis": 1}, {"color": "red", "type": "bar", "values": [{"y": 486 24, "x": 1}, {"y": 42944, "x": 2}, {"y": 43439, "x": 3}, {"y": 24194, "x": 4}, {"y": 38440, "x": 5}, {"y": 31651, "x": 6}], "key": "spend", "y Axis": 2}]; nv.addGraph(function() { var chart = nv.models.multiChart(); chart.margin({top: 30, right: 60, bottom: 20, left: 60}); var datum = data_multichart; chart.yAxis1 .tickFormat(d3.format(',.02f')); chart.yAxis2 .tickFormat(d3.format(',.02f')); chart.xAxis .tickFormat(function(d) { return get_am_pm(parseInt(d)); }); function get_am_pm(d){ if (d > 12) { d = d - 12; return (String(d) + 'PM'); } else { return (String(d) + 'AM'); } }; chart.showLegend(true); d3.select('#multichart svg') .datum(datum) .transition().duration(500) .attr('height', 450) .call(chart); }); </script> See the source code of this page, to see the underlying javascript. """ CHART_FILENAME = "./multichart.html" template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME) def __init__(self, **kwargs): super(multiChart, self).__init__(**kwargs) self.model = 'multiChart' height = kwargs.get('height', 450) width = kwargs.get('width', None) if kwargs.get('x_is_date', False): self.set_date_flag(True) self.create_x_axis('xAxis', format=kwargs.get('x_axis_format', '%d %b %Y'), date=True) self.set_custom_tooltip_flag(True) else: if kwargs.get('x_axis_format') == 'AM_PM': self.x_axis_format = format = 'AM_PM' else: format = kwargs.get('x_axis_format', 'r') self.create_x_axis('xAxis', format=format, custom_format=kwargs.get('x_custom_format', False)) self.create_y_axis( 'yAxis1', format=kwargs.get('y1_axis_format', '.02f'), custom_format=kwargs.get('y1_custom_format', False)) self.create_y_axis( 'yAxis2', format=kwargs.get('y2_axis_format', '.02f'), custom_format=kwargs.get('y2_custom_format', False)) # must have a specified height, otherwise it superimposes both chars self.set_graph_height(height) if width: self.set_graph_width(width)
3.21875
3
utils/config.py
AlbertiPot/nar
2
12105
""" Date: 2021/09/23 Target: config utilities for yml file. implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks.git """ import os import yaml class LoaderMeta(type): """ Constructor for supporting `!include`. """ def __new__(mcs, __name__, __bases__, __dict__): """Add include constructer to class.""" # register the include constructor on the class cls = super().__new__(mcs, __name__, __bases__, __dict__) cls.add_constructor('!include', cls.construct_include) return cls class Loader(yaml.Loader, metaclass=LoaderMeta): """ YAML Loader with `!include` constructor. """ def __init__(self, stream): try: self._root = os.path.split(stream.name)[0] except AttributeError: self._root = os.path.curdir super().__init__(stream) def construct_include(self, node): """ Include file referenced at node. """ filename = os.path.abspath( os.path.join(self._root, self.construct_scalar(node))) extension = os.path.splitext(filename)[1].lstrip('.') with open(filename, 'r') as f: if extension in ('yaml', 'yml'): return yaml.load(f, Loader) else: return ''.join(f.readlines()) class AttrDict(dict): """ Dict as attribute trick. """ def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self for key in self.__dict__: value = self.__dict__[key] if isinstance(value, dict): self.__dict__[key] = AttrDict(value) elif isinstance(value, list): if isinstance(value[0], dict): self.__dict__[key] = [AttrDict(item) for item in value] else: self.__dict__[key] = value def yaml(self): """ Convert object to yaml dict and return. """ yaml_dict = {} for key in self.__dict__: value = self.__dict__[key] if isinstance(value, AttrDict): yaml_dict[key] = value.yaml() elif isinstance(value, list): if isinstance(value[0], AttrDict): new_l = [] for item in value: new_l.append(item.yaml()) yaml_dict[key] = new_l else: yaml_dict[key] = value else: yaml_dict[key] = value return yaml_dict def __repr__(self): """ Print all variables. """ ret_str = [] for key in self.__dict__: value = self.__dict__[key] if isinstance(value, AttrDict): ret_str.append('{}:'.format(key)) child_ret_str = value.__repr__().split('\n') for item in child_ret_str: ret_str.append(' ' + item) elif isinstance(value, list): if isinstance(value[0], AttrDict): ret_str.append('{}:'.format(key)) for item in value: # treat as AttrDict above child_ret_str = item.__repr__().split('\n') for item in child_ret_str: ret_str.append(' ' + item) else: ret_str.append('{}: {}'.format(key, value)) else: ret_str.append('{}: {}'.format(key, value)) return '\n'.join(ret_str) class Config(AttrDict): def __init__(self, filename=None): try: with open(filename, 'r') as f: cfg_dict = yaml.load(f, Loader) except EnvironmentError: print('Please check the file with name of "%s"', filename) super(Config, self).__init__(cfg_dict) def get_config(config_file): assert os.path.exists(config_file), 'File {} not exist.'.format(config_file) return Config(config_file)
2.359375
2
src/api/fundings/entities.py
cbn-alpin/gefiproj-api
2
12106
<filename>src/api/fundings/entities.py<gh_stars>1-10
from marshmallow import Schema, fields, validate
from sqlalchemy import Column, String, Integer, Float, Date, ForeignKey
from sqlalchemy.orm import relationship

from ..funders.entities import Funder, FunderSchema
from src.api import db
from src.shared.entity import Base


class Funding(Base, db.Model):
    __tablename__ = 'financement'

    id_f = Column(Integer, primary_key=True)
    id_p = Column(Integer, nullable=False)
    id_financeur = Column(Integer, ForeignKey('financeur.id_financeur'), nullable=False)
    financeur = relationship("Funder")
    montant_arrete_f = Column(Float, nullable=False)
    statut_f = Column(String(250), nullable=False)
    date_solde_f = Column(Date)
    date_arrete_f = Column(Date)
    date_limite_solde_f = Column(Date)
    commentaire_admin_f = Column(String(250))
    commentaire_resp_f = Column(String(250))
    numero_titre_f = Column(String(250))
    annee_titre_f = Column(String(250))
    imputation_f = Column(String(250))

    def __init__(self, id_p, id_financeur, montant_arrete_f, statut_f, date_solde_f=None,
                 date_arrete_f=None, date_limite_solde_f=None, commentaire_admin_f='',
                 commentaire_resp_f='', numero_titre_f='', annee_titre_f='',
                 imputation_f='', id_f=''):
        if id_f != '':
            self.id_f = id_f
        self.id_p = id_p
        self.id_financeur = id_financeur
        self.montant_arrete_f = montant_arrete_f
        self.statut_f = statut_f
        self.date_solde_f = date_solde_f
        self.date_arrete_f = date_arrete_f
        self.date_limite_solde_f = date_limite_solde_f
        self.commentaire_admin_f = commentaire_admin_f
        self.commentaire_resp_f = commentaire_resp_f
        self.numero_titre_f = numero_titre_f
        self.annee_titre_f = annee_titre_f
        self.imputation_f = imputation_f


class FundingSchema(Schema):
    id_f = fields.Integer()
    id_p = fields.Integer(required=True)
    id_financeur = fields.Integer(required=True)
    financeur = fields.Nested(FunderSchema)
    montant_arrete_f = fields.Float(required=True)
    statut_f = fields.Str(validate=validate.OneOf(["ANTR", "ATR", "SOLDE"]), required=True)
    date_solde_f = fields.Date(allow_none=True)
    date_arrete_f = fields.Date(allow_none=True)
    date_limite_solde_f = fields.Date(allow_none=True)
    commentaire_admin_f = fields.Str(allow_none=True)
    commentaire_resp_f = fields.Str(allow_none=True)
    numero_titre_f = fields.Str(allow_none=True)
    annee_titre_f = fields.Str(allow_none=True)
    imputation_f = fields.Str(allow_none=True)
    # TODO find solution to replace because option unknown=INCLUDE don't work in a list
    difference = fields.Float(allow_none=True)
    solde = fields.Float(allow_none=True)
    nom_financeur = fields.Str(allow_none=True)
2.359375
2
src/extensions/COMMANDS/CommitCommand.py
DMTF/python-redfish-utility
15
12107
<filename>src/extensions/COMMANDS/CommitCommand.py<gh_stars>10-100
###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###

""" Commit Command for RDMC """
import sys

from optparse import OptionParser

from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, \
                    NoChangesFoundOrMadeError, NoCurrentSessionEstablished

from rdmc_base_classes import RdmcCommandBase


class CommitCommand(RdmcCommandBase):
    """ Constructor """
    def __init__(self, rdmcObj):
        RdmcCommandBase.__init__(self,\
            name='commit',\
            usage='commit [OPTIONS]\n\n\tRun to apply all changes made during the' \
                    ' current session\n\texample: commit',\
            summary='Applies all the changes made during the current' \
                    ' session.',\
            aliases=[],\
            optparser=OptionParser())
        self.definearguments(self.parser)
        self._rdmc = rdmcObj
        self.logoutobj = rdmcObj.commandsDict["LogoutCommand"](rdmcObj)

    def commitfunction(self, options=None):
        """ Main commit worker function

        :param options: command line options
        :type options: list.
        """
        self.commitvalidation()

        sys.stdout.write("Committing changes...\n")

        if not self._rdmc.app.commit(verbose=self._rdmc.opts.verbose):
            raise NoChangesFoundOrMadeError("No changes found or made " \
                                            "during commit operation.")

        self.logoutobj.logoutfunction("")

    def run(self, line):
        """ Wrapper function for commit main function

        :param line: command line input
        :type line: string.
        """
        try:
            (options, _) = self._parse_arglist(line)
        except:
            if ("-h" in line) or ("--help" in line):
                return ReturnCodes.SUCCESS
            else:
                raise InvalidCommandLineErrorOPTS("")

        self.commitfunction(options)

        #Return code
        return ReturnCodes.SUCCESS

    def commitvalidation(self):
        """ Commit method validation function """
        try:
            self._rdmc.app.get_current_client()
        except:
            raise NoCurrentSessionEstablished("Please login and make setting" \
                                              " changes before using commit command.")

    def definearguments(self, customparser):
        """ Wrapper function for new command main function

        :param customparser: command line input
        :type customparser: parser.
        """
        if not customparser:
            return
2.078125
2
rosetta/views.py
evrenesat/ganihomes
24
12108
from django.conf import settings from django.contrib.auth.decorators import user_passes_test from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.http import Http404, HttpResponseRedirect, HttpResponse from django.shortcuts import render_to_response from django.template import RequestContext from django.utils.encoding import smart_unicode, iri_to_uri from django.utils.translation import ugettext_lazy as _ from django.views.decorators.cache import never_cache from rosetta.conf import settings as rosetta_settings from rosetta.polib import pofile from rosetta.poutil import find_pos, pagination_range from rosetta.signals import entry_changed, post_save import re import rosetta import datetime import unicodedata import hashlib import os def home(request): """ Displays a list of messages to be translated """ def fix_nls(in_, out_): """Fixes submitted translations by filtering carriage returns and pairing newlines at the begging and end of the translated string with the original """ if 0 == len(in_) or 0 == len(out_): return out_ if "\r" in out_ and "\r" not in in_: out_ = out_.replace("\r", '') if "\n" == in_[0] and "\n" != out_[0]: out_ = "\n" + out_ elif "\n" != in_[0] and "\n" == out_[0]: out_ = out_.lstrip() if "\n" == in_[-1] and "\n" != out_[-1]: out_ = out_ + "\n" elif "\n" != in_[-1] and "\n" == out_[-1]: out_ = out_.rstrip() return out_ version = rosetta.get_version(True) if 'rosetta_i18n_fn' in request.session: rosetta_i18n_fn = request.session.get('rosetta_i18n_fn') rosetta_i18n_app = get_app_name(rosetta_i18n_fn) rosetta_i18n_lang_code = request.session['rosetta_i18n_lang_code'] rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI rosetta_i18n_write = request.session.get('rosetta_i18n_write', True) if rosetta_i18n_write: rosetta_i18n_pofile = pofile(rosetta_i18n_fn) for entry in rosetta_i18n_pofile: entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest() else: rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile') if 'filter' in request.GET: if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'): filter_ = request.GET.get('filter') request.session['rosetta_i18n_filter'] = filter_ return HttpResponseRedirect(reverse('rosetta-home')) rosetta_i18n_filter = request.session.get('rosetta_i18n_filter', 'all') if '_next' in request.POST: rx = re.compile(r'^m_([0-9a-f]+)') rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)') file_change = False for key, value in request.POST.items(): md5hash = None plural_id = None if rx_plural.match(key): md5hash = str(rx_plural.match(key).groups()[0]) # polib parses .po files into unicode strings, but # doesn't bother to convert plural indexes to int, # so we need unicode here. plural_id = unicode(rx_plural.match(key).groups()[1]) elif rx.match(key): md5hash = str(rx.match(key).groups()[0]) if md5hash is not None: entry = rosetta_i18n_pofile.find(md5hash, 'md5hash') # If someone did a makemessage, some entries might # have been removed, so we need to check. 
if entry: old_msgstr = entry.msgstr if plural_id is not None: plural_string = fix_nls(entry.msgstr_plural[plural_id], value) entry.msgstr_plural[plural_id] = plural_string else: entry.msgstr = fix_nls(entry.msgid, value) is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False)) old_fuzzy = 'fuzzy' in entry.flags if old_fuzzy and not is_fuzzy: entry.flags.remove('fuzzy') elif not old_fuzzy and is_fuzzy: entry.flags.append('fuzzy') file_change = True if old_msgstr != value or old_fuzzy != is_fuzzy: entry_changed.send(sender=entry, user=request.user, old_msgstr=old_msgstr, old_fuzzy=old_fuzzy, pofile=rosetta_i18n_fn, language_code=rosetta_i18n_lang_code, ) else: request.session['rosetta_last_save_error'] = True if file_change and rosetta_i18n_write: try: # Provide defaults in case authorization is not required. request.user.first_name = getattr(request.user, 'first_name', 'Anonymous') request.user.last_name = getattr(request.user, 'last_name', 'User') request.user.email = getattr(request.user, 'email', '<EMAIL>') rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.<EMAIL>.email)).encode('ascii', 'ignore') rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False) rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z') except UnicodeDecodeError: pass try: rosetta_i18n_pofile.save() rosetta_i18n_pofile.save_as_mofile(rosetta_i18n_fn.replace('.po', '.mo')) post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request) # Try auto-reloading via the WSGI daemon mode reload mechanism if rosetta_settings.WSGI_AUTO_RELOAD and \ 'mod_wsgi.process_group' in request.environ and \ request.environ.get('mod_wsgi.process_group', None) and \ 'SCRIPT_FILENAME' in request.environ and \ int(request.environ.get('mod_wsgi.script_reloading', '0')): try: os.utime(request.environ.get('SCRIPT_FILENAME'), None) except OSError: pass # Try auto-reloading via uwsgi daemon reload mechanism if rosetta_settings.UWSGI_AUTO_RELOAD: try: import uwsgi # pretty easy right? uwsgi.reload() except: # we may not be running under uwsgi :P pass except: request.session['rosetta_i18n_write'] = False request.session['rosetta_i18n_pofile'] = rosetta_i18n_pofile # Retain query arguments query_arg = '' if 'query' in request.REQUEST: query_arg = '?query=%s' % request.REQUEST.get('query') if 'page' in request.GET: if query_arg: query_arg = query_arg + '&' else: query_arg = '?' 
query_arg = query_arg + 'page=%d' % int(request.GET.get('page')) return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg)) rosetta_i18n_lang_name = _(request.session.get('rosetta_i18n_lang_name')) rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code') if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip(): query = request.REQUEST.get('query').strip() rx = re.compile(re.escape(query), re.IGNORECASE) paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE) else: if rosetta_i18n_filter == 'untranslated': paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE) elif rosetta_i18n_filter == 'translated': paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE) elif rosetta_i18n_filter == 'fuzzy': paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE) else: paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE) if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0: page = int(request.GET.get('page')) else: page = 1 messages = paginator.page(page).object_list if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code: main_language = None for language in settings.LANGUAGES: if language[0] == rosetta_settings.MAIN_LANGUAGE: main_language = _(language[1]) break fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code)) po = pofile(fl) main_messages = [] for message in messages: message.main_lang = po.find(message.msgid).msgstr needs_pagination = paginator.num_pages > 1 if needs_pagination: if paginator.num_pages >= 10: page_range = pagination_range(1, paginator.num_pages, page) else: page_range = range(1, 1 + paginator.num_pages) ADMIN_MEDIA_PREFIX = settings.STATIC_URL ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS BING_APP_ID = rosetta_settings.BING_APP_ID MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE if 'rosetta_last_save_error' in request.session: del(request.session['rosetta_last_save_error']) rosetta_last_save_error = True return render_to_response('rosetta/pofile.html', locals(), context_instance=RequestContext(request)) else: return list_languages(request) home = never_cache(home) home = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(home) def download_file(request): import zipfile from StringIO import StringIO # original filename rosetta_i18n_fn = request.session.get('rosetta_i18n_fn', None) # in-session modified catalog rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile', None) # language code rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code', None) if not rosetta_i18n_lang_code or not rosetta_i18n_pofile or not rosetta_i18n_fn: return HttpResponseRedirect(reverse('rosetta-home')) try: if len(rosetta_i18n_fn.split('/')) >= 5: offered_fn = '_'.join(rosetta_i18n_fn.split('/')[-5:]) else: offered_fn = rosetta_i18n_fn.split('/')[-1] po_fn = str(rosetta_i18n_fn.split('/')[-1]) mo_fn = 
str(po_fn.replace('.po', '.mo')) # not so smart, huh zipdata = StringIO() zipf = zipfile.ZipFile(zipdata, mode="w") zipf.writestr(po_fn, unicode(rosetta_i18n_pofile).encode("utf8")) zipf.writestr(mo_fn, rosetta_i18n_pofile.to_binary()) zipf.close() zipdata.seek(0) response = HttpResponse(zipdata.read()) response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, rosetta_i18n_lang_code) response['Content-Type'] = 'application/x-zip' return response except Exception: return HttpResponseRedirect(reverse('rosetta-home')) download_file = never_cache(download_file) download_file = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(download_file) def list_languages(request): """ Lists the languages for the current project, the gettext catalog files that can be translated and their translation progress """ languages = [] if 'filter' in request.GET: if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'): filter_ = request.GET.get('filter') request.session['rosetta_i18n_catalog_filter'] = filter_ return HttpResponseRedirect(reverse('rosetta-pick-file')) rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project') third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party') django_apps = rosetta_i18n_catalog_filter in ('all', 'django') project_apps = rosetta_i18n_catalog_filter in ('all', 'project') has_pos = False for language in settings.LANGUAGES: pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps) has_pos = has_pos or len(pos) languages.append( (language[0], _(language[1]), [(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos], ) ) ADMIN_MEDIA_PREFIX = settings.STATIC_URL version = rosetta.get_version(True) return render_to_response('rosetta/languages.html', locals(), context_instance=RequestContext(request)) list_languages = never_cache(list_languages) list_languages = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(list_languages) def get_app_name(path): app = path.split("/locale")[0].split("/")[-1] return app def lang_sel(request, langid, idx): """ Selects a file to be translated """ if langid not in [l[0] for l in settings.LANGUAGES]: raise Http404 else: rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project') third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party') django_apps = rosetta_i18n_catalog_filter in ('all', 'django') project_apps = rosetta_i18n_catalog_filter in ('all', 'project') file_ = find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)[int(idx)] request.session['rosetta_i18n_lang_code'] = langid request.session['rosetta_i18n_lang_name'] = unicode([l[1] for l in settings.LANGUAGES if l[0] == langid][0]) request.session['rosetta_i18n_fn'] = file_ po = pofile(file_) for entry in po: entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest() request.session['rosetta_i18n_pofile'] = po try: os.utime(file_, None) request.session['rosetta_i18n_write'] = True except OSError: request.session['rosetta_i18n_write'] = False return HttpResponseRedirect(reverse('rosetta-home')) lang_sel = never_cache(lang_sel) lang_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(lang_sel) def can_translate(user): if not getattr(settings, 'ROSETTA_REQUIRES_AUTH', True): return True if not user.is_authenticated(): return False elif 
user.is_superuser and user.is_staff: return True else: try: from django.contrib.auth.models import Group translators = Group.objects.get(name='translators') return translators in user.groups.all() except Group.DoesNotExist: return False
1.882813
2
examples/experiment_pulse.py
HySynth/HySynth
4
12109
# to run this, add code from experiments_HSCC2021.py


def time_series_pulse():
    path = Path(__file__).parent.parent / "data" / "real_data" / "datasets" / "basic_data"
    filename1 = path / "pulse1-1.csv"
    filename2 = path / "pulse1-2.csv"
    filename3 = path / "pulse1-3.csv"

    f1 = load_time_series(filename1, 1)
    f2 = load_time_series(filename2, 1)
    f3 = load_time_series(filename3, 1)

    dataset = [f1, f2, f3]

    return dataset


def parameters_pulse():
    delta_ts = 0.02
    deltas_ha = [0.1]
    n_discrete_steps = 10
    reachability_time_step = 1e-3
    refinement_distance = 0.001
    n_intermediate = 1
    max_dwell_time = 4.0
    min_dwell_time = None
    n_simulations = 3
    path_length = 6
    time_step = 0.01

    return delta_ts, deltas_ha, n_discrete_steps, reachability_time_step, refinement_distance, max_dwell_time,\
        n_intermediate, n_simulations, path_length, time_step, min_dwell_time
2.5625
3
ironic/drivers/modules/drac/management.py
Tehsmash/ironic
0
12110
# -*- coding: utf-8 -*- # # Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DRAC Management Driver """ from oslo.utils import excutils from oslo.utils import importutils from ironic.common import boot_devices from ironic.common import exception from ironic.common.i18n import _ from ironic.common.i18n import _LE from ironic.drivers import base from ironic.drivers.modules.drac import common as drac_common from ironic.drivers.modules.drac import resource_uris from ironic.openstack.common import log as logging pywsman = importutils.try_import('pywsman') LOG = logging.getLogger(__name__) _BOOT_DEVICES_MAP = { boot_devices.DISK: 'HardDisk', boot_devices.PXE: 'NIC', boot_devices.CDROM: 'Optical', } # IsNext constants PERSISTENT = '1' """ Is the next boot config the system will use. """ NOT_NEXT = '2' """ Is not the next boot config the system will use. """ ONE_TIME_BOOT = '3' """ Is the next boot config the system will use, one time boot only. """ def _get_next_boot_mode(node): """Get the next boot mode. To see a list of supported boot modes see: http://goo.gl/aEsvUH (Section 7.2) :param node: an ironic node object. :raises: DracClientError on an error from pywsman library. :returns: a dictionary containing: :instance_id: the instance id of the boot device. :is_next: whether it's the next device to boot or not. One of PERSISTENT, NOT_NEXT, ONE_TIME_BOOT constants. """ client = drac_common.get_wsman_client(node) options = pywsman.ClientOptions() filter_query = ('select * from DCIM_BootConfigSetting where IsNext=%s ' 'or IsNext=%s' % (PERSISTENT, ONE_TIME_BOOT)) try: doc = client.wsman_enumerate(resource_uris.DCIM_BootConfigSetting, options, filter_query=filter_query) except exception.DracClientError as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('DRAC driver failed to get next boot mode for ' 'node %(node_uuid)s. Reason: %(error)s.'), {'node_uuid': node.uuid, 'error': exc}) items = drac_common.find_xml(doc, 'DCIM_BootConfigSetting', resource_uris.DCIM_BootConfigSetting, find_all=True) # This list will have 2 items maximum, one for the persistent element # and another one for the OneTime if set boot_mode = None for i in items: instance_id = drac_common.find_xml(i, 'InstanceID', resource_uris.DCIM_BootConfigSetting).text is_next = drac_common.find_xml(i, 'IsNext', resource_uris.DCIM_BootConfigSetting).text boot_mode = {'instance_id': instance_id, 'is_next': is_next} # If OneTime is set we should return it, because that's # where the next boot device is if is_next == ONE_TIME_BOOT: break return boot_mode def _create_config_job(node): """Create a configuration job. This method is used to apply the pending values created by set_boot_device(). :param node: an ironic node object. :raises: DracClientError on an error from pywsman library. :raises: DracConfigJobCreationError on an error when creating the job. 
""" client = drac_common.get_wsman_client(node) options = pywsman.ClientOptions() options.add_selector('CreationClassName', 'DCIM_BIOSService') options.add_selector('Name', 'DCIM:BIOSService') options.add_selector('SystemCreationClassName', 'DCIM_ComputerSystem') options.add_selector('SystemName', 'DCIM:ComputerSystem') options.add_property('Target', 'BIOS.Setup.1-1') options.add_property('ScheduledStartTime', 'TIME_NOW') doc = client.wsman_invoke(resource_uris.DCIM_BIOSService, options, 'CreateTargetedConfigJob') return_value = drac_common.find_xml(doc, 'ReturnValue', resource_uris.DCIM_BIOSService).text # NOTE(lucasagomes): Possible return values are: RET_ERROR for error # or RET_CREATED job created (but changes will be # applied after the reboot) # Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.4) if return_value == drac_common.RET_ERROR: error_message = drac_common.find_xml(doc, 'Message', resource_uris.DCIM_BIOSService).text raise exception.DracConfigJobCreationError(error=error_message) def _check_for_config_job(node): """Check if a configuration job is already created. :param node: an ironic node object. :raises: DracClientError on an error from pywsman library. :raises: DracConfigJobCreationError if the job is already created. """ client = drac_common.get_wsman_client(node) options = pywsman.ClientOptions() try: doc = client.wsman_enumerate(resource_uris.DCIM_LifecycleJob, options) except exception.DracClientError as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('DRAC driver failed to list the configuration jobs ' 'for node %(node_uuid)s. Reason: %(error)s.'), {'node_uuid': node.uuid, 'error': exc}) items = drac_common.find_xml(doc, 'DCIM_LifecycleJob', resource_uris.DCIM_LifecycleJob, find_all=True) for i in items: name = drac_common.find_xml(i, 'Name', resource_uris.DCIM_LifecycleJob) if 'BIOS.Setup.1-1' not in name.text: continue job_status = drac_common.find_xml(i, 'JobStatus', resource_uris.DCIM_LifecycleJob).text # If job is already completed or failed we can # create another one. # Job Control Documentation: http://goo.gl/o1dDD3 (Section 7.2.3.2) if job_status.lower() not in ('completed', 'failed'): job_id = drac_common.find_xml(i, 'InstanceID', resource_uris.DCIM_LifecycleJob).text reason = (_('Another job with ID "%s" is already created ' 'to configure the BIOS. Wait until existing job ' 'is completed or is cancelled') % job_id) raise exception.DracConfigJobCreationError(error=reason) class DracManagement(base.ManagementInterface): def get_properties(self): return drac_common.COMMON_PROPERTIES def validate(self, task): """Validate the driver-specific info supplied. This method validates whether the 'driver_info' property of the supplied node contains the required information for this driver to manage the node. :param task: a TaskManager instance containing the node to act on. :raises: InvalidParameterValue if required driver_info attribute is missing or invalid on the node. """ return drac_common.parse_driver_info(task.node) def get_supported_boot_devices(self): """Get a list of the supported boot devices. :returns: A list with the supported boot devices defined in :mod:`ironic.common.boot_devices`. """ return list(_BOOT_DEVICES_MAP.keys()) def set_boot_device(self, task, device, persistent=False): """Set the boot device for a node. Set the boot device to use on next reboot of the node. :param task: a task from TaskManager. :param device: the boot device, one of :mod:`ironic.common.boot_devices`. :param persistent: Boolean value. 
True if the boot device will persist to all future boots, False if not. Default: False. :raises: DracClientError on an error from pywsman library. :raises: InvalidParameterValue if an invalid boot device is specified. :raises: DracConfigJobCreationError on an error when creating the job. """ # Check for an existing configuration job _check_for_config_job(task.node) client = drac_common.get_wsman_client(task.node) options = pywsman.ClientOptions() filter_query = ("select * from DCIM_BootSourceSetting where " "InstanceID like '%%#%s%%'" % _BOOT_DEVICES_MAP[device]) try: doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting, options, filter_query=filter_query) except exception.DracClientError as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('DRAC driver failed to set the boot device ' 'for node %(node_uuid)s. Can\'t find the ID ' 'for the %(device)s type. Reason: %(error)s.'), {'node_uuid': task.node.uuid, 'error': exc, 'device': device}) instance_id = drac_common.find_xml(doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting).text source = 'OneTime' if persistent: source = drac_common.find_xml(doc, 'BootSourceType', resource_uris.DCIM_BootSourceSetting).text # NOTE(lucasagomes): Don't ask me why 'BootSourceType' is set # for 'InstanceID' and 'InstanceID' is set for 'source'! You # know enterprisey... options = pywsman.ClientOptions() options.add_selector('InstanceID', source) options.add_property('source', instance_id) doc = client.wsman_invoke(resource_uris.DCIM_BootConfigSetting, options, 'ChangeBootOrderByInstanceID') return_value = drac_common.find_xml(doc, 'ReturnValue', resource_uris.DCIM_BootConfigSetting).text # NOTE(lucasagomes): Possible return values are: RET_ERROR for error, # RET_SUCCESS for success or RET_CREATED job # created (but changes will be applied after # the reboot) # Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.7) if return_value == drac_common.RET_ERROR: error_message = drac_common.find_xml(doc, 'Message', resource_uris.DCIM_BootConfigSetting).text raise exception.DracOperationError(operation='set_boot_device', error=error_message) # Create a configuration job _create_config_job(task.node) def get_boot_device(self, task): """Get the current boot device for a node. Returns the current boot device of the node. :param task: a task from TaskManager. :raises: DracClientError on an error from pywsman library. :returns: a dictionary containing: :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. :persistent: Whether the boot device will persist to all future boots or not, None if it is unknown. """ client = drac_common.get_wsman_client(task.node) boot_mode = _get_next_boot_mode(task.node) persistent = boot_mode['is_next'] == PERSISTENT instance_id = boot_mode['instance_id'] options = pywsman.ClientOptions() filter_query = ('select * from DCIM_BootSourceSetting where ' 'PendingAssignedSequence=0 and ' 'BootSourceType="%s"' % instance_id) try: doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting, options, filter_query=filter_query) except exception.DracClientError as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('DRAC driver failed to get the current boot ' 'device for node %(node_uuid)s. 
' 'Reason: %(error)s.'), {'node_uuid': task.node.uuid, 'error': exc}) instance_id = drac_common.find_xml(doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting).text boot_device = next((key for (key, value) in _BOOT_DEVICES_MAP.items() if value in instance_id), None) return {'boot_device': boot_device, 'persistent': persistent} def get_sensors_data(self, task): """Get sensors data. :param task: a TaskManager instance. :raises: FailedToGetSensorData when getting the sensor data fails. :raises: FailedToParseSensorData when parsing sensor data fails. :returns: returns a consistent format dict of sensor data grouped by sensor type, which can be processed by Ceilometer. """ raise NotImplementedError()
1.734375
2
matrix_diagonalization/finite_barrier_square_well.py
coherent17/physics_calculation
1
12111
import numpy as np
import matplotlib.pyplot as plt

#grid number on half space (without the origin)
N=150

#total grid number = 2*N + 1 (with origin)
N_g=2*N+1

#finite barrier potential value = 300 (meV)
potential_value=300

#building potential:
def potential(potential_value):
    V=np.zeros((1,N_g),dtype=float)
    V[0,0:100]=potential_value
    V[0,100:201]=0
    V[0,201:]=potential_value
    return V
#

#Hamiltonian matrix:
def Hamiltonian(V):
    H=np.zeros((N_g,N_g),dtype=float)
    dx=10    #0.1 (nanometer)
    for i in range(0,N_g):
        for j in range(0,N_g):
            if i==j:
                x=dx*(i-N)    #position
                H[i,j]=1/(dx**2)+V[0,i]
            elif j==i-1 or j==i+1:
                H[i,j]=-0.5/(dx**2)
    return H

V=potential(potential_value)
H=Hamiltonian(V)

#sort the eigenvalue and get the corresponding eigenvector
eigenvalue,eigenvector=np.linalg.eig(H)
idx=np.argsort(eigenvalue)
eigenvalue=eigenvalue[idx]
eigenvector=eigenvector[:,idx]

#visualize
fig=plt.figure(figsize=(18,6))

ax1=fig.add_subplot(131)
x=np.linspace(0,10,11)
ax1.plot(x,eigenvalue[0:11],'r.',label='numerical')
ax1.set_xlabel('n')
ax1.set_ylabel('$E_n (meV)$')
ax1.set_title('eigen energies')
ax1.grid(True)
ax1.legend()

ax2=fig.add_subplot(132)
x=np.linspace(-5,5,301)

#x/lamda_0
x=x/(np.sqrt(2)*10**(10-9)/np.pi)

y1=eigenvector[:,0]
y2=eigenvector[:,1]
y3=eigenvector[:,2]
y4=eigenvector[:,3]
y5=eigenvector[:,4]

ax2.plot(x,(y1),label='$Ψ_{n=0}(x)$')
ax2.plot(x,(y2),label='$Ψ_{n=1}(x)$')
ax2.plot(x,(y3),label='$Ψ_{n=2}(x)$')
ax2.set_xlabel('position ($x/λ_0$) ')
ax2.set_ylabel('wavefunction')
ax2.set_title('wave function in different eigen state')
ax2.legend()
ax2.grid(True)

ax3=fig.add_subplot(133)
ax3.plot(x,(y1**2),label='$Ψ^2_{n=0}(x)$')
ax3.plot(x,(y2**2),label='$Ψ^2_{n=1}(x)$')
ax3.plot(x,(y3**2),label='$Ψ^2_{n=2}(x)$')
ax3.set_xlabel('position ($x/λ_0$) ')
ax3.set_ylabel('square wavefunction')
ax3.set_title('probability distribution in finite barrier well')
ax3.grid(True)
ax3.legend()

plt.show()
2.890625
3
SVDD/__init__.py
SolidusAbi/SVDD-Python
0
12112
<filename>SVDD/__init__.py
from .BaseSVDD import BaseSVDD
1
1
setup.py
SteveLTN/iex-api-python
0
12113
<reponame>SteveLTN/iex-api-python<filename>setup.py
import setuptools
import glob
import os

required = [
    "requests",
    "pandas",
    "arrow",
    "socketIO-client-nexus"
]

setuptools.setup(name='iex-api-python',
                 version="0.0.5",
                 description='Fetch data from the IEX API',
                 long_description=open('README.md').read().strip(),
                 author='<NAME>',
                 author_email='<EMAIL>',
                 url='http://www.github.com/danielecook/iex-api-python',
                 packages=['iex'],
                 install_requires=required,
                 keywords=['finance', 'stock', 'market', 'market-data', 'IEX', 'API'],
                 license='MIT License',
                 zip_safe=False)
1.640625
2
src/models/train_model.py
4c697361/e-commerce
3
12114
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv

from keras.callbacks import ModelCheckpoint, EarlyStopping

import src.utils.utils as ut
import src.utils.model_utils as mu
import src.models.model as md
import src.models.data_generator as dg
import src.data.dataframe as dat


def train(classmode, modelmode, batch_size, epochs, learning_rate):
    train = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.train_df))
    nclasses = mu.ref_n_classes(classmode)

    valid = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.valid_df))

    traindata = dg.DataSequence(train,
                                ut.dirs.train_dir,
                                batch_size=batch_size,
                                classmode=classmode,
                                modelmode=modelmode)
    validdata = dg.DataSequence(valid,
                                ut.dirs.validation_dir,
                                batch_size=batch_size,
                                classmode=classmode,
                                modelmode=modelmode)

    model = md.custom(classmode, modelmode, nclasses).make_compiled_model(learning_rate)
    model.summary()

    save_model_to = os.path.join(ut.dirs.model_dir, classmode + '_' + modelmode + '.h5')

    Checkpoint = ModelCheckpoint(save_model_to,
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)
    Earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=5,
                              verbose=0,
                              mode='auto',
                              baseline=None)

    model.fit_generator(generator=traindata,
                        steps_per_epoch=len(train)//batch_size,
                        validation_data=validdata,
                        validation_steps=len(valid)//batch_size,
                        epochs=epochs,
                        callbacks=[mu.TrainValTensorBoard(write_graph=False), Checkpoint],
                        #verbose=1,
                        use_multiprocessing=False,
                        workers=1)


@click.command()
@click.option('--classmode', type=str, default=ut.params.classmode,
              help='choose a classmode:\n\
                    multilabel, multiclass\n\
                    (default: multilabel)')
@click.option('--modelmode', type=str, default=ut.params.modelmode,
              help='choose a modelmode:\n\
                    image, text, combined\n\
                    (default: combined)')
@click.option('--ep', type=float, default=ut.params.epochs,
              help='number of epochs (default: {})'.
              format(ut.params.epochs))
@click.option('--lr', type=float, default=ut.params.learning_rate,
              help='learning rate (default: {})'.
              format(ut.params.learning_rate))
@click.option('--bs', type=int, default=ut.params.batch_size,
              help='batch size (default: {})'.
              format(ut.params.batch_size))
def main(classmode, modelmode, bs, ep, lr):
    classmode, modelmode = ut.check_modes(classmode, modelmode)
    train(classmode, modelmode, bs, ep, lr)


if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    project_dir = Path(__file__).resolve().parents[2]

    load_dotenv(find_dotenv())

    main()
2.03125
2
Jobs/pm_match.py
Shantanu48114860/DPN-SA
2
12115
<gh_stars>1-10
"""
MIT License

Copyright (c) 2020 <NAME>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy
from matplotlib import pyplot
import pandas as pd
import os

from Propensity_socre_network import Propensity_socre_network
from Utils import Utils
from dataloader import DataLoader


def draw(treated_ps_list, control_ps_list, bins1):
    pyplot.hist(treated_ps_list, bins1, alpha=0.5, label='treated')
    pyplot.hist(control_ps_list, bins1, alpha=0.5, label='control')
    pyplot.legend(loc='upper right')
    pyplot.show()


csv_path = "Dataset/ihdp_sample.csv"
# 139 treated
# 747 - 139 = 608 control
# 747 total
split_size = 0.8
device = Utils.get_device()
dL = DataLoader()
np_covariates_X_train, np_covariates_X_test, np_covariates_Y_train, np_covariates_Y_test = \
    dL.preprocess_data_from_csv(csv_path, split_size)

ps_train_set = dL.convert_to_tensor(np_covariates_X_train, np_covariates_Y_train)

train_parameters_NN = {
    "epochs": 75,
    "lr": 0.001,
    "batch_size": 32,
    "shuffle": True,
    "train_set": ps_train_set,
    "model_save_path": "./Propensity_Model/NN_PS_model_iter_id_" + str(1) + "_epoch_{0}_lr_{1}.pth"
}
# ps using NN
ps_net_NN = Propensity_socre_network()
print("############### Propensity Score neural net Training ###############")
ps_net_NN.train(train_parameters_NN, device, phase="train")

# eval
eval_parameters_NN = {
    "eval_set": ps_train_set,
    "model_path": "./Propensity_Model/NN_PS_model_iter_id_{0}_epoch_75_lr_0.001.pth"
        .format(1)
}
ps_score_list_NN = ps_net_NN.eval_return_complete_list(eval_parameters_NN, device, phase="eval")

treated_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 1]
control_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 0]

for ps_dict in treated_ps_list:
    print(ps_dict)
print("--------------")
for ps_dict in control_ps_list:
    print(ps_dict)

print("treated: " + str(len(treated_ps_list)))
print("control: " + str(len(control_ps_list)))
print("total: " + str(len(treated_ps_list) + len(control_ps_list)))

bins1 = numpy.linspace(0, 1, 100)
bins2 = numpy.linspace(0, 0.2, 100)
bins3 = numpy.linspace(0.2, 0.5, 100)
bins4 = numpy.linspace(0.5, 1, 100)

draw(treated_ps_list, control_ps_list, bins1)
draw(treated_ps_list, control_ps_list, bins2)
draw(treated_ps_list, control_ps_list, bins3)
draw(treated_ps_list, control_ps_list, bins4)
1.679688
2
varcode/effects/effect_prediction.py
openvax/varcode
39
12116
# Copyright (c) 2016-2019. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division, absolute_import import logging from Bio.Seq import reverse_complement from pyensembl import Transcript from ..common import groupby_field from .transcript_helpers import interval_offset_on_transcript from .effect_helpers import changes_exonic_splice_site from .effect_collection import EffectCollection from .effect_prediction_coding import predict_variant_coding_effect_on_transcript from .effect_classes import ( Failure, Intergenic, Intragenic, NoncodingTranscript, IncompleteTranscript, FivePrimeUTR, ThreePrimeUTR, Intronic, IntronicSpliceSite, SpliceAcceptor, SpliceDonor, StartLoss, ExonLoss, ExonicSpliceSite, ) logger = logging.getLogger(__name__) def predict_variant_effects(variant, raise_on_error=False): """Determine the effects of a variant on any transcripts it overlaps. Returns an EffectCollection object. Parameters ---------- variant : Variant raise_on_error : bool Raise an exception if we encounter an error while trying to determine the effect of this variant on a transcript, or simply log the error and continue. """ # if this variant isn't overlapping any genes, return a # Intergenic effect # TODO: look for nearby genes and mark those as Upstream and Downstream # effects try: gene_ids = variant.gene_ids transcripts = variant.transcripts except: if raise_on_error: raise else: return [] if len(gene_ids) == 0: effects = [Intergenic(variant)] else: # list of all MutationEffects for all genes & transcripts effects = [] # group transcripts by their gene ID transcripts_grouped_by_gene = \ groupby_field(transcripts, 'gene_id') # want effects in the list grouped by the gene they come from for gene_id in sorted(gene_ids): if gene_id not in transcripts_grouped_by_gene: # intragenic variant overlaps a gene but not any transcripts gene = variant.genome.gene_by_id(gene_id) effects.append(Intragenic(variant, gene)) else: # gene ID has transcripts overlapped by this variant for transcript in transcripts_grouped_by_gene[gene_id]: if raise_on_error: effect = predict_variant_effect_on_transcript( variant=variant, transcript=transcript) else: effect = predict_variant_effect_on_transcript_or_failure( variant=variant, transcript=transcript) effects.append(effect) return EffectCollection(effects) def predict_variant_effect_on_transcript_or_failure(variant, transcript): """ Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values. """ try: return predict_variant_effect_on_transcript( variant=variant, transcript=transcript) except (AssertionError, ValueError) as error: logger.warn( "Encountered error annotating %s for %s: %s", variant, transcript, error) return Failure(variant, transcript) def predict_variant_effect_on_transcript(variant, transcript): """Return the transcript effect (such as FrameShift) that results from applying this genomic variant to a particular transcript. 
Parameters ---------- transcript : Transcript Transcript we're going to apply mutation to. """ if transcript.__class__ is not Transcript: raise TypeError( "Expected %s : %s to have type Transcript" % ( transcript, type(transcript))) # check for non-coding transcripts first, since # every non-coding transcript is "incomplete". if not transcript.is_protein_coding: return NoncodingTranscript(variant, transcript) if not transcript.complete: return IncompleteTranscript(variant, transcript) # since we're using inclusive base-1 coordinates, # checking for overlap requires special logic for insertions is_insertion = variant.is_insertion # determine if any exons are deleted, and if not, # what is the closest exon and how far is this variant # from that exon (overlapping the exon = 0 distance) completely_lost_exons = [] # list of which (exon #, Exon) pairs this mutation overlaps overlapping_exon_numbers_and_exons = [] distance_to_nearest_exon = float("inf") start_in_exon = False end_in_exon = False nearest_exon = None variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end for i, exon in enumerate(transcript.exons): if variant_start <= exon.start and variant_end >= exon.end: completely_lost_exons.append(exon) if is_insertion and exon.strand == "+" and variant_end == exon.end: # insertions after an exon don't overlap the exon distance = 1 elif is_insertion and exon.strand == "-" and variant_start == exon.start: distance = 1 else: distance = exon.distance_to_interval(variant_start, variant_end) if distance == 0: overlapping_exon_numbers_and_exons.append((i + 1, exon)) # start is contained in current exon if exon.start <= variant_start <= exon.end: start_in_exon = True # end is contained in current exon if exon.end >= variant_end >= exon.start: end_in_exon = True elif distance < distance_to_nearest_exon: distance_to_nearest_exon = distance nearest_exon = exon if len(overlapping_exon_numbers_and_exons) == 0: intronic_effect_class = choose_intronic_effect_class( variant=variant, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) return intronic_effect_class( variant=variant, transcript=transcript, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) elif len(completely_lost_exons) > 0 or ( len(overlapping_exon_numbers_and_exons) > 1): # if spanning multiple exons, or completely deleted an exon # then consider that an ExonLoss mutation exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons] return ExonLoss(variant, transcript, exons) assert len(overlapping_exon_numbers_and_exons) == 1 exon_number, exon = overlapping_exon_numbers_and_exons[0] exonic_effect_annotation = exonic_transcript_effect( variant, exon, exon_number, transcript) # simple case: both start and end are in the same if start_in_exon and end_in_exon: return exonic_effect_annotation elif isinstance(exonic_effect_annotation, ExonicSpliceSite): # if mutation bleeds over into intro but even just # the exonic portion got annotated as an exonic splice site # then return it return exonic_effect_annotation return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=exonic_effect_annotation) def choose_intronic_effect_class( variant, nearest_exon, distance_to_exon): """ Infer effect of variant which does not overlap any exon of the given transcript. 
""" assert distance_to_exon > 0, \ "Expected intronic effect to have distance_to_exon > 0, got %d" % ( distance_to_exon,) if nearest_exon.strand == "+": # if exon on positive strand start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or (variant.is_insertion and start_same) else: # if exon on negative strand end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or (variant.is_insertion and end_same) # distance cutoffs based on consensus splice sequences from # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/ # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # 3' splice site: YAG|R if distance_to_exon <= 2: if before_exon: # 2 last nucleotides of intron before exon are the splice # acceptor site, typically "AG" return SpliceAcceptor else: # 2 first nucleotides of intron after exon are the splice donor # site, typically "GT" return SpliceDonor elif not before_exon and distance_to_exon <= 6: # variants in nucleotides 3-6 at start of intron aren't as certain # to cause problems as nucleotides 1-2 but still implicated in # alternative splicing return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: # nucleotide -3 before exon is part of the 3' splicing # motif but allows for more degeneracy than the -2, -1 nucleotides return IntronicSpliceSite else: # intronic mutation unrelated to splicing return Intronic def exonic_transcript_effect(variant, exon, exon_number, transcript): """Effect of this variant on a Transcript, assuming we already know that this variant overlaps some exon of the transcript. Parameters ---------- variant : Variant exon : pyensembl.Exon Exon which this variant overlaps exon_number : int Index (starting from 1) of the given exon in the transcript's sequence of exons. 
transcript : pyensembl.Transcript """ genome_ref = variant.trimmed_ref genome_alt = variant.trimmed_alt variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end # clip mutation to only affect the current exon if variant_start < exon.start: # if mutation starts before current exon then only look # at nucleotides which overlap the exon logger.info('Mutation in variant %s starts before exon %s', variant, exon) assert len(genome_ref) > 0, "Unexpected insertion into intron" n_skip_start = exon.start - variant_start genome_ref = genome_ref[n_skip_start:] genome_alt = genome_alt[n_skip_start:] genome_start = exon.start else: genome_start = variant_start if variant_end > exon.end: # if mutation goes past exon end then only look at nucleotides # which overlap the exon logger.info('Mutation in variant %s ends after exon %s', variant, exon) n_skip_end = variant_end - exon.end genome_ref = genome_ref[:-n_skip_end] genome_alt = genome_alt[:len(genome_ref)] genome_end = exon.end else: genome_end = variant_end transcript_offset = interval_offset_on_transcript( genome_start, genome_end, transcript) if transcript.on_backward_strand: cdna_ref = reverse_complement(genome_ref) cdna_alt = reverse_complement(genome_alt) else: cdna_ref = genome_ref cdna_alt = genome_alt n_ref = len(cdna_ref) expected_ref = str( transcript.sequence[transcript_offset:transcript_offset + n_ref]) if cdna_ref != expected_ref: raise ValueError( ("Found ref nucleotides '%s' in sequence" " of %s at offset %d (chromosome positions %d:%d)" " but variant %s has '%s'") % ( expected_ref, transcript, transcript_offset, genome_start, genome_end, variant, cdna_ref)) utr5_length = min(transcript.start_codon_spliced_offsets) # does the variant start inside the 5' UTR? if utr5_length > transcript_offset: # does the variant end after the 5' UTR, within the coding region? if utr5_length < transcript_offset + n_ref: # TODO: we *might* lose the Kozak sequence or the start codon # but without looking at the modified sequence how can we tell # for sure that this is a start-loss variant? return StartLoss(variant, transcript) else: # if variant contained within 5' UTR return FivePrimeUTR(variant, transcript) utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1 if transcript_offset >= utr3_offset: return ThreePrimeUTR(variant, transcript) exon_start_offset = interval_offset_on_transcript( exon.start, exon.end, transcript) exon_end_offset = exon_start_offset + len(exon) - 1 # Further below we're going to try to predict exonic splice site # modifications, which will take this effect_annotation as their # alternative hypothesis for what happens if splicing doesn't change. # If the mutation doesn't affect an exonic splice site, then # we'll just return this effect. coding_effect_annotation = predict_variant_coding_effect_on_transcript( variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref, trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset) if changes_exonic_splice_site( transcript=transcript, transcript_ref=cdna_ref, transcript_alt=cdna_alt, transcript_offset=transcript_offset, exon_start_offset=exon_start_offset, exon_end_offset=exon_end_offset, exon_number=exon_number): return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=coding_effect_annotation) return coding_effect_annotation
1.820313
2
configs.py
platonic-realm/UM-PDD
0
12117
# OS-Level Imports import os import sys import multiprocessing from multiprocessing import cpu_count # Library Imports import tensorflow as tf from tensorflow.keras import mixed_precision from tensorflow.python.distribute.distribute_lib import Strategy # Internal Imports from Utils.enums import Environment, Accelerator # Global Configuration Variables environment = Environment.GoogleColab accelerator = Accelerator.GPU strategy = None cpu_no = multiprocessing.cpu_count() batch_size = 64 latent_dim = 100 epochs = 10 supervised_samples_ratio = 0.05 save_interval = 17 super_batches = 1 unsuper_batches = 1 prefetch_no = tf.data.AUTOTUNE eager_execution = True model_summary = False resume_training = False result_path = './results/' dataset_path = './dataset/' def parse_args(): global environment global accelerator global batch_size global latent_dim global epochs global supervised_samples_ratio global save_interval global super_batches global unsuper_batches global prefetch_no global eager_execution global model_summary for arg in sys.argv: if arg.lower().__contains__("envi"): param = arg[arg.index("=") + 1:] if param.lower() == "local": environment = Environment.Local elif param.lower() == "colab": environment = Environment.GoogleColab elif param.lower() == "research": environment = Environment.GoogleResearch if arg.lower().__contains__("accel"): param = arg[arg.index("=") + 1:] if param.lower() == "gpu": accelerator = Accelerator.GPU elif param.lower() == "tpu": accelerator = Accelerator.TPU if arg.lower().__contains__("batch"): param = arg[arg.index("=") + 1:] batch_size = int(param) if arg.lower().__contains__("epoch"): param = arg[arg.index("=") + 1:] epochs = int(param) if arg.lower().__contains__("sample_ratio"): param = arg[arg.index("=") + 1:] supervised_samples_ratio = float(param) if arg.lower().__contains__("save_interval"): param = arg[arg.index("=") + 1:] save_interval = int(param) if arg.lower().__contains__("super_batches"): param = arg[arg.index("=") + 1:] super_batches = int(param) if arg.lower().__contains__("unsuper_batches"): param = arg[arg.index("=") + 1:] unsuper_batches = int(param) if arg.lower().__contains__("eager"): param = arg[arg.index("=") + 1:] if param.lower().__contains__("false"): eager_execution = False else: eager_execution = True if arg.lower().__contains__("model_sum"): param = arg[arg.index("=") + 1:] if param.lower().__contains__("false"): model_summery = False else: model_summery = True def print_args(): global environment global accelerator global batch_size global latent_dim global epochs global supervised_samples_ratio global save_interval global super_batches global unsuper_batches global prefetch_no global eager_execution global model_summary print(environment) print(accelerator) print("Batch Size: ", batch_size) print("Epochs: ", epochs) print("Supervised Ratio: ", supervised_samples_ratio) print("Save Interval: ", save_interval) print("Supervised Batches per Interval: ", super_batches) print("Unsupervised Batches per Interval: ", unsuper_batches) print("Eager Execution: ", eager_execution) print("Print Model Summery: ", model_summary) def configure(enable_xla: bool = True, print_device_placement: bool = False, enable_eager_execution: bool = True, only_cpu: bool = False, enable_memory_growth: bool = True, enable_mixed_float16: bool = False): global environment global accelerator global batch_size global latent_dim global epochs global supervised_samples_ratio global save_interval global super_batches global unsuper_batches global prefetch_no 
global eager_execution global model_summary global strategy global result_path global dataset_path # Configurations ######################################################### # To enable xla compiler if enable_xla: os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' ######################################################### # To print out on which device operation is taking place if print_device_placement: tf.debugging.set_log_device_placement(True) ######################################################### # To disable eager execution and use graph functions if not enable_eager_execution: tf.compat.v1.disable_eager_execution() ######################################################### # To disable GPUs if only_cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' ######################################################### # Setting memory growth gpus = tf.config.list_physical_devices('GPU') if enable_memory_growth and gpus: try: tf.config.experimental.set_memory_growth(gpus[0], True) except Exception as ex: # Invalid device or cannot modify virtual devices once initialized. pass ######################################################### # Create 2 virtual GPUs with 1GB memory each # if gpus: # try: # tf.config.experimental.set_virtual_device_configuration( # gpus[0], # [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024), # tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)]) # logical_gpus = tf.config.experimental.list_logical_devices('GPU') # print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs") # except RuntimeError as e: # # Virtual devices must be set before GPUs have been initialized # print(e) ######################################################### # Using mixed_precision to activate Tensor Cores if enable_mixed_float16: mixed_precision.set_global_policy('mixed_float16') ######################################################### # Configurations # House keeping ######################################################### # Storing the default TF strategy, we will use it in case we don`t set our own strategy = tf.distribute.get_strategy() if environment == Environment.Local: accelerator = Accelerator.GPU if accelerator == Accelerator.TPU and \ (environment == Environment.GoogleColab or environment == Environment.GoogleResearch): resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') tf.config.experimental_connect_to_cluster(resolver) # This is the TPU initialization code that has to be called at the beginning of program. tf.tpu.experimental.initialize_tpu_system(resolver) print("TPUs: ", tf.config.list_logical_devices('TPU')) strategy = tf.distribute.TPUStrategy(resolver) if environment == Environment.GoogleColab and accelerator == Accelerator.GPU: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") dataset_path = '/content/drive/MyDrive/Share/UM-PDD/dataset/' result_path = '/content/drive/MyDrive/Share/UM-PDD/results/' if environment == Environment.GoogleColab and accelerator == Accelerator.TPU: dataset_path = '/content/dataset/' result_path = '/content/drive/MyDrive/Share/UM-PDD/results/' ######################################################### # House keeping
2.171875
2
Legacy/Audit_Sweep/daily_audit_cron.py
QualiSystemsLab/Power-Management
0
12118
<filename>Legacy/Audit_Sweep/daily_audit_cron.py
from power_audit import PowerAudit


def main():
    local = PowerAudit()
    local.full_audit()


if __name__ == '__main__':
    main()
1.421875
1
python/hayate/store/actions.py
tao12345666333/Talk-Is-Cheap
4
12119
from turbo.flux import Mutation, register, dispatch, register_dispatch

import mutation_types


@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
    pass


def decrease(rank):
    return dispatch('user', mutation_types.DECREASE, rank)
1.867188
2
data_service/api/data_api.py
statisticsnorway/microdata-data-service
0
12120
import logging import os import io from fastapi import APIRouter, Depends, Header from fastapi.responses import FileResponse, StreamingResponse from fastapi import HTTPException, status import pyarrow as pa import pyarrow.parquet as pq from data_service.api.query_models import ( InputTimePeriodQuery, InputTimeQuery, InputFixedQuery ) from data_service.config import config from data_service.api.response_models import ErrorMessage from data_service.config.config import get_settings from data_service.config.dependencies import get_processor from data_service.core.processor import Processor from data_service.api.auth import authorize_user data_router = APIRouter() log = logging.getLogger(__name__) @data_router.get("/data/resultSet", responses={ 204: {}, 404: {"model": ErrorMessage}}) def retrieve_result_set(file_name: str, authorization: str = Header(None), settings: config.BaseSettings = Depends(get_settings)): """ Stream a generated result parquet file. """ log.info( f"Entering /data/resultSet with request for file name: {file_name}" ) user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") file_path = ( f"{settings.RESULTSET_DIR}/{file_name}" ) if not os.path.isfile(file_path): log.warning(f"No file found for path: {file_path}") raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail='Result set not found' ) else: return FileResponse( file_path, media_type='application/octet-stream' ) @data_router.post("/data/event/generate-file", responses={404: {"model": ErrorMessage}}) def create_result_file_event(input_query: InputTimePeriodQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create result set of data with temporality type event, and write result to file. Returns name of file in response. """ log.info( f'Entering /data/event/generate-file with input query: {input_query}' ) user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_event_request(input_query) resultset_file_name = processor.write_table(result_data) log.info(f'File name for event result set: {resultset_file_name}') return { 'filename': resultset_file_name, } @data_router.post("/data/status/generate-file", responses={404: {"model": ErrorMessage}}) def create_result_file_status(input_query: InputTimeQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create result set of data with temporality type status, and write result to file. Returns name of file in response. """ log.info( f'Entering /data/status/generate-file with input query: {input_query}' ) user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_status_request(input_query) resultset_file_name = processor.write_table(result_data) log.info(f'File name for event result set: {resultset_file_name}') return { 'filename': resultset_file_name, } @data_router.post("/data/fixed/generate-file", responses={404: {"model": ErrorMessage}}) def create_file_result_fixed(input_query: InputFixedQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create result set of data with temporality type fixed, and write result to file. Returns name of file in response. 
""" log.info( f'Entering /data/fixed/generate-file with input query: {input_query}' ) user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_fixed_request(input_query) resultset_file_name = processor.write_table(result_data) log.info(f'File name for event result set: {resultset_file_name}') return { 'filename': resultset_file_name, } @data_router.post("/data/event/stream", responses={404: {"model": ErrorMessage}}) def stream_result_event(input_query: InputTimePeriodQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create Result set of data with temporality type event, and stream result as response. """ log.info(f'Entering /data/event/stream with input query: {input_query}') user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_event_request(input_query) buffer_stream = pa.BufferOutputStream() pq.write_table(result_data, buffer_stream) return StreamingResponse( io.BytesIO(buffer_stream.getvalue().to_pybytes()) ) @data_router.post("/data/status/stream", responses={404: {"model": ErrorMessage}}) def stream_result_status(input_query: InputTimeQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create result set of data with temporality type status, and stream result as response. """ log.info(f'Entering /data/status/stream with input query: {input_query}') user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_status_request(input_query) buffer_stream = pa.BufferOutputStream() pq.write_table(result_data, buffer_stream) return StreamingResponse( io.BytesIO(buffer_stream.getvalue().to_pybytes()) ) @data_router.post("/data/fixed/stream", responses={404: {"model": ErrorMessage}}) def stream_result_fixed(input_query: InputFixedQuery, authorization: str = Header(None), processor: Processor = Depends(get_processor)): """ Create result set of data with temporality type fixed, and stream result as response. """ log.info(f'Entering /data/fixed/stream with input query: {input_query}') user_id = authorize_user(authorization) log.info(f"Authorized token for user: {user_id}") result_data = processor.process_fixed_request(input_query) buffer_stream = pa.BufferOutputStream() pq.write_table(result_data, buffer_stream) return StreamingResponse( io.BytesIO(buffer_stream.getvalue().to_pybytes()) )
2.21875
2
lib/parser/augur/Bonus.py
Innoviox/QuizDB
0
12121
<filename>lib/parser/augur/Bonus.py
from utils import sanitize


class Bonus:
    def __init__(self, number, leadin="", texts=None, answers=None,
                 category="", subcategory="", tournament="", round=""):
        self.number = number
        self.leadin = leadin
        self.texts = texts
        self.answers = answers
        self.category = category
        self.subcategory = subcategory
        self.tournament = tournament
        self.round = round

        if texts is None:
            self.texts = []
        if answers is None:
            self.answers = []

    def has_content(self):
        if len(self.texts) == 0 and len(self.answers) == 0:
            return False
        if len(self.texts) == 0 or len(self.answers) == 0:
            print("Discrepancy in Bonus %d" % self.number)
            return False
        return self.texts[0].strip() != "" or self.answers[0].strip() != ""

    def to_dict(self):
        return {
            "number": self.number,
            "leadin": self.leadin,
            "formatted_texts": self.texts,
            "formatted_answers": self.answers,
            "texts": [sanitize(t, valid_tags=[]) for t in self.texts],
            "answers": [sanitize(a, valid_tags=[]) for a in self.answers],
            "category": self.category,
            "subcategory": self.subcategory,
            "tournament": self.tournament,
            "round": self.round
        }

    def __str__(self):
        return str(self.to_dict())

    def is_valid(self):
        return (self.leadin.strip() != "" and
                len(self.texts) == 3 and
                len(self.answers) == 3 and
                all(text.strip() != "" for text in self.texts) and
                all(answer.strip() != "" for answer in self.answers))

    def content(self):
        text = self.leadin
        for i in range(3):
            if len(self.texts) > i:
                text += " " + self.texts[i] + " ANSWER: " + self.answers[i]
        return text
2.953125
3
train_folds.py
wubinbai/argus-freesound
1
12122
import json import argparse from argus.callbacks import MonitorCheckpoint, \ EarlyStopping, LoggingToFile, ReduceLROnPlateau from torch.utils.data import DataLoader from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb from src.transforms import get_transforms from src.argus_models import FreesoundModel from src.utils import load_noisy_data, load_folds_data from src import config parser = argparse.ArgumentParser() parser.add_argument('--experiment', required=True, type=str) args = parser.parse_args() BATCH_SIZE = 128 CROP_SIZE = 256 DATASET_SIZE = 128 * 256 NOISY_PROB = 0.01 CORR_NOISY_PROB = 0.42 MIXER_PROB = 0.8 WRAP_PAD_PROB = 0.5 CORRECTIONS = True if config.kernel: NUM_WORKERS = 2 else: NUM_WORKERS = 8 SAVE_DIR = config.experiments_dir / args.experiment PARAMS = { 'nn_module': ('AuxSkipAttention', { 'num_classes': len(config.classes), 'base_size': 64, 'dropout': 0.4, 'ratio': 16, 'kernel_size': 7, 'last_filters': 8, 'last_fc': 4 }), 'loss': ('OnlyNoisyLSoftLoss', { 'beta': 0.7, 'noisy_weight': 0.5, 'curated_weight': 0.5 }), 'optimizer': ('Adam', {'lr': 0.0009}), 'device': 'cuda', 'aux': { 'weights': [1.0, 0.4, 0.2, 0.1] }, 'amp': { 'opt_level': 'O2', 'keep_batchnorm_fp32': True, 'loss_scale': "dynamic" } } def train_fold(save_dir, train_folds, val_folds, folds_data, noisy_data, corrected_noisy_data): train_transfrom = get_transforms(train=True, size=CROP_SIZE, wrap_pad_prob=WRAP_PAD_PROB, resize_scale=(0.8, 1.0), resize_ratio=(1.7, 2.3), resize_prob=0.33, spec_num_mask=2, spec_freq_masking=0.15, spec_time_masking=0.20, spec_prob=0.5) mixer = RandomMixer([ SigmoidConcatMixer(sigmoid_range=(3, 12)), AddMixer(alpha_dist='uniform') ], p=[0.6, 0.4]) mixer = UseMixerWithProb(mixer, prob=MIXER_PROB) curated_dataset = FreesoundDataset(folds_data, train_folds, transform=train_transfrom, mixer=mixer) noisy_dataset = FreesoundNoisyDataset(noisy_data, transform=train_transfrom, mixer=mixer) corr_noisy_dataset = FreesoundCorrectedNoisyDataset(corrected_noisy_data, transform=train_transfrom, mixer=mixer) dataset_probs = [NOISY_PROB, CORR_NOISY_PROB, 1 - NOISY_PROB - CORR_NOISY_PROB] print("Dataset probs", dataset_probs) print("Dataset lens", len(noisy_dataset), len(corr_noisy_dataset), len(curated_dataset)) train_dataset = RandomDataset([noisy_dataset, corr_noisy_dataset, curated_dataset], p=dataset_probs, size=DATASET_SIZE) val_dataset = FreesoundDataset(folds_data, val_folds, get_transforms(False, CROP_SIZE)) train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, num_workers=NUM_WORKERS) val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE * 2, shuffle=False, num_workers=NUM_WORKERS) model = FreesoundModel(PARAMS) callbacks = [ MonitorCheckpoint(save_dir, monitor='val_lwlrap', max_saves=1), ReduceLROnPlateau(monitor='val_lwlrap', patience=6, factor=0.6, min_lr=1e-8), EarlyStopping(monitor='val_lwlrap', patience=18), LoggingToFile(save_dir / 'log.txt'), ] model.fit(train_loader, val_loader=val_loader, max_epochs=700, callbacks=callbacks, metrics=['multi_accuracy', 'lwlrap']) if __name__ == "__main__": if not SAVE_DIR.exists(): SAVE_DIR.mkdir(parents=True, exist_ok=True) else: print(f"Folder {SAVE_DIR} already exists.") with open(SAVE_DIR / 'source.py', 'w') as outfile: outfile.write(open(__file__).read()) print("Model params", PARAMS) with open(SAVE_DIR / 
'params.json', 'w') as outfile: json.dump(PARAMS, outfile) folds_data = load_folds_data(use_corrections=CORRECTIONS) noisy_data = load_noisy_data() corrected_noisy_data = get_corrected_noisy_data() for fold in config.folds: val_folds = [fold] train_folds = list(set(config.folds) - set(val_folds)) save_fold_dir = SAVE_DIR / f'fold_{fold}' print(f"Val folds: {val_folds}, Train folds: {train_folds}") print(f"Fold save dir {save_fold_dir}") train_fold(save_fold_dir, train_folds, val_folds, folds_data, noisy_data, corrected_noisy_data)
1.734375
2
source_code/day001/input-exercise.py
MKutka/100daysofcode
0
12123
#Day 1.3 Exercise!!

#First way I thought to do it without help
name = input("What is your name? ")
print(len(name))

#Way I found to do it from searching google
print(len(input("What is your name? ")))
4.1875
4
pyvalidator/is_strong_password.py
theteladras/py.validator
15
12124
from typing import TypedDict from .utils.Classes.String import String from .utils.assert_string import assert_string from .utils.merge import merge class _IsStrongPasswordOptions(TypedDict): min_length: int min_uppercase: int min_lowercase: int min_numbers: int min_symbols: int return_score: bool points_per_unique: int points_per_repeat: float points_for_containing_upper: int points_for_containing_lower: int points_for_containing_number: int points_for_containing_symbol: int class _Analysis(TypedDict): length: int unique_chars: int uppercase_count: int lowercase_count: int number_count: int symbol_count: int default_options: _IsStrongPasswordOptions = { "min_length": 8, "min_uppercase": 1, "min_lowercase": 1, "min_numbers": 1, "min_symbols": 1, "return_score": False, "points_per_unique": 1, "points_per_repeat": 0.5, "points_for_containing_lower": 10, "points_for_containing_upper": 10, "points_for_containing_number": 10, "points_for_containing_symbol": 10, } def count_chars(pw: String): result = {} for char in pw: if char in result: result[char] += result[char] + 1 else: result[char] = 1 return result def analyze_password(pw: String) -> _Analysis: upper_case_regex = r"^[A-Z]$" lower_case_regex = r"^[a-z]$" number_regex = r"^[0-9]$" symbol_regex = r"^[-#!$@%^&*()_+|~=`{}\[\]:\";'<>?,./ ]$" char_map = count_chars(pw) analysis: _Analysis = { "length": pw.length, "unique_chars": len([*char_map]), "uppercase_count": 0, "lowercase_count": 0, "number_count": 0, "symbol_count": 0, } for char in [*char_map]: char = String(char) if char.match(upper_case_regex): analysis["uppercase_count"] += char_map[char] elif char.match(lower_case_regex): analysis["lowercase_count"] += char_map[char] elif char.match(number_regex): analysis["number_count"] += char_map[char] elif char.match(symbol_regex): analysis["symbol_count"] += char_map[char] return analysis def score_password(analysis: _Analysis, options: _IsStrongPasswordOptions): points = 0 points += analysis["unique_chars"] * options["points_per_unique"] points += (analysis["length"] - analysis["unique_chars"]) * options["points_per_unique"] if analysis["uppercase_count"] > 0: points += options["points_for_containing_upper"] if analysis["lowercase_count"] > 0: points += options["points_for_containing_lower"] if analysis["number_count"] > 0: points += options["points_for_containing_number"] if analysis["symbol_count"] > 0: points += options["points_for_containing_symbol"] return points def is_strong_password(input: str, options: _IsStrongPasswordOptions = {}) -> bool: input = assert_string(input) options = merge(options, default_options) analysis = analyze_password(input) if options["return_score"]: return score_password(analysis, options) return ( analysis["length"] >= options["min_length"] and analysis["uppercase_count"] >= options["min_uppercase"] and analysis["lowercase_count"] >= options["min_lowercase"] and analysis["number_count"] >= options["min_numbers"] and analysis["symbol_count"] >= options["min_symbols"] )
2.75
3
boilerplate_app/serializers.py
taher-systango/DjangoUnboxed
0
12125
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Python imports.
import logging
import datetime
import calendar

# Django imports.
from django.db import transaction

# Rest Framework imports.
from rest_framework import serializers

# Third Party Library imports

# local imports.
from boilerplate_app.models import User, Projects


class UserCreateSerializer(serializers.ModelSerializer):
    password = serializers.CharField(write_only=True)

    def validate(self, data, *args, **kwargs):
        return super(UserCreateSerializer, self).validate(data, *args, **kwargs)

    @transaction.atomic()
    def create(self, validated_data):
        # Register new users
        user = super(UserCreateSerializer, self).create(validated_data)
        user.set_password(validated_data['password'])
        user.save()
        return user

    class Meta:
        model = User
        fields = ('email', 'id', 'password', 'username', 'first_name', 'last_name', 'role')
        extra_kwargs = {'password': {'<PASSWORD>': <PASSWORD>}}


class UserListSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ('id', 'first_name', 'last_name', 'email', 'role')


class ProjectsCreateSerializer(serializers.ModelSerializer):
    class Meta:
        model = Projects
        fields = ('project_name', 'user')

    def create(self, validated_data):
        user = User.objects.get(pk=validated_data.pop('user'))
        return Projects.objects.create(**validated_data, user=user)


class ProjectsListSerializer(serializers.ModelSerializer):
    class Meta:
        model = Projects
        fields = ('id', 'project_name', 'user')
2.140625
2
wagtail/wagtailadmin/blocks.py
patphongs/wagtail
3
12126
<gh_stars>1-10
from __future__ import absolute_import, unicode_literals

import warnings

from wagtail.wagtailcore.blocks import *  # noqa

warnings.warn("wagtail.wagtailadmin.blocks has moved to wagtail.wagtailcore.blocks",
              UserWarning, stacklevel=2)
1.117188
1
src/olympia/stats/management/commands/theme_update_counts_from_file.py
mstriemer/olympia
0
12127
<reponame>mstriemer/olympia import codecs from datetime import datetime, timedelta from optparse import make_option from os import path, unlink from django.conf import settings from django.core.management.base import BaseCommand, CommandError import commonware.log from olympia import amo from olympia.addons.models import Addon, Persona from olympia.stats.models import ThemeUpdateCount from . import get_date_from_file, save_stats_to_file log = commonware.log.getLogger('adi.themeupdatecount') class Command(BaseCommand): """Process hive results stored in different files and store them in the db. Usage: ./manage.py theme_update_counts_from_file <folder> --date=YYYY-MM-DD If no date is specified, the default is the day before. If not folder is specified, the default is `hive_results/<YYYY-MM-DD>/`. This folder will be located in `<settings.NETAPP_STORAGE>/tmp`. File processed: - theme_update_counts.hive Each file has the following cols: - date - addon id (if src is not "gp") or persona id - src (if it's "gp" then it's an old request with the persona id) - count """ help = __doc__ option_list = BaseCommand.option_list + ( make_option('--date', action='store', type='string', dest='date', help='Date in the YYYY-MM-DD format.'), make_option('--separator', action='store', type='string', default='\t', dest='separator', help='Field separator in file.'), ) def handle(self, *args, **options): start = datetime.now() # Measure the time it takes to run the script. day = options['date'] if not day: day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d') folder = args[0] if args else 'hive_results' folder = path.join(settings.TMP_PATH, folder, day) sep = options['separator'] filepath = path.join(folder, 'theme_update_counts.hive') # Make sure we're not trying to update with mismatched data. if get_date_from_file(filepath, sep) != day: raise CommandError('%s file contains data for another day' % filepath) # First, make sure we don't have any existing counts for the same day, # or it would just increment again the same data. ThemeUpdateCount.objects.filter(date=day).delete() theme_update_counts = {} # Preload a set containing the ids of all the persona Add-on objects # that we care about. When looping, if we find an id that is not in # that set, we'll reject it. addons = set(Addon.objects.filter(type=amo.ADDON_PERSONA, status=amo.STATUS_PUBLIC, persona__isnull=False) .values_list('id', flat=True)) # Preload all the Personas once and for all. This builds a dict where # each key (the persona_id we get from the hive query) has the addon_id # as value. persona_to_addon = dict(Persona.objects.values_list('persona_id', 'addon_id')) with codecs.open(filepath, encoding='utf8') as count_file: for index, line in enumerate(count_file): if index and (index % 1000000) == 0: log.info('Processed %s lines' % index) splitted = line[:-1].split(sep) if len(splitted) != 4: log.debug('Badly formatted row: %s' % line) continue day, id_, src, count = splitted try: id_, count = int(id_), int(count) except ValueError: # Badly formatted? Drop. continue if src: src = src.strip() # If src is 'gp', it's an old request for the persona id. if id_ not in persona_to_addon and src == 'gp': continue # No such persona. addon_id = persona_to_addon[id_] if src == 'gp' else id_ # Does this addon exist? if addon_id not in addons: continue # Memoize the ThemeUpdateCount. 
if addon_id in theme_update_counts: tuc = theme_update_counts[addon_id] else: tuc = ThemeUpdateCount(addon_id=addon_id, date=day, count=0) theme_update_counts[addon_id] = tuc # We can now fill the ThemeUpdateCount object. tuc.count += count # Create in bulk: this is much faster. ThemeUpdateCount.objects.bulk_create(theme_update_counts.values(), 100) for theme_update_count in theme_update_counts.values(): save_stats_to_file(theme_update_count) log.info('Processed a total of %s lines' % (index + 1)) log.debug('Total processing time: %s' % (datetime.now() - start)) # Clean up file. log.debug('Deleting {path}'.format(path=filepath)) unlink(filepath)
2.140625
2
KRR/Saved/Run 4/plot_all.py
MadsAW/machine-learning-on-materials
2
12128
<reponame>MadsAW/machine-learning-on-materials
import numpy as np
import pickle
import matplotlib.pyplot as plt
import os
import fnmatch

folder = "GP/"
ktype = "lin/"

matrices = os.listdir(folder + ktype)

for matrix in matrices:
    if fnmatch.fnmatch(matrix, '*_val_*'):
        with open(folder + ktype + matrix, "rb") as pickleFile:
            results = pickle.load(pickleFile)

        arrray = results[2]

        # Enable interactive mode
        plt.ion()

        # Draw the grid lines
        plt.grid(True)

        plt.plot(results[1], results[2], label=matrix)
        plt.xscale('symlog', linthreshx=20)
        plt.legend(loc='upper left')

plt.show()
2.78125
3
FATERUI/common/camera/mindvision/camera_mindvision.py
LynnChan706/Fater
4
12129
<reponame>LynnChan706/Fater #!/usr/bin/env python2.7 # coding=utf-8 import logging import traceback import time from FATERUI.common.camera.camera import Camera from . import CameraMindVision from FATERUI.common.camera.common_tools import * import cv2 # from aoi.common.infraredcontrol import infraredcontrol from time import sleep import datetime def get_formated_time (pstr='%Y%m%d_%H_%M_%S_'): return datetime.datetime.now().strftime(pstr)+ str(datetime.datetime.now().microsecond) class MindVision(Camera): def __init__(self, camera_degree=0, is_trigger_mode=False): Camera.__init__(self) self.camera_name = 'MINDVISION' self.camera_init_status = False self.__is_trigger_mode = is_trigger_mode self.__camera = CameraMindVision.CameraMindVision(_mode=0, _single_mode=False, _packetSize=9000, _strobe_enable=False, _trigger_delay=0.018, _interPacketDelay=3000, _debug=False, _is_hardware_trigger=self.__is_trigger_mode, ) self.camera_status = self.CAMERA_STATUS_UNCONNECTED self.__mark_as_open = False self.__flash_mode = True self.__old_time = 0.0 self.__rotate_degree = camera_degree # self.__control=infraredcontrol.infrared() def get_camera_name(self): return self.camera_name def open(self): return self.__camera.open() def __del__(self): if self.get_camera_status() == self.CAMERA_STATUS_CONNECTED: self.__camera.release_camera() self.__mark_as_open = False del self.__camera def close(self): self.__camera.save_parmeter() self.__camera.release_camera() self.__mark_as_open = False self.camera_status = self.CAMERA_STATUS_UNCONNECTED return True def __take_picture(self): print('ccc take a picture mind') if self.__mark_as_open: self.__mark_as_open = False self.__camera.open() logging.getLogger('pointgrey_camera').info('camera info:%s' % self.get_camera_description()) img = self.__camera.get_image_in_numpy() img2=cv2.flip(img,0) timenow = get_formated_time() pimgtest = './imgdata/orgimg/' + timenow + 'x.bmp' if img!=None: cv2.imwrite(pimgtest,img) return img2 def set_flash_mode(self, flash=True): self.__flash_mode = flash def take_picture(self, index=None): img=None try: img = self.__take_picture() if img is not None and img.size > 100: if self.__is_trigger_mode: if time.time() - self.__old_time < 1: return None self.__old_time = time.time() if abs(self.__rotate_degree) > 0.0001: img = transform_image(img, angle=self.__rotate_degree, keep_all=True) elif img.size <= 100: img = None except Exception as err: print('error ---',err) # logging.getLogger('logger_system').exception(u'error in take_picture:%s' % traceback.print_exc()) img = None return img def get_camera_description(self): desc = 'firmware:' + str(self.__camera.get_firmware_version()) +\ '\nid:' + str(self.get_camera_id()) return desc def get_camera_id(self): return self.__camera.get_camera_id() def get_camera_status(self): return self.camera_status def get_frame_count(self): return self.__camera.get_frame_count() def get_error_frame_count(self): return self.__camera.get_error_frame_count() def get_frame_rate(self): return self.__camera.get_frame_rate() def get_parameter(self): shutter_time = self.__camera.get_shutter() wb_red = self.__camera.get_white_balance_red() wb_green = self.__camera.get_white_balance_green() wb_blue = self.__camera.get_white_balance_blue() parameter = {'shutter': shutter_time, 'wb_red': wb_red, 'wb_green': wb_green, 'wb_blue': wb_blue, 'rotate': self.__rotate_degree} return parameter def set_parameter(self, **kwargs): flag = True shutter = kwargs.get('shutter', None) if shutter is not None: success = 
self.__camera.set_shutter(kwargs['shutter']) if not success: flag = False wb_red = kwargs.get('wb_red', None) if wb_red is not None: success = self.__camera.set_wb_red(wb_red) if not success: flag = False wb_green = kwargs.get('wb_green', None) if wb_green is not None: success = self.__camera.set_wb_green(wb_green) if not success: flag = False wb_blue = kwargs.get('wb_blue', None) if wb_blue is not None and wb_blue > 0: success = self.__camera.set_wb_blue(wb_blue) if not success: flag = False self.__rotate_degree = kwargs.get('rotate', self.__rotate_degree) return flag def get_picture_info(self): return 'Temperature:[%.2f]\t\tFrameRate:[%.2f]' % (self.get_camera_temperature(), self.get_frame_rate()) def get_camera_temperature(self): return self.__camera.get_camera_temperature() def __is_connect(self): return self.__camera.is_connected()
2.265625
2
tests/conftest.py
arosen93/jobflow
10
12130
import pytest


@pytest.fixture(scope="session")
def test_data():
    from pathlib import Path

    module_dir = Path(__file__).resolve().parent
    test_dir = module_dir / "test_data"
    return test_dir.resolve()


@pytest.fixture(scope="session")
def database():
    return "jobflow_test"


@pytest.fixture(scope="session")
def mongo_jobstore(database):
    from maggma.stores import MongoStore

    from jobflow import JobStore

    store = JobStore(MongoStore(database, "outputs"))
    store.connect()
    return store


@pytest.fixture(scope="function")
def memory_jobstore():
    from maggma.stores import MemoryStore

    from jobflow import JobStore

    store = JobStore(MemoryStore())
    store.connect()
    return store


@pytest.fixture(scope="function")
def memory_data_jobstore():
    from maggma.stores import MemoryStore

    from jobflow import JobStore

    store = JobStore(MemoryStore(), additional_stores={"data": MemoryStore()})
    store.connect()
    return store


@pytest.fixture
def clean_dir():
    import os
    import shutil
    import tempfile

    old_cwd = os.getcwd()
    newpath = tempfile.mkdtemp()
    os.chdir(newpath)

    yield

    os.chdir(old_cwd)
    shutil.rmtree(newpath)


@pytest.fixture(scope="session")
def debug_mode():
    return False


@pytest.fixture(scope="session")
def lpad(database, debug_mode):
    from fireworks import LaunchPad

    lpad = LaunchPad(name=database)
    lpad.reset("", require_password=False)
    yield lpad

    if not debug_mode:
        lpad.reset("", require_password=False)
        for coll in lpad.db.list_collection_names():
            lpad.db[coll].drop()


@pytest.fixture
def no_pydot(monkeypatch):
    import builtins

    import_orig = builtins.__import__

    def mocked_import(name, *args, **kwargs):
        if name == "pydot":
            raise ImportError()
        return import_orig(name, *args, **kwargs)

    monkeypatch.setattr(builtins, "__import__", mocked_import)


@pytest.fixture
def no_matplotlib(monkeypatch):
    import builtins

    import_orig = builtins.__import__

    def mocked_import(name, *args, **kwargs):
        if name == "matplotlib":
            raise ImportError()
        return import_orig(name, *args, **kwargs)

    monkeypatch.setattr(builtins, "__import__", mocked_import)
1.851563
2
src/utilities/download_file_from_zip.py
Bhaskers-Blu-Org2/arcticseals
16
12131
# This script allows to download a single file from a remote ZIP archive
# without downloading the whole ZIP file itself.
# The hosting server needs to support the HTTP range header for it to work

import zipfile
import requests
import argparse


class HTTPIO(object):

    def __init__(self, url):
        self.url = url
        r = requests.head(self.url)
        self.size = int(r.headers['content-length'])
        assert self.size > 0
        self.offset = 0

    def seek(self, offset, whence=0):
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.size + offset
        else:
            raise Exception('Unknown value for parameter whence')

    def read(self, size=None):
        if size is None:
            r = requests.get(self.url,
                             headers={"range": "bytes={}-{}".format(self.offset, self.size - 1)},
                             stream=True)
        else:
            r = requests.get(self.url,
                             headers={"range": "bytes={}-{}".format(self.offset, min(self.size - 1, self.offset + size - 1))},
                             stream=True)
        r.raise_for_status()
        r.raw.decode_content = True
        content = r.raw.read()
        self.offset += len(content)
        return content

    def tell(self):
        return self.offset


def download_file(zip_url, relative_path, output_file):
    with zipfile.ZipFile(HTTPIO(zip_url)) as zz:
        with open(output_file, 'wb') as f:
            f.write(zz.read(relative_path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('URL', type=str,
                        help='URL to zip file, e.g. https://example.com/myfile.zip')
    parser.add_argument('FILE_PATH', type=str,
                        help='Path of the desired file in the ZIP file, e.g. myfolder/mydocument.docx')
    parser.add_argument('OUTPUT_FILE', type=str,
                        help='Local path to write the file to, e.g. /home/user/mydocument.docx')
    args = parser.parse_args()
    download_file(args.URL, args.FILE_PATH, args.OUTPUT_FILE)
3.5625
4
cxphasing/CXFileReader.py
jbgastineau/cxphasing
3
12132
<filename>cxphasing/CXFileReader.py import Image import readMDA import h5py import os import numpy from mmpad_image import open_mmpad_tif import numpy as np import scipy as sp import sys #import libtiff from cxparams import CXParams as CXP class CXFileReader(object): """ file_reader A generic and configurable file reader. The file reader determines the file type from the extension. For hierarchical data files a method for extracting the data must be specified. Inputs ------ filename - the name of the file to read h5_file_path - hdf5 files: a string describing the location of the data inside a hierarchical data format mda_filepath - mda files: must specify whether to read a detector channel or positioner number. For e.g. detector channel 5 mda_filepath='d5' positioner number 2 mda_filepath='p2' Outputs ------- data - the 2 or 3D array read from the data file. Example Usage: fr = FileReader() data=fr.open('filename.h5', h5_file_path='/some/string') data=fr.open('filename.mda', mda_file_path='d4') for detector channel 4 """ def __init__(self, *args, **kwargs): self.args = args for key in kwargs.keys(): setattr(self, key, kwargs[key]) def openup(self, filename, **kwargs): if not os.path.isfile(filename): CXP.log.error('{} is not a valid file'.format(filename)) sys.exit(1) self.extension = filename.split('.')[-1].lower() for key in kwargs.keys(): setattr(self, key, kwargs[key]) try: action = { 'mda': self.read_mda, 'h5': self.read_h5, 'hdf5': self.read_h5, 'jpg': self.read_image, 'jpeg': self.read_image, 'png': self.read_image, 'tif': self.read_image, 'tiff': self.read_tif, 'npy': self.read_npy, 'npz': self.read_npz, 'dat': self.read_dat, 'pkl': self.read_pickle, 'mmpd': self.read_mmpad, 'pil': self.read_pilatus }[self.extension] except NameError: CXP.log.error('Unknown file extension {}'.format(self.extension)) raise return action(filename=filename) def read_mda(self, filename=None): if not filename: filename = self.filename source = self.mda_file_path[0].lower() if source not in ['d', 'p']: CXP.log.error("mda_file_path first character must be 'd' or 'p'") raise channel = self.mda_file_path[1] if not np.isnumeric(channel): CXP.log.error("mda_file_path second character must be numeric.") raise try: return readMDA.readMDA(filename)[2][source].data except: CXP.log.error('Could not extract array from mda file') raise def read_h5(self, filename=None, h5_file_path='/entry/instrument/detector/data'): if not filename: filename = self.filename try: h5_file_path = self.h5_file_path except: pass try: return h5py.File(filename)[h5_file_path].value except: CXP.log.error('Could not extract data from h5 file.') raise def read_image(self, filename=None): if not filename: filename = self.filename try: return sp.misc.fromimage(Image.open(filename)) except: CXP.log.error('Unable to read data from {}'.format(filename)) raise def read_npy(self, filename=None): if not filename: filename = self.filename try: return numpy.load(filename) except IOError as e: print e CXP.log.error('Could not extract data from numpy file.') raise def read_npz(self, filename=None): if not filename: filename = self.filename l=[] try: d= dict(numpy.load(filename)) # Return list in the right order for i in range(len(d)): l.append(d['arr_{:d}'.format(i)]) return l except IOError: CXP.log.error('Could not extract data from numpy file.') raise def read_dat(self, filename=None): if not filename: filename = self.filename try: return sp.fromfile(filename) except: CXP.log.error('Could not extract data from data file.') raise def read_pickle(self, 
filename=None): if not filename: filename = self.filename try: return pickle.load(filename) except: CXP.log.error('Could not load data from pickle') raise def read_mmpad(self, filename=None): if not filename: filename = self.filename try: return open_mmpad_tif(filename) except: CXP.log.error('Could not load data from pickle') raise def read_pilatus(self, filename=None): if not filename: filename = self.filename try: return sp.misc.fromimage(Image.open(filename))[:-1,:-1] except: CXP.log.error('Unable to read data from {}'.format(filename)) raise def read_tif(self, filename=None): if not filename: filename = self.filename try: return libtiff.TIFF.open(filename).read_image() except: CXP.log.error('Unable to read data from {}'.format(filename)) raise
2.828125
3
homeassistant/components/notify/file.py
SKarthick5121995/karthickmaduraai
0
12133
<filename>homeassistant/components/notify/file.py
"""
Support for file notification.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.file/
"""
import logging
import os

import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
    ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.helpers import validate_config

_LOGGER = logging.getLogger(__name__)


def get_service(hass, config):
    """Get the file notification service."""
    if not validate_config({DOMAIN: config},
                           {DOMAIN: ['filename', 'timestamp']},
                           _LOGGER):
        return None

    filename = config['filename']
    timestamp = config['timestamp']

    return FileNotificationService(hass, filename, timestamp)


# pylint: disable=too-few-public-methods
class FileNotificationService(BaseNotificationService):
    """Implement the notification service for the File service."""

    def __init__(self, hass, filename, add_timestamp):
        """Initialize the service."""
        self.filepath = os.path.join(hass.config.config_dir, filename)
        self.add_timestamp = add_timestamp

    def send_message(self, message="", **kwargs):
        """Send a message to a file."""
        with open(self.filepath, 'a') as file:
            if os.stat(self.filepath).st_size == 0:
                title = '{} notifications (Log started: {})\n{}\n'.format(
                    kwargs.get(ATTR_TITLE),
                    dt_util.strip_microseconds(dt_util.utcnow()),
                    '-' * 80)
                file.write(title)

            if self.add_timestamp == 1:
                text = '{} {}\n'.format(dt_util.utcnow(), message)
                file.write(text)
            else:
                text = '{}\n'.format(message)
                file.write(text)
2.609375
3
app/eSignature/views/eg035_scheduled_sending.py
docusign/eg-03-python-auth-code-grant
7
12134
""" Example 035: Scheduled sending and delayed routing """ from os import path from docusign_esign.client.api_exception import ApiException from flask import render_template, session, Blueprint, request from ..examples.eg035_scheduled_sending import Eg035ScheduledSendingController from ...docusign import authenticate from ...ds_config import DS_CONFIG from ...error_handlers import process_error from ...consts import pattern eg = "eg035" # reference (and url) for this example eg035 = Blueprint("eg035", __name__) def get_args(): """Get request and session arguments""" # More data validation would be a good idea here # Strip anything other than characters listed signer_email = pattern.sub("", request.form.get("signer_email")) signer_name = pattern.sub("", request.form.get("signer_name")) resume_date = request.form.get("resume_date") envelope_args = { "signer_email": signer_email, "signer_name": signer_name, "resume_date": resume_date, "status": "sent", } args = { "account_id": session["ds_account_id"], "base_path": session["ds_base_path"], "access_token": session["ds_access_token"], "envelope_args": envelope_args } return args @eg035.route("/eg035", methods=["POST"]) @authenticate(eg=eg) def sign_by_email(): """ 1. Get required arguments 2. Call the worker method 3. Render success response with envelopeId """ # 1. Get required arguments args = get_args() try: # 1. Call the worker method results = Eg035ScheduledSendingController.worker(args) print(results) except ApiException as err: return process_error(err) # 2. Render success response with envelopeId return render_template( "example_done.html", title="Envelope sent", h1="Envelope sent", message=f"The envelope has been created and scheduled!<br/>Envelope ID: {results['envelope_id']}." ) @eg035.route("/eg035", methods=["GET"]) @authenticate(eg=eg) def get_view(): """responds with the form for the example""" return render_template( "eg035_scheduled_sending.html", title="Scheduled sending", source_file="eg035_scheduled_sending.py", source_url=DS_CONFIG["github_example_url"] + "eg035_scheduled_sending.py", documentation=DS_CONFIG["documentation"] + eg, show_doc=DS_CONFIG["documentation"], signer_name=DS_CONFIG["signer_name"], signer_email=DS_CONFIG["signer_email"] )
2.34375
2
Informatik1/Finals Prep/HS20/1 Warmup/tally.py
Queentaker/uzh
8
12135
#-- THIS LINE SHOULD BE THE FIRST LINE OF YOUR SUBMISSION! --#

def tally(costs, discounts, rebate_factor):
    cost = sum(costs)
    discount = sum(discounts)
    pre = (cost - discount) * rebate_factor
    if pre < 0:
        return 0
    else:
        return round(pre, 2)

#-- THIS LINE SHOULD BE THE LAST LINE OF YOUR SUBMISSION! ---#

### DO NOT SUBMIT THE FOLLOWING LINES!!! THESE ARE FOR LOCAL TESTING ONLY!

# ((10+24) - (3+4+3)) * 0.3
assert(tally([10,24], [3,4,3], 0.30) == 7.20)

# if the result would be negative, 0 is returned instead
assert(tally([10], [20], 0.1) == 0)
3.140625
3
linear_sequence_of_dominos/valid_sequence.py
bhpayne/domino_tile_floor
0
12136
<reponame>bhpayne/domino_tile_floor #!/usr/bin/env python3 """ Given a set of dominos, construct a linear sequence For example, if the set of dominos is [ (0,0) (1,0), (1,1)] then a valid linear sequence of length four would be (0,0),(0,1),(1,1),(1,0) In this script we first create a set of dominos to sample from. Then every permutation of that set is tested to see whether the sequence is a valid linear sequence. If the sequence is invalid, a counter is incremented to record how long the sequence was. """ # http://www.domino-games.com/domino-rules/double-six.html import itertools ''' list_of_dominos = [ (0,0), (1,0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (1,1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (2, 2), (3, 2), (4, 2), (5, 2), (6, 2), (3, 3), (4, 3), (5, 3), (6, 3), (4, 4), (5, 4), (6, 4), (5, 5), (6, 5), (6, 6)] ''' list_of_dominos = [(0,0),(1,0),(2,0),(3,0), (1,1),(2,1),(3,1), (2,2),(3,2), (3,3)] print("number of unique dominos in this set is",len(list_of_dominos)) # 28! = 3*10^29 #print(list_of_dominos) broke_on={} for indx in range(11): broke_on[indx+1]=0 print("initialized data structure (nothing up my sleeve):",broke_on) for this_perm in itertools.permutations(list_of_dominos): if(this_perm[0][1] != this_perm[1][0]): #print("broke on first pair") broke_on[1] += 1 elif(this_perm[1][1] != this_perm[2][0]): #print("broke on second pair") broke_on[2] += 1 elif(this_perm[2][1] != this_perm[3][0]): broke_on[3] += 1 elif(this_perm[3][1] != this_perm[4][0]): broke_on[4] += 1 elif(this_perm[4][1] != this_perm[5][0]): broke_on[5] += 1 elif(this_perm[5][1] != this_perm[6][0]): broke_on[6] += 1 elif(this_perm[6][1] != this_perm[7][0]): broke_on[7] += 1 elif(this_perm[7][1] != this_perm[8][0]): broke_on[8] += 1 elif(this_perm[8][1] != this_perm[9][0]): broke_on[9] += 1 elif(this_perm[9][1] != this_perm[10][0]): broke_on[10] += 1 else: print("made it to another pair") print(this_perm) break print(broke_on)
3.9375
4
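An aside on the domino script in the record above: the chain of elif tests is hard-wired to a 10-tile sequence. The same adjacency check can be written once as a loop; the helper below is an illustrative sketch, and its name, is_valid_chain, is not from the original repository.

# Illustrative, length-independent version of the adjacency test used in the
# elif chain above: returns the 1-based index of the first broken pair,
# or 0 when the permutation is a valid linear domino sequence.
def is_valid_chain(sequence):
    for i in range(len(sequence) - 1):
        if sequence[i][1] != sequence[i + 1][0]:
            return i + 1  # mirrors the broke_on counter in the script
    return 0

# (0,0),(0,1),(1,1),(1,0) is the valid example given in the docstring.
assert is_valid_chain([(0, 0), (0, 1), (1, 1), (1, 0)]) == 0
assert is_valid_chain([(0, 0), (1, 1)]) == 1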
Projects/Arena/old-version.py
hastysun/Python
1
12137
## Unit 4 Project - Two Player Game ## <NAME> - Computer Programming II ## The Elder Scrolls X # A fan made 2 player game successor the The Elder Scrolls Series # Two players start off in an arena # Can choose starting items # Can choose classes ## Libraries import time # Self explanatory import random # Self explanatory import os # Used for Linux commands import os, platform # For Linux intro ## Functions def sleep(): # This function just automates what I usually do manually time.sleep(0.1) print("\n") return ## Code class Player1(object): # This is the class for Player 1 def __init__(self, name, health, attack, stamina, defense): self.name = name # Player's name self.health = health # Player's max health self.attack = attack # Player's attack power, can be changed self.stamina = stamina # How many attacks you can do self.defense = defense # How much damage you take def Stats(self): sleep() print(self.name + "'s currents stats are: ") sleep() print("Health = " + str(self.health)) print("Attack = " + str(self.attack)) print("Stamina = " + str(self.stamina)) print("Defense = " + str(self.defense)) sleep() class Player2(object): # This is the class for Player 2 def __init__(self, name, health, attack, stamina, defense): self.name = name self.health = health self.attack = attack self.stamina = stamina self.defense = defense def Stats(self): sleep() print(self.name + "'s currents stats are: ") sleep() print("Health = " + str(self.health)) print("Attack = " + str(self.attack)) print("Stamina = " + str(self.stamina)) print("Defense = " + str(self.defense)) sleep() def intro1(): # This is an intro for Linux sleep() os.system("figlet Elder Scrolls X") sleep() return def intro2(): # Intro for anything else sleep() print("\n\t Elder Scrolls X") sleep() return if platform.system() == "Linux": intro1() else: intro2() def CharCreation(): # Function to ask questions for class choosing sleep() print("=> What kind of class do you want?") sleep() print("> 1 - Knight") #sleep() print("> 2 - Thief") #sleep() print("> 3 - Lancer") sleep() return sleep() print("=> Player 1 : What is your name?") name1 = input("> ") # "name1" is Player 1's name sleep() print("=> Player 1,") CharCreation() CharCreationChoice1 = input("> ") if CharCreationChoice1 == ("1"): # Knight player1 = Player1(name1, 200, 150, 50, 200) if CharCreationChoice1 == ("2"): # Thief player1 = Player1(name1, 100, 200, 100, 50) if CharCreationChoice1 == ("3"): # Lancer player1 = Player1(name1, 100, 100, 100, 100) sleep() player1.Stats() # Prints the stats for Player 1 sleep() print("=> Player 2 : What is your name?") name2 = input("> ") # "name2" is Player 2's name CharCreation() CharCreationChoice2 = input("> ") if CharCreationChoice2 == ("1"): # Knight player2 = Player2(name2, 200, 150, 50, 200) if CharCreationChoice2 == ("2"): # Thief player2 = Player2(name2, 100, 200, 100, 50) if CharCreationChoice2 == ("3"): # Lancer player2 = Player2(name2, 100, 100, 100, 100) player2.Stats() # Prints Player 2's stats
3.546875
4
chromium/tools/telemetry/telemetry/internal/image_processing/video.py
wedataintelligence/vivaldi-source
925
12138
<gh_stars>100-1000 # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import subprocess from catapult_base import cloud_storage from telemetry.core import platform from telemetry.util import image_util from telemetry.util import rgba_color HIGHLIGHT_ORANGE_FRAME = rgba_color.WEB_PAGE_TEST_ORANGE class BoundingBoxNotFoundException(Exception): pass class Video(object): """Utilities for storing and interacting with the video capture.""" def __init__(self, video_file_obj): assert video_file_obj.delete assert not video_file_obj.close_called self._video_file_obj = video_file_obj self._tab_contents_bounding_box = None def UploadToCloudStorage(self, bucket, target_path): """Uploads video file to cloud storage. Args: target_path: Path indicating where to store the file in cloud storage. """ cloud_storage.Insert(bucket, target_path, self._video_file_obj.name) def GetVideoFrameIter(self): """Returns the iteration for processing the video capture. This looks for the initial color flash in the first frame to establish the tab content boundaries and then omits all frames displaying the flash. Yields: (time_ms, image) tuples representing each video keyframe. Only the first frame is a run of sequential duplicate bitmaps is typically included. time_ms is milliseconds since navigationStart. image may be a telemetry.core.Bitmap, or a numpy array depending on whether numpy is installed. """ frame_generator = self._FramesFromMp4(self._video_file_obj.name) # Flip through frames until we find the initial tab contents flash. content_box = None for _, bmp in frame_generator: content_box = self._FindHighlightBoundingBox( bmp, HIGHLIGHT_ORANGE_FRAME) if content_box: break if not content_box: raise BoundingBoxNotFoundException( 'Failed to identify tab contents in video capture.') # Flip through frames until the flash goes away and emit that as frame 0. timestamp = 0 for timestamp, bmp in frame_generator: if not self._FindHighlightBoundingBox(bmp, HIGHLIGHT_ORANGE_FRAME): yield 0, image_util.Crop(bmp, *content_box) break start_time = timestamp for timestamp, bmp in frame_generator: yield timestamp - start_time, image_util.Crop(bmp, *content_box) def _FindHighlightBoundingBox(self, bmp, color, bounds_tolerance=8, color_tolerance=8): """Returns the bounding box of the content highlight of the given color. Raises: BoundingBoxNotFoundException if the hightlight could not be found. """ content_box, pixel_count = image_util.GetBoundingBox(bmp, color, tolerance=color_tolerance) if not content_box: return None # We assume arbitrarily that tabs are all larger than 200x200. If this # fails it either means that assumption has changed or something is # awry with our bounding box calculation. if content_box[2] < 200 or content_box[3] < 200: raise BoundingBoxNotFoundException('Unexpectedly small tab contents.') # TODO(tonyg): Can this threshold be increased? if pixel_count < 0.9 * content_box[2] * content_box[3]: raise BoundingBoxNotFoundException( 'Low count of pixels in tab contents matching expected color.') # Since we allow some fuzziness in bounding box finding, we want to make # sure that the bounds are always stable across a run. So we cache the # first box, whatever it may be. # # This relies on the assumption that since Telemetry doesn't know how to # resize the window, we should always get the same content box for a tab. # If this assumption changes, this caching needs to be reworked. 
if not self._tab_contents_bounding_box: self._tab_contents_bounding_box = content_box # Verify that there is only minor variation in the bounding box. If it's # just a few pixels, we can assume it's due to compression artifacts. for x, y in zip(self._tab_contents_bounding_box, content_box): if abs(x - y) > bounds_tolerance: # If this fails, it means either that either the above assumption has # changed or something is awry with our bounding box calculation. raise BoundingBoxNotFoundException( 'Unexpected change in tab contents box.') return self._tab_contents_bounding_box def _FramesFromMp4(self, mp4_file): host_platform = platform.GetHostPlatform() if not host_platform.CanLaunchApplication('avconv'): host_platform.InstallApplication('avconv') def GetDimensions(video): proc = subprocess.Popen(['avconv', '-i', video], stderr=subprocess.PIPE) dimensions = None output = '' for line in proc.stderr.readlines(): output += line if 'Video:' in line: dimensions = line.split(',')[2] dimensions = map(int, dimensions.split()[0].split('x')) break proc.communicate() assert dimensions, ('Failed to determine video dimensions. output=%s' % output) return dimensions def GetFrameTimestampMs(stderr): """Returns the frame timestamp in integer milliseconds from the dump log. The expected line format is: ' dts=1.715 pts=1.715\n' We have to be careful to only read a single timestamp per call to avoid deadlock because avconv interleaves its writes to stdout and stderr. """ while True: line = '' next_char = '' while next_char != '\n': next_char = stderr.read(1) line += next_char if 'pts=' in line: return int(1000 * float(line.split('=')[-1])) dimensions = GetDimensions(mp4_file) frame_length = dimensions[0] * dimensions[1] * 3 frame_data = bytearray(frame_length) # Use rawvideo so that we don't need any external library to parse frames. proc = subprocess.Popen(['avconv', '-i', mp4_file, '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-dump', '-loglevel', 'debug', '-f', 'rawvideo', '-'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) while True: num_read = proc.stdout.readinto(frame_data) if not num_read: raise StopIteration assert num_read == len(frame_data), 'Unexpected frame size: %d' % num_read yield (GetFrameTimestampMs(proc.stderr), image_util.FromRGBPixels(dimensions[0], dimensions[1], frame_data))
2.40625
2
homeassistant/components/renault/renault_coordinator.py
basicpail/core
5
12139
<gh_stars>1-10 """Proxy to handle account communication with Renault servers.""" from __future__ import annotations from collections.abc import Awaitable from datetime import timedelta import logging from typing import Callable, TypeVar from renault_api.kamereon.exceptions import ( AccessDeniedException, KamereonResponseException, NotSupportedException, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed T = TypeVar("T") class RenaultDataUpdateCoordinator(DataUpdateCoordinator[T]): """Handle vehicle communication with Renault servers.""" def __init__( self, hass: HomeAssistant, logger: logging.Logger, *, name: str, update_interval: timedelta, update_method: Callable[[], Awaitable[T]], ) -> None: """Initialise coordinator.""" super().__init__( hass, logger, name=name, update_interval=update_interval, update_method=update_method, ) self.access_denied = False self.not_supported = False async def _async_update_data(self) -> T: """Fetch the latest data from the source.""" if self.update_method is None: raise NotImplementedError("Update method not implemented") try: return await self.update_method() except AccessDeniedException as err: # Disable because the account is not allowed to access this Renault endpoint. self.update_interval = None self.access_denied = True raise UpdateFailed(f"This endpoint is denied: {err}") from err except NotSupportedException as err: # Disable because the vehicle does not support this Renault endpoint. self.update_interval = None self.not_supported = True raise UpdateFailed(f"This endpoint is not supported: {err}") from err except KamereonResponseException as err: # Other Renault errors. raise UpdateFailed(f"Error communicating with API: {err}") from err async def async_config_entry_first_refresh(self) -> None: """Refresh data for the first time when a config entry is setup. Contrary to base implementation, we are not raising ConfigEntryNotReady but only updating the `access_denied` and `not_supported` flags. """ await self._async_refresh(log_failures=False, raise_on_auth_failed=True)
1.960938
2
Pacote Dowload/pythonProject/aula020.py
J297-hub/exercicios-de-python
0
12140
<reponame>J297-hub/exercicios-de-python<filename>Pacote Dowload/pythonProject/aula020.py
def soma(a, b):
    print(f'A = {a} e B = {b}')
    s = a + b
    print(f'A soma A + B = {s}')


# Main program
soma(4, 5)
2.265625
2
docs/script/CLI_docker_image_uri_script.py
ai4eu/on-boarding
0
12141
#!/usr/bin/env python3 # =================================================================================== # Copyright (C) 2019 Fraunhofer Gesellschaft. All rights reserved. # =================================================================================== # This Acumos software file is distributed by Fraunhofer Gesellschaft # under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ===============LICENSE_END========================================================== """ Provides an example of Docker URI cli on-boarding """ import requests import os import json # properties of the model model_name = "my-model-1" dockerImageURI = "cicd.ai4eu-dev.eu:7444/myimages/onboardingtest:v3" #Docker image URI looks like: example.com:port/image-tag:version license_file = "./license-1.0.0.json" protobuf_file = "./model.proto" # setup parameters host = os.environ['ACUMOS_HOST'] # FQHN like aiexp-preprod.ai4europe.eu token = os.environ['ACUMOS_TOKEN'] # format is 'acumos_username:API_TOKEN' advanced_api = "https://" + host + ":443/onboarding-app/v2/advancedModel" files= {'license': ('license.json', open(license_file, 'rb'), 'application.json'), 'protobuf': ('model.proto', open(protobuf_file, 'rb'), 'text/plain')} headers = {"Accept": "application/json", "modelname": model_name, "Authorization": token, "dockerFileURL": dockerImageURI, 'isCreateMicroservice': 'false'} #send request response = requests.post(advanced_api, files=files, headers=headers) #check response if response.status_code == 201: body = json.loads(response.text) solution_id = body['result']['solutionId'] print("Docker uri is pushed successfully on {" + host + "}, response is: ", response.status_code, " - solutionId: ", solution_id) else: print("Docker uri is not pushed on {" + host + "}, response is: ", response.status_code)
1.75
2
zeus/networks/pytorch/backbones/getter.py
shaido987/vega
1
12142
<filename>zeus/networks/pytorch/backbones/getter.py
# -*- coding: utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""ResNetVariant for Detection."""
from zeus.common import ClassType, ClassFactory
from zeus.modules.connections.connections import MultiOutputGetter


@ClassFactory.register(ClassType.NETWORK)
class BackboneGetter(MultiOutputGetter):
    """Backbone Getter from torchvision ResNet."""

    def __init__(self, backbone_name, layer_names=None, **kwargs):
        backbone = ClassFactory.get_cls(ClassType.NETWORK, backbone_name)
        backbone = backbone(**kwargs) if kwargs else backbone()
        if hasattr(backbone, "layers_name"):
            layer_names = backbone.layers_name()
        layer_names = layer_names or ['layer1', 'layer2', 'layer3', 'layer4']
        super(BackboneGetter, self).__init__(backbone, layer_names)
2.015625
2
pure_ee/lista.py
geosconsulting/gee_wapor
2
12143
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 20 08:40:22 2017

@author: fabio
"""

import ee
import ee.mapclient

ee.Initialize()

collection = ee.ImageCollection('MODIS/MCD43A4_NDVI')
lista = collection.toList(10)
#print lista.getInfo()

image = ee.Image('LC8_L1T/LC81910312016217LGN00')
#print image.getInfo()

bandNames = image.bandNames()
print('Band Names: ', bandNames.getInfo())

b1scale = image.select('B1').projection().nominalScale()
print('Band 1 scale: ', b1scale.getInfo())

b8scale = image.select('B8').projection().nominalScale()
print('Band 8 scale: ', b8scale.getInfo())

ndvi = image.normalizedDifference(['B5', 'B4'])

ee.mapclient.addToMap(ndvi, {'min' : -1, "max": 1}, "NDVI")
ee.mapclient.centerMap(12.3536,41.7686,9)
2.359375
2
pkg/tests/helpers_test.py
hborawski/rules_pkg
0
12144
<filename>pkg/tests/helpers_test.py # Copyright 2019 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from private import helpers class GetFlagValueTestCase(unittest.TestCase): def testNonStripped(self): self.assertEqual(helpers.GetFlagValue('value ', strip=False), 'value ') def testStripped(self): self.assertEqual(helpers.GetFlagValue('value ', strip=True), 'value') def testNonStripped_fromFile(self): with tempfile.TemporaryDirectory() as temp_d: argfile_path = os.path.join(temp_d, 'argfile') with open(argfile_path, 'wb') as f: f.write(b'value ') self.assertEqual( helpers.GetFlagValue('@'+argfile_path, strip=False), 'value ') def testStripped_fromFile(self): with tempfile.TemporaryDirectory() as temp_d: argfile_path = os.path.join(temp_d, 'argfile') with open(argfile_path, 'wb') as f: f.write(b'value ') self.assertEqual( helpers.GetFlagValue('@'+argfile_path, strip=True), 'value') class SplitNameValuePairAtSeparatorTestCase(unittest.TestCase): def testNoSep(self): key, val = helpers.SplitNameValuePairAtSeparator('abc', '=') self.assertEqual(key, 'abc') self.assertEqual(val, '') def testNoSepWithEscape(self): key, val = helpers.SplitNameValuePairAtSeparator('a\\=bc', '=') self.assertEqual(key, 'a=bc') self.assertEqual(val, '') def testNoSepWithDanglingEscape(self): key, val = helpers.SplitNameValuePairAtSeparator('abc\\', '=') self.assertEqual(key, 'abc') self.assertEqual(val, '') def testHappyCase(self): key, val = helpers.SplitNameValuePairAtSeparator('abc=xyz', '=') self.assertEqual(key, 'abc') self.assertEqual(val, 'xyz') def testHappyCaseWithEscapes(self): key, val = helpers.SplitNameValuePairAtSeparator('a\\=\\=b\\=c=xyz', '=') self.assertEqual(key, 'a==b=c') self.assertEqual(val, 'xyz') def testStopsAtFirstSep(self): key, val = helpers.SplitNameValuePairAtSeparator('a=b=c', '=') self.assertEqual(key, 'a') self.assertEqual(val, 'b=c') def testDoesntUnescapeVal(self): key, val = helpers.SplitNameValuePairAtSeparator('abc=x\\=yz\\', '=') self.assertEqual(key, 'abc') # the val doesn't get unescaped at all self.assertEqual(val, 'x\\=yz\\') def testUnescapesNonsepCharsToo(self): key, val = helpers.SplitNameValuePairAtSeparator('na\\xffme=value', '=') # this behaviour is surprising self.assertEqual(key, 'naxffme') self.assertEqual(val, 'value') if __name__ == '__main__': unittest.main()
2.34375
2
atendimento/admin.py
alantinoco/django-crmsmart
0
12145
from django.contrib import admin from .models import Contato, Venda, FormaPagamento admin.site.register(Contato) admin.site.register(Venda) admin.site.register(FormaPagamento)
1.296875
1
pome/models/transaction.py
pome-gr/pome
3
12146
<filename>pome/models/transaction.py import os import re import urllib from datetime import datetime from pathlib import Path from typing import Dict, List, Tuple, Union from money.currency import Currency from money.money import Money from werkzeug.utils import secure_filename from pome import g from pome.models.encoder import PomeEncodable RECORDED_TX_FOLDER_NAME = os.path.join("transactions", "recorded") class Amount(PomeEncodable): def __init__(self, currency_code: str, raw_amount_in_main_currency: str): # Putting this there to avoid circular imports from pome.currency import DECIMAL_PRECISION_FOR_CURRENCY amount_regex = re.compile( "^[0-9]*(\.[0-9]{0," + str(DECIMAL_PRECISION_FOR_CURRENCY) + "})?$" ) if not bool(amount_regex.fullmatch(raw_amount_in_main_currency)): raise ValueError( f"Invalid payload amount {raw_amount_in_main_currency}. Decimal separator is '.' and maximum number of decimals allowed is set by the currency (EUR and USD are 2 decimals)." ) self.raw_amount_in_main_currency: str = raw_amount_in_main_currency self.currency_code: str = currency_code def amount(self, formatted=False) -> Union[Money, str]: to_ret = Money(self.raw_amount_in_main_currency, Currency(self.currency_code)) if not formatted: return to_ret return to_ret.format(g.company.locale) @classmethod def from_payload(cls, payload: str): try: return cls(g.company.accounts_currency_code, payload) except ValueError as e: raise e class TransactionAttachmentOnDisk(PomeEncodable): def __init__(self, filename: str, filepath: str): self.filename = filename self.filepath = filepath class TransactionAttachmentPayload(PomeEncodable): def __init__(self, filename: str, b64_content: str): self.filename = filename self.b64_content = b64_content def save_on_disk(self, tx_path: str) -> TransactionAttachmentOnDisk: filepath = os.path.join(tx_path, self.filename) response = urllib.request.urlopen(self.b64_content) with open(filepath, "wb") as f: f.write(response.file.read()) return TransactionAttachmentOnDisk(self.filename, filepath) @classmethod def from_payload(cls, payload): try: if not "filename" in payload: raise ValueError("Field `filename` was not set in attached file.") if not "b64_content" in payload: raise ValueError("Filed `b64_content` was not set in attached file.") return cls(secure_filename(payload["filename"]), payload["b64_content"]) except ValueError as e: raise e class TransactionLine(PomeEncodable): def __init__(self, account_dr_code: str, account_cr_code: str, amount: Amount): self.account_dr_code: str = account_dr_code self.account_cr_code: str = account_cr_code self.amount: Amount = amount if not g.accounts_chart.is_valid_account_code(self.account_dr_code): raise ValueError(f"Invalid dr account code {self.account_dr_code }") if not g.accounts_chart.is_valid_account_code(self.account_cr_code): raise ValueError(f"Invalid cr account code {self.account_cr_code}") def _post_load_json(self): self.amount = Amount.from_json_dict(self.amount) @classmethod def from_payload(cls, payload): try: if type(payload) != dict: raise ValueError(f"Invalid transaction line {payload}.") if "account_dr" not in payload: raise ValueError(f"Field `account_dr` was not set in {payload}.") if "account_cr" not in payload: raise ValueError(f"Field `account_cr` was not set in {payload}.") if "raw_amount_in_main_currency" not in payload: raise ValueError( f"Field `raw_amount_in_main_currency` was not set in {payload}." 
) return cls( str(payload["account_dr"]), str(payload["account_cr"]), Amount.from_payload(payload["raw_amount_in_main_currency"]), ) except ValueError as e: raise e class Transaction(PomeEncodable): """Stores all the metadata associated to a transaction.""" default_filename = "tx.json" def __init__( self, date: Union[None, str], lines: List[TransactionLine], attachments: Union[ List[TransactionAttachmentOnDisk], List[TransactionAttachmentPayload] ], narrative: str = "", comments: str = "", date_recorded: Union[None, str] = None, id: Union[None, str] = None, ): self.date: Union[None, str] = date self.lines: List[TransactionLine] = lines self.attachments: Union[ List[TransactionAttachmentOnDisk], List[TransactionAttachmentPayload] ] = attachments self.date_recorded: Union[None, str] = date_recorded self.narrative: str = narrative self.comments: str = comments self.id: Union[None, str] = id if not self.validate_date(self.date): raise ValueError( f"Invalid date {self.date}. A valid date is yyyy-mm-dd, for instance 2021-08-30." ) if not self.validate_date(self.date_recorded, True): raise ValueError( f"Invalid record date {self.date_recorded}. A valid date record date is ISO8601, for instance 2008-08-30T01:45:36.123Z." ) @classmethod def get_transactions_id_sorted_by_date_recorded(cls, transactions): return [ tx.id for tx in sorted(list(transactions.values()), key=lambda x: x.date_recorded) ] @classmethod def order_recorded(cls, transactions): sorted_transactions = cls.get_transactions_id_sorted_by_date_recorded( transactions ) def f(tx_id): return sorted_transactions.index(tx_id) + 1 return f def _post_load_json(self): self.lines = list(map(TransactionLine.from_json_dict, self.lines)) self.attachments = list( map(TransactionAttachmentOnDisk.from_json_dict, self.attachments) ) def total_amount(self, formatted=False) -> Union[Money, str]: to_return = Money("0", Currency(g.company.accounts_currency_code)) for line in self.lines: to_return += line.amount.amount() if not formatted: return to_return return to_return.format(g.company.locale) @classmethod def fetch_all_recorded_transactions(cls) -> Dict[str, "Transaction"]: to_return = {} try: for tx_folder in os.listdir(RECORDED_TX_FOLDER_NAME): tx_file = os.path.join( RECORDED_TX_FOLDER_NAME, tx_folder, cls.default_filename ) if not os.path.exists(tx_file): continue to_return[tx_folder] = cls.from_json_file(tx_file) if tx_folder != to_return[tx_folder].id: raise ValueError( f"Transaction id `{to_return[tx_folder].id}` stored in `{tx_file}` does not match folder name {tx_folder}`" ) except FileNotFoundError as e: return {} return to_return def commit_message(self) -> str: to_return = self.date + "\n" to_return += "=" * len(self.date) + "\n" to_return += "lines:\n" for line in self.lines: to_return += " " + ( "DR " + g.accounts_chart.account_codes[line.account_dr_code].pretty_name() + "\n" + "\tCR " + g.accounts_chart.account_codes[line.account_cr_code].pretty_name() + "\n" + " " + line.amount.amount().format(g.company.locale) + "\n\n" ) if self.narrative != "": to_return += "narrative:" + "\n" to_return += " " + self.narrative + "\n" if self.comments != "": to_return += "\n" + "comments:" + "\n" to_return += " " + self.comments + "\n" if len(self.attachments) != 0: to_return += "\n" + "attachments:" + "\n" for file in self.attachments: to_return += f" - {file.filepath}\n" return to_return def assign_suitable_id(self) -> Union[None, str]: if self.id is not None: return self.id if self.date is None: return None self.id = self.date i = 1 while 
os.path.exists(self.get_tx_path()): self.id = self.date + f"_{i}" i += 1 return self.id def get_tx_path(self, absolute: bool = False) -> Union[None, str]: if self.id is None: return None if not absolute: return os.path.join(RECORDED_TX_FOLDER_NAME, self.id) else: return os.path.join(os.getcwd(), RECORDED_TX_FOLDER_NAME, self.id) def save_on_disk(self): if self.get_tx_path() is None: return Path(self.get_tx_path()).mkdir(parents=True, exist_ok=True) with open(os.path.join(self.get_tx_path(), self.default_filename), "w") as f: for i in range(len(self.attachments)): if isinstance(self.attachments[i], TransactionAttachmentPayload): self.attachments[i] = self.attachments[i].save_on_disk( self.get_tx_path() ) f.write(self.to_json()) regex_date = re.compile("^\d{4}\-(0[1-9]|1[012])\-(0[1-9]|[12][0-9]|3[01])$") regex_ISO8601 = re.compile( "^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$" ) @classmethod def validate_date(cls, date_str, ISO8601=False): p = cls.regex_date if not ISO8601 else cls.regex_ISO8601 return bool(p.fullmatch(date_str)) @classmethod def from_payload(cls, json_payload): try: if not "date" in json_payload: raise ValueError(f"Field `date` was not set. Format is yyyy-mm-dd.") date = json_payload["date"] if not "lines" in json_payload: raise ValueError("No transaction lines specified.") lines = [] for line in json_payload["lines"]: try: tx_line = TransactionLine.from_payload(line) lines.append(tx_line) except ValueError as e: raise e narrative = "" if "narrative" in json_payload: narrative = str(json_payload["narrative"]) comments = "" if "comments" in json_payload: comments = str(json_payload["comments"]) file_list = [] if "files" in json_payload: if type(json_payload["files"]) != list: raise ValueError("Invalid file payload.") for file in json_payload["files"]: file_list.append(TransactionAttachmentPayload.from_payload(file)) date_recorded = datetime.utcnow().isoformat() + "+00:00" if "date_recorded" in json_payload: date_recorded = json_payload["date_recorded"] toReturn = cls( date, lines, file_list, narrative, comments, date_recorded=date_recorded, ) return toReturn except ValueError as e: raise (e)
2.296875
2
site/external/moya.logins/py/oauth1.py
moyaproject/moya-techblog
31
12147
from __future__ import unicode_literals from __future__ import print_function import moya from moya.compat import text_type from requests_oauthlib import OAuth1Session def get_credentials(provider, credentials): client_id = credentials.client_id or provider.get('client_id', None) client_secret = credentials.client_secret or provider.get('client_secret', None) return client_id, client_secret @moya.expose.macro('get_oauth_resource_owner') def get_oauth_resource_owner(app, provider, credentials): client_id, client_secret = get_credentials(provider, credentials) oauth = OAuth1Session(client_id, client_secret=client_secret) request_token_url = provider['request_token_url'] response = oauth.fetch_request_token(request_token_url) resource_owner_key = response.get('oauth_token') resource_owner_secret = response.get('oauth_token_secret') result = { "key": resource_owner_key, "secret": resource_owner_secret } return result @moya.expose.macro('get_oauth_authorize_url') def get_oauth_authorize_url(app, provider, credentials): context = moya.pilot.context client_id, client_secret = get_credentials(provider, credentials) resource_owner_key = context['.session.oauth1.resource_owner.key'] resource_owner_secret = context['.session.oauth1.resource_owner.secret'] oauth = OAuth1Session(client_id, client_secret=client_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret) authorization_url = oauth.authorization_url(provider['authorization_base_url']) return authorization_url @moya.expose.macro('get_oauth_access_token') def get_oauth_access_token(app, provider, credentials, verifier): context = moya.pilot.context client_id, client_secret = get_credentials(provider, credentials) resource_owner_key = context['.session.oauth1.resource_owner.key'] resource_owner_secret = context['.session.oauth1.resource_owner.secret'] oauth = OAuth1Session(client_id, client_secret=client_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret, verifier=verifier) access_token_url = provider['access_token_url'] oauth_tokens = oauth.fetch_access_token(access_token_url) return oauth_tokens @moya.expose.macro('get_oauth_profile') def get_oauth_profile(app, provider, credentials, verifier): context = moya.pilot.context client_id, client_secret = get_credentials(provider, credentials) resource_owner_key = context['.session.oauth1.resource_owner.key'] resource_owner_secret = context['.session.oauth1.resource_owner.secret'] resources = provider.get('resources', {}) session = OAuth1Session(client_id, client_secret=client_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret, verifier=verifier) access_token_url = provider['access_token_url'] try: oauth_tokens = session.fetch_access_token(access_token_url) except Exception as e: app.throw('moya.logins.access-fail', text_type(e)) info = {} for scope, scope_url in sorted(resources.items()): try: response = session.get(scope_url) except Exception as e: app.throw('moya.logins.get-scope-fail', text_type(e), diagnosis="There may be a connectivity issue getting scope information.", scope=scope, scope_url=scope_url) try: info[scope] = scope_data = response.json() #if(context['.debug']): # context['.console'].obj(context, scope_data) except: pass provider_profile = provider.get('profile', {}) profile = {} context['_oauth_info'] = info with context.frame('_oauth_info'): for k, v in provider_profile.items(): try: profile[k] = context.eval(v) except: pass return {'profile': profile, 'info': info}
2.140625
2
cgp.py
BakudanKame/CGPCatAndRat
0
12148
""" Cartesian genetic programming """ import operator as op import random import copy import math from settings import VERBOSE, N_COLS, LEVEL_BACK class Function: """ A general function """ def __init__(self, f, arity, name=None): self.f = f self.arity = arity self.name = f.__name__ if name is None else name def __call__(self, *args, **kwargs): return self.f(*args, **kwargs) class Node: """ A node in CGP graph """ def __init__(self, max_arity): """ Initialize this node randomly """ self.i_func = None self.i_inputs = [None] * max_arity self.weights = [None] * max_arity self.i_output = None self.output = None self.active = False class Individual: """ An individual (chromosome, genotype, etc.) in evolution """ function_set = None weight_range = [-1, 1] max_arity = 3 n_inputs = 3 n_outputs = 1 n_cols = N_COLS level_back = LEVEL_BACK def __init__(self): self.nodes = [] for pos in range(self.n_cols): self.nodes.append(self._create_random_node(pos)) for i in range(1, self.n_outputs + 1): self.nodes[-i].active = True self.fitness = None self._active_determined = False def _create_random_node(self, pos): node = Node(self.max_arity) node.i_func = random.randint(0, len(self.function_set) - 1) for i in range(self.function_set[node.i_func].arity): node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1) node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1]) node.i_output = pos return node def _determine_active_nodes(self): """ Determine which nodes in the CGP graph are active """ # check each node in reverse order n_active = 0 for node in reversed(self.nodes): if node.active: n_active += 1 for i in range(self.function_set[node.i_func].arity): i_input = node.i_inputs[i] if i_input >= 0: # a node (not an input) self.nodes[i_input].active = True if VERBOSE: print("# active genes: ", n_active) def eval(self, *args): """ Given inputs, evaluate the output of this CGP individual. :return the final output value """ if not self._active_determined: self._determine_active_nodes() self._active_determined = True # forward pass: evaluate for node in self.nodes: if node.active: inputs = [] for i in range(self.function_set[node.i_func].arity): i_input = node.i_inputs[i] w = node.weights[i] if i_input < 0: inputs.append(args[-i_input - 1] * w) else: inputs.append(self.nodes[i_input].output * w) node.output = self.function_set[node.i_func](*inputs) return self.nodes[-1].output def mutate(self, mut_rate=0.01): """ Mutate this individual. Each gene is varied with probability *mut_rate*. 
:param mut_rate: mutation probability :return a child after mutation """ child = copy.deepcopy(self) for pos, node in enumerate(child.nodes): # mutate the function gene if random.random() < mut_rate: node.i_func = random.choice(range(len(self.function_set))) # mutate the input genes (connection genes) arity = self.function_set[node.i_func].arity for i in range(arity): if node.i_inputs[i] is None or random.random() < mut_rate: # if the mutated function requires more arguments, then the last ones are None node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1) if node.weights[i] is None or random.random() < mut_rate: node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1]) # initially an individual is not active except the last output node node.active = False for i in range(1, self.n_outputs + 1): child.nodes[-i].active = True child.fitness = None child._active_determined = False return child def save(self): file_object = open(r"SavedBrain", 'w+') for pos, node in enumerate(self.nodes): file_object.write(str(node.i_func)) file_object.write("\n") file_object.write("\n") if not self._active_determined: self._determine_active_nodes() self._active_determined = True for pos, node in enumerate(self.nodes): if node.active: file_object.write(str(node.i_func)) file_object.write("\n") file_object.write("\n") activeNodes = [] for node in self.nodes: if node.active: activeNodes.append(node) file_object.write(str(self.function_set[node.i_func].f)) file_object.close() # function set def protected_div(a, b): if abs(b) < 1e-6: return a return a / b fs = [Function(op.add, 2), Function(op.sub, 2), Function(op.mul, 2), Function(protected_div, 2), Function(op.neg, 1), Function(math.cos, 1), Function(math.sin, 1), Function(math.tan, 1), Function(math.atan2, 2)] Individual.function_set = fs Individual.max_arity = max(f.arity for f in fs) def evolve(pop, mut_rate, mu, lambda_): """ Evolve the population *pop* using the mu + lambda evolutionary strategy :param pop: a list of individuals, whose size is mu + lambda. The first mu ones are previous parents. :param mut_rate: mutation rate :return: a new generation of individuals of the same size """ pop = sorted(pop, key=lambda ind: ind.fitness) # stable sorting parents = pop[-mu:] # generate lambda new children via mutation offspring = [] for _ in range(lambda_): parent = random.choice(parents) offspring.append(parent.mutate(mut_rate)) return parents + offspring def create_population(n): """ Create a random population composed of n individuals. """ return [Individual() for _ in range(n)]
3.203125
3
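The cgp.py record above defines create_population and evolve but no driving loop. Below is a minimal sketch of how a (mu + lambda) run could be wired up; the fitness function, population sizes and generation count are invented for illustration, and it assumes the module is importable as cgp with a valid settings module providing VERBOSE, N_COLS and LEVEL_BACK.

# Hypothetical driver loop for the CGP module above.
import random
import cgp

MU, LAMBDA, N_GEN = 2, 8, 50

def fitness(ind):
    # Toy objective: approximate f(x, y, z) = x*y + z on random samples.
    error = 0.0
    for _ in range(20):
        x, y, z = (random.uniform(-1, 1) for _ in range(3))
        error += abs(ind.eval(x, y, z) - (x * y + z))
    return -error  # evolve() keeps the highest-fitness individuals

pop = cgp.create_population(MU + LAMBDA)
for gen in range(N_GEN):
    # Only newly mutated children have fitness None; parents keep their score.
    for ind in pop:
        if ind.fitness is None:
            ind.fitness = fitness(ind)
    pop = cgp.evolve(pop, mut_rate=0.02, mu=MU, lambda_=LAMBDA)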
historia/pops/logic/refiner.py
eranimo/historia
6
12149
<filename>historia/pops/logic/refiner.py
from historia.pops.logic.logic_base import LogicBase
from historia.economy.enums.resource import Good


class RefinerLogic(LogicBase):
    def perform(self):
        bread = self.get_good(Good.bread)
        tools = self.get_good(Good.tools)
        iron_ore = self.get_good(Good.iron_ore)

        if bread is None or iron_ore is None:
            # fine $2 for being idle
            self.charge_idle_money()
        elif tools is not None:
            # convert iron_ore to iron
            self.produce(Good.iron, 2)
            self.consume(Good.iron_ore, 1)
            self.consume(Good.bread, 1)
            self.consume(Good.tools, 1, 0.1)
        else:
            # convert iron_ore to iron
            self.produce(Good.iron, 1)
            self.consume(Good.iron_ore, 1)
            self.consume(Good.bread, 1)
2.625
3
image_processing/manual_features/extract-features.py
ColoredInsaneAsylums/PrivacySensitiveTranscription
0
12150
<reponame>ColoredInsaneAsylums/PrivacySensitiveTranscription import argparse import cv2 import numpy as np import os import _pickle as pickle from descriptors import HOG #from skimage.morphology import skeletonize # run image filtering and HOG feature extraction def main(im_path, desc_name): print('[INFO] Preparing to extract features for images in \'' + im_path + '\'') # track HOG feature vectors and corresponding images features = {} # image dimensions width = 128 height = 64 # feature descriptor print('[INFO] Using the ' + desc_name.upper() + ' feature descriptor') if desc_name == 'hog': descriptor = HOG() # evaluate image files print('[INFO] Processing images and computing features') for filename in os.listdir(im_path): if not filename.endswith('.jpg'): continue im = cv2.imread(im_path + filename, cv2.COLOR_BGR2GRAY) # resize image im = cv2.resize(im, (width,height)) # binarize using Otsu's method im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] im[im == 255] = 1 # thin using Zhang and Suen's method #im = skeletonize(im) #im = im.astype(np.uint8) # compute features v = descriptor.compute(im) features[filename] = v # save data print('[INFO] Saving features and corresponding image name to \'features/' + desc_name + '_features.pickle\'') with open('./features/' + desc_name + '_features.pickle', 'wb') as handle: pickle.dump(features, handle) if __name__ == '__main__': # require image directory and name of descriptor to use parser = argparse.ArgumentParser(description='Extract image feature vectors using feature descriptors') parser.add_argument('-p', '--path', required=True, nargs='?', action='store', const='./images/', type=str, dest='im_path', help='The filepath of the image directory') parser.add_argument('-d', '--descriptor', required=True, choices=['hog'], nargs='?', action='store', const='hog', type=str, dest='desc_name', help='The name of the descriptor to use') args = vars(parser.parse_args()) im_path = args['im_path'] desc_name = args['desc_name'] main(im_path, desc_name)
2.921875
3
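The feature-extraction script above depends on a project-local descriptors.HOG class whose parameters are not shown. As a rough stand-in, scikit-image's hog function produces a comparable descriptor; the parameter values below are common defaults assumed for illustration, not the project's actual settings, and sample.jpg is a hypothetical input file.

# Illustrative stand-in for the project's descriptors.HOG (assumed parameters).
import cv2
from skimage.feature import hog

im = cv2.imread('sample.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical input
im = cv2.resize(im, (128, 64))                        # same width/height as the script
im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# 9 orientations, 8x8 cells and 2x2 blocks are common HOG defaults,
# not necessarily what the project's descriptor uses.
features = hog(im, orientations=9, pixels_per_cell=(8, 8),
               cells_per_block=(2, 2), feature_vector=True)
print(features.shape)  # flattened HOG feature vector for this 128x64 image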
src/exco/extractor_spec/spec_source.py
thegangtechnology/excel_comment_orm
2
12151
import abc


class SpecSource(abc.ABC):
    @abc.abstractmethod
    def describe(self) -> str:
        """
        Returns:
            str to print in case there is an error constructing extractor
            for tracing back
        """
        raise NotImplementedError()


class UnknownSource(SpecSource):
    def describe(self) -> str:
        return 'Unknown Source'
3.25
3
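Because SpecSource only demands describe(), a concrete source is a one-method subclass. The CellSource class below is a made-up illustration (it is not part of the excel_comment_orm package) and assumes the SpecSource class above is in scope.

# Hypothetical concrete SpecSource: identifies a spec by workbook cell.
class CellSource(SpecSource):
    def __init__(self, sheet: str, cell: str):
        self.sheet = sheet
        self.cell = cell

    def describe(self) -> str:
        # Shown in error messages when extractor construction fails.
        return f'{self.sheet}!{self.cell}'

print(CellSource('Sheet1', 'B2').describe())  # -> Sheet1!B2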
app/admin.py
CS-Hunt/Get-Placed
14
12152
<filename>app/admin.py
from django.contrib import admin
from .models import Placement_Company_Detail,Profile,StudentBlogModel,ResorcesModel

admin.site.register(Placement_Company_Detail)
admin.site.register(Profile)
admin.site.register(StudentBlogModel)
admin.site.register(ResorcesModel)
1.429688
1
data/mapping.py
wby1905/Graph-Transformer-SSPR
2
12153
import torch as t import torch_geometric.utils as utils def qw_score(graph): """ 未实现qw_score,采用度数代替 :param graph: """ score = utils.degree(graph.edge_index[0]) return score.sort() def pre_processing(graph, m, score, trees): score, indices = score indices.squeeze_() old_edges = graph.edge_index trees[-1] = [-1] * m def graft(root): """ 找到分值最大的2阶节点并与源节点连接 和论文有一些不一样,会在加入二阶节点后把它视为一阶节点 :param root: 源节点(度小于m) """ nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index) if nodes_1_hop.shape[0] > m: return nodes_2_hop, _, _, _ = utils.k_hop_subgraph(root, 2, graph.edge_index) ma = 0 for node in nodes_2_hop: if node not in nodes_1_hop: node = int(node.item()) idx = t.nonzero(indices == node, as_tuple=False).item() ma = max(ma, idx) new_edge = t.tensor([[indices[ma], root], [root, indices[ma]]]) degree[root] += 1 graph.edge_index = t.cat((graph.edge_index, new_edge), dim=1) if degree[root] < m: graft(root) elif degree[root] == m: nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index) trees[root] = ([i.item() for i in nodes_1_hop if i != root]) graph.edge_index = old_edges def prune(root): """ 找到分值最小的1阶节点并删除连接 默认图为简单图 :param root: 源节点 """ nodes_1_hop, _, _, mask = utils.k_hop_subgraph(root, 1, graph.edge_index) if nodes_1_hop.shape[0] == m + 1: return mi = graph.num_nodes + 1 for node in nodes_1_hop: if node != root: node = int(node.item()) idx = t.nonzero(indices == node, as_tuple=False).item() mi = min(idx, mi) mask = mask.nonzero(as_tuple=False) edges = graph.edge_index l, r = 0, 0 for i in mask: i = i.item() if edges[0][i] == indices[mi] and edges[1][i] == root: l = i elif edges[1][i] == indices[mi] and edges[0][i] == root: r = i l, r = sorted([l, r]) graph.edge_index = t.cat((edges[:, :l], edges[:, l + 1:r], edges[:, r + 1:]), dim=1) degree[root] -= 1 if degree[root] > m: prune(root) elif degree[root] == m: nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index) trees[root] = ([i.item() for i in nodes_1_hop if i != root]) graph.edge_index = old_edges degree = utils.degree(graph.edge_index[0]) for node, d in enumerate(degree): tmp = degree[node] if d > m: prune(node) elif d < m: graft(node) else: nodes_1_hop, _, _, _ = utils.k_hop_subgraph(node, 1, graph.edge_index) trees[node] = ([i.item() for i in nodes_1_hop if i != node]) degree[node] = tmp for tree in trees: while len(trees[tree]) < m: trees[tree].append(-1) # 对于孤立点对它的子树加哑节点 graph.edge_index = old_edges return trees def construct_node_tree(graph, node, trees, opt): """ 生成目标节点的 K_level, m_ary 树 :param graph: :param node: :param opt: """ m = opt.m K = opt.K tree = [node] now = 0 for i in range(K - 1): for j in range(m ** i): root = tree[now] tree += trees[root] now += 1 zero = t.zeros(graph.x[-1].shape) x = graph.x graph.x = t.cat([graph.x, zero[None, :]], dim=0) tree = graph.x[tree] graph.x = x return tree
2.390625
2
.archived/snakecode/0460.py
gearbird/calgo
4
12154
<gh_stars>1-10 from typing import Optional, Any class Node: def __init__(self, key: int = 0, val: int = 0): self.key: int = key self.val: int = val self.freq: int = 0 self.pre: Optional[Node] = None self.next: Optional[Node] = None class DLList: def __init__(self): self.size = 0 self._guard = Node() self._guard.pre = self._guard.next = self._guard def headify(self, node: Optional[Node]): assert node node.pre = self._guard node.next = self._guard.next assert node.next is not None node.next.pre = node self._guard.next = node self.size += 1 def pop(self, node: Optional[Node] = None): if self.size == 0: return if not node: node = self._guard.pre assert node and node.next and node.pre node.pre.next = node.next node.next.pre = node.pre node.pre, node.next = None, None self.size -= 1 return node class LFUCache: def __init__(self, capacity: int): self._groups: dict[int, DLList] = {} self._nodes: dict[int, Node] = {} self._cap = capacity self._size = 0 self._minFreq = 1 def get(self, key: int) -> int: '''Get Node, update Cache and Node status''' if key not in self._nodes: return -1 node = self._nodes[key] self._update(node) return node.val def put(self, key: int, value: int): ''' If it's a existing Node, update Cache and Node.\n If it's a new Node and there're space, update Cache.\n If it's a new Node but no space, kick out one Node, update Cache ''' if self._cap == 0: return if key in self._nodes: node = self._nodes[key] node.val = value else: node = Node(key, value) if self._size < self._cap: self._addNew(node) else: self._addNew(node, kick=True) self._update(node) def _update(self, node: Node): '''Given a Node in cache, update it's freq and cache status''' group = self._groups[node.freq] group.pop(node) groupNext = self._getGroup(node.freq+1) groupNext.headify(node) if node.freq == self._minFreq and group.size == 0: self._minFreq = node.freq + 1 node.freq += 1 def _addNew(self, node: Node, kick: bool = False): ''' Simply kickout the least frequently used Node from frequency group and node map\n Replace it with a new Node, set cache min frequency to 1 ''' if self._cap == 0: return if kick: group = self._getGroup(self._minFreq) badNode = group.pop() assert badNode self._nodes.pop(badNode.key) self._size -= 1 self._minFreq = node.freq = 0 self._getGroup(node.freq).headify(node) self._nodes[node.key] = node self._size += 1 def _getGroup(self, freq: int) -> DLList: return self._groups.setdefault(freq, DLList()) def test(actions: list[str], val: list[list[int]]): cache: Optional[LFUCache] = None result: list[Optional[Any]] = [] for i, v in zip(actions, val): if i == 'LFUCache': cache = LFUCache(v[0]) result.append(None) elif i == 'put': assert cache result.append((i, v, cache.put(v[0], v[1]))) elif i == 'get': assert cache result.append((i, v, cache.get(v[0]))) print(result) if __name__ == '__main__': actions = ["LFUCache","put","put","get","put","get","get","put","get","get","get"] values = [[2],[1,1],[2,2],[1],[3,3],[2],[3],[4,4],[1],[3],[4]] test(actions, values) # actions2 = ["LFUCache","put","put","get","get","get","put","put","get","get","get","get"] # values2 = [[3],[2,2],[1,1],[2],[1],[2],[3,3],[4,4],[3],[2],[1],[4]] # test(actions2, values2) # actions3 = 
["LFUCache","put","put","put","put","put","get","put","get","get","put","get","put","put","put","get","put","get","get","get","get","put","put","get","get","get","put","put","get","put","get","put","get","get","get","put","put","put","get","put","get","get","put","put","get","put","put","put","put","get","put","put","get","put","put","get","put","put","put","put","put","get","put","put","get","put","get","get","get","put","get","get","put","put","put","put","get","put","put","put","put","get","get","get","put","put","put","get","put","put","put","get","put","put","put","get","get","get","put","put","put","put","get","put","put","put","put","put","put","put"] # values3 = [[10],[10,13],[3,17],[6,11],[10,5],[9,10],[13],[2,19],[2],[3],[5,25],[8],[9,22],[5,5],[1,30],[11],[9,12],[7],[5],[8],[9],[4,30],[9,3],[9],[10],[10],[6,14],[3,1],[3],[10,11],[8],[2,14],[1],[5],[4],[11,4],[12,24],[5,18],[13],[7,23],[8],[12],[3,27],[2,12],[5],[2,9],[13,4],[8,18],[1,7],[6],[9,29],[8,21],[5],[6,30],[1,12],[10],[4,15],[7,22],[11,26],[8,17],[9,29],[5],[3,4],[11,30],[12],[4,29],[3],[9],[6],[3,4],[1],[10],[3,29],[10,28],[1,20],[11,13],[3],[3,12],[3,8],[10,9],[3,26],[8],[7],[5],[13,17],[2,27],[11,15],[12],[9,19],[2,15],[3,16],[1],[12,17],[9,1],[6,19],[4],[5],[5],[8,1],[11,7],[5,2],[9,28],[1],[2,2],[7,4],[4,22],[7,24],[9,26],[13,28],[11,26]] # test(actions3, values3)
3.296875
3
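A short walkthrough of the LFUCache defined above (capacity 2), in addition to the scripted test at the bottom of the file. The access pattern is made up for illustration and follows standard LFU semantics: the least frequently used key is evicted first.

# Small walkthrough of the LFUCache above.
cache = LFUCache(2)
cache.put(1, 10)           # freq(1) = 1
cache.put(2, 20)           # freq(2) = 1
assert cache.get(1) == 10  # freq(1) = 2
cache.put(3, 30)           # full: key 2 has the lowest frequency, so it is evicted
assert cache.get(2) == -1  # 2 is gone
assert cache.get(1) == 10  # 1 survived because it was used more often
assert cache.get(3) == 30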
scripts/sha3.py
cidox479/ecc
0
12155
#/* # * Copyright (C) 2017 - This file is part of libecc project # * # * Authors: # * <NAME> <<EMAIL>> # * <NAME> <<EMAIL>> # * <NAME> <<EMAIL>> # * # * Contributors: # * <NAME> <<EMAIL>> # * <NAME> <<EMAIL>> # * # * This software is licensed under a dual BSD and GPL v2 license. # * See LICENSE file at the root folder of the project. # */ import struct keccak_rc = [ 0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000, 0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009, 0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A, 0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A, 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 ] keccak_rot = [ [ 0, 36, 3, 41, 18 ], [ 1, 44, 10, 45, 2 ], [ 62, 6, 43, 15, 61 ], [ 28, 55, 25, 21, 56 ], [ 27, 20, 39, 8, 14 ], ] # Keccak function def keccak_rotl(x, l): return (((x << l) ^ (x >> (64 - l))) & (2**64-1)) def keccakround(bytestate, rc): # Import little endian state state = [0] * 25 for i in range(0, 25): (state[i],) = struct.unpack('<Q', ''.join(bytestate[(8*i):(8*i)+8])) # Proceed with the KECCAK core bcd = [0] * 25 # Theta for i in range(0, 5): bcd[i] = state[i] ^ state[i + (5*1)] ^ state[i + (5*2)] ^ state[i + (5*3)] ^ state[i + (5*4)] for i in range(0, 5): tmp = bcd[(i+4)%5] ^ keccak_rotl(bcd[(i+1)%5], 1) for j in range(0, 5): state[i + (5 * j)] = state[i + (5 * j)] ^ tmp # Rho and Pi for i in range(0, 5): for j in range(0, 5): bcd[j + (5*(((2*i)+(3*j)) % 5))] = keccak_rotl(state[i + (5*j)], keccak_rot[i][j]) # Chi for i in range(0, 5): for j in range(0, 5): state[i + (5*j)] = bcd[i + (5*j)] ^ (~bcd[((i+1)%5) + (5*j)] & bcd[((i+2)%5) + (5*j)]) # Iota state[0] = state[0] ^ keccak_rc[rc] # Pack the output state output = [0] * (25 * 8) for i in range(0, 25): output[(8*i):(8*i)+1] = struct.pack('<Q', state[i]) return output def keccakf(bytestate): for rnd in range(0, 24): bytestate = keccakround(bytestate, rnd) return bytestate # SHA-3 context class class Sha3_ctx(object): def __init__(self, digest_size): self.digest_size = digest_size / 8 self.block_size = (25*8) - (2 * (digest_size / 8)) self.idx = 0 self.state = [chr(0)] * (25 * 8) def digest_size(self): return self.digest_size def block_size(self): return self.block_size def update(self, message): for i in range(0, len(message)): self.state[self.idx] = chr(ord(self.state[self.idx]) ^ ord(message[i])) self.idx = self.idx + 1 if (self.idx == self.block_size): self.state = keccakf(self.state) self.idx = 0 def digest(self): self.state[self.idx] = chr(ord(self.state[self.idx]) ^ 0x06) self.state[self.block_size - 1] = chr(ord(self.state[self.block_size - 1]) ^ 0x80) self.state = keccakf(self.state) return ''.join(self.state[:self.digest_size])
2.0625
2
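The Sha3_ctx class above relies on Python 2 byte-string semantics (chr/ord arithmetic on the state, integer division in the constructor), so the smoke test below assumes a Python 2 interpreter; the expected hex digest is the published SHA3-256 test vector for "abc".

# Python 2 only: exercise the pure-Python Keccak/SHA-3 implementation above.
ctx = Sha3_ctx(256)            # SHA3-256: 32-byte digest, 136-byte rate
ctx.update("abc")
digest = ctx.digest()
# Published SHA3-256("abc") test vector; should match if the permutation
# and padding above are implemented faithfully.
assert digest.encode('hex') == ("3a985da74fe225b2045c172d6bd390bd"
                                "855f086e3e9d525b46bfe24511431532")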
lps/seeds.py
fernandoleira/lps-platform
0
12156
<reponame>fernandoleira/lps-platform import csv from pathlib import Path from datetime import datetime from lps.models import * from lps.schemas import * SEED_FOLDER_PATH = Path("db/seeds/") def import_from_csv(csv_filename): with open(SEED_FOLDER_PATH / csv_filename) as csv_file: csv_read = csv.DictReader(csv_file, delimiter=',') return list(csv_read) def export_to_csv(model_dict, csv_filename="out.csv"): if len(model_dict) > 0: with open(SEED_FOLDER_PATH / csv_filename, "w") as csv_filename: csv_filename.write(",".join(model_dict[0].keys()) + '\n') for i in range(len(model_dict)): csv_filename.write(",".join([str(elm) for elm in model_dict[i].values()]) + '\n') return True else: return False def seed_database(db): # Users seed_data = import_from_csv("users.csv") for obj in seed_data: seed = User(obj["username"], obj["email"], obj["phone_number"], obj["password"], is_admin=bool(obj["is_admin"]), is_super=bool(obj["is_super"]), user_id=obj['user_id']) db.session.add(seed) print(seed) db.session.commit() print() # Api Key seed_data = import_from_csv("api_keys.csv") for obj in seed_data: seed = ApiKey(obj["user_id"], api_key=obj["api_key"]) db.session.add(seed) print(seed) db.session.commit() print() # Units seed_data = import_from_csv("units.csv") for obj in seed_data: seed = Unit(obj["name"], obj["user_id"], bool(obj["alert_mail"]), bool(obj["alert_sms"]), unit_id=obj["unit_id"]) db.session.add(seed) print(seed) db.session.commit() print() # Locator Points seed_data = import_from_csv("points.csv") for obj in seed_data: seed = LocatorPoint(obj["title"], obj["description"], obj["point_type"], float(obj['lat']), float(obj['lon']), obj['unit_id'], point_id=obj['point_id']) db.session.add(seed) print(seed) db.session.commit() def export_seed(): # Units units_q = Unit.query.all() units = UnitSchema(many=True).dump(units_q) export_check = export_to_csv(units, "units.csv") if export_check: print("--> Units export has been completed to 'units.csv'") else: print("--> An error has occurred exporting Units") # Locator Points points_q = LocatorPoint.query.all() points = LocatorPointSchema(many=True).dump(points_q) export_check = export_to_csv(points, "points.csv") if export_check: print("--> Locator Points export has been completed to 'points.csv'") else: print("--> An error has occurred exporting Locator Points") # Users users_q = User.query.all() users = UserSchema(many=True).dump(users_q) export_check = export_to_csv(users, "users.csv") if export_check: print("--> Users export has been completed to 'users.csv'") else: print("--> An error has occurred exporting Users") # Api Keys api_keys_q = ApiKey.query.all() api_keys = ApiKeySchema(many=True).dump(api_keys_q) export_check = export_to_csv(api_keys, "api_keys.csv") if export_check: print("--> Api Keys export has been completed to 'api_keys.csv'") else: print("--> An error has occurred exporting Api Keys")
2.71875
3
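A sketch of how the seeding helpers above might be driven from an application entry point. `create_app` and `db` are assumed names for the Flask factory and SQLAlchemy handle (the snippet only shows that seed_database receives a db with a session). Note also that the is_admin/is_super CSV fields are converted with bool(), which treats any non-empty string, including "False", as True, so the seed CSVs presumably leave false values empty.

# Hypothetical driver; `create_app` and `db` are assumed names, not shown above.
from lps import create_app, db                 # assumed application factory / SQLAlchemy handle
from lps.seeds import seed_database, export_seed

app = create_app()
with app.app_context():                        # models need an active app context
    seed_database(db)                          # load users, api keys, units and points from db/seeds/*.csv
    export_seed()                              # write the current tables back out to CSV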
Python/Least_Common_Multiple_for_large_numbers.py
DeathcallXD/DS-Algo-Point
0
12157
<reponame>DeathcallXD/DS-Algo-Point<gh_stars>0 def GCD(a,b): if b == 0: return a else: return GCD(b, a%b) a = int(input()) b = int(input()) print(a*b//(GCD(a,b)))
3.28125
3
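The same idea packaged as a reusable function, with a worked example: for 12 and 18 the GCD is 6, so the LCM is 12 * 18 / 6 = 36. Python integers are arbitrary precision, so this works unchanged for very large inputs.

# LCM via the Euclidean GCD, as in the snippet above but without stdin I/O.
def lcm(a, b):
    def gcd(x, y):
        while y:
            x, y = y, x % y
        return x
    return a * b // gcd(a, b)

assert lcm(12, 18) == 36      # gcd(12, 18) = 6 -> 12 * 18 // 6 = 36
assert lcm(7, 13) == 91       # coprime inputs: the LCM is just the product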
matgendb/builders/examples/maxvalue_builder.py
Tinaatucsd/pymatgen-db
0
12158
<filename>matgendb/builders/examples/maxvalue_builder.py """ Build a derived collection with the maximum value from each 'group' defined in the source collection. """ __author__ = '<NAME> <<EMAIL>>' __date__ = '5/21/14' from matgendb.builders import core from matgendb.builders import util from matgendb.query_engine import QueryEngine _log = util.get_builder_log("incr") class MaxValueBuilder(core.Builder): """Example of incremental builder that requires some custom logic for incremental case. """ def get_items(self, source=None, target=None): """Get all records from source collection to add to target. :param source: Input collection :type source: QueryEngine :param target: Output collection :type target: QueryEngine """ self._groups = self.shared_dict() self._target_coll = target.collection self._src = source return source.query() def process_item(self, item): """Calculate new maximum value for each group, for "new" items only. """ group, value = item['group'], item['value'] if group in self._groups: cur_val = self._groups[group] self._groups[group] = max(cur_val, value) else: # New group. Could fetch old max. from target collection, # but for the sake of illustration recalculate it from # the source collection. self._src.tracking = False # examine entire collection new_max = value for rec in self._src.query(criteria={'group': group}, properties=['value']): new_max = max(new_max, rec['value']) self._src.tracking = True # back to incremental mode # calculate new max self._groups[group] = new_max def finalize(self, errs): """Update target collection with calculated maximum values. """ for group, value in self._groups.items(): doc = {'group': group, 'value': value} self._target_coll.update({'group': group}, doc, upsert=True) return True
2.609375
3
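Outside of the matgendb builder machinery, the reduction that MaxValueBuilder performs is simply "keep the maximum value per group". A plain-Python illustration of that logic:

# Equivalent of process_item/finalize without MongoDB: max 'value' per 'group'.
records = [
    {"group": "a", "value": 3},
    {"group": "b", "value": 7},
    {"group": "a", "value": 5},
]

maxima = {}
for rec in records:
    g, v = rec["group"], rec["value"]
    maxima[g] = max(maxima.get(g, v), v)

assert maxima == {"a": 5, "b": 7}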
dvc/dependency/ssh.py
yfarjoun/dvc
2
12159
from __future__ import unicode_literals from dvc.output.ssh import OutputSSH from dvc.dependency.base import DependencyBase class DependencySSH(DependencyBase, OutputSSH): pass
1.226563
1
multiscaleloss.py
praveeenbadimala/flow_unsupervised
0
12160
import torch import torch.nn as nn import optflow.compute_tvl1_energy as compute_tvl1_energy def EPE(input_flow, target_flow, sparse=False, mean=True): EPE_map = torch.norm(target_flow-input_flow,2,1) if sparse: EPE_map = EPE_map[target_flow != 0] if mean: return EPE_map.mean() else: return EPE_map.sum() def multiscale_energy_loss(network_output_energy, target_flow,img1,img2, weights=None, sparse=False): def one_scale_mod(output, target, sparse,img1,img2): b, _, h, w = target.size() down_sample_img1 =nn.functional.adaptive_avg_pool2d(img1, (h, w)) down_sample_img2 = nn.functional.adaptive_avg_pool2d(img2, (h, w)) target_energy = compute_tvl1_energy.compute_tvl1_energy_optimized_batch(down_sample_img1, down_sample_img2, target) l1_loss = (output - target_energy).abs().sum() / target_energy.size(0) return l1_loss if type(network_output_energy) not in [tuple, list]: network_output_energy = [network_output_energy] if weights is None: weights = [0.46,0.23,0.23,0.46] # more preference for starting layers assert(len(weights) == len(network_output_energy)) loss = 0 flow_index = 0 for output, weight in zip(network_output_energy, weights): loss += weight * one_scale_mod(output, target_flow[flow_index], sparse,img1,img2) flow_index = flow_index + 1 return loss def realEPE(output, target, sparse=False): b, _, h, w = target.size() upsampled_output = nn.functional.upsample(output, size=(h,w), mode='bilinear') return EPE(upsampled_output, target, sparse, mean=True)
2.171875
2
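A quick shape-level check of the EPE helpers above with random tensors, assuming PyTorch is installed and the functions are in scope. Note that nn.functional.upsample used in realEPE is deprecated in recent PyTorch releases in favour of nn.functional.interpolate, so the call may emit a warning but still runs.

# Sanity check: identical flows give zero EPE; realEPE upsamples a coarse output first.
import torch

target = torch.randn(4, 2, 64, 64)      # batch of dense 2-channel flow fields
coarse = torch.randn(4, 2, 16, 16)      # a coarser network prediction

print(EPE(target, target).item())       # 0.0
print(realEPE(coarse, target).item())   # mean end-point error after bilinear upsampling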
object_detector/src/object_detector/object_detector.py
Ajapaik/ml-2021-ajapaik
0
12161
<gh_stars>0 import numpy as np import time import cv2 import argparse import sys import os import glob import json from pathlib import Path class ObjectDetector: def file_exist(file_names_list: list) -> bool: if all(list(map(os.path.isfile,file_names_list))): return True else: print("Please check one of the Config Files does not exist") return False # if coco.names, yolov4.{cfg,weights} are relative to cli-tool # no need to pass them as cli options def set_default_config(): LABELS_FILE='coco.names' CONFIG_FILE='yolov4.cfg' WEIGHTS_FILE='yolov4.weights' CONFIDENCE_THRESHOLD=0.25 file_names_list = [LABELS_FILE,CONFIG_FILE,WEIGHTS_FILE] # Check if the provided file paths exists if ObjectDetector.file_exist(file_names_list=file_names_list): return LABELS_FILE,CONFIG_FILE,WEIGHTS_FILE # load all files matching ext from im_dir def load_image_files(dir=""): # defaults to "current" dir imdir = dir # various extensions of files that can be fetched ext = ['png', 'jpg', 'gif','jpeg'] files = [] [files.extend(glob.glob(imdir + '*.' + e)) for e in ext] return files def save_to_json(dir,file_name,data): full_file_path = dir + file_name + ".json" with open(full_file_path, 'w') as f: json.dump(data, f, ensure_ascii=False) print(f" Saved {full_file_path} with bounding box cordinates.") def object_detection(image_path,output_dir,label_path=None,config_path=None,weight_path=None,threshold=0.25,): INPUT_FILE= image_path CONFIDENCE_THRESHOLD = threshold LABELS_FILE,_,_ = ObjectDetector.set_default_config() if label_path == None else label_path,None,None _,CONFIG_FILE,_ = ObjectDetector.set_default_config() if config_path == None else None,config_path,None _,_,WEIGHTS_FILE = ObjectDetector.set_default_config() if weight_path == None else None,None,weight_path LABELS = open(LABELS_FILE).read().strip().split("\n") np.random.seed(4) COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8") net = cv2.dnn.readNetFromDarknet(CONFIG_FILE, WEIGHTS_FILE) image = cv2.imread(INPUT_FILE) (H, W) = image.shape[:2] # determine only the *output* layer names that we need from YOLO ln = net.getLayerNames() ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()] blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False) net.setInput(blob) start = time.time() layerOutputs = net.forward(ln) end = time.time() #Maybe output some time Metrics? 
#print(" took {:.6f} seconds".format(end - start)) # initialize our lists of detected bounding boxes, confidences, and # class IDs, respectively boxes = [] confidences = [] classIDs = [] # loop over each of the layer outputs for output in layerOutputs: # loop over each of the detections for detection in output: # extract the class ID and confidence (i.e., probability) of # the current object detection scores = detection[5:] classID = np.argmax(scores) confidence = scores[classID] # filter out weak predictions by ensuring the detected # probability is greater than the minimum probability if confidence > CONFIDENCE_THRESHOLD: # scale the bounding box coordinates back relative to the # size of the image, keeping in mind that YOLO actually # returns the center (x, y)-coordinates of the bounding # box followed by the boxes' width and height box = detection[0:4] * np.array([W, H, W, H]) (centerX, centerY, width, height) = box.astype("int") # use the center (x, y)-coordinates to derive the top and # and left corner of the bounding box x = int(centerX - (width / 2)) y = int(centerY - (height / 2)) # update our list of bounding box coordinates, confidences, # and class IDs boxes.append([x, y, int(width), int(height)]) confidences.append(float(confidence)) classIDs.append(classID) # apply non-maxima suppression to suppress weak, overlapping bounding # boxes idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, CONFIDENCE_THRESHOLD) # how name objects were detected data = [{"detection_count": len(idxs), "file_name": image_path, "confidence_threshold": CONFIDENCE_THRESHOLD}] # ensure at least one detection exists if len(idxs) > 0: # loop over the indexes we are keeping for i in idxs.flatten(): # extract the bounding box coordinates (x, y) = (boxes[i][0], boxes[i][1]) (w, h) = (boxes[i][2], boxes[i][3]) data.append({"label":LABELS[classIDs[i]], "confidence":"{:.2f}".format(confidences[i]*100), "left_x":x, "top_y":y,"width":w,"height":h}) # save file ot image_path.json # Maybe a flag to save results of a batch to same json? _file_name = Path(image_path).name # safe file to json ObjectDetector.save_to_json(output_dir,_file_name,data) def main(): parser = argparse.ArgumentParser( description="Script to Object Detection with Yolo4." ) parser.add_argument( "--config_path", "-c", dest="config_path", default="yolov4.cfg", help="Path to yolov4.cfg" ) parser.add_argument( "--weight_path", "-w", dest="weight_path", default="yolov4.weights", help="Path to yolov4.weights." ) parser.add_argument( "--label_path", "-l", dest="label_path", default="coco.names", help="Path to coco.names." ) parser.add_argument( "--image_path", "-i", dest="image_path", default=None, help="Path to Image file. 
Leaving Blank Searches the current directory" ) parser.add_argument( "--threshold", "-t", dest="threshold", default=float(0.25), help="Detection Confidence Threshold to apply" ) parser.add_argument( "--image-dir", "-d", dest="input_dir", default=os.getcwd(), help="Directory containing image file" ) parser.add_argument( "--output-dir", "-o", dest="output_dir", default=os.getcwd(), help="Directory where output should be stored" ) args = parser.parse_args() if args.image_path is None: print(f"--image_path not provided, searching {args.input_dir} for image files...") image_files = ObjectDetector.load_image_files(args.input_dir) if len(image_files) <=0: print("No Image file(s) found") for i,image in enumerate(image_files,1): print(f"Running Object Detection on {i} of {len(image_files)} images") ObjectDetector.object_detection( image, args.output_dir, args.label_path, args.config_path, args.weight_path, float(args.threshold) ) else: ObjectDetector.object_detection( args.image_path, args.output_dir, args.label_path, args.config_path, args.weight_path, float(args.threshold) ) if __name__ == "__main__": main()
2.53125
3
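Bypassing the CLI, the detector above can also be called directly; the paths below are placeholders and must point at a real image plus the YOLOv4 weights, config and coco.names files. Passing all three model paths explicitly sidesteps the set_default_config fallback branch.

# Placeholder paths; the result is written to ./example.jpg.json.
ObjectDetector.object_detection(
    image_path="example.jpg",
    output_dir="./",
    label_path="coco.names",
    config_path="yolov4.cfg",
    weight_path="yolov4.weights",
    threshold=0.4,
)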
pipeline/tests/engine/core/data/test_api.py
wkma/bk-sops
2
12162
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sys from django.test import TestCase from django.utils.module_loading import import_string from pipeline.tests.mock import * # noqa from pipeline.tests.mock_settings import * # noqa class EngineDataAPITestCase(TestCase): @classmethod def setUpClass(cls): cls.mock_settings = MagicMock() cls.settings_patch = patch(ENGINE_DATA_API_SETTINGS, cls.mock_settings) cls.import_backend_patch = patch(ENGINE_DATA_API_IMPORT_BACKEND, MagicMock()) cls.settings_patch.start() cls.import_backend_patch.start() cls.api = import_string("pipeline.engine.core.data.api") cls.write_methods = ["set_object", "del_object", "expire_cache"] cls.read_methods = ["get_object", "cache_for"] cls.method_params = { "set_object": ["key", "obj"], "del_object": ["key"], "expire_cache": ["key", "obj", "expires"], "cache_for": ["key"], "get_object": ["key"], } @classmethod def tearDownClass(cls): cls.settings_patch.stop() cls.import_backend_patch.stop() def setUp(self): self.backend = MagicMock() self.candidate_backend = MagicMock() self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = False def test_write__without_candidate(self): for method in self.write_methods: with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None): getattr(self.api, method)(*self.method_params[method]) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_not_called() sys.stdout.write( "{} pass test_write__without_candidate test\n".format(method) ) def test_write__without_candiate_raise_err(self): for method in self.write_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None): self.assertRaises( Exception, getattr(self.api, method), *self.method_params[method] ) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_not_called() sys.stdout.write( "{} pass test_write__without_candiate_raise_err test\n".format(method) ) def test_write__with_candidate(self): for method in self.write_methods: with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): getattr(self.api, method)(*self.method_params[method]) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write("{} pass test_write__with_candidate test\n".format(method)) def test_write__with_candidate_main_raise_err(self): for method in self.write_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): 
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): getattr(self.api, method)(*self.method_params[method]) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_write__with_candidate_main_raise_err test\n".format( method ) ) def test_write__with_candidate_raise_err(self): for method in self.write_methods: setattr(self.candidate_backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): getattr(self.api, method)(*self.method_params[method]) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_write__with_candidate_raise_err test\n".format(method) ) def test_write__with_candidate_both_raise_err(self): for method in self.write_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) setattr(self.candidate_backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): self.assertRaises( Exception, getattr(self.api, method), *self.method_params[method] ) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_write__with_candidate_both_raise_err test\n".format( method ) ) def test_write__with_auto_expire(self): self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = True self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE_SECONDS = 30 for method in self.write_methods: with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): getattr(self.api, method)(*self.method_params[method]) if method == "set_object": getattr(self.backend, "expire_cache").assert_called_once_with( *self.method_params[method], expires=30 ) self.backend.expire_cache.reset_mock() else: getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_write__with_candidate_both_raise_err test\n".format( method ) ) def test_read__without_candidate(self): for method in self.read_methods: with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None): data = getattr(self.api, method)(*self.method_params[method]) self.assertIsNotNone(data) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_not_called() sys.stdout.write( "{} pass test_read__without_candidate test\n".format(method) ) def test_read__without_candidate_raise_err(self): for method in self.read_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None): self.assertRaises( Exception, getattr(self.api, method), *self.method_params[method] ) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_not_called() sys.stdout.write( "{} pass 
test_read__without_candidate_raise_err test\n".format(method) ) def test_read__with_candidate_not_use(self): for method in self.read_methods: with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): data = getattr(self.api, method)(*self.method_params[method]) self.assertIsNotNone(data) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_not_called() sys.stdout.write( "{} pass test_read__with_candidate_not_use test\n".format(method) ) def test_read__with_candidate_use(self): for method in self.read_methods: setattr(self.backend, method, MagicMock(return_value=None)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): data = getattr(self.api, method)(*self.method_params[method]) self.assertIsNotNone(data) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_read__with_candidate_use test\n".format(method) ) def test_read__with_candidate_err(self): for method in self.read_methods: setattr(self.backend, method, MagicMock(return_value=None)) setattr(self.candidate_backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): data = getattr(self.api, method)(*self.method_params[method]) self.assertIsNone(data) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_read__with_candidate_err test\n".format(method) ) def test_read__with_candidate_main_raise_err(self): for method in self.read_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): data = getattr(self.api, method)(*self.method_params[method]) self.assertIsNotNone(data) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_read__with_candidate_main_raise_err test\n".format(method) ) def test_read__with_candidate_both_raise_err(self): for method in self.read_methods: setattr(self.backend, method, MagicMock(side_effect=Exception)) setattr(self.candidate_backend, method, MagicMock(side_effect=Exception)) with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): self.assertRaises( Exception, getattr(self.api, method), *self.method_params[method] ) getattr(self.backend, method).assert_called_once_with( *self.method_params[method] ) getattr(self.candidate_backend, method).assert_called_once_with( *self.method_params[method] ) sys.stdout.write( "{} pass test_read__with_candidate_both_raise_err test\n".format(method) ) def test_set_schedule_data(self): with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): self.api.set_schedule_data("key", "data") self.backend.set_object.assert_called_once_with( "key_schedule_parent_data", "data" ) self.candidate_backend.set_object.assert_called_once_with( 
"key_schedule_parent_data", "data" ) def test_delete_parent_data(self): with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): self.api.delete_parent_data("key") self.backend.del_object.assert_called_once_with( "key_schedule_parent_data" ) self.candidate_backend.del_object.assert_called_once_with( "key_schedule_parent_data" ) def test_get_schedule_parent_data(self): with patch(ENGINE_DATA_API_BACKEND, self.backend): with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend): data = self.api.get_schedule_parent_data("key") self.assertIsNotNone(data) self.backend.get_object.assert_called_once_with( "key_schedule_parent_data" ) self.candidate_backend.get_object.assert_not_called()
1.929688
2
pactools/mne_api.py
mathurinm/pactools
0
12163
import numpy as np def _check_mne(name): """Helper to check if h5py is installed""" try: import mne except ImportError: raise ImportError('Please install MNE-python to use %s.' % name) return mne def raw_to_mask(raw, ixs, events=None, tmin=None, tmax=None): """ A function to transform MNE data into pactools input signals. It select the one channel on which you to estimate PAC, or two channels for cross-channel PAC. It also returns a mask generator, that mask the data outside a given window around an event. The mask generator returns a number of masks equal to the number of events times the number of windows (i.e. the number of pairs (tmin, tmax)). Warning: events is stored in indices, tmin and tmax are stored in seconds. Parameters ---------- raw : an instance of Raw, containing data of shape (n_channels, n_times) The data used to calculate PAC ixs : int or couple of int The indices for the low/high frequency channels. If only one is given, the same channel is used for both low_sig and high_sig. events : array, shape (n_events, 3) | array, shape (n_events,) | None MNE events array. To be supplied if data is 2D and output should be split by events. In this case, `tmin` and `tmax` must be provided. If `ndim == 1`, it is assumed to be event indices, and all events will be grouped together. Otherwise, events will be grouped along the third dimension. tmin : float | list of floats, shape (n_windows, ) | None If `events` is not provided, it is the start time to use in `raw`. If `events` is provided, it is the time (in seconds) to include before each event index. If a list of floats is given, then PAC is calculated for each pair of `tmin` and `tmax`. Defaults to `min(raw.times)`. tmax : float | list of floats, shape (n_windows, ) | None If `events` is not provided, it is the stop time to use in `raw`. If `events` is provided, it is the time (in seconds) to include after each event index. If a list of floats is given, then PAC is calculated for each pair of `tmin` and `tmax`. Defaults to `max(raw.times)`. Attributes ---------- low_sig : array, shape (1, n_points) Input data for the phase signal high_sig : array or None, shape (1, n_points) Input data for the amplitude signal. If None, we use low_sig for both signals. mask : MaskIterator instance Object that behaves like a list of mask, without storing them all. The PAC will only be evaluated where the mask is False. Examples -------- >>> from pactools import raw_to_mask >>> low_sig, high_sig, mask = raw_to_mask(raw, ixs, events, tmin, tmax) >>> n_masks = len(mask) >>> for one_mask in mask: ... pass """ mne = _check_mne('raw_to_mask') if not isinstance(raw, mne.io.BaseRaw): raise ValueError('Must supply Raw as input') ixs = np.atleast_1d(ixs) fs = raw.info['sfreq'] data = raw[:][0] n_channels, n_points = data.shape low_sig = data[ixs[0]][None, :] if ixs.shape[0] > 1: high_sig = data[ixs[1]][None, :] else: high_sig = None mask = MaskIterator(events, tmin, tmax, n_points, fs) return low_sig, high_sig, mask class MaskIterator(object): """Iterator that creates the masks one at a time. Examples -------- >>> from pactools import MaskIterator >>> all_masks = MaskIterator(events, tmin, tmax, n_points, fs) >>> n_masks = len(all_masks) >>> for one_mask in all_masks: ... 
pass """ def __init__(self, events, tmin, tmax, n_points, fs): self.events = events self.tmin = tmin self.tmax = tmax self.n_points = n_points self.fs = float(fs) self._init() def _init(self): self.tmin = np.atleast_1d(self.tmin) self.tmax = np.atleast_1d(self.tmax) if len(self.tmin) != len(self.tmax): raise ValueError('tmin and tmax have differing lengths') n_windows = len(self.tmin) if self.events is None: self.events = np.array([0.]) n_events = 1 if self.events.ndim == 1: n_events = 1 # number of different event kinds else: n_events = np.unique(self.events[:, -1]).shape[0] self._n_iter = n_windows * n_events def __iter__(self): return self.next() def __len__(self): return self._n_iter def next(self): if self.events.ndim == 1: event_names = [None, ] else: event_names = np.unique(self.events[:, -1]) mask = np.empty((1, self.n_points), dtype=bool) for event_name in event_names: if self.events.ndim == 1: # select all the events since their kind is not specified these_events = self.events else: # select the event indices of one kind of event these_events = self.events[self.events[:, -1] == event_name, 0] for tmin, tmax in zip(self.tmin, self.tmax): mask.fill(True) # it masks everything for event in these_events: start, stop = None, None if tmin is not None: start = int(event + tmin * self.fs) if tmax is not None: stop = int(event + tmax * self.fs) mask[:, start:stop] = False yield mask
2.796875
3
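MaskIterator above can be exercised on its own with synthetic event indices (no MNE Raw object is needed for this part); the numbers below are arbitrary. One mask is produced per event kind and per (tmin, tmax) window, and samples inside each window are set to False.

# Synthetic example: one event kind, one (tmin, tmax) window -> a single mask.
import numpy as np

fs = 250.0                          # sampling rate in Hz
n_points = int(10 * fs)             # 10 seconds of signal
events = np.array([500, 1500])      # event sample indices (1-D: one event kind)

masks = MaskIterator(events, tmin=-0.5, tmax=0.5, n_points=n_points, fs=fs)
print(len(masks))                   # 1 window * 1 event kind -> 1 mask
for mask in masks:
    print(mask.shape, (~mask).sum())  # (1, 2500) and 500 unmasked samples (250 per event)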
1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py
jiadaizhao/LeetCode
49
12164
<filename>1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py import collections from itertools import combinations from collections import Counter class Solution: def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]: visit = collections.defaultdict(list) for t, u, w in sorted(zip(timestamp, username, website)): visit[u].append(w) table = sum([Counter(set(combinations(w, 3))) for w in visit.values()], Counter()) return list(min(table, key=lambda k: (-table[k], k)))
3.0625
3
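A worked example using LeetCode's sample input, assuming the Solution class above is defined with `from typing import List` in scope (as it is on the judge). Both joe and mary visit home, then about, then career, so that 3-sequence is shared by two users and wins.

# Expected output: ['home', 'about', 'career']
username  = ["joe", "joe", "joe", "james", "james", "james", "james", "mary", "mary", "mary"]
timestamp = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
website   = ["home", "about", "career", "home", "cart", "maps", "home", "home", "about", "career"]

print(Solution().mostVisitedPattern(username, timestamp, website))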
src/tarot/magicEight.py
tjweldon/St_Germain
0
12165
import random async def magicEightBall(ctx, message=True): if message: eightBall = random.randint(0, 19) outlooks = [ "As I see it, yes.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.", "Don’t count on it.", "It is certain.", "It is decidedly so.", "Most likely.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Outlook good.", "Reply hazy, try again.", "Signs point to yes.", "Very doubtful.", "Without a doubt.", "Yes.", "Yes – definitely.", "You may rely on it.", ] await ctx.send('Magic 8: ' + outlooks[eightBall])
2.5625
3
app/mod_check/MySQL.py
RITC3/Hermes
2
12166
<reponame>RITC3/Hermes import pymysql.cursors from ..mod_check import app @app.task def check(host, port, username, password, db): result = None connection = None try: connection = pymysql.connect(host=host, port=port, user=username, password=password, db=db, charset='utf8mb4', autocommit=True, cursorclass=pymysql.cursors.DictCursor) with connection.cursor() as cursor: cursor.execute('SELECT @@version AS version') res = cursor.fetchone() if isinstance(res, dict): result = res.get('version', None) except pymysql.Error: result = False finally: if connection is not None: connection.close() return result
1.992188
2
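Since check is registered with app.task (presumably a Celery app), the decorated function can still be invoked synchronously for a quick test; the connection details below are placeholders. It returns the server version string on success, False on a pymysql error, and None if the version query yields nothing usable.

# Placeholder credentials; runs the check outside the task queue.
result = check("127.0.0.1", 3306, "hermes", "hermes-password", "mysql")
if result is False:
    print("MySQL check failed")
else:
    print("MySQL reports version:", result)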
src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/hue_simpletest.py
mbaaba/solar_panel
1
12167
import time import board import busio from digitalio import DigitalInOut from adafruit_esp32spi import adafruit_esp32spi from adafruit_esp32spi import adafruit_esp32spi_wifimanager import neopixel # Import Philips Hue Bridge from adafruit_hue import Bridge # Get wifi details and more from a secrets.py file try: from secrets import secrets except ImportError: print("WiFi and API secrets are kept in secrets.py, please add them there!") raise # ESP32 SPI esp32_cs = DigitalInOut(board.ESP_CS) esp32_ready = DigitalInOut(board.ESP_BUSY) esp32_reset = DigitalInOut(board.ESP_RESET) spi = busio.SPI(board.SCK, board.MOSI, board.MISO) esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset) status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2) wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light) # Attempt to load bridge username and IP address from secrets.py try: username = secrets['hue_username'] bridge_ip = secrets['bridge_ip'] my_bridge = Bridge(wifi, bridge_ip, username) except: # Perform first-time bridge setup my_bridge = Bridge(wifi) ip = my_bridge.discover_bridge() username = my_bridge.register_username() print('ADD THESE VALUES TO SECRETS.PY: \ \n\t"bridge_ip":"{0}", \ \n\t"hue_username":"{1}"'.format(ip, username)) raise # Enumerate all lights on the bridge my_bridge.get_lights() # Turn on the light my_bridge.set_light(1, on=True) # RGB colors to Hue-Compatible HSL colors hsl_y = my_bridge.rgb_to_hsb([255, 255, 0]) hsl_b = my_bridge.rgb_to_hsb([0, 0, 255]) hsl_w = my_bridge.rgb_to_hsb([255, 255, 255]) hsl_colors = [hsl_y, hsl_b, hsl_w] # Set the light to Python colors! for color in hsl_colors: my_bridge.set_light(1, hue=int(color[0]), sat=int(color[1]), bri=int(color[2])) time.sleep(5) # Set a predefinedscene # my_bridge.set_group(1, scene='AB34EF5') # Turn off the light my_bridge.set_light(1, on=False)
2.515625
3
examples/python-echo/src/echo.py
mdelete/kore
0
12168
# # Copyright (c) 2013-2018 <NAME> <<EMAIL>> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # import kore import socket class EchoServer: # Setup socket + wrap it inside of a kore socket so we can use it. def __init__(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(False) sock.bind(("127.0.0.1", 6969)) sock.listen() self.conn = kore.socket_wrap(sock) # Wait for a new client to connect, then create a new task # that calls handle_client with the ocnnected client as # the argument. async def run(self): while True: try: client = await self.conn.accept() kore.task_create(self.handle_client(client)) client = None except Exception as e: kore.fatal("exception %s" % e) # Each client will run as this co-routine. async def handle_client(self, client): while True: try: data = await client.recv(1024) if data is None: break await client.send(data) except Exception as e: print("client got exception %s" % e) client.close() # Setup the server object. server = EchoServer() # Create a task that will execute inside of Kore as a co-routine. kore.task_create(server.run())
2.296875
2
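With the Kore app above running, the echo behaviour can be checked from any Python shell with a plain TCP client against the hard-coded 127.0.0.1:6969 listener:

# Minimal client for the echo server above.
import socket

with socket.create_connection(("127.0.0.1", 6969)) as s:
    s.sendall(b"hello")
    print(s.recv(1024))   # expect b"hello" echoed back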
src/token/__init__.py
mingz2013/py.script
1
12169
<gh_stars>1-10 # -*- coding:utf-8 -*- """ """ __date__ = "14/12/2017" __author__ = "zhaojm"
0.96875
1
pesto-cli/pesto/ws/service/process.py
CS-SI/pesto
25
12170
import asyncio import logging import traceback import uuid from typing import Optional, Tuple, Any, Callable from pesto.ws.core.payload_parser import PayloadParser, PestoConfig from pesto.ws.core.pesto_feature import PestoFeatures from pesto.ws.core.utils import load_class, async_exec from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI from pesto.ws.features.payload_converter import PayloadConverter from pesto.ws.features.payload_debug import PayloadDebug from pesto.ws.features.response_serializer import ResponseSerializer from pesto.ws.features.schema_validation import SchemaValidation from pesto.ws.features.stateful_response import StatefulResponse from pesto.ws.features.stateless_response import StatelessResponse from pesto.ws.service.describe import DescribeService from pesto.ws.service.job_result import ResultType log = logging.getLogger(__name__) class ProcessService: PROCESS_CLASS_NAME = 'algorithm.process.Process' _algorithm: Optional[Callable] = None _describe = None @staticmethod def init(): if ProcessService._algorithm is not None: raise ValueError('Process Service already loaded !') try: log.info('ProcessService.init() ...') ProcessService._algorithm = load_class(ProcessService.PROCESS_CLASS_NAME)() if hasattr(ProcessService._algorithm, 'on_start'): log.info('ProcessService.on_start() ...') ProcessService._algorithm.on_start() log.info('ProcessService.on_start() ... Done !') log.info('ProcessService.init() ... Done !') except: traceback.print_exc() log.warning('Algorithm {}.on_start() failure !'.format(ProcessService.PROCESS_CLASS_NAME)) def __init__(self, url_root: str): self.url_root = url_root @property def service_description(self): if ProcessService._describe is None: ProcessService._describe = DescribeService(self.url_root).compute_describe() return ProcessService._describe def process(self, payload: dict) -> dict: config = PayloadParser.parse(payload) image_roi: Optional[ImageROI] = config.get(PestoConfig.roi) # if no ROI: None active_roi: ImageROI = image_roi or DummyImageROI() # bypass compute crop info and remove margins in pipeline job_id = str(uuid.uuid4().time_low) is_stateful = self.service_description['asynchronous'] is True input_schema = self.service_description['input'] output_schema = self.service_description['output'] common_pipeline = filter(None, [ SchemaValidation(schema=input_schema), active_roi.compute_crop_infos(), PayloadConverter(image_roi=image_roi, schema=input_schema), PayloadDebug(schema=input_schema), AlgorithmWrapper(ProcessService._algorithm), active_roi.remove_margin(), ResponseSerializer(schema=output_schema, job_id=job_id), ]) if is_stateful: pipeline = [ *common_pipeline, StatefulResponse(self.url_root, job_id) ] else: pipeline = [ *common_pipeline, StatelessResponse(self.url_root, job_id, output_schema) ] return PestoFeatures(pipeline).process(payload) async def async_process(self, request_payload: dict) -> Tuple[Any, ResultType]: return await asyncio.wait_for( async_exec(lambda: self.process(request_payload)), timeout=None )
1.773438
2
migration_runner/helpers.py
beveradb/ecs-digital-interview-test
0
12171
# -*- coding: utf-8 -*- import logging import os import re import sys class Helpers: def __init__(self, logger=None): if logger is None: self.logger = logging.getLogger(__name__) else: self.logger = logger @staticmethod def extract_sequence_num(filename): sequence_num = re.search( '([0-9]+)[^0-9].+', os.path.basename(filename) ).group(1) return int(sequence_num) def append_migration(self, migrations, filename): try: migrations.append((self.extract_sequence_num(filename), filename)) except AttributeError: self.logger.error("Invalid filename found: {}".format(filename)) sys.exit(1) def find_migrations(self, sql_directory): migrations = [] for filename in os.listdir(sql_directory): if filename.endswith(".sql"): self.append_migration( migrations, str(os.path.join(sql_directory, filename)) ) return migrations @staticmethod def sort_migrations(migrations): if ( all(isinstance(tup, tuple) for tup in migrations) and all(isinstance(tup[0], int) for tup in migrations) and all(isinstance(tup[1], str) for tup in migrations) ): migrations.sort(key=lambda tup: tup[0]) else: raise TypeError( "Migrations list did not contain only tuple(int, str)") def populate_migrations(self, sql_directory): migrations = self.find_migrations(sql_directory) self.sort_migrations(migrations) return migrations @staticmethod def get_unprocessed_migrations(db_version, migrations): return [tup for tup in migrations if tup[0] > int(db_version)]
2.6875
3
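A sketch of how Helpers would be used to collect pending migrations; the directory and version number are hypothetical. Filenames are expected to start with a numeric sequence (e.g. 001_create_users.sql), and anything with a sequence number above the current DB version is treated as unprocessed.

# Hypothetical directory of NNN_description.sql files.
helpers = Helpers()

migrations = helpers.populate_migrations("./sql")             # [(1, './sql/001_...'), (2, ...), ...]
pending = Helpers.get_unprocessed_migrations(1, migrations)   # everything after DB version 1
for seq, path in pending:
    print(seq, path)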
sitri/providers/contrib/ini.py
Elastoo-Team/sitri
11
12172
import configparser import os import typing from sitri.providers.base import ConfigProvider class IniConfigProvider(ConfigProvider): """Config provider for Initialization file (Ini).""" provider_code = "ini" def __init__( self, ini_path: str = "./config.ini", ): """ :param ini_path: path to ini file """ self.configparser = configparser.ConfigParser() with open(os.path.abspath(ini_path)) as f: self.configparser.read_file(f) self._sections = None @property def sections(self): if not self._sections: self._sections = list(self.configparser.keys()) return self._sections def get(self, key: str, section: str, **kwargs) -> typing.Optional[typing.Any]: # type: ignore """Get value from ini file. :param key: key or path for search :param section: section of ini file """ if section not in self.sections: return None return self.configparser[section].get(key) def keys(self, section: str, **kwargs) -> typing.List[str]: # type: ignore """Get keys of section. :param section: section of ini file """ if section not in self.sections: return [] return list(self.configparser[section].keys())
2.59375
3
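Usage sketch for the provider above against a hypothetical config.ini. Note that configparser always exposes a DEFAULT section, so it shows up in sections alongside the ones defined in the file.

# Assuming ./config.ini contains:
#
#   [db]
#   host = localhost
#   port = 5432
#
provider = IniConfigProvider(ini_path="./config.ini")
print(provider.sections)                    # ['DEFAULT', 'db']
print(provider.keys(section="db"))          # ['host', 'port']
print(provider.get("host", section="db"))   # 'localhost'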
xpanse/api/assets/v2/ip_range.py
PaloAltoNetworks/cortex-xpanse-python-sdk
3
12173
<reponame>PaloAltoNetworks/cortex-xpanse-python-sdk<gh_stars>1-10 from typing import Any, Dict, List from xpanse.const import V2_PREFIX from xpanse.endpoint import ExEndpoint from xpanse.iterator import ExResultIterator class IpRangeEndpoint(ExEndpoint): """ Part of the Assets v2 API for handling IP Ranges. See: https://api.expander.expanse.co/api/v1/docs/ """ def list(self, **kwargs: Any) -> ExResultIterator: """ Returns the list of IP Ranges. Arguments should be passed as keyword args using the names below. Args: limit (int, optional): Returns at most this many results in a single api call. Default is 100, max is 10000. offset (int, optional): Returns results starting at this offset. Default is 0. sort (str, optional): Comma-separated string; orders results by the given fields. If the field name is prefixed by a -, then the ordering will be descending for that field. Use a dotted notation to order by fields that are nested. business_units (str, optional): Comma-separated string; Returns only results whose Business Unit's ID falls in the provided list. NOTE: If omitted, API will return results for all Business Units the user has permissions to view. Also, cannot be used with the business-unit-names parameter. business_unit_names (str, optional): Comma-separated string; Returns only results whose Business Unit's name falls in the provided list. NOTE: If omitted, API will return results for all Business Units the user has permissions to view. Also, cannot be used with the business-units parameter. inet (str, optional): Search for given IP/CIDR block using a single IP (d.d.d.d), a dashed IP range (d.d.d.d-d.d.d.d), a CIDR block (d.d.d.d/m), a partial CIDR (d.d.), or a wildcard (d.d.*.d). Returns only results whose [startAddress, endAddress] range overlap with the given IP Address or CIDR. tags (str, optional): Comma-separated string; Returns only results who are associated with the provided Tag IDs. Cannot be used with the tag-names parameter. tag_names (str, optional): Comma-separated string; Returns only results who are associated with the provided Tag names. Cannot be used with the tags parameter. include (str, optional): Comma-separated string; Include the provided fields as part of the serialized result. Allowed values are `annotations`, `severityCounts`, `attributionReasons`, `relatedRegistrationInformation`, `certDetails`, and `locationInformation` Returns: :obj:`ExResultIterator`: An iterator containing all of the ip_range results. Results can be iterated or called by page using `<iterator>.next()`. Examples: >>> # Return all ip ranges and print each range: >>> for res in client.assets.ip_range.v2.list(): ... for ip_r in res: ... print(ip_r) """ return ExResultIterator(self._api, f"{V2_PREFIX}/ip-range", kwargs) def get(self, id: str, **kwargs: Any) -> Dict[str, Any]: """ Returns the details for a given IP Range. Arguments should be passed as keyword args using the names below. Args: id (str): ID for the ip-range. Should be a UUID. include (str, optional): Comma-separated string; Include the provided fields as part of the serialized result. Returns: :obj:`dict`: A dictionary containing all of the details about an IP Range. Examples: >>> # Return IP Range with severity counts >>> my_range = client.assets.ip_range.v2.get(<id>, include="severityCounts") """ return self._api.get(f"{V2_PREFIX}/ip-range/{id}", params=kwargs).json() def create( self, startAddress: str, endAddress: str, parentId: str, **kwargs: Any ) -> Dict[str, Any]: """ Creates a new custom IP Range. 
NOTE: A validation error will be returned if the start and end addresses of the custom range do not fit within a top level range defined by Xpanse. Args: startAddress (str): Start address of custom ip-range. endAddress (str): End address of custom ip-range. parentId (str): Id of parent ip-range. tags (list, optional): A list of tag annotation names. additionalNotes (str, optional): Any additional notes about the custom ip-range. pointOfContactIds (list, optional): A lost of point-of-contact annotation ids. Returns: :obj:`dict`: A dictionary containing all of the details about the newly created, custom IP Range. Examples: >>> # Create a new ip-range under a parent range >>> new_range = client.assets.ip_range.v2.create("172.16.31.10", "172.16.17.32", "43a5a569-27b0-39b5-98f4-22b9885546d7", additionalNotes="Business Unit X - Marketing website hosts") """ payload: Dict[str, Any] = { "startAddress": startAddress, "endAddress": endAddress, "parentId": parentId, "annotations": {}, } if "tags" in kwargs: payload["annotations"]["tags"] = kwargs["tags"] if "additionalNotes" in kwargs: payload["annotations"]["additionalNotes"] = kwargs["additionalNotes"] if "pointOfContactIds" in kwargs: payload["annotations"]["pointOfContactIds"] = kwargs["pointOfContactIds"] return self._api.post(f"{V2_PREFIX}/ip-range", json=payload).json() def delete(self, id: str) -> bool: """ Delete the given IP Range, and all connections to other data. NOTE: This will only work for user-defined IP Ranges. Args: id (str): ID for the ip-range. Should be a UUID. Returns: :obj:`boolean`: `True` if the range was successfully deleted, otherwise `False`. Examples: >>> # Deletes a user defined range >>> client.assets.ip_range.v2.delete("43a5a569-27b0-39b5-98f4-22b9885546d7") """ return ( True if self._api.delete(f"{V2_PREFIX}/ip-range/{id}").status_code == 204 else False ) def update(self, id: str, **kwargs: Any) -> Dict[str, Any]: """ Allows the partial update of the given IP Range. Args: id (str): ID for the ip-range. Should be a UUID. startAddress (str, optional): Start address of custom ip-range. endAddress (str, optional): End address of custom ip-range. parentId (str, optional): Id of parent ip-range. tags (list, optional): A list of tag annotation ids. additionalNotes (str, optional): Any additional notes about the custom ip-range. pointOfContactIds (list, optional): A lost of point-of-contact annotation ids. Returns: :obj:`dict`: A dictionary containing all of the details about the updated, custom IP Range. Examples: >>> # Update an ip-range under a parent range >>> new_range = client.assets.ip_range.v2.update("43a5a569-27b0-39b5-98f4-22b9885546d7", additionalNotes="Business Unit X - Development Environment") """ payload = {} if "startAddress" in kwargs: payload["startAddress"] = kwargs["startAddress"] if "endAddress" in kwargs: payload["endAddress"] = kwargs["endAddress"] if "parentId" in kwargs: payload["parentId"] = kwargs["parentId"] if any( arg in ("tags", "additionalNotes", "pointOfContactIds") for arg in kwargs ): payload["annotations"] = {} if "tags" in kwargs: payload["annotations"]["tags"] = kwargs["tags"] if "additionalNotes" in kwargs: payload["annotations"]["additionalNotes"] = kwargs["additionalNotes"] if "pointOfContactIds" in kwargs: payload["annotations"]["pointOfContactIds"] = kwargs["pointOfContactIds"] return self._api.patch(f"{V2_PREFIX}/ip-range/{id}", json=payload).json() def tag(self, ranges: List[str], tags: List[str]) -> bool: """ Adds the provided tags to all of the specified ip ranges. 
        If any of the provided tags do not exist, they will be created.

        Args:
            ranges (list): A list of ip-range IDs. Should be UUIDs.
            tags (list): A list of tag annotation names to add to an ip-range.

        Returns:
            :obj:`boolean`: `True` if the ranges were tagged successfully, otherwise `False`.
        """
        payload = {"ipRangeIds": ranges, "tags": tags}

        return self._api.post(f"{V2_PREFIX}/ip-range/tag", json=payload)
2.59375
3
polus-color-pyramid-builder-plugin/src/main.py
blowekamp/polus-plugins
0
12174
<reponame>blowekamp/polus-plugins<gh_stars>0 from bfio import BioReader import argparse, logging import numpy as np from pathlib import Path import filepattern, multiprocessing, utils from concurrent.futures import ThreadPoolExecutor COLORS = ['red', 'green', 'blue', 'yellow', 'magenta', 'cyan', 'gray'] def get_number(s): """ Check that s is number If s is a number, first attempt to convert it to an int. If integer conversion fails, attempt to convert it to a float. If float conversion fails, return None. Inputs: s - An input string or number Outputs: value - Either float, int or None """ try: return [int(si) for si in s.split('-')] except ValueError: try: return [float(si) for si in s.split('-')] except ValueError: return None def get_bounds(br,lower_bound,upper_bound): """ Calculate upper and lower pixel values for image rescaling This method calculates the upper and lower percentiles for a given image. The lower_bound and upper_bound must be floats in the range 0-1, where 0.01 indicates 1%. The values returned are pixel intensity values. Images are read in tiles to prevent images from being completely read into memory. This permits calculation of percentiles on images that are larger than memory. Args: br (bfio.BioReader): A BioReader object to access a tiled tiff lower_bound (float): Lower bound percentile, must be in 0.00-1.00 upper_bound (float): Upper bound percentile, must be in 0.00-1.00 Returns: [list]: List of upper and lower bound values in pixel intensity units. """ # TODO: Replace pixel buffer with histogram/fingerprint to handle # larger images and/or larger percentile values # Make sure the inputs are properly formatted assert isinstance(lower_bound,float) and isinstance(upper_bound,float) assert lower_bound >= 0 and lower_bound <= 1.0 assert upper_bound >= 0 and upper_bound <= 1.0 # Get the image size in pixels image_size = br.num_x() * br.num_y() # Get number of pixels needed to get percentile information upper_bound_size = int(image_size * (1-upper_bound)) lower_bound_size = int(image_size * lower_bound) # Create the pixel buffer dtype = br.read_metadata().image().Pixels.get_PixelType() upper_bound_vals = np.zeros((2*upper_bound_size,),dtype=dtype) lower_bound_vals = np.full((2*lower_bound_size,),np.iinfo(dtype).max,dtype=dtype) # Load image tiles and sort pixels for x in range(0,br.num_x(),1024): # Load the first tile tile = br.read_image(X=[x,min([x+1024,br.num_x()])],Z=[0,1]) # Sort the non-zero values tile_sorted = np.sort(tile[tile.nonzero()],axis=None) # Store the upper and lower bound pixel values temp = tile_sorted[-upper_bound_size:] upper_bound_vals[:temp.size] = temp temp = tile_sorted[:lower_bound_size] lower_bound_vals[-temp.size:] = temp # Resort the pixels upper_bound_vals = np.sort(upper_bound_vals,axis=None) lower_bound_vals = np.sort(lower_bound_vals,axis=None) return [lower_bound_vals[lower_bound_size],upper_bound_vals[-upper_bound_size]] if __name__=="__main__": # Initialize the logger logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') logger = logging.getLogger("main") logger.setLevel(logging.INFO) ''' Argument parsing ''' logger.info("Parsing arguments...") parser = argparse.ArgumentParser(prog='main', description='Builds a DeepZoom color pyramid.') # Input arguments parser.add_argument('--filePattern', dest='filePattern', type=str, help='Filename pattern used to separate data', required=True) parser.add_argument('--inpDir', dest='inpDir', type=str, help='Input image collection to be 
processed by this plugin', required=True) parser.add_argument('--layout', dest='layout', type=str, help='Color ordering (e.g. 1,11,,,,5,6)', required=True) parser.add_argument('--bounds', dest='bounds', type=str, help='Set bounds (should be float-float, int-int, or blank, e.g. 0.01-0.99,0-16000,,,,,)', required=False) # Output arguments parser.add_argument('--outDir', dest='outDir', type=str, help='Output pyramid path.', required=True) # Parse the arguments args = parser.parse_args() filePattern = args.filePattern logger.info('filePattern = {}'.format(filePattern)) inpDir = args.inpDir if (Path.is_dir(Path(args.inpDir).joinpath('images'))): # switch to images folder if present fpath = str(Path(args.inpDir).joinpath('images').absolute()) logger.info('inpDir = {}'.format(inpDir)) layout = args.layout logger.info('layout = {}'.format(layout)) bounds = args.bounds logger.info('bounds = {}'.format(bounds)) outDir = args.outDir logger.info('outDir = {}'.format(outDir)) outDir = Path(outDir) # Parse the layout layout = [None if l=='' else int(l) for l in layout.split(',')] if len(layout)>7: layout = layout[:7] # Parse the bounds if bounds != None: bounds = [[None] if l=='' else get_number(l) for l in bounds.split(',')] bounds = bounds[:len(layout)] else: bounds = [[None] for _ in layout] # Parse files fp = filepattern.FilePattern(inpDir,filePattern) count = 0 for files in fp.iterate(group_by='c'): outDirFrame = outDir.joinpath('{}_files'.format(count)) outDirFrame.mkdir() count += 1 bioreaders = [] threads = [] with ThreadPoolExecutor(max([multiprocessing.cpu_count()//2,2])) as executor: for i,l in enumerate(layout): if l == None: bioreaders.append(None) continue f_path = [f for f in files if f['c']==l] if len(f_path)==0: continue f_path = f_path[0]['file'] bioreaders.append(BioReader(f_path,max_workers=multiprocessing.cpu_count())) if layout[i] != None: if isinstance(bounds[i][0],float): logger.info('{}: Getting percentile bounds {}...'.format(Path(bioreaders[-1]._file_path).name, bounds[i])) threads.append(executor.submit(get_bounds,bioreaders[-1],bounds[i][0],bounds[i][1])) elif isinstance(bounds[i][0],int): bioreaders[-1].bounds = bounds[i] else: bioreaders[-1].bounds = [0,np.iinfo(bioreaders[-1].read_metadata().image().Pixels.get_PixelType()).max] for i in reversed(range(len(layout))): if isinstance(bounds[i][0],int): logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i], Path(Path(bioreaders[i]._file_path).name).name, bioreaders[i].bounds)) continue if layout[i] == None: continue bioreaders[i].bounds = threads.pop().result() logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i], Path(Path(bioreaders[i]._file_path).name).name, bioreaders[i].bounds)) for br in bioreaders: if br != None: br_meta = br file_info = utils.dzi_file(br_meta,outDirFrame,0) encoder = utils.DeepZoomChunkEncoder(file_info) file_writer = utils.DeepZoomWriter(outDirFrame) utils._get_higher_res(0,bioreaders,file_writer,encoder)
2.546875
3
kinetics/reaction_classes/general_rate_Law.py
wlawler45/kinetics
13
12175
from kinetics.reaction_classes.reaction_base_class import Reaction


class Generic(Reaction):
    """
    This Reaction class allows you to specify your own rate equation.

    Enter the parameter names in params, and the substrate names used in the reaction in species.
    Type the rate equation as a string in rate_equation, using these same names.

    Enter the substrates used up, and the products made in the reaction as normal.
    """

    def __init__(self, params=None, species=None, rate_equation='', substrates=None, products=None):
        super().__init__()

        # Copy the inputs so instances never share a mutable default argument.
        self.reaction_substrate_names = list(species) if species else []
        self.parameter_names = list(params) if params else []
        self.rate_equation = rate_equation

        self.substrates = list(substrates) if substrates else []
        self.products = list(products) if products else []

    def calculate_rate(self, substrates, parameters):
        # Build an explicit evaluation namespace instead of mutating locals(),
        # which CPython does not guarantee to write back into the frame.
        namespace = {}
        for name, value in zip(self.reaction_substrate_names, substrates):
            namespace[name] = value
        for name, value in zip(self.parameter_names, parameters):
            namespace[name] = value

        # Names resolve in `namespace` first, then in module globals (handy if
        # the rate equation calls helper functions defined at module level).
        rate = eval(self.rate_equation, globals(), namespace)

        return rate
3.453125
3
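Following the docstring above, a Generic reaction is defined by naming the parameters and species and writing the rate law as a string over those names; here an irreversible Michaelis-Menten rate is used as an example (this assumes the kinetics package and its Reaction base class are installed).

# v = vmax * S / (km + S), with S consumed and P produced.
mm = Generic(params=["vmax", "km"],
             species=["S"],
             rate_equation="(vmax * S) / (km + S)",
             substrates=["S"],
             products=["P"])

# Values are passed in the same order as the names given above.
rate = mm.calculate_rate(substrates=[10.0], parameters=[1.0, 5.0])
print(rate)   # (1.0 * 10.0) / (5.0 + 10.0) = 0.666...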
examples/03-interception/api.py
nomadsinteractive/migi
3
12176
<gh_stars>1-10 from ctypes import * from migi.decorators import stdcall @stdcall('MessageBoxW', 'User32.dll', interceptable=True) def _native_message_box_w(hwnd: c_void_p, content: c_wchar_p, title: c_wchar_p, flags: c_uint32) -> c_int32: if wstring_at(content) == "I'm in": return _native_message_box_w.call_original(hwnd, create_unicode_buffer("We're in"), title, flags) return _native_message_box_w.call_original(hwnd, content, title, flags) def message_box(content: str, title: str, flags: int = 0) -> c_int32: return _native_message_box_w(None, create_unicode_buffer(content), create_unicode_buffer(title), flags) def restore(): _native_message_box_w.restore() _native_message_box_w.intercept()
1.992188
2
adapters/heiman/HS1RC.py
russdan/domoticz-zigbee2mqtt-plugin
146
12177
<filename>adapters/heiman/HS1RC.py from adapters.adapter_with_battery import AdapterWithBattery from devices.switch.selector_switch import SelectorSwitch class HeimanAlarmRemoteAdapter(AdapterWithBattery): def __init__(self): super().__init__() self.switch = SelectorSwitch('Remote', 'action') self.switch.add_level('Off', None) self.switch.add_level('Arm all zones', 'arm_all_zones') self.switch.add_level('Arm partial zones', 'arm_partial_zones') self.switch.add_level('Disarm', 'disarm') self.switch.add_level('Emergency', 'emergency') self.switch.set_selector_style(SelectorSwitch.SELECTOR_TYPE_MENU) self.switch.disable_value_check_on_update() self.devices.append(self.switch) def convert_message(self, message): message = super().convert_message(message) return message def handleCommand(self, alias, device, device_data, command, level, color): self.switch.handle_command(device_data, command, level, color)
2.375
2
tennis_model_scraper/tennis_model_scraper/spiders/tennis_data_co_uk_spider.py
DrAndrey/tennis_model
0
12178
# -*- coding: utf-8 -*- """ """ import scrapy from tennis_model.tennis_model_scraper.tennis_model_scraper import items class TennisDataCoUkSpider(scrapy.Spider): name = "tennis_data_co_uk" allowed_domains = ["www.tennis-data.co.uk"] start_urls = ["http://www.tennis-data.co.uk/alldata.php"] custom_settings = {'ITEM_PIPELINES': {'tennis_model_scraper.pipelines.TennisDataCoUkPipeline': 1}} def _correct_ext(self, link): if ".zip" in link: return link elif "zip" in link: return ".zip".join(link.split("zip")) else: raise Exception("Unknown file extension from url - {0}. 'zip' is expected".format(link)) def parse(self, response): archive_links = response.xpath("/html/body/table[5]/tr[2]/td[3]/a/@href") for link in archive_links: short_file_url = self._correct_ext(link.extract()) is_man_archives = 'w' not in short_file_url.split("/")[0] if is_man_archives: full_file_url = response.urljoin(short_file_url) item = items.TennisDataCoUkItem() item["file_urls"] = [full_file_url] yield item if __name__ == '__main__': pass
2.71875
3
aoc20211219b.py
BarnabyShearer/aoc
0
12179
<reponame>BarnabyShearer/aoc from aoc20211219a import * def aoc(data): sensors, _ = slam(parse(data)) return max(sum(abs(x) for x in sub(a, b)) for a in sensors for b in sensors)
2.296875
2
pyhmy/rpc/request.py
difengJ/pyhmy
37
12180
import json import requests from .exceptions import ( RequestsError, RequestsTimeoutError, RPCError ) _default_endpoint = 'http://localhost:9500' _default_timeout = 30 def base_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> str: """ Basic RPC request Parameters --------- method: str RPC Method to call params: :obj:`list`, optional Parameters for the RPC method endpoint: :obj:`str`, optional Endpoint to send request to timeout: :obj:`int`, optional Timeout in seconds Returns ------- str Raw output from the request Raises ------ TypeError If params is not a list or None RequestsTimeoutError If request timed out RequestsError If other request error occured """ if params is None: params = [] elif not isinstance(params, list): raise TypeError(f'invalid type {params.__class__}') try: payload = { "id": "1", "jsonrpc": "2.0", "method": method, "params": params } headers = { 'Content-Type': 'application/json' } resp = requests.request('POST', endpoint, headers=headers, data=json.dumps(payload), timeout=timeout, allow_redirects=True) return resp.content except requests.exceptions.Timeout as err: raise RequestsTimeoutError(endpoint) from err except requests.exceptions.RequestException as err: raise RequestsError(endpoint) from err def rpc_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> dict: """ RPC request Parameters --------- method: str RPC Method to call params: :obj:`list`, optional Parameters for the RPC method endpoint: :obj:`str`, optional Endpoint to send request to timeout: :obj:`int`, optional Timeout in seconds Returns ------- dict Returns dictionary representation of RPC response Example format: { "jsonrpc": "2.0", "id": 1, "result": ... } Raises ------ RPCError If RPC response returned a blockchain error See Also -------- base_request """ raw_resp = base_request(method, params, endpoint, timeout) try: resp = json.loads(raw_resp) if 'error' in resp: raise RPCError(method, endpoint, str(resp['error'])) return resp except json.decoder.JSONDecodeError as err: raise RPCError(method, endpoint, raw_resp) from err # TODO: Add GET requests
2.75
3
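A short usage sketch for the two helpers above; the method name hmy_blockNumber and the localhost endpoint are illustrative assumptions rather than values taken from the record.

# Minimal usage sketch (assumes a JSON-RPC node listening on localhost:9500;
# "hmy_blockNumber" is an illustrative method name, not verified here).
from pyhmy.rpc.request import rpc_request
from pyhmy.rpc.exceptions import RPCError, RequestsError, RequestsTimeoutError

try:
    resp = rpc_request("hmy_blockNumber", params=[], endpoint="http://localhost:9500", timeout=10)
    print(resp.get("result"))
except (RPCError, RequestsError, RequestsTimeoutError) as exc:
    print("RPC call failed:", exc)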
gaze_api/scripts/vidPub.py
ajdroid/tobii_ros
0
12181
<reponame>ajdroid/tobii_ros
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys  # needed for sys.exit() in the shutdown handler below (missing in the original)
import time
import socket
import threading

import rospy
from publisher import *

import cv2
import imagezmq
import numpy as np

from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError


def parse_sent_msg(msg):
    ctr, frame_time = msg.split()
    frame_time = float(frame_time)
    return frame_time, ctr


# setup socket to python3 video streamer
# Helper class implementing an IO daemon thread for imgzmq recv
class VideoStreamSubscriber:

    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        if port == 0:  # ipc operation rather than tcp
            self.receiver_address = "ipc://{}".format(self.hostname)
        else:
            self.receiver_address = "tcp://{}:{}".format(self.hostname, self.port)
        self._stop = False
        self._data_ready = threading.Event()
        self._thread = threading.Thread(target=self._run, args=())
        self._thread.daemon = True
        self._thread.start()

    def receive(self, timeout=30.0):
        flag = self._data_ready.wait(timeout=timeout)
        if not flag:
            raise TimeoutError(
                "Timeout while reading from subscriber tcp://{}:{}".format(self.hostname, self.port))
        self._data_ready.clear()
        return self._data

    def _run(self):
        receiver = imagezmq.ImageHub(self.receiver_address, REQ_REP=False)
        while not self._stop:
            self._data = receiver.recv_jpg()
            # self._data = receiver.recv_image()
            self._data_ready.set()
        receiver.close()

    def close(self):
        self._stop = True


# Receive from broadcast
# There are 2 hostname styles; comment out the one you don't need
hostname = "127.0.0.1"  # Use to receive from localhost
# hostname = "192.168.86.38"  # Use to receive from other computer

if __name__ == '__main__':
    try:
        # parser = argparse.ArgumentParser()
        # parser.add_argument('--gp', action="store_true", help="Option to publish gaze position (2D) data")
        # args = parser.parse_args(rospy.myargv()[1:])

        ''' Initiate the Video Stream Subscription over Image ZMQ '''
        imgzmq_port = 5555
        hostname = "/tmp/tobiiVid"; imgzmq_port = 0
        receiver = VideoStreamSubscriber(hostname, imgzmq_port)

        ''' Create publisher '''
        # Default publish the 3D gaze position data
        vidpub = rospy.Publisher("tobii_video", Image, queue_size=10)
        bridge = CvBridge()
        rospy.init_node('tobii_image_sender', anonymous=True)

        while not rospy.is_shutdown():
            # get from py3
            sent_msg_string, frame = receiver.receive()
            image = cv2.imdecode(np.frombuffer(frame, dtype='uint8'), -1)
            image = np.frombuffer(frame, dtype='uint8')
            image = image.reshape(1080, 1920, 3)
            print(image.shape, sent_msg_string)

            # Parse sent message to convert to ros formats
            frametime, counter = parse_sent_msg(sent_msg_string)

            # publish to ROS
            im_ros = bridge.cv2_to_imgmsg(image, "bgr8")
            im_ros.header.stamp = rospy.Time.from_sec(frametime)
            im_ros.header.frame_id = str(counter)
            vidpub.publish(im_ros)

    except (rospy.ROSInterruptException, KeyboardInterrupt, SystemExit):
        sys.exit(0)
2.46875
2
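For context, a rough sketch of what the Python 3 sender side might look like. The imagezmq ImageSender usage and the raw-BGR wire format are assumptions inferred from the receiver above (which reshapes the buffer to 1080x1920x3); this is not code from the original project.

# Hypothetical PUB-side counterpart (assumptions: imagezmq.ImageSender with
# REQ_REP=False publishes to the same ipc address, and the payload is the raw
# BGR bytes of a 1920x1080 frame, matching the reshape done by the receiver).
import time
import numpy as np
import imagezmq

sender = imagezmq.ImageSender(connect_to="ipc:///tmp/tobiiVid", REQ_REP=False)
counter = 0
frame = np.zeros((1080, 1920, 3), dtype=np.uint8)  # placeholder frame
while True:
    msg = "{} {}".format(counter, time.time())  # "<counter> <frame_time>", as parse_sent_msg() expects
    sender.send_jpg(msg, frame.tobytes())
    counter += 1
    time.sleep(1 / 25.0)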
LAImapping_SNAP/snappy_backscatterLAI.py
dipankar05/aws4agrisar
5
12182
import sys
import numpy
import numpy as np

from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding

##############
import csv

###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################

if len(sys.argv) != 2:
    print("usage: %s <file>" % sys.argv[0])
    sys.exit(1)

file = sys.argv[1]

print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()

print("Product: %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time: " + str(product.getStartTime()))
print("End time: " + str(product.getEndTime()))
print("Bands: %s" % (list(band_names)))

##---------------------------------------------------------------------------------
with open('rice_LUT.csv', 'r') as dest_f:
    data_iter = csv.reader(dest_f, delimiter=',', quotechar='"')
    data = [data for data in data_iter]
data_array = np.asarray(data, dtype=np.float32)

VV = data_array[:, 1]
VH = data_array[:, 2]
PAI = data_array[:, 0]

X = np.column_stack((VV, VH))
Y = PAI

# SVR training
pipeline = make_pipeline(StandardScaler(), SVR(kernel='rbf', epsilon=0.105, C=250, gamma=2.8), )
SVRmodel = pipeline.fit(X, Y)

# Predict for validation data
valX = X; y_out = pipeline.predict(valX);
##---------------------------------------------------------------------------------

bandc11 = product.getBand('C11')
bandc22 = product.getBand('C22')

laiProduct = Product('LAI', 'LAI', width, height)
laiBand = laiProduct.addBand('lai', ProductData.TYPE_FLOAT32)
laiFlagsBand = laiProduct.addBand('lai_flags', ProductData.TYPE_UINT8)
writer = ProductIO.getProductWriter('BEAM-DIMAP')

ProductUtils.copyGeoCoding(product, laiProduct)
ProductUtils.copyMetadata(product, laiProduct)
ProductUtils.copyTiePointGrids(product, laiProduct)

laiFlagCoding = FlagCoding('lai_flags')
laiFlagCoding.addFlag("LAI_LOW", 1, "LAI below 0")
laiFlagCoding.addFlag("LAI_HIGH", 2, "LAI above 5")
group = laiProduct.getFlagCodingGroup()
# print(dir(group))
group.add(laiFlagCoding)
laiFlagsBand.setSampleCoding(laiFlagCoding)

laiProduct.setProductWriter(writer)
laiProduct.writeHeader('LAImap_output.dim')

c11 = numpy.zeros(width, dtype=numpy.float32)
c22 = numpy.zeros(width, dtype=numpy.float32)

print("Writing...")
for y in range(height):
    print("processing line ", y, " of ", height)
    c11 = bandc11.readPixels(0, y, width, 1, c11)
    c22 = bandc22.readPixels(0, y, width, 1, c22)
    Z = np.column_stack((c11, c22))
    # ndvi = (r10 - r7) / (r10 + r7)
    lai = pipeline.predict(Z);
    laiBand.writePixels(0, y, width, 1, lai)
    laiLow = lai < 0.0
    laiHigh = lai > 5.0
    laiFlags = numpy.array(laiLow + 2 * laiHigh, dtype=numpy.int32)
    laiFlagsBand.writePixels(0, y, width, 1, laiFlags)

laiProduct.closeIO()
print("Done.")
2.421875
2
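The LUT-driven regression at the heart of the script can be exercised on its own with synthetic data; the toy backscatter values below are made up and unrelated to the rice LUT.

# Self-contained sketch of the StandardScaler + SVR pipeline used above,
# run on synthetic two-channel data (all values are illustrative).
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

rng = np.random.default_rng(0)
X = rng.uniform(-20, -5, size=(50, 2))                    # stand-in for VV/VH backscatter
y = 0.1 * (X[:, 0] - X[:, 1]) + rng.normal(0, 0.05, 50)   # synthetic LAI-like target

pipeline = make_pipeline(StandardScaler(), SVR(kernel='rbf', epsilon=0.105, C=250, gamma=2.8))
pipeline.fit(X, y)
print(pipeline.predict(X[:5]))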
test_fiona_issue383.py
thomasaarholt/fiona-wheels
0
12183
<filename>test_fiona_issue383.py<gh_stars>0 import fiona d = { "type": "Feature", "id": "0", "properties": { "ADMINFORES": "99081600010343", "REGION": "08", "FORESTNUMB": "16", "FORESTORGC": "0816", "FORESTNAME": "El Yunque National Forest", "GIS_ACRES": 55829.81, "SHAPE_AREA": 0.0193062316937, "SHAPE_LEN": 0.754287568301, }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [-65.73293016000002, 18.33284838999998], [-65.73293445000002, 18.331367639999996], [-65.73189660000003, 18.331369719999998], [-65.73040952000002, 18.33137273], [-65.72620770999998, 18.33138113000001], [-65.72303074000001, 18.331387389999975], [-65.71763471000003, 18.331393549999973], [-65.71717587, 18.331394069999988], [-65.71297922999997, 18.331403290000026], [-65.71248787000002, 18.33140437999998], [-65.70898332000002, 18.33141236], [-65.70846269999998, 18.331413540000028], [-65.70470655999998, 18.331422009999983], [-65.70340513999997, 18.33142491000001], [-65.70268779000003, 18.331419400000016], [-65.70098910000002, 18.33140635000001], [-65.69978839999999, 18.33139711000001], [-65.69977925, 18.32948927000001], [-65.69976860000003, 18.32723274], [-65.69976336000002, 18.326155840000013], [-65.69975882, 18.32519180999998], [-65.69975420999998, 18.324281380000002], [-65.69975116, 18.323670390000018], [-65.69974878, 18.323214399999983], [-65.69972460999998, 18.317907339999977], [-65.69972661000003, 18.31559458999999], [-65.69972832000002, 18.314692869999988], [-65.69972934999998, 18.312400700000012], [-65.69973214999999, 18.309193600000015], [-65.69973189000001, 18.308128119999992], [-65.69971594999998, 18.304170699999986], [-65.69971009, 18.302713270000027], [-65.69969680999998, 18.29942688], [-65.69968705999997, 18.297028839999996], [-65.69968439000002, 18.294420890000026], [-65.69968401, 18.294158770000024], [-65.69968397000002, 18.29406161000003], [-65.69968146999997, 18.29031968999999], [-65.69967542, 18.286261500000023], [-65.6996757, 18.286123120000013], [-65.69967338999999, 18.284205750000012], [-65.69967251000003, 18.283497660000023], [-65.69967014000002, 18.281735219999973], [-65.69967000000003, 18.28134633000002], [-65.69994827, 18.28134559], [-65.70099542999998, 18.28134276999998], [-65.70358926, 18.28133575999999], [-65.70616948000003, 18.281328770000016], [-65.70911901, 18.28132070999999], [-65.70971071999998, 18.28131909000001], [-65.71624101999998, 18.28131652000002], [-65.71624542, 18.276418089999993], [-65.71624548, 18.27636744], [-65.71624578000001, 18.275968209999974], [-65.71624845000002, 18.27300660999998], [-65.71624307000002, 18.271180739999977], [-65.71623899999997, 18.26979332000002], [-65.71623254999997, 18.267581380000024], [-65.71623254999997, 18.267578500000013], [-65.71623402, 18.267040029999976], [-65.71623762000002, 18.265657929999975], [-65.71623955000001, 18.26496930000002], [-65.71624981999997, 18.260115170000006], [-65.71625891999997, 18.257678180000028], [-65.71625689000001, 18.25766888999999], [-65.71628033000002, 18.252014929999973], [-65.71628700000002, 18.250603020000028], [-65.71629617000002, 18.248364939999988], [-65.71629643, 18.248011659999975], [-65.71974196999997, 18.248007089999987], [-65.72038055000002, 18.24800706000002], [-65.72076942000001, 18.24800829999998], [-65.72464429000001, 18.248011910000002], [-65.72465315, 18.248011519999977], [-65.72509256000001, 18.24801222000002], [-65.72707300000002, 18.24801083], [-65.73231042999998, 18.2480104], [-65.73397174000002, 18.248009190000005], [-65.73705114, 18.248008589999984], [-65.73750502000001, 
18.248008190000007], [-65.73889711999999, 18.24800842000002], [-65.73978022, 18.248008830000003], [-65.74408667, 18.248010669999985], [-65.74502591999999, 18.248009980000006], [-65.74623288999999, 18.248009120000006], [-65.74772324000003, 18.248009149999973], [-65.74924592000002, 18.248014580000017], [-65.74961603999998, 18.248013990000004], [-65.74961524000003, 18.244120570000007], [-65.74961268999999, 18.243257019999987], [-65.74961502999997, 18.235669789999974], [-65.74961267999998, 18.235211540000023], [-65.74961048, 18.234789499999977], [-65.74961128000001, 18.231243000000006], [-65.75090724, 18.231235679999998], [-65.75247086000002, 18.231236500000023], [-65.75309636999998, 18.231236850000016], [-65.75896512000003, 18.231239829999993], [-65.76053288000003, 18.231240590000027], [-65.76145975999998, 18.231241049999994], [-65.76266423999999, 18.23124161999999], [-65.76402088999998, 18.231242259999988], [-65.76422652999997, 18.231242339999994], [-65.76459129, 18.231242520000023], [-65.76506522, 18.231243529999972], [-65.76575971, 18.231245], [-65.77265518000002, 18.231259480000006], [-65.77609515, 18.23126751000001], [-65.77853763000002, 18.231273129999977], [-65.78301661, 18.231283440000027], [-65.78536026, 18.231288749999976], [-65.78565572000002, 18.231289430000004], [-65.78587555000001, 18.23129019999999], [-65.78745778000001, 18.23129352000001], [-65.79147775000001, 18.231303949999983], [-65.80175496999999, 18.23133021000001], [-65.80328739999999, 18.23133408000001], [-65.80925552999997, 18.23135074999999], [-65.81185003000002, 18.231357919999994], [-65.81302187, 18.231352949999973], [-65.81574820999998, 18.23134140000002], [-65.81705820000002, 18.231335829999978], [-65.81733358000002, 18.231334670000024], [-65.82028713, 18.231322050000017], [-65.82052381, 18.23132104000001], [-65.82337763999999, 18.23130882999999], [-65.82649563000001, 18.231295439999997], [-65.82811142999998, 18.231288459999973], [-65.83293057999998, 18.23127384999998], [-65.83292964999998, 18.231761140000003], [-65.83293025, 18.234220730000004], [-65.83292996, 18.23624890000002], [-65.83292955000002, 18.239821380000024], [-65.83292905000002, 18.244286690000024], [-65.83292845, 18.244807849999972], [-65.83292886999999, 18.245117160000007], [-65.83292883000001, 18.24573097000001], [-65.83292870999998, 18.247063589999982], [-65.83292857999999, 18.248008060000018], [-65.83315374, 18.248008760000005], [-65.83325909000001, 18.248009089999982], [-65.83590992, 18.248030509999978], [-65.84442614, 18.248036909999996], [-65.84617400000002, 18.248038199999996], [-65.84807433999998, 18.24803958000001], [-65.84813063000001, 18.248039609999978], [-65.84903366999998, 18.248040240000023], [-65.85197088000001, 18.24804229], [-65.85535651999999, 18.24804193], [-65.85613706999999, 18.248041839999985], [-65.85719701, 18.248041699999987], [-65.8638446, 18.24804075999998], [-65.86544515000003, 18.24804051000001], [-65.87069150999997, 18.248039570000003], [-65.87385301, 18.248038310000027], [-65.87461352999998, 18.248020329999974], [-65.87817146999998, 18.248007959999995], [-65.88441703000001, 18.24800984000001], [-65.89088908999997, 18.248012580000022], [-65.89899125, 18.248013500000013], [-65.89925985999997, 18.24801395999998], [-65.90513017, 18.248014790000013], [-65.90874113000001, 18.248012710000012], [-65.91595359000002, 18.248011819999988], [-65.91629429, 18.248011819999988], [-65.9162887, 18.250010359999976], [-65.9162852, 18.25164811000002], [-65.91628292000001, 18.25191947000002], [-65.91627997, 18.253774229999976], 
[-65.91627848000002, 18.25477933000002], [-65.91627578999999, 18.255991100000017], [-65.91626445999998, 18.261137089999977], [-65.91625448000002, 18.26512563], [-65.91625524, 18.26536785000002], [-65.91625922999998, 18.266019389999997], [-65.91632637999999, 18.266198929999973], [-65.91632625, 18.266542049999998], [-65.91631202000002, 18.267959780000012], [-65.91631167000003, 18.267977850000022], [-65.91630744000003, 18.268755800000008], [-65.91630715999997, 18.268808560000025], [-65.91625932, 18.270663520000028], [-65.91625911, 18.270671989999983], [-65.91625876, 18.270887870000024], [-65.91625875, 18.27455298000001], [-65.91625871999997, 18.274613149999993], [-65.91625811, 18.279979179999998], [-65.91626000000002, 18.280340190000004], [-65.91625800000003, 18.281121770000027], [-65.91625804, 18.281356930000015], [-65.91618933000001, 18.281356570000014], [-65.91500064000002, 18.281350369999984], [-65.91296770999998, 18.281339800000012], [-65.91253340999998, 18.281337529999973], [-65.91229578999997, 18.281336280000005], [-65.90998387000002, 18.281324219999988], [-65.90871597, 18.281318759999976], [-65.90216367, 18.28129032999999], [-65.90111256, 18.281285760000003], [-65.89913740999998, 18.28127711000002], [-65.89885119000002, 18.28127286], [-65.89237293000002, 18.281247450000023], [-65.89048616000002, 18.281239140000025], [-65.88711766, 18.28122424999998], [-65.88599235999999, 18.281219249999992], [-65.88291291000002, 18.28120555999999], [-65.88291178999998, 18.28584490999998], [-65.88291048999997, 18.291010749999998], [-65.88290905000002, 18.29165870999998], [-65.88291565999998, 18.302684020000015], [-65.88291612, 18.303763930000002], [-65.88291874999999, 18.31314200999998], [-65.88292098, 18.314737100000002], [-65.88292178, 18.316319510000028], [-65.88292336, 18.320099939999977], [-65.88292583999998, 18.325711160000026], [-65.88292658, 18.32707603], [-65.88292819999998, 18.330798640000012], [-65.88292837, 18.331260059999977], [-65.88087401000001, 18.331255440000007], [-65.87894735999998, 18.331251090000023], [-65.87603802000001, 18.33124448000001], [-65.87461601000001, 18.33124122999999], [-65.86804993999999, 18.331420340000022], [-65.86763531000003, 18.331420009999988], [-65.86672666999999, 18.33141931], [-65.86648867999997, 18.331419100000005], [-65.86635653000002, 18.331419170000004], [-65.86273363999999, 18.331421009999985], [-65.85793086000001, 18.331423389999998], [-65.85789242999999, 18.33142171999998], [-65.85542400000003, 18.331424019999986], [-65.85350249999999, 18.331425749999994], [-65.84982063000001, 18.33142908000002], [-65.84969439000002, 18.331429189999994], [-65.84969428, 18.331550279999988], [-65.84969804000002, 18.33796344000001], [-65.84969840999997, 18.338737999999978], [-65.8497021, 18.345083629999976], [-65.84970268000001, 18.346151969999994], [-65.84970370000002, 18.34806388999999], [-65.84281220000003, 18.348051429999998], [-65.83631126, 18.348039400000005], [-65.83572038, 18.348038309999993], [-65.82972193, 18.348027020000018], [-65.82915395999999, 18.348025940000014], [-65.82799924, 18.34802375999999], [-65.82479099, 18.34801637999999], [-65.82399432, 18.34801453], [-65.82321229000001, 18.348012719999986], [-65.82141923, 18.348008540000023], [-65.82131368, 18.34800831000001], [-65.81955477000002, 18.348004189999983], [-65.81593006999998, 18.347995690000005], [-65.81524768000003, 18.347994099999994], [-65.81430688, 18.347991850000028], [-65.81409592, 18.34799134000002], [-65.81219464999998, 18.347986839999976], [-65.81037927, 18.347982520000016], 
[-65.80875237999999, 18.347978650000016], [-65.80848982999998, 18.34797801000002], [-65.80829098999999, 18.347977609999987], [-65.80772302000003, 18.347976930000016], [-65.80733909999998, 18.34797567999999], [-65.80353065000003, 18.347967859999983], [-65.80071562, 18.347962040000027], [-65.79902959999998, 18.34795853999998], [-65.79798546, 18.34795637000002], [-65.79009180999998, 18.347941110000022], [-65.78932427000001, 18.347939639999993], [-65.78840032, 18.347937820000027], [-65.78753816, 18.347936129999994], [-65.78601164000003, 18.347933119999993], [-65.78038322999998, 18.347921919999976], [-65.77934201, 18.347919479999973], [-65.77871169000002, 18.347918520000007], [-65.77776547000002, 18.347916520000012], [-65.77676473999998, 18.347914670000023], [-65.77662666999998, 18.347914370000012], [-65.77532722000001, 18.347911739999972], [-65.77499889, 18.347911039999985], [-65.77385053, 18.347908700000005], [-65.77354066999999, 18.34790806000001], [-65.76955748, 18.347899840000025], [-65.76888499, 18.347898439999994], [-65.76835487, 18.347897349999982], [-65.76683013000002, 18.34789416000001], [-65.76222604999998, 18.347884490000013], [-65.75909141, 18.347877840000024], [-65.75869390000003, 18.347874339999976], [-65.75078702000002, 18.34780397999998], [-65.74961532999998, 18.347793539999998], [-65.74804139999998, 18.347743690000016], [-65.74783091, 18.347737010000003], [-65.74728348000002, 18.347736259999976], [-65.74297489999998, 18.347730169999977], [-65.74044021999998, 18.347710549999988], [-65.73974084000002, 18.347705140000016], [-65.73561567000002, 18.34767314999999], [-65.73484725999998, 18.347665380000024], [-65.73302854000002, 18.347646950000012], [-65.73294028999999, 18.347646069999996], [-65.73293561999998, 18.346632310000018], [-65.73292482, 18.344269059999988], [-65.73292071999998, 18.343373789999987], [-65.73291719000002, 18.34259155000001], [-65.73290365999998, 18.339655180000022], [-65.73291784000003, 18.337885169999993], [-65.73292518, 18.334980180000002], [-65.73292579000002, 18.334753429999978], [-65.73293016000002, 18.33284838999998], ] ], [ [ [-66.16262245000001, 18.051031109999997], [-66.16184043999999, 18.049737929999992], [-66.1619091, 18.04731941], [-66.16514587, 18.04502678], [-66.16511536000002, 18.044198989999984], [-66.16511725999999, 18.043462750000003], [-66.16511725999999, 18.043279649999988], [-66.16594887000002, 18.04355812], [-66.16832161000002, 18.041448590000016], [-66.16813087000003, 18.040346150000005], [-66.16640091, 18.04031180999999], [-66.16698073999999, 18.03862952999998], [-66.16720580999998, 18.037527080000018], [-66.16765975999999, 18.033853529999988], [-66.16861915999999, 18.034097669999994], [-66.16942024000002, 18.033731460000013], [-66.16954613000001, 18.03507804999998], [-66.16970443999998, 18.036489490000008], [-66.16989517000002, 18.037008290000017], [-66.17005347999998, 18.038480760000027], [-66.17072487000002, 18.03927802999999], [-66.17091750999998, 18.039522169999998], [-66.17117309999998, 18.039552689999994], [-66.17162131999999, 18.039552689999994], [-66.17216492, 18.039308549999987], [-66.17245293000002, 18.039155960000016], [-66.17293358, 18.039094920000025], [-66.17320251000001, 18.039094920000025], [-66.17344666000002, 18.039094920000025], [-66.17376709000001, 18.03928185000001], [-66.17305756000002, 18.042036059999987], [-66.17280005999999, 18.04304695000002], [-66.17234993, 18.044912339999996], [-66.17170142999998, 18.050027849999992], [-66.17182922, 18.050394059999974], [-66.17035484000002, 18.051618580000024], 
[-66.16718483, 18.05198096999999], [-66.16692733999997, 18.051458360000026], [-66.16661072, 18.050817489999986], [-66.16660117999999, 18.050874710000016], [-66.16659355000002, 18.05092811999998], [-66.16641808000003, 18.052057269999978], [-66.16641426000001, 18.052072529999975], [-66.16576958000002, 18.05623436000002], [-66.16262245000001, 18.051031109999997], ] ], [ [ [-66.53508758999999, 18.392507550000005], [-66.53519820999998, 18.391786579999973], [-66.53970336999998, 18.392427440000006], [-66.53828812, 18.397306440000023], [-66.53822708000001, 18.39755821], [-66.53777313, 18.398542399999997], [-66.53761481999999, 18.400304790000007], [-66.53463554000001, 18.40027046], [-66.53440475000002, 18.399271010000007], [-66.53497124, 18.39718819000001], [-66.53505897999997, 18.396612170000026], [-66.53450774999999, 18.395158770000023], [-66.53466796999999, 18.394887919999974], [-66.53466796999999, 18.39454841999998], [-66.53477286999998, 18.394208909999975], [-66.53480911000003, 18.393922809999992], [-66.53482628, 18.39348030000002], [-66.5349865, 18.393175129999975], [-66.53508758999999, 18.392507550000005], ] ], ], }, } from shapely.geometry import shape print(shape(d["geometry"]))
1.828125
2
pre_commit_hooks/forbid_crlf.py
henryiii/pre-commit-hooks
62
12184
from __future__ import print_function

import argparse, sys

from .utils import is_textfile


def contains_crlf(filename):
    with open(filename, mode='rb') as file_checked:
        for line in file_checked.readlines():
            if line.endswith(b'\r\n'):
                return True
    return False


def main(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='filenames to check')
    args = parser.parse_args(argv)
    text_files = [f for f in args.filenames if is_textfile(f)]
    files_with_crlf = [f for f in text_files if contains_crlf(f)]
    return_code = 0
    for file_with_crlf in files_with_crlf:
        print('CRLF end-lines detected in file: {0}'.format(file_with_crlf))
        return_code = 1
    return return_code


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
3.265625
3
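The detection logic of the hook is easy to check in isolation. The round trip below restates contains_crlf on a throwaway file (the real module also filters through is_textfile via a relative import, which is skipped here).

# Illustration of the CRLF check on a temporary file (standalone restatement
# of contains_crlf(); the file contents are arbitrary).
import tempfile

with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
    tmp.write(b"unix line\nwindows line\r\n")
    path = tmp.name

def contains_crlf(filename):
    with open(filename, mode='rb') as file_checked:
        return any(line.endswith(b'\r\n') for line in file_checked.readlines())

print(contains_crlf(path))  # True, because of the second line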
backend/code/start.py
socek/iep
0
12185
if __name__ == "__main__":
    print("Nothing yet...")
1.296875
1
vantage6/server/model/organization.py
jaspersnel/vantage6-server
2
12186
<gh_stars>1-10
import base64

from sqlalchemy import Column, String, LargeBinary
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound

from vantage6.common.globals import STRING_ENCODING

from .base import Base, Database


class Organization(Base):
    """A legal entity.

    An organization plays a central role in managing distributed tasks.
    Each Organization contains a public key which other organizations
    can use to send encrypted messages that only this organization can
    read.
    """

    # fields
    name = Column(String)
    domain = Column(String)
    address1 = Column(String)
    address2 = Column(String)
    zipcode = Column(String)
    country = Column(String)
    _public_key = Column(LargeBinary)

    # relations
    collaborations = relationship("Collaboration", secondary="Member",
                                  back_populates="organizations")
    results = relationship("Result", back_populates="organization")
    nodes = relationship("Node", back_populates="organization")
    users = relationship("User", back_populates="organization")
    created_tasks = relationship("Task", back_populates="initiator")
    roles = relationship("Role", back_populates="organization")

    @classmethod
    def get_by_name(cls, name):
        session = Database().Session
        try:
            return session.query(cls).filter_by(name=name).first()
        except NoResultFound:
            return None

    @hybrid_property
    def public_key(self):
        if self._public_key:
            # TODO this should be fixed properly
            try:
                return base64.b64decode(self._public_key)\
                    .decode(STRING_ENCODING)
            except Exception:
                return ""
        else:
            return ""

    @public_key.setter
    def public_key(self, public_key_b64):
        """Assumes that the public key is b64-encoded."""
        self._public_key = base64.b64decode(
            public_key_b64.encode(STRING_ENCODING)
        )

    def __repr__(self):
        number_of_users = len(self.users)
        return (
            "<Organization "
            f"name:{self.name}, "
            f"domain:{self.domain}, "
            f"users:{number_of_users}"
            ">"
        )
2.328125
2
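The public_key setter above is just a base64 round trip into the _public_key column; the snippet below mirrors that outside the ORM. The key bytes are a placeholder and STRING_ENCODING is assumed to be a UTF-8-style codec; note the getter re-applies b64decode to the stored bytes, which the in-code TODO flags as something to fix.

# Sketch of what the public_key setter stores (placeholder key bytes; assumes
# STRING_ENCODING is a UTF-8-style text codec).
import base64

STRING_ENCODING = "utf-8"  # stand-in for vantage6.common.globals.STRING_ENCODING
public_key_b64 = base64.b64encode(b"example-public-key-bytes").decode(STRING_ENCODING)

# setter path: the column ends up holding the raw (b64-decoded) key bytes
stored = base64.b64decode(public_key_b64.encode(STRING_ENCODING))
print(stored)  # b'example-public-key-bytes'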
checkov/terraform/checks/resource/aws/EKSSecretsEncryption.py
cclauss/checkov
1
12187
<reponame>cclauss/checkov<gh_stars>1-10
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck


class EKSSecretsEncryption(BaseResourceCheck):
    def __init__(self):
        name = "Ensure EKS Cluster has Secrets Encryption Enabled"
        id = "CKV_AWS_58"
        supported_resources = ['aws_eks_cluster']
        categories = [CheckCategories.KUBERNETES]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf):
        if "encryption_config" in conf.keys() and "resources" in conf["encryption_config"][0] and \
                "secrets" in conf["encryption_config"][0]["resources"][0]:
            return CheckResult.PASSED
        else:
            return CheckResult.FAILED


check = EKSSecretsEncryption()
2.171875
2
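The check can be exercised directly with hand-built resource configs; the nested-list shape below is an assumption about how checkov's HCL parser represents the block, chosen only so the indexing in scan_resource_conf succeeds.

# Illustrative calls to the check (the nested-list conf shape is an assumed
# approximation of checkov's parsed HCL representation).
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.EKSSecretsEncryption import check

passing_conf = {"encryption_config": [{"resources": [["secrets"]]}]}
failing_conf = {"name": ["my-cluster"]}

print(check.scan_resource_conf(passing_conf) == CheckResult.PASSED)  # True
print(check.scan_resource_conf(failing_conf) == CheckResult.FAILED)  # True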
analysis/models/nodes/analysis_node.py
SACGF/variantgrid
5
12188
<gh_stars>1-10 """ AnalysisNode is the base class that all analysis nodes inherit from. """ import logging import operator from functools import reduce from random import random from time import time from typing import Tuple, Sequence, List, Dict, Optional from celery.canvas import Signature from django.conf import settings from django.core.cache import cache from django.db import connection, models from django.db.models import Value, IntegerField from django.db.models.aggregates import Count from django.db.models.deletion import CASCADE, SET_NULL from django.db.models.query_utils import Q from django.dispatch import receiver from django.utils import timezone from django_dag.models import node_factory, edge_factory from django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager from analysis.exceptions import NonFatalNodeError, NodeParentErrorsException, NodeConfigurationException, \ NodeParentNotReadyException, NodeNotFoundException, NodeOutOfDateException from analysis.models.enums import GroupOperation, NodeStatus, NodeColors, NodeErrorSource, AnalysisTemplateType from analysis.models.models_analysis import Analysis from analysis.models.nodes.node_counts import get_extra_filters_q, get_node_counts_and_labels_dict from annotation.annotation_version_querysets import get_variant_queryset_for_annotation_version from classification.models import Classification, post_delete from library.database_utils import queryset_to_sql from library.django_utils import thread_safe_unique_together_get_or_create from library.log_utils import report_event from library.utils import format_percent from snpdb.models import BuiltInFilters, Sample, Variant, VCFFilter, Wiki, Cohort, VariantCollection, \ ProcessingStatus, GenomeBuild, AlleleSource from snpdb.variant_collection import write_sql_to_variant_collection from variantgrid.celery import app def _default_position(): return 10 + random() * 50 class AnalysisNode(node_factory('AnalysisEdge', base_model=TimeStampedModel)): model = Variant objects = InheritanceManager() analysis = models.ForeignKey(Analysis, on_delete=CASCADE) name = models.TextField(blank=True) x = models.IntegerField(default=_default_position) y = models.IntegerField(default=_default_position) version = models.IntegerField(default=0) # Queryset version appearance_version = models.IntegerField(default=0) auto_node_name = models.BooleanField(default=True) output_node = models.BooleanField(default=False) hide_node_and_descendants_upon_template_configuration_error = models.BooleanField(default=False) ready = models.BooleanField(default=True) valid = models.BooleanField(default=False) visible = models.BooleanField(default=True) count = models.IntegerField(null=True, default=None) errors = models.TextField(null=True) shadow_color = models.TextField(null=True) load_seconds = models.FloatField(null=True) parents_should_cache = models.BooleanField(default=False) # Node suggests parents use a cache # This is set to node/version you cloned - cleared upon modification cloned_from = models.ForeignKey('NodeVersion', null=True, on_delete=SET_NULL) status = models.CharField(max_length=1, choices=NodeStatus.choices, default=NodeStatus.DIRTY) PARENT_CAP_NOT_SET = -1 min_inputs = 1 max_inputs = 1 uses_parent_queryset = True disabled = False UPDATE_TASK = "analysis.tasks.node_update_tasks.update_node_task" NODE_CACHE_TASK = "analysis.tasks.node_update_tasks.node_cache_task" WAIT_FOR_CACHE_TASK = 
"analysis.tasks.node_update_tasks.wait_for_cache_task" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.appearance_dirty = False self.ancestor_input_samples_changed = False self.parents_changed = False self.queryset_dirty = False self.update_children = True def get_subclass(self): """ Returns the node loaded as a subclass """ return AnalysisNode.objects.get_subclass(pk=self.pk) def check_still_valid(self): """ Checks that the node is still there and has the version we expect - or throw exception """ version_qs = AnalysisNode.objects.filter(pk=self.pk).values_list("version", flat=True) if version_qs: db_version = version_qs[0] if db_version > self.version: raise NodeOutOfDateException() else: raise NodeNotFoundException(self.pk) def _get_cohorts_and_sample_visibility_for_node(self) -> Tuple[Sequence[Cohort], Dict]: """ Visibility = can see on grid """ return [], {} @staticmethod def _get_visible_samples_from_cohort(cohorts, visibility): samples = set() for c in cohorts: for s in c.get_samples(): if visibility.get(s): samples.add(s) return sorted(samples) def _get_model_queryset(self): self.analysis.check_valid() return get_variant_queryset_for_annotation_version(self.analysis.annotation_version) def get_cohorts_and_sample_visibility(self, sort=True) -> Tuple[Sequence[Cohort], Dict]: """ Returns all node + ancestor cohorts (and visibilities of their samples) The underlying data for all samples/cohorts/sub-cohorts/trios/pedigrees is Cohorts, so need to know which to retrieve from DB (and what sample info to extract from packed columns) to filter + show on grid """ cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node() cohorts = set(cohorts) if self.has_input(): parents, _ = self.get_parent_subclasses_and_errors() for parent in parents: c, v = parent.get_cohorts_and_sample_visibility(sort=False) cohorts.update(c) visibility.update(v) # May have sub-cohorts, so get unique base cohorts cohorts = {c.get_base_cohort() for c in cohorts} if sort: cohorts = sorted(cohorts) return cohorts, visibility def get_sample_ids(self) -> List[Sample]: return [s.pk for s in self.get_samples()] def get_samples_from_node_only_not_ancestors(self): cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node() return self._get_visible_samples_from_cohort(cohorts, visibility) def _get_proband_sample_for_node(self) -> Optional[Sample]: """ Sample of the object of a study, if known """ return None def get_proband_sample(self) -> Optional[Sample]: """ Sample of the object of a study if known """ proband_samples = set() if proband_sample := self._get_proband_sample_for_node(): proband_samples.add(proband_sample) if self.has_input(): parents, _ = self.get_parent_subclasses_and_errors() for parent in parents: if parent_proband_sample := parent.get_proband_sample(): proband_samples.add(parent_proband_sample) proband_sample = None if len(proband_samples) == 1: # If ambiguous, then just give up proband_sample = proband_samples.pop() return proband_sample def get_samples(self) -> List[Sample]: """ Return all ancestor samples for a node""" cohorts, visibility = self.get_cohorts_and_sample_visibility(sort=False) return self._get_visible_samples_from_cohort(cohorts, visibility) def get_bams_dict(self): bams_dict = {} for sample in self.get_samples(): if sample.bam_file_path: bams_dict[sample.pk] = sample.bam_file_path return bams_dict def get_connection_data(self, parent): """ Return dict of source_id/target_id for sending as JSON """ return {"source_id": parent.get_css_id(), 
"target_id": self.get_css_id()} def get_rendering_args(self): return {} def get_css_id(self): if self.pk: css_id = f"analysis-node-{self.pk}" else: css_id = None return css_id def get_update_task(self): return Signature(self.UPDATE_TASK, args=(self.pk, self.version), immutable=True) def get_cache_task_args_objs_set(self, force_cache=False): """ returns Celery tasks which are called in node_utils.get_analysis_update_task before children are loaded Uses tasks not signatures so they are hashable in a set to be able to remove dupes """ task_args_objs_set = set() if self.is_valid() and (force_cache or self.use_cache): if parent := self.get_unmodified_single_parent_node(): return parent.get_cache_task_args_objs_set(force_cache=force_cache) node_cache, created = NodeCache.get_or_create_for_node(self) if created: task_args_objs_set.add((self.NODE_CACHE_TASK, (self.pk, self.version), node_cache)) else: # Cache has been launched already, we just need to make sure it's ready, so launch a task # waiting on it, to be used as a dependency task_args_objs_set.add((self.WAIT_FOR_CACHE_TASK, (node_cache.pk, ), node_cache)) return task_args_objs_set def get_parent_subclasses_and_errors(self): qs = AnalysisNode.objects.filter(children=self.id, children__isnull=False) parents = list(qs.select_subclasses()) num_parents = len(parents) errors = [] if self.min_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents < self.min_inputs: errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents < minimum of {self.min_inputs}")) elif self.max_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents > self.max_inputs: errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents > maximum of {self.max_inputs}")) for parent in parents: if NodeStatus.is_error(parent.status): errors.append((NodeErrorSource.PARENT, "Parent has errors")) break return parents, errors def get_parent_subclasses(self): """ Gets parents, throws an Exception if any errors """ parents, errors = self.get_parent_subclasses_and_errors() if errors: AnalysisNode.throw_errors_exception(errors) return parents def get_non_empty_parents(self, require_parents_ready=True): """ Returns non-empty (count > 0) parents. If require_parents_ready=True, die if parents not ready Otherwise, return them as we don't know if they're empty or not """ non_empty_parents = [] for p in self.get_parent_subclasses(): if p.is_ready(): if p.count == 0: continue elif require_parents_ready: raise NodeParentNotReadyException(f"Parent {p} is not ready!") non_empty_parents.append(p) return non_empty_parents def get_single_parent(self): if self.min_inputs != 1: msg = "get_single_parent() should only be called for single parent nodes" raise ValueError(msg) parents, errors = self.get_parent_subclasses_and_errors() if errors: errors = AnalysisNode.flatten_errors(errors) msg = "Parent had errors: " + ', '.join(errors) raise NonFatalNodeError(msg) num_parents = len(parents) if num_parents != 1: msg = f"get_single_parent() called for node with {num_parents} parents" raise ValueError(msg) return parents[0] def get_single_parent_q(self): parent = self.get_single_parent() if parent.is_ready(): if parent.count == 0: q = self.q_none() else: q = parent.get_q() else: # This should never happen... raise ValueError("get_single_parent_q called when single parent not ready!!!") return q def _get_annotation_kwargs_for_node(self) -> Dict: """ Override this method per-node. 
Any key/values in here MUST be consistent - as annotation_kwargs from multiple nodes may be combined in the MergeNode """ annotation_kwargs = {} if self.node_cache: annotation_kwargs.update(self.node_cache.variant_collection.get_annotation_kwargs()) return annotation_kwargs def get_annotation_kwargs(self) -> Dict: """ Passed to Variant QuerySet annotate() Can be used w/FilteredRelation to force a join to a partition, in which case you need to use the alias given in annotate. @see https://github.com/SACGF/variantgrid/wiki/Data-Partitioning """ a_kwargs = {} # Only apply parent annotation kwargs if you actually use their queryset if self.has_input() and self.uses_parent_queryset: for parent in self.get_non_empty_parents(): a_kwargs.update(parent.get_annotation_kwargs()) a_kwargs.update(self._get_annotation_kwargs_for_node()) return a_kwargs @property def queryset_requires_distinct(self): if self._queryset_requires_distinct(): return True if self.has_input() and self.uses_parent_queryset: for parent in self.get_non_empty_parents(): if parent.queryset_requires_distinct: return True return False def _queryset_requires_distinct(self): """ Override if you need this - don't do by default as it's slow """ return False @staticmethod def q_all(): return Q(pk__isnull=False) @staticmethod def q_none(): return ~AnalysisNode.q_all() def _get_cache_key(self) -> str: nv = NodeVersion.get(self) return str(nv.pk) def get_q(self, disable_cache=False): """ A Django Q object representing the Variant filters for this node. This is the method to override in subclasses - not get_queryset() as: Chains of filters to a reverse foreign key relationship causes Multiple joins, so use Q objects which are combined at the end qs = qs.filter(table_1__val=1) qs = qs.filter(table_2__val=2) This is not necessarily equal to: qs.filter(table_1__val=1, table_2__val=2) @see https://docs.djangoproject.com/en/2/topics/db/queries/#spanning-multi-valued-relationships """ # We need this for node counts, and doing a grid query (each page) - and it can take a few secs to generate # for some nodes (Comp HET / pheno) so cache it cache_key = self._get_cache_key() + f"q_cache={disable_cache}" q: Optional[Q] = None if settings.ANALYSIS_NODE_CACHE_Q: # Disable for unit tests q = cache.get(cache_key) if q is None: if disable_cache is False: if cache_q := self._get_node_cache_q(): return cache_q if self.has_input(): q = self.get_parent_q() if self.modifies_parents(): if node_q := self._get_node_q(): q &= node_q else: q = self.q_all() if node_q := self._get_node_q(): q = node_q cache.set(cache_key, q) return q def get_parent_q(self): if self.min_inputs == 1: return self.get_single_parent_q() raise NotImplementedError("You need to implement a non-default 'get_parent_q' if you have more than 1 parent") @property def use_cache(self): """ At the moment we only cache when a child requests it """ return AnalysisEdge.objects.filter(parent=self, child__parents_should_cache=True).exists() def write_cache(self, variant_collection: VariantCollection): qs = self.get_queryset(disable_cache=True) qs = qs.annotate(variant_collection_id=Value(variant_collection.pk, output_field=IntegerField())) sql = queryset_to_sql(qs.values_list('pk', 'variant_collection_id')) write_sql_to_variant_collection(variant_collection, sql) @lazy def node_version(self): return NodeVersion.get(self) @lazy def node_cache(self) -> Optional['NodeCache']: if parent := self.get_unmodified_single_parent_node(): return parent.node_cache return 
NodeCache.objects.filter(node_version=self.node_version, variant_collection__status=ProcessingStatus.SUCCESS).first() def _get_node_cache_q(self) -> Optional[Q]: q = None if self.node_cache: q = self.node_cache.variant_collection.get_q() return q def _get_node_q(self) -> Optional[Q]: raise NotImplementedError() def _get_unfiltered_queryset(self, **extra_annotation_kwargs): """ Unfiltered means before the get_q() is applied extra_annotation_kwargs is applied AFTER node's annotation kwargs """ qs = self._get_model_queryset() a_kwargs = self.get_annotation_kwargs() a_kwargs.update(extra_annotation_kwargs) if a_kwargs: # Clear ordering, @see # https://docs.djangoproject.com/en/3.0/topics/db/aggregation/#interaction-with-default-ordering-or-order-by qs = qs.annotate(**a_kwargs).order_by() return qs def get_queryset(self, extra_filters_q=None, extra_annotation_kwargs=None, inner_query_distinct=False, disable_cache=False): if extra_annotation_kwargs is None: extra_annotation_kwargs = {} qs = self._get_unfiltered_queryset(**extra_annotation_kwargs) q = self.get_q(disable_cache=disable_cache) if extra_filters_q: q &= extra_filters_q filtered_qs = qs.filter(q) if self.queryset_requires_distinct: if inner_query_distinct: qs = qs.filter(pk__in=filtered_qs.values_list("pk", flat=True)) else: qs = filtered_qs.distinct() else: qs = filtered_qs return qs def get_extra_grid_config(self): return {} def get_class_name(self): return self.__class__.__name__ def get_identifier(self): return f"{self.get_class_name()}-{self.pk}" def get_css_classes(self): """ returns list of css classes - set on "node > .node-overlay" on node appearance update """ css_classes = [] if self.output_node: css_classes.append("output-node") if self.analysis.template_type == AnalysisTemplateType.TEMPLATE and self.analysisvariable_set.exists(): css_classes.append("variable-node") return css_classes def get_input_count(self): parents = self.get_non_empty_parents() return sum([p.get_output_count() for p in parents]) def get_output_count(self): # TODO: Move the if not modify parents code in here. 
if self.count is not None: return self.count count = self.get_queryset().count() self.count = count self.save() return count def _get_method_summary(self): raise NotImplementedError() def get_method_summary(self): errors = self.get_errors(flat=True) if not errors: html_summary = self._get_method_summary() else: html_summary = "<b>incorrectly configured</b><ul>" for error in errors: html_summary += f"<li>{error}</li>" html_summary += "</ul>" return html_summary def get_node_name(self): """ Automatic node name """ raise NotImplementedError(f"Node Class: {self.get_class_name()}") @staticmethod def get_help_text() -> str: raise NotImplementedError() @staticmethod def get_node_class_label(): """ Used in create node dropdown """ raise NotImplementedError() def _get_genome_build_errors(self, field_name, field_genome_build: GenomeBuild) -> List: """ Used to quickly add errors about genome build mismatches This only happens in templates (ran template on sample with different build than hardcoded data) In normal analyses, autocomplete restrictions should not allow you to configure data from other builds """ errors = [] if field_genome_build != self.analysis.genome_build: msg = f"{field_name} genome build: {field_genome_build} different from analysis build: {self.analysis.genome_build}" errors.append(msg) return errors def _get_configuration_errors(self) -> List: return [] def get_parents_and_errors(self): """ Returns error array, includes any min/max parent error and node config error """ if self.has_input(): return self.get_parent_subclasses_and_errors() return [], [] def get_errors(self, include_parent_errors=True, flat=False): """ returns a tuple of (NodeError, str) unless flat=True where it's only string """ errors = [] for analysis_error in self.analysis.get_errors(): errors.append((NodeErrorSource.ANALYSIS, analysis_error)) _, parent_errors = self.get_parents_and_errors() if include_parent_errors: errors.extend(parent_errors) if self.errors: errors.append((NodeErrorSource.INTERNAL_ERROR, self.errors)) errors.extend((NodeErrorSource.CONFIGURATION, ce) for ce in self._get_configuration_errors()) if flat: errors = AnalysisNode.flatten_errors(errors) return errors @staticmethod def flatten_errors(errors): return [f"{NodeErrorSource(nes).label}: {error}" for nes, error in errors] @staticmethod def get_status_from_errors(errors): ERROR_STATUS = { NodeErrorSource.INTERNAL_ERROR: NodeStatus.ERROR, NodeErrorSource.ANALYSIS: NodeStatus.ERROR_WITH_PARENT, NodeErrorSource.PARENT: NodeStatus.ERROR_WITH_PARENT, NodeErrorSource.CONFIGURATION: NodeStatus.ERROR_CONFIGURATION, } if not errors: raise ValueError("Passed in empty errors!") error_sources = {s for s, _ in errors} for source, status in ERROR_STATUS.items(): if source in error_sources: return status raise ValueError("No error source found") @staticmethod def throw_errors_exception(errors): ERROR_EXCEPTIONS = { NodeErrorSource.INTERNAL_ERROR: ValueError, NodeErrorSource.ANALYSIS: NonFatalNodeError, NodeErrorSource.PARENT: NodeParentErrorsException, NodeErrorSource.CONFIGURATION: NodeConfigurationException, } if not errors: raise ValueError("Passed in empty errors!") error_sources = {s for s, _ in errors} for source, exception_klass in ERROR_EXCEPTIONS.items(): if source in error_sources: raise exception_klass() raise ValueError("No error source found") def inherits_parent_columns(self): return self.min_inputs == 1 and self.max_inputs == 1 def _get_node_extra_columns(self): return [] def _get_inherited_columns(self): extra_columns = [] if 
self.inherits_parent_columns(): parent = self.get_single_parent() extra_columns.extend(parent.get_extra_columns()) return extra_columns def get_extra_columns(self): cache_key = self._get_cache_key() + "_extra_columns" extra_columns = cache.get(cache_key) if extra_columns is None: extra_columns = [] if self.is_valid(): extra_columns.extend(self._get_inherited_columns()) # Only add columns that are unique, as otherwise filters get added twice. node_extra_columns = self._get_node_extra_columns() for col in node_extra_columns: if col not in extra_columns: extra_columns.append(col) cache.set(cache_key, extra_columns) return extra_columns def _get_node_extra_colmodel_overrides(self): """ Subclasses should override to add colmodel overrides for JQGrid """ return {} def _get_inherited_colmodel_overrides(self): extra_overrides = {} if self.inherits_parent_columns(): parent = self.get_single_parent() extra_overrides.update(parent.get_extra_colmodel_overrides()) return extra_overrides def get_extra_colmodel_overrides(self): """ For JQGrid - subclasses should override _get_node_extra_colmodel_overrides """ extra_overrides = {} if self.is_valid() and self.uses_parent_queryset: extra_overrides.update(self._get_inherited_colmodel_overrides()) extra_overrides.update(self._get_node_extra_colmodel_overrides()) return extra_overrides def get_node_classification(self): if self.is_source(): classification = "source" else: classification = "filter" return classification def has_input(self): return self.max_inputs != 0 def is_source(self): return self.has_input() is False def is_valid(self): return not self.get_errors() def is_ready(self): return NodeStatus.is_ready(self.status) def bump_version(self): if self.version > 0: DELETE_CACHE_TASK = "analysis.tasks.node_update_tasks.delete_old_node_versions" app.send_task(DELETE_CACHE_TASK, args=(self.pk, self.version)) self.version += 1 self.status = NodeStatus.DIRTY self.count = None self.errors = None self.cloned_from = None def modifies_parents(self): """ Can overwrite and set to False to use parent counts """ return True def get_unmodified_single_parent_node(self) -> Optional['AnalysisNode']: """ If a node doesn't modify single parent - can use that in some places to re-use cache """ if self.is_valid() and self.has_input() and not self.modifies_parents(): try: return self.get_single_parent() except ValueError: pass return None def _get_cached_label_count(self, label) -> Optional[int]: """ Override for optimisation. 
Returning None means we need to run the SQL to get the count """ try: if self.cloned_from: # If cloned (and we or original haven't changed) - use those counts try: node_count = NodeCount.load_for_node_version(self.cloned_from, label) return node_count.count except NodeCount.DoesNotExist: # Should only ever happen if original bumped version since we were loaded # otherwise should have cascade set cloned_from to NULL pass if self.has_input(): parent_non_zero_label_counts = [] for parent in self.get_non_empty_parents(): if parent.count != 0: # count=0 has 0 for all labels parent_node_count = NodeCount.load_for_node(parent, label) if parent_node_count.count != 0: parent_non_zero_label_counts.append(parent_node_count.count) if not parent_non_zero_label_counts: # logging.info("all parents had 0 %s counts", label) return 0 if not self.modifies_parents(): if len(parent_non_zero_label_counts) == 1: # logging.info("Single parent, no modification, using that") return parent_non_zero_label_counts[0] except NodeCount.DoesNotExist: pass except Exception as e: logging.warning("Trouble getting cached %s count: %s", label, e) return None def get_grid_node_id_and_version(self): """ Uses parent node_id/version if possible to re-use cache """ node_id = self.pk version = self.version if self.cloned_from: node_id = self.cloned_from.node_id version = self.cloned_from.version if parent := self.get_unmodified_single_parent_node(): node_id, version = parent.get_grid_node_id_and_version() return node_id, version def node_counts(self): """ This is inside Celery task """ self.count = None counts_to_get = {BuiltInFilters.TOTAL} counts_to_get.update([i[0] for i in self.analysis.get_node_count_types()]) label_counts = {} for label in counts_to_get: label_count = self._get_cached_label_count(label) if label_count is not None: label_counts[label] = label_count counts_to_get -= set(label_counts) logging.debug("%s cached counts: %s", self, label_counts) if counts_to_get: logging.debug("%s needs DB request for %s", self, counts_to_get) retrieved_label_counts = get_node_counts_and_labels_dict(self) label_counts.update(retrieved_label_counts) node_version = NodeVersion.get(self) for label, count in label_counts.items(): NodeCount.objects.create(node_version=node_version, label=label, count=count) return NodeStatus.READY, label_counts[BuiltInFilters.TOTAL] def _load(self): """ Override to do anything interesting """ pass def load(self): """ load is called after parents are run """ # logging.debug("node %d (%d) load()", self.id, self.version) start = time() self._load() # Do before counts in case it affects anything status, count = self.node_counts() load_seconds = time() - start self.update(status=status, count=count, load_seconds=load_seconds) def add_parent(self, parent, *args, **kwargs): if not parent.visible: raise NonFatalNodeError("Not connecting children to invisible nodes!") existing_connect = parent.children.through.objects.filter(parent=parent, child=self) if not existing_connect.exists(): super().add_parent(parent) self.parents_changed = True else: logging.error("Node(pk=%d).add_parent(pk=%d) already exists!", self.pk, parent.pk) def remove_parent(self, parent): """ disconnects parent by deleting edge """ # Ok to have multiple, just delete first edge = parent.children.through.objects.filter(parent=parent, child=self).first() if edge: # could be some kind of race condition? 
edge.delete() self.parents_changed = True def handle_ancestor_input_samples_changed(self): pass def update(self, **kwargs): """ Updates Node if self.version matches DB - otherwise throws NodeOutOfDateException """ self_qs = AnalysisNode.objects.filter(pk=self.pk, version=self.version) updated = self_qs.update(**kwargs) if not updated: raise NodeOutOfDateException() def save(self, **kwargs): """ To avoid race conditions, don't use save() in a celery task (unless running in scheduling_single_worker) instead use update() method above """ # logging.debug("save: pk=%s kwargs=%s", self.pk, str(kwargs)) super_save = super().save if self.parents_changed or self.ancestor_input_samples_changed: self.handle_ancestor_input_samples_changed() if self.auto_node_name: self.name = self.get_node_name() # TODO: This causes lots of DB queries... should we change this? self.valid = self.is_valid() if not self.valid: self.shadow_color = NodeColors.ERROR self.appearance_dirty = True elif self.shadow_color == NodeColors.ERROR: # Need to allow nodes to set to warning self.shadow_color = NodeColors.VALID self.appearance_dirty = True if self.appearance_dirty: self.appearance_version += 1 if self.parents_changed or self.queryset_dirty: self.bump_version() super_save(**kwargs) if self.update_children: # We also need to bump if node has it's own sample - as in templates, we set fields in toposort order # So we could go from having multiple proband samples to only one later (thus can set descendants) for kid in self.children.select_subclasses(): kid.ancestor_input_samples_changed = self.is_source() or self.ancestor_input_samples_changed or \ self.get_samples_from_node_only_not_ancestors() kid.appearance_dirty = False kid.queryset_dirty = True kid.save() # Will bump versions else: super_save(**kwargs) # Make sure this always exists NodeVersion.objects.get_or_create(node=self, version=self.version) # Modify our analyses last updated time Analysis.objects.filter(pk=self.analysis.pk).update(modified=timezone.now()) def set_node_task_and_status(self, celery_task, status): cursor = connection.cursor() db_pid = cursor.db.connection.get_backend_pid() self.update(status=status) NodeTask.objects.filter(node=self, version=self.version).update(celery_task=celery_task, db_pid=db_pid) def adjust_cloned_parents(self, old_new_map): """ If you need to do something with old/new parents """ pass def save_clone(self): node_id = self.pk try: # Have sometimes had race condition where we try to clone a node that has been updated # In that case we'll just miss out on the cache original_node_version = NodeVersion.get(self) except NodeVersion.DoesNotExist: original_node_version = None copy = self # Have to set both id/pk to None when using model inheritance copy.id = None copy.pk = None copy.version = 1 # 0 is for those being constructed in analysis templates # Store cloned_from so we can use original's NodeCounts copy.cloned_from = original_node_version copy.save() for npf in NodeVCFFilter.objects.filter(node_id=node_id): npf.pk = None npf.node = copy npf.save() naff = NodeAlleleFrequencyFilter.objects.filter(node_id=node_id).first() # 1-to-1 if naff: af_frequency_ranges = list(naff.nodeallelefrequencyrange_set.all().values_list("min", "max")) # Use existing if already created for node (eg AlleleFrequencyNode always makes one) copy_naff, created = NodeAlleleFrequencyFilter.objects.get_or_create(node=copy) if not created: # Wipe out defaults to clear way for clone copy_naff.nodeallelefrequencyrange_set.all().delete() copy_naff.group_operation = 
naff.group_operation copy_naff.save() for min_value, max_value in af_frequency_ranges: copy_naff.nodeallelefrequencyrange_set.create(min=min_value, max=max_value) return copy def __str__(self): return self.name @classmethod def depth_first(cls, node): parents = node.get_parent_subclasses() l = [] for p in parents: l.extend(cls.depth_first(p)) l.append(node) return l class AnalysisEdge(edge_factory(AnalysisNode, concrete=False)): pass class NodeTask(TimeStampedModel): """ Used to track/lock celery update tasks for nodes (uses DB constraints to ensure 1 per node/version) """ node = models.ForeignKey(AnalysisNode, on_delete=CASCADE) version = models.IntegerField(null=False) analysis_update_uuid = models.UUIDField() celery_task = models.CharField(max_length=36, null=True) db_pid = models.IntegerField(null=True) class Meta: unique_together = ("node", "version") def __str__(self): return f"NodeTask: {self.analysis_update_uuid} - {self.node.pk}/{self.version}" class NodeWiki(Wiki): node = models.OneToOneField(AnalysisNode, on_delete=CASCADE) def _get_restricted_object(self): return self.node.analysis class AnalysisNodeAlleleSource(AlleleSource): """ Used to link a nodes variants to alleleles and liftover to other builds """ node = models.ForeignKey(AnalysisNode, null=True, on_delete=SET_NULL) def get_genome_build(self): if self.node: genome_build = self.node.analysis.genome_build else: genome_build = None return genome_build def get_variant_qs(self): if self.node: qs = self.node.get_subclass().get_queryset() else: qs = Variant.objects.none() return qs def liftover_complete(self, genome_build: GenomeBuild): report_event('Completed AnalysisNode liftover', extra_data={'node_id': self.node_id, 'allele_count': self.get_allele_qs().count()}) class NodeVersion(models.Model): """ This will be deleted once a node updates, so make all version specific caches cascade delete from this """ node = models.ForeignKey(AnalysisNode, on_delete=CASCADE) version = models.IntegerField(null=False) class Meta: unique_together = ("node", "version") @staticmethod def get(node: AnalysisNode): try: return NodeVersion.objects.get(node=node, version=node.version) except NodeVersion.DoesNotExist: node.check_still_valid() raise def __str__(self): return f"{self.node.pk} (v{self.version})" class NodeCache(models.Model): node_version = models.OneToOneField(NodeVersion, on_delete=CASCADE) variant_collection = models.OneToOneField(VariantCollection, on_delete=CASCADE) @staticmethod def get_or_create_for_node(node: AnalysisNode) -> Tuple['NodeCache', bool]: variant_collection = VariantCollection.objects.create(name=f"NodeCache {node.node_version}") defaults = {"variant_collection": variant_collection} node_cache, created = thread_safe_unique_together_get_or_create(NodeCache, node_version=node.node_version, defaults=defaults) if not created: variant_collection.delete() return node_cache, created def __str__(self): return f"NodeCache {self.node_version}: {self.variant_collection.get_status_display()}" @receiver(post_delete, sender=NodeCache) def post_delete_node_cache(sender, instance, **kwargs): # pylint: disable=unused-argument """ This can sometimes be called multiple times - if node updated again before previous updates delete_old_node_versions is finished """ try: if instance.variant_collection: instance.variant_collection.delete_related_objects() instance.variant_collection.delete() except VariantCollection.DoesNotExist: # Deleted already pass class NodeCount(models.Model): node_version = models.ForeignKey(NodeVersion, 
on_delete=CASCADE) label = models.CharField(max_length=100) count = models.IntegerField(null=False) class Meta: unique_together = ("node_version", "label") @staticmethod def load_for_node_version(node_version: NodeVersion, label: str) -> 'NodeCount': return NodeCount.objects.get(node_version=node_version, label=label) @staticmethod def load_for_node(node: AnalysisNode, label: str) -> 'NodeCount': return NodeCount.load_for_node_version(NodeVersion.get(node), label=label) def __str__(self): return f"NodeCount({self.node_version}, {self.label}) = {self.count}" class NodeColumnSummaryCacheCollection(models.Model): node_version = models.ForeignKey(NodeVersion, on_delete=CASCADE) variant_column = models.TextField(null=False) extra_filters = models.TextField(null=False) @staticmethod def get_counts_for_node(node, variant_column, extra_filters): node_version = NodeVersion.get(node) ncscc, created = NodeColumnSummaryCacheCollection.objects.get_or_create(node_version=node_version, variant_column=variant_column, extra_filters=extra_filters) if created: extra_filters_q = get_extra_filters_q(node.analysis.user, node.analysis.genome_build, extra_filters) queryset = node.get_queryset(extra_filters_q) count_qs = queryset.values_list(variant_column).distinct().annotate(Count('id')) data_list = [] for value, count in count_qs: data = NodeColumnSummaryData(collection=ncscc, value=value, count=count) data_list.append(data) if data_list: NodeColumnSummaryData.objects.bulk_create(data_list) else: data_list = ncscc.nodecolumnsummarydata_set.all() counts = {} for ncsd in data_list: counts[ncsd.value] = ncsd.count return counts class NodeColumnSummaryData(models.Model): collection = models.ForeignKey(NodeColumnSummaryCacheCollection, on_delete=CASCADE) value = models.TextField(null=True) count = models.IntegerField(null=False) class NodeVCFFilter(models.Model): """ If these exist, they mean use that filter """ node = models.ForeignKey(AnalysisNode, on_delete=CASCADE) vcf_filter = models.ForeignKey(VCFFilter, on_delete=CASCADE, null=True) # null = 'PASS' @staticmethod def filter_for_node(node, vcf): """ returns vfc but also where vcf_filter is NULL (for pass) """ q_vcf_filter = Q(vcf_filter__isnull=True) | Q(vcf_filter__vcf=vcf) return NodeVCFFilter.objects.filter(q_vcf_filter, node=node) class NodeAlleleFrequencyFilter(models.Model): """ Used for various nodes """ node = models.OneToOneField(AnalysisNode, on_delete=CASCADE) group_operation = models.CharField(max_length=1, choices=GroupOperation.choices, default=GroupOperation.ANY) def get_q(self, allele_frequency_path: str, allele_frequency_percent: bool): af_q = None try: filters = [] for af_range in self.nodeallelefrequencyrange_set.all(): # Only apply filter if restricted range. 
# Missing value (historical data) == -1 so those will come through and_filters = [] if af_range.min > 0: min_value = af_range.min if allele_frequency_percent: min_value *= 100.0 and_filters.append(Q(**{allele_frequency_path + "__gte": min_value})) if af_range.max < 1: max_value = af_range.max if allele_frequency_percent: max_value *= 100.0 and_filters.append(Q(**{allele_frequency_path + "__lte": max_value})) if and_filters: and_q = reduce(operator.and_, and_filters) filters.append(and_q) if filters: group_op = GroupOperation.get_operation(self.group_operation) af_q = reduce(group_op, filters) except NodeAlleleFrequencyFilter.DoesNotExist: pass return af_q @staticmethod def get_sample_q(node: AnalysisNode, sample: Sample) -> Optional[Q]: af_q = None if sample: try: allele_frequency_path = sample.get_cohort_genotype_field("allele_frequency") allele_frequency_percent = sample.vcf.allele_frequency_percent af_q = node.nodeallelefrequencyfilter.get_q(allele_frequency_path, allele_frequency_percent) except NodeAlleleFrequencyFilter.DoesNotExist: pass return af_q def get_description(self): # TODO: do this properly with group operators etc af_ranges = list(self.nodeallelefrequencyrange_set.all()) if len(af_ranges) == 1: description = str(af_ranges[0]) else: description = f"{self.get_group_operation_display()} of {len(af_ranges)} filters" return description class NodeAlleleFrequencyRange(models.Model): MIN_VALUE = 0 MAX_VALUE = 1 filter = models.ForeignKey(NodeAlleleFrequencyFilter, on_delete=CASCADE) min = models.FloatField(null=False) max = models.FloatField(null=False) def __str__(self): has_min = self.min is not None and self.min > self.MIN_VALUE has_max = self.max is not None and self.max < self.MAX_VALUE min_perc = format_percent(self.min, is_unit=True) max_perc = format_percent(self.max, is_unit=True) if has_min and has_max: return f"{min_perc} - {max_perc}" if has_min: return f">={min_perc}" if has_max: return f"<={max_perc}" return "" class AnalysisClassification(models.Model): analysis = models.ForeignKey(Analysis, on_delete=CASCADE) classification = models.ForeignKey(Classification, on_delete=CASCADE)
1.632813
2
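Note on the record above: NodeAlleleFrequencyFilter.get_q builds one filter per allele-frequency range and folds them together with reduce under the chosen group operation (ANY/ALL). A minimal, dependency-free sketch of that folding pattern (the ranges and values here are invented, and plain predicates stand in for Django Q objects):
import operator
from functools import reduce

def range_predicate(min_value, max_value):
    # One predicate per NodeAlleleFrequencyRange-style (min, max) pair
    return lambda af: min_value <= af <= max_value

ranges = [(0.0, 0.05), (0.95, 1.0)]            # e.g. keep rare or near-fixed variants
predicates = [range_predicate(lo, hi) for lo, hi in ranges]
group_op = operator.or_                         # "ANY" group operation; use operator.and_ for "ALL"

def passes(af):
    return reduce(group_op, (p(af) for p in predicates))

print(passes(0.02), passes(0.5), passes(0.99))  # True False True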
lbrc_flask/standard_views.py
LCBRU/lbrc_flask_ui
0
12189
<reponame>LCBRU/lbrc_flask_ui<filename>lbrc_flask/standard_views.py<gh_stars>0 import os import traceback from flask import render_template, send_from_directory, current_app, g from .emailing import email def init_standard_views(app): @app.route("/favicon.ico") def favicon(): return send_from_directory( os.path.join(app.root_path, "static"), "favicon.ico", mimetype="image/vnd.microsoft.icon", ) @app.errorhandler(400) def bad_request_page(exception): """Catch 400 bad request errors, display a nice error page and log the error. """ return render_template("lbrc_flask/404.html"), 400 @app.errorhandler(401) def unauthorised_page(exception): """Catch 401 unauthorised errors, display a nice error page and log the error. """ return render_template("lbrc_flask/404.html"), 401 @app.errorhandler(403) def forbidden_page(exception): """Catch 403 forbidden errors, display a nice error page and log the error. """ return render_template("lbrc_flask/404.html"), 403 @app.errorhandler(404) def missing_page(exception): """Catch 404 not found errors, display a nice error page and log the error. """ return render_template("lbrc_flask/404.html"), 404 @app.errorhandler(500) @app.errorhandler(Exception) def internal_error(exception): """Catch internal exceptions and 500 errors, display a nice error page and log the error. """ if 'lbrc_flask_title' in g: app_name = g.lbrc_flask_title else: app_name = 'Application' print(traceback.format_exc()) app.logger.error(traceback.format_exc()) email( subject="{} {} Error".format(current_app.config["ORGANISATION_NAME"], app_name), message=traceback.format_exc(), recipients=[current_app.config["ADMIN_EMAIL_ADDRESS"]], ) return render_template("lbrc_flask/500.html"), 500
2.40625
2
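Hypothetical wiring sketch for the record above, assuming the lbrc_flask package is installed and its module path matches the file path shown; the config keys are only those the 500 handler reads, and the emailing backend may need further settings not shown here:
from flask import Flask
from lbrc_flask.standard_views import init_standard_views

app = Flask(__name__)
app.config["ORGANISATION_NAME"] = "Example Org"          # used in the 500-error email subject
app.config["ADMIN_EMAIL_ADDRESS"] = "admin@example.org"  # recipient of the 500-error email
init_standard_views(app)  # registers the favicon route and the 400/401/403/404/500 handlers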
tests/data/program_analysis/PyAST2CAST/import/test_import_3.py
rsulli55/automates
17
12190
<filename>tests/data/program_analysis/PyAST2CAST/import/test_import_3.py # 'from ... import ...' statement from sys import exit def main(): exit(0) main()
1.320313
1
postmanparser/form_parameter.py
appknox/postmanparser
5
12191
from dataclasses import dataclass from typing import List from typing import Union from postmanparser.description import Description from postmanparser.exceptions import InvalidObjectException from postmanparser.exceptions import MissingRequiredFieldException @dataclass class FormParameter: key: str value: str = "" src: Union[List, str, None] = None disabled: bool = False form_param_type: str = "" content_type: str = "" # should override content-type in header description: Union[Description, None, str] = None @classmethod def parse(cls, data: dict): key = data.get("key") if key is None: raise MissingRequiredFieldException( "'formparameter' object should have 'key' property" ) value = data.get("value", "") src = data.get("src") if value and src is not None: raise InvalidObjectException( "'formparameter' object can either have src or value, and not both." ) description = data.get("description") if isinstance(description, dict): description = Description.parse(description) return cls( key, value=value, src=src, disabled=data.get("disabled", False), form_param_type=data.get("type", ""), content_type=data.get("contentType", ""), description=description, )
2.625
3
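Usage sketch for FormParameter.parse from the record above; the input dict mirrors a Postman collection "formdata" entry, with the field names taken from the parser itself (the values are placeholders):
from postmanparser.form_parameter import FormParameter

param = FormParameter.parse({
    "key": "avatar",
    "src": "/tmp/avatar.png",          # file-type field, so "value" stays empty
    "type": "file",
    "contentType": "image/png",
    "description": "Profile picture upload",
})
print(param.key, param.src, param.content_type)  # avatar /tmp/avatar.png image/png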
pip-check.py
Urucas/pip-check
0
12192
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import pip import os import sys def err(msg): print "\033[31m✗ \033[0m%s" % msg def ok(msg): print "\033[32m✓ \033[0m%s" % msg def main(): cwd = os.getcwd() json_file = os.path.join(cwd, 'dependencies.json') if not os.path.isfile(json_file): err("dependencies.json not found in current folder") sys.exit(1) with open(json_file) as data_file: data = json.load(data_file) dependencies = data["dependencies"] for lib in dependencies: command = pip.commands.install.InstallCommand() opts, args = command.parser.parse_args() requirements_set = command.run(opts, [lib]) requirements_set.install(opts) ok("Successfully installed missing dependencies") if __name__ == "__main__": main()
2.484375
2
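For the record above, the script only requires a top-level "dependencies" list of pip requirement strings in dependencies.json; a minimal illustrative file (package names are placeholders, not part of the original project) can be produced like this:
import json

example = {"dependencies": ["requests", "six==1.16.0"]}  # any pip-installable requirement strings
with open("dependencies.json", "w") as f:
    json.dump(example, f, indent=2)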
hxl/scripts.py
HXLStandard/libhxl-python
30
12193
<filename>hxl/scripts.py """ Console scripts <NAME> April 2015 This is a big, ugly module to support the libhxl console scripts, including (mainly) argument parsing. License: Public Domain Documentation: https://github.com/HXLStandard/libhxl-python/wiki """ from __future__ import print_function import argparse, json, logging, os, re, requests, sys # Do not import hxl, to avoid circular imports import hxl.converters, hxl.filters, hxl.io logger = logging.getLogger(__name__) # In Python2, sys.stdin is a byte stream; in Python3, it's a text stream STDIN = sys.stdin.buffer # Posix exit codes EXIT_OK = 0 EXIT_ERROR = 1 EXIT_SYNTAX = 2 # # Console script entry points # def hxladd(): """Console script for hxladd.""" run_script(hxladd_main) def hxlappend(): """Console script for hxlappend.""" run_script(hxlappend_main) def hxlclean(): """Console script for hxlclean""" run_script(hxlclean_main) def hxlcount(): """Console script for hxlcount.""" run_script(hxlcount_main) def hxlcut(): """Console script for hxlcut.""" run_script(hxlcut_main) def hxldedup(): """Console script for hxldedup.""" run_script(hxldedup_main) def hxlhash(): """Console script for hxlhash.""" run_script(hxlhash_main) def hxlmerge(): """Console script for hxlmerge.""" run_script(hxlmerge_main) def hxlrename(): """Console script for hxlrename.""" run_script(hxlrename_main) def hxlreplace(): """Console script for hxlreplace.""" run_script(hxlreplace_main) def hxlfill(): """Console script for hxlreplace.""" run_script(hxlfill_main) def hxlexpand(): """Console script for hxlexpand.""" run_script(hxlexpand_main) def hxlexplode(): """Console script for hxlexplode.""" run_script(hxlexplode_main) def hxlimplode(): """Console script for hxlimplode.""" run_script(hxlimplode_main) def hxlselect(): """Console script for hxlselect.""" run_script(hxlselect_main) def hxlsort(): """Console script for hxlsort.""" run_script(hxlsort_main) def hxlspec(): """Console script for hxlspec.""" run_script(hxlspec_main) def hxltag(): """Console script for hxltag.""" run_script(hxltag_main) def hxlvalidate(): """Console script for hxlvalidate.""" run_script(hxlvalidate_main) # # Main scripts for command-line tools. # def hxladd_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxladd with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Add new columns with constant values to a HXL dataset.') parser.add_argument( '-s', '--spec', help='Constant value to add to each row (may repeat option)', metavar='header#<tag>=<value>', action='append', required=True ) parser.add_argument( '-b', '--before', help='Add new columns before existing ones rather than after them.', action='store_const', const=True, default=False ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.AddColumnsFilter(source, specs=args.spec, before=args.before) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlappend_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlappend with command-line arguments. 
@param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Concatenate two HXL datasets') # repeatable argument parser.add_argument( '-a', '--append', help='HXL file to append (may repeat option).', metavar='file_or_url', action='append', default=[] ) parser.add_argument( '-l', '--list', help='URL or filename of list of URLs (may repeat option). Will appear after sources in -a options.', action='append', default=[] ) parser.add_argument( '-x', '--exclude-extra-columns', help='Don not add extra columns not in the original dataset.', action='store_const', const=True, default=False ) add_queries_arg(parser, 'From --append datasets, include only rows matching at least one query.') args = parser.parse_args(args) do_common_args(args) append_sources = [] for append_source in args.append: append_sources.append(hxl.data(append_source, True)) for list_source in args.list: for append_source in hxl.filters.AppendFilter.parse_external_source_list(hxl.data(list_source, True)): append_sources.append(hxl.data(append_source, True)) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.AppendFilter( source, append_sources=append_sources, add_columns=(not args.exclude_extra_columns), queries=args.query ) hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags) return EXIT_OK def hxlclean_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlclean with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Clean data in a HXL file.') parser.add_argument( '-w', '--whitespace', help='Comma-separated list of tag patterns for whitespace normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-u', '--upper', help='Comma-separated list of tag patterns for uppercase conversion.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-l', '--lower', help='Comma-separated list of tag patterns for lowercase conversion.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-d', '--date', help='Comma-separated list of tag patterns for date normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '--date-format', help='Date formatting string in strftime format (defaults to %%Y-%%m-%%d).', default=None, metavar='format', ) parser.add_argument( '-n', '--number', help='Comma-separated list of tag patternss for number normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '--number-format', help='Number formatting string in printf format (without leading %%).', default=None, metavar='format', ) parser.add_argument( '--latlon', help='Comma-separated list of tag patterns for lat/lon normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-p', '--purge', help='Purge unparseable dates, numbers, and lat/lon during cleaning.', action='store_const', const=True, default=False ) add_queries_arg(parser, 'Clean only rows matching at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, 
make_output(args, stdout) as output: filter = hxl.filters.CleanDataFilter( source, whitespace=args.whitespace, upper=args.upper, lower=args.lower, date=args.date, date_format=args.date_format, number=args.number, number_format=args.number_format, latlon=args.latlon, purge=args.purge, queries=args.query ) hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags) return EXIT_OK def hxlcount_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlcount with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ # Command-line arguments parser = make_args('Generate aggregate counts for a HXL dataset') parser.add_argument( '-t', '--tags', help='Comma-separated list of column tags to count.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list, default='loc,org,sector,adm1,adm2,adm3' ) parser.add_argument( '-a', '--aggregator', help='Aggregator statement', metavar='statement', action='append', type=hxl.filters.Aggregator.parse, default=[] ) add_queries_arg(parser, 'Count only rows that match at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.CountFilter(source, patterns=args.tags, aggregators=args.aggregator, queries=args.query) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlcut_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): parser = make_args('Cut columns from a HXL dataset.') parser.add_argument( '-i', '--include', help='Comma-separated list of column tags to include', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-x', '--exclude', help='Comma-separated list of column tags to exclude', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-s', '--skip-untagged', help="Skip columns without HXL hashtags", action='store_const', const=True, default=False ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.ColumnFilter(source, args.include, args.exclude, args.skip_untagged) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxldedup_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): parser = make_args('Remove duplicate rows from a HXL dataset.') parser.add_argument( '-t', '--tags', help='Comma-separated list of column tags to use for deduplication (by default, use all values).', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) add_queries_arg(parser, 'Leave rows alone if they don\'t match at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.DeduplicationFilter(source, args.tags, args.query) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlhash_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): parser = make_args( 'Generate an MD5 hash for a HXL dataset (or just its header rows).', hxl_output=False ) parser.add_argument( '-H', '--headers-only', help='Hash only the header and hashtag rows.', action='store_const', const=True, default=False ) args = parser.parse_args(args) 
do_common_args(args) with make_source(args, stdin) as source: if args.headers_only: print(source.columns_hash) else: print(source.data_hash) return EXIT_OK def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlmerge with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Merge part of one HXL dataset into another.') parser.add_argument( '-m', '--merge', help='HXL file to write (if omitted, use standard output).', metavar='filename', required=True ) parser.add_argument( '-k', '--keys', help='HXL tag(s) to use as a shared key.', metavar='tag,tag...', required=True, type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-t', '--tags', help='Comma-separated list of column tags to include from the merge dataset.', metavar='tag,tag...', required=True, type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-r', '--replace', help='Replace empty values in existing columns (when available) instead of adding new ones.', action='store_const', const=True, default=False ) parser.add_argument( '-O', '--overwrite', help='Used with --replace, overwrite existing values.', action='store_const', const=True, default=False ) add_queries_arg(parser, 'Merged data only from rows that match at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output, hxl.io.data(args.merge, True) if args.merge else None as merge_source: filter = hxl.filters.MergeDataFilter( source, merge_source=merge_source, keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite, queries=args.query ) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlrename_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlrename with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Rename and retag columns in a HXL dataset') parser.add_argument( '-r', '--rename', help='Rename an old tag to a new one, with an optional new text header (may repeat option).', action='append', metavar='#?<original_tag>:<Text header>?#?<new_tag>', default=[], type=hxl.filters.RenameFilter.parse_rename ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.RenameFilter(source, args.rename) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlreplace_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlreplace with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Replace strings in a HXL dataset') inline_group = parser.add_argument_group('Inline replacement') map_group = parser.add_argument_group('External substitution map') inline_group.add_argument( '-p', '--pattern', help='String or regular expression to search for', nargs='?' ) inline_group.add_argument( '-s', '--substitution', help='Replacement string', nargs='?' 
) inline_group.add_argument( '-t', '--tags', help='Tag patterns to match', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) inline_group.add_argument( '-r', '--regex', help='Use a regular expression instead of a string', action='store_const', const=True, default=False ) map_group.add_argument( '-m', '--map', help='Filename or URL of a mapping table using the tags #x_pattern (required), #x_substitution (required), #x_tag (optional), and #x_regex (optional), corresponding to the inline options above, for multiple substitutions.', metavar='PATH', nargs='?' ) add_queries_arg(parser, 'Replace only in rows that match at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: if args.map: replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.io.data(args.map, True)) else: replacements = [] if args.pattern: for tag in args.tags: replacements.append(hxl.filters.ReplaceDataFilter.Replacement(args.pattern, args.substitution, tag, args.regex)) filter = hxl.filters.ReplaceDataFilter(source, replacements, queries=args.query) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlfill_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlfill with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Fill empty cells in a HXL dataset') parser.add_argument( '-t', '--tag', help='Fill empty cells only in matching columns (default: fill in all)', metavar='tagpattern,...', type=hxl.model.TagPattern.parse, ) add_queries_arg(parser, 'Fill only in rows that match at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.FillDataFilter(source, pattern=args.tag, queries=args.query) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlexpand_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlexpand with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Expand lists in cells by repeating rows') parser.add_argument( '-t', '--tags', help='Comma-separated list of tag patterns for columns with lists to expand', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list, nargs="?" 
) parser.add_argument( "-s", '--separator', help='string separating list items (defaults to "|")', metavar='string', default="|" ) parser.add_argument( "-c", '--correlate', help='correlate list values instead of producing a cartesian product', action='store_const', const=True, default=False ) add_queries_arg(parser, 'Limit list expansion to rows matching at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.ExpandListsFilter(source, patterns=args.tags, separator=args.separator, correlate=args.correlate, queries=args.query) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlexplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlexplode with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Explode a wide dataset into a long dataset') parser.add_argument( '-H', '--header-att', help='attribute to add to the label column (defaults to "label")', metavar='att', default="label" ) parser.add_argument( '-V', '--value-att', help='attribute to add to the value column (defaults to "value")', metavar='tagpattern', default="value" ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.ExplodeFilter(source, header_attribute=args.header_att, value_attribute=args.value_att) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlimplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlexplode with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Implode a long dataset into a wide dataset.') parser.add_argument( '-L', '--label', help='HXL tag pattern for the label column', metavar='tagpattern', required=True, type=hxl.model.TagPattern.parse, ) parser.add_argument( '-V', '--value', help='HXL tag pattern for the value column', metavar='tagpattern', required=True, type=hxl.model.TagPattern.parse, ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.ImplodeFilter(source, label_pattern=args.label, value_pattern=args.value) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlselect_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlselect with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ # Command-line arguments parser = make_args('Filter rows in a HXL dataset.') parser.add_argument( '-q', '--query', help='Query expression for selecting rows (may repeat option for logical OR). 
<op> may be =, !=, <, <=, >, >=, ~, or !~', action='append', metavar='<tagspec><op><value>', required=True ) parser.add_argument( '-r', '--reverse', help='Show only lines *not* matching criteria', action='store_const', const=True, default=False ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.RowFilter(source, queries=args.query, reverse=args.reverse) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlsort_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlcut with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Sort a HXL dataset.') parser.add_argument( '-t', '--tags', help='Comma-separated list of tags to for columns to use as sort keys.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-r', '--reverse', help='Flag to reverse sort order.', action='store_const', const=True, default=False ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.SortFilter(source, args.tags, args.reverse) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlspec with command-line arguments. Args: args (list): a list of command-line arguments stdin (io.IOBase): alternative standard input (mainly for testing) stdout (io.IOBase): alternative standard output (mainly for testing) stderr (io.IOBase): alternative standard error (mainly for testing) """ def get_json (url_or_filename): if not url_or_filename: return json.load(stdin) if re.match(r'^(?:https?|s?ftp)://', url_or_filename.lower()): headers = make_headers(args) response = requests.get(url_or_filename, verify=(not args.ignore_certs), headers=headers) response.raise_for_status() return response.json() else: with open(url_or_filename, "r") as input: return json.load(input) parser = make_args('Process a HXL JSON spec') args = parser.parse_args(args) do_common_args(args) spec = get_json(args.infile) source = hxl.io.from_spec(spec, allow_local_ok=True) with make_output(args, stdout) as output: hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags) def hxltag_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxltag with command-line arguments. 
@param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Add HXL tags to a raw CSV file.') parser.add_argument( '-a', '--match-all', help='Match the entire header text (not just a substring)', action='store_const', const=True, default=False ) parser.add_argument( '-m', '--map', help='Mapping expression', required=True, action='append', metavar='Header Text#tag', type=hxl.converters.Tagger.parse_spec ) parser.add_argument( '-d', '--default-tag', help='Default tag for non-matching columns', metavar='#tag', type=hxl.model.Column.parse ) args = parser.parse_args(args) do_common_args(args) with make_input(args, stdin) as input, make_output(args, stdout) as output: tagger = hxl.converters.Tagger(input, args.map, default_tag=args.default_tag, match_all=args.match_all) hxl.io.write_hxl(output.output, hxl.io.data(tagger), show_tags=not args.strip_tags) return EXIT_OK def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlvalidate with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Validate a HXL dataset.') parser.add_argument( '-s', '--schema', help='Schema file for validating the HXL dataset (if omitted, use the default core schema).', metavar='schema', default=None ) parser.add_argument( '-a', '--all', help='Include all rows in the output, including those without errors', action='store_const', const=True, default=False ) parser.add_argument( '-e', '--error-level', help='Minimum error level to show (defaults to "info") ', choices=['info', 'warning', 'error'], metavar='info|warning|error', default='info' ) args = parser.parse_args(args) do_common_args(args) with make_input(args, stdin) as input, make_output(args, stdout) as output: class Counter: infos = 0 warnings = 0 errors = 0 def callback(e): """Show a validation error message.""" if e.rule.severity == 'info': if args.error_level != 'info': return Counter.infos += 1 elif e.rule.severity == 'warning': if args.error_level == 'error': return Counter.warnings += 1 else: Counter.errors += 1 message = '[{}] '.format(e.rule.severity) if e.row: if e.rule: message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern) else: message += "{}: ".format(e.row.row_number + 1) elif e.rule: message += "<dataset>,{}: ".format(e.rule.tag_pattern) else: message += "<dataset>: " if e.value: message += '"{}" '.format(e.value) if e.message: message += e.message message += "\n" output.write(message) output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>")) source = hxl.io.data(input) if args.schema: with make_input(args, None, args.schema) as schema_input: schema = hxl.schema(schema_input, callback=callback) else: schema = hxl.schema(callback=callback) schema.validate(source) if args.error_level == 'info': output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos)) elif args.error_level == 'warning': output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings)) else: output.write("{:,} error(s)\n".format(Counter.errors)) if Counter.errors > 0: output.write("Validation failed.\n") return EXIT_ERROR else: output.write("Validation 
succeeded.\n") return EXIT_OK # # Utility functions # def run_script(func): """Try running a command-line script, with exception handling.""" try: sys.exit(func(sys.argv[1:], STDIN, sys.stdout)) except KeyboardInterrupt: logger.error("Interrupted") sys.exit(EXIT_ERROR) def make_args(description, hxl_output=True): """Set up parser with default arguments. @param description: usage description to show @param hxl_output: if True (default), include options for HXL output. @returns: an argument parser, partly set up. """ parser = argparse.ArgumentParser(description=description) parser.add_argument( 'infile', help='HXL file to read (if omitted, use standard input).', nargs='?' ) if hxl_output: parser.add_argument( 'outfile', help='HXL file to write (if omitted, use standard output).', nargs='?' ) parser.add_argument( '--sheet', help='Select sheet from a workbook (1 is first sheet)', metavar='number', type=int, nargs='?' ) parser.add_argument( '--selector', help='JSONPath expression for starting point in JSON input', metavar='path', nargs='?' ) parser.add_argument( '--http-header', help='Custom HTTP header to send with request', metavar='header', action='append' ) if hxl_output: parser.add_argument( '--remove-headers', help='Strip text headers from the CSV output', action='store_const', const=True, default=False ) parser.add_argument( '--strip-tags', help='Strip HXL tags from the CSV output', action='store_const', const=True, default=False ) parser.add_argument( "--ignore-certs", help="Don't verify SSL connections (useful for self-signed)", action='store_const', const=True, default=False ) parser.add_argument( '--log', help='Set minimum logging level', metavar='debug|info|warning|error|critical|none', choices=['debug', 'info', 'warning', 'error', 'critical'], default='error' ) return parser def add_queries_arg(parser, help='Apply only to rows matching at least one query.'): parser.add_argument( '-q', '--query', help=help, metavar='<tagspec><op><value>', action='append' ) return parser def do_common_args(args): """Process standard args""" logging.basicConfig(format='%(levelname)s (%(name)s): %(message)s', level=args.log.upper()) def make_source(args, stdin=STDIN): """Create a HXL input source.""" # construct the input object input = make_input(args, stdin) return hxl.io.data(input) def make_input(args, stdin=sys.stdin, url_or_filename=None): """Create an input object""" if url_or_filename is None: url_or_filename = args.infile # sheet index sheet_index = args.sheet if sheet_index is not None: sheet_index -= 1 # JSONPath selector selector = args.selector http_headers = make_headers(args) return hxl.io.make_input( url_or_filename or stdin, sheet_index=sheet_index, selector=selector, allow_local=True, http_headers=http_headers, verify_ssl=(not args.ignore_certs) ) def make_output(args, stdout=sys.stdout): """Create an output stream.""" if args.outfile: return FileOutput(args.outfile) else: return StreamOutput(stdout) def make_headers (args): # get custom headers header_strings = [] header = os.environ.get("HXL_HTTP_HEADER") if header is not None: header_strings.append(header) if args.http_header is not None: header_strings += args.http_header http_headers = {} for header in header_strings: parts = header.partition(':') http_headers[parts[0].strip()] = parts[2].strip() return http_headers class FileOutput(object): def __init__(self, filename): self.output = open(filename, 'w') def __enter__(self): return self def __exit__(self, value, type, traceback): self.output.close() class StreamOutput(object): 
def __init__(self, output): self.output = output def __enter__(self): return self def __exit__(self, value, type, traceback): pass def write(self, s): self.output.write(s)
2.703125
3
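Sketch of driving one of the console entry points above programmatically: each *_main function accepts an argv-style list plus optional streams. The tiny HXL dataset written here is invented for illustration, and the exact output columns depend on the installed libhxl version:
from hxl.scripts import hxlcount_main

# A minimal tagged CSV: header row, HXL hashtag row, then data rows.
with open("orgs.csv", "w") as f:
    f.write("Organisation,Sector\n#org,#sector\nOrgA,WASH\nOrgA,Health\nOrgB,WASH\n")

# Count rows per #org value; results are written as HXL CSV to standard output.
hxlcount_main(["-t", "org", "orgs.csv"])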
1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py
guangxu-li/leetcode-in-python
0
12194
# # @lc app=leetcode id=1807 lang=python3 # # [1807] Evaluate the Bracket Pairs of a String # # @lc code=start import re class Solution: def evaluate(self, s: str, knowledge: list[list[str]]) -> str: mapping = dict(knowledge) return re.sub(r"\((\w+?)\)", lambda m: mapping.get(m.group(1), "?"), s) # @lc code=end
3.28125
3
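Quick check of the record above with a made-up input (not an official test case), assuming the Solution class from the record is in scope and Python 3.9+ for the built-in generic hints; every "(key)" is replaced by its value from knowledge, and unknown keys become "?":
sol = Solution()
print(sol.evaluate("(name) is (age) years old, city = (city)",
                   [["name", "bob"], ["age", "two"]]))
# -> "bob is two years old, city = ?"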
Math Functions/Uncategorized/Herons formula.py
adrikagupta/Must-Know-Programming-Codes
13
12195
<reponame>adrikagupta/Must-Know-Programming-Codes #Heron's formula# import math unit_of_measurement = "cm" side1 = int(input("Enter the length of side A in cm: ")) side2 = int(input("Enter the length of side B in cm: ")) side3 = int(input("Enter the length of side C in cm: ")) bracket1 = (side1 ** 2) * (side2**2) + (side1**2)*(side3**2) + (side2**2)*(side3**2) bracket2 = (side1**2)+(side2**2)+(side3**2) function_bracket1 = 4*bracket1 function_bracket2 = bracket2**2 both_brackets = function_bracket1 - function_bracket2 result1 = math.sqrt(both_brackets) area_of_triangle = result1 / 4 print("Side A", "=", side1, sep="") print("Side B", "=", side2, sep="") print("Side C", "=", side3, sep="") print() print("Calculated using Heron's Formula") print() print("Area of triangle") print(area_of_triangle, unit_of_measurement, "2", sep="")
4.40625
4
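Cross-check on the record above: the expanded expression it evaluates is algebraically equivalent to the classic Heron form; for a 3-4-5 right triangle (sides chosen only for the check) both give an area of 6:
import math

a, b, c = 3, 4, 5
s = (a + b + c) / 2
heron = math.sqrt(s * (s - a) * (s - b) * (s - c))                                  # classic form
expanded = math.sqrt(4 * (a*a*b*b + a*a*c*c + b*b*c*c) - (a*a + b*b + c*c) ** 2) / 4  # form used in the script
print(heron, expanded)  # 6.0 6.0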
viewer/bitmap_from_array.py
TiankunZhou/dials
2
12196
from __future__ import absolute_import, division, print_function import numpy as np import wx from dials.array_family import flex from dials_viewer_ext import rgb_img class wxbmp_from_np_array(object): def __init__( self, lst_data_in, show_nums=True, palette="black2white", lst_data_mask_in=None ): self.wx_bmp_arr = rgb_img() if lst_data_in is None and lst_data_mask_in is None: self._ini_wx_bmp_lst = None else: self._ini_wx_bmp_lst = [] for lst_pos in range(len(lst_data_in)): data_3d_in = lst_data_in[lst_pos] xmax = data_3d_in.shape[1] ymax = data_3d_in.shape[2] # remember to put here some assertion to check that # both arrays have the same shape if lst_data_mask_in is not None: data_3d_in_mask = lst_data_mask_in[lst_pos] self.vl_max = float(np.amax(data_3d_in)) self.vl_min = float(np.amin(data_3d_in)) tmp_data2d = np.zeros((xmax, ymax), "double") tmp_data2d_mask = np.zeros((xmax, ymax), "double") z_dp = data_3d_in.shape[0] single_block_lst_01 = [] for z in range(z_dp): # print "z =", z tmp_data2d[:, :] = data_3d_in[z : z + 1, :, :] if lst_data_mask_in is not None: tmp_data2d_mask[:, :] = data_3d_in_mask[z : z + 1, :, :] else: tmp_data2d_mask = None data_sigle_img = self._wx_img_w_cpp( tmp_data2d, show_nums, palette, tmp_data2d_mask ) single_block_lst_01.append(data_sigle_img) self._ini_wx_bmp_lst.append(single_block_lst_01) def bmp_lst_scaled(self, scale=1.0): if self._ini_wx_bmp_lst is None: NewW = 350 wx_image = wx.Image(NewW, NewW) wxBitmap = wx_image.ConvertToBitmap() dc = wx.MemoryDC(wxBitmap) text = "No Shoebox data" w, h = dc.GetSize() tw, th = dc.GetTextExtent(text) dc.Clear() dc.DrawText(text, (w - tw) / 2, (h - th) / 2) # display text in center dc.SelectObject(wxBitmap) del dc wx_bmp_lst = [[wxBitmap]] else: wx_bmp_lst = [] for data_3d in self._ini_wx_bmp_lst: single_block_lst = [] for sigle_img_data in data_3d: single_block_lst.append(self._wx_bmp_scaled(sigle_img_data, scale)) wx_bmp_lst.append(single_block_lst) return wx_bmp_lst def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask=None): xmax = np_2d_tmp.shape[1] ymax = np_2d_tmp.shape[0] if np_2d_mask is None: np_2d_mask = np.zeros((ymax, xmax), "double") transposed_data = np.zeros((ymax, xmax), "double") transposed_mask = np.zeros((ymax, xmax), "double") transposed_data[:, :] = np_2d_tmp transposed_mask[:, :] = np_2d_mask flex_data_in = flex.double(transposed_data) flex_mask_in = flex.double(transposed_mask) if palette == "black2white": palette_num = 1 elif palette == "white2black": palette_num = 2 elif palette == "hot ascend": palette_num = 3 else: # assuming "hot descend" palette_num = 4 img_array_tmp = self.wx_bmp_arr.gen_bmp( flex_data_in, flex_mask_in, show_nums, palette_num ) np_img_array = img_array_tmp.as_numpy_array() height = np.size(np_img_array[:, 0:1, 0:1]) width = np.size(np_img_array[0:1, :, 0:1]) img_array = np.empty((height, width, 3), "uint8") img_array[:, :, :] = np_img_array[:, :, :] self._wx_image = wx.Image(width, height) self._wx_image.SetData(img_array.tostring()) data_to_become_bmp = (self._wx_image, width, height) return data_to_become_bmp def _wx_bmp_scaled(self, data_to_become_bmp, scale): to_become_bmp = data_to_become_bmp[0] width = data_to_become_bmp[1] height = data_to_become_bmp[2] NewW = int(width * scale) NewH = int(height * scale) to_become_bmp = to_become_bmp.Scale(NewW, NewH, wx.IMAGE_QUALITY_NORMAL) wxBitmap = to_become_bmp.ConvertToBitmap() return wxBitmap
2.15625
2
spinesTS/utils/_validation.py
BirchKwok/spinesTS
2
12197
import numpy as np def check_x_y(x, y): assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray) assert np.ndim(x) <= 3 and np.ndim(y) <= 2 assert len(x) == len(y)
3
3
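Usage sketch for check_x_y from the record above, assuming the module import path matches the file path shown and the spinesTS package is installed; the arrays are arbitrary random data. Valid shapes pass silently, mismatched lengths raise AssertionError:
import numpy as np
from spinesTS.utils._validation import check_x_y

x = np.random.rand(100, 12)
y = np.random.rand(100, 3)
check_x_y(x, y)  # passes: both ndarrays, ndim within limits, equal length

try:
    check_x_y(x, np.random.rand(99, 3))
except AssertionError:
    print("length mismatch rejected")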
sphinxsharp-pro/sphinxsharp.py
madTeddy/sphinxsharp-pro
2
12198
""" CSharp (С#) domain for sphinx ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sphinxsharp Pro (with custom styling) :copyright: Copyright 2021 by MadTeddy """ import re import warnings from os import path from collections import defaultdict, namedtuple from docutils import nodes from docutils.parsers.rst import directives, Directive from sphinx.locale import get_translation from sphinx.domains import Domain, Index, ObjType from sphinx.roles import XRefRole from sphinx.directives import ObjectDescription from sphinx.util.docfields import DocFieldTransformer from sphinx.util.nodes import make_refnode from sphinx import addnodes from sphinx.util.fileutil import copy_asset MODIFIERS = ('public', 'private', 'protected', 'internal', 'static', 'sealed', 'abstract', 'const', 'partial', 'readonly', 'virtual', 'extern', 'new', 'override', 'unsafe', 'async', 'event', 'delegate') VALUE_KEYWORDS = ('char', 'ulong', 'byte', 'decimal', 'double', 'bool', 'int', 'null', 'sbyte', 'float', 'long', 'object', 'short', 'string', 'uint', 'ushort', 'void') PARAM_MODIFIERS = ('ref', 'out', 'params') MODIFIERS_RE = '|'.join(MODIFIERS) PARAM_MODIFIERS_RE = '|'.join(PARAM_MODIFIERS) TYPE_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(\w+)\s([\w\.]+)(?:<(.+)>)?(?:\s?\:\s?(.+))?$') REF_TYPE_RE = re.compile(r'^(?:(new)\s+)?([\w\.]+)\s*(?:<(.+)>)*(\[\])*\s?(?:\((.*)\))?$') METHOD_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^\s=\(\)]+\s+)?([^\s=\(\)]+)\s?(?:\<(.+)\>)?\s?(?:\((.+)*\))$') PARAM_SIG_RE = re.compile(r'^(?:(?:(' + PARAM_MODIFIERS_RE + r')\s)*)?([^=]+)\s+([^=]+)\s*(?:=\s?(.+))?$') VAR_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^=]+)\s+([^\s=]+)\s*(?:=\s*(.+))?$') PROP_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(.+)\s+([^\s]+)\s*(?:{(\s*get;\s*)?((?:' + MODIFIERS_RE + r')?\s*set;\s*)?})$') ENUM_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(?:enum)\s?(\w+)$') _ = get_translation('sphinxsharp') class CSharpObject(ObjectDescription): PARENT_ATTR_NAME = 'sphinxsharp:parent' PARENT_TYPE_NAME = 'sphinxsharp:type' ParentType = namedtuple('ParentType', ['parent', 'name', 'type', 'override']) option_spec = { 'noindex': directives.flag } def __init__(self, *args, **kwargs): super(CSharpObject, self).__init__(*args, **kwargs) self.parentname_set = None self.parentname_saved = None def run(self): if ':' in self.name: self.domain, self.objtype = self.name.split(':', 1) else: self.domain, self.objtype = '', self.name self.indexnode = addnodes.index(entries=[]) node = addnodes.desc() node.document = self.state.document node['domain'] = self.domain node['classes'].append('csharp') node['objtype'] = node['desctype'] = self.objtype node['noindex'] = noindex = ('noindex' in self.options) self.names = [] signatures = self.get_signatures() for i, sig in enumerate(signatures): beforesignode = CSNodes.EmptyNode() node.append(beforesignode) signode = addnodes.desc_signature(sig, '') signode['first'] = False node.append(signode) self.before_sig(beforesignode) try: name = self.handle_signature(sig, signode) except ValueError: signode.clear() signode += addnodes.desc_name(sig, sig) continue if name not in self.names: self.names.append(name) if not noindex: self.add_target_and_index(name, sig, signode) aftersignode = CSNodes.EmptyNode() node.append(aftersignode) self.after_sig(aftersignode) contentnode = addnodes.desc_content() node.append(contentnode) self.before_content_node(contentnode) if self.names: self.env.temp_data['object'] = self.names[0] self.before_content() 
self.state.nested_parse(self.content, self.content_offset, contentnode) self.after_content_node(contentnode) DocFieldTransformer(self).transform_all(contentnode) self.env.temp_data['object'] = None self.after_content() return [self.indexnode, node] def before_sig(self, signode): """ Called before main ``signode`` appends """ pass def after_sig(self, signode): """ Called after main ``signode`` appends """ pass def before_content_node(self, node): """ Get ``contentnode`` before main content will append """ pass def after_content_node(self, node): """ Get ``contentnode`` after main content was appended """ pass def before_content(self): obj = self.env.temp_data['object'] if obj: self.parentname_set = True self.parentname_saved = self.env.ref_context.get(self.PARENT_ATTR_NAME) self.env.ref_context[self.PARENT_ATTR_NAME] = obj else: self.parentname_set = False def after_content(self): if self.parentname_set: self.env.ref_context[self.PARENT_ATTR_NAME] = self.parentname_saved def has_parent(self): return self._check_parent(self.PARENT_ATTR_NAME) def has_parent_type(self): return self._check_parent(self.PARENT_TYPE_NAME) def _check_parent(self, attr): return attr in self.env.ref_context and \ self.env.ref_context[attr] is not None def get_parent(self): return self.env.ref_context.get(self.PARENT_ATTR_NAME) def get_type_parent(self): return self.env.ref_context.get(self.PARENT_TYPE_NAME) def get_index_text(self, sig, name, typ): raise NotImplementedError('Must be implemented in subclass') def parse_signature(self, sig): raise NotImplementedError('Must be implemented in subclass') def add_target_and_index(self, name, sig, signode): objname, objtype = self.get_obj_name(sig) type_parent = self.get_type_parent() if self.has_parent_type() else None if self.objtype != 'type' and type_parent: self.env.ref_context[self.PARENT_ATTR_NAME] = '{}{}'.format(type_parent.parent + '.' 
\ if type_parent.parent else '', type_parent.name) name = self.get_fullname(objname) self.names.clear() self.names.append(name) anchor = '{}-{}'.format(self.objtype, name) if anchor not in self.state.document.ids: signode['names'].append(anchor) signode['ids'].append(anchor) signode['first'] = (not self.names) self.state.document.note_explicit_target(signode) objects = self.env.domaindata['sphinxsharp']['objects'] key = (self.objtype, name) if key in objects: warnings.warn('duplicate description of {}, other instance in {}'.format( key, self.env.doc2path(objects[key][0])), Warning) objects[key] = (self.env.docname, 'delegate' if self.objtype == 'method' else objtype) index_text = self.get_index_text(sig, objname, objtype) if index_text: parent = self.get_parent() if self.has_parent() else None if type_parent and type_parent.override and type_parent.name != objname: type_parent = self.ParentType(parent=type_parent.parent, name=type_parent.name, type=type_parent.type, override=None) index_format = '{parent} (C# {namespace});{text}' \ if (type_parent and type_parent.parent and (type_parent.name == objname and self.objtype == 'type') \ and not type_parent.override) or (parent and not type_parent) \ else '{name} (C# {type} {in_text} {parent});{text}' if type_parent and type_parent.name else '{text}' self.indexnode['entries'].append(('single', index_format.format( parent=type_parent.parent if type_parent else parent if parent else '', namespace=_('namespace'), text=index_text, name=type_parent.override if type_parent and type_parent.override \ else type_parent.name if type_parent else '', type=_(type_parent.type) if type_parent else '', in_text=_('in') ), anchor, None, None)) def get_fullname(self, name): fullname = '{parent}{name}'.format( parent=self.get_parent() + '.' if self.has_parent() else '', name=name) return fullname def get_obj_name(self, sig): raise NotImplementedError('Must be implemented in subclass') def append_ref_signature(self, typname, signode, append_generic=True): match = REF_TYPE_RE.match(typname.strip()) if not match: raise Exception('Invalid reference type signature. Got: {}'.format(typname)) is_new, name, generic, is_array, constr = match.groups() tnode = addnodes.desc_type() if is_new: tnode += CSNodes.Keyword(text='new') tnode += CSNodes.TextNode(text=' ') types = name.split('.') explicit_path = [] i = 1 for t in types: styp = t.strip() refnode = None if styp not in VALUE_KEYWORDS: explicit_path.append(styp) refnode = addnodes.pending_xref('', refdomain='sphinxsharp', reftype=None, reftarget=styp, modname=None, classname=None) if not self.has_parent(): refnode[self.PARENT_ATTR_NAME] = None else: refnode[self.PARENT_ATTR_NAME] = self.get_parent() if len(explicit_path) > 1: target_path = '.'.join(explicit_path[:-1]) type_par = self.get_type_parent() if self.has_parent_type() else None refnode[self.PARENT_ATTR_NAME] = (type_par.parent + '.' 
                        if type_par and type_par.parent \
                        else '') + target_path
                refnode += CSNodes.UnknownType(typ=None, text=styp)
            else:
                refnode = CSNodes.Keyword(text=styp)
            tnode += refnode
            if i < len(types):
                tnode += CSNodes.TextNode(text='.')
            i += 1
        if append_generic and generic:
            gnode = CSNodes.EmptyNode()
            gnode += CSNodes.TextNode(text='<')
            gen_groups = split_sig(generic)
            i = 1
            for g in gen_groups:
                self.append_ref_signature(g, gnode, append_generic)
                if i < len(gen_groups):
                    gnode += CSNodes.TextNode(text=', ')
                i += 1
            gnode += CSNodes.TextNode(text='>')
            tnode += gnode
        if is_array:
            tnode += CSNodes.TextNode(text='[]')
        if constr is not None:
            tnode += CSNodes.TextNode(text='()')
        signode += tnode

    def append_generic(self, generic, signode):
        gnode = CSNodes.EmptyNode()
        gnode += CSNodes.TextNode(text='<')
        generics = generic.split(',')
        i = 1
        for g in generics:
            gnode += CSNodes.Generic(text=g)
            if i < len(generics):
                gnode += CSNodes.TextNode(text=', ')
            i += 1
        gnode += CSNodes.TextNode(text='>')
        signode += gnode


class CSharpType(CSharpObject):
    option_spec = {
        **CSharpObject.option_spec,
        'nonamespace': directives.flag,
        'parent': directives.unchanged
    }

    def before_sig(self, signode):
        if 'nonamespace' not in self.options and self.has_parent():
            signode += CSNodes.Description(title=_('namespace'), desc=self.get_parent())

    def handle_signature(self, sig, signode):
        mod, typ, name, generic, inherits = self.parse_signature(sig)
        tnode = CSNodes.EmptyNode()
        tnode += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
        tnode += CSNodes.TextNode(text=' ')
        tnode += CSNodes.Keyword(text='{}'.format(typ))
        tnode += CSNodes.TextNode(text=' ')
        tnode += CSNodes.UnknownType(typ=typ, text=name)
        if generic:
            self.append_generic(generic, tnode)
        if inherits:
            inherits_node = CSNodes.EmptyNode()
            inherits_node += CSNodes.TextNode(text=' : ')
            inherit_types = split_sig(inherits)
            i = 1
            for t in inherit_types:
                self.append_ref_signature(t, inherits_node)
                if i < len(inherit_types):
                    inherits_node += CSNodes.TextNode(text=', ')
                i += 1
            tnode += inherits_node
        signode += tnode
        opt_parent = self.options['parent'] if 'parent' in self.options else None
        form = '{}.{}' if self.has_parent() and opt_parent else '{}{}'
        parent = form.format(self.get_parent() if self.has_parent() else '',
                             opt_parent if opt_parent else '')
        self.env.ref_context[CSharpObject.PARENT_TYPE_NAME] = self.ParentType(
            parent=parent, name=name, type=typ, override=opt_parent)
        if opt_parent:
            self.env.ref_context[self.PARENT_ATTR_NAME] = parent
        return self.get_fullname(name)

    def get_index_text(self, sig, name, typ):
        rname = '{} (C# {})'.format(name, _(typ))
        return rname

    def parse_signature(self, sig):
        match = TYPE_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid type signature. Got: {}'.format(sig))
        mod, typ, names, generic, inherits = match.groups()
        return mod, typ.strip(), names, generic, inherits

    def get_obj_name(self, sig):
        _, typ, name, _, _ = self.parse_signature(sig)
        return name, typ


class CSharpEnum(CSharpObject):
    option_spec = {**CSharpObject.option_spec,
                   'values': directives.unchanged_required,
                   **dict(zip([('val(' + str(i) + ')') for i in range(1, 21)],
                              [directives.unchanged] * 20))}

    def handle_signature(self, sig, signode):
        mod, name = self.parse_signature(sig)
        node = CSNodes.EmptyNode()
        if mod:
            node += CSNodes.Modificator(text='{}'.format(mod.strip()))
            node += CSNodes.TextNode(text=' ')
        node += CSNodes.Keyword(text='enum')
        node += CSNodes.TextNode(text=' ')
        node += CSNodes.Enum(text='{}'.format(name.strip()))
        signode += node
        return self.get_fullname(name)

    def after_content_node(self, node):
        options = self.options['values'].split()
        node += CSNodes.Description(title=_('values').title(), desc=', '.join(options))
        options_values = list(value for key, value in self.options.items() \
                              if key not in ('noindex', 'values') and value)
        if not options_values:
            return
        i = 0
        for vname in options:
            if i < len(options_values):
                node += CSNodes.Description(title=vname, desc=options_values[i])
            i += 1

    def parse_signature(self, sig):
        match = ENUM_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid enum signature. Got: {}'.format(sig))
        mod, name = match.groups()
        return mod, name.strip()

    def get_index_text(self, sig, name, typ):
        rname = '{} (C# {})'.format(name, _('enum'))
        return rname

    def get_obj_name(self, sig):
        _, name = self.parse_signature(sig)
        return name, 'enum'


class CSharpVariable(CSharpObject):
    _default = ''

    def handle_signature(self, sig, signode):
        mod, typ, name, self._default = self.parse_signature(sig)
        node = CSNodes.EmptyNode()
        node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
        node += CSNodes.TextNode(text=' ')
        self.append_ref_signature(typ, node)
        node += CSNodes.TextNode(text=' ')
        node += CSNodes.VariableName(text='{}'.format(name))
        signode += node
        return self.get_fullname(name)

    def before_content_node(self, node):
        if self._default:
            node += CSNodes.Description(title=_('value').title(), desc=self._default)

    def parse_signature(self, sig):
        match = VAR_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid variable signature. Got: {}'.format(sig))
        mod, typ, name, default = match.groups()
        return mod, typ.strip(), name.strip(), default

    def get_index_text(self, sig, name, typ):
        rname = '{} (C# {})->{}'.format(name, _('variable'), typ)
        return rname

    def get_obj_name(self, sig):
        _, typ, name, _ = self.parse_signature(sig)
        return name, typ


class CSharpProperty(CSharpObject):
    def handle_signature(self, sig, signode):
        mod, typ, name, getter, setter = self.parse_signature(sig)
        node = CSNodes.EmptyNode()
        node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
        node += CSNodes.TextNode(text=' ')
        self.append_ref_signature(typ, node)
        node += CSNodes.TextNode(text=' ')
        node += CSNodes.MethodName(text='{}'.format(name))
        node += CSNodes.TextNode(text=' { ')
        accessors = []
        if getter:
            accessors.append('get;')
        if setter:
            accessors.append(setter.strip())
        node += CSNodes.Modificator(text=' '.join(accessors))
        node += CSNodes.TextNode(text=' } ')
        signode += node
        return self.get_fullname(name)

    def parse_signature(self, sig):
        match = PROP_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid property signature. Got: {}'.format(sig))
        mod, typ, name, getter, setter = match.groups()
        return mod, typ.strip(), name.strip(), getter, setter

    def get_index_text(self, sig, name, typ):
        rname = '{} (C# {})->{}'.format(name, _('property'), typ)
        return rname

    def get_obj_name(self, sig):
        _, typ, name, _, _ = self.parse_signature(sig)
        return name, typ


class CSharpMethod(CSharpObject):
    option_spec = {**CSharpObject.option_spec,
                   'returns': directives.unchanged,
                   **dict(zip([('param(' + str(i) + ')') for i in range(1, 8)],
                              [directives.unchanged] * 7))}

    _params_list = ()

    def handle_signature(self, sig, signode):
        mod, typ, name, generic, params = self.parse_signature(sig)
        node = CSNodes.EmptyNode()
        node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
        node += CSNodes.TextNode(text=' ')
        self.append_ref_signature(typ if typ else name, node)
        if typ:
            node += CSNodes.TextNode(text=' ')
            node += CSNodes.MethodName(text='{}'.format(name))
        if generic:
            self.append_generic(generic, node)
        param_node = CSNodes.EmptyNode()
        param_node += CSNodes.TextNode(text='(')
        if params:
            self._params_list = self._get_params(params)
            i = 1
            for (pmod, ptyp, pname, pvalue) in self._params_list:
                pnode = CSNodes.EmptyNode()
                if pmod:
                    pnode += CSNodes.Keyword(text='{}'.format(pmod))
                    pnode += CSNodes.TextNode(text=' ')
                self.append_ref_signature(ptyp, pnode)
                pnode += CSNodes.TextNode(text=' ')
                pnode += CSNodes.TextNode(text='{}'.format(pname))
                if pvalue:
                    pnode += CSNodes.TextNode(text=' = ')
                    self.append_ref_signature(pvalue, pnode)
                param_node += pnode
                if i < len(self._params_list):
                    param_node += CSNodes.TextNode(text=', ')
                i += 1
        param_node += CSNodes.TextNode(text=')')
        node += param_node
        signode += node
        return self.get_fullname(name)

    def before_content_node(self, node):
        if 'returns' in self.options:
            node += CSNodes.Description(title=_('returns').title(), desc=self.options['returns'])

    def after_content_node(self, node):
        options_values = list(value for key, value in self.options.items() if key != 'noindex')
        i = 0
        for (_, _, pname, _) in self._params_list:
            if i < len(options_values):
                node += CSNodes.Description(title=pname, desc=options_values[i], lower=True)
            i += 1

    def after_content(self):
        super().after_content()
        if self._params_list is not None and len(self._params_list) > 0:
            del self._params_list

    def parse_signature(self, sig):
        match = METHOD_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid method signature. Got: {}'.format(sig))
        mod, typ, name, generic, params = match.groups()
        return mod, typ, name.strip(), generic, params

    @staticmethod
    def parse_param_signature(sig):
        match = PARAM_SIG_RE.match(sig.strip())
        if not match:
            raise Exception('Invalid parameter signature. Got: {}'.format(sig))
        mod, typ, name, value = match.groups()
        return mod, typ.strip(), name.strip(), value

    def _get_params(self, params):
        if not params:
            return None
        result = []
        params_group = split_sig(params)
        for param in params_group:
            pmod, ptyp, pname, pvalue = self.parse_param_signature(param)
            result.append((pmod, ptyp, pname, pvalue))
        return result

    def get_index_text(self, sig, name, typ):
        params_text = ''
        if self._params_list:
            names = [pname for _, _, pname, _ in self._params_list]
            params_text = '({})'.format(', '.join(names))
        if typ:
            rname = '{}{} (C# {})->{}'.format(name, params_text, _('method'), typ)
        else:
            rname = '{}{} (C# {})->{}'.format(name, params_text, _('constructor'), name)
        return rname

    def get_obj_name(self, sig):
        _, typ, name, _, _ = self.parse_signature(sig)
        return name, typ


class CSharpNamespace(Directive):
    required_arguments = 1

    def run(self):
        env = self.state.document.settings.env
        namespace = self.arguments[0].strip()
        if namespace is None:
            env.ref_context.pop(CSharpObject.PARENT_ATTR_NAME, None)
        else:
            env.ref_context[CSharpObject.PARENT_ATTR_NAME] = namespace
        return []


class CSharpEndType(Directive):
    required_arguments = 0

    def run(self):
        env = self.state.document.settings.env
        if CSharpObject.PARENT_TYPE_NAME in env.ref_context:
            env.ref_context.pop(CSharpObject.PARENT_TYPE_NAME, None)
        return []


class CSharpXRefRole(XRefRole):
    def process_link(self, env, refnode, has_explicit_title, title, target):
        refnode[CSharpObject.PARENT_ATTR_NAME] = env.ref_context.get(
            CSharpObject.PARENT_ATTR_NAME)
        return super(CSharpXRefRole, self).process_link(env, refnode, has_explicit_title,
                                                        title, target)


class CSharpIndex(Index):
    name = 'csharp'
    localname = 'CSharp Index'
    shortname = 'CSharp'

    def generate(self, docnames=None):
        content = defaultdict(list)
        objects = self.domain.get_objects()
        objects = sorted(objects, key=lambda obj: obj[0])
        for name, dispname, objtype, docname, anchor, _ in objects:
            content[dispname.split('.')[-1][0].lower()].append(
                (dispname, 0, docname, anchor, docname, '', objtype))
        content = sorted(content.items())
        return content, True


class CSharpDomain(Domain):
    name = 'sphinxsharp'
    label = 'C#'

    roles = {
        'type': CSharpXRefRole(),
        'var': CSharpXRefRole(),
        'prop': CSharpXRefRole(),
        'meth': CSharpXRefRole(),
        'enum': CSharpXRefRole()
    }

    object_types = {
        'type': ObjType(_('type'), 'type', 'obj'),
        'variable': ObjType(_('variable'), 'var', 'obj'),
        'property': ObjType(_('property'), 'prop', 'obj'),
        'method': ObjType(_('method'), 'meth', 'obj'),
        'enum': ObjType(_('enum'), 'enum', 'obj')
    }

    directives = {
        'namespace': CSharpNamespace,
        'end-type': CSharpEndType,
        'type': CSharpType,
        'variable': CSharpVariable,
        'property': CSharpProperty,
        'method': CSharpMethod,
        'enum': CSharpEnum
    }

    indices = {
        CSharpIndex
    }

    initial_data = {
        'objects': {}  # (objtype, name) -> (docname, objtype(class, struct etc.))
    }

    def clear_doc(self, docname):
        for (objtype, name), (doc, _) in self.data['objects'].copy().items():
            if doc == docname:
                del self.data['objects'][(objtype, name)]

    def get_objects(self):
        for (objtype, name), (docname, _) in self.data['objects'].items():
            yield (name, name, objtype, docname, '{}-{}'.format(objtype, name), 0)

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        targets = get_targets(target, node)
        objects = self.data['objects']
        roletypes = self.objtypes_for_role(typ)
        types = ('type', 'enum', 'method') if typ is None else roletypes
        for t in targets:
            for objtyp in types:
                key = (objtyp, t)
                if key in objects:
                    obj = objects[key]
                    if typ is not None:
                        role = self.role_for_objtype(objtyp)
                        node['reftype'] = role
                    else:
                        contnode = CSNodes.UnknownType(typ=obj[1], text=target)
                    return make_refnode(builder, fromdocname, obj[0],
                                        '{}-{}'.format(objtyp, t), contnode,
                                        '{} {}'.format(obj[1], t))
        if typ is None:
            contnode = CSNodes.UnknownType(text=target)
        return None

    def merge_domaindata(self, docnames, otherdata):
        for (objtype, name), (docname, typ) in otherdata['objects'].items():
            if docname in docnames:
                self.data['objects'][(objtype, name)] = (docname, typ)

    def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
        for typ in self.roles:
            xref = self.resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
            if xref:
                return [('sphinxsharp:{}'.format(typ), xref)]
        return []


class CSNodes:
    _TYPES = ('class', 'struct', 'interface', 'enum', 'delegate')

    class BaseNode(nodes.Element):
        def __init__(self, rawsource='', *children, **attributes):
            super().__init__(rawsource, *children, **attributes)

        @staticmethod
        def visit_html(self, node):
            self.body.append(self.starttag(node, 'div'))

        @staticmethod
        def depart_html(self, node):
            self.body.append('</div>')

    class EmptyNode(BaseNode):
        def __init__(self, rawsource='', *children, **attributes):
            super().__init__(rawsource, *children, **attributes)

        @staticmethod
        def visit_html(self, node):
            pass

        @staticmethod
        def depart_html(self, node):
            pass

    class InlineText(BaseNode):
        def __init__(self, rawsource, type_class, text, *children, **attributes):
            super().__init__(rawsource, *children, **attributes)
            if type_class is None:
                return
            self['classes'].append(type_class)
            if text:
                self.append(nodes.raw(text=text, format='html'))

        @staticmethod
        def visit_html(self, node):
            self.body.append(self.starttag(node, 'span').replace('\n', ''))

        @staticmethod
        def depart_html(self, node):
            self.body.append('</span>')

    class Description(BaseNode):
        def __init__(self, rawsource='', title='', desc='', *children, **attributes):
            super().__init__(rawsource, *children, **attributes)
            self['classes'].append('desc')
            if title and desc:
                if 'lower' not in attributes:
                    title = title[0].upper() + title[1:]
                node = nodes.raw(
                    text='<strong class="first">{}:</strong><span class="last">{}</span>'.format(title, desc),
                    format='html')
                self.append(node)
            else:
                raise Exception('Title and description must be assigned.')

    class Modificator(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'mod', text, *children, **attributes)

    class UnknownType(InlineText):
        def __init__(self, rawsource='', typ='', text='', *children, **attributes):
            objclass = typ
            if not text:
                super().__init__(rawsource, None, text, *children, **attributes)
                return
            if typ not in CSNodes._TYPES:
                objclass = 'kw'
                if typ not in VALUE_KEYWORDS:
                    objclass = 'unknown'
            super().__init__(rawsource, objclass, text, *children, **attributes)

    class TextNode(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'text', text, *children, **attributes)

    class MethodName(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'meth-name', text, *children, **attributes)

    class VariableName(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'var-name', text, *children, **attributes)

    class Keyword(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'kw', text, *children, **attributes)

    class Enum(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'enum', text, *children, **attributes)

    class Generic(InlineText):
        def __init__(self, rawsource='', text='', *children, **attributes):
            super().__init__(rawsource, 'generic', text, *children, **attributes)

    @staticmethod
    def add_nodes(app):
        app.add_node(CSNodes.Description,
                     html=(CSNodes.Description.visit_html, CSNodes.Description.depart_html))
        app.add_node(CSNodes.Modificator,
                     html=(CSNodes.Modificator.visit_html, CSNodes.Modificator.depart_html))
        app.add_node(CSNodes.UnknownType,
                     html=(CSNodes.UnknownType.visit_html, CSNodes.UnknownType.depart_html))
        app.add_node(CSNodes.TextNode,
                     html=(CSNodes.TextNode.visit_html, CSNodes.TextNode.depart_html))
        app.add_node(CSNodes.Enum,
                     html=(CSNodes.Enum.visit_html, CSNodes.Enum.depart_html))
        app.add_node(CSNodes.Keyword,
                     html=(CSNodes.Keyword.visit_html, CSNodes.Keyword.depart_html))
        app.add_node(CSNodes.MethodName,
                     html=(CSNodes.MethodName.visit_html, CSNodes.MethodName.depart_html))
        app.add_node(CSNodes.VariableName,
                     html=(CSNodes.VariableName.visit_html, CSNodes.VariableName.depart_html))
        app.add_node(CSNodes.BaseNode,
                     html=(CSNodes.BaseNode.visit_html, CSNodes.BaseNode.depart_html))
        app.add_node(CSNodes.EmptyNode,
                     html=(CSNodes.EmptyNode.visit_html, CSNodes.EmptyNode.depart_html))
        app.add_node(CSNodes.Generic,
                     html=(CSNodes.Generic.visit_html, CSNodes.Generic.depart_html))


def split_sig(params):
    if not params:
        return None
    result = []
    current = ''
    level = 0
    for char in params:
        if char in ('<', '{', '['):
            level += 1
        elif char in ('>', '}', ']'):
            level -= 1
        if char != ',' or level > 0:
            current += char
        elif char == ',' and level == 0:
            result.append(current)
            current = ''
    if current.strip() != '':
        result.append(current)
    return result


def get_targets(target, node):
    targets = [target]
    if node[CSharpObject.PARENT_ATTR_NAME] is not None:
        parts = node[CSharpObject.PARENT_ATTR_NAME].split('.')
        while parts:
            targets.append('{}.{}'.format('.'.join(parts), target))
            parts = parts[:-1]
    return targets


def copy_asset_files(app, exc):
    package_dir = path.abspath(path.dirname(__file__))
    asset_files = [path.join(package_dir, '_static/css/sphinxsharp.css')]
    if exc is None:  # build succeeded
        for asset_path in asset_files:
            copy_asset(asset_path, path.join(app.outdir, '_static'))


def setup(app):
    app.connect('build-finished', copy_asset_files)
    package_dir = path.abspath(path.dirname(__file__))
    app.add_domain(CSharpDomain)
    app.add_css_file('sphinxsharp.css')
    override_file = path.join(app.confdir, '_static/sphinxsharp-override.css')
    if path.exists(override_file):
        app.add_css_file('sphinxsharp-override.css')
    CSNodes.add_nodes(app)
    locale_dir = path.join(package_dir, 'locales')
    app.add_message_catalog('sphinxsharp', locale_dir)
    return {
        'version': '1.0.2',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
1.601563
2
articles/migrations/0003_article_published_at.py
mosalaheg/django3.2
0
12199
<filename>articles/migrations/0003_article_published_at.py
# Generated by Django 3.2.7 on 2021-10-02 08:24

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('articles', '0002_auto_20211002_1019'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='published_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
1.4375
1