hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c383b81e111b7e6a0cda4576f4e669d55c172076 | 78 | py | Python | idmatch/matching/fixtures/__init__.py | javierherrera1996/idmatch | 8bb27dafaa12b7b0bdb745071e81e6b940b7553a | [
"MIT"
] | 55 | 2017-05-27T11:13:33.000Z | 2022-01-27T21:22:28.000Z | idmatch/matching/fixtures/__init__.py | javierherrera1996/idmatch | 8bb27dafaa12b7b0bdb745071e81e6b940b7553a | [
"MIT"
] | 14 | 2017-05-27T11:10:08.000Z | 2022-01-13T00:39:22.000Z | idmatch/matching/fixtures/__init__.py | javierherrera1996/idmatch | 8bb27dafaa12b7b0bdb745071e81e6b940b7553a | [
"MIT"
] | 18 | 2017-05-30T19:08:17.000Z | 2022-01-29T00:19:25.000Z | # coding: utf-8
from wilde import WILDE_VECTOR
from corey import COREY_VECTOR
| 19.5 | 30 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.192308 |
c38411b92515ccabaac79cf9fee8dda8f91e01e8 | 1,343 | py | Python | tournament.py | feat7/chess_lm | 1e8538b980f19616d5042557612024e76f1a1ca6 | [
"MIT"
] | null | null | null | tournament.py | feat7/chess_lm | 1e8538b980f19616d5042557612024e76f1a1ca6 | [
"MIT"
] | null | null | null | tournament.py | feat7/chess_lm | 1e8538b980f19616d5042557612024e76f1a1ca6 | [
"MIT"
] | null | null | null | # """run the models and calculate ELO ratings
# 19.11.2020 - @yashbonde"""
# from argparse import ArgumentParser
# from chess_lm.model import ModelConfig
# from chess_lm.game import Player
# import torch
# def expected(p1, p2):
# return 1 / (1 - 10 ** ((p2 - p1) / 400))
# def elo(p, e, s, k=32):
# return p + k * (s - e)
# def new_elos_after_tournament(p1, p2, s):
# e = 0
# for _p2 in p2:
# e += expected(p1, _p2)
# _p1 = elo(p1, expected(p1, p2), s)
# return _p1
# # ---- script
# args = ArgumentParser(
# description='run tournament and obtain ELO ratings of different models')
# args.add_argument("--m1", type=str, default=".model_sample/z4_0.pt",
# help="path to first model checkpoint file")
# args.add_argument("--m2", type=str, default=".model_sample/z4_0.pt",
# help="path to second model checkpoint file")
# args.add_argument("--num_rounds", type=int, default=800,
# help="number of rounds in the tournament")
# args = args.parse_args()
# # make the baseline configuration and load the models
# config = ModelConfig(
# vocab_size=1793, # Fix: Model shape mismatch error
# n_ctx=60,
# n_embd=128,
# n_head=8,
# n_layer=30,
# n_positions=60,
# )
# m1 = Player(config, args.m1)
# m1 = Player(config, args.m2)
| 27.408163 | 78 | 0.619509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,295 | 0.964259 |
c387c470aa7c4ac79a6f030e94394a414d67b817 | 5,092 | py | Python | workbench.py | swprojects/Serial-Sequence-Creator | cf468a3db777d6b4348fd53d1daa8432f6889f11 | [
"MIT"
] | 1 | 2018-10-29T20:10:43.000Z | 2018-10-29T20:10:43.000Z | workbench.py | swprojects/Serial-Sequence-Creator | cf468a3db777d6b4348fd53d1daa8432f6889f11 | [
"MIT"
] | null | null | null | workbench.py | swprojects/Serial-Sequence-Creator | cf468a3db777d6b4348fd53d1daa8432f6889f11 | [
"MIT"
] | 1 | 2018-10-29T20:11:31.000Z | 2018-10-29T20:11:31.000Z | """
Description:
Requirements: pySerial, wxPython Phoenix
glossary and of other descriptions:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import json
import logging
import serial
import serialfunctions as sf
import sys
import time
import wx
from wx.lib.pubsub import setuparg1
from wx.lib.pubsub import pub
#------------------------------------------------#
# workbench
#------------------------------------------------#
class PowerSupply(wx.Panel):
def __init__(self, parent, port, data):
wx.Panel.__init__(self, parent)
self.psu_connection = None
self.port = port
self.manufacturer = data["manufacturer"]
self.send_bytes = data["sendbytes"]
self.end_line = data["endline"]
self.channels = data["channels"]
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
text = wx.StaticText(self)
text.SetLabel("Note: channel numbers do not necessarily indicate left-to-right"
+" on the power supply itself")
hsizer.Add(text, 0, wx.ALL|wx.EXPAND, 5)
hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.volt_channels = {}
self.amp_channels = {}
for n in self.channels:
channel_box = wx.StaticBox(self, label="Channel " +str(n))
channel_box_sizer = wx.StaticBoxSizer(channel_box, wx.HORIZONTAL)
volt_sizer = wx.BoxSizer(wx.VERTICAL)
self.volt_channels[n] = wx.TextCtrl(self)
# self.volt_channels[n].SetFont(DIGITAL_FONT)
volt_set = wx.Button(self, label="Set V", size=(-1, 24))
volt_sizer.Add(self.volt_channels[n], 0, wx.ALL|wx.EXPAND, 5)
volt_sizer.Add(volt_set, 0, wx.ALL|wx.EXPAND, 5)
amp_sizer = wx.BoxSizer(wx.VERTICAL)
self.amp_channels[n] = wx.TextCtrl(self)
amp_set = wx.Button(self, label="Set A", size=(-1, 24))
amp_sizer.Add(self.amp_channels[n], 0, wx.ALL|wx.EXPAND, 5)
amp_sizer.Add(amp_set, 0, wx.ALL|wx.EXPAND, 5)
channel_box_sizer.Add(volt_sizer, 1, wx.ALL|wx.EXPAND, 5)
channel_box_sizer.Add(amp_sizer, 1, wx.ALL|wx.EXPAND, 5)
hsizer2.Add(channel_box_sizer, 0, wx.ALL|wx.EXPAND, 5)
sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
sizer.Add(hsizer2, 1, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer)
self.ConnectToPSU(self.port)
def ConnectToPSU(self, port):
# configure the serial connections (the parameters differs on the device you are connecting to)
ser = serial.Serial(port=port,
baudrate=9600,
parity=serial.PARITY_ODD,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.SEVENBITS)
print(ser)
ser.isOpen()
self.psu_connection = ser
# self.timer_update_channel.Start(1)
self.RefreshReadings()
def RefreshReadings(self):
if not self.psu_connection:
return
# get voltage of output in Volts
for ch in self.volt_channels:
cmd = "V" +str(ch) + "?"
reading = self.SendToSerial(cmd)
self.volt_channels[ch].SetValue(reading)
# get current limits of output in Amp
for ch in self.amp_channels:
cmd = "I" +str(ch) + "?"
reading = self.SendToSerial(cmd)
self.amp_channels[ch].SetValue(reading)
def SendToSerial(self, input):
end = self.end_line
ser = self.psu_connection
ser.write(bytes(input + end, "utf8"))
time.sleep(0.1)
out = ""
while ser.inWaiting() > 0:
# print(ser.read(1))
out += str(ser.read(1), "utf8")
return out
def UpdateChannel(self, event):
if not self.psu_connection:
return
v1 = self.SendToSerial(self.psu_connection, "V1?")
self.display_voltage1.SetValue(v1)
def DoStepVoltage(self):
channel = 2 # available channels 0 or 1
for v in range(0, 15):
input = "V" + str(channel) + " " + str(v)
out = self.SendToSerial(self.psu_connection, input)
class Multimeter(wx.Panel):
def __init__(self, parent, data):
wx.Panel.__init__(self, parent)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
name = e.GetName()
if name == "Instrument List":
if label == "Refresh Instruments":
self.DoRefreshInstruments()
| 32.433121 | 103 | 0.546347 | 4,535 | 0.890613 | 0 | 0 | 0 | 0 | 0 | 0 | 885 | 0.173802 |
c38a2ff286af8deb46586d2a5d04d87e2d90d9d1 | 1,723 | py | Python | exawind/prelude/coroutines.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | exawind/prelude/coroutines.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | exawind/prelude/coroutines.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""\
Coroutine utilities
-------------------
Some code snippets inspired by http://www.dabeaz.com/coroutines/
"""
import re
import functools
def coroutine(func):
"""Prime a coroutine for send commands.
Args:
func (coroutine): A function that takes values via yield
Return:
function: Wrapped coroutine function
"""
@functools.wraps(func)
def _func(*args, **kwargs):
fn = func(*args, **kwargs)
next(fn)
return fn
return _func
@coroutine
def echo(**kwargs):
"""A simple output sink
Useful as a consumer of data from other coroutines that just print to console
"""
while True:
output = (yield)
print(output, **kwargs)
@coroutine
def grep(pattern, targets,
send_close=True,
matcher="search",
flags=0):
"""Unix grep-like utility
Feeds lines matching a target to consumer targets registered with this function
Args:
pattern (str): A regular expression as string (compiled internally)
targets (list): A list of consumer coroutines that want to act on matching lines
send_close (bool): If True, closes targets when grep exits
matcher: ``search``, ``match``, ``findall`` methods of regular expression
flags: Regexp flags used when compiling pattern
"""
pat = re.compile(pattern, flags=flags)
sfunc = getattr(pat, matcher)
try:
while True:
line = (yield)
mat = sfunc(line)
if mat:
for tgt in targets:
tgt.send(mat)
except GeneratorExit:
if send_close:
for tgt in targets:
tgt.close()
| 25.338235 | 88 | 0.597795 | 0 | 0 | 1,175 | 0.68195 | 1,321 | 0.766686 | 0 | 0 | 939 | 0.54498 |
c38a99159a465c6c6adcac264c6b8eb5c21be376 | 1,467 | py | Python | models.py | askomorokhov/fast-api-example | 5d23ddd39413f37697c81f267bb69117011d56f5 | [
"MIT"
] | null | null | null | models.py | askomorokhov/fast-api-example | 5d23ddd39413f37697c81f267bb69117011d56f5 | [
"MIT"
] | null | null | null | models.py | askomorokhov/fast-api-example | 5d23ddd39413f37697c81f267bb69117011d56f5 | [
"MIT"
] | null | null | null | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.orm import relationship
import datetime
from database import Base
class Org(Base):
__tablename__ = "orgs"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, index=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
buildings = relationship("Building", back_populates="org")
class Building(Base):
__tablename__ = "buildings"
id = Column(Integer, primary_key=True, index=True)
org_id = Column(Integer, ForeignKey(Org.id))
name = Column(String, unique=True, index=True)
address = Column(String)
org = relationship("Org", back_populates="buildings")
groups = relationship("Group", back_populates="building")
class Group(Base):
__tablename__ = "groups"
id = Column(Integer, primary_key=True, index=True)
building_id = Column(Integer, ForeignKey(Building.id))
name = Column(String, index=True)
building = relationship("Building", back_populates="groups")
points = relationship("Point", back_populates="building")
class Point(Base):
__tablename__ = "points"
id = Column(Integer, primary_key=True, index=True)
group_id = Column(Integer, ForeignKey(Building.id))
device_id = Column(Integer, index=True)
name = Column(String)
location = Column(String)
building = relationship("Group", back_populates="points") | 29.34 | 77 | 0.714383 | 1,295 | 0.882754 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.089298 |
c38b6c7baddf81a33d7c82bf0de977f520050f0e | 6,991 | py | Python | mysterious_moose/src/virus.py | fiddlen/code-jam-5 | 924a6f8b0467ce2473811348c943dd6f1fe25972 | [
"MIT"
] | 1 | 2019-06-28T17:10:11.000Z | 2019-06-28T17:10:11.000Z | mysterious_moose/src/virus.py | fiddlen/code-jam-5 | 924a6f8b0467ce2473811348c943dd6f1fe25972 | [
"MIT"
] | 14 | 2019-06-28T18:07:17.000Z | 2019-07-01T21:30:06.000Z | mysterious_moose/src/virus.py | fiddlen/code-jam-5 | 924a6f8b0467ce2473811348c943dd6f1fe25972 | [
"MIT"
] | null | null | null | import math
import pygame
class Virus:
""" Main Virus class """
def __init__(self, impact, virulence, detectability, industry, start_region, renderer=None):
self.blocks = []
self.impact = impact
self.virulence = virulence
self.detectability = detectability
# self.graphic = VirusGraphic(renderer)
self.name = ""
self.industry = industry # the industry the virus is attacking
self.released = False # whether the virus has been launched or not
self.affected_regions = [start_region]
def update_stats(self):
""" updates a virus's key stats to current block values"""
# reset values
self.impact, self.virulence, self.detectability = 0, 0, 0
# read each block and add respective values
for block in self.blocks:
self.impact += block.impact
self.virulence += block.virulence
self.detectability += block.detectability
self.graphic.update_stats(self.name, self.impact, self.virulence, self.detectability)
def valid(self):
""" checks whether the virus is valid or not """
if len(self.blocks) > 0 and 0 <= self.industry <= 2:
return True
else:
return False
def add_block(self, block):
""" adds a block to the virus"""
self.blocks.append(block)
self.update_stats()
def remove_block(self, block):
""" removes a block from a virus"""
self.blocks.remove(block)
self.update_stats()
def update_name(self, name):
if len(name) > 15:
self.name = name[:15]
else:
self.name = name
self.update_stats()
# class VirusGraphic:
# """ can create and return key Virus graphics"""
# def __init__(self, renderer):
# self.renderer = renderer
#
# self.name = ""
# self.resolution = pygame.display.Info()
# self.resolution = (self.resolution.current_w, self.resolution.current_h)
# self.impact, self.virulence, self.detectability = 0, 0, 0
#
# self.card = pygame.Surface((900, 300))
# self.impact_bar = pygame.Surface((345, 80))
# self.virulence_bar = pygame.Surface((345, 80))
# self.detectability_bar = pygame.Surface((345, 80))
#
# self.update(self.resolution)
#
# def update_stats(self, name, impact, virulence, detectability):
# self.name = name
# self.impact = impact
# self.virulence = virulence
# self.detectability = detectability
# self.update(self.resolution)
#
# @staticmethod
# def display_value(x):
# try:
# return math.log(x, 2)/10
# except ValueError:
# return 0
#
# def update(self, resolution):
# """ updates graphical elements when resolution or virus stats change """
# self.resolution = resolution
#
# colours = {
# "outline": (200, 200, 200),
# "internal": (75, 75, 75),
# "text": (255, 255, 255),
# "impact": (255, 50, 50),
# "virulence": (50, 255, 50),
# "detectability": (50, 50, 255)
# }
#
# # main view card
# self.card = pygame.Surface((900, 300))
#
# self.card.fill(colours["outline"])
# internal_bg = pygame.Rect(25, 25, 500, 250)
#
# name_text = self.renderer.fonts["main"].render(self.name, colours["text"], size=60)
#
# impact_icon = pygame.Rect(530, 25, 80, 80)
# virulence_icon = pygame.Rect(530, 110, 80, 80)
# detectability_icon = pygame.Rect(530, 195, 80, 80)
#
# impact_bar_bg = pygame.Rect(615, 25, 260, 80)
# virulence_bar_bg = pygame.Rect(615, 110, 260, 80)
# detectability_bar_bg = pygame.Rect(615, 195, 260, 80)
#
# impact_text = self.renderer.fonts["main"].render("I", colours["text"], size=80)
# virulence_text = self.renderer.fonts["main"].render("V", colours["text"], size=80)
# detectability_text = self.renderer.fonts["main"].render("D", colours["text"], size=80)
#
# impact_bar = pygame.Rect(615, 25, 260 * self.display_value(self.impact), 80)
# virulence_bar = pygame.Rect(615, 110, 260 * self.display_value(self.virulence), 80)
# detectability_bar = pygame.Rect(615, 195, 260 * self.display_value(self.detectability), 80)
#
# pygame.draw.rect(self.card, colours["internal"], internal_bg)
#
# pygame.draw.rect(self.card, colours["internal"], impact_icon)
# pygame.draw.rect(self.card, colours["internal"], virulence_icon)
# pygame.draw.rect(self.card, colours["internal"], detectability_icon)
#
# pygame.draw.rect(self.card, colours["internal"], impact_bar_bg)
# pygame.draw.rect(self.card, colours["internal"], virulence_bar_bg)
# pygame.draw.rect(self.card, colours["internal"], detectability_bar_bg)
#
# self.card.blit(name_text[0], (40, 40))
#
# self.card.blit(impact_text[0], impact_text[0].get_rect(center=(570, 65)))
# self.card.blit(virulence_text[0], virulence_text[0].get_rect(center=(570, 150)))
# self.card.blit(detectability_text[0], detectability_text[0].get_rect(center=(570, 235)))
#
# pygame.draw.rect(self.card, colours["impact"], impact_bar)
# pygame.draw.rect(self.card, colours["virulence"], virulence_bar)
# pygame.draw.rect(self.card, colours["detectability"], detectability_bar)
#
# self.card = pygame.transform.scale(self.card, (resolution[0]//5, resolution[0]//15))
#
# # virus view and creation bars
# self.impact_bar = pygame.Surface((345, 80))
# self.virulence_bar = pygame.Surface((345, 80))
# self.detectability_bar = pygame.Surface((345, 80))
#
# impact_icon = pygame.Rect(0, 0, 80, 80)
# virulence_icon = pygame.Rect(0, 0, 80, 80)
# detectability_icon = pygame.Rect(0, 0, 80, 80)
#
# impact_bar_bg = pygame.Rect(85, 0, 260, 80)
# virulence_bar_bg = pygame.Rect(85, 0, 260, 80)
# detectability_bar_bg = pygame.Rect(85, 0, 260, 80)
#
# pygame.draw.rect(self.impact_bar, colours["internal"], impact_icon)
# pygame.draw.rect(self.virulence_bar, colours["internal"], virulence_icon)
# pygame.draw.rect(self.detectability_bar, colours["internal"], detectability_icon)
#
# pygame.draw.rect(self.impact_bar, colours["internal"], impact_bar_bg)
# pygame.draw.rect(self.virulence_bar, colours["internal"], virulence_bar_bg)
# pygame.draw.rect(self.detectability_bar, colours["internal"], detectability_bar_bg)
#
# self.impact_bar.blit(impact_text[0], impact_text[0].get_rect(center=(40, 40)))
# self.virulence_bar.blit(virulence_text[0], virulence_text[0].get_rect(center=(40, 40)))
# self.detectability_bar.blit(
# detectability_text[0], detectability_text[0].get_rect(center=(40, 40))
# )
| 40.410405 | 101 | 0.61479 | 1,690 | 0.241739 | 0 | 0 | 0 | 0 | 0 | 0 | 5,529 | 0.790874 |
c38b84c928798be42a3d5c701e732748115bcb1c | 3,507 | py | Python | alipay/aop/api/domain/ReduceInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/ReduceInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/ReduceInfo.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ReduceInfo(object):
def __init__(self):
self._brand_name = None
self._consume_amt = None
self._consume_store_name = None
self._payment_time = None
self._promo_amt = None
self._user_name = None
@property
def brand_name(self):
return self._brand_name
@brand_name.setter
def brand_name(self, value):
self._brand_name = value
@property
def consume_amt(self):
return self._consume_amt
@consume_amt.setter
def consume_amt(self, value):
self._consume_amt = value
@property
def consume_store_name(self):
return self._consume_store_name
@consume_store_name.setter
def consume_store_name(self, value):
self._consume_store_name = value
@property
def payment_time(self):
return self._payment_time
@payment_time.setter
def payment_time(self, value):
self._payment_time = value
@property
def promo_amt(self):
return self._promo_amt
@promo_amt.setter
def promo_amt(self, value):
self._promo_amt = value
@property
def user_name(self):
return self._user_name
@user_name.setter
def user_name(self, value):
self._user_name = value
def to_alipay_dict(self):
params = dict()
if self.brand_name:
if hasattr(self.brand_name, 'to_alipay_dict'):
params['brand_name'] = self.brand_name.to_alipay_dict()
else:
params['brand_name'] = self.brand_name
if self.consume_amt:
if hasattr(self.consume_amt, 'to_alipay_dict'):
params['consume_amt'] = self.consume_amt.to_alipay_dict()
else:
params['consume_amt'] = self.consume_amt
if self.consume_store_name:
if hasattr(self.consume_store_name, 'to_alipay_dict'):
params['consume_store_name'] = self.consume_store_name.to_alipay_dict()
else:
params['consume_store_name'] = self.consume_store_name
if self.payment_time:
if hasattr(self.payment_time, 'to_alipay_dict'):
params['payment_time'] = self.payment_time.to_alipay_dict()
else:
params['payment_time'] = self.payment_time
if self.promo_amt:
if hasattr(self.promo_amt, 'to_alipay_dict'):
params['promo_amt'] = self.promo_amt.to_alipay_dict()
else:
params['promo_amt'] = self.promo_amt
if self.user_name:
if hasattr(self.user_name, 'to_alipay_dict'):
params['user_name'] = self.user_name.to_alipay_dict()
else:
params['user_name'] = self.user_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ReduceInfo()
if 'brand_name' in d:
o.brand_name = d['brand_name']
if 'consume_amt' in d:
o.consume_amt = d['consume_amt']
if 'consume_store_name' in d:
o.consume_store_name = d['consume_store_name']
if 'payment_time' in d:
o.payment_time = d['payment_time']
if 'promo_amt' in d:
o.promo_amt = d['promo_amt']
if 'user_name' in d:
o.user_name = d['user_name']
return o
| 30.232759 | 87 | 0.600228 | 3,390 | 0.966638 | 0 | 0 | 1,542 | 0.439692 | 0 | 0 | 464 | 0.132307 |
c38bc9c5d5c49a6041942f29f1c2c82abcfe2e97 | 290 | py | Python | ex049.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | ex049.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | ex049.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | '''Rafaça o DESAFIO 009, mostrando a tabuada de um número que o usuário escolher, só que agora utilizando um laço for.'''
n = int(input('Digite um número para ver sua tabuada: '))
print('-' * 12)
for tabu in range(0, 11):
print('{} x {:2} = {}'.format(n, tabu, n*tabu))
print('-' * 12) | 41.428571 | 121 | 0.641379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.641892 |
c38cb6bc625148dcfd0b93b5a724e542998afc6d | 1,899 | py | Python | data_plotting/idlsize/plots.py | krinii/dds-on-hardware | 86905069493130679ad8a3b1bfd3465319106fd0 | [
"MIT"
] | null | null | null | data_plotting/idlsize/plots.py | krinii/dds-on-hardware | 86905069493130679ad8a3b1bfd3465319106fd0 | [
"MIT"
] | null | null | null | data_plotting/idlsize/plots.py | krinii/dds-on-hardware | 86905069493130679ad8a3b1bfd3465319106fd0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import time
import array
import numpy as np
import pandas as pd
import statistics
import matplotlib.pyplot as plt
import seaborn as sns
# sns.set_theme(style="darkgrid")
x_b = [1, 10, 100, 1000, 10000, 100000, 1000000]
cyc_pi2 = [8379072, 8379072, 3675200, 372864, 37312, 3728, 368]
cyc_pi4 = [8376016, 8376016, 8376016, 1865072, 186752, 18664, 1864]
cyc_lap = [8372616, 8372616, 8372616, 2145304, 214464, 21376, 2072]
# print("Correlation:", np.corrcoef(x_b, cyc_pi2))
# plt.bar(cyc_pi2, x_b , align='center', alpha=0.5)
# plt.legend(['CycloneDDS Laptop', 'CycloneDDS RPi4', 'CycloneDDS RPi2', 'FastDDS Laptop', 'FastDDS RP4'])
# plt.title('CycloneDDS')
barWidth = 0.25
x_pos = np.arange(len(x_b))
r1 = np.arange(len(cyc_lap))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
'''
fig, ax = plt.subplots()
rects3 = ax.bar(x_pos - 2*width/3, cyc_lap, width, label='Laptop')
rects2 = ax.bar(x_pos + width/3, cyc_pi4, width, label='RPi4')
rects3 = ax.bar(x_pos + 3*width/3, cyc_pi2, width, label='RPi2')
'''
ax = plt.gca()
ax.tick_params(axis = 'both', which = 'major', labelsize = 22)
ax.tick_params(axis = 'both', which = 'minor', labelsize = 22)
plt.bar(r1, cyc_lap, width=barWidth, label='Laptop')
plt.bar(r2, cyc_pi4, width=barWidth, label='RPi4')
plt.bar(r3, cyc_pi2, width=barWidth, label='RPi2')
# plt.bar(x_pos, cyc_pi2, align='center', alpha=0.5)
# plt.xticks(x_pos, x_b)
plt.xticks([r + barWidth for r in range(len(cyc_lap))], x_b)
plt.ylabel('Bytes', fontsize=24)
plt.xlabel('Buffer Size', fontsize=24)
plt.title('IDL size Capacity (CycloneDDS)', fontsize=26)
plt.yscale('log')
plt.grid(b=True, which='both', color='#BBBBBB', linestyle='-', axis='y')
plt.legend(fontsize=24)
'''
plt.yscale('log')
plt.xlabel('Bytes')
plt.xticks(x_b)
plt.ylabel('Samples')
plt.grid(b=True, which='both', color='#BBBBBB', linestyle='-')
'''
plt.show()
| 29.215385 | 106 | 0.691417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 856 | 0.450764 |
c38e1f71e6b3d9b9f233c17af18409553439e9b3 | 446 | py | Python | app/extensions.py | rileymjohnson/fbla | 1f1c37f54edd00af0b47b7c256523c7145f6be6f | [
"MIT"
] | null | null | null | app/extensions.py | rileymjohnson/fbla | 1f1c37f54edd00af0b47b7c256523c7145f6be6f | [
"MIT"
] | null | null | null | app/extensions.py | rileymjohnson/fbla | 1f1c37f54edd00af0b47b7c256523c7145f6be6f | [
"MIT"
] | null | null | null | from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import logging
bcrypt = Bcrypt()
login_manager = LoginManager()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
gunicorn_error_logger = logging.getLogger('gunicorn.error') | 29.733333 | 59 | 0.838565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.035874 |
c38e3282802bbc19a4073c3f750e891c9ae10713 | 207 | py | Python | tests/scripts/negative_linenumber_offsets.py | andyfcx/py-spy | 1e971b91c739237708f11c2ddcb1324ab0bc37c7 | [
"MIT"
] | 8,112 | 2018-08-09T13:35:54.000Z | 2022-03-31T23:23:52.000Z | tests/scripts/negative_linenumber_offsets.py | andyfcx/py-spy | 1e971b91c739237708f11c2ddcb1324ab0bc37c7 | [
"MIT"
] | 327 | 2018-08-21T10:39:06.000Z | 2022-03-29T08:58:22.000Z | tests/scripts/negative_linenumber_offsets.py | andyfcx/py-spy | 1e971b91c739237708f11c2ddcb1324ab0bc37c7 | [
"MIT"
] | 328 | 2018-08-21T09:36:49.000Z | 2022-03-30T10:15:18.000Z | import time
def f():
[
# Must be split over multiple lines to see the error.
# https://github.com/benfred/py-spy/pull/208
time.sleep(1)
for _ in range(1000)
]
f()
| 14.785714 | 61 | 0.550725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.468599 |
c38e6007bf09401e85d2d7df62aaccf825cb43da | 13,175 | py | Python | starfish/core/imagestack/parser/crop.py | haoxusci/starfish | d7bd856024c75f2ce41504406f2a663566c3814b | [
"MIT"
] | 164 | 2018-03-21T21:52:56.000Z | 2022-03-23T17:14:39.000Z | starfish/core/imagestack/parser/crop.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 1,728 | 2018-03-15T23:16:09.000Z | 2022-03-12T00:09:18.000Z | starfish/core/imagestack/parser/crop.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 66 | 2018-03-25T17:21:15.000Z | 2022-01-16T09:17:11.000Z | from collections import OrderedDict
from typing import Collection, List, Mapping, MutableSequence, Optional, Set, Tuple, Union
import numpy as np
from slicedimage import Tile, TileSet
from starfish.core.imagestack.parser import TileCollectionData, TileData, TileKey
from starfish.core.types import ArrayLike, Axes, Coordinates, Number
class CropParameters:
"""Parameters for cropping an ImageStack at load time."""
def __init__(
self,
*,
permitted_rounds: Optional[Collection[int]]=None,
permitted_chs: Optional[Collection[int]]=None,
permitted_zplanes: Optional[Collection[int]]=None,
x_slice: Optional[Union[int, slice]]=None,
y_slice: Optional[Union[int, slice]]=None,
):
"""
Parameters
----------
permitted_rounds : Optional[Collection[int]]
The rounds in the original dataset to load into the ImageStack. If this is not set,
then all rounds are loaded into the ImageStack.
permitted_chs : Optional[Collection[int]]
The channels in the original dataset to load into the ImageStack. If this is not set,
then all channels are loaded into the ImageStack.
permitted_zplanes : Optional[Collection[int]]
The z-layers in the original dataset to load into the ImageStack. If this is not set,
then all z-layers are loaded into the ImageStack.
x_slice : Optional[Union[int, slice]]
The x-range in the x-y tile that is loaded into the ImageStack. If this is not set,
then the entire x-y tile is loaded into the ImageStack.
y_slice : Optional[Union[int, slice]]
The y-range in the x-y tile that is loaded into the ImageStack. If this is not set,
then the entire x-y tile is loaded into the ImageStack.
"""
self._permitted_rounds = set(permitted_rounds) if permitted_rounds else None
self._permitted_chs = set(permitted_chs) if permitted_chs else None
self._permitted_zplanes = set(permitted_zplanes) if permitted_zplanes else None
self._x_slice = x_slice
self._y_slice = y_slice
def _add_permitted_axes(self, axis_type: Axes, permitted_axis: int) -> None:
"""
Add a value to one of the permitted axes sets.
"""
if axis_type == Axes.ROUND and self._permitted_rounds:
self._permitted_rounds.add(permitted_axis)
if axis_type == Axes.CH and self._permitted_chs:
self._permitted_chs.add(permitted_axis)
if axis_type == Axes.ZPLANE and self._permitted_zplanes:
self._permitted_zplanes.add(permitted_axis)
def filter_tilekeys(self, tilekeys: Collection[TileKey]) -> Collection[TileKey]:
"""
Filters tilekeys for those that should be included in the resulting ImageStack.
"""
results: MutableSequence[TileKey] = list()
for tilekey in tilekeys:
if self._permitted_rounds is not None and tilekey.round not in self._permitted_rounds:
continue
if self._permitted_chs is not None and tilekey.ch not in self._permitted_chs:
continue
if self._permitted_zplanes is not None and tilekey.z not in self._permitted_zplanes:
continue
results.append(tilekey)
return results
@staticmethod
def _crop_axis(size: int, crop: Optional[Union[int, slice]]) -> Tuple[int, int]:
"""
Given the size of along an axis, and an optional cropping, return the start index
(inclusive) and end index (exclusive) of the crop. If no crop is specified, then the
original size (0, size) is returned.
"""
# convert int crops to a slice operation.
if isinstance(crop, int):
if crop < 0 or crop >= size:
raise IndexError("crop index out of range")
return crop, crop + 1
# convert start and stop to absolute values.
start: int
if crop is None or crop.start is None:
start = 0
elif crop.start is not None and crop.start < 0:
start = max(0, size + crop.start)
else:
start = min(size, crop.start)
stop: int
if crop is None or crop.stop is None:
stop = size
elif crop.stop is not None and crop.stop < 0:
stop = max(0, size + crop.stop)
else:
stop = min(size, crop.stop)
return start, stop
    @staticmethod
    def parse_aligned_groups(tileset: TileSet,
                             rounds: Optional[Collection[int]] = None,
                             chs: Optional[Collection[int]] = None,
                             zplanes: Optional[Collection[int]] = None,
                             x: Optional[Union[int, slice]] = None,
                             y: Optional[Union[int, slice]] = None
                             ) -> List["CropParameters"]:
        """Takes a tileset and any optional selected axes lists and compares the
        physical coordinates on each tile to create aligned coordinate groups
        (groups of tiles that have the same physical coordinates).

        Parameters
        ----------
        tileset: TileSet
            The TileSet to parse
        rounds: Optional[Collection[int]]
            The rounds in the tileset to include in the final aligned groups. If this is not set,
            then all rounds are included.
        chs: Optional[Collection[int]]
            The chs in the tileset to include in the final aligned groups. If this is not set,
            then all chs are included.
        zplanes: Optional[Collection[int]]
            The zplanes in the tileset to include in the final aligned groups. If this is not set,
            then all zplanes are included.
        x: Optional[Union[int, slice]]
            The x-range in the x-y tile to include in the final aligned groups. If this is not set,
            then the entire x-y tile is included.
        y: Optional[Union[int, slice]]
            The y-range in the x-y tile to include in the final aligned groups. If this is not set,
            then the entire x-y tile is included.

        Returns
        -------
        List["CropParameters"]
            A list of CropParameters. Each entry describes the r/ch/z values of tiles that are
            aligned (have matching coordinates) and are within the selected_axes if provided.
        """
        # Keyed by the tile's (x-min, x-max, y-min, y-max) physical extent;
        # OrderedDict keeps group order stable with tileset iteration order.
        coord_groups: OrderedDict[tuple, CropParameters] = OrderedDict()
        for tile in tileset.tiles():
            # Skip tiles excluded by the round/ch/zplane selections.
            if CropParameters.tile_in_selected_axes(tile, rounds, chs, zplanes):
                x_y_coords = (
                    tile.coordinates[Coordinates.X][0], tile.coordinates[Coordinates.X][1],
                    tile.coordinates[Coordinates.Y][0], tile.coordinates[Coordinates.Y][1]
                )
                # A tile with this (x, y) has already been seen, add tile's indices to
                # CropParameters
                if x_y_coords in coord_groups:
                    crop_params = coord_groups[x_y_coords]
                    crop_params._add_permitted_axes(Axes.CH, tile.indices[Axes.CH])
                    crop_params._add_permitted_axes(Axes.ROUND, tile.indices[Axes.ROUND])
                    # ZPLANE may be absent from a tile's indices; only add it
                    # when present.
                    if Axes.ZPLANE in tile.indices:
                        crop_params._add_permitted_axes(Axes.ZPLANE, tile.indices[Axes.ZPLANE])
                else:
                    # First tile at this physical extent: start a new group
                    # seeded with this tile's indices and the x/y crop.
                    coord_groups[x_y_coords] = CropParameters(
                        permitted_chs=[tile.indices[Axes.CH]],
                        permitted_rounds=[tile.indices[Axes.ROUND]],
                        permitted_zplanes=[tile.indices[Axes.ZPLANE]]
                        if Axes.ZPLANE in tile.indices else None,
                        x_slice=x,
                        y_slice=y)
        return list(coord_groups.values())
@staticmethod
def tile_in_selected_axes(tile: Tile,
rounds: Optional[Collection[int]] = None,
chs: Optional[Collection[int]] = None,
zplanes: Optional[Collection[int]] = None) -> bool:
"""
Return True if a tile belongs in a list of selected axes.
Parameters
----------
tile:
The tile in question
rounds: Optional[Collection[int]]
The allowed rounds.
chs: Optional[Collection[int]]
The allowed chs.
zplanes: Optional[Collection[int]]
The allowed zplanes.
Returns
-------
Boolean
True if tile belongs with selected axes, False if not.
"""
if rounds and tile.indices[Axes.ROUND] not in rounds:
return False
if chs and tile.indices[Axes.CH] not in chs:
return False
if zplanes and tile.indices[Axes.ZPLANE] not in zplanes:
return False
return True
def crop_shape(self, shape: Mapping[Axes, int]) -> Mapping[Axes, int]:
"""
Given the shape of the original tile, return the shape of the cropped tile.
"""
output_x_shape = CropParameters._crop_axis(shape[Axes.X], self._x_slice)
output_y_shape = CropParameters._crop_axis(shape[Axes.Y], self._y_slice)
width = output_x_shape[1] - output_x_shape[0]
height = output_y_shape[1] - output_y_shape[0]
return {Axes.Y: height, Axes.X: width}
def crop_image(self, image: np.ndarray) -> np.ndarray:
"""
Given the original image, return the cropped image.
"""
output_x_shape = CropParameters._crop_axis(image.shape[1], self._x_slice)
output_y_shape = CropParameters._crop_axis(image.shape[0], self._y_slice)
return image[output_y_shape[0]:output_y_shape[1], output_x_shape[0]:output_x_shape[1]]
def crop_coordinates(
self, coordinates: Mapping[Coordinates, ArrayLike[Number]],
) -> Mapping[Coordinates, ArrayLike[Number]]:
"""
Given a mapping of coordinate to coordinate values, return a mapping of the coordinate to
cropped coordinate values.
"""
output_x_shape = CropParameters._crop_axis(len(coordinates[Coordinates.X]), self._x_slice)
output_y_shape = CropParameters._crop_axis(len(coordinates[Coordinates.Y]), self._y_slice)
return_coords = {
Coordinates.X: coordinates[Coordinates.X][output_x_shape[0]:output_x_shape[1]],
Coordinates.Y: coordinates[Coordinates.Y][output_y_shape[0]:output_y_shape[1]],
}
if Coordinates.Z in coordinates:
return_coords[Coordinates.Z] = coordinates[Coordinates.Z]
return return_coords
class CroppedTileData(TileData):
    """A TileData view that lazily applies a crop to a backing tile."""

    def __init__(self, tile_data: TileData, cropping_parameters: CropParameters):
        self.backing_tile_data = tile_data
        self.cropping_parameters = cropping_parameters

    @property
    def tile_shape(self) -> Mapping[Axes, int]:
        """Shape of the backing tile after the crop is applied."""
        uncropped_shape = self.backing_tile_data.tile_shape
        return self.cropping_parameters.crop_shape(uncropped_shape)

    @property
    def numpy_array(self) -> np.ndarray:
        """Pixel data of the backing tile after the crop is applied."""
        uncropped_image = self.backing_tile_data.numpy_array
        return self.cropping_parameters.crop_image(uncropped_image)

    @property
    def coordinates(self) -> Mapping[Coordinates, ArrayLike[Number]]:
        """Physical coordinates of the backing tile after the crop is applied."""
        uncropped_coordinates = self.backing_tile_data.coordinates
        return self.cropping_parameters.crop_coordinates(uncropped_coordinates)

    @property
    def selector(self) -> Mapping[Axes, int]:
        """Axis selector of the backing tile (unchanged by cropping)."""
        return self.backing_tile_data.selector
class CroppedTileCollectionData(TileCollectionData):
    """A TileCollectionData view that filters tilekeys and crops tiles."""

    def __init__(
            self,
            backing_tile_collection_data: TileCollectionData,
            crop_parameters: CropParameters,
    ) -> None:
        self.backing_tile_collection_data = backing_tile_collection_data
        self.crop_parameters = crop_parameters

    def __getitem__(self, tilekey: TileKey) -> dict:
        """Raw tile document lookup, delegated unchanged to the backing data."""
        return self.backing_tile_collection_data[tilekey]

    def keys(self) -> Collection[TileKey]:
        """Tilekeys of the backing data that survive the crop's r/ch/z filters."""
        backing_keys = self.backing_tile_collection_data.keys()
        return self.crop_parameters.filter_tilekeys(backing_keys)

    @property
    def group_by(self) -> Set[Axes]:
        """Returns the axes to group by when we load the data."""
        return self.backing_tile_collection_data.group_by

    @property
    def tile_shape(self) -> Mapping[Axes, int]:
        """Backing tile shape with the crop applied."""
        return self.crop_parameters.crop_shape(self.backing_tile_collection_data.tile_shape)

    @property
    def extras(self) -> dict:
        """Extras dictionary, delegated unchanged to the backing data."""
        return self.backing_tile_collection_data.extras

    def get_tile_by_key(self, tilekey: TileKey) -> TileData:
        """Fetch a tile by key, wrapped so the crop is applied lazily."""
        backing_tile = self.backing_tile_collection_data.get_tile_by_key(tilekey)
        return CroppedTileData(backing_tile, self.crop_parameters)

    def get_tile(self, r: int, ch: int, z: int) -> TileData:
        """Fetch a tile by indices, wrapped so the crop is applied lazily."""
        backing_tile = self.backing_tile_collection_data.get_tile(r, ch, z)
        return CroppedTileData(backing_tile, self.crop_parameters)
| 42.915309 | 105 | 0.62649 | 12,829 | 0.973738 | 0 | 0 | 6,584 | 0.499734 | 0 | 0 | 4,293 | 0.325844 |
c391b115fbcae9056636fe28c8607436688bbc00 | 6,456 | py | Python | mpc_ros/script/teleop_keyboard.py | NaokiTakahashi12/mpc_ros | 8451fec293a5aee72d5fad0323ec206d08d0ed96 | [
"Apache-2.0"
] | 335 | 2019-03-11T23:03:07.000Z | 2022-03-31T13:40:21.000Z | mpc_ros/script/teleop_keyboard.py | NaokiTakahashi12/mpc_ros | 8451fec293a5aee72d5fad0323ec206d08d0ed96 | [
"Apache-2.0"
] | 30 | 2019-05-02T13:59:14.000Z | 2022-03-30T10:56:34.000Z | mpc_ros/script/teleop_keyboard.py | NaokiTakahashi12/mpc_ros | 8451fec293a5aee72d5fad0323ec206d08d0ed96 | [
"Apache-2.0"
] | 103 | 2018-07-11T15:08:38.000Z | 2022-03-17T13:57:24.000Z | #!/usr/bin/python
# This is a modified verison of turtlebot_teleop.py
# to fullfill the needs of HyphaROS MiniCar use case
# Copyright (c) 2018, HyphaROS Workshop
#
# The original license info are as below:
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, select, termios, tty, math
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
# Usage banner printed at startup and re-shown periodically by the main loop.
header_msg = """
Control HyphaROS Minicar!
-------------------------
Moving around:
        i
   j    k    l
        ,
w/x : increase/decrease throttle bounds by 10%
e/c : increase/decrease steering bounds by 10%
s   : safety mode
space key, k : force stop
anything else : keep previous commands
CTRL-C to quit
"""
# Func for getting keyboard value
def getKey(safety_mode):
    """Read one keystroke from stdin with the terminal in raw mode.

    In safety mode the read blocks until a key arrives; otherwise stdin is
    polled for 0.1 s and '' is returned when no key is pending.  The
    terminal attributes captured in the module-level ``settings`` are
    restored before returning.
    """
    tty.setraw(sys.stdin.fileno())
    if safety_mode:
        # Zero-timeout poll, then a blocking single-character read.
        select.select([sys.stdin], [], [], 0)
        key = sys.stdin.read(1)
    else:
        readable, _, _ = select.select([sys.stdin], [], [], 0.1)
        key = sys.stdin.read(1) if readable else ''
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
# Func for showing current bounds
def showInfo(speed_bound, angle_bound):
    """Return a one-line, tab-separated read-out of the current bounds."""
    return "current bounds:\tspeed {}\tangle {} ".format(speed_bound, angle_bound)
# Main Func
if __name__ == "__main__":
    # Remember the cooked terminal settings so getKey() can restore them.
    settings = termios.tcgetattr(sys.stdin)

    rospy.init_node('minicar_teleop')
    pub_cmd = rospy.Publisher('/ackermann_cmd', AckermannDriveStamped, queue_size=5)
    pub_safe = rospy.Publisher('/ackermann_safe', AckermannDriveStamped, queue_size=5)

    # Parameters (all overridable through private ROS parameters).
    safe_mode = bool(rospy.get_param('~safety_mode', False))     # true for safety cmds
    speed_i = float(rospy.get_param('~speed_incremental', 0.1))  # m/s
    angle_i = float(rospy.get_param('~angle_incremental', 5.0*math.pi/180.0))  # rad (=5 degree)
    speed_bound = float(rospy.get_param('~speed_bound', 2.0))
    angle_bound = float(rospy.get_param('~angle_bound', 30.0*math.pi/180.0))

    # NOTE: the Python-2-only `print "..."` statements in this block were
    # converted to print() calls, consistent with the print() calls already
    # used elsewhere in this file (identical output on Python 2 and 3).
    if safe_mode:
        print("Switched to Safety Mode !")

    # key -> (throttle delta, steering delta)
    moveBindings = {
        'i': (speed_i, 0.0),
        'j': (0.0, angle_i),
        'l': (0.0, -angle_i),
        ',': (-speed_i, 0.0),
    }
    # key -> (speed-bound factor, angle-bound factor)
    boundBindings = {
        'w': (1.1, 1),
        'x': (.9, 1),
        'e': (1, 1.1),
        'c': (1, .9),
    }

    status = 0           # counts bound changes to periodically re-show help
    target_speed = 0.0   # m/s
    target_angle = 0.0   # rad

    # Single message object reused for every publication.
    ackermann_msg = AckermannDriveStamped()
    #ackermann_msg.header.frame_id = 'car_id' # for future multi-cars applicaton

    try:
        print(header_msg)
        print(showInfo(speed_bound, angle_bound))
        while True:
            key = getKey(safe_mode)
            if key in moveBindings:
                target_speed = target_speed + moveBindings[key][0]
                target_angle = target_angle + moveBindings[key][1]
            elif key in boundBindings:
                speed_bound = speed_bound * boundBindings[key][0]
                angle_bound = angle_bound * boundBindings[key][1]
                print(showInfo(speed_bound, angle_bound))
                # Re-show the help banner every 15th bound change.
                if (status == 14):
                    print(header_msg)
                status = (status + 1) % 15
            elif key == ' ' or key == 'k':
                target_speed = 0.0
                target_angle = 0.0
            elif key == 's':  # switch safety mode
                safe_mode = not safe_mode
                if safe_mode:
                    print("Switched to Safety Mode !")
                else:
                    print("Back to Standard Mode !")
            elif key == '\x03':  # cltr + C
                break

            # Command constraints: clamp to the configured bounds.
            target_speed = max(-speed_bound, min(speed_bound, target_speed))
            target_angle = max(-angle_bound, min(angle_bound, target_angle))

            # Publishing command
            #ackermann_msg.header.stamp = rospy.Time.now() # for future multi-cars applicaton
            ackermann_msg.drive.speed = target_speed
            ackermann_msg.drive.steering_angle = target_angle
            if safe_mode:
                pub_safe.publish(ackermann_msg)
            else:
                pub_cmd.publish(ackermann_msg)

    except Exception as e:
        print(e)

    finally:
        # Always command a full stop and restore the terminal on exit.
        ackermann_msg.drive.speed = 0
        ackermann_msg.drive.steering_angle = 0
        pub_cmd.publish(ackermann_msg)
        pub_safe.publish(ackermann_msg)
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
c392d36cc5c68370d275a885ceff96ecfac69bfd | 895 | py | Python | gym-dubins-airplane/gym_dubins_airplane/envs/config.py | hasanisci/gym-dubins-ac | fe205e75f27bf1b8a2858f5b9973ee09e43bfbce | [
"MIT"
] | 2 | 2021-02-06T20:01:56.000Z | 2021-07-12T13:00:49.000Z | gym-dubins-airplane/gym_dubins_airplane/envs/config.py | hasanisci/gym-dubins-ac | fe205e75f27bf1b8a2858f5b9973ee09e43bfbce | [
"MIT"
] | null | null | null | gym-dubins-airplane/gym_dubins_airplane/envs/config.py | hasanisci/gym-dubins-ac | fe205e75f27bf1b8a2858f5b9973ee09e43bfbce | [
"MIT"
] | 2 | 2021-02-14T15:39:15.000Z | 2021-07-12T13:00:53.000Z | import math
class Config:
    """Static configuration constants for the dubins-airplane environment.

    Fix: the last attribute line had a dataset/stats row fused onto it
    (`... / 10 | 21.829268 | 74 | ...`), which broke the module; the junk
    tail has been stripped.
    """
    G = 9.8           # gravitational acceleration (m/s^2)
    EPISODES = 1000
    # input dim
    window_width = 800   # pixels
    window_height = 800  # pixels
    window_z = 800       # pixels
    diagonal = 800       # this one is used to normalize dist_to_intruder
    tick = 30
    scale = 30
    # distance param
    # NOTE(review): `/` is true division under Python 3 (e.g. 555/30 = 18.5);
    # under Python 2 these would floor — confirm the intended interpreter.
    minimum_separation = 555 / scale
    NMAC_dist = 150 / scale
    horizon_dist = 4000 / scale
    initial_min_dist = 3000 / scale
    goal_radius = 600 / scale
    # speed
    min_speed = 50 / scale
    max_speed = 80 / scale
    d_speed = 5 / scale
    speed_sigma = 2 / scale
    position_sigma = 10 / scale
    # maximum steps of one episode
    max_steps = 1000
    # reward setting
    position_reward = 10. / 10.
    heading_reward = 10 / 10.
    collision_penalty = -5. / 10
    outside_penalty = -1. / 10
    step_penalty = -0.01 / 10
c3939f0937a9f440e452d4556c178d4679036846 | 726 | py | Python | exemplos/exemplo-aula-04-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | exemplos/exemplo-aula-04-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | exemplos/exemplo-aula-04-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | # Apresentação
print('Programa para identificar a que cargos eletivos')
print('uma pessoa pode se candidatar com base em sua idade')
print()
# Entradas
idade = int(input('Informe a sua idade: '))
# Processamento e saídas
print('Esta pessoa pode se candidatar a estes cargos:')
if (idade < 18):
print('- Nenhum cargo disponível')
if (idade >= 18):
print('- Vereador')
if (idade >= 21):
print('- Deputado Federal')
print('- Deputado Estadual ou Distrital')
print('- Prefeito ou Vice-Prefeito')
print('- Juiz de paz')
if (idade >= 30):
print('- Governador ou Vice-Governador')
if (idade >= 35):
print('- Presidente ou Vice-Presidente')
print('- Senador')
| 23.419355 | 61 | 0.634986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.605479 |
c395f4dc93b3cc9cf83be3db2fc2eff8ac8f3237 | 13,261 | py | Python | etl/parsers/etw/Microsoft_Windows_UAC_FileVirtualization.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_UAC_FileVirtualization.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_UAC_FileVirtualization.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-UAC-FileVirtualization
GUID : c02afc2b-e24e-4449-ad76-bcc2c2575ead
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2000_0(Etw):
    """Payload template for event id 2000, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2001_0(Etw):
    """Payload template for event id 2001, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2002_0(Etw):
    """Payload template for event id 2002, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2003_0(Etw):
    """Payload template for event id 2003, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2004_0(Etw):
    """Payload template for event id 2004, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2005, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2005_0(Etw):
    """Payload template for event id 2005, version 0: a single 32-bit error code."""
    pattern = Struct(
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2006, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2006_0(Etw):
    """Payload template for event id 2006, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2007, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2007_0(Etw):
    """Payload template for event id 2007, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2008, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2008_0(Etw):
    """Payload template for event id 2008, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2009, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2009_0(Etw):
    """Payload template for event id 2009, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2010, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2010_0(Etw):
    """Payload template for event id 2010, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2011, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2011_0(Etw):
    """Payload template for event id 2011, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2012, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2012_0(Etw):
    """Payload template for event id 2012, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2013, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2013_0(Etw):
    """Payload template for event id 2013, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2014, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2014_0(Etw):
    """Payload template for event id 2014, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2015, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2015_0(Etw):
    """Payload template for event id 2015, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2016, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2016_0(Etw):
    """Payload template for event id 2016, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2017, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2017_0(Etw):
    """Payload template for event id 2017, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2018, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2018_0(Etw):
    """Payload template for event id 2018, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2019, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2019_0(Etw):
    """Payload template for event id 2019, version 0: SID, file name, process
    image name (all length-prefixed), and a 32-bit error code."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "Error" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4000_0(Etw):
    """Payload template for event id 4000, version 0: SID, file name, process
    image name, plus create options, desired access, and IRP major function."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "CreateOptions" / Int32ul,
        "DesiredAccess" / Int32ul,
        "IrpMajorFunction" / Int8ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4001_0(Etw):
    """Payload template for event id 4001, version 0: SID, file name, process
    image name, and a length-prefixed target file name."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "TargetFileNameLength" / Int16ul,
        "TargetFileNameBuffer" / Bytes(lambda this: this.TargetFileNameLength)
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4002_0(Etw):
    """Payload template for event id 4002, version 0: SID, file name, and
    process image name (all length-prefixed)."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5000_0(Etw):
    """Payload template for event id 5000, version 0: SID, file name, process
    image name, create options, desired access, IRP major function, and an
    exclusions field."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "CreateOptions" / Int32ul,
        "DesiredAccess" / Int32ul,
        "IrpMajorFunction" / Int8ul,
        "Exclusions" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5002_0(Etw):
    """Payload template for event id 5002, version 0: SID, file name, process
    image name, plus create options and desired access."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
        "CreateOptions" / Int32ul,
        "DesiredAccess" / Int32ul
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5003_0(Etw):
    """Payload template for event id 5003, version 0: SID, file name, and
    process image name (all length-prefixed)."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
    )
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5004_0(Etw):
    """Payload template for event id 5004, version 0: SID, file name, and
    process image name (all length-prefixed)."""
    pattern = Struct(
        "Flags" / Int32ul,
        "SidLength" / Int32ul,
        "Sid" / Bytes(lambda this: this.SidLength),
        "FileNameLength" / Int16ul,
        "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
        "ProcessImageNameLength" / Int16ul,
        "ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
    )
| 37.780627 | 123 | 0.681623 | 10,482 | 0.790438 | 0 | 0 | 12,804 | 0.965538 | 0 | 0 | 3,626 | 0.273433 |
c3981f033f50367706655b5344334846814a59d8 | 4,073 | py | Python | fetchData.py | charlingli/automatic-ticket-assignment | a5fac001dd54ec9fc2af9877925109315131dc28 | [
"MIT"
] | null | null | null | fetchData.py | charlingli/automatic-ticket-assignment | a5fac001dd54ec9fc2af9877925109315131dc28 | [
"MIT"
] | null | null | null | fetchData.py | charlingli/automatic-ticket-assignment | a5fac001dd54ec9fc2af9877925109315131dc28 | [
"MIT"
] | null | null | null | import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch
import json
import sys
import datetime
from operator import itemgetter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Index / doc-type names used in the local Elasticsearch instance.
# NOTE(review): explicit doc types (`doc_type=`) are an ES 5.x/6.x-era API;
# confirm the target Elasticsearch version.
INCIDENT_INDEX = "incident"
INCIDENT_TYPE = "incident"
RESOURCE_INDEX = "resource"
RESOURCE_TYPE = "resource"
LOG_INDEX = "log"
LOG_TYPE = "log"
# Client for the local, unauthenticated Elasticsearch node.
es = Elasticsearch([{"host": "localhost", "port": 9200}])
# Recreate the incident index from scratch on every run, so it only holds
# the documents fetched below.
if es.indices.exists(index="incident"):
    es.indices.delete(index="incident")
incident_mapping = {
    "incident": {
        "properties": {
            "number": {"type": "text"},
            "sys_id": {"type": "text"},
            "assignment_group": {
                "properties": {
                    "display_value": {"type": "keyword"},
                    "value": {"type": "keyword"}
                }
            },
            "assigned_to":{
                "properties": {
                    "display_value": {"type": "keyword"},
                    "value": {"type": "keyword"}
                }
            },
            # Matches the "yyyy-MM-dd HH:mm:ss" strings indexed below.
            "sys_updated_on":{"type": "date", "format": "yyyy-MM-dd HH:mm:ss"}
        }
    }
}
es.indices.create(INCIDENT_INDEX, body={"mappings": incident_mapping})
# The resource index is created only once and kept across runs (unlike the
# incident index above, which is rebuilt every time).
if not es.indices.exists(index="resource"):
    resource_mapping = {
        "resource": {
            "properties": {
                "team_name": {"type": "text"},
                "employee": {
                    "properties": {
                        "name": {"type": "keyword"},
                        "start_time": {"type": "date", "format": "hh:mm"},
                        "end_time": {"type": "date", "format": "hh:mm"},
                        "workload": {"type": "integer"}
                    }
                }
            }
        }
    }
    es.indices.create(RESOURCE_INDEX, body={"mappings": resource_mapping})
# The log index is likewise created only if missing and kept across runs.
if not es.indices.exists(index="log"):
    log_mapping = {
        "log": {
            "properties": {
                "number": {"type": "keyword"},
                "assignment_group": {"type": "keyword"},
                "assigned_to": {"type": "keyword"},
                "sys_updated_on": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"}
            }
        }
    }
    es.indices.create(LOG_INDEX, body={"mappings": log_mapping})
# Find the newest sys_updated_on already indexed, so only newer records are
# fetched from ServiceNow (incremental sync).
maxTime = es.search(index=INCIDENT_INDEX, body={
    "aggs" : {
        "max_val" : { "max" : { "field" : "sys_updated_on" } }
    }
})
#### TODO Change ServiceNow instance
SN_REST_BASE_URL = "https://devXXXXX.service-now.com"
SN_REST_SUFFIX_URL = "/api/now/v1/table/incident"
# NOTE(review): in ES >= 7, hits.total is an object ({"value": ..}), not an
# int — confirm this comparison against the deployed ES version.
if maxTime["hits"]["total"] != 0:
    maxTime = maxTime["aggregations"]["max_val"]["value_as_string"]
    # Replace the space between date and time with '+' for URL encoding.
    latestTime = maxTime[:10] + "+" + maxTime[11:]
    SN_REST_PARAMS_URL = "?sysparm_display_value=true&sysparm_query=sys_updated_on>=" + latestTime
else:
    SN_REST_PARAMS_URL = "?sysparm_display_value=true"
URL = SN_REST_BASE_URL + SN_REST_SUFFIX_URL + SN_REST_PARAMS_URL
headers = {"Content-Type":"application/json", "Accept":"application/json"}
# SECURITY(review): verify=False disables TLS certificate validation, and the
# credentials are hard-coded — move both to configuration.
response = requests.get(URL, verify = False, auth = HTTPBasicAuth("admin", "Test1234"))
rawData = response.json()["result"]
# Project each ServiceNow record down to the fields the incident mapping
# declares, and index it with its position as the document id.
for count, row in enumerate(rawData):
    cleanData = {key:row[key] for key in ("number", "sys_id", "assignment_group", "assigned_to", "sys_updated_on")}
    # ServiceNow returns "" for unset references; normalize to the object
    # shape the mapping expects so indexing does not fail.
    if cleanData["assignment_group"] == "":
        cleanData["assignment_group"] = {"display_value": "", "link": ""}
    if cleanData["assigned_to"] == "":
        cleanData["assigned_to"] = {"display_value": "", "link": ""}
    es.index(index=INCIDENT_INDEX, doc_type=INCIDENT_TYPE, id=count, body=cleanData)
# Seed data for the resource index (loading is currently disabled pending a
# first-run guard — see the TODO below).
# Fixes: the final print line had a dataset/stats row fused onto it, and the
# file handle was leaked because its close() was commented out.
jsonFile = open("data/resource.json", "r")
#### TODO Add a firstrun condition
# for index, data in enumerate(jsonFile):
#     postData = json.loads(data)
#     # print(postData["resource"]["employee"]["workload"])
#     es.index(index=RESOURCE_INDEX, doc_type=RESOURCE_TYPE, id=index, body=postData)
jsonFile.close()
print('automatic-ticket-assignment: Retrieved Updated Ticket Data from ServiceNow')
c39bd72623a02f47fb813a60722b70b8cc5a5671 | 10,080 | py | Python | Code/3. Baseline_LSTM.py | davidpaulkim/Stock-price-prediction-using-GAN | 18ae7335401ed3b5d8012026d8c8e36c83439d59 | [
"MIT"
] | 63 | 2021-03-01T08:39:17.000Z | 2022-03-31T10:44:58.000Z | Code/3. Baseline_LSTM.py | davidpaulkim/Stock-price-prediction-using-GAN | 18ae7335401ed3b5d8012026d8c8e36c83439d59 | [
"MIT"
] | 11 | 2021-02-25T23:13:13.000Z | 2022-02-20T05:14:37.000Z | Code/3. Baseline_LSTM.py | davidpaulkim/Stock-price-prediction-using-GAN | 18ae7335401ed3b5d8012026d8c8e36c83439d59 | [
"MIT"
] | 35 | 2021-03-13T21:46:35.000Z | 2022-03-18T08:24:30.000Z | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow
from numpy import *
from math import sqrt
from pandas import *
from datetime import datetime, timedelta
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Bidirectional
from tensorflow.keras.layers import BatchNormalization, Embedding, TimeDistributed, LeakyReLU
from tensorflow.keras.layers import LSTM, GRU
from tensorflow.keras.optimizers import Adam
from matplotlib import pyplot
from pickle import load
# Load the preprocessed feature/target arrays produced by the data-prep step.
X_train = np.load("X_train.npy", allow_pickle=True)
y_train = np.load("y_train.npy", allow_pickle=True)
X_test = np.load("X_test.npy", allow_pickle=True)
y_test = np.load("y_test.npy", allow_pickle=True)
# NOTE(review): the yc_* arrays are loaded but never used in this baseline.
yc_train = np.load("yc_train.npy", allow_pickle=True)
yc_test = np.load("yc_test.npy", allow_pickle=True)
#Parameters
LR = 0.001
BATCH_SIZE = 64
N_EPOCH = 50
# Network dimensions inferred from the data: X is (samples, timesteps, features).
input_dim = X_train.shape[1]
feature_size = X_train.shape[2]
output_dim = y_train.shape[1]
def basic_lstm(input_dim, feature_size):
    """Build and train the baseline bidirectional-LSTM price regressor.

    Relies on the module-level arrays (X_train, y_train, X_test, y_test)
    and hyper-parameters (LR, BATCH_SIZE, N_EPOCH, output_dim).

    Parameters
    ----------
    input_dim : int
        Number of timesteps per input sample.
    feature_size : int
        Number of features per timestep.

    Returns
    -------
    tensorflow.keras.Model
        The fitted model; train/validation loss curves are also plotted.
    """
    model = Sequential()
    model.add(Bidirectional(LSTM(units= 128), input_shape=(input_dim, feature_size)))
    model.add(Dense(64))
    model.add(Dense(units=output_dim))
    # NOTE(review): `lr` is the legacy Adam keyword; newer Keras versions use
    # `learning_rate` - confirm against the pinned TensorFlow version.
    model.compile(optimizer=Adam(lr = LR), loss='mse')
    # shuffle=False keeps the temporal order of the training windows.
    history = model.fit(X_train, y_train, epochs=N_EPOCH, batch_size=BATCH_SIZE, validation_data=(X_test, y_test),
                        verbose=2, shuffle=False)
    pyplot.plot(history.history['loss'], label='train')
    pyplot.plot(history.history['val_loss'], label='validation')
    pyplot.legend()
    pyplot.show()
    return model
# Train the baseline model, persist it, and report test RMSE.
model = basic_lstm(input_dim, feature_size)
model.save('LSTM_3to1.h5')
print(model.summary())
yhat = model.predict(X_test, verbose=0)
#print(yhat)
# This RMSE is in *scaled* units; the plotting helpers below report RMSE in
# rescaled price units.
rmse = sqrt(mean_squared_error(y_test, yhat))
print(rmse)
# %% --------------------------------------- Plot the TRAIN result -----------------------------------------------------------------
## TRAIN DATA
def plot_traindataset_result(X_train, y_train):
    """Plot real vs. predicted prices on the training set and return the RMSE.

    Uses the module-level ``model`` and the fitted ``y_scaler`` pickle to map
    the scaled network output back to price units.

    Parameters
    ----------
    X_train : ndarray of shape (samples, input_dim, feature_size)
        Scaled training features.
    y_train : ndarray of shape (samples, output_dim)
        Scaled training targets.

    Returns
    -------
    float
        RMSE between the per-date mean real and predicted prices.
    """
    train_yhat = model.predict(X_train, verbose=0)
    # Fix: close the pickle handle (the original leaked it) and drop the
    # X_scaler load, which was never used.
    with open('y_scaler.pkl', 'rb') as f:
        y_scaler = load(f)
    train_predict_index = np.load("train_predict_index.npy", allow_pickle=True)

    rescaled_real_y = y_scaler.inverse_transform(y_train)
    rescaled_predicted_y = y_scaler.inverse_transform(train_yhat)

    # One column per forecast window, rows indexed by the predicted dates.
    predict_result = pd.DataFrame()
    for i in range(rescaled_predicted_y.shape[0]):
        y_predict = pd.DataFrame(rescaled_predicted_y[i], columns=["predicted_price"],
                                 index=train_predict_index[i:i + output_dim])
        predict_result = pd.concat([predict_result, y_predict], axis=1, sort=False)

    real_price = pd.DataFrame()
    for i in range(rescaled_real_y.shape[0]):
        # Renamed from `y_train`, which rebound the function parameter.
        y_real = pd.DataFrame(rescaled_real_y[i], columns=["real_price"],
                              index=train_predict_index[i:i + output_dim])
        real_price = pd.concat([real_price, y_real], axis=1, sort=False)

    # Overlapping windows predict the same date several times; average them.
    predict_result['predicted_mean'] = predict_result.mean(axis=1)
    real_price['real_mean'] = real_price.mean(axis=1)

    # Plot the predicted result
    plt.figure(figsize=(16, 8))
    plt.plot(real_price["real_mean"])
    plt.plot(predict_result["predicted_mean"], color='r')
    plt.xlabel("Date")
    plt.ylabel("Stock price")
    plt.legend(("Real price", "Predicted price"), loc="upper left", fontsize=16)
    plt.title("The result of Training", fontsize=20)
    plt.show()

    # Calculate RMSE in price units.
    predicted = predict_result["predicted_mean"]
    real = real_price["real_mean"]
    RMSE = np.sqrt(mean_squared_error(predicted, real))
    #print('-- Train RMSE -- ', RMSE)
    return RMSE
# %% --------------------------------------- Plot the TEST result -----------------------------------------------------------------
def plot_testdataset_result(X_test, y_test, cutoff_date='2020-01-01'):
    """Plot real vs. predicted prices on the pre-cutoff test set; return RMSE.

    Parameters
    ----------
    X_test, y_test : ndarray
        Scaled test features and targets.
    cutoff_date : str, optional
        ISO date string; only predictions with index strictly before this
        date are plotted and scored. Generalized from the previously
        hard-coded '2020-01-01' constant (default keeps old behaviour).

    Returns
    -------
    float
        RMSE between the per-date mean real and predicted prices.
    """
    test_yhat = model.predict(X_test, verbose=0)
    # Fix: close the pickle handle (the original leaked it).
    with open('y_scaler.pkl', 'rb') as f:
        y_scaler = load(f)
    test_predict_index = np.load("test_predict_index.npy", allow_pickle=True)

    rescaled_real_y = y_scaler.inverse_transform(y_test)
    rescaled_predicted_y = y_scaler.inverse_transform(test_yhat)

    # One column per forecast window, rows indexed by the predicted dates.
    predict_result = pd.DataFrame()
    for i in range(rescaled_predicted_y.shape[0]):
        y_predict = pd.DataFrame(rescaled_predicted_y[i], columns=["predicted_price"],
                                 index=test_predict_index[i:i + output_dim])
        predict_result = pd.concat([predict_result, y_predict], axis=1, sort=False)

    real_price = pd.DataFrame()
    for i in range(rescaled_real_y.shape[0]):
        # Renamed from `y_train` (a misleading name in a test-set helper).
        y_real = pd.DataFrame(rescaled_real_y[i], columns=["real_price"],
                              index=test_predict_index[i:i + output_dim])
        real_price = pd.concat([real_price, y_real], axis=1, sort=False)

    # Overlapping windows predict the same date several times; average them.
    predict_result['predicted_mean'] = predict_result.mean(axis=1)
    real_price['real_mean'] = real_price.mean(axis=1)

    # Restrict the evaluation to dates before the cutoff.
    predict_result = predict_result.loc[predict_result.index < cutoff_date]
    real_price = real_price.loc[real_price.index < cutoff_date]
    print(predict_result.tail(10))

    # Plot the predicted result
    plt.figure(figsize=(16, 8))
    plt.plot(real_price["real_mean"])
    plt.plot(predict_result["predicted_mean"], color='r')
    plt.xlabel("Date")
    plt.ylabel("Stock price")
    plt.legend(("Real price", "Predicted price"), loc="upper left", fontsize=16)
    plt.title("The result of Testing", fontsize=20)
    plt.show()

    # Calculate RMSE in price units.
    predicted = predict_result["predicted_mean"]
    real = real_price["real_mean"]
    RMSE = np.sqrt(mean_squared_error(predicted, real))
    #print('-- Test RMSE -- ', RMSE)
    return RMSE
def plot_testdataset_with2020_result(X_test, y_test):
    """Plot real vs. predicted prices on the *full* test set (2020 included)
    and return the RMSE in price units.

    Unlike ``plot_testdataset_result``, no date cutoff is applied.
    """
    # NOTE(review): batch_size=1 here (second positional argument) while the
    # other helpers use the default batch size - confirm this is intended.
    test_yhat = model.predict(X_test, 1, verbose=0)
    y_scaler = load(open('y_scaler.pkl', 'rb'))
    test_predict_index = np.load("test_predict_index.npy", allow_pickle=True)

    rescaled_real_y = y_scaler.inverse_transform(y_test)
    rescaled_predicted_y = y_scaler.inverse_transform(test_yhat)

    # One column per forecast window, rows indexed by the predicted dates.
    predict_result = pd.DataFrame()
    for i in range(rescaled_predicted_y.shape[0]):
        y_predict = pd.DataFrame(rescaled_predicted_y[i], columns=["predicted_price"],
                                 index=test_predict_index[i:i + output_dim])
        predict_result = pd.concat([predict_result, y_predict], axis=1, sort=False)

    real_price = pd.DataFrame()
    for i in range(rescaled_real_y.shape[0]):
        y_train = pd.DataFrame(rescaled_real_y[i], columns=["real_price"],
                               index=test_predict_index[i:i + output_dim])
        real_price = pd.concat([real_price, y_train], axis=1, sort=False)

    # Overlapping windows predict the same date several times; average them.
    predict_result['predicted_mean'] = predict_result.mean(axis=1)
    real_price['real_mean'] = real_price.mean(axis=1)

    # Plot the predicted result
    plt.figure(figsize=(16, 8))
    plt.plot(real_price["real_mean"])
    plt.plot(predict_result["predicted_mean"], color='r')
    plt.xlabel("Date")
    plt.ylabel("Stock price")
    plt.legend(("Real price", "Predicted price"), loc="upper left", fontsize=16)
    plt.title("The result of Testing with 2020", fontsize=20)
    plt.show()

    # Calculate RMSE
    predicted = predict_result["predicted_mean"]
    real = real_price["real_mean"]
    RMSE = np.sqrt(mean_squared_error(predicted, real))
    #print('-- Test RMSE with 2020 -- ', RMSE)
    return RMSE
# Run the evaluation/plotting helpers and report RMSE in price units.
train_RMSE = plot_traindataset_result(X_train, y_train)
print("----- Train_RMSE_LSTM -----", train_RMSE)
test_RMSE = plot_testdataset_result(X_test, y_test)
print("----- Test_RMSE_LSTM -----", test_RMSE)
test_with2020_RMSE = plot_testdataset_with2020_result(X_test, y_test)
print("----- Test_RMSE_LSTM_with2020 -----", test_with2020_RMSE)
'''def plot_last3_testdataset_result(X_test, y_test):
test_yhat = model.predict(X_test[-1].reshape(1, X_test[-1].shape[0], X_test[-1].shape[1]), verbose=0)
X_scaler = load(open('X_scaler.pkl', 'rb'))
y_scaler = load(open('y_scaler.pkl', 'rb'))
test_predict_index = np.load("test_predict_index.npy", allow_pickle=True)
rescaled_real_y = y_scaler.inverse_transform(y_test[-32:])
rescaled_predicted_y = y_scaler.inverse_transform(test_yhat)
#print("----- test dataset rescaled predicted price -----", rescaled_predicted_y)
#print("----- test dataset SHAPE rescaled predicted price -----", rescaled_predicted_y.shape)
## Real price
real_price = pd.DataFrame()
for i in range(rescaled_real_y.shape[0]):
test_predict_index = test_predict_index[-34:]
y_train = pd.DataFrame(rescaled_real_y[i], columns=["real_price"],
index=test_predict_index[i:i + output_dim])
real_price = pd.concat([real_price, y_train], axis=1, sort=False)
## Predicted price
predict_result = pd.DataFrame()
y_predict = pd.DataFrame(rescaled_predicted_y[0], columns=["predicted_price"], index=test_predict_index[-3:])
predict_result = pd.concat([predict_result, y_predict], axis=1, sort=False)
predict_result['predicted_mean'] = predict_result.mean(axis=1)
real_price['real_mean'] = real_price.mean(axis=1)
#
# Plot the predicted result
plt.figure(figsize=(16, 8))
plt.plot(real_price["real_mean"])
plt.plot(predict_result["predicted_mean"], color='r')
plt.xlabel("Date")
plt.ylabel("Stock price")
#plt.ylim(0, 110)
plt.legend(("Real price", "Predicted price"), loc="upper left", fontsize=16)
plt.title("The result of the last set of testdata", fontsize=20)
plt.show()
# Calculate RMSE
predicted = predict_result["predicted_mean"]
real = real_price["real_mean"]
For_MSE = pd.concat([predicted, real], axis=1)
RMSE = np.sqrt(mean_squared_error(predicted, real[-3:]))
#print('-- test dataset RMSE -- ', RMSE)
return RMSE'''
| 36.521739 | 133 | 0.681151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,609 | 0.358036 |
c39c0fa829d146f1ed16fd72d73c49eeda2c3040 | 1,323 | py | Python | Algorithms_medium/0081. Search in Rotated Sorted Array II.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/0081. Search in Rotated Sorted Array II.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_medium/0081. Search in Rotated Sorted Array II.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
0081. Search in Rotated Sorted Array II
Medium
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2]).
You are given a target value to search. If found in the array return true, otherwise return false.
Example 1:
Input: nums = [2,5,6,0,0,1,2], target = 0
Output: true
Example 2:
Input: nums = [2,5,6,0,0,1,2], target = 3
Output: false
Follow up:
This is a follow up problem to Search in Rotated Sorted Array, where nums may contain duplicates.
Would this affect the run-time complexity? How and why?
"""
class Solution:
    def search(self, nums: List[int], target: int) -> bool:
        """Return True if ``target`` occurs in a rotated sorted array.

        The array may contain duplicates, so when ``nums[mid] == nums[hi]``
        it is impossible to tell which half is sorted and the right bound is
        shrunk by one (worst case O(n), average O(log n)).
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] == target:
                return True
            if nums[mid] == nums[hi]:
                # Ambiguous: duplicates hide the pivot; discard one element.
                hi -= 1
            elif nums[mid] > nums[hi]:
                # Left half [lo, mid) is sorted.
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # Right half (mid, hi] is sorted.
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False
c39cf6425ae750feb018090f70a0e62153124b4d | 4,142 | py | Python | augmentation/main.py | LinaGamer15/withBears | 4714179f8336df726affdf5fa1db5becd2058d33 | [
"MIT"
] | null | null | null | augmentation/main.py | LinaGamer15/withBears | 4714179f8336df726affdf5fa1db5becd2058d33 | [
"MIT"
] | null | null | null | augmentation/main.py | LinaGamer15/withBears | 4714179f8336df726affdf5fa1db5becd2058d33 | [
"MIT"
] | null | null | null | import pathlib, typing, random, xml.etree.ElementTree as ET
from itertools import chain
from typing import List, Tuple
from PIL import Image, ImageOps
def split_background(background: "Image.Image") -> "list[Image.Image]":
    """Slice *background* into non-overlapping 416x416 pixel tiles.

    Fix: the original bound ``range(0, dim - 416, 416)`` dropped the last
    tile whenever the dimension was an exact multiple of 416 (e.g. an
    832-px-wide image yielded one column instead of two). A tile starting at
    ``x`` is valid while ``x + 416 <= width``, i.e. ``x <= width - 416``, so
    the exclusive bound must be ``dim - 416 + 1``.

    Leftover margins narrower than 416 px are still discarded.
    """
    tile = 416
    res = []
    for x in range(0, background.width - tile + 1, tile):
        for y in range(0, background.height - tile + 1, tile):
            res.append(background.crop((x, y, x + tile, y + tile)))
    return res
random.seed(42)
# Load raw images
cur = pathlib.Path(__file__).resolve().parent
backgrounds = [Image.open(i) for i in (cur/'backgrounds').iterdir()]
bears = [Image.open(i) for i in (cur/'bears').iterdir()]
print("Images loaded")
sliced = []
for background in backgrounds:
sliced.extend(split_background(background))
background.close()
backgrounds = sliced
print("Backgrounds sliced")
# Process images
class BearData:
    """Axis-aligned bounding box of one bear pasted onto a background tile.

    Coordinates are in pixels: ``(xmin, ymin)`` is the top-left corner and
    ``(xmax, ymax)`` the bottom-right corner.
    """

    def __init__(self, xmin: float, ymin: float, xmax: float, ymax: float) -> None:
        self.xmin, self.ymin = xmin, ymin
        self.xmax, self.ymax = xmax, ymax
def commit_transposes(image: "Image.Image") -> "list[Image.Image]":
    """Return 16 axis-aligned orientations of *image*.

    Each of the four 90-degree rotations is combined with identity, vertical
    flip, horizontal mirror, and flip+mirror. The 16 results contain
    geometric duplicates (flip+mirror of an image equals its 180-degree
    rotation), so the augmented set is not all unique.
    """
    # expand=True grows the canvas so non-square images are not cropped.
    rotations = [
        image,
        image.rotate(90, expand=True),
        image.rotate(180, expand=True),
        image.rotate(270, expand=True)
    ]
    res = chain(*map(lambda im: [
        im,
        ImageOps.flip(im),
        ImageOps.mirror(im),
        ImageOps.flip(ImageOps.mirror(im)),
    ], rotations))
    return list(res)
def gen_xml(file: str, bears: "list[BearData]", width: int, height: int) -> ET.ElementTree:
    """Build a Pascal-VOC-style annotation document for one generated sample.

    Parameters
    ----------
    file : str
        Base name of the image without extension; ``<filename>`` becomes
        ``file + ".png"``.
    bears : list of BearData
        Bounding boxes; one ``<object>`` element is emitted per box.
    width, height : int
        Pixel dimensions of the annotated image.

    Returns
    -------
    xml.etree.ElementTree.ElementTree
        The annotation tree, ready to be serialized with ``.write()``.
    """
    root = ET.Element("annotation")
    ET.SubElement(root, "folder").text = ""
    ET.SubElement(root, "filename").text = file + '.png'

    source = ET.SubElement(root, "source")
    ET.SubElement(source, "database").text = "Unknown"
    ET.SubElement(source, "annotation").text = "Unknown"
    ET.SubElement(source, "image").text = "Unknown"

    size = ET.SubElement(root, "size")
    ET.SubElement(size, "width").text = str(width)
    ET.SubElement(size, "height").text = str(height)
    ET.SubElement(size, "depth")

    ET.SubElement(root, "segmented").text = "0"

    for bear in bears:
        # Fix: renamed the loop variable from `object`, which shadowed the
        # builtin of the same name.
        obj = ET.SubElement(root, "object")
        ET.SubElement(obj, "name").text = "polar-bear"
        ET.SubElement(obj, "truncated").text = "0"
        ET.SubElement(obj, "occluded").text = "0"
        ET.SubElement(obj, "difficult").text = "0"
        bndbox = ET.SubElement(obj, "bndbox")
        ET.SubElement(bndbox, "xmin").text = str(bear.xmin)
        ET.SubElement(bndbox, "ymin").text = str(bear.ymin)
        ET.SubElement(bndbox, "xmax").text = str(bear.xmax)
        ET.SubElement(bndbox, "ymax").text = str(bear.ymax)
    return ET.ElementTree(root)
def add_bears(background: "Image.Image", bears: "list[Image.Image]") -> "tuple[Image.Image, list[BearData]]":
    """Paste each bear at a uniformly random position onto a copy of
    *background*, returning the composite and one bounding box per bear.

    CAUTION: ``random.randint(0, width - bear.width)`` raises ``ValueError``
    if a bear is wider or taller than the background tile.
    """
    res_image = background.copy()
    res_data = []
    for bear in bears:
        x = random.randint(0, res_image.width - bear.width)
        y = random.randint(0, res_image.height - bear.height)
        # paste() without a mask overwrites the whole rectangle, including
        # any previously pasted bear it overlaps.
        res_image.paste(bear, (x, y))
        res_data.append(BearData(x, y, x + bear.width, y + bear.height))
    return (res_image, res_data)
# Expand every bear crop into all 16 orientations (see commit_transposes).
bears = list(chain(*map(commit_transposes, bears)))
print("Bear images generated")

print("Background transposing started")
for background in backgrounds:
    for background in commit_transposes(background):
        # Paste 1-3 randomly chosen bears; fewer bears are more likely.
        n_bears = random.choices([1, 2, 3], [0.5, 0.35, 0.15])[0]
        res_image, bear_datas = add_bears(
            background,
            [bears[random.randint(0, len(bears) - 1)] for _ in range(n_bears)],
        )

        # Saving. Fix: the computed `filename` was never used - the f-strings
        # contained a literal placeholder, so every sample overwrote the same
        # file. Name each sample by the current count of entries in result/.
        # NOTE(review): iterdir() also counts the Annotations/ subdirectory,
        # so the counter is offset by any non-image entries - confirm.
        filename = str(len([f for f in (cur / "result").iterdir()]))
        res_image.save(cur / f"result/{filename}.png", 'png')
        xml_tree = gen_xml(filename, bear_datas, res_image.width, res_image.height)
        xml_tree.write(
            cur / f"result/Annotations/{filename}.xml",
            "UTF8",
            xml_declaration=False,
            short_empty_elements=False,
        )

        # Cleanup: release image handles as soon as each sample is written.
        background.close()
        res_image.close()

for bear in bears:
    bear.close()
print("Done!")
c39d73486233c045183acbe7040c08d02429c13d | 15,708 | py | Python | HinetPy/win32.py | seisman/HinetPy | d27f5a6e5f0dff3832076e07cfeec11946c14ff4 | [
"MIT"
] | 54 | 2017-07-31T12:50:36.000Z | 2022-03-20T07:42:11.000Z | HinetPy/win32.py | seisman/HinetPy | d27f5a6e5f0dff3832076e07cfeec11946c14ff4 | [
"MIT"
] | 32 | 2017-07-18T07:29:01.000Z | 2022-02-17T13:22:32.000Z | HinetPy/win32.py | seisman/HinetPy | d27f5a6e5f0dff3832076e07cfeec11946c14ff4 | [
"MIT"
] | 24 | 2017-04-17T15:35:20.000Z | 2022-03-23T09:41:49.000Z | """
Processing data in win32 format.
"""
import glob
import logging
import math
import os
import subprocess
import tempfile
from fnmatch import fnmatch
from multiprocessing import Pool, cpu_count
from subprocess import DEVNULL, PIPE, Popen
# Setup the logger
FORMAT = "[%(asctime)s] %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
class Channel:
    """Metadata for one seismic channel of a Hi-net channel table.

    Groups the channel identity (``id``, ``name``, ``component``), the
    station location (``latitude``, ``longitude``) and the instrument
    response parameters (``unit``, ``gain``, ``damping``, ``period``,
    ``preamplification``, ``lsb_value``).
    """

    def __init__(
        self,
        id=None,
        name=None,
        component=None,
        latitude=None,
        longitude=None,
        unit=None,
        gain=None,
        damping=None,
        period=None,
        preamplification=None,
        lsb_value=None,
    ):
        """Initialize a channel.

        Parameters
        ----------
        id: str
            Channel ID.
        name: str
            Station name.
        component: str
            Component name (``U|N|E``).
        latitude: float
            Station latitude in degrees.
        longitude: float
            Station longitude in degrees.
        unit: str
            Unit of the recorded data (``m``, ``m/s``, ``m/s/s``, ``rad``).
        gain: float
            Sensor sensitivity.
        damping: float
            Damping constant of the sensor.
        period: float
            Natural period of the seismometer.
        preamplification: float
            Preamplification value.
        lsb_value: float
            LSB (least-significant-bit) value.
        """
        # Identity.
        self.id = id
        self.name = name
        self.component = component
        # Location.
        self.latitude = latitude
        self.longitude = longitude
        # Instrument response.
        self.unit = unit
        self.gain = gain
        self.damping = damping
        self.period = period
        self.preamplification = preamplification
        self.lsb_value = lsb_value
def extract_sac(
    data,
    ctable,
    suffix="SAC",
    outdir=".",
    pmax=8640000,
    filter_by_id=None,
    filter_by_name=None,
    filter_by_component=None,
    with_pz=False,
    processes=0,
):
    """Extract data as SAC format files.

    Parameters
    ----------
    data: str
        win32 file to be processed.
    ctable: str
        Channel table file.
    suffix: str
        Suffix of output SAC files. Defaults to ``SAC``.
    outdir: str
        Output directory. Defaults to current directory.
    pmax: int
        Maximum number of data points. Defaults to 8640000. If the input data
        is longer than one day, you have to increase ``pmax``.
    filter_by_id: list of str or wildcard
        Filter channels by ID.
    filter_by_name: list of str or wildcard
        Filter channels by name.
    filter_by_component: list of str or wildcard
        Filter channels by component.
    with_pz: bool
        Extract PZ files at the same time.
        PZ file has default suffix ``.SAC_PZ``.
    processes: int
        Number of parallel processes to speed up data extraction.
        Use all processes by default.

    Note
    ----
    ``win2sac`` removes sensitivity from waveform, then multiply by 1.0e9.
    Thus the extracted SAC files are velocity in nm/s, or acceleration in nm/s/s.

    Examples
    --------
    >>> extract_sac("0101_201001010000_5.cnt", "0101_20100101.ch")

    Extract all channel with specified SAC suffix and output directory:

    >>> extract_sac(
    ...     "0101_201001010000_5.cnt",
    ...     "0101_20100101.ch",
    ...     suffix="",
    ...     outdir="20100101000",
    ... )

    Extract only specified channels:

    >>> extract_sac(
    ...     "0101_201001010000_5.cnt",
    ...     "0101_20100101.ch",
    ...     filter_by_name="N.NA*",
    ...     filter_by_component="[NE]",
    ... )
    """
    if not (data and ctable):
        logger.error("data or ctable is `None'. Data requests may fail. Skipped.")
        return

    # Parse the channel table, then narrow the channel list if any filter
    # keyword was given.
    channels = _get_channels(ctable)
    logger.info(f"{len(channels)} channels found in {ctable}.")
    if filter_by_id or filter_by_name or filter_by_component:
        channels = _filter_channels(
            channels, filter_by_id, filter_by_name, filter_by_component
        )
    logger.info(f"{len(channels)} channels to be extracted.")

    if not os.path.exists(outdir):
        os.makedirs(outdir, exist_ok=True)

    # One worker per channel task; _get_processes() caps the pool size.
    with Pool(processes=_get_processes(processes)) as pool:
        # win2sac_32 needs an on-disk parameter file; use a throwaway
        # NamedTemporaryFile that is removed when the block exits.
        with tempfile.NamedTemporaryFile() as ft:
            _write_winprm(ctable, ft.name)
            args = [(data, ch, suffix, outdir, ft.name, pmax) for ch in channels]
            sacfiles = pool.starmap(_extract_channel, args)
            # _extract_channel returns None for channels with no data.
            logger.info(
                "{} SAC data successfully extracted.".format(
                    len(sacfiles) - sacfiles.count(None)
                )
            )

        if with_pz:
            # "SAC_PZ" here is hardcoded.
            args = [(ch, "SAC_PZ", outdir) for ch in channels]
            pzfiles = pool.starmap(_extract_sacpz, args)
            logger.info(
                "{} SAC PZ files successfully extracted.".format(
                    len(pzfiles) - pzfiles.count(None)
                )
            )
def _get_processes(procs):
"""Choose the best number of processes."""
cpus = cpu_count()
if cpus == 1:
return cpus
if not 0 < procs < cpus:
return cpus - 1
return procs
def extract_pz(
    ctable,
    suffix="SAC_PZ",
    outdir=".",
    keep_sensitivity=False,
    filter_by_chid=None,
    filter_by_name=None,
    filter_by_component=None,
):
    """Extract instrumental response in SAC PZ format from channel table.

    .. warning::

       Only works for instrumental responses of Hi-net network.

       RESP files of F-net network can be downloaded from
       `F-net website <http://www.fnet.bosai.go.jp/st_info/response.php?LANG=en>`_.

    Parameters
    ----------
    ctable: str
        Channel table file.
    suffix: str
        Suffix of SAC PZ files. Defaults to ``SAC_PZ``.
    outdir: str
        Output directory. Defaults to current directory.
    keep_sensitivity: bool
        win2sac automatically removes sensitivity from waveform data
        during win32 format to SAC format conversion.
        So the generated polezero file should omit the sensitivity.
    filter_by_chid: list of str or wildcard
        Filter channels by channel ID.
        (NOTE: the same option is named ``filter_by_id`` in ``extract_sac``.)
    filter_by_name: list of str or wildcard
        Filter channels by name.
    filter_by_component: list of str or wildcard
        Filter channels by component.

    Examples
    --------
    >>> extract_pz("0101_20100101.ch")

    Extract all channel with specified suffix and output directory:

    >>> extract_pz("0101_20100101.ch", suffix="", outdir="20100101000")

    Extract only specified channels:

    >>> extract_pz(
    ...     "0101_20100101.ch", filter_by_name="N.NA*", filter_by_component="[NE]"
    ... )
    """
    if not ctable:
        logger.error("ctable is `None'. Data requests may fail. Skipped.")
        return

    channels = _get_channels(ctable)
    if filter_by_chid or filter_by_name or filter_by_component:
        channels = _filter_channels(
            channels, filter_by_chid, filter_by_name, filter_by_component
        )

    if not os.path.exists(outdir):
        os.makedirs(outdir, exist_ok=True)

    # PZ generation is cheap, so no multiprocessing here (unlike extract_sac).
    for channel in channels:
        _extract_sacpz(
            channel, suffix=suffix, outdir=outdir, keep_sensitivity=keep_sensitivity
        )
def _get_channels(ctable):
    """Get channel information from channel table file.

    Parameters
    ----------
    ctable: str
        Channel table file.

    Returns
    -------
    list of Channel
        One ``Channel`` per parseable data row; rows with non-numeric
        fields are logged and skipped.
    """
    channels = []
    with open(ctable, "r") as f:
        for line in f:
            # skip blank lines and comment lines
            if not line.strip() or line.strip().startswith("#"):
                continue
            items = line.split()
            try:
                # Whitespace-separated column layout of a Hi-net channel
                # table; the indices below follow that fixed convention.
                channel = Channel(
                    id=items[0],
                    name=items[3],
                    component=items[4],
                    latitude=float(items[13]),
                    longitude=float(items[14]),
                    unit=items[8],
                    gain=float(items[7]),
                    damping=float(items[10]),
                    period=float(items[9]),
                    preamplification=float(items[11]),
                    lsb_value=float(items[12]),
                )
                channels.append(channel)
            except ValueError as e:
                # A malformed row is reported but does not abort the parse.
                logger.warning(
                    "Error in parsing channel information for %s.%s (%s). Skipped.",
                    items[3],
                    items[4],
                    items[0],
                )
                logger.warning("Original error message: %s", e)
    return channels
def _filter_channels(
channels, filter_by_id=None, filter_by_name=None, filter_by_component=None
):
"""Filter channels by id, name and/or component.
Parameters
----------
channels: :class:`~HinetPy.win32.Channel`
Channels to be filtered.
filter_by_id: list of str or wildcard
Filter channels by ID.
filter_by_name: list of str or wildcard
Filter channels by name.
filter_by_component: list of str or wildcard
Filter channels by component.
"""
def _filter(channels, key, filters):
filtered_channels = []
if isinstance(filters, list): # filter by list
for channel in channels:
if getattr(channel, key) in filters:
filtered_channels.append(channel)
elif isinstance(filters, str): # filter by wildcard
for channel in channels:
if fnmatch(getattr(channel, key), filters):
filtered_channels.append(channel)
else:
raise ValueError("Only list and wildcard filter are supported.")
return filtered_channels
if filter_by_id:
channels = _filter(channels, "id", filter_by_id)
if filter_by_name:
channels = _filter(channels, "name", filter_by_name)
if filter_by_component:
channels = _filter(channels, "component", filter_by_component)
return channels
def _write_winprm(ctable, prmfile="win.prm"):
"""
Four line parameters file.
"""
with open(prmfile, "w") as f:
msg = ".\n" + ctable + "\n" + ".\n.\n"
f.write(msg)
def _extract_channel(
    winfile, channel, suffix="SAC", outdir=".", prmfile="win.prm", pmax=8640000
):
    """Extract one channel data from win32 file.

    Parameters
    ----------
    winfile: str
        win32 file to be processed.
    channel: Channel
        Channel to be extracted.
    suffix: str
        SAC file suffix.
    outdir: str
        Output directory.
    prmfile: str
        Win32 parameter file.
    pmax: int
        Maximum number of data points.

    Returns
    -------
    str or None
        Path of the extracted SAC file, or ``None`` when the win32 file
        holds no data for this channel.

    Raises
    ------
    ValueError
        If the data is longer than ``pmax`` points.
    """
    # win2sac_32 flags: -e extracts a single channel, -p points to the
    # parameter file, -m sets the maximum number of data points.
    cmd = [
        "win2sac_32",
        winfile,
        channel.id,
        suffix,
        outdir,
        "-e",
        "-p" + prmfile,
        "-m" + str(pmax),
    ]
    p = Popen(cmd, stdout=DEVNULL, stderr=PIPE)

    # check stderr output
    # NOTE(review): these substring checks depend on the exact wording of
    # win2sac_32's messages - confirm against the installed binary version.
    for line in p.stderr.read().decode().split("\n"):
        if "The number of points is maximum over" in line:
            msg = "The number of data points is over maximum. Try to increase pmax."
            raise ValueError(msg)
        if f"Data for channel {channel.id} not existed" in line:
            # return None if no data avaiable
            logger.warning(
                f"Data for {channel.name}.{channel.component} ({channel.id}) "
                + "not exists. Skipped."
            )
            return None

    # win2sac_32 names its output "<station>.<component>.<suffix>".
    filename = f"{channel.name}.{channel.component}.{suffix}"
    if outdir != ".":
        filename = os.path.join(outdir, filename)
    if os.path.exists(filename):  # some channels have no data
        if suffix == "":  # remove extra dot if suffix is empty
            os.rename(filename, filename[:-1])
            return filename[:-1]
        return filename
def _channel2pz(channel, keep_sensitivity=False):
"""Convert channel information to SAC polezero file.
Transfer function = s^2 / (s^2+2hws+w^2).
"""
# Hi-net use moving coil velocity type seismometer.
if channel.unit != "m/s":
logger.warning(
f"{channel.name}.{channel.component} ({channel.id}): Unit is not velocity."
)
try:
freq = 2.0 * math.pi / channel.period
except ZeroDivisionError:
logger.warning(
f"{channel.name}.{channel.component} ({channel.id}): "
+ "Natural period = 0. Skipped."
)
return None, None, None
# calculate poles, find roots of equation s^2+2hws+w^2=0
real = -channel.damping * freq
imaginary = freq * math.sqrt(1 - channel.damping ** 2)
# calculate constant
fn = 20 # alaways assume normalization frequency is 20 Hz
s = complex(0, 2 * math.pi * fn)
A0 = abs((s ** 2 + 2 * channel.damping * freq * s + freq ** 2) / s ** 2)
if keep_sensitivity:
factor = math.pow(10, channel.preamplification / 20.0)
constant = A0 * channel.gain * factor / channel.lsb_value
else:
constant = A0
return real, imaginary, constant
def _write_pz(pzfile, real, imaginary, constant):
"""Write SAC PZ file.
Parameters
----------
pzfile: str
SAC PoleZero filename.
real: float
Real part of poles.
imaginary: float
Imaginary part of poles
constant: float
Constant in SAC PZ.
"""
with open(pzfile, "w") as pz:
pz.write("ZEROS 3\n")
pz.write("POLES 2\n")
pz.write(f"{real:9.6f} {imaginary:9.6f}\n")
pz.write(f"{real:9.6f} {-imaginary:9.6f}\n")
pz.write(f"CONSTANT {constant:e}\n")
def _extract_sacpz(channel, suffix="SAC_PZ", outdir=".", keep_sensitivity=False):
    """Write the SAC PoleZero file for one channel.

    Parameters
    ----------
    channel: Channel
        Channel whose response is converted.
    suffix: str
        Appended to "<station>.<component>" (with a dot) when non-empty.
    outdir: str
        Output directory.
    keep_sensitivity: bool
        Passed through to ``_channel2pz``.

    Returns
    -------
    str or None
        Path of the written PZ file, or ``None`` when the channel
        information could not be converted.
    """
    real, imaginary, constant = _channel2pz(channel, keep_sensitivity=keep_sensitivity)
    if (
        real is None or imaginary is None or constant is None
    ):  # something wrong with channel information, skipped
        return None

    pzfile = f"{channel.name}.{channel.component}"
    if suffix:
        pzfile += "." + suffix
    pzfile = os.path.join(outdir, pzfile)
    _write_pz(pzfile, real, imaginary, constant)
    return pzfile
def merge(datas, total_data, force_sort=False):
    """Merge several win32 files to one win32 file.

    Parameters
    ----------
    datas: list of str or wildcard
        Win32 files to be merged.
    total_data: str
        Filename of ouput win32 file.
    force_sort: bool
        Sort all win32 files by date.

    Examples
    --------
    If win32 files are named by starttime (e.g. ``201304040203.cnt``), sorting
    win32 files in list by name/time is prefered:

    >>> datas = sorted(glob.glob("20130404*.cnt"))
    >>> merge(datas, "outdir/final.cnt")

    If win32 files are named randomly, you should set ``force_sort`` to
    ``True`` to force ``catwin32`` to sort all data by time.
    However, it's time consuming. Do NOT use it unless necessary:

    >>> datas = ["001.cnt", "002.cnt", "003.cnt"]
    >>> merge(datas, "final.cnt", force_sort=True)

    You can also use wildcard to specify the win32 files to be merged.

    >>> merge("20130404*.cnt", "final.cnt")
    """
    if isinstance(datas, str):  # wildcard support
        datas = sorted(glob.glob(datas))
    if not datas:
        raise FileNotFoundError("Files to be merged not found.\n")

    # Create the output directory when the target path contains one.
    if os.path.dirname(total_data):
        os.makedirs(os.path.dirname(total_data), exist_ok=True)

    # Delegate the actual merging to the external catwin32 binary.
    cmd = ["catwin32", "-o", total_data] + datas
    if force_sort:  # add -s option to force sort
        cmd.append("-s")
    subprocess.call(cmd, stdout=DEVNULL, stderr=DEVNULL)
| 29.637736 | 87 | 0.591036 | 1,385 | 0.088172 | 0 | 0 | 0 | 0 | 0 | 0 | 7,842 | 0.499236 |
c39e009f068f7788015732d2e6b0edfd0efd992a | 426 | py | Python | examples/simple.py | realazthat/aiopg-trollius | 6f5edf829d92d1b10c4a1a3a90fe2451539e8dd7 | [
"BSD-2-Clause"
] | 1 | 2021-01-03T00:58:01.000Z | 2021-01-03T00:58:01.000Z | examples/simple.py | 1st1/aiopg | 1bcbd95f9ff97675788dc3dbc2f7889e26b2fba4 | [
"BSD-2-Clause"
] | 2 | 2018-07-20T07:05:46.000Z | 2018-07-20T19:44:44.000Z | examples/simple_old_style.py | soar/aiopg | 9bdff257226b14c1828253efb6d0eb7239b0683a | [
"BSD-2-Clause"
] | 3 | 2018-07-18T06:59:47.000Z | 2018-07-19T22:56:50.000Z | import asyncio
import aiopg
dsn = 'dbname=aiopg user=aiopg password=passwd host=127.0.0.1'
async def test_select():
    """Open a pool on ``dsn``, run ``SELECT 1`` and check the round-trip.

    Fix: converted from the generator-based ``@asyncio.coroutine`` /
    ``yield from`` style, which was removed in Python 3.11, to a native
    ``async def`` coroutine with ``await``.
    """
    pool = await aiopg.create_pool(dsn)
    with (await pool.cursor()) as cur:
        await cur.execute("SELECT 1")
        ret = await cur.fetchone()
        assert ret == (1,)
    print("ALL DONE")


# asyncio.run() replaces the deprecated get_event_loop()/run_until_complete()
# pattern and closes the loop when the coroutine finishes.
asyncio.run(test_select())
| 22.421053 | 62 | 0.680751 | 0 | 0 | 239 | 0.561033 | 258 | 0.605634 | 0 | 0 | 76 | 0.178404 |
c39e74a510420ce0e428d6654b2c91df1ac7a9d5 | 162 | py | Python | sta_etl/__init__.py | XeBoris/git-etl | 888f26e51a797dd111c9ca457a0c83b4f00296f0 | [
"MIT"
] | null | null | null | sta_etl/__init__.py | XeBoris/git-etl | 888f26e51a797dd111c9ca457a0c83b4f00296f0 | [
"MIT"
] | null | null | null | sta_etl/__init__.py | XeBoris/git-etl | 888f26e51a797dd111c9ca457a0c83b4f00296f0 | [
"MIT"
] | null | null | null | """Top-level package for sta-etl."""
# Package metadata for sta-etl.
__author__ = """Boris Bauermeister"""
# NOTE(review): this address looks truncated - it should presumably end in a
# full domain (e.g. "...@gmail.com"); confirm.
__email__ = 'Boris.Bauermeister@gmail'
__version__ = '0.1.0'
#from sta_etl import *
| 18 | 38 | 0.697531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.709877 |
c3a0fe8de0f2a234fe164992e77198246617cbd3 | 14,399 | py | Python | care/facility/api/viewsets/patient_external_test.py | MaharashtraStateInnovationSociety/care | 6e7794d2ecb08fa17f2fcea6a4bb0c829f8e48a2 | [
"MIT"
] | null | null | null | care/facility/api/viewsets/patient_external_test.py | MaharashtraStateInnovationSociety/care | 6e7794d2ecb08fa17f2fcea6a4bb0c829f8e48a2 | [
"MIT"
] | null | null | null | care/facility/api/viewsets/patient_external_test.py | MaharashtraStateInnovationSociety/care | 6e7794d2ecb08fa17f2fcea6a4bb0c829f8e48a2 | [
"MIT"
] | null | null | null | from collections import defaultdict
import io
import hashlib
from datetime import date, datetime
from pyexcel_xls import get_data as xls_get
import pandas
import magic
from contextlib import closing
import csv
from django.db import connection
from io import StringIO
import uuid
from psycopg2.errors import UniqueViolation
from django.db import IntegrityError
from django.utils.encoding import force_bytes
from django.utils.timezone import make_aware
from django.conf import settings
from django.utils.datastructures import MultiValueDictKeyError
from django_filters import rest_framework as filters
from django_filters import Filter
from django_filters.filters import DateFromToRangeFilter
from djqscsv import render_to_csv_response
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.mixins import DestroyModelMixin, ListModelMixin, RetrieveModelMixin
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from core.fernet import FernetEncryption
from care.facility.api.serializers.patient_external_test import (
PatientExternalTestSerializer, PatientExternalTestICMRDataSerializer
)
from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory
from care.users.models import User, State, District
def prettyerrors(errors):
    """Collect serializer errors for the CSV-mapped columns as plain strings.

    Only attributes listed in PatientExternalTest.HEADER_CSV_MAPPING are
    considered; an attribute appears in the result only if it has at least
    one error message.
    """
    readable = {}
    for attribute in PatientExternalTest.HEADER_CSV_MAPPING.keys():
        if attribute not in errors:
            continue
        messages = [str(err) for err in errors.get(attribute, "")]
        if messages:
            readable[attribute] = messages
    return readable
class MFilter(Filter):
    """Multi-value filter: accepts a comma-separated list of values.

    An empty/missing value leaves the queryset untouched; otherwise the
    target field must be non-null and match one of the supplied values.
    """

    def filter(self, qs, value):
        """Apply an ``__in`` lookup built from the comma-separated value."""
        if not value:
            return qs
        lookup = {
            "%s__in" % self.field_name: value.split(","),
            "%s__isnull" % self.field_name: False,
        }
        return qs.filter(**lookup)
class PatientExternalTestFilter(filters.FilterSet):
    """Query-parameter filters for listing external (ICMR) test results."""
    # Case-insensitive substring matches.
    name = filters.CharFilter(field_name="name", lookup_expr="icontains")
    srf_id = filters.CharFilter(field_name="srf_id", lookup_expr="icontains")
    mobile_number = filters.CharFilter(field_name="mobile_number", lookup_expr="icontains")
    # Comma-separated id lists, handled by MFilter (`?wards=1,2,3`).
    wards = MFilter(field_name="ward__id")
    districts = MFilter(field_name="district__id")
    local_bodies = MFilter(field_name="local_body__id")
    # Date-range filters (django-filter adds the _after/_before suffixes).
    sample_collection_date = DateFromToRangeFilter(field_name="sample_collection_date")
    result_date = DateFromToRangeFilter(field_name="result_date")
    created_date = DateFromToRangeFilter(field_name="created_date")
class PatientExternalTestViewSet(
    RetrieveModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet,
):
    """API endpoints for externally reported (ICMR) COVID test results.

    Provides list/retrieve/destroy, CSV export of the filtered list, and
    two bulk-upload paths: a JSON payload (``bulk_upsert``) and an ICMR
    portal file upload (``bulk_upsert_icmr``).
    """
    serializer_class = PatientExternalTestSerializer
    queryset = PatientExternalTest.objects.select_related("ward", "local_body", "district").all().order_by("-id")
    permission_classes = (IsAuthenticated,)
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = PatientExternalTestFilter
    parser_classes = (MultiPartParser, FormParser, JSONParser)
    def get_queryset(self):
        """Restrict the base queryset to the requesting user's jurisdiction.

        Superusers see everything. Otherwise the broadest admin level that
        matches the user's type wins (state > district > local body > ward);
        any other user type gets an empty queryset.
        """
        queryset = self.queryset
        if not self.request.user.is_superuser:
            if self.request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
                queryset = queryset.filter(district__state=self.request.user.state)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
                queryset = queryset.filter(district=self.request.user.district)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]:
                queryset = queryset.filter(local_body=self.request.user.local_body)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["WardAdmin"]:
                # ward__isnull guards against records never assigned a ward.
                queryset = queryset.filter(ward=self.request.user.ward, ward__isnull=False)
            else:
                queryset = queryset.none()
        return queryset
    def destroy(self, request, *args, **kwargs):
        """Delete a record; only district-level lab admins and above may."""
        if self.request.user.user_type < User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
            raise PermissionDenied()
        return super().destroy(request, *args, **kwargs)
    def check_upload_permission(self):
        """Return True if the user may use the bulk-upload endpoints
        (superusers and district lab admins or above)."""
        if (
            self.request.user.is_superuser == True
            or self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
        ):
            return True
        return False
    def list(self, request, *args, **kwargs):
        """List records; if the CSV query parameter is present, stream the
        filtered queryset as a CSV download instead of paginated JSON."""
        if settings.CSV_REQUEST_PARAMETER in request.GET:
            mapping = PatientExternalTest.CSV_MAPPING.copy()
            pretty_mapping = PatientExternalTest.CSV_MAKE_PRETTY.copy()
            queryset = self.filter_queryset(self.get_queryset()).values(*mapping.keys())
            return render_to_csv_response(queryset, field_header_map=mapping, field_serializer_map=pretty_mapping)
        return super(PatientExternalTestViewSet, self).list(request, *args, **kwargs)
    @action(methods=["POST"], detail=False)
    def bulk_upsert(self, request, *args, **kwargs):
        """Bulk-create test records from a JSON list.

        Validates every entry first; if any entry is invalid the whole
        batch is rejected with a per-row (1-indexed) error map, otherwise
        all entries are saved.
        """
        if not self.check_upload_permission():
            raise PermissionDenied("Permission to Endpoint Denied")
        # if len(request.FILES.keys()) != 1:
        #     raise ValidationError({"file": "Upload 1 File at a time"})
        # csv_file = request.FILES[list(request.FILES.keys())[0]]
        # csv_file.seek(0)
        # reader = csv.DictReader(io.StringIO(csv_file.read().decode("utf-8-sig")))
        if "sample_tests" not in request.data:
            raise ValidationError({"sample_tests": "No Data was provided"})
        if type(request.data["sample_tests"]) != type([]):
            raise ValidationError({"sample_tests": "Data should be provided as a list"})
        errors = {}
        counter = 0
        ser_objects = []
        invalid = False
        for sample in request.data["sample_tests"]:
            counter += 1
            serialiser_obj = PatientExternalTestSerializer(data=sample)
            valid = serialiser_obj.is_valid()
            current_error = prettyerrors(serialiser_obj._errors)
            if current_error and (not valid):
                errors[counter] = current_error
                invalid = True
            ser_objects.append(serialiser_obj)
        if invalid:
            return Response(errors, status=status.HTTP_400_BAD_REQUEST)
        # All rows validated: safe to persist the whole batch.
        for ser_object in ser_objects:
            ser_object.save()
        return Response(status=status.HTTP_202_ACCEPTED)
    @action(methods=["POST"], detail=False)
    def bulk_upsert_icmr(self, request, *args, **kwargs):
        """Import a file exported from the ICMR portal.

        Detects the real format by MIME sniffing (true .xls spreadsheet vs
        the portal's tab-separated plaintext mislabelled as .xls), rejects
        files already uploaded (blake2b hash), parses the rows and bulk
        inserts them via a raw COPY, then records the upload in history.
        """
        if not self.check_upload_permission():
            raise PermissionDenied("Permission to Endpoint Denied")
        parsed_data = []
        states = State.objects.all().prefetch_related("districts")
        districts = District.objects.all()
        # Lowercased name -> model lookups used to resolve ids while parsing.
        states_dict = {state.name.lower(): state for state in states}
        districts_dict = {district.name.lower(): district for district in districts}
        excel_data = {}
        uploaded_file = request.FILES["file"]
        # Hash the file in chunks for duplicate-upload detection.
        file_hash = hashlib.blake2b()
        while True:
            chunk = uploaded_file.read(16384)
            if not chunk:
                break
            file_hash.update(chunk)
        existing_file_hash = PatientExternalTestUploadHistory.objects.filter(hash=file_hash.hexdigest())
        if existing_file_hash.exists():
            return Response(data="This file has already been uploaded.", status=status.HTTP_400_BAD_REQUEST)
        uploaded_file.seek(0)
        file_read = uploaded_file.read()
        mime = magic.Magic(mime=True)
        mime_type = mime.from_buffer(file_read)
        extension = str(uploaded_file).split('.')[-1]
        if mime_type == "application/vnd.ms-excel":
            excel_data = xls_get(uploaded_file, column_limit=41)
            parsed_data = self.parse_excel(excel_data=excel_data, states_dict=states_dict,
                                           districts_dict=districts_dict)
        elif mime_type == "text/plain" and extension == "xls":
            # assuming the file is uploaded as is when exported from icmr portal
            # icmr portal file has an extension of .xls but actually is a tabbed csv file in plaintext format
            file_stream = io.StringIO(file_read.decode('utf-8'))
            csv_data = pandas.read_csv(file_stream, delimiter='\t').to_dict('records')
            parsed_data = self.parse_tabbed_csv(
                csv_data=csv_data, states_dict=states_dict, districts_dict=districts_dict)
        try:
            self.copy_to_db(parsed_data)
        except UniqueViolation as error:
            return Response(data="Duplicate entries found.", status=status.HTTP_400_BAD_REQUEST)
        # self.most_recent_date_of_sample_tested_in_file is set as a side
        # effect of parse_excel / parse_tabbed_csv via parse_dictionary.
        PatientExternalTestUploadHistory.objects.create(file_name=str(
            uploaded_file), uploaded_by=request.user, hash=file_hash.hexdigest(),
            most_recent_date_of_sample_tested_in_file=self.most_recent_date_of_sample_tested_in_file)
        response_message = "Tests were successfully uploaded and saved."
        response = {"message": response_message}
        return Response(data=response, status=status.HTTP_200_OK)
    def parse_tabbed_csv(self, csv_data, states_dict, districts_dict):
        """Normalise rows parsed from the portal's tab-separated plaintext.

        Returns a list of dicts keyed by the internal field names produced
        by parse_dictionary. Also resets/updates
        self.most_recent_date_of_sample_tested_in_file as a side effect.
        """
        parsed_data = []
        self.most_recent_date_of_sample_tested_in_file = None
        for row in csv_data:
            dictionary = {}
            for key, item in row.items():
                key, value = self.parse_dictionary(key=key.strip(), item=item,
                                                   states_dict=states_dict, districts_dict=districts_dict)
                dictionary[key] = value
            if dictionary:
                parsed_data.append(dictionary)
        return parsed_data
    def parse_excel(self, excel_data, states_dict, districts_dict):
        """Normalise rows from a true .xls workbook (first sheet only).

        Row 0 is treated as the header; each following row becomes a dict
        keyed by the internal field names produced by parse_dictionary.
        Also resets/updates self.most_recent_date_of_sample_tested_in_file.
        """
        self.most_recent_date_of_sample_tested_in_file = None
        parsed_data = []
        file_name = list(excel_data.keys())[0]
        keys = []
        for i, row in enumerate(excel_data.get(file_name)):
            if i == 0:
                keys = [item.strip() for item in row]
            else:
                dictionary = {}
                for j, item in enumerate(row):
                    key, value = self.parse_dictionary(
                        key=keys[j], item=item, states_dict=states_dict, districts_dict=districts_dict)
                    dictionary[key] = value
                if dictionary:
                    parsed_data.append(dictionary)
        return parsed_data
    def parse_dictionary(self, key, item, states_dict, districts_dict):
        """Map one ICMR column (key, raw value) to an internal (key, value).

        Resolves state/district names to ids, coerces yes/no columns to
        bool, parses the various date columns, and tracks the most recent
        date_of_sample_tested seen in the file (stored on self).
        """
        if isinstance(item, str):
            item = item.strip()
        key = PatientExternalTest.ICMR_EXCEL_HEADER_KEY_MAPPING.get(key)
        if key == "state":
            state = states_dict.get(item.lower())
            if state:
                item = state.id
            key = "state_id"
        elif key == "district":
            district = districts_dict.get(item.lower())
            if district:
                item = district.id
            key = "district_id"
        elif key in ["is_hospitalized", "is_repeat"]:
            # NOTE(review): substring check is case-sensitive — assumes the
            # portal always exports lowercase "yes"; confirm.
            if item and "yes" in item:
                item = True
            else:
                item = False
        elif key in ["hospitalization_date", "confirmation_date", "sample_received_date", "entry_date"]:
            if "N/A" in item:
                item = None
            elif item:
                item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S"))
        elif key in ["sample_collection_date"]:
            item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S")).date()
        elif key == "date_of_sample_tested":
            item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S"))
            if self.most_recent_date_of_sample_tested_in_file is None or self.most_recent_date_of_sample_tested_in_file < item:
                self.most_recent_date_of_sample_tested_in_file = item
        return key, item
    def copy_to_db(self, n_records):
        """Bulk-insert parsed records with PostgreSQL COPY.

        Deduplicates rows by icmr_id within the batch, Fernet-encrypts the
        aadhar/passport identifiers, and streams a tab-separated buffer to
        copy_from. The writerow order below must stay in lockstep with the
        `columns` tuple of the COPY.
        """
        fernet = FernetEncryption()
        stream = StringIO()
        writer = csv.writer(stream, delimiter='\t')
        icmr_id_set = set()
        for i in n_records:
            if i["icmr_id"] not in icmr_id_set:
                aadhar = fernet.encrypt(i["aadhar_number"], connection)
                passport = fernet.encrypt(i["passport_number"], connection)
                writer.writerow([str(uuid.uuid4()), 'false', i["name"], i["age"], i["age_in"], i["gender"], i["address"], aadhar, passport,
                                 i["mobile_number"], i["is_repeat"], i["lab_name"], i["test_type"], i["sample_type"], i["result"],
                                 i["srf_id"], i["patient_category"], i["icmr_id"], i["icmr_patient_id"], i["contact_number_of"],
                                 i["nationality"], i['pincode'], i['village_town'], i['underlying_medical_condition'], i['sample_id'],
                                 i['hospital_name'], i['hospital_state'], i['hospital_district'], i['symptom_status'], i['symptoms'],
                                 i['egene'], i['rdrp'], i['orf1b'], i['remarks'], i['state_id'], i['district_id'], i['is_hospitalized']])
                icmr_id_set.add(i["icmr_id"])
        stream.seek(0)
        with closing(connection.cursor()) as cursor:
            cursor.copy_from(
                file=stream,
                table=PatientExternalTest.objects.model._meta.db_table,
                sep='\t',
                columns=('external_id', 'deleted', 'name', 'age', 'age_in', 'gender', 'address', 'aadhar_number', 'passport_number',
                         'mobile_number', 'is_repeat', 'lab_name', 'test_type', 'sample_type', 'result', 'srf_id', 'patient_category',
                         'icmr_id', 'icmr_patient_id', 'contact_number_of', 'nationality', 'pincode', 'village_town',
                         'underlying_medical_condition', 'sample_id', 'hospital_name', 'hospital_state', 'hospital_district',
                         'symptom_status', 'symptoms', 'egene', 'rdrp', 'orf1b', 'remarks', 'state_id', 'district_id', 'is_hospitalized'),
            )
| 44.033639 | 139 | 0.647267 | 12,559 | 0.872213 | 0 | 0 | 4,212 | 0.29252 | 0 | 0 | 2,290 | 0.159039 |
c3a2516ed5e2983309b0fcf123980be25b43d165 | 1,061 | py | Python | tests/bsmp/test_commands.py | lnls-sirius/pydrs | 4e44cf0272fcf0020139a6c176a708b4642a644a | [
"MIT"
] | null | null | null | tests/bsmp/test_commands.py | lnls-sirius/pydrs | 4e44cf0272fcf0020139a6c176a708b4642a644a | [
"MIT"
] | 1 | 2022-01-14T14:59:09.000Z | 2022-01-21T18:48:32.000Z | tests/bsmp/test_commands.py | lnls-sirius/pydrs | 4e44cf0272fcf0020139a6c176a708b4642a644a | [
"MIT"
] | 1 | 2022-01-14T14:54:14.000Z | 2022-01-14T14:54:14.000Z | from unittest import TestCase
from siriuspy.pwrsupply.bsmp.constants import ConstPSBSMP
from pydrs.bsmp import CommonPSBSMP, EntitiesPS, SerialInterface
class TestSerialCommandsx0(TestCase):
    """Test BSMP consulting methods (variable and parameter reads/writes)."""
    def setUp(self):
        """Common setup for all tests: a power-supply BSMP object wired to a
        serial interface at slave address 1."""
        # NOTE(review): "/serial" looks like a placeholder device path —
        # these tests appear to need hardware or a serial stub; confirm.
        self._serial = SerialInterface(path="/serial", baudrate=9600)
        self._entities = EntitiesPS()
        self._pwrsupply = CommonPSBSMP(
            iointerface=self._serial, entities=self._entities, slave_address=1
        )
    def test_query_protocol_version(self):
        """Placeholder: protocol version query not yet exercised."""
    def test_query_variable(self):
        """Read the power-supply status variable over BSMP."""
        self._pwrsupply.pread_variable(var_id=ConstPSBSMP.V_PS_STATUS, timeout=500)
    def test_query_parameter(self):
        """Read the power-supply name parameter over BSMP."""
        self._pwrsupply.parameter_read(var_id=ConstPSBSMP.P_PS_NAME, timeout=500)
    def test_write_parameter(self):
        """Write a test value to the power-supply name parameter."""
        self._pwrsupply.parameter_write(
            var_id=ConstPSBSMP.P_PS_NAME, value="pv_test_name", timeout=500
        )
| 28.675676 | 83 | 0.679548 | 903 | 0.851084 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.123468 |
c3a3459cc213fe0474ea43964c551e8679004f84 | 1,102 | py | Python | data/hsd11b1_validation/get_smiles_cactus.py | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
] | null | null | null | data/hsd11b1_validation/get_smiles_cactus.py | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
] | null | null | null | data/hsd11b1_validation/get_smiles_cactus.py | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
] | null | null | null | """Converts synonyms into SMILES for the data from Gerber's paper."""
# data/hsd11b1_validation/get_smiles_cactus.py
from io import BytesIO
import pandas as pd
import pycurl
def getsmiles_cactus(name):
    """Converts synonyms into SMILES strings.

    A function to use the public cactus (National Institutes of Cancer
    Research) webservice to retrieve a smiles string from a synonym.

    Args:
        name: any trivial or IUPAC name for a molecule

    Returns:
        Canonical smiles string for that molecule.
    """
    from urllib.parse import quote

    # Percent-encode the name: IUPAC names routinely contain commas,
    # brackets and spaces that would otherwise produce an invalid URL.
    url = "https://cactus.nci.nih.gov/chemical/structure/" + quote(name) + "/smiles"
    buffer = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(c.URL, url)
        c.setopt(c.WRITEDATA, buffer)
        c.perform()
    finally:
        # Release the curl handle even if the transfer raised.
        c.close()
    smiles = buffer.getvalue().decode("UTF-8")
    print(name, smiles)
    return smiles
def main():
    """Runs a batch of name conversions into SMILES.

    Reads a tab-separated table with an 'Iupac' column, resolves each name
    through the cactus webservice (one HTTP request per row), and writes
    the table back out with a new 'SMILES' column.
    """
    data = "01-robb_data.txt"
    df = pd.read_csv(data, sep="\t")
    df["SMILES"] = df.apply(lambda row: getsmiles_cactus(row["Iupac"]), axis=1)
    df.to_csv("02-robb_data_smiles.txt", sep="\t")
# Run only when executed as a script, not when imported for its helpers.
if __name__ == "__main__":
    main()
| 25.627907 | 80 | 0.673321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.553539 |
c3a458726fd15d6fab946dbf5224b58ab89013c9 | 4,541 | py | Python | scripts/snippets/eval-clevr-instance-retrieval/eval-referential.py | Glaciohound/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 52 | 2019-12-04T22:26:56.000Z | 2022-03-31T17:04:15.000Z | scripts/snippets/eval-clevr-instance-retrieval/eval-referential.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 6 | 2020-08-25T07:35:14.000Z | 2021-09-09T04:57:09.000Z | scripts/snippets/eval-clevr-instance-retrieval/eval-referential.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 5 | 2020-02-10T07:39:24.000Z | 2021-06-23T02:53:42.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : Chi Han, Jiayuan Mao
# Email : haanchi@gmail.com, maojiayuan@gmail.com
# Date : 30.07.2019
# Last Modified Date: 16.10.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 07/30/2019
#
# This file is part of eval-clevr-instance-retrieval.
# Distributed under terms of the MIT license.
import six
import functools
import sys
from IPython.core import ultratb
import numpy as np
import jacinle.io as io
import jacinle.random as random
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from jacinle.utils.meter import GroupMeters
# Drop into pdb with a plain-text traceback on any uncaught exception.
sys.excepthook = ultratb.FormattedTB(
    mode='Plain', color_scheme='Linux', call_pdb=True)
# CLI: ground-truth scenes and per-scene concept predictions (both required;
# 'checked_file' makes jacinle verify the paths exist). Parsed at import time.
parser = JacArgumentParser()
parser.add_argument('--scene-json', required=True, type='checked_file')
parser.add_argument('--preds-json', required=True, type='checked_file')
args = parser.parse_args()
class Definition(object):
    """Static CLEVR vocabulary used by the retrieval evaluation.

    Holds the annotated attribute/relation names, the per-attribute concept
    lists, the synonym table, and the two derived inverse mappings
    (concept -> attribute, surface word -> lemma).
    """

    annotation_attribute_names = ['color', 'material', 'shape', 'size']
    annotation_relation_names = ['behind', 'front', 'left', 'right']
    concepts = {
        'color': ['gray', 'red', 'blue', 'green', 'brown', 'purple', 'cyan', 'yellow'],
        'material': ['rubber', 'metal'],
        'shape': ['cube', 'sphere', 'cylinder'],
        'size': ['small', 'large']
    }
    # Inverse of `concepts`: each concept word points back to its attribute.
    concept2attribute = {
        concept: attribute
        for attribute, concept_list in concepts.items()
        for concept in concept_list
    }
    relational_concepts = {
        'spatial_relation': ['left', 'right', 'front', 'behind']
    }
    synonyms = {
        "thing": ["thing", "object"],
        "sphere": ["sphere", "ball"],
        "cube": ["cube", "block"],
        "cylinder": ["cylinder"],
        "large": ["large", "big"],
        "small": ["small", "tiny"],
        "metal": ["metallic", "metal", "shiny"],
        "rubber": ["rubber", "matte"],
    }
    # Inverse of `synonyms`: each surface form points back to its lemma.
    word2lemma = {
        surface: lemma
        for lemma, surfaces in synonyms.items()
        for surface in surfaces
    }
# Module-level singleton vocabulary used by the helper functions below.
def_ = Definition()
def get_desc(obj):
    """Describe `obj` by its annotated attribute values, randomly swapping
    each value for one of its synonyms when synonyms exist."""
    words = [obj[attr] for attr in def_.annotation_attribute_names]
    return [
        random.choice_list(def_.synonyms[w]) if w in def_.synonyms else w
        for w in words
    ]
def run_desc_obj(obj, desc):
    """Return True iff every word in `desc` (after lemmatisation) matches
    the corresponding annotated attribute of `obj`."""
    for word in desc:
        lemma = def_.word2lemma.get(word, word)
        if obj[def_.concept2attribute[lemma]] != lemma:
            return False
    return True
def run_desc_pred(all_preds, desc):
    """Combine per-concept prediction scores for the words in `desc` by
    elementwise minimum (seeded with a large constant so an empty desc
    yields that constant)."""
    combined = 10000
    for word in desc:
        combined = np.fmin(all_preds[word], combined)
    return combined
def test(index, all_objs, all_preds, meter):
    """Run one retrieval trial for the object at `index`.

    Samples a 1-3 word description of the target, ranks it against all
    objects that do NOT match the description, and records recall@1..5
    in `meter`.
    """
    target = all_objs[index]
    desc = random.choice_list(get_desc(target), size=random.randint(1, 3))
    if isinstance(desc, six.string_types):
        desc = [desc]
    # Distractors: indices of objects the description does not describe.
    distractors = []
    for i, candidate in enumerate(all_objs):
        if not run_desc_obj(candidate, desc):
            distractors.append(i)
    scores = run_desc_pred(all_preds, desc)
    # Rank = number of distractors scored above the target.
    rank = (scores[distractors] > scores[index]).sum()
    for k in range(1, 6):
        meter.update('r@%02d' % k, rank <= k)
def transpose_scene(scene):
    """Convert a per-object mapping {'0': {key: val}, '1': ...} into a
    per-key mapping {key: np.array of vals over objects 0..N-1}."""
    n_objects = len(scene)
    return {
        key: np.array([scene[str(idx)][key] for idx in range(n_objects)])
        for key in scene['0']
    }
def main():
    """Evaluate predicted concept scores against ground-truth scenes and
    print recall@1..5 over all objects."""
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    if isinstance(preds, dict):
        preds = list(preds.values())
    # NOTE(review): dead branch kept from an earlier per-object prediction
    # format — confirm before removing.
    if False:
        preds = [transpose_scene(s) for s in preds]
    # flattened_objs = [o for s in scenes for o in s['objects']]
    # flattened_preds = {
    #     k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
    #     for k in preds[0]
    # }
    meter = GroupMeters()
    '''
    for i, scene in tqdm_gofor(scenes, mininterval=0.5):
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], preds[i], meter)
    '''
    # One retrieval trial per annotated object, scored with that scene's preds.
    for i, pred in tqdm_gofor(preds, mininterval=0.5):
        scene = scenes[i]
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], pred, meter)
    print(meter.format_simple('Results:', compressed=False))
if __name__ == '__main__':
main()
| 28.204969 | 87 | 0.611099 | 997 | 0.219555 | 0 | 0 | 0 | 0 | 0 | 0 | 1,558 | 0.343096 |
c3a45be43c7a59facc3ddab37cf1ef4a7a88388b | 139 | py | Python | plenum/test/view_change/slow_nodes/conftest.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/slow_nodes/conftest.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/slow_nodes/conftest.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | import pytest
@pytest.fixture(scope="module")
def client(looper, txnPoolNodeSet, client1, client1Connected):
return client1Connected
| 19.857143 | 62 | 0.791367 | 0 | 0 | 0 | 0 | 122 | 0.877698 | 0 | 0 | 8 | 0.057554 |
c3a46fb3802bbb4ed7a5fbfe67fb1da36da3f753 | 1,316 | py | Python | lib/python/cellranger/feature/utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | 1 | 2019-03-29T04:05:58.000Z | 2019-03-29T04:05:58.000Z | lib/python/cellranger/feature/utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | lib/python/cellranger/feature/utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2018 10X Genomics, Inc. All rights reserved.
#
# Utils for feature-barcoding technology
import numpy as np
import os
import json
import tenkit.safe_json as tk_safe_json
def check_if_none_or_empty(matrix):
    """Return True when `matrix` holds no usable data.

    Args:
        matrix: an object exposing get_shape() -> (n_rows, n_cols)
            (e.g. a scipy sparse matrix), or None.

    Returns:
        bool: True if matrix is None or either dimension is zero.
    """
    if matrix is None:
        return True
    n_rows, n_cols = matrix.get_shape()
    return n_rows == 0 or n_cols == 0
def write_json_from_dict(input_dict, out_file_name):
    """Serialize input_dict to out_file_name as indented, key-sorted JSON.

    Values are passed through tenkit's json_sanitize first, which makes
    them safe for json.dump (per its use throughout this codebase).
    """
    with open(out_file_name, 'w') as f:
        json.dump(tk_safe_json.json_sanitize(input_dict), f, indent=4, sort_keys=True)
def write_csv_from_dict(input_dict, out_file_name, header=None):
    """Write input_dict to out_file_name as two-column "key,value" lines.

    Args:
        input_dict: mapping to dump; rows follow its iteration order.
        out_file_name: destination path (overwritten).
        header: optional string written verbatim before the rows; include
            your own trailing newline.
    """
    with open(out_file_name, 'w') as f:
        if header is not None:
            f.write(header)
        # .items() instead of the Python-2-only .iteritems(): works on
        # both major versions, so py3 callers no longer crash here.
        for key, value in input_dict.items():
            f.write('%s,%s\n' % (key, value))
def get_depth_string(num_reads_per_cell):
    """Format a per-cell read depth in thousands, e.g. 56231 -> '56.2k'."""
    thousands = np.round(float(num_reads_per_cell) / 1000, 1)
    return "%sk" % thousands
def all_files_present(list_file_paths):
    """Return True iff every entry of list_file_paths is an existing file.

    Args:
        list_file_paths: iterable of paths, or None.

    Returns:
        bool: False if the argument is None, contains a None entry, or any
        path is not an existing regular file. An empty list vacuously
        returns True (matching the original behavior).
    """
    if list_file_paths is None:
        return False
    if any(fpath is None for fpath in list_file_paths):
        return False
    return all(os.path.isfile(fpath) for fpath in list_file_paths)
| 27.416667 | 86 | 0.680091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.105623 |
c3a4e349e19a0d79ac0bc87abdcd29c84fe6b957 | 53,492 | py | Python | A_SHERIFS_CAD/lib/hm_visual/Sampling_analysis.py | fault2shaESCWG/CentralApenninesLabFAULT2RISK | 362cbc8b8dda0c2b5ba1e0ef5c9144fb6acb2ed3 | [
"BSD-3-Clause"
] | null | null | null | A_SHERIFS_CAD/lib/hm_visual/Sampling_analysis.py | fault2shaESCWG/CentralApenninesLabFAULT2RISK | 362cbc8b8dda0c2b5ba1e0ef5c9144fb6acb2ed3 | [
"BSD-3-Clause"
] | null | null | null | A_SHERIFS_CAD/lib/hm_visual/Sampling_analysis.py | fault2shaESCWG/CentralApenninesLabFAULT2RISK | 362cbc8b8dda0c2b5ba1e0ef5c9144fb6acb2ed3 | [
"BSD-3-Clause"
] | 2 | 2020-10-30T16:39:30.000Z | 2020-11-27T17:12:43.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""SHERIFS
Seismic Hazard and Earthquake Rates In Fault Systems
Version 1.0
@author: thomas
"""
import numpy as np
import os
from scipy.stats import chisquare
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
def sampling_analysis(Run_name,Model_list,m_Mmax,b_sample,a_s_model,mega_mfd_cummulative,catalog_cum_rate,
xmin,xmax,ymin,ymax,total_list_model,bining_in_mag,total_list_MFD_type,
total_list_scenario_name,file_faults_data,total_list_sample,total_list_BG_hyp):
if not os.path.exists(str(Run_name) + '/analysis/figures/sampling_analysis'):
os.makedirs(str(Run_name) + '/analysis/figures/sampling_analysis')
file_LT_metrics=open(str(Run_name) + '/analysis/txt_files/LT_metrics.txt','w')
file_LT_metrics.write('ScL\tModel\tBG\tbvalue\tMFD\tSc\tsample\tmean_sr\tchi_score\tMmax_score\tNMS_score\tpaleo_score\n')
'''
#read the catalog cumulative rates for all sampling
and compare each branch to the catalog using the chi-squared test
methodology:
for each branch i of the logic tree
30 random MFD calculated from the catalog are extracted for the comaprison
the modeled rate of the branch i are compared the each on of these random samples
the comparison is done using the following formula:
we calculate the absolute value of the difference between the logs of the model minus the log of catalog rate
we had 10 to this asolute value to make it accepatble or the chi-squarred test
we run the chisquared test for an array of ten value corresponding to one unit of magnitude (ten bins of 0.1)
bins of magnitude where one of the two rates are not defined are deleted
the value are conpared to an array of value equal ten (the expected value is the model fits the data)
we save the pvalue calculated
In order to get the p value for the whole MFD, we do the mean of the pvalue for each unit of magnitude
weighted by the number of filled bins in the range of magnitude.
If the p value is close to 1, the two MFD are similar.
personal opinion:
p values superior to 0.9 seam like a good match
p values superior to 0.8 seam like an acceptable match in most cases
p values less than 0.7 make the match difficult to accept
Warning! this method doesn't care if the two maximum magnitude are different,
it will only take the bin where both MFDs are defined.
The fit in terms of Mmax need to rely on some other test. (hopefully I managed to provide one...)
'''
plot_fig=False
index_Mmin=np.where(np.array(np.linspace(4.0,7.0,num=31).round(1))==xmin)[0][0]
index_Mmax=np.where(np.array(np.linspace(4.0,10.0,num=61).round(1))==xmax)[0][0]+1
file = open(str(Run_name) + '/analysis/txt_files/model_performance.txt','w')
file.write('Model\tMFD type\tBG\tScenario Set\tsample\tFit to catalog\tFit to Paleo\tNMS score\n')
index_model=0
for model in Model_list:
if not os.path.exists(str(Run_name) + '/analysis/figures/sampling_analysis/'+model):
os.makedirs(str(Run_name) + '/analysis/figures/sampling_analysis/'+model)
catfile_all=str(Run_name) + '/analysis/figures/catalogue/catalog_rates_all_'+model+'.txt'
with open(catfile_all) as f:#finds where to start reading
lines_cat = f.readlines()
#ranges of magnitude where the test is made
ranges_mag=[[4.0,4.1,4.2,4.3,4.4,4.5,4.6,4.7,4.8,4.9],
[5.0,5.1,5.2,5.3,5.4,5.5,5.6,5.7,5.8,5.9],
[6.0,6.1,6.2,6.3,6.4,6.5,6.6,6.7,6.8,6.9],
[7.0,7.1,7.2,7.3,7.4,7.5,7.6,7.7,7.8,7.9],
[8.0,8.1,8.2,8.3,8.4,8.5,8.6,8.7,8.8,8.9],
[9.0,9.1,9.2,9.3,9.4,9.5,9.6,9.7,9.8,9.9]
]
p_chi_branch=[]
indexes_model=[]
index_branch = 0
for mfd,model_name_i in zip(mega_mfd_cummulative,total_list_model):
if model_name_i ==model:
indexes_model.append(index_branch)
indexes_catalogs_to_test = np.random.choice(range(len(lines_cat))[1:],size=40)
#indexes_catalogs_to_test = range(len(lines_cat)) #take them all, it doesn't take that long
pvalues=[[],
[],
[],
[],
[],
[]
]
weights_pvalues=[[],
[],
[],
[],
[],
[]
]
if plot_fig==True:
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
for i_cat in indexes_catalogs_to_test:
cat_rates_i=lines_cat[i_cat].split('\t')
cat_rates_i=[float(i) for i in cat_rates_i]
if plot_fig==True:
ax1.scatter(bining_in_mag,cat_rates_i,c='k',alpha=0.1,s=0.5)
index_range=0
for range_i in ranges_mag:
diff_rate=[]
target_value=[]
bining_i=[]
for model_rate_i,data_rate_i,mag_i in zip(mfd[index_Mmin:index_Mmax],cat_rates_i,bining_in_mag):
if model_rate_i!=0 and data_rate_i!=0 and mag_i in range_i:
# model_rate.append(-np.log10(model_rate_i)*10.)
# data_rate.append(-np.log10(data_rate_i)*10.)
diff_rate.append(abs(np.log10(model_rate_i)-np.log10(data_rate_i))*10. + 10.)
target_value.append(10.)
bining_i.append(mag_i)
if len(diff_rate)>=2:
pvalues[index_range].append(chisquare(diff_rate,f_exp=target_value)[1]) #pvalue for each range and sample
weights_pvalues[index_range].append(len(diff_rate)) #associated weight depending of number of bin in the range that are filled
if plot_fig==True:
ax2.scatter(bining_i,diff_rate,c='r',alpha=0.2,s=2)
ax2.scatter(bining_i,target_value,c='k',alpha=0.1,s=2)
index_range+=1
if plot_fig==True:
ax1.scatter(bining_in_mag,mfd[index_Mmin:index_Mmax],c='r',s=0.5)
#ax1.set_title(str(round(np.mean(pvalues),3)))
ax1.set_yscale('log')
ax1.set_xlim([xmin,xmax])
ax1.set_ylim([ymin,ymax])
p_total=[]
weight_p=[]
for range_i,p_i,w_i in zip(ranges_mag,pvalues,weights_pvalues):
if len(p_i)!=0 :
weight_p.append(np.mean(w_i))
p_total.append(round(np.mean(p_i),4))
if plot_fig==True:
if round(np.mean(p_i),3) >= 0.9:
color='g'
elif round(np.mean(p_i),3) >= 0.8:
color='orange'
else:
color='r'
ax2.text(np.mean(range_i),25,str(round(np.mean(p_i),3)),fontsize=8,color=color)
p_chi_branch.append(round(np.average(p_total,weights=weight_p),3))
if plot_fig==True:
if round(np.average(p_total,weights=weight_p),3) >= 0.9:
color='g'
elif round(np.average(p_total,weights=weight_p),3) >= 0.8:
color='orange'
else:
color='r'
ax1.set_title(str(round(np.average(p_total,weights=weight_p),3)),color=color)
ax2.set_xlim([xmin-0.1,xmax])
ax2.set_ylim([9,30])
plt.show()
plt.close()
index_branch+=1
'''
# Mmax fit to the Mmax in the catalog
The rule is: The Mmax in the model should be at least the one in the catalog
but the catalog has some uncertainties on the magnitude of large historical EQs
methodology:
we calculate the cumulative density distribution of the Mmax in the catalog
we associate the given density to each Mmax of the models
'''
Mmax_cat=[]
bining_cat=lines_cat[0].split('\t')
bining_cat=[float(i) for i in bining_cat]
for i_cat in range(len(lines_cat)-1):
cat_rates_i=lines_cat[i_cat+1].split('\t')
cat_rates_i=[float(i) for i in cat_rates_i]
i_test=0
try :
while cat_rates_i[i_test]!=0:
i_test+=1
except:
i_test=len(cat_rates_i)-1
Mmax_cat.append(bining_cat[i_test])
distribution_Mmax_cat=[]
for mag_i in bining_cat:
d_i = sum(i <= mag_i+0.1 for i in Mmax_cat)/len(Mmax_cat)
distribution_Mmax_cat.append(d_i)
plt.plot(bining_cat,distribution_Mmax_cat)
plt.xlim([xmax-1.5,xmax])
plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/Mmax_distrib_in_the_cat.png',dpi = 180)
plt.close()
weight_model_Mmax=[]
for Mmax_i,model_name_i in zip(m_Mmax,total_list_model):
if model_name_i == model:
index = np.where(np.array(bining_cat)==Mmax_i)[0][0]
weight_model_Mmax.append(distribution_Mmax_cat[index])
'''
The NMS on a set of faults as a metric for judging the quality of a model
'''
fault_set=['F1','F2','F3']
NMS_set=[]
for fault in fault_set:
NMS_set.append([])
if len(NMS_set) != 0:
sr_sample=[]
for fault in fault_set:
sr_sample.append([])
score_nms=[]
####
# extract the slip-rate of each target fault and does the mean for that branch.
# this can allow to see is some slip-rate values seam to work better
####
srate_sample_file=str(Run_name) + '/analysis/txt_files/slip_rate_sampling.txt'
with open(srate_sample_file) as f:#finds where to start reading
lines_sr = f.readlines()
srep_file=str(Run_name) + '/analysis/txt_files/slip_rep_on_faults_all_data.txt'
try:
with open(srep_file) as f:#finds where to start reading
lines = f.readlines()
line_number=0
for line in lines:
#print(line)
if line.split('\t')[7] in fault_set and line.split('\t')[1]==model:
index_fault=np.where(np.array(fault_set)==line.split('\t')[7])[0][0]
NMS_set[index_fault].append(float(line.split('\t')[-1]))
sr_sample[index_fault].append(float(lines_sr[line_number].split('\t')[-1]))
line_number+=1
if np.sum(NMS_set) != 0. :
#print('score NMS on target faults',np.mean(NMS_set,axis=0))
for i in range(len(p_chi_branch)):
'''
the score is 1 is MSN is less than 20%
the score is 0 if:
at least one of the NMS of the test faults if more than 50%
the mean is more the 40%
between 20 and 40 the score evolves linearily between 1 and 0
(this is very much open to discussion!)
'''
if np.mean(NMS_set,axis=0)[i] > 40.:
score_nms_i = 0.
elif np.mean(NMS_set,axis=0)[i] < 20.:
score_nms_i = 1.
else :
score_nms_i=2 - 1./20.*np.mean(NMS_set,axis=0)[i]
#print('score NMS on target faults',round(score_nms_i,2))
'''hard limit on acceptability'''
for nms_row in NMS_set:
#print(nms_row[i])
if nms_row[i] > 50.:
score_nms_i = 0.
score_nms.append(score_nms_i)
#print('score NMS on target faults',round(score_nms_i,2),' NMS mean:',round(np.mean(NMS_set,axis=0)[i]))
except FileNotFoundError:
print('!!! you need to run the plot_sr_use if you want the NMS metric !!!')
print('Default value = 1. ')
for i in range(len(p_chi_branch)):
score_nms.append(1.)
else:
print('modify Sampling_analysis.py for the NMS metric')
print('Default value = 1. ')
for i in range(len(p_chi_branch)):
score_nms.append(1.)
#deos the mean sr of the faults for each branch
mean_sr_branch = np.mean(sr_sample,axis=0)
# plt.scatter(mean_sr_branch,p_chi_branch)
# plt.show()
'''#############################
Weight based on the fit to the paleo rates
and the RSQSim rates if they exist
#######################################'''
plot_paleo = False
plot_rsqsim_pr =True
#extract the faults data
faults_data = np.genfromtxt(file_faults_data, dtype=[('model', 'U100000'),
('fault_name', 'U100000'),
('type', 'U100000'),
('M', 'f8'),
('sig_M', 'f8'),
('rate', 'f8'),
('sig_rate', 'f8')],
delimiter = '\t',skip_header = 1)
# Dealing with one line files
try:
len_faults_data = len(faults_data)
except TypeError:
faults_data = faults_data.reshape((1,))
rsqsim_pr=False
RSQSim_pr_file = str(Run_name) + '/file_pr_rsqsim.txt'
try:
with open(RSQSim_pr_file) as f:#finds where to start reading
lines = f.readlines()
bin_mag_rsqsim = [round(float(i),1) for i in lines[0].split('\t')[1:-1]]
rqsim_pr_faults=[]
faults_name_rsqsim = []
for line in lines[1:]:
faults_name_rsqsim.append(line.split('\t')[0])
rqsim_pr_faults.append([float(i) for i in line.split('\t')[1:-1]]) #we don't take the last point of the MFD , too specific
index_Mmin_rsqsim=np.where(np.array(bining_in_mag)==bin_mag_rsqsim[0])[0][0]
index_Mmax_rsqsim=np.where(np.array(bining_in_mag)==bin_mag_rsqsim[-1])[0][0]+1
except:
rsqsim_pr=False
#print faults_data
data_model = list(map(lambda i : faults_data[i][0], range(len(faults_data))))
data_fault_name =list( map(lambda i : faults_data[i][1], range(len(faults_data))))
data_type =list( map(lambda i : faults_data[i][2], range(len(faults_data))))
data_M =list( map(lambda i : float(faults_data[i][3]), range(len(faults_data))))
data_sig_M =list( map(lambda i : float(faults_data[i][4]), range(len(faults_data))))
data_rate = list(map(lambda i : float(faults_data[i][5]), range(len(faults_data))))
data_sig_rate =list( map(lambda i : float(faults_data[i][6]), range(len(faults_data))))
score_paleo = []
#
# score_paleo_per_fault = []
# for fault in data_fault_name:
# score_paleo_per_fault.append([])
score_paleo_faults=[]
faults_data=[]
score_pr_rsqsim = []
faults_rsqsim = []
for fault,data_model_i in zip(data_fault_name,data_model):
if data_model_i == model and fault not in faults_data:
score_paleo_faults.append([])
faults_data.append(fault)
if rsqsim_pr == True:
if fault in faults_name_rsqsim and fault not in faults_rsqsim:
score_pr_rsqsim.append([])
faults_rsqsim.append(fault)
participation_rate_file=str(Run_name) + '/analysis/figures/rupture_rate_for_each_fault_cum/' + model + '/file_for_comparison.txt'
with open(participation_rate_file) as f:#finds where to start reading
lines_pr = f.readlines()
paleo_list_mfd = []
paleo_list_bvalue = []
paleo_list_bg = []
paleo_list_scl = []
paleo_list_scenario = []
paleo_list_sample = []
index_branch=0
for line in lines_pr :
index_fault=0
for fault_name in faults_data:
if line.split('\t')[0]==model and line.split('\t')[7]==fault_name:
if index_fault==0:
paleo_list_mfd.append(line.split('\t')[1])
paleo_list_scenario.append(line.split('\t')[2])
paleo_list_bg.append(line.split('\t')[3])
paleo_list_scl.append(line.split('\t')[4])
paleo_list_bvalue.append(line.split('\t')[5])
paleo_list_sample.append(line.split('\t')[6])
mfd_i = [float(i) for i in list(line.split('\t')[(8+index_Mmin):(8+index_Mmax)])]
#######
# COMPARE WITH THE PALEO
#######
self_data_M = []
self_data_sig_M = []
self_data_rate = []
self_data_sig_rate = []
index_fault_in_data = np.where(np.array(data_fault_name)==fault_name)[0]
for index_i in index_fault_in_data:
if data_model[index_i] == model and data_type[index_i] == 'pal':
self_data_M.append(data_M[index_i])
self_data_sig_M.append(data_sig_M[index_i])
self_data_rate.append(data_rate[index_i])
self_data_sig_rate.append(data_sig_rate[index_i])
#calculating the paleoscore using a lognomral distribution for the paleouncertainties
paleo_score_i=[]
for m_i,sm_i,r_i,sr_i in zip(self_data_M,self_data_sig_M,self_data_rate,self_data_sig_rate):
x, y = np.mgrid[4.5:7.5:.01, -5.:0.:.01]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
#2D noraml * log normal law
rv = multivariate_normal([m_i, np.log10(r_i)], [sm_i+0.001, sr_i+0.0000001])
#interpolates the MFD
detailed_bin_mag = np.linspace(bining_in_mag[0],bining_in_mag[-1],1000)
detailed_mfd_i = np.interp(detailed_bin_mag,bining_in_mag,np.log10(mfd_i))
if plot_paleo == True:
plt.contourf(x, y, rv.pdf(pos),alpha=0.5)
plt.scatter(bining_in_mag,np.log10(mfd_i),c='k',marker='s',s=10,linewidths=0.01,alpha=0.7)
plt.scatter(detailed_bin_mag,detailed_mfd_i,c='k',marker='s',s=3,linewidths=0.01,alpha=0.7)
plt.xlim([5.,7.])
plt.ylim([-3,-1.])
plt.grid()
plt.show()
paleo_score_i.append(max([rv.pdf([i,j])/rv.pdf([m_i,np.log10(r_i)]) for i,j in zip(detailed_bin_mag,detailed_mfd_i)]))
#print(max([rv.pdf([i,j])/rv.pdf([m_i,np.log10(r_i)]) for i,j in zip(detailed_bin_mag,detailed_mfd_i)]))
score_paleo_faults[index_fault].append(np.mean(paleo_score_i))
#################
# Compare with RSQSim (if it exists)
# make the mean of the ration where both are defined expect for the last 2 bins
# (the big drop in rate leads to very large ratios but actually it's small rates so it doesn't martter so much)
################
if rsqsim_pr == True and line.split('\t')[6] == '1':
pvalues = []
pshape = []
if fault_name in faults_rsqsim:
index_fault_rsqsim = np.where(np.array(faults_name_rsqsim)==fault_name)[0][0]
fault_pr_rsqsim = rqsim_pr_faults[index_fault_rsqsim]
if plot_rsqsim_pr==True:
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
#for i_cat in indexes_catalogs_to_test:
#cat_rates_i=lines_cat[i_cat].split('\t')
#cat_rates_i=[float(i) for i in cat_rates_i]
if plot_rsqsim_pr==True:
ax1.scatter(bin_mag_rsqsim[:-2],fault_pr_rsqsim[:-2],c='k',alpha=0.9,s=3)
ax1.scatter(bin_mag_rsqsim[-2:],fault_pr_rsqsim[-2:],c='k',alpha=0.5,s=3)
#
# index_range=0
# for range_i in ranges_mag:
diff_rate=[]
# target_value=[]
bining_i=[]
for model_rate_i,data_rate_i,mag_i in zip(mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2],fault_pr_rsqsim[:-2],bin_mag_rsqsim[:-2]):
if model_rate_i!=0 and data_rate_i!=0:
if model_rate_i >= data_rate_i:
diff_rate.append(model_rate_i/data_rate_i)
else :
diff_rate.append(data_rate_i/model_rate_i)
bining_i.append(mag_i)
pvalues.append(np.mean(diff_rate)) #pvalue for each range and sample
if plot_rsqsim_pr==True:
ax2.scatter(bining_i,diff_rate,c='b',alpha=0.8,s=2)
index_range+=1
if plot_rsqsim_pr==True:
ax1.scatter(bining_in_mag[index_Mmin_rsqsim:index_Mmax_rsqsim-2],mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2],c='r',s=0.5)
ax1.scatter(bining_in_mag[-2:],mfd_i[-2:],c='r',alpha=0.4,s=0.5)
ax1.set_yscale('log')
ax1.set_xlim([xmin+1.,xmax])
ax1.set_ylim([ymin,ymax/100.])
p_total=np.mean(diff_rate)
#test on the shape (normalized mfd)
n_mfdi = [i/sum(mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2]) for i in mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2]]
n_mfd_rsqsim = [i/sum(fault_pr_rsqsim[:-2]) for i in fault_pr_rsqsim[:-2]]
diff_rate=[]
bining_i=[]
for model_rate_i,data_rate_i,mag_i in zip(n_mfdi,n_mfd_rsqsim,bin_mag_rsqsim[:-2]):
if model_rate_i!=0 and data_rate_i!=0:
if model_rate_i >= data_rate_i:
diff_rate.append(model_rate_i/data_rate_i)
else :
diff_rate.append(data_rate_i/model_rate_i)
bining_i.append(mag_i)
pshape.append(np.mean(diff_rate)) #pvalue for each range and sample
if plot_rsqsim_pr==True:
ax2.scatter(bining_i,diff_rate,c='g',alpha=0.8,s=2)
if plot_rsqsim_pr==True:
if round(p_total,3) >= 1.3:
color='r'
elif round(p_total,3) >= 1.2:
color='orange'
else :
color='g'
if round(np.mean(diff_rate),3) >= 1.3:
color_shape='r'
elif round(np.mean(diff_rate),3) >= 1.2:
color_shape='orange'
else :
color_shape='g'
ax1.set_title(model +' '+ fault_name + ' '+str(round(p_total,2)),color=color)
ax2.set_title(str(round(np.mean(diff_rate),2)),color=color_shape)
ax1.set_xlim([xmin+1.,xmax])
ax2.set_xlim([xmin+1.,xmax])
ax2.set_ylim([0.9,3.])
plt.show()
plt.close()
index_fault +=1
score_paleo = np.mean(score_paleo_faults,axis=0)
'''###################""
Compare with some other MFD at the system level (physics based for example)
#####################"'''
plot_fig_rsqsim=False
RSQSim_MFD = str(Run_name) + '/mfd_RSQSim.txt'
try:
with open(RSQSim_MFD) as f:#finds where to start reading
lines = f.readlines()
bin_mag_rsqsim = [round(float(i),1) for i in lines[0].split('\t')[1:-1]]
mfd_rsqsim = [float(i) for i in lines[1].split('\t')[1:-1]]
index_Mmin_rsqsim=np.where(np.array(bining_in_mag)==bin_mag_rsqsim[0])[0][0]
index_Mmax_rsqsim=np.where(np.array(bining_in_mag)==bin_mag_rsqsim[-1])[0][0]+1
index_branch = 0
for mfd,model_name_i in zip(mega_mfd_cummulative,total_list_model):
if model_name_i ==model:
if plot_fig_rsqsim==True:
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
if plot_fig_rsqsim==True:
ax1.scatter(bin_mag_rsqsim,mfd_rsqsim,c='k',alpha=0.9,s=3)
pvalues = []
diff_rate=[]
bining_i=[]
mfd_i = mfd[index_Mmin:index_Mmax]
for model_rate_i,data_rate_i,mag_i in zip(mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2],mfd_rsqsim[:-2],bin_mag_rsqsim[:-2]):
if model_rate_i!=0 and data_rate_i!=0:
if model_rate_i >= data_rate_i:
diff_rate.append(model_rate_i/data_rate_i)
else :
diff_rate.append(data_rate_i/model_rate_i)
bining_i.append(mag_i)
pvalues.append(np.mean(diff_rate))
p_total=np.mean(diff_rate)
if plot_fig_rsqsim==True:
ax2.scatter(bining_i,diff_rate,c='r',alpha=0.9,s=3)
if plot_fig_rsqsim==True:
ax1.scatter(bining_in_mag[index_Mmin_rsqsim:index_Mmax_rsqsim],mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim],c='r',s=0.5)
#test on the shape (normalized mfd)
n_mfdi = [i/sum(mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2]) for i in mfd_i[index_Mmin_rsqsim:index_Mmax_rsqsim-2]]
n_mfd_rsqsim = [i/sum(mfd_rsqsim[:-2]) for i in mfd_rsqsim[:-2]]
diff_rate=[]
bining_i=[]
for model_rate_i,data_rate_i,mag_i in zip(n_mfdi,n_mfd_rsqsim,bin_mag_rsqsim[:-2]):
if model_rate_i!=0 and data_rate_i!=0:
if model_rate_i >= data_rate_i:
diff_rate.append(model_rate_i/data_rate_i)
else :
diff_rate.append(data_rate_i/model_rate_i)
bining_i.append(mag_i)
pshape.append(np.mean(diff_rate)) #pvalue for each range and sample
if plot_fig_rsqsim==True:
ax2.scatter(bining_i,diff_rate,c='g',alpha=0.8,s=3)
if plot_fig_rsqsim==True:
if round(p_total,3) >= 1.3:
color='r'
elif round(p_total,3) >= 1.2:
color='orange'
else :
color='g'
if round(np.mean(diff_rate),3) >= 1.4:
color_shape='r'
elif round(np.mean(diff_rate),3) >= 1.3:
color_shape='orange'
else :
color_shape='g'
ax1.set_title(model +' '+str(round(p_total,2)),color=color)
ax2.set_title(str(round(np.mean(diff_rate),2)),color=color_shape)
ax1.set_ylim([ymin/10.,ymax])
ax1.set_xlim([xmin+1.,xmax])
ax2.set_xlim([xmin+1.,xmax])
ax2.set_ylim([0.9,3.])
ax1.set_yscale('log')
plt.show()
plt.close()
index_branch+=1
except:
pass
#print('no rsqsim file')
'''
Setting the weight for each score
'''
#weight the different parameters
#the sum must be one
weight_chi=0.35
weight_Mmax=0.05
weight_NMS_faults_test=0.3
weight_paleo = 0.3
if len(score_nms)==0.:
print('!!! no selected faults for the NMS metric !!!')
print('Default value = 0. Weight is set to 0.')
weight_NMS_faults_test=0.
weight_chi = weight_chi / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
weight_Mmax = weight_Mmax / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
weight_paleo = weight_paleo / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
for i in range(len(p_chi_branch)):
score_nms.append(0.)
if len(score_paleo)==0.:
print('!!! no paleo data on the faults !!!')
print('Default value = 0. Weight is set to 0.')
weight_paleo=0.
weight_chi = weight_chi / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
weight_Mmax = weight_Mmax / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
weight_NMS_faults_test = weight_NMS_faults_test / (weight_chi+weight_Mmax+weight_NMS_faults_test+weight_paleo)
for i in range(len(p_chi_branch)):
score_paleo.append(0.)
'''
Builbing the text file
'''
lt_branch = []
lt_i_before = 'truc'
srep_file=str(Run_name) + '/analysis/txt_files/slip_rep_on_faults_all_data.txt'
try:
with open(srep_file) as f:#finds where to start reading
lines = f.readlines()
ordered_score_paleo = []
i_lt=0
for line in lines:
if line.split('\t')[1]==model:
lt_i=[]
for i in range(7): #add the branches parameters
lt_i.append(line.split('\t')[i])
if str(lt_i) != lt_i_before:
lt_i_before = str(lt_i)
lt_i.append(round(mean_sr_branch[i_lt],3))
lt_i.append(round(p_chi_branch[i_lt],3))
lt_i.append(round(weight_model_Mmax[i_lt],3))
lt_i.append(round(score_nms[i_lt],3))
#oredering the score paleo
i1 = np.where(np.array(paleo_list_mfd)==line.split('\t')[4][4:])[0]
i2= np.where(np.array(paleo_list_scenario)==line.split('\t')[5])[0]
i3 = np.where(np.array(paleo_list_sample)==line.split('\t')[6].split('_')[1])[0]
i4= np.where(np.array(paleo_list_bvalue)==line.split('\t')[3])[0]
i5= np.where(np.array(paleo_list_bg)==line.split('\t')[2][3:])[0]
i6= np.where(np.array(paleo_list_scl)==line.split('\t')[0])[0]
i1 = np.intersect1d(i1,i2)
i1 = np.intersect1d(i1,i3)
i1 = np.intersect1d(i1,i4)
i1 = np.intersect1d(i1,i5)
i1 = np.intersect1d(i1,i6)
# print(line.split('\t')[3],line.split('\t')[2][3:],line.split('\t')[0])
# print(i1)
# index_score_paleo = np.where(np.logical_and(
# np.array(paleo_list_mfd)==line.split('\t')[4].split('_')[1],
# np.array(paleo_list_scenario)==line.split('\t')[5],
# np.array(paleo_list_sample)==line.split('\t')[6].split('_')[1]
# ))[0]
# print(index_score_paleo)
# print(paleo_list_mfd[0],line.split('\t')[4].split('_')[1])
# print(paleo_list_scenario[0],line.split('\t')[5])
# print(paleo_list_sample[0],line.split('\t')[6].split('_')[1])
# print(i1)
# print(len(paleo_list_scl),len(paleo_list_bg),len(paleo_list_bvalue),len(paleo_list_sample),len(paleo_list_scenario),len(paleo_list_mfd))
# print(score_paleo)
lt_i.append(round(np.mean(np.take(score_paleo,i1)),3))
ordered_score_paleo.append(round(np.mean(np.take(score_paleo,i1)),3))
lt_branch.append(lt_i)
i_lt+=1
for lt_i in lt_branch:
line=''
for i in lt_i :
line+=str(i)+'\t'
line=line[:-1]
file_LT_metrics.write(line+'\n')
except (FileNotFoundError, IndexError) as e:
print('!!! you need to run the plot_sr_use if you want the file with the metrics and modify Sampling_analysis.py!!!')
'''
Calculataing the weighted score for each branch
'''
if ordered_score_paleo == []:
ordered_score_paleo = [0 for i in range(len(p_chi_branch))]
final_weigth = []
for i in range(len(p_chi_branch)):
final_weigth.append(p_chi_branch[i] * weight_chi+
weight_model_Mmax[i] * weight_Mmax+
score_nms[i] * weight_NMS_faults_test+
ordered_score_paleo[i] * weight_paleo
)
'''
Plotting section
Weighted average of the different metric
user defined weight for each metric. the figure give the weighted average as a final p value
'''
color_mfd=[]
for MFD_type_i,model_name_i in zip(total_list_MFD_type,total_list_model):
if model_name_i == model:
if MFD_type_i =='GR':
color_mfd.append('darkblue')
else:
color_mfd.append('darkgreen')
#
#
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, sharey=True)
ax1.axhspan(0.8, 1.1, facecolor='g', alpha=0.1)
ax1.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
ax1.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
ax2.axhspan(0.8, 1.1, facecolor='g', alpha=0.1)
ax2.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
ax2.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
ax3.axhspan(0.8, 1.1, facecolor='g', alpha=0.1)
ax3.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
ax3.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
ax4.axhspan(0.8, 1.1, facecolor='g', alpha=0.1)
ax4.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
ax4.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
ax5.axhspan(0.8, 1.1, facecolor='g', alpha=0.1)
ax5.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
ax5.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
ax1.axhline(0.,linestyle=':',linewidth=0.2,color='k')
ax1.axhline(1.,linestyle=':',linewidth=0.2,color='k')
ax2.axhline(0.,linestyle=':',linewidth=0.2,color='k')
ax2.axhline(1.,linestyle=':',linewidth=0.2,color='k')
ax3.axhline(0.,linestyle=':',linewidth=0.2,color='k')
ax3.axhline(1.,linestyle=':',linewidth=0.2,color='k')
ax4.axhline(0.,linestyle=':',linewidth=0.2,color='k')
ax4.axhline(1.,linestyle=':',linewidth=0.2,color='k')
ax5.axhline(0.,linestyle=':',linewidth=0.2,color='k')
ax5.axhline(1.,linestyle=':',linewidth=0.2,color='k')
for i,j in zip(range(len(p_chi_branch)),indexes_model):
if total_list_scenario_name[j] ==total_list_scenario_name[0]:
if weight_model_Mmax[i]==0 or score_nms[i]==0 or ordered_score_paleo[i]< 0.25 or p_chi_branch[i]<0.3:
ax1.scatter(m_Mmax[j],p_chi_branch[i],c='darkred',marker='_',s=15,alpha=0.2,linewidth=1)
ax2.scatter(m_Mmax[j],weight_model_Mmax[i],c='darkred',marker='_',s=15,alpha=0.2,linewidth=1)
ax3.scatter(m_Mmax[j],score_nms[i],c='darkred',marker='_',s=15,alpha=0.2,linewidth=1)
ax4.scatter(m_Mmax[j],ordered_score_paleo[i],c='darkred',marker='_',s=15,alpha=0.2,linewidth=1)
ax5.scatter(m_Mmax[j],final_weigth[i],c='darkred',marker='_',s=15,alpha=0.2,linewidth=1)
else:
ax1.scatter(m_Mmax[j],p_chi_branch[i],c=color_mfd[i],marker='_',s=15,alpha=0.9,linewidth=1)
ax2.scatter(m_Mmax[j],weight_model_Mmax[i],c=color_mfd[i],marker='_',s=15,alpha=0.9,linewidth=1)
ax3.scatter(m_Mmax[j],score_nms[i],c=color_mfd[i],marker='_',s=15,alpha=0.9,linewidth=1)
ax4.scatter(m_Mmax[j],ordered_score_paleo[i],c=color_mfd[i],marker='_',s=15,alpha=0.9,linewidth=1)
ax5.scatter(m_Mmax[j],final_weigth[i],c=color_mfd[i],marker='_',s=15,alpha=0.9,linewidth=1)
else:
if weight_model_Mmax[i]==0 or score_nms[i]==0 or ordered_score_paleo[i]< 0.25 or p_chi_branch[i]<0.3:
ax1.scatter(m_Mmax[j],p_chi_branch[i],c='darkred',marker='|',s=15,alpha=0.2,linewidth=1)
ax2.scatter(m_Mmax[j],weight_model_Mmax[i],c='darkred',marker='|',s=15,alpha=0.2,linewidth=1)
ax3.scatter(m_Mmax[j],score_nms[i],c='darkred',marker='|',s=15,alpha=0.2,linewidth=1)
ax4.scatter(m_Mmax[j],ordered_score_paleo[i],c='darkred',marker='|',s=15,alpha=0.2,linewidth=1)
ax5.scatter(m_Mmax[j],final_weigth[i],c='darkred',marker='|',s=15,alpha=0.2,linewidth=1)
else:
ax1.scatter(m_Mmax[j],p_chi_branch[i],c=color_mfd[i],marker='|',s=15,alpha=0.9,linewidth=1)
ax2.scatter(m_Mmax[j],weight_model_Mmax[i],c=color_mfd[i],marker='|',s=15,alpha=0.9,linewidth=1)
ax3.scatter(m_Mmax[j],score_nms[i],c=color_mfd[i],marker='|',s=15,alpha=0.9,linewidth=1)
ax4.scatter(m_Mmax[j],ordered_score_paleo[i],c=color_mfd[i],marker='|',s=15,alpha=0.9,linewidth=1)
ax5.scatter(m_Mmax[j],final_weigth[i],c=color_mfd[i],marker='|',s=15,alpha=0.9,linewidth=1)
ax1.set_xlabel('Mmax')
ax1.set_ylabel('test value '+str(model))
ax1.set_ylim([-0.05,1.05])
ax1.set_xlim([xmax-1.5,xmax])
ax2.set_xlim([xmax-1.5,xmax])
ax3.set_xlim([xmax-1.5,xmax])
ax4.set_xlim([xmax-1.5,xmax])
ax5.set_xlim([xmax-1.5,xmax])
ax1.set_title('chi test')
ax2.set_title('Mmax test')
ax3.set_title('NMS test')
ax4.set_title('Paleo test')
ax5.set_title('weitghted total')
plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/model_performance.png',dpi = 180)
plt.close()
index_model+=1
# records in a file
for i,j in zip(range(len(p_chi_branch)),indexes_model):
file.write(str(model)+'\t'+str(total_list_MFD_type[j])+'\t'
+str(total_list_BG_hyp[j])+'\t'+str(total_list_scenario_name[j])+'\t'+str(total_list_sample[j])
+'\t'+str(round(p_chi_branch[i],2))+'\t'+str(round(ordered_score_paleo[i],2))+'\t'+str(round(score_nms[i],2))+'\n')
file.close()
#
# f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
#
# ax1.axhspan(0.8, 1., facecolor='g', alpha=0.1)
# ax2.axhspan(0.8, 1.,xmax=0.3, facecolor='g', alpha=0.1)
# ax1.axhspan(0.6, 0.8, facecolor='orange', alpha=0.1)
# ax2.axhspan(0.6, 0.8,xmax=0.5, facecolor='orange', alpha=0.1)
# ax1.axhspan(-0.1, 0.6, facecolor='r', alpha=0.1)
# ax2.axhspan(-0.1, 0.6,xmax=0.5, facecolor='r', alpha=0.1)
# #ax2.axvspan(0., 30.,ymin=0.5, facecolor='g', alpha=0.1)
# ax2.axvspan(30., 50.,ymin=0.818, facecolor='orange', alpha=0.1)
# ax2.axvspan(50., 100, facecolor='r', alpha=0.1)
# for i,j in zip(range(len(p_chi_branch)),indexes_model):
# final_pvalue = p_chi_branch[i]*weight_chi + weight_model_Mmax[i]*weight_Mmax
# if total_list_scenario_name[j] ==total_list_scenario_name[0]:
# ax1.scatter(m_Mmax[j],final_pvalue,c=color_mfd[i],marker='_',s=10,alpha=0.9,linewidth=1)
# ax2.scatter(a_s_model[j],final_pvalue,c=color_mfd[i],marker='_',s=10,alpha=0.9,linewidth=1)
# else:
# ax1.scatter(m_Mmax[j],final_pvalue,c=color_mfd[i],marker='|',s=10,alpha=0.9,linewidth=1)
# ax2.scatter(a_s_model[j],final_pvalue,c=color_mfd[i],marker='|',s=10,alpha=0.9,linewidth=1)
# ax1.set_xlabel('Mmax')
# ax1.set_ylabel('final p value (chi test, Mmax)')
# ax2.set_xlabel('NMS')
# ax2.set_ylim([-0.1,1.])
# ax1.set_xlim([xmax-1.5,xmax])
# ax2.set_xlim([0.,100.])
# ax1.set_title(str(model))
# ax2.set_title('w_chi : '+str(weight_chi)+' w_Mmax : '+str(weight_Mmax))
#
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/model_performance_small.png',dpi = 180)
# plt.show()
#
# index_model+=1
#
# f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
# for i,j in zip(range(len(p_chi_branch)),indexes_model):
# if total_list_scenario_name[j] ==total_list_scenario_name[0]:
# ax1.scatter(a_s_model[j],weight_model_Mmax[i],c=color_mfd[i],marker='_',s=10,alpha=0.4,linewidth=1)
# ax2.scatter(p_chi_branch[i],weight_model_Mmax[i],c=color_mfd[i],marker='_',s=10,alpha=0.4,linewidth=1)
# else:
# ax1.scatter(a_s_model[j],weight_model_Mmax[i],c=color_mfd[i],marker='|',s=10,alpha=0.4,linewidth=1)
# ax2.scatter(p_chi_branch[i],weight_model_Mmax[i],c=color_mfd[i],marker='|',s=10,alpha=0.4,linewidth=1)
# ax1.set_xlabel('NMS')
# ax2.set_xlabel('pvalue')
# ax1.set_ylabel('weight Mmax')
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/fig_1.png',dpi = 180)
# plt.close()
#
# f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
# for i,j in zip(range(len(p_chi_branch)),indexes_model):
# if total_list_scenario_name[j] ==total_list_scenario_name[0]:
# ax1.scatter(b_sample[j],p_chi_branch[i],c=color_mfd[i],marker='_',s=10,alpha=0.6,linewidth=1)
# ax2.scatter(m_Mmax[j],p_chi_branch[i],c=color_mfd[i],marker='_',s=10,alpha=0.6,linewidth=1)
# else:
# ax1.scatter(b_sample[j],p_chi_branch[i],c=color_mfd[i],marker='|',s=10,alpha=0.6,linewidth=1)
# ax2.scatter(m_Mmax[j],p_chi_branch[i],c=color_mfd[i],marker='|',s=10,alpha=0.6,linewidth=1)
# ax1.set_xlabel('b value')
# ax2.set_xlabel('Mmax')
# ax1.set_ylabel('p value')
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/fig_2.png',dpi = 180)
# plt.close()
# f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
# for i,j in zip(range(len(p_chi_branch)),indexes_model):
# final_pvalue = p_chi_branch[i]*weight_chi + weight_model_Mmax[i]*weight_Mmax
# if total_list_scenario_name[j] ==total_list_scenario_name[0]:
# ax1.scatter(b_sample[j],final_pvalue,c=color_mfd[i],marker='_',s=10,alpha=0.6,linewidth=1)
# ax2.scatter(m_Mmax[j],final_pvalue,c=color_mfd[i],marker='_',s=10,alpha=0.6,linewidth=1)
# else:
# ax1.scatter(b_sample[j],final_pvalue,c=color_mfd[i],marker='|',s=10,alpha=0.6,linewidth=1)
# ax2.scatter(m_Mmax[j],final_pvalue,c=color_mfd[i],marker='|',s=10,alpha=0.6,linewidth=1)
# ax1.set_xlabel('b value')
# ax2.set_xlabel('Mmax')
# ax1.set_ylabel('final p value (chi test, Mmax)')
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/fig_3.png',dpi = 180)
# plt.close()
#
#
# index_model = 0
# for model in Model_list:
# indexes_model = np.where(np.array(total_list_model) == model)[0]
# mfd_X = []
# for index in indexes_model :
# mfd = mega_mfd_cummulative[index]
# mfd_X.append(mfd)
# b_model = np.take(b_sample,indexes_model)
# Mmax_model = np.take(m_Mmax,indexes_model)
# as_model = np.take(a_s_model,indexes_model)
# if not os.path.exists(str(Run_name) + '/analysis/figures/sampling_analysis/'+model):
# os.makedirs(str(Run_name) + '/analysis/figures/sampling_analysis/'+model)
# mean_rate_catalog = np.array(catalog_cum_rate[index_model])#.mean(axis=0)
# #std_rate_catalog = np.std(np.array(catalog_cum_rate[index_model]),axis=0)
#
# err_rate = [] # line for models, column for magnitudes, ratio between the model and the mean rate of the catalog
# for mfd in mfd_X:
# err_rate_i = []
# for i in range(len(mean_rate_catalog)):
# err_rate_j = mfd[i]/mean_rate_catalog[i]-1.
# err_rate_i.append(err_rate_j)
# err_rate.append(err_rate_i)
#
# colors = ['royalblue','steelblue','powderblue','lightgreen','gold','darkorange','darkred']
# labels = ['<-1','-1<...<-0.5','-0.5<...<-0.2','-0.2<...<0.2','0.2<...<0.5','0.5<...<1','...>1']
#
# for index_i in range(int(len(bining_in_mag)/10.)):
# Mmax1, Mmax2, Mmax3, Mmax4, Mmax5, Mmax6, Mmax7 = [], [], [], [], [], [], []
# b1, b2, b3, b4, b5, b6, b7 = [], [], [], [], [], [], []
# index=0
# for err in np.array(err_rate)[:,index_i*10] :
# if err < -1. :
# Mmax1.append(Mmax_model[index])
# b1.append(b_model[index])
# elif err < -0.5 :
# Mmax2.append(Mmax_model[index])
# b2.append(b_model[index])
# elif err < - 0.2 :
# Mmax3.append(Mmax_model[index])
# b3.append(b_model[index])
# elif err < 0.2 :
# Mmax4.append(Mmax_model[index])
# b4.append(b_model[index])
# elif err < 0.5 :
# Mmax5.append(Mmax_model[index])
# b5.append(b_model[index])
# elif err < 1. :
# Mmax6.append(Mmax_model[index])
# b6.append(b_model[index])
# elif err > 1. :
# Mmax7.append(Mmax_model[index])
# b7.append(b_model[index])
# index+=1
# for color, label, b, Mmax in zip(colors, labels, [b1, b2, b3, b4, b5, b6, b7], [Mmax1, Mmax2, Mmax3, Mmax4, Mmax5, Mmax6, Mmax7]):
# plt.scatter(b,Mmax,c=color,s= 50 ,alpha=0.8,label=label)
# plt.title('Modeled rate / Catalog rate')
# plt.legend(loc=2,fontsize=6)
# plt.xlabel('b value')
# plt.ylabel('Mmax')
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/'+str(model)+'_b_Mmax_vs_error_M'+str(bining_in_mag[index_i*10])+'.png',dpi = 100)
# plt.close()
#
# colors = ['royalblue','steelblue','darkorange','red','darkred']
# labels = ['<10%','10%<...<30%','30%<...<50%','50%<...<70%','...>70%']
# Mmax1, Mmax2, Mmax3, Mmax4, Mmax5 = [], [], [], [], []
# b1, b2, b3, b4, b5 = [], [], [], [], []
# index=0
# for NMS in as_model :
# if NMS < 10. :
# Mmax1.append(Mmax_model[index])
# b1.append(b_model[index])
# elif NMS < 30. :
# Mmax2.append(Mmax_model[index])
# b2.append(b_model[index])
# elif NMS < 50. :
# Mmax3.append(Mmax_model[index])
# b3.append(b_model[index])
# elif NMS < 70. :
# Mmax4.append(Mmax_model[index])
# b4.append(b_model[index])
# elif NMS > 70. :
# Mmax5.append(Mmax_model[index])
# b5.append(b_model[index])
# index+=1
# for color, label, b, Mmax in zip(colors, labels, [b1, b2, b3, b4, b5], [Mmax1, Mmax2, Mmax3, Mmax4, Mmax5]):
# plt.scatter(b,Mmax,c=color,s= 50 ,alpha=0.8,label=label)
# plt.title('NMS is the model')
# plt.legend(loc=2,fontsize=6)
# plt.savefig(str(Run_name) + '/analysis/figures/sampling_analysis/'+model+'/'+str(model)+'_b_Mmax_vs_NMS.png',dpi = 100)
# plt.close()
#
#
#
file_LT_metrics.close()
| 48.496827 | 170 | 0.496055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18,558 | 0.34693 |
c3a514fac0e76b7ad6dcfdb91e567e8020f9a5ed | 1,481 | py | Python | tests/rimu_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | null | null | null | tests/rimu_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | 4 | 2020-03-24T17:59:43.000Z | 2021-06-02T00:48:53.000Z | tests/rimu_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | null | null | null | import json
import rimu
from rimu import options
def unexpectedError(_, message):
    """Callback stand-in for specs that expect no callback: always raises.

    Wired into RenderOptions.callback so that any unexpected callback from
    the renderer fails the test loudly instead of being silently recorded.
    """
    detail = f'unexpected callback: {message}'
    raise Exception(detail)
def test_render():
    """Smoke test: a plain paragraph renders to the expected HTML."""
    rendered = rimu.render('Hello World!')
    assert rendered == '<p>Hello World!</p>'
def test_jsonTests():
    """Run every spec from rimu-tests.json against the Python port.

    Each spec supplies an input document, render options, the expected HTML
    output, and (optionally) the callback messages the renderer must emit.
    Specs whose ``unsupported`` field lists ``py`` are skipped.
    """
    with open('./tests/rimu-tests.json') as f:
        data = json.load(f)
    for spec in data:
        description = spec['description']
        unsupported = 'py' in spec.get('unsupported', '')
        if unsupported:
            print(f'skipped unsupported: {description}')
            continue
        print(description)
        renderOptions = rimu.RenderOptions()
        renderOptions.safeMode = spec['options'].get('safeMode')
        renderOptions.htmlReplacement = spec['options'].get('htmlReplacement')
        renderOptions.reset = spec['options'].get('reset')
        msg = ''

        def callback(message: rimu.CallbackMessage):
            # Captured callback message.
            nonlocal msg
            msg += f'{message.type}: {message.text}\n'

        if spec['expectedCallback'] or unsupported:
            renderOptions.callback = callback
        else:
            # Callback should not occur, this will throw an error.
            renderOptions.callback = unexpectedError
        # Renamed from `input`: the original shadowed the builtin input().
        source = spec['input']
        result = rimu.render(source, renderOptions)
        assert result == spec['expectedOutput'], description
        if spec['expectedCallback']:
            assert msg.strip() == spec['expectedCallback']
| 32.911111 | 78 | 0.621877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.282917 |
c3a64f6638f226e40d6d359d037e5497039713b2 | 1,786 | py | Python | opti.py | ingwarr/tinypack | 489c121bfa16233e34a9f65e01fea982b9bfb12e | [
"Apache-2.0"
] | null | null | null | opti.py | ingwarr/tinypack | 489c121bfa16233e34a9f65e01fea982b9bfb12e | [
"Apache-2.0"
] | null | null | null | opti.py | ingwarr/tinypack | 489c121bfa16233e34a9f65e01fea982b9bfb12e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import urllib2
package_list = ['wget.tcz', 'python-pip.tcz', 'unzip.tcz', 'sudo.tcz',
'mksquashfs.tcz', 'gawk.tcz', 'genisoimage.tcz', 'qemu.tcz',
'pidgin.tcz']
serv_url = "http://distro.ibiblio.org/tinycorelinux/2.x/tcz/"
suffix = ".dep"
UP_SET = set(package_list)
deepness = 0
def file_exists(location):
request = urllib2.Request(location)
request.get_method = lambda: 'HEAD'
try:
urllib2.urlopen(request)
return True
except urllib2.HTTPError:
return False
def opendoor(localset):
print (deepness)
DOWN_SET_LOCAL = set()
locallist = list(localset)
for eat_em in locallist:
url = serv_url + eat_em + suffix
if file_exists(url):
file_link = urllib2.urlopen(url)
data = file_link.read()
deps = data.split()
for dep in deps:
if dep not in UP_SET:
UP_SET.add(dep)
DOWN_SET_LOCAL.add(dep)
package_list.append(dep)
elif dep not in DOWN_SET_LOCAL:
print (dep, " already in UP_SET ergo deepness should be"
"increased, now it ", deepness, " level")
package_list.remove(dep)
package_list.append(dep)
DOWN_SET_LOCAL.add(dep)
else:
print ("This package", dep, " already processed")
else:
print ("File not found (package", eat_em, "has no deps)")
return DOWN_SET_LOCAL
DOWN_SET = opendoor(UP_SET)
while not len(DOWN_SET) == 0:
deepness += 1
DOWN_SET = opendoor(DOWN_SET)
for packname in UP_SET:
print (packname)
print (package_list[::-1])
| 29.766667 | 76 | 0.56271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.19037 |
c3a9bec8e9fb76b1c6c4d5aba4fc8451334e8ec7 | 4,685 | py | Python | tests/test_04_dxf_high_level_structs/test_411_acds_data.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | 1 | 2021-06-05T09:15:15.000Z | 2021-06-05T09:15:15.000Z | tests/test_04_dxf_high_level_structs/test_411_acds_data.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | null | null | null | tests/test_04_dxf_high_level_structs/test_411_acds_data.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | null | null | null | # Copyright (c) 2014-2019, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.sections.acdsdata import AcDsDataSection
from ezdxf import DXFKeyError
from ezdxf.lldxf.tags import internal_tag_compiler, group_tags
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
@pytest.fixture
def section():
entities = group_tags(internal_tag_compiler(ACDSSECTION))
return AcDsDataSection(None, entities)
def test_loader(section):
assert 'ACDSDATA' == section.name.upper()
assert len(section.entities) > 0
def test_acds_record(section):
records = [entity for entity in section.entities if entity.dxftype() == 'ACDSRECORD']
assert len(records) > 0
record = records[0]
assert record.has_section('ASM_Data') is True
assert record.has_section('AcDbDs::ID') is True
assert record.has_section('mozman') is False
with pytest.raises(DXFKeyError):
_ = record.get_section('mozman')
asm_data = record.get_section('ASM_Data')
binary_data = (tag for tag in asm_data if tag.code == 310)
length = sum(len(tag.value) for tag in binary_data)
assert asm_data[2].value == length
def test_write_dxf(section):
result = TagCollector.dxftags(section)
expected = basic_tags_from_text(ACDSSECTION)
assert result[:-1] == expected
ACDSSECTION = """0
SECTION
2
ACDSDATA
70
2
71
6
0
ACDSSCHEMA
90
0
1
AcDb3DSolid_ASM_Data
2
AcDbDs::ID
280
10
91
8
2
ASM_Data
280
15
91
0
101
ACDSRECORD
95
0
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
0
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
1
1
AcDb_Thumbnail_Schema
2
AcDbDs::ID
280
10
91
8
2
Thumbnail_Data
280
15
91
0
101
ACDSRECORD
95
1
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
1
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
2
1
AcDbDs::TreatedAsObjectDataSchema
2
AcDbDs::TreatedAsObjectData
280
1
91
0
0
ACDSSCHEMA
90
3
1
AcDbDs::LegacySchema
2
AcDbDs::Legacy
280
1
91
0
0
ACDSSCHEMA
90
4
1
AcDbDs::IndexedPropertySchema
2
AcDs:Indexable
280
1
91
0
0
ACDSSCHEMA
90
5
1
AcDbDs::HandleAttributeSchema
2
AcDbDs::HandleAttribute
280
7
91
1
284
1
0
ACDSRECORD
90
0
2
AcDbDs::ID
280
10
320
339
2
ASM_Data
280
15
94
1088
310
414349532042696E61727946696C652855000000000000020000000C00000007104175746F6465736B204175746F434144071841534D203231392E302E302E3536303020556E6B6E6F776E071853756E204D61792020342031353A34373A3233203230313406000000000000F03F068DEDB5A0F7C6B03E06BBBDD7D9DF7CDB
310
3D0D0961736D6865616465720CFFFFFFFF04FFFFFFFF070C3231392E302E302E35363030110D04626F64790C0200000004FFFFFFFF0CFFFFFFFF0C030000000CFFFFFFFF0CFFFFFFFF110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C010000000C040000000C05
310
000000110D046C756D700C0600000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000C01000000110D0E6579655F726566696E656D656E740CFFFFFFFF04FFFFFFFF070567726964200401000000070374726904010000000704737572660400000000070361646A040000000007046772616404000000000709706F7374
310
636865636B0400000000070463616C6304010000000704636F6E760400000000070473746F6C06000000E001FD414007046E746F6C060000000000003E4007046473696C0600000000000000000708666C61746E6573730600000000000000000707706978617265610600000000000000000704686D617806000000000000
310
0000070667726964617206000000000000000007056D6772696404B80B0000070575677269640400000000070576677269640400000000070A656E645F6669656C6473110D0F7665727465785F74656D706C6174650CFFFFFFFF04FFFFFFFF0403000000040000000004010000000408000000110E067265665F76740E0365
310
79650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C030000000C040000000C05000000110D057368656C6C0C0800000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C090000000CFFFFFFFF0C03000000110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFF
310
FFFFFF0CFFFFFFFF0C070000000C040000000C05000000110D04666163650C0A00000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000CFFFFFFFF0C0B0000000B0B110E05666D6573680E036579650D066174747269620CFFFFFFFF04FFFFFFFF0C0C0000000CFFFFFFFF0C09000000110E05746F7275730D
310
07737572666163650CFFFFFFFF04FFFFFFFF0CFFFFFFFF131D7B018BA58BA7C0600EB0424970BC4000000000000000001400000000000000000000000000000000000000000000F03F065087D2E2C5418940066050CEE5F3CA644014000000000000F03F000000000000000000000000000000000B0B0B0B0B110E06726566
310
5F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0C0A0000000C090000000C040000000C05000000110E03456E640E026F660E0341534D0D0464617461
"""
| 17.416357 | 254 | 0.867449 | 0 | 0 | 0 | 0 | 135 | 0.028815 | 0 | 0 | 3,491 | 0.745144 |
c3ab1277636c41159f2aded4987b40def2cfa389 | 772 | py | Python | test_app.py | WindfallLabs/tkit | 43f9269f42963737c54c822593cd316efbacb0a1 | [
"MIT"
] | null | null | null | test_app.py | WindfallLabs/tkit | 43f9269f42963737c54c822593cd316efbacb0a1 | [
"MIT"
] | null | null | null | test_app.py | WindfallLabs/tkit | 43f9269f42963737c54c822593cd316efbacb0a1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
app_test.py
Tests the tkit.App class.
Author: Garin Wally; Oct 2017
License: MIT
"""
import tkit
if __name__ == "__main__":
    # Create app (title "Test App", 250x100 -- presumably pixels; confirm
    # against tkit.App's signature)
    test_app = tkit.App("Test App", 250, 100)
    # Create and customize menubar
    menubar = tkit.Menubar()
    menubar.add_menu("File")
    #test_menubar.menus["File"].add_action("Test", app.mainloop)
    menubar.menus["File"].add_action("Close", test_app.close)
    menubar.add_menu("Help")
    # "About" popup; show_info is passed as a callback, not called here
    menubar.menus["Help"].add_action(
        "About", tkit.Popup("About", "This program ...").show_info)
    # Add menubar to app (widgets are added in display order)
    test_app.add_widget(menubar)
    test_app.add_widget(tkit.BrowseFile())
    # Run it: the OK button collects widget values, then enter the Tk loop
    test_app.add_button("OK", test_app.cmd_collect_values)
    test_app.mainloop()
| 24.903226 | 67 | 0.664508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.426166 |
c3abcd7fbeade8ebed9989404add74f693222a5d | 3,652 | py | Python | services/storage/src/simcore_service_storage/models.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/storage/src/simcore_service_storage/models.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/storage/src/simcore_service_storage/models.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | """ Database models
"""
from typing import Tuple
import attr
import sqlalchemy as sa
from .settings import DATCORE_STR, SIMCORE_S3_ID, SIMCORE_S3_STR
#FIXME: W0611:Unused UUID imported from sqlalchemy.dialects.postgresql
#from sqlalchemy.dialects.postgresql import UUID
#FIXME: R0902: Too many instance attributes (11/7) (too-many-instance-attributes)
#pylint: disable=R0902
# Single MetaData registry collecting the table definitions of this module.
metadata = sa.MetaData()

# File meta data
# One row per stored file, keyed by file_uuid; mirrors the FileMetaData
# class below. All columns (including ids) are stored as strings.
file_meta_data = sa.Table(
    "file_meta_data", metadata,
    sa.Column("file_uuid", sa.String, primary_key=True),
    sa.Column("location_id", sa.String),
    sa.Column("location", sa.String),
    sa.Column("bucket_name", sa.String),
    sa.Column("object_name", sa.String),
    sa.Column("project_id", sa.String),
    sa.Column("project_name", sa.String),
    sa.Column("node_id", sa.String),
    sa.Column("node_name", sa.String),
    sa.Column("file_name", sa.String),
    sa.Column("user_id", sa.String),
    sa.Column("user_name", sa.String)
    # sa.Column("state", sa.String())
)
def _parse_datcore(file_uuid: str) -> Tuple[str, str]:
# we should have 12/123123123/111.txt
object_name = "invalid"
dataset_name = "invalid"
parts = file_uuid.split("/")
if len(parts) > 1:
dataset_name = parts[0]
object_name = "/".join(parts[1:])
return dataset_name, object_name
def _locations():
    """Return the list of known storage locations as dicts."""
    # TODO: so far this is hardcoded
    return [
        {"name": SIMCORE_S3_STR, "id": 0},
        {"name": DATCORE_STR, "id": 1},
    ]
def _location_from_id(location_id: str) -> str:
    """Map a location id ("0"/"1") to its display name, else "undefined"."""
    # TODO create a map to sync _location_from_id and _location_from_str
    id_to_name = {"0": SIMCORE_S3_STR, "1": DATCORE_STR}
    return id_to_name.get(location_id, "undefined")
def _location_from_str(location: str) -> str:
    """Map a location display name back to its id string, else "undefined"."""
    name_to_id = {SIMCORE_S3_STR: "0", DATCORE_STR: "1"}
    return name_to_id.get(location, "undefined")
@attr.s(auto_attribs=True)
class FileMetaData:
    """ This is a proposal, probably not everything is needed.
        It is actually an overkill

        file_name : display name for a file
        location_id : storage location id
        location : storage location display name
        project_id : project_id
        project_name : project display name
        node_id : node id
        node_name : display_name
        bucket_name : name of the bucket
        object_name : s3 object name = folder/folder/filename.ending
        user_id : user_id
        user_name : user_name
        file_uuid : unique identifier for a file:
        bucket_name/project_id/node_id/file_name = /bucket_name/object_name
        state: one of OK, UPLOADING, DELETED (not yet a field; see the
        commented-out "state" column in the table above)
    """
    file_uuid: str=""
    location_id: str=""
    location: str=""
    bucket_name: str=""
    object_name: str=""
    project_id: str=""
    project_name: str=""
    node_id: str=""
    node_name: str=""
    file_name: str=""
    user_id: str=""
    user_name: str=""
    def simcore_from_uuid(self, file_uuid: str, bucket_name: str):
        """Populate this record from a simcore file_uuid of the form
        ``project_id/node_id/file_name`` plus the target bucket name."""
        parts = file_uuid.split("/")
        # NOTE(review): `assert` is stripped under `python -O`, so in
        # optimized runs the `if` below is the only guard and malformed
        # uuids are silently ignored -- confirm this is intended.
        assert len(parts) == 3
        if len(parts) == 3:
            self.location = SIMCORE_S3_STR
            self.location_id = SIMCORE_S3_ID
            self.bucket_name = bucket_name
            # object_name is the full relative path inside the bucket
            self.object_name = "/".join(parts[:])
            self.file_name = parts[2]
            self.project_id = parts[0]
            self.node_id = parts[1]
            self.file_uuid = file_uuid
| 27.253731 | 81 | 0.625685 | 1,575 | 0.431271 | 0 | 0 | 1,602 | 0.438664 | 0 | 0 | 1,480 | 0.405257 |
c3adcc63188628231b35e310f6dced815f4b0a78 | 13,715 | py | Python | fairtools/utils.py | cmougan/FairShift | a065edb92da7c259a4f402eed3a81e36d65bd01d | [
"MIT"
] | null | null | null | fairtools/utils.py | cmougan/FairShift | a065edb92da7c259a4f402eed3a81e36d65bd01d | [
"MIT"
] | null | null | null | fairtools/utils.py | cmougan/FairShift | a065edb92da7c259a4f402eed3a81e36d65bd01d | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score
from category_encoders import MEstimateEncoder
import numpy as np
from collections import defaultdict
import os
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
def fit_predict(modelo, enc, data, target, test):
    """Fit an encoder+model pipeline on (data, target), predict on test."""
    pipeline = Pipeline(steps=[("encoder", enc), ("model", modelo)])
    pipeline.fit(data, target)
    return pipeline.predict(test)
def auc_group(model, data, y_true, dicc, group: str = "", min_samples: int = 50):
    """Append per-group ROC AUC scores (plus an overall "all" score) to dicc.

    Groups are the values of `data[group]` with more than `min_samples`
    rows. `dicc` maps group -> list of AUC scores; if empty, it is
    re-created as a defaultdict keyed by the detected groups.
    """
    frame = data.copy()
    frame["target"] = y_true
    counts = frame[group].value_counts()
    groups = counts[counts > min_samples].index.tolist() + ["all"]
    if len(dicc) == 0:
        dicc = defaultdict(list, {g: [] for g in groups})
    for g in groups:
        if g == "all":
            scores = model.predict_proba(data)[:, 1]
            dicc[g].append(roc_auc_score(y_true, scores))
        else:
            subset = frame[frame[group] == g]
            scores = model.predict_proba(subset.drop(columns="target"))[:, 1]
            dicc[g].append(roc_auc_score(subset["target"], scores))
    return dicc
def explain(xgb: bool = True):
    """
    Provide a SHAP explanation by fitting MEstimate and GBDT

    With ``xgb=True``: fits an MEstimateEncoder + GradientBoostingClassifier
    pipeline, draws a SHAP beeswarm plot, and returns the per-feature sum of
    absolute SHAP values. Otherwise: fits a logistic regression and returns
    its coefficients sorted descending.

    NOTE(review): this function references ``X_tr``, ``y_tr``, ``shap`` and
    ``LogisticRegression``, none of which are defined or imported in this
    module -- calling it as-is raises NameError. Presumably it was copied
    from a notebook; confirm and pass the data in explicitly.
    """
    if xgb:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", GradientBoostingClassifier())]
        )
        pipe.fit(X_tr, y_tr)
        explainer = shap.Explainer(pipe[1])
        # Explain the model on the encoded training features
        shap_values = explainer(pipe[:-1].transform(X_tr))
        shap.plots.beeswarm(shap_values)
        return pd.DataFrame(np.abs(shap_values.values), columns=X_tr.columns).sum()
    else:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", LogisticRegression())]
        )
        pipe.fit(X_tr, y_tr)
        coefficients = pd.concat(
            [pd.DataFrame(X_tr.columns), pd.DataFrame(np.transpose(pipe[1].coef_))],
            axis=1,
        )
        coefficients.columns = ["feat", "val"]
        return coefficients.sort_values(by="val", ascending=False)
def calculate_cm(true, preds):
    """
    Return the true positive rate (sensitivity/recall) of the first class
    of the confusion matrix between ``preds`` and ``true``.

    NOTE(review): the arguments are forwarded to ``confusion_matrix`` as
    ``(preds, true)``, i.e. ``preds`` occupies the ground-truth axis --
    confirm this ordering is intended before reusing per-class values.
    """
    # Obtain the confusion matrix
    cm = confusion_matrix(preds, true)

    # Per-class true positives and false negatives.
    # https://stackoverflow.com/questions/31324218/scikit-learn-how-to-obtain-true-positive-true-negative-false-positive-and-fal
    TP = np.diag(cm)
    FN = cm.sum(axis=1) - np.diag(cm)

    # Sensitivity, hit rate, recall, or true positive rate.
    # (TNR/PPV/NPV/FPR/FNR/FDR/ACC were previously computed here but never
    # used; that dead code has been removed -- the return value is unchanged.)
    TPR = TP / (TP + FN)
    return TPR[0]
def metric_calculator(
    modelo, data: pd.DataFrame, truth: pd.DataFrame, col: str, group1: str, group2: str
):
    """Difference of calculate_cm's metric between two groups of `col`.

    Fits nothing: `modelo` must already be trained. Returns
    metric(group1) - metric(group2).
    """
    labeled = data.copy()
    labeled["target"] = truth

    per_group = []
    for group in (group1, group2):
        # Filter features and ground truth for this group
        subset = data[data[col] == group]
        subset_truth = labeled[labeled[col] == group].target
        # Predict and score the group
        # NOTE(review): predictions are passed as calculate_cm's first
        # ("true") parameter, mirroring the original call order.
        preds = modelo.predict(subset)
        per_group.append(calculate_cm(preds, subset_truth))

    return per_group[0] - per_group[1]
def plot_rolling(data, roll_mean: int = 5, roll_std: int = 20):
    """Plot a rolling-mean line for each column of `data`.

    NOTE(review): ``plt`` (matplotlib.pyplot) is not imported in this
    module, so calling this raises NameError as-is. ``stand`` (the rolling
    5% quantile) is computed but unused because the fill_between line is
    commented out.
    """
    aux = data.rolling(roll_mean).mean().dropna()
    stand = data.rolling(roll_std).quantile(0.05, interpolation="lower").dropna()
    plt.figure()
    for col in data.columns:
        plt.plot(aux[col], label=col)
        # plt.fill_between(aux.index,(aux[col] - stand[col]),(aux[col] + stand[col]),# color="b",alpha=0.1,)
    plt.legend()
    plt.show()
def scale_output(data):
    """Return `data` standardized column-wise, keeping columns and index.

    NOTE(review): ``StandardScaler`` is not imported in this module --
    calling this raises NameError unless the import is added.
    """
    return pd.DataFrame(
        StandardScaler().fit_transform(data), columns=data.columns, index=data.index
    )
import numpy as np
def psi(expected, actual, buckettype="bins", buckets=10, axis=0):
    """Calculate the PSI (population stability index) across all variables

    Args:
       expected: numpy matrix of original values
       actual: numpy matrix of new values, same size as expected
       buckettype: type of strategy for creating buckets, bins splits into even splits, quantiles splits into quantile buckets
       buckets: number of quantiles to use in bucketing variables
       axis: axis by which variables are defined, 0 for vertical, 1 for horizontal

    Returns:
       psi_values: ndarray of psi values for each variable (a scalar for
       1-D input)

    Author:
       Matthew Burke
       github.com/mwburke
       worksofchart.com
    """

    def _psi(expected_array, actual_array, buckets):
        """Calculate the PSI for a single variable

        Args:
           expected_array: numpy array of original values
           actual_array: numpy array of new values, same size as expected
           buckets: number of percentile ranges to bucket the values into

        Returns:
           psi_value: calculated PSI value
        """

        def scale_range(input, min, max):
            # Linearly rescale `input` (in place) so that it spans [min, max].
            input += -(np.min(input))
            input /= np.max(input) / (max - min)
            input += min
            return input

        # Evenly spaced breakpoints over [0, 100] (percent scale).
        breakpoints = np.arange(0, buckets + 1) / (buckets) * 100

        if buckettype == "bins":
            # Equal-width bins spanning the range of the *expected* values.
            breakpoints = scale_range(
                breakpoints, np.min(expected_array), np.max(expected_array)
            )
        elif buckettype == "quantiles":
            # Bin edges at the empirical percentiles of the expected values.
            breakpoints = np.stack(
                [np.percentile(expected_array, b) for b in breakpoints]
            )

        # Fraction of each population falling into each bucket.
        expected_percents = np.histogram(expected_array, breakpoints)[0] / len(
            expected_array
        )
        actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array)

        def sub_psi(e_perc, a_perc):
            """Calculate the actual PSI value from comparing the values.
            Update the actual value to a very small number if equal to zero
            """
            if a_perc == 0:
                a_perc = 0.0001
            if e_perc == 0:
                e_perc = 0.0001

            value = (e_perc - a_perc) * np.log(e_perc / a_perc)
            return value

        # Use the builtin sum(): passing a generator to np.sum() is
        # deprecated (it silently fell back to the Python implementation).
        psi_value = sum(
            sub_psi(expected_percents[i], actual_percents[i])
            for i in range(0, len(expected_percents))
        )

        return psi_value

    if len(expected.shape) == 1:
        # Scalar result path: the size-1 array below is replaced wholesale
        # by the scalar returned from _psi in the loop.
        psi_values = np.empty(len(expected.shape))
    else:
        # NOTE(review): for 2-D input this sizes the result by
        # expected.shape[axis] while indexing the *other* axis below --
        # only consistent for square matrices; inherited from upstream.
        psi_values = np.empty(expected.shape[axis])

    for i in range(0, len(psi_values)):
        if len(psi_values) == 1:
            psi_values = _psi(expected, actual, buckets)
        elif axis == 0:
            psi_values[i] = _psi(expected[:, i], actual[:, i], buckets)
        elif axis == 1:
            psi_values[i] = _psi(expected[i, :], actual[i, :], buckets)

    return psi_values
def loop_estimators(
    estimator_set: list,
    normal_data,
    normal_data_ood,
    shap_data,
    shap_data_ood,
    performance_ood,
    target,
    state: str,
    error_type: str,
    target_shift: bool = False,
    output_path: str = "",
):
    """
    Loop through the estimators and calculate the performance for each.

    For every estimator, up to three meta-models are evaluated:
    "Only Data" (raw features), "Only Shap" (SHAP values) and
    "Data + Shap" (their concatenation); the latter two are skipped when
    ``target_shift`` is True. Each fit records the in-distribution test
    MAE (``error_te``) and the OOD MAE against ``performance_ood``
    (``error_ood``). Results are written to
    ``<output_path or "results">/<state>_<error_type>.csv``.
    """
    res = []
    for estimator in estimator_set:
        ## ONLY DATA
        X_train, X_test, y_train, y_test = train_test_split(
            normal_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(normal_data_ood),
            np.nan_to_num(list(performance_ood.values())),
        )
        res.append([state, error_type, estimator, "Only Data", error_te, error_ood])

        if not target_shift:
            #### ONLY SHAP
            X_train, X_test, y_train, y_test = train_test_split(
                shap_data, target, test_size=0.33, random_state=42
            )
            estimator_set[estimator].fit(X_train, y_train)
            error_te = mean_absolute_error(
                estimator_set[estimator].predict(X_test), y_test
            )
            error_ood = mean_absolute_error(
                estimator_set[estimator].predict(shap_data_ood),
                np.nan_to_num(list(performance_ood.values())),
            )
            res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])

            ### SHAP + DATA
            X_train, X_test, y_train, y_test = train_test_split(
                pd.concat([shap_data, normal_data], axis=1),
                target,
                test_size=0.33,
                random_state=42,
            )
            estimator_set[estimator].fit(X_train, y_train)
            error_te = mean_absolute_error(
                estimator_set[estimator].predict(X_test), y_test
            )
            error_ood = mean_absolute_error(
                estimator_set[estimator].predict(
                    pd.concat([shap_data_ood, normal_data_ood], axis=1)
                ),
                np.nan_to_num(list(performance_ood.values())),
            )
            res.append(
                [state, error_type, estimator, "Data + Shap", error_te, error_ood]
            )

    # Fix: honour the output_path argument (it was previously ignored); the
    # empty-string default keeps the original behaviour ("results" folder).
    folder = os.path.join(output_path or "results", state + "_" + error_type + ".csv")
    columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
    pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
def loop_estimators_fairness(
    estimator_set: list,
    normal_data,
    normal_data_ood,
    target_shift,
    target_shift_ood,
    shap_data,
    shap_data_ood,
    performance_ood,
    target,
    state: str,
    error_type: str,
    output_path: str = "",
):
    """
    Loop through the estimators and calculate the performance for each.
    Particular fairness case.

    Five meta-models are evaluated per estimator: "Only Data", "Only Shap",
    "Only Target", "Data+Target" and "Data+Target+Shap". Each fit records
    the in-distribution test MAE (``error_te``) and the OOD MAE against
    ``performance_ood`` (``error_ood``). Results are written to
    ``<output_path or "results">/<state>_<error_type>.csv``.
    """
    res = []
    for estimator in estimator_set:
        ## ONLY DATA
        X_train, X_test, y_train, y_test = train_test_split(
            normal_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(normal_data_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Data", error_te, error_ood])

        #### ONLY SHAP
        X_train, X_test, y_train, y_test = train_test_split(
            shap_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(shap_data_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])

        #### ONLY TARGET
        X_train, X_test, y_train, y_test = train_test_split(
            target_shift, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(target_shift_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Target", error_te, error_ood])

        #### TARGET + DISTRIBUTION
        X_train, X_test, y_train, y_test = train_test_split(
            pd.concat([target_shift, normal_data], axis=1),
            target,
            test_size=0.33,
            random_state=42,
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(
                pd.concat([target_shift_ood, normal_data_ood], axis=1)
            ),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Data+Target", error_te, error_ood])

        ### SHAP + DATA + TARGET
        X_train, X_test, y_train, y_test = train_test_split(
            pd.concat([shap_data, normal_data, target_shift], axis=1),
            target,
            test_size=0.33,
            random_state=42,
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(
                pd.concat([shap_data_ood, normal_data_ood, target_shift_ood], axis=1)
            ),
            np.nan_to_num(performance_ood),
        )
        res.append(
            [state, error_type, estimator, "Data+Target+Shap", error_te, error_ood]
        )

    # Fix: honour the output_path argument (it was previously ignored); the
    # empty-string default keeps the original behaviour ("results" folder).
    folder = os.path.join(output_path or "results", state + "_" + error_type + ".csv")
    columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
    pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
| 34.373434 | 129 | 0.612468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,500 | 0.182282 |
c3ae4ba07955d7042040649cd40e5479799ed431 | 1,929 | py | Python | py/py_0527_randomized_binary_search.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0527_randomized_binary_search.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0527_randomized_binary_search.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 527: Randomized Binary Search
# https://projecteuler.net/problem=527
#
# A secret integer t is selected at random within the range 1 ≤ t ≤ n. The
# goal is to guess the value of t by making repeated guesses, via integer g.
# After a guess is made, there are three possible outcomes, in which it will
# be revealed that either g < t, g = t, or g > t. Then the process can repeat
# as necessary. Normally, the number of guesses required on average can be
# minimized with a binary search: Given a lower bound L and upper bound H
# (initialized to L = 1 and H = n), let g = ⌊(L+H)/2⌋ where ⌊⋅⌋ is the integer
# floor function. If g = t, the process ends. Otherwise, if g < t, set L =
# g+1, but if g > t instead, set H = g−1. After setting the new bounds, the
# search process repeats, and ultimately ends once t is found. Even if t can
# be deduced without searching, assume that a search will be required anyway
# to confirm the value. Your friend Bob believes that the standard binary
# search is not that much better than his randomized variant: Instead of
# setting g = ⌊(L+H)/2⌋, simply let g be a random integer between L and H,
# inclusive. The rest of the algorithm is the same as the standard binary
# search. This new search routine will be referred to as a random binary
# search. Given that 1 ≤ t ≤ n for random t, let B(n) be the expected number
# of guesses needed to find t using the standard binary search, and let R(n)
# be the expected number of guesses needed to find t using the random binary
# search. For example, B(6) = 2. 33333333 and R(6) = 2. 71666667 when rounded
# to 8 decimal places. Find R(1010) − B(1010) rounded to 8 decimal places.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder for the (unimplemented) solution; ignores `n`."""
    return None
if __name__ == '__main__':
    # Arguments for the timing harness: presumably problem size n, number of
    # iterations i, and the Project Euler problem id used for labelling.
    n = 1000
    i = 10000
    prob_id = 527
    # `dummy` is a stub -- the actual solution is not implemented yet.
    timed.caller(dummy, n, i, prob_id)
| 48.225 | 79 | 0.708657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,787 | 0.914066 |
c3af215854abd4c90a3e4ca46f61b27b360bf47e | 242 | py | Python | NOV_17_2020/quiz2.py | refeed/PAlgoritmaTRPLA | e0c79c1d57bee0869e2344651718e8cf053c035f | [
"MIT"
] | null | null | null | NOV_17_2020/quiz2.py | refeed/PAlgoritmaTRPLA | e0c79c1d57bee0869e2344651718e8cf053c035f | [
"MIT"
] | null | null | null | NOV_17_2020/quiz2.py | refeed/PAlgoritmaTRPLA | e0c79c1d57bee0869e2344651718e8cf053c035f | [
"MIT"
] | null | null | null | '''
No 2. Buatlah fungsi tanpa pengembalian nilai, yaitu fungsi segitigabintang.
Misal, jika dipanggil dg segitigabintang(4), keluarannya :
*
**
***
****
'''
def segitigabintang(baris):
    """Print a left-aligned star triangle with `baris` rows.

    Row k (1-based) contains k asterisks. Returns nothing.
    """
    for row in range(1, baris + 1):
        print('*' * row)
| 18.615385 | 77 | 0.64876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.665289 |
c3b1a63ea55b50ea0b3fa5da2920a8938c6a980f | 2,402 | py | Python | simple/game_loop_process.py | loyalgarlic/snakepit-game | 5721f688d78a1e3f5f9ef7b82e8d0b9591373863 | [
"Unlicense"
] | 124 | 2016-06-01T16:02:12.000Z | 2022-03-04T09:40:03.000Z | simple/game_loop_process.py | AqZyy1998/snake-multi-online | b75712dce46314b350c363dda959a1e2dbc278bf | [
"Unlicense"
] | 8 | 2016-07-07T11:23:44.000Z | 2020-03-28T21:27:19.000Z | simple/game_loop_process.py | AqZyy1998/snake-multi-online | b75712dce46314b350c363dda959a1e2dbc278bf | [
"Unlicense"
] | 53 | 2016-06-20T00:30:54.000Z | 2021-11-10T04:57:39.000Z | import asyncio
from aiohttp import web
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Queue, Process
import os
from time import sleep
async def handle(request):
    """Serve the static index.html page."""
    # Fix: use a context manager so the file handle is closed promptly;
    # the original left it open until garbage collection.
    with open("index.html", 'rb') as index:
        content = index.read()
    return web.Response(body=content, content_type='text/html')
# Shared condition used to broadcast game-loop ticks to websocket handlers.
tick = asyncio.Condition()
async def wshandler(request):
    """Websocket handler: echoes received key codes back to the client and
    relays game-loop tick notifications."""
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    recv_task = None
    tick_task = None
    while 1:
        # Re-arm whichever future was consumed on the previous iteration.
        if not recv_task:
            recv_task = asyncio.ensure_future(ws.receive())
        if not tick_task:
            # Acquire the condition's lock before waiting; Condition.wait()
            # releases it while suspended and re-acquires it on wake-up.
            await tick.acquire()
            tick_task = asyncio.ensure_future(tick.wait())
        # Race the two futures; handle whichever completes first.
        done, pending = await asyncio.wait(
            [recv_task,
             tick_task],
            return_when=asyncio.FIRST_COMPLETED)
        if recv_task in done:
            msg = recv_task.result()
            # NOTE(review): msg.tp / web.MsgType and un-awaited send_str are
            # pre-3.0 aiohttp APIs -- confirm the pinned aiohttp version.
            if msg.tp == web.MsgType.text:
                print("Got message %s" % msg.data)
                ws.send_str("Pressed key code: {}".format(msg.data))
            elif msg.tp == web.MsgType.close or\
                msg.tp == web.MsgType.error:
                break
            recv_task = None
        if tick_task in done:
            ws.send_str("game loop ticks")
            # wait() re-acquired the lock on notification; release it here.
            tick.release()
            tick_task = None
    return ws
def game_loop(asyncio_loop):
    """Blocking loop run in a worker thread: consumes results produced by a
    child process and notifies all websocket handlers via the `tick`
    condition on the main thread's event loop."""
    # coroutine to run in main thread
    async def notify():
        # Wake every handler currently blocked in tick.wait().
        await tick.acquire()
        tick.notify_all()
        tick.release()
    queue = Queue()
    # function to run in a different process
    def worker():
        while 1:
            print("doing heavy calculation in process {}".format(os.getpid()))
            sleep(1)
            queue.put("calculation result")
    Process(target=worker).start()
    while 1:
        # blocks this thread but not main thread with event loop
        result = queue.get()
        print("getting {} in process {}".format(result, os.getpid()))
        # Schedule notify() on the main thread's loop and wait for it.
        task = asyncio.run_coroutine_threadsafe(notify(), asyncio_loop)
        task.result()
# Run the blocking game_loop in a worker thread so the asyncio event loop in
# the main thread stays responsive.
asyncio_loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
asyncio_loop.run_in_executor(executor, game_loop, asyncio_loop)

# Routes: websocket endpoint plus the static index page.
app = web.Application()
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
| 27.295455 | 78 | 0.620316 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.527893 | 319 | 0.132806 |
c3b2b4a92adbd876be0d44bbd3070bbdf63242b5 | 41,018 | py | Python | elex/api/models.py | adamsimp/elex | fe2987c1fec1476ce98f9a6b8b067b3d95434a26 | [
"Apache-2.0"
] | null | null | null | elex/api/models.py | adamsimp/elex | fe2987c1fec1476ce98f9a6b8b067b3d95434a26 | [
"Apache-2.0"
] | null | null | null | elex/api/models.py | adamsimp/elex | fe2987c1fec1476ce98f9a6b8b067b3d95434a26 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ujson as json
import datetime
from elex.api import maps
from elex.api import utils
from collections import OrderedDict
from dateutil import parser as dateutil_parser
PCT_PRECISION = 6
class APElection(utils.UnicodeMixin):
    """
    Base class for most objects.
    Includes handy methods for transformation of data and AP connections
    """
    def set_state_fields_from_reportingunits(self):
        """
        Set state fields.
        Derives statepostal/statename from the last reporting unit, if any.
        """
        if len(self.reportingunits) > 0:
            self.statepostal = str(self.reportingunits[-1].statepostal)
            self.statename = str(maps.STATE_ABBR[self.statepostal])
    def set_reportingunits(self):
        """
        Set reporting units.
        If this race has reportingunits,
        serialize them into objects.
        Copies this object's fields onto each raw reporting-unit dict
        before constructing the ReportingUnit.
        """
        reportingunits_obj = []
        for r in self.reportingunits:
            # Don't obliterate good data with possibly empty fields.
            SKIP_FIELDS = ['candidates', 'statepostal', 'statename']
            for k, v in self.__dict__.items():
                if k not in SKIP_FIELDS:
                    r[k] = v
            obj = ReportingUnit(**r)
            reportingunits_obj.append(obj)
        setattr(self, 'reportingunits', reportingunits_obj)
    def set_polid(self):
        """
        Set politician id.
        If `polid` is zero, set to `None`.
        """
        if self.polid == "0":
            self.polid = None
    def set_reportingunitids(self):
        """
        Set reporting unit ID.
        Per Tracy / AP developers, if the level is
        "state", the reportingunitid is always 1.
        Existing ids get prefixed with the level instead (see below).
        """
        if not self.reportingunitid:
            if self.level == "state":
                # Adds the statepostal to make these reportinunitids unique even for
                # national elections. See #0278.
                setattr(self, 'reportingunitid', 'state-%s-1' % self.statepostal)
        else:
            """
            Fixes #226 reportingunitids recycled across levels.
            """
            setattr(self, 'reportingunitid', '%s-%s' % (
                self.level, self.reportingunitid))
    def set_candidates(self):
        """
        Set candidates.
        If this thing (race, reportingunit) has candidates,
        serialize them into objects.
        """
        candidate_objs = []
        for c in self.candidates:
            # Propagate this object's fields into the raw candidate dict,
            # but never overwrite the candidate's own vote count.
            for k, v in self.__dict__.items():
                if k != 'votecount':
                    c.setdefault(k, v)
            # Office id "I" marks ballot initiatives/measures.
            c['is_ballot_measure'] = False
            if hasattr(self, 'officeid') and getattr(self, 'officeid') == 'I':
                c['is_ballot_measure'] = True
            if getattr(self, 'statepostal', None) is not None:
                statename = maps.STATE_ABBR[self.statepostal]
                c['statename'] = statename
            obj = CandidateReportingUnit(**c)
            candidate_objs.append(obj)
        self.candidates = candidate_objs
    def serialize(self):
        """
        Serialize the object. Should be implemented in all classes that
        inherit from :class:`APElection`.
        Should return an OrderedDict.
        """
        raise NotImplementedError
class Candidate(APElection):
    """
    Canonical representation of a
    candidate. Should be globally unique
    for this election, across races.
    """
    def __init__(self, **kwargs):
        """
        :param id:
            Global identifier.
        :param unique_id:
            Unique identifier.
        :param candidateid:
            Candidate ID (raw AP).
        :param first:
            First name.
        :param last:
            Last name.
        :param party:
            Party.
        :param polid:
            Politician ID.
        :param polnum:
            Politician number.
        """
        self.id = None
        self.unique_id = None
        # Copy the raw AP fields straight off the keyword arguments.
        for attr_name in (
            'ballotorder',
            'candidateid',
            'first',
            'last',
            'party',
            'polid',
            'polnum',
        ):
            setattr(self, attr_name, kwargs.get(attr_name, None))

        self.set_polid()
        self.set_unique_id()
        self.set_id_field()

    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        keys = (
            'id',
            'candidateid',
            'ballotorder',
            'first',
            'last',
            'party',
            'polid',
            'polnum',
        )
        return OrderedDict((key, getattr(self, key)) for key in keys)

    def set_unique_id(self):
        """
        Generate and set unique id.

        Candidate IDs are not globally unique; AP National Politician IDs
        (NPIDs, ``polid``) are, but only national-level candidates have
        them -- everyone else gets '0'. The unique key is therefore the
        NAME of the identifier in use plus the identifier itself.
        Verified this is globally unique with Tracy.
        """
        if self.polid:
            self.unique_id = 'polid-%s' % self.polid
        else:
            self.unique_id = 'polnum-%s' % self.polnum

    def set_id_field(self):
        """
        Set id to `<unique_id>`.
        """
        self.id = self.unique_id
class BallotMeasure(APElection):
    """
    Canonical representation of a ballot measure.
    Ballot measures are similar to :class:`Candidate` objects, but represent
    a position on a ballot such as "In favor of" or "Against" for ballot
    measures such as a referendum.
    """
    def __init__(self, **kwargs):
        """
        :param id:
            Global identifier.
        :param unique_id:
            Unique identifier.
        :param ballotorder:
            Order on ballot (e.g. first, second, etc).
        :param candidateid:
            Candidate idenfitier (raw AP).
        :param description:
            Description.
        :param polid:
            Politician ID.
        :param polnum:
            Politician number.
        :param seatname:
            Seat name.
        """
        self.id = None
        self.unique_id = None
        # Copy the raw AP fields straight off the keyword arguments.
        for attr_name in (
            'ballotorder',
            'candidateid',
            'description',
            'electiondate',
            'last',
            'polid',
            'polnum',
            'seatname',
        ):
            setattr(self, attr_name, kwargs.get(attr_name, None))

        self.set_polid()
        self.set_unique_id()
        self.set_id_field()

    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        keys = (
            'id',
            'candidateid',
            'ballotorder',
            'description',
            'electiondate',
            'last',
            'polid',
            'polnum',
            'seatname',
        )
        return OrderedDict((key, getattr(self, key)) for key in keys)

    def set_unique_id(self):
        """
        Generate and set unique id as ``<electiondate>-<candidateid>``
        (candidate ids alone are not globally unique across elections).
        """
        self.unique_id = "%s-%s" % (self.electiondate, self.candidateid)

    def set_id_field(self):
        """
        Set id to `<unique_id>`.
        """
        self.id = self.unique_id
class CandidateReportingUnit(APElection):
    """
    Canonical representation of an
    AP candidate. Note: A candidate can
    be a person OR a ballot measure.
    """
    def __init__(self, **kwargs):
        """Accepts both raw AP camelCase keys (e.g. ``candidateID``,
        ``voteCount``) and their lowercase equivalents; when both are
        present, the lowercase value wins."""
        self.id = None
        self.unique_id = None
        self.electiondate = kwargs.get('electiondate', None)
        self.first = kwargs.get('first', None)
        self.last = kwargs.get('last', None)
        self.party = kwargs.get('party', None)
        self.candidateid = kwargs.get('candidateID', None)
        if kwargs.get('candidateid', None):
            self.candidateid = kwargs['candidateid']
        self.polid = kwargs.get('polID', None)
        if kwargs.get('polid', None):
            self.polid = kwargs['polid']
        self.ballotorder = kwargs.get('ballotOrder', None)
        if kwargs.get('ballotorder', None):
            self.ballotorder = kwargs['ballotorder']
        self.polnum = kwargs.get('polNum', None)
        if kwargs.get('polnum', None):
            self.polnum = kwargs['polnum']
        self.votecount = kwargs.get('voteCount', 0)
        if kwargs.get('votecount', None):
            self.votecount = kwargs['votecount']
        self.votepct = kwargs.get('votePct', 0.0)
        if kwargs.get('votepct', None):
            self.votepct = kwargs['votepct']
        self.delegatecount = kwargs.get('delegateCount', 0)
        if kwargs.get('delegatecount', None):
            self.delegatecount = kwargs['delegatecount']
        # AP's 'winner' flag: 'X' means declared winner, 'R' means runoff.
        self.winner = kwargs.get('winner', False) == 'X'
        self.runoff = kwargs.get('winner', False) == 'R'
        self.is_ballot_measure = kwargs.get('is_ballot_measure', None)
        self.level = kwargs.get('level', None)
        self.reportingunitname = kwargs.get('reportingunitname', None)
        self.reportingunitid = kwargs.get('reportingunitid', None)
        self.fipscode = kwargs.get('fipscode', None)
        self.lastupdated = kwargs.get('lastupdated', None)
        self.precinctsreporting = kwargs.get('precinctsreporting', 0)
        self.precinctstotal = kwargs.get('precinctstotal', 0)
        self.precinctsreportingpct = kwargs.get('precinctsreportingpct', 0.0)
        self.uncontested = kwargs.get('uncontested', False)
        self.test = kwargs.get('test', False)
        self.raceid = kwargs.get('raceid', None)
        self.statepostal = kwargs.get('statepostal', None)
        self.statename = kwargs.get('statename', None)
        self.racetype = kwargs.get('racetype', None)
        self.racetypeid = kwargs.get('racetypeid', None)
        self.officeid = kwargs.get('officeid', None)
        self.officename = kwargs.get('officename', None)
        self.seatname = kwargs.get('seatname', None)
        self.description = kwargs.get('description', None)
        self.seatnum = kwargs.get('seatnum', None)
        self.initialization_data = kwargs.get('initialization_data', None)
        self.national = kwargs.get('national', False)
        self.incumbent = kwargs.get('incumbent', False)
        # NOTE(review): electtotal only checks the lowercase key while
        # electwon only checks camelCase 'electWon' -- unlike the dual-case
        # lookups above. Confirm against the AP feed key casing.
        self.electtotal = kwargs.get('electtotal', 0)
        self.electwon = kwargs.get('electWon', 0)
        self.set_polid()
        self.set_unique_id()
        self.set_id_field()
    def set_id_field(self):
        """
        Set id to `<raceid>-<uniqueid>-<reportingunitid>`.
        """
        self.id = "%s-%s-%s" % (
            self.raceid,
            self.unique_id,
            self.reportingunitid
        )
    def set_unique_id(self):
        """
        Generate and set unique id.
        Candidate IDs are not globally unique.
        AP National Politician IDs (NPIDs or polid)
        are unique, but only national-level
        candidates have them; everyone else gets '0'.
        The unique key, then, is the NAME of the ID
        we're using and then the ID itself.
        Verified this is globally unique with Tracy Lewis.
        Ballot measures simply reuse their candidateid.
        """
        if not self.is_ballot_measure:
            if self.polid:
                self.unique_id = 'polid-{0}'.format(self.polid)
            else:
                self.unique_id = 'polnum-{0}'.format(self.polnum)
        else:
            self.unique_id = self.candidateid
    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        return OrderedDict((
            ('id', self.id),
            ('raceid', self.raceid),
            ('racetype', self.racetype),
            ('racetypeid', self.racetypeid),
            ('ballotorder', self.ballotorder),
            ('candidateid', self.candidateid),
            ('description', self.description),
            ('delegatecount', self.delegatecount),
            ('electiondate', self.electiondate),
            ('electtotal', self.electtotal),
            ('electwon', self.electwon),
            ('fipscode', self.fipscode),
            ('first', self.first),
            ('incumbent', self.incumbent),
            ('initialization_data', self.initialization_data),
            ('is_ballot_measure', self.is_ballot_measure),
            ('last', self.last),
            ('lastupdated', self.lastupdated),
            ('level', self.level),
            ('national', self.national),
            ('officeid', self.officeid),
            ('officename', self.officename),
            ('party', self.party),
            ('polid', self.polid),
            ('polnum', self.polnum),
            ('precinctsreporting', self.precinctsreporting),
            ('precinctsreportingpct', self.precinctsreportingpct),
            ('precinctstotal', self.precinctstotal),
            ('reportingunitid', self.reportingunitid),
            ('reportingunitname', self.reportingunitname),
            ('runoff', self.runoff),
            ('seatname', self.seatname),
            ('seatnum', self.seatnum),
            ('statename', self.statename),
            ('statepostal', self.statepostal),
            ('test', self.test),
            ('uncontested', self.uncontested),
            ('votecount', self.votecount),
            ('votepct', round(self.votepct, PCT_PRECISION)),
            ('winner', self.winner),
        ))
    def __unicode__(self):
        # Ballot measures display their "party" (the position); people get
        # "First Last (PARTY)". Winners are suffixed with "(w)".
        if self.is_ballot_measure:
            payload = "%s" % self.party
        else:
            payload = "%s %s (%s)" % (self.first, self.last, self.party)
        if self.winner:
            payload += ' (w)'
        return "{}".format(payload)
class ReportingUnit(APElection):
    """
    Canonical representation of a single
    level of reporting.

    Kwargs may arrive either in camelCase (straight from the AP API)
    or in lowercase (from cached/normalized data); a truthy lowercase
    value always wins over its camelCase twin.
    """
    def __init__(self, **kwargs):
        self.electiondate = kwargs.get('electiondate', None)
        self.statepostal = kwargs.get('statePostal', None)
        if kwargs.get('statepostal', None):
            self.statepostal = kwargs['statepostal']
        self.statename = kwargs.get('stateName', None)
        if kwargs.get('statename', None):
            self.statename = kwargs['statename']
        self.level = kwargs.get('level', None)
        self.reportingunitname = kwargs.get('reportingunitName', None)
        if kwargs.get('reportingunitname', None):
            self.reportingunitname = kwargs['reportingunitname']
        self.reportingunitid = kwargs.get('reportingunitID', None)
        if kwargs.get('reportingunitid', None):
            self.reportingunitid = kwargs['reportingunitid']
        self.fipscode = kwargs.get('fipsCode', None)
        if kwargs.get('fipscode', None):
            self.fipscode = kwargs['fipscode']
        self.lastupdated = kwargs.get('lastUpdated', None)
        if kwargs.get('lastupdated', None):
            self.lastupdated = kwargs['lastupdated']
        self.precinctsreporting = kwargs.get('precinctsReporting', 0)
        if kwargs.get('precinctsreporting', None):
            self.precinctsreporting = kwargs['precinctsreporting']
        self.precinctstotal = kwargs.get('precinctsTotal', 0)
        if kwargs.get('precinctstotal', None):
            self.precinctstotal = kwargs['precinctstotal']
        # camelCase pct arrives as 0-100 and is normalized to 0-1;
        # a lowercase pct is assumed to be pre-normalized and used as-is.
        self.precinctsreportingpct = \
            kwargs.get('precinctsReportingPct', 0.0) * 0.01
        if kwargs.get('precinctsreportingpct', None):
            self.precinctsreportingpct = kwargs['precinctsreportingpct']
        self.uncontested = kwargs.get('uncontested', False)
        self.test = kwargs.get('test', False)
        self.raceid = kwargs.get('raceid', None)
        self.racetype = kwargs.get('racetype', None)
        self.racetypeid = kwargs.get('racetypeid', None)
        self.officeid = kwargs.get('officeid', None)
        self.officename = kwargs.get('officename', None)
        self.seatname = kwargs.get('seatname', None)
        self.description = kwargs.get('description', None)
        self.seatnum = kwargs.get('seatnum', None)
        self.initialization_data = kwargs.get('initialization_data', False)
        self.national = kwargs.get('national', False)
        self.candidates = kwargs.get('candidates', [])
        self.votecount = kwargs.get('votecount', 0)
        self.electtotal = kwargs.get('electTotal', 0)
        self.set_level()
        self.pad_fipscode()
        self.set_reportingunitids()
        self.set_candidates()
        self.set_votecount()
        self.set_candidate_votepct()
        self.set_id_field()

    def __unicode__(self):
        template = "%s %s (%s %% reporting)"
        if self.reportingunitname:
            return template % (
                self.statepostal,
                self.reportingunitname,
                self.precinctsreportingpct
            )
        return template % (
            self.statepostal,
            self.level,
            self.precinctsreportingpct
        )

    def pad_fipscode(self):
        # FIPS codes are fixed-width 5 digits; restore leading zeros
        # lost to integer parsing upstream.
        if self.fipscode:
            self.fipscode = self.fipscode.zfill(5)

    def set_level(self):
        """
        New England states report at the township level.
        Every other state reports at the county level.
        So, change the level from 'subunit' to the
        actual level name, either 'state' or 'township'.
        """
        if self.statepostal in maps.FIPS_TO_STATE.keys():
            if self.level == 'subunit':
                self.level = 'township'
        if self.level == 'subunit':
            self.level = 'county'

    def set_id_field(self):
        """
        Set id to `<reportingunitid>`.
        """
        self.id = self.reportingunitid

    def set_votecount(self):
        """
        Set vote count.

        Contested races get the sum of their candidates' vote counts
        (left untouched if there are no candidates); uncontested races
        get None.
        """
        if not self.uncontested:
            # Single pass over the candidates; the original version
            # re-summed the entire list once per candidate.
            if self.candidates:
                self.votecount = sum(c.votecount for c in self.candidates)
        else:
            self.votecount = None

    def set_candidate_votepct(self):
        """
        Set vote percentage for each candidate.
        """
        if not self.uncontested:
            for c in self.candidates:
                try:
                    c.votepct = float(c.votecount) / float(self.votecount)
                except ZeroDivisionError:
                    # No votes cast yet; leave votepct at its default.
                    pass

    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        # NOTE: the original literal repeated the lastupdated /
        # statename / statepostal pairs; duplicates are dropped here
        # (they were silently collapsed by OrderedDict anyway).
        return OrderedDict((
            ('id', self.id),
            ('reportingunitid', self.reportingunitid),
            ('reportingunitname', self.reportingunitname),
            ('description', self.description),
            ('electiondate', self.electiondate),
            ('electtotal', self.electtotal),
            ('fipscode', self.fipscode),
            ('initialization_data', self.initialization_data),
            ('lastupdated', self.lastupdated),
            ('level', self.level),
            ('national', self.national),
            ('officeid', self.officeid),
            ('officename', self.officename),
            ('precinctsreporting', self.precinctsreporting),
            ('precinctsreportingpct', self.precinctsreportingpct),
            ('precinctstotal', self.precinctstotal),
            ('raceid', self.raceid),
            ('racetype', self.racetype),
            ('racetypeid', self.racetypeid),
            ('seatname', self.seatname),
            ('seatnum', self.seatnum),
            ('statename', self.statename),
            ('statepostal', self.statepostal),
            ('test', self.test),
            ('uncontested', self.uncontested),
            ('votecount', self.votecount),
        ))
class Race(APElection):
    """
    Canonical representation of a single
    race, which is a seat in a political geography
    within a certain election.
    """
    def __init__(self, **kwargs):
        self.electiondate = kwargs.get('electiondate', None)
        self.statepostal = kwargs.get('statePostal', None)
        self.statename = kwargs.get('stateName', None)
        self.test = kwargs.get('test', False)
        self.raceid = kwargs.get('raceID', None)
        self.racetype = kwargs.get('raceType', None)
        self.racetypeid = kwargs.get('raceTypeID', None)
        self.officeid = kwargs.get('officeID', None)
        self.officename = kwargs.get('officeName', None)
        self.party = kwargs.get('party', None)
        self.seatname = kwargs.get('seatName', None)
        self.description = kwargs.get('description', None)
        self.seatnum = kwargs.get('seatNum', None)
        self.uncontested = kwargs.get('uncontested', False)
        self.lastupdated = kwargs.get('lastUpdated', None)
        self.initialization_data = kwargs.get('initialization_data', False)
        self.national = kwargs.get('national', False)
        self.candidates = kwargs.get('candidates', [])
        self.reportingunits = kwargs.get('reportingUnits', [])
        self.is_ballot_measure = False
        self.set_id_field()
        if self.initialization_data:
            self.set_candidates()
        else:
            self.set_reportingunits()
            self.set_state_fields_from_reportingunits()
            self.set_new_england_counties()

    def _county_townships(self, fipscode):
        """
        Return the township-level reporting units for *fipscode*,
        excluding "Mail Ballots C.D." pseudo-units.
        """
        return [
            r for r in self.reportingunits
            if r.level == 'township' and
            "Mail Ballots C.D." not in r.reportingunitname and
            r.fipscode == fipscode
        ]

    def set_new_england_counties(self):
        """
        AP reports New England states at the township level only; roll
        those townships up into synthesized county-level ReportingUnits
        appended to ``self.reportingunits``.
        """
        if self.statepostal not in maps.FIPS_TO_STATE.keys():
            return
        counties = {}
        for c in maps.FIPS_TO_STATE[self.statepostal].keys():
            # Filter once per county; the original re-applied the same
            # filter inside every aggregate below.
            reporting_units = self._county_townships(c)
            if not reporting_units:
                # No townships for this county (this is the ME bug from
                # the ME primary); nothing to roll up.
                continue
            # Seed the county from the first township, then override
            # the fields we know at the county level.
            counties[c] = dict(reporting_units[0].__dict__)
            counties[c]['level'] = 'county'
            counties[c]['statepostal'] = self.statepostal
            counties[c]['candidates'] = {}
            counties[c]['reportingunitname'] = \
                maps.FIPS_TO_STATE[self.statepostal][c]
            counties[c]['reportingunitid'] = "%s-%s" % (
                self.statepostal,
                c
            )
            # Declaratively sum the precincts / votes for this county.
            counties[c]['precinctstotal'] = sum(
                r.precinctstotal for r in reporting_units
            )
            counties[c]['precinctsreporting'] = sum(
                r.precinctsreporting for r in reporting_units
            )
            pcts_tot = float(counties[c]['precinctstotal'])
            pcts_rep = float(counties[c]['precinctsreporting'])
            try:
                counties[c]['precinctsreportingpct'] = pcts_rep / pcts_tot
            except ZeroDivisionError:
                counties[c]['precinctsreportingpct'] = 0.0
            counties[c]['votecount'] = sum(
                int(r.votecount or 0) for r in reporting_units
            )
            for r in reporting_units:
                # Set up candidates for each county.
                for cru in r.candidates:
                    if not counties[c]['candidates'].get(cru.unique_id, None):
                        d = dict(cru.__dict__)
                        d['level'] = 'county'
                        d['reportingunitid'] = "%s-%s" % (
                            self.statepostal,
                            c
                        )
                        fips_dict = maps.FIPS_TO_STATE[self.statepostal]
                        d['reportingunitname'] = fips_dict[c]
                        counties[c]['candidates'][cru.unique_id] = d
                    else:
                        d = counties[c]['candidates'][cru.unique_id]
                        d['votecount'] += cru.votecount
                        d['precinctstotal'] += cru.precinctstotal
                        d['precinctsreporting'] += cru.precinctsreporting
                        try:
                            d['precinctsreportingpct'] = (
                                float(d['precinctsreporting']) /
                                float(d['precinctstotal'])
                            )
                        except ZeroDivisionError:
                            d['precinctsreportingpct'] = 0.0
        try:
            for ru in counties.values():
                ru['candidates'] = ru['candidates'].values()
                ru['statename'] = str(maps.STATE_ABBR[ru['statepostal']])
                r = ReportingUnit(**ru)
                self.reportingunits.append(r)
        except AttributeError:
            # Sometimes, the dict is empty because we have no townships
            # to roll up into counties. Issue #228.
            pass

    def set_id_field(self):
        """
        Set id to `<raceid>`.
        """
        self.id = self.raceid

    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        return OrderedDict((
            ('id', self.id),
            ('raceid', self.raceid),
            ('racetype', self.racetype),
            ('racetypeid', self.racetypeid),
            ('description', self.description),
            ('electiondate', self.electiondate),
            ('initialization_data', self.initialization_data),
            ('is_ballot_measure', self.is_ballot_measure),
            ('lastupdated', self.lastupdated),
            ('national', self.national),
            ('officeid', self.officeid),
            ('officename', self.officename),
            ('party', self.party),
            ('seatname', self.seatname),
            ('seatnum', self.seatnum),
            ('statename', self.statename),
            ('statepostal', self.statepostal),
            ('test', self.test),
            ('uncontested', self.uncontested)
        ))

    def __unicode__(self):
        if self.racetype:
            return "%s %s" % (self.racetype, self.officename)
        return "%s" % self.officename
class Elections():
    """
    Holds a collection of election objects
    """
    def get_elections(self, datafile=None):
        """
        Get election data from API or cached file.

        :param datafile:
            If datafile is specified, use instead of making an API call.
        """
        if datafile:
            with open(datafile) as stream:
                raw_elections = list(json.load(stream).get('elections'))
        else:
            raw_elections = list(
                utils.api_request('/elections').json().get('elections')
            )
        # Developer API expects to give lowercase kwargs to an Election
        # object, but initializing from the API / file will have camelCase
        # kwargs instead. So, for just this object, lowercase the kwargs.
        payload = []
        for entry in raw_elections:
            init_dict = OrderedDict(
                (key.lower(), value) for key, value in entry.items()
            )
            payload.append(Election(**init_dict))
        return payload

    def get_next_election(self, datafile=None, electiondate=None):
        """
        Get next election. By default, will be relative to the current date.

        :param datafile:
            If datafile is specified, use instead of making an API call.
        :param electiondate:
            If electiondate is specified, gets the next election
            after the specified date.
        """
        if electiondate:
            reference = dateutil_parser.parse(electiondate)
        else:
            reference = datetime.datetime.now()
        next_election = None
        lowest_diff = None
        for election in self.get_elections(datafile=datafile):
            days_out = (
                dateutil_parser.parse(election.electiondate) - reference
            ).days
            if days_out <= 0:
                # Already past (or today); not "next".
                continue
            if lowest_diff is None or days_out < lowest_diff:
                next_election = election
                lowest_diff = days_out
        return next_election
class Election(APElection):
    """
    Canonical representation of an election on
    a single date.
    """
    def __init__(self, **kwargs):
        """
        :param electiondate:
            The date of the election.
        :param datafile:
            A cached data file.
        """
        self.id = None
        self.resultstype = kwargs.get('resultstype', False)
        self.electiondate = kwargs.get('electiondate', None)
        self.national = kwargs.get('national', None)
        self.api_key = kwargs.get('api_key', None)
        self.parsed_json = kwargs.get('parsed_json', None)
        self.next_request = kwargs.get('next_request', None)
        self.datafile = kwargs.get('datafile', None)
        self.resultslevel = kwargs.get('resultslevel', 'ru')
        self.setzerocounts = kwargs.get('setzerocounts', False)
        self.raceids = kwargs.get('raceids', [])
        self.officeids = kwargs.get('officeids', None)
        self.set_id_field()
        self._response = None

    def __unicode__(self):
        return "{}".format(self.electiondate)

    def set_id_field(self):
        """
        Set id to `<electiondate>`.
        """
        self.id = self.electiondate

    def get(self, path, **params):
        r"""
        Farms out request to api_request.
        Could possibly handle choosing which
        parser backend to use -- API-only right now.
        Also the entry point for recording, which
        is set via environment variable.

        :param path:
            API url path.
        :param \**params:
            A dict of optional parameters to be included in API request.
        """
        self._response = utils.api_request('/elections/{0}'.format(path), **params)
        return self._response.json()

    def get_uniques(self, candidate_reporting_units):
        """
        Parses out unique candidates and ballot measures
        from a list of CandidateReportingUnit objects.
        """
        unique_candidates = OrderedDict()
        unique_ballot_measures = OrderedDict()
        for c in candidate_reporting_units:
            if c.is_ballot_measure:
                if not unique_ballot_measures.get(c.candidateid, None):
                    unique_ballot_measures[c.candidateid] = BallotMeasure(
                        last=c.last,
                        candidateid=c.candidateid,
                        polid=c.polid,
                        ballotorder=c.ballotorder,
                        polnum=c.polnum,
                        seatname=c.seatname,
                        description=c.description,
                        electiondate=self.electiondate
                    )
            else:
                if not unique_candidates.get(c.candidateid, None):
                    unique_candidates[c.candidateid] = Candidate(
                        first=c.first,
                        last=c.last,
                        candidateid=c.candidateid,
                        polid=c.polid,
                        ballotorder=c.ballotorder,
                        polnum=c.polnum,
                        party=c.party
                    )
        candidates = list(unique_candidates.values())
        ballot_measures = list(unique_ballot_measures.values())
        return candidates, ballot_measures

    def get_raw_races(self, **params):
        r"""
        Convenience method for fetching races by election date.
        Accepts an AP formatting date string, e.g., YYYY-MM-DD.
        Accepts any number of URL params as kwargs.

        If datafile passed to constructor, the file will be used instead of
        making an HTTP request.

        :param \**params:
            A dict of additional parameters to pass to API.
            Ignored if `datafile` was passed to the constructor.
        """
        if self.datafile:
            with open(self.datafile, 'r') as readfile:
                payload = json.loads(readfile.read())
            self.electiondate = payload.get('electionDate')
            return payload
        return self.get(self.electiondate, **params)

    def get_race_objects(self, parsed_json):
        """
        Get parsed race objects.

        When the first race carries candidates, the payload is
        initialization data and each race is flagged accordingly.
        ``self.raceids``, when non-empty, filters the races in BOTH
        code paths; the original initialization-data branch had an
        if/else with identical arms that silently ignored the filter.

        :param parsed_json:
            Dict of parsed AP election JSON.
        """
        races = parsed_json['races']
        if not races:
            return []
        initialization_data = bool(races[0].get('candidates', None))
        payload = []
        for r in races:
            if self.raceids and r['raceID'] not in self.raceids:
                continue
            if initialization_data:
                r['initialization_data'] = True
            payload.append(Race(**r))
        return payload

    def get_units(self, race_objs):
        """
        Parses out races, reporting_units,
        and candidate_reporting_units in a
        single loop over the race objects.

        :param race_objs:
            A list of top-level Race objects.
        """
        races = []
        reporting_units = []
        candidate_reporting_units = []
        for race in race_objs:
            race.electiondate = self.electiondate
            if not race.initialization_data:
                for unit in race.reportingunits:
                    unit.electiondate = self.electiondate
                    for candidate in unit.candidates:
                        if candidate.is_ballot_measure:
                            race.is_ballot_measure = True
                        candidate.electiondate = self.electiondate
                        candidate_reporting_units.append(candidate)
                    # Candidates now live in candidate_reporting_units.
                    del unit.candidates
                    reporting_units.append(unit)
            else:
                for candidate in race.candidates:
                    if candidate.is_ballot_measure:
                        race.is_ballot_measure = True
                    candidate.electiondate = self.electiondate
                    candidate_reporting_units.append(candidate)
            # Common tail (was duplicated in both branches).
            del race.candidates
            del race.reportingunits
            races.append(race)
        return races, reporting_units, candidate_reporting_units

    def serialize(self):
        """
        Implements :meth:`APElection.serialize()`.
        """
        return OrderedDict((
            ('id', self.id),
            ('electiondate', self.electiondate),
            ('resultstype', self.resultstype)
        ))

    def _get_units_for(self, **params):
        """
        Fetch raw races with *params*, parse them, and return the
        ``(races, reporting_units, candidate_reporting_units)`` triple.
        Shared by all of the result properties below, which previously
        each repeated this three-step boilerplate.
        """
        raw_races = self.get_raw_races(**params)
        race_objs = self.get_race_objects(raw_races)
        return self.get_units(race_objs)

    @property
    def races(self):
        """
        Return list of race objects.
        """
        races, _, _ = self._get_units_for(
            omitResults=True,
            level="ru",
            resultsType=self.resultstype,
            national=self.national,
            officeID=self.officeids,
            apiKey=self.api_key
        )
        return races

    @property
    def reporting_units(self):
        """
        Return list of reporting unit objects.
        """
        _, reporting_units, _ = self._get_units_for(
            omitResults=False,
            level="ru",
            resultsType=self.resultstype,
            national=self.national,
            officeID=self.officeids,
            apiKey=self.api_key
        )
        return reporting_units

    @property
    def candidate_reporting_units(self):
        """
        Return list of candidate reporting unit objects.
        """
        _, _, candidate_reporting_units = self._get_units_for(
            omitResults=True,
            level="ru",
            resultsType=self.resultstype,
            national=self.national,
            officeID=self.officeids,
            apiKey=self.api_key
        )
        return candidate_reporting_units

    @property
    def results(self):
        """
        Return list of candidate reporting unit objects with results.
        """
        _, _, candidate_reporting_units = self._get_units_for(
            omitResults=False,
            level=self.resultslevel,
            setzerocounts=self.setzerocounts,
            resultsType=self.resultstype,
            national=self.national,
            officeID=self.officeids,
            apiKey=self.api_key
        )
        return candidate_reporting_units

    @property
    def candidates(self):
        """
        Return list of candidate objects with results.
        """
        _, _, candidate_reporting_units = self._get_units_for(
            omitResults=True,
            level="ru",
            resultsType=self.resultstype,
            national=self.national,
            officeID=self.officeids,
            apiKey=self.api_key
        )
        candidates, _ = self.get_uniques(candidate_reporting_units)
        return candidates

    @property
    def ballot_measures(self):
        """
        Return list of ballot measure objects with results.

        Note: intentionally does NOT pass officeID, matching the
        original request shape.
        """
        _, _, candidate_reporting_units = self._get_units_for(
            omitResults=True,
            level="ru",
            resultsType=self.resultstype,
            national=self.national,
            apiKey=self.api_key
        )
        _, ballot_measures = self.get_uniques(candidate_reporting_units)
        return ballot_measures
| 35.269132 | 95 | 0.544493 | 40,739 | 0.993198 | 0 | 0 | 3,593 | 0.087596 | 0 | 0 | 12,204 | 0.297528 |
c3b37ff598b9916778a7dde772f21314904e9f2f | 1,308 | py | Python | Python/dataset_info.py | ashwinvis/augieretal_jfm_2019_shallow_water | 88d97c2bd5df0795ca636306c1d795ef1d3a8949 | [
"CC-BY-4.0"
] | 1 | 2019-08-23T11:06:53.000Z | 2019-08-23T11:06:53.000Z | Python/dataset_info.py | ashwinvis/augieretal_jfm_2019_shallow_water | 88d97c2bd5df0795ca636306c1d795ef1d3a8949 | [
"CC-BY-4.0"
] | 1 | 2019-08-23T13:00:31.000Z | 2019-08-23T13:00:31.000Z | Python/dataset_info.py | ashwinvis/augieretal_jfm_2019_shallow_water | 88d97c2bd5df0795ca636306c1d795ef1d3a8949 | [
"CC-BY-4.0"
] | null | null | null | # coding: utf-8
"""Preview dataset content without extracting."""
import os
import itertools
from pathlib import Path
from zipfile import ZipFile
from concurrent.futures import ThreadPoolExecutor as Pool
import hashlib
# Directory containing the zipped dataset, relative to this file.
cwd = Path(__file__).parent / "dataset"


def ls(pattern):
    """Return sorted paths under the dataset directory matching *pattern*.

    Replaces the original ``ls = lambda ...`` assignment (PEP 8 E731).
    """
    return sorted(cwd.glob(pattern))
def all_files(prefix="W"):
    """Yield all archives for *prefix*: single-digit first, then double-digit."""
    single_digit = ls(f"{prefix}[0-9].zip")
    double_digit = ls(f"{prefix}[0-9][0-9].zip")
    return itertools.chain(single_digit, double_digit)
def md5(filename):
    """Return the hex MD5 digest of *filename*'s contents.

    Reads in 8 KiB chunks so large archives are never loaded whole.
    The original wrapped ``digest.update`` in a pointless nested
    function and shadowed the function's own name with a local.
    """
    digest = hashlib.md5()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()
def info(filename):
    """Return (zip basename, first top-level directory) without extracting."""
    with ZipFile(filename) as archive:
        zip_name = os.path.basename(archive.filename)
        # First and only directory inside the archive.
        top_dir = os.path.split(archive.namelist()[0])[0]
        # md5(filename) checksum is slow; deliberately omitted.
        # Uncomment to see all contents:
        # archive.printdir()
    return (zip_name, top_dir)
if __name__ == "__main__":
    # Print a sorted "zipname directory" line per archive, one group
    # per prefix, previewing contents in parallel without extracting.
    for prefix in ("W", "WL"):
        with Pool() as pool:
            details = pool.map(info, all_files(prefix))
        lines = sorted(" ".join(entry) for entry in details)
        print("\n".join(lines))
        print()
c3b3fb64872cf127ceebbdc36e7d35ef4a1b48d5 | 1,472 | py | Python | tests/test_reference.py | momojohobo/wecs | 2129c8095e8fe1bdea38762e393ef637438c9655 | [
"BSD-3-Clause"
] | null | null | null | tests/test_reference.py | momojohobo/wecs | 2129c8095e8fe1bdea38762e393ef637438c9655 | [
"BSD-3-Clause"
] | null | null | null | tests/test_reference.py | momojohobo/wecs | 2129c8095e8fe1bdea38762e393ef637438c9655 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from fixtures import world
from wecs.core import UID
from wecs.core import NoSuchUID
from wecs.core import Component
@Component()
class Reference:
    """Component that stores a reference to another entity by its UID."""
    uid: UID
def test_user_defined_names(world):
    # An explicitly supplied name must be stored on the entity's UID.
    named = world.create_entity(name="foo")
    assert "foo" == named._uid.name
def test_automatic_names(world):
    # An entity created without a name still gets a non-empty one.
    anonymous = world.create_entity()
    assert anonymous._uid.name
def test_automatic_unique_names(world):
    # Two anonymous entities must never share an auto-generated name.
    names = {world.create_entity()._uid.name for _ in range(2)}
    assert len(names) == 2
# This test looks trivial, but it pins down UID identity semantics;
# it will matter more once serialization is implemented.
def test_uid():
    # Each UID instance is a distinct, non-equal object.
    first, second = UID(), UID()
    assert first is not second
    assert first != second
def test_reference():
    # The original test constructed a Reference and asserted nothing;
    # also check that the component keeps the UID it was given.
    uid = UID()
    c = Reference(uid=uid)
    assert c.uid is uid
def test_resolving_reference(world):
    # A Reference component's UID must resolve back to the entity it names.
    target = world.create_entity()
    source = world.create_entity()
    source.add_component(Reference(uid=target._uid))
    world.flush_component_updates()
    resolved = world.get_entity(source.get_component(Reference).uid)
    assert resolved is target
def test_resolving_dangling_reference(world):
    # Resolving a reference to a destroyed entity must raise NoSuchUID.
    target = world.create_entity()
    source = world.create_entity()
    source.add_component(Reference(uid=target._uid))
    target.destroy()
    world.flush_component_updates()
    with pytest.raises(NoSuchUID):
        world.get_entity(source.get_component(Reference).uid)
c3b5d97a6b2a3a566084a5a85a04ad3f65b6b305 | 5,268 | py | Python | vizsgaremek/test_conduit_logged_in.py | femese/conduit | 3ab5cc6a3b37e28d7712c2780f62a8091df2fad5 | [
"MIT"
] | null | null | null | vizsgaremek/test_conduit_logged_in.py | femese/conduit | 3ab5cc6a3b37e28d7712c2780f62a8091df2fad5 | [
"MIT"
] | null | null | null | vizsgaremek/test_conduit_logged_in.py | femese/conduit | 3ab5cc6a3b37e28d7712c2780f62a8091df2fad5 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from pages.home_page import HomePage
from pages.profile_page import ProfilePage
from pages.login_page import LoginPage
from pages.registration_page import RegistrationPage
from pages.article_page import ArticlePage
from pages.new_article_page import NewArticlePage
from pages.navigation_bar import NavigationBar
import pytest
import csv
# Chrome runs headless; excluding the "enable-logging" switch suppresses
# extra driver log output.
browser_options = Options()
browser_options.add_experimental_option("excludeSwitches", ["enable-logging"])
browser_options.headless = True
# Base URL of the locally running Conduit application under test.
URL = 'http://localhost:1667'
class Test_Conduit_Logged_In:
    """UI tests that require an authenticated session on the Conduit app."""

    def setup_method(self, method):
        # Start a fresh browser and log in with the test account
        # before every test.
        self.browser = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
        self.browser.maximize_window()
        self.browser.get(URL)
        self.homepage = HomePage(driver=self.browser)
        self.homepage.login_button.click()
        login_page = LoginPage(driver=self.browser)
        login_page.fill_login_details('teszt@teszt.com', 'Teszt1teszt')
        login_page.signin_button.click()

    def teardown_method(self, method):
        self.browser.close()

    def test_one_article(self):
        """A newly published article shows the body text it was created with."""
        self.homepage = HomePage(driver=self.browser)
        self.homepage.logout_button.find()
        self.homepage.article_button.click()
        new_article_page = NewArticlePage(driver=self.browser)
        new_article_page.title_input.send_text_to_input("Title")
        new_article_page.summary_input.send_text_to_input("Summary")
        new_article_page.main_body_input.send_text_to_input("Main article")
        new_article_page.tags_input.send_text_to_input("nonsense")
        new_article_page.publish_button.click()
        article_page = ArticlePage(driver=self.browser)
        assert article_page.main_textfield.text() == "Main article"

    def test_new_articles(self):
        """Publishing one article per CSV row grows the paginator."""
        number_of_paginator = len(self.homepage.page_list_buttons)
        # `with` closes the CSV deterministically; the original leaked
        # the handle returned by open().
        with open('./vizsgaremek/articles.csv', 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=';')
            for row in reader:
                navigation_bar = NavigationBar(driver=self.browser)
                navigation_bar.logout_button.find()
                navigation_bar.article_button.click()
                new_article_page = NewArticlePage(driver=self.browser)
                new_article_page.title_input.send_text_to_input(row[0])
                new_article_page.summary_input.send_text_to_input(row[1])
                new_article_page.main_body_input.send_text_to_input(row[2])
                new_article_page.tags_input.send_text_to_input(row[3])
                new_article_page.publish_button.click()
                navigation_bar.home_button.click()
        assert len(self.homepage.page_list_buttons) > number_of_paginator

    def test_page_list(self):
        """Clicking through every paginator page ends on the last page."""
        self.homepage = HomePage(driver=self.browser)
        for page_button in self.homepage.page_list_buttons:
            page_button.click()
            self.homepage = HomePage(driver=self.browser)
        assert self.homepage.is_last_page_active()

    def test_list_articles(self):
        """The home feed lists at least one article."""
        assert len(self.homepage.article_list) > 0

    def test_change_article(self):
        """Editing an article persists the modified body text."""
        article_page = self.create_article()
        original_text = article_page.main_textfield.text()
        # Hoisted: the original repeated this slice expression twice.
        expected = original_text[:len(original_text) // 2].strip() + "changed"
        article_page.edit_button.find()
        article_page.edit_button.click()
        article_edit_page = NewArticlePage(self.browser)
        article_edit_page.main_body_input.send_text_to_input(expected)
        article_edit_page.publish_button.click()
        assert article_page.main_textfield.text() == expected

    def test_save_to_file(self):
        """An article body saved to disk reads back identically."""
        self.homepage.profile_button.click()
        # (Removed an unused ProfilePage(...) local from the original.)
        self.homepage.article_list[0].click()
        article_page = ArticlePage(self.browser)
        txt_to_save = article_page.main_textfield.text()
        with open("./vizsgaremek/test.txt", "w") as txt_file:
            txt_file.write(txt_to_save)
        with open("./vizsgaremek/test.txt", "r") as txt_file:
            assert txt_file.read() == txt_to_save

    def test_delete_article(self):
        """Deleting an article shows the confirmation popup."""
        article_page = self.create_article()
        article_page.delete_button.find()
        article_page.delete_button.click()
        assert (article_page.delete_popup.text() == "Deleted the article. Going home...")

    def test_logout(self):
        """Logging out swaps the navigation back to the Sign in link."""
        self.homepage.logout_button.click()
        assert self.homepage.login_button.text().strip() == "Sign in"

    def create_article(self):
        """Publish a canned article and return its ArticlePage."""
        self.homepage.logout_button.find()
        self.homepage.article_button.click()
        new_article_page = NewArticlePage(driver=self.browser)
        new_article_page.title_input.send_text_to_input("Test article title")
        new_article_page.summary_input.send_text_to_input("Test article summary")
        new_article_page.main_body_input.send_text_to_input("Test article main text")
        new_article_page.tags_input.send_text_to_input("test, article, tags")
        new_article_page.publish_button.click()
        return ArticlePage(driver=self.browser)
c3b8ec1c8a770c6bded530d6c754e56f0b14dd73 | 2,818 | py | Python | tests/data.py | MathMagique/azure-cost-mon | 0a2a883eb587ee46bd166f8e23ab0b920eee961a | [
"MIT"
] | 65 | 2017-05-22T16:26:37.000Z | 2022-03-11T06:39:51.000Z | tests/data.py | MathMagique/azure-cost-mon | 0a2a883eb587ee46bd166f8e23ab0b920eee961a | [
"MIT"
] | 27 | 2017-05-02T07:48:34.000Z | 2021-03-31T09:53:56.000Z | tests/data.py | MathMagique/azure-cost-mon | 0a2a883eb587ee46bd166f8e23ab0b920eee961a | [
"MIT"
] | 17 | 2017-06-06T21:39:28.000Z | 2021-07-08T14:13:52.000Z | api_output_for_empty_months = """"Usage Data Extract",
"",
"AccountOwnerId","Account Name","ServiceAdministratorId","SubscriptionId","SubscriptionGuid","Subscription Name","Date","Month","Day","Year","Product","Meter ID","Meter Category","Meter Sub-Category","Meter Region","Meter Name","Consumed Quantity","ResourceRate","ExtendedCost","Resource Location","Consumed Service","Instance ID","ServiceInfo1","ServiceInfo2","AdditionalInfo","Tags","Store Service Identifier","Department Name","Cost Center","Unit Of Measure","Resource Group",'
"""
sample_data = [{u'AccountName': u'Platform',
u'AccountOwnerId': u'donald.duck',
u'AdditionalInfo': u'',
u'ConsumedQuantity': 23.0,
u'ConsumedService': u'Virtual Network',
u'CostCenter': u'1234',
u'Date': u'03/01/2017',
u'Day': 1,
u'DepartmentName': u'Engineering',
u'ExtendedCost': 0.499222332425423563466,
u'InstanceId': u'platform-vnet',
u'MeterCategory': u'Virtual Network',
u'MeterId': u'c90286c8-adf0-438e-a257-4468387df385',
u'MeterName': u'Hours',
u'MeterRegion': u'All',
u'MeterSubCategory': u'Gateway Hour',
u'Month': 3,
u'Product': u'Windows Azure Compute 100 Hrs Virtual Network',
u'ResourceGroup': u'',
u'ResourceLocation': u'All',
u'ResourceRate': 0.0304347826086957,
u'ServiceAdministratorId': u'',
u'ServiceInfo1': u'',
u'ServiceInfo2': u'',
u'StoreServiceIdentifier': u'',
u'SubscriptionGuid': u'abc3455ac-3feg-2b3c5-abe4-ec1111111e6',
u'SubscriptionId': 23467313421,
u'SubscriptionName': u'Production',
u'Tags': u'',
u'UnitOfMeasure': u'Hours',
u'Year': 2017},
{u'AccountName': u'Platform',
u'AccountOwnerId': u'donald.duck',
u'AdditionalInfo': u'',
u'ConsumedQuantity': 0.064076,
u'ConsumedService': u'Microsoft.Storage',
u'CostCenter': u'1234',
u'Date': u'03/01/2017',
u'Day': 1,
u'DepartmentName': u'Engineering',
u'ExtendedCost': 0.50000011123124314235234522345,
u'InstanceId': u'/subscriptions/abc3455ac-3feg-2b3c5-abe4-ec1111111e6/resourceGroups/my-group/providers/Microsoft.Storage/storageAccounts/ss7q3264domxo',
u'MeterCategory': u'Windows Azure Storage',
u'MeterId': u'd23a5753-ff85-4ddf-af28-8cc5cf2d3882',
u'MeterName': u'Standard IO - Page Blob/Disk (GB)',
u'MeterRegion': u'All Regions',
u'MeterSubCategory': u'Locally Redundant',
u'Month': 3,
u'Product': u'Locally Redundant Storage Standard IO - Page Blob/Disk',
u'ResourceGroup': u'my-group',
u'ResourceLocation': u'euwest',
u'ResourceRate': 0.0377320156152495,
u'ServiceAdministratorId': u'',
u'ServiceInfo1': u'',
u'ServiceInfo2': u'',
u'StoreServiceIdentifier': u'',
u'SubscriptionGuid': u'abc3455ac-3feg-2b3c5-abe4-ec1111111e6',
u'SubscriptionId': 23467313421,
u'SubscriptionName': u'Production',
u'Tags': None,
u'UnitOfMeasure': u'GB',
u'Year': 2017}]
| 42.059701 | 484 | 0.710078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,259 | 0.801632 |
c3b991b53eeef979bb9bae9ccb93646196a11001 | 644 | py | Python | python lab 5 & 6/l5q3.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | 1 | 2022-01-06T00:07:36.000Z | 2022-01-06T00:07:36.000Z | python lab 5 & 6/l5q3.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | null | null | null | python lab 5 & 6/l5q3.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
m1 = int(input("Enter no. of rows : \t"))
n1 = int(input("Enter no. of columns : \t"))
a = []
print("Enter Matrix 1:\n")
for i in range(n1):
row = list(map(int, input().split()))
a.append(row)
print(a)
m2 = int(n1)
print("\n Your Matrix 2 must have",n1,"rows and",m1,"columns \n")
n2 = int(m1)
b = []
for i in range(n2):
row = list(map(int, input().split()))
b.append(row)
print(b)
res = []
res = [ [ 0 for i in range(m2) ] for j in range(n1) ]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
res[i][j] += a[i][k] * b[k][j]
print(res) | 26.833333 | 66 | 0.534161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.215839 |
c3baff370d4baee282c8608fd09e9a208ebac3e2 | 596 | py | Python | ifitwala_ed/hr/doctype/training_feedback/training_feedback.py | mohsinalimat/ifitwala_ed | 8927695ed9dee36e56571c442ebbe6e6431c7d46 | [
"MIT"
] | 13 | 2020-09-02T10:27:57.000Z | 2022-03-11T15:28:46.000Z | ifitwala_ed/hr/doctype/training_feedback/training_feedback.py | mohsinalimat/ifitwala_ed | 8927695ed9dee36e56571c442ebbe6e6431c7d46 | [
"MIT"
] | 43 | 2020-09-02T07:00:42.000Z | 2021-07-05T13:22:58.000Z | ifitwala_ed/hr/doctype/training_feedback/training_feedback.py | mohsinalimat/ifitwala_ed | 8927695ed9dee36e56571c442ebbe6e6431c7d46 | [
"MIT"
] | 6 | 2020-10-19T01:02:18.000Z | 2022-03-11T15:28:47.000Z | # Copyright (c) 2021, ifitwala and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe import _
class TrainingFeedback(Document):
	"""Feedback recorded by an employee for a Training Event."""

	def validate(self):
		# The parent Training Event must be submitted before feedback is allowed.
		training_event = frappe.get_doc("Training Event", self.training_event)
		# NOTE(review): `status` is compared with the int 1 here; Frappe's
		# submitted-state check is conventionally `docstatus != 1`, while
		# `status` usually holds strings — confirm this comparison is intended.
		if training_event.status != 1:
			frappe.throw(_("{0} must first be submitted").format(_("Training Event")))
		# Look up this employee's attendance row on the event.
		# NOTE(review): `emp_event_details` is not used in the visible code.
		emp_event_details = frappe.db.get_value("Training Event Employee", {
			"parent": self.training_event,
			"employee": self.employee
		}, ["name", "attendance"], as_dict=True)
| 33.111111 | 77 | 0.746644 | 417 | 0.699664 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.365772 |
c3bc07408ee6c6e99e906c09ccb7a3d1f5fbf34d | 9,891 | py | Python | classo/compact_func.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 20 | 2020-10-01T08:18:08.000Z | 2021-07-30T09:21:23.000Z | classo/compact_func.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 14 | 2020-11-12T14:39:20.000Z | 2021-01-06T15:59:14.000Z | classo/compact_func.py | muellsen/classo | d86ddeb3fe3fd00b955340fbdf9bfd802b64f566 | [
"MIT"
] | 5 | 2020-09-27T20:22:01.000Z | 2021-01-17T18:41:50.000Z | import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo_general, h_lambdamax
"""
Classo and pathlasso are the main functions,
they can call every algorithm acording
to the method and formulation required
"""
# can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
def Classo(
    matrix,
    lam,
    typ="R1",
    meth="DR",
    rho=1.345,
    get_lambdamax=False,
    true_lam=False,
    e=None,
    rho_classification=-1.0,
    w=None,
    intercept=False,
    return_sigm=True,
):
    """Solve one constrained sparse regression/classification problem.

    Dispatches on ``typ`` to one of the supported formulations:
    "R1" (least squares, the default), "R2" (huber), "R3" (concomitant),
    "R4" (concomitant huber), "C1"/"C2" (classification, labels in {-1, 1}).

    Args:
        matrix: tuple ``(X, C, y)`` — design matrix, constraint matrix, response.
        lam: regularization level, as a fraction of ``lambdamax`` unless
            ``true_lam`` is True (then it is divided by ``lambdamax`` first).
        typ: formulation name, see above.
        meth: numerical method; silently replaced by a formulation-specific
            default when it is not valid for ``typ``.
        rho: huber parameter (R2/R4).
        get_lambdamax: if True, also return the computed ``lambdamax``.
        true_lam: interpret ``lam`` on the original (unnormalized) scale.
        e: scaling constant of the concomitant formulations (R3/R4).
        rho_classification: huberized-hinge parameter for C2.
        w: optional per-feature weights; X and C are divided by ``w`` and the
            solution is rescaled back at the end.
        intercept: if True, fit an unpenalized intercept (prepended to beta).
        return_sigm: if True, R3/R4 additionally return the scale ``s``.

    Returns:
        ``beta`` (np.ndarray); depending on the flags this may instead be
        ``(lambdamax, beta)``, ``(beta, s)`` or ``(lambdamax, beta, s)``.
    """
    # Feature weighting: solve in the rescaled space X/w, C/w; undone at the end.
    if w is not None:
        matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
    else:
        matrices = matrix
    X, C, y = matrices
    if typ == "R3":
        if intercept:
            # For R1 and R3 the intercept is simply
            # beta0 = ybar - Xbar.vdot(beta), so centering X and y reduces
            # the problem to the standard no-intercept form.
            Xbar, ybar = np.mean(X, axis=0), np.mean(y)
            matrices = (X - Xbar, C, y - ybar)
        if meth not in ["Path-Alg", "DR"]:
            meth = "DR"
        # e defaults to n/2; for any other e the data are rescaled by r so the
        # solver keeps its canonical scaling.
        if e is None or e == len(matrices[0]) / 2:
            r = 1.0
            pb = problem_R3(matrices, meth)
            e = len(matrices[0]) / 2
        else:
            r = np.sqrt(2 * e / len(matrices[0]))
            pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
        lambdamax = pb.lambdamax
        if true_lam:
            beta, s = Classo_R3(pb, lam / lambdamax)
        else:
            beta, s = Classo_R3(pb, lam)
        if intercept:
            # Reconstruct the intercept from the centered solution.
            betaO = ybar - np.vdot(Xbar, beta)
            beta = np.array([betaO] + list(beta))
    elif typ == "R4":
        if meth not in ["Path-Alg", "DR"]:
            meth = "DR"
        # Same rescaling trick as R3, with default e = n.
        if e is None or e == len(matrices[0]):
            r = 1.0
            pb = problem_R4(matrices, meth, rho, intercept=intercept)
            e = len(matrices[0])
        else:
            r = np.sqrt(e / len(matrices[0]))
            pb = problem_R4(
                (matrices[0] * r, matrices[1], matrices[2] * r),
                meth,
                rho / r,
                intercept=intercept,
            )
        lambdamax = pb.lambdamax
        if true_lam:
            beta, s = Classo_R4(pb, lam / lambdamax)
        else:
            beta, s = Classo_R4(pb, lam)
    elif typ == "R2":
        if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
            # NOTE(review): every other branch falls back to "DR"; "ODE" looks
            # like the legacy name of "Path-Alg" — confirm problem_R2 accepts it.
            meth = "ODE"
        pb = problem_R2(matrices, meth, rho, intercept=intercept)
        lambdamax = pb.lambdamax
        if true_lam:
            beta = Classo_R2(pb, lam / lambdamax)
        else:
            beta = Classo_R2(pb, lam)
    elif typ == "C2":
        # Classification requires binary labels in {-1, 1}.
        assert set(matrices[2]).issubset({1, -1})
        lambdamax = h_lambdamax(
            matrices, rho_classification, typ="C2", intercept=intercept
        )
        if true_lam:
            out = solve_path(
                matrices,
                lam / lambdamax,
                False,
                rho_classification,
                "C2",
                intercept=intercept,
            )
        else:
            out = solve_path(
                matrices, lam, False, rho_classification, "C2", intercept=intercept
            )
        # solve_path returns the whole path; keep the last point only.
        if intercept:
            beta0, beta = out[0][-1], out[1][-1]
            beta = np.array([beta0] + list(beta))
        else:
            beta = out[0][-1]
    elif typ == "C1":
        assert set(matrices[2]).issubset({1, -1})
        lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
        if true_lam:
            out = solve_path(
                matrices, lam / lambdamax, False, 0, "C1", intercept=intercept
            )
        else:
            out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept)
        if intercept:
            beta0, beta = out[0][-1], out[1][-1]
            beta = np.array([beta0] + list(beta))
        else:
            beta = out[0][-1]
    else:  # "R1" (least squares), the default formulation
        if intercept:
            # Same centering trick as in the R3 branch (see above).
            Xbar, ybar = np.mean(X, axis=0), np.mean(y)
            matrices = (X - Xbar, C, y - ybar)
        if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
            meth = "DR"
        pb = problem_R1(matrices, meth)
        lambdamax = pb.lambdamax
        if true_lam:
            beta = Classo_R1(pb, lam / lambdamax)
        else:
            beta = Classo_R1(pb, lam)
        if intercept:
            betaO = ybar - np.vdot(Xbar, beta)
            beta = np.array([betaO] + list(beta))
    # Undo the feature weighting (the intercept entry is not weighted).
    if w is not None:
        if intercept:
            beta[1:] = beta[1:] / w
        else:
            beta = beta / w
    # Assemble the return value according to the flags.
    if typ in ["R3", "R4"] and return_sigm:
        if get_lambdamax:
            return (lambdamax, beta, s)
        else:
            return (beta, s)
    if get_lambdamax:
        return (lambdamax, beta)
    else:
        return beta
def pathlasso(
    matrix,
    lambdas=False,
    n_active=0,
    lamin=1e-2,
    typ="R1",
    meth="Path-Alg",
    rho=1.345,
    true_lam=False,
    e=None,
    return_sigm=False,
    rho_classification=-1.0,
    w=None,
    intercept=False,
):
    """Compute the whole solution path over a grid of regularization values.

    Same formulations and parameter semantics as :func:`Classo`, but solves
    for every lambda in ``lambdas`` (or a default geometric grid from 1 down
    to ``lamin``) and returns the stacked coefficients.

    Args:
        matrix: tuple ``(X, C, y)``.
        lambdas: decreasing sequence of lambda fractions, or False for the
            default 100-point geometric grid.
        n_active: stop the path once this many coefficients are active
            (0 disables the early stop).
        lamin: smallest lambda fraction of the default grid.
        typ / meth / rho / true_lam / e / rho_classification / w / intercept:
            as in :func:`Classo`.
        return_sigm: if True, R3/R4 additionally return the scale path ``S``.

    Returns:
        ``(BETA, real_path)`` — coefficient matrix (one row per lambda) and
        the lambdas on the original scale; R3/R4 with ``return_sigm`` return
        ``(BETA, real_path, S)``.
    """
    # Early-stop flag: 0 means "no limit" for the downstream path solvers.
    Nactive = n_active
    if Nactive == 0:
        Nactive = False
    # Default grid: lamin**linspace(0, 1) is geometric from 1 down to lamin.
    if type(lambdas) is bool:
        lambdas = lamin ** (np.linspace(0.0, 1, 100))
    # Path solvers expect a decreasing sequence of lambdas.
    if lambdas[0] < lambdas[-1]:
        lambdass = [
            lambdas[i] for i in range(len(lambdas) - 1, -1, -1)
        ]  # reverse the list if needed
    else:
        lambdass = [lambdas[i] for i in range(len(lambdas))]
    # Feature weighting, undone at the end (see Classo).
    if w is not None:
        matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
    else:
        matrices = matrix
    X, C, y = matrices
    if typ == "R2":
        pb = problem_R2(matrices, meth, rho, intercept=intercept)
        lambdamax = pb.lambdamax
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        BETA = pathlasso_R2(pb, lambdass, n_active=Nactive)
    elif typ == "R3":
        if intercept:
            # For R1 and R3 the intercept is beta0 = ybar - Xbar.vdot(beta),
            # so centering X and y reduces to the standard no-intercept problem.
            Xbar, ybar = np.mean(X, axis=0), np.mean(y)
            matrices = (X - Xbar, C, y - ybar)
        # Rescale by r when a non-default e is requested (default e = n/2).
        if e is None or e == len(matrices[0]) / 2:
            r = 1.0
            pb = problem_R3(matrices, meth)
        else:
            r = np.sqrt(2 * e / len(matrices[0]))
            pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
        lambdamax = pb.lambdamax
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive)
        # Undo the r-scaling on the (squared-scale) concomitant estimates.
        S = np.array(S) / r ** 2
        BETA = np.array(BETA)
        if intercept:
            # Prepend the reconstructed intercept to each path point.
            BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
    elif typ == "R4":
        # Default e = n; otherwise rescale data and rho by r.
        if e is None or e == len(matrices[0]):
            r = 1.0
            pb = problem_R4(matrices, meth, rho, intercept=intercept)
        else:
            r = np.sqrt(e / len(matrices[0]))
            pb = problem_R4(
                (matrices[0] * r, matrices[1], matrices[2] * r),
                meth,
                rho / r,
                intercept=intercept,
            )
        lambdamax = pb.lambdamax
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        BETA, S = pathlasso_R4(pb, lambdass, n_active=Nactive)
        S = np.array(S) / r ** 2
        BETA = np.array(BETA)
    elif typ == "C2":
        # Classification requires binary labels in {-1, 1}.
        assert set(matrices[2]).issubset({1, -1})
        lambdamax = h_lambdamax(
            matrices, rho_classification, typ="C2", intercept=intercept
        )
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        BETA = pathalgo_general(
            matrices,
            lambdass,
            "C2",
            n_active=Nactive,
            rho=rho_classification,
            intercept=intercept,
        )
    elif typ == "C1":
        assert set(matrices[2]).issubset({1, -1})
        lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        BETA = pathalgo_general(
            matrices, lambdass, "C1", n_active=Nactive, intercept=intercept
        )
    else:  # "R1" (least squares), the default formulation
        if intercept:
            # Same centering trick as the R3 branch (see above).
            Xbar, ybar = np.mean(X, axis=0), np.mean(y)
            matrices = (X - Xbar, C, y - ybar)
        pb = problem_R1(matrices, meth)
        lambdamax = pb.lambdamax
        if true_lam:
            lambdass = [lamb / lambdamax for lamb in lambdass]
        # NOTE(review): this branch passes the raw n_active (possibly 0)
        # while every other branch passes Nactive (False when 0) — confirm
        # pathlasso_R1 treats 0 and False identically.
        BETA = pathlasso_R1(pb, lambdass, n_active=n_active)
        if intercept:
            BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
    # Lambdas on the original (unnormalized) scale.
    real_path = [lam * lambdamax for lam in lambdass]
    # Undo the feature weighting; the intercept column is unweighted.
    if w is not None:
        if intercept:
            ww = np.array([1] + list(w))
        else:
            ww = w
        BETA = np.array([beta / ww for beta in BETA])
    if typ in ["R3", "R4"] and return_sigm:
        return (np.array(BETA), real_path, S)
    return (np.array(BETA), real_path)
| 29.972727 | 185 | 0.522394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,142 | 0.115458 |
c3c0682686fc342ab84606c799f8486241e754b8 | 1,547 | py | Python | python/eggroll/core/datastructure/__init__.py | liszekei/eggroll | 6a8cc5e1c9106d2633dc415092151f921f003743 | [
"Apache-2.0"
] | 209 | 2019-08-08T18:38:26.000Z | 2022-03-23T06:20:40.000Z | python/eggroll/core/datastructure/__init__.py | liszekei/eggroll | 6a8cc5e1c9106d2633dc415092151f921f003743 | [
"Apache-2.0"
] | 110 | 2019-08-09T02:50:47.000Z | 2022-03-07T10:30:21.000Z | python/eggroll/core/datastructure/__init__.py | liszekei/eggroll | 6a8cc5e1c9106d2633dc415092151f921f003743 | [
"Apache-2.0"
] | 77 | 2019-08-15T08:11:52.000Z | 2022-03-23T06:19:44.000Z | # Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
from concurrent.futures import _base, ThreadPoolExecutor
from eggroll.core.datastructure.threadpool import ErThreadUnpooledExecutor
from eggroll.core.datastructure.queue import _PySimpleQueue
from eggroll.utils.log_utils import get_logger
# Module-level logger shared by the helpers below.
L = get_logger()
# queue.SimpleQueue first appeared in CPython 3.7; fall back to eggroll's
# pure-Python implementation on older runtimes.
try:
    from queue import SimpleQueue
except ImportError:
    SimpleQueue = _PySimpleQueue
def create_executor_pool(canonical_name: str = None, max_workers=None, thread_name_prefix=None, *args, **kwargs) -> _base.Executor:
    """Instantiate an executor from its dotted class path.

    Defaults to ``concurrent.futures.ThreadPoolExecutor`` when
    ``canonical_name`` is empty/None; extra positional and keyword
    arguments are forwarded to the executor constructor.
    """
    dotted_path = canonical_name or "concurrent.futures.ThreadPoolExecutor"
    module_path, _, cls_name = dotted_path.rpartition(".")
    executor_cls = getattr(import_module(module_path), cls_name)
    return executor_cls(max_workers=max_workers, thread_name_prefix=thread_name_prefix, *args, **kwargs)
def create_simple_queue(*args, **kwargs):
    """Return a new SimpleQueue; any arguments are accepted but ignored."""
    queue = SimpleQueue()
    return queue
| 37.731707 | 131 | 0.771816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.420814 |
c3c1914056571f2105ec5f247161279e62320742 | 2,460 | py | Python | install/TexGen/Python/Scripts/cotton.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 14 | 2021-06-17T17:17:07.000Z | 2022-03-26T05:20:20.000Z | install/TexGen/Python/Scripts/cotton.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 6 | 2021-11-01T20:37:39.000Z | 2022-03-11T17:18:53.000Z | install/TexGen/Python/Scripts/cotton.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 8 | 2021-07-20T09:24:23.000Z | 2022-02-26T16:32:00.000Z | # =============================================================================
# TexGen: Geometric textile modeller.
# Copyright (C) 2015 Louise Brown
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# =============================================================================
# Build a plain-weave "cotton" textile with the TexGen scripting API.
# This script is meant to run inside TexGen, which injects CTextile, CYarn,
# CNode, XYZ, AddTextile, etc. into the interpreter's global scope.
# Coordinates are in TexGen model units (presumably mm — confirm).
# Create a textile
Textile = CTextile()
# Create a lenticular cross-section (width 0.3, height 0.14) and mesh it
# with a 30-point triangulation.
Section = CSectionLenticular(0.3, 0.14)
Section.AssignSectionMesh(CSectionMeshTriangulate(30))
# Create 4 yarns: two warp (0, 1) and two weft (2, 3) per the node paths below.
Yarns = (CYarn(), CYarn(), CYarn(), CYarn())
# Add nodes to the yarns to describe their undulating paths (crimp = 0.15 in z).
Yarns[0].AddNode(CNode(XYZ(0, 0, 0)))
Yarns[0].AddNode(CNode(XYZ(0.35, 0, 0.15)))
Yarns[0].AddNode(CNode(XYZ(0.7, 0, 0)))
Yarns[1].AddNode(CNode(XYZ(0, 0.35, 0.15)))
Yarns[1].AddNode(CNode(XYZ(0.35, 0.35, 0)))
Yarns[1].AddNode(CNode(XYZ(0.7, 0.35, 0.15)))
Yarns[2].AddNode(CNode(XYZ(0, 0, 0.15)))
Yarns[2].AddNode(CNode(XYZ(0, 0.35, 0)))
Yarns[2].AddNode(CNode(XYZ(0, 0.7, 0.15)))
Yarns[3].AddNode(CNode(XYZ(0.35, 0, 0)))
Yarns[3].AddNode(CNode(XYZ(0.35, 0.35, 0.15)))
Yarns[3].AddNode(CNode(XYZ(0.35, 0.7, 0)))
# The same interpolation and section shape apply to all the yarns, so loop over them all.
for Yarn in Yarns:
    # Set the interpolation function used between nodes.
    Yarn.AssignInterpolation(CInterpolationCubic())
    # Assign a constant cross-section all along the yarn.
    Yarn.AssignSection(CYarnSectionConstant(Section))
    # Set the surface-mesh resolution.
    Yarn.SetResolution(8)
    # Tile the yarn with a 0.7 x 0.7 repeat in x and y.
    Yarn.AddRepeat(XYZ(0.7, 0, 0))
    Yarn.AddRepeat(XYZ(0, 0.7, 0))
    # Add the yarn to our textile.
    Textile.AddYarn(Yarn)
# Create the unit-cell domain (a box) and assign it to the textile.
Textile.AssignDomain(CDomainPlanes(XYZ(0, 0, -0.1), XYZ(0.7, 0.7, 0.25)));
# Register the textile under the name "cotton".
AddTextile("cotton", Textile)
| 31.948052 | 90 | 0.667073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,372 | 0.557724 |
c3c25464c43357bfebaa11a128018e71bd442a63 | 3,201 | py | Python | glove.py | 2014mchidamb/TorchGlove | 8be513cb9c07cad9fb1ea7400f977d7b0ed62ecc | [
"MIT"
] | 96 | 2017-02-27T20:43:08.000Z | 2022-03-14T13:13:27.000Z | glove.py | 2014mchidamb/TorchGlove | 8be513cb9c07cad9fb1ea7400f977d7b0ed62ecc | [
"MIT"
] | 2 | 2017-07-29T01:12:08.000Z | 2021-07-24T16:05:45.000Z | glove.py | 2014mchidamb/TorchGlove | 8be513cb9c07cad9fb1ea7400f977d7b0ed62ecc | [
"MIT"
] | 20 | 2017-02-27T22:11:28.000Z | 2022-03-14T13:13:28.000Z | from nltk.tokenize import word_tokenize
from torch.autograd import Variable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
# Set hyper-parameters for the toy GloVe run.
context_size = 3
embed_size = 2
xmax = 2
alpha = 0.75
batch_size = 20
l_rate = 0.001
num_epochs = 10
# Open and read in the training text (lower-cased).
text_file = open('short_story.txt', 'r')
text = text_file.read().lower()
text_file.close()
# Tokenize and build the vocabulary (np.unique returns sorted unique tokens).
word_list = word_tokenize(text)
vocab = np.unique(word_list)
w_list_size = len(word_list)
vocab_size = len(vocab)
# Map each vocabulary word to its (sorted) index.
w_to_i = {word: ind for ind, word in enumerate(vocab)}
# Construct the co-occurrence matrix with distance-weighted counts (1/j for
# a neighbour at offset j, within a symmetric window of context_size).
comat = np.zeros((vocab_size, vocab_size))
for i in range(w_list_size):
	for j in range(1, context_size+1):
		ind = w_to_i[word_list[i]]
		# NOTE(review): `i-j > 0` skips position 0 as a left neighbour;
		# a valid index test would be `i-j >= 0` — looks like an off-by-one.
		if i-j > 0:
			lind = w_to_i[word_list[i-j]]
			comat[ind, lind] += 1.0/j
		if i+j < w_list_size:
			rind = w_to_i[word_list[i+j]]
			comat[ind, rind] += 1.0/j
# Index pairs (row, col) of the non-zero co-occurrences; training samples
# are drawn from these pairs only.
coocs = np.transpose(np.nonzero(comat))
# GloVe weighting function: (x/xmax)**alpha below the cap, 1 above it.
def wf(x):
	"""Return the GloVe loss weight for a co-occurrence count x."""
	return (x / xmax) ** alpha if x < xmax else 1
# Set up word vectors and biases: one left ("center") and one right
# ("context") embedding plus bias per vocabulary word, N(0, 0.01) init.
l_embed, r_embed = [
	[Variable(torch.from_numpy(np.random.normal(0, 0.01, (embed_size, 1))),
	requires_grad = True) for j in range(vocab_size)] for i in range(2)]
l_biases, r_biases = [
	[Variable(torch.from_numpy(np.random.normal(0, 0.01, 1)),
	requires_grad = True) for j in range(vocab_size)] for i in range(2)]
# Single Adam optimizer over all embeddings and biases.
optimizer = optim.Adam(l_embed + r_embed + l_biases + r_biases, lr = l_rate)
# Batch sampling: draw batch_size non-zero co-occurrence pairs at random.
def gen_batch():
	"""Sample one training batch (without replacement) from the non-zero pairs."""
	chosen_rows = np.random.choice(np.arange(len(coocs)), size=batch_size, replace=False)
	left_vecs = []
	right_vecs = []
	cooc_vals = []
	left_bias = []
	right_bias = []
	for row in chosen_rows:
		left_idx, right_idx = tuple(coocs[row])
		left_vecs.append(l_embed[left_idx])
		right_vecs.append(r_embed[right_idx])
		cooc_vals.append(comat[left_idx, right_idx])
		left_bias.append(l_biases[left_idx])
		right_bias.append(r_biases[right_idx])
	return left_vecs, right_vecs, cooc_vals, left_bias, right_bias
# Train the model: minimize the weighted GloVe objective
# wf(X_ij) * (w_i . w~_j + b_i + b~_j - log X_ij)^2 over sampled batches.
for epoch in range(num_epochs):
	num_batches = int(w_list_size/batch_size)
	avg_loss = 0.0
	for batch in range(num_batches):
		optimizer.zero_grad()
		l_vecs, r_vecs, covals, l_v_bias, r_v_bias = gen_batch()
		# For pytorch v2 use .view(-1) in torch.dot here; otherwise .view(-1) is unneeded.
		loss = sum([torch.mul((torch.dot(l_vecs[i].view(-1), r_vecs[i].view(-1)) +
				l_v_bias[i] + r_v_bias[i] - np.log(covals[i]))**2,
				wf(covals[i])) for i in range(batch_size)])
		# NOTE(review): `loss.data[0]` is pre-0.4 PyTorch style; modern
		# versions require `loss.item()` here.
		avg_loss += loss.data[0]/num_batches
		loss.backward()
		optimizer.step()
	print("Average loss for epoch "+str(epoch+1)+": ", avg_loss)
# Visualize embeddings (only meaningful when they are 2-dimensional).
if embed_size == 2:
	# Pick some random words to plot.
	word_inds = np.random.choice(np.arange(len(vocab)), size=10, replace=False)
	for word_ind in word_inds:
		# Final embedding = sum of the left and right embeddings.
		w_embed = (l_embed[word_ind].data + r_embed[word_ind].data).numpy()
		x, y = w_embed[0][0], w_embed[1][0]
		plt.scatter(x, y)
		plt.annotate(vocab[word_ind], xy=(x, y), xytext=(5, 2),
			textcoords='offset points', ha='right', va='bottom')
| 30.198113 | 89 | 0.704155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.170884 |
c3c4959c92c9a5cca979b6d9be7d2c7d25629f6c | 971 | py | Python | tests/python/tblink_rpc_smoke.py | tblink-rpc/tblink-rpc-core | 4daac39138930a726014914952047708162c3451 | [
"Apache-2.0"
] | 1 | 2022-03-30T11:58:00.000Z | 2022-03-30T11:58:00.000Z | tests/python/tblink_rpc_smoke.py | tblink-rpc/tblink-rpc-core | 4daac39138930a726014914952047708162c3451 | [
"Apache-2.0"
] | null | null | null | tests/python/tblink_rpc_smoke.py | tblink-rpc/tblink-rpc-core | 4daac39138930a726014914952047708162c3451 | [
"Apache-2.0"
] | null | null | null | '''
Created on Jul 5, 2021
@author: mballance
'''
from tblink_rpc_testcase import TblinkRpcTestcase
import sys
from tblink_rpc_core.json.json_transport import JsonTransport
import asyncio
from tblink_rpc_core.param_val_map import ParamValMap
from tblink_rpc_core.endpoint import Endpoint
class TblinkRpcSmoke(TblinkRpcTestcase):
    """Smoke test: bring up a JSON-transport endpoint and drive the
    build/connect phases of the handshake to completion."""

    def test_smoke(self):
        print("Smoke: ")
        # Transport over the reader/writer pair supplied by the base testcase.
        transport = JsonTransport(self.reader, self.writer)
        loop = asyncio.get_event_loop()
        endpoint = Endpoint(transport)
        # Start the receive loop in the background.
        asyncio.ensure_future(transport.run())
        # Phase 1: run until the peer reports build completion.
        print("--> build_complete")
        loop.run_until_complete(endpoint.build_complete())
        print("<-- build_complete")
        # Phase 2: run until the peer reports connect completion.
        print("--> connect_complete")
        loop.run_until_complete(endpoint.connect_complete())
        print("<-- connect_complete")
self.fail("Python assert") | 27.742857 | 61 | 0.661174 | 681 | 0.701339 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.195675 |
c3c4fcd9bbcff4febb72c2cd8b9d3f0b7db5b1a1 | 445 | py | Python | backend/utils/management/commands/createsu.py | stasfilin/rss_portal | e6e9f8d254c80c8a7a40901b3b7dab059f259d55 | [
"MIT"
] | null | null | null | backend/utils/management/commands/createsu.py | stasfilin/rss_portal | e6e9f8d254c80c8a7a40901b3b7dab059f259d55 | [
"MIT"
] | null | null | null | backend/utils/management/commands/createsu.py | stasfilin/rss_portal | e6e9f8d254c80c8a7a40901b3b7dab059f259d55 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that bootstraps the default ``admin`` superuser."""

    def handle(self, *args, **options):
        # Nothing to do when the account is already present.
        if User.objects.filter(username="admin").exists():
            return
        User.objects.create_superuser("admin", "admin@admin.com", "admin123456")
        self.stdout.write(self.style.SUCCESS("Superuser created"))
| 31.785714 | 84 | 0.689888 | 346 | 0.777528 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.262921 |
c3c622cc0704f66c0ca320f04b393a4ce95e43c7 | 13,856 | py | Python | addition_module/DSDG/DUM/train.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | 1 | 2022-02-07T02:03:37.000Z | 2022-02-07T02:03:37.000Z | addition_module/DSDG/DUM/train.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | null | null | null | addition_module/DSDG/DUM/train.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division
import torch
import matplotlib.pyplot as plt
import argparse, os
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from models.CDCNs_u import Conv2d_cd, CDCN_u
from Load_OULUNPUcrop_train import Spoofing_train_g, SeparateBatchSampler, Normaliztion, ToTensor, \
RandomHorizontalFlip, Cutout, RandomErasing
from Load_OULUNPUcrop_valtest import Spoofing_valtest, Normaliztion_valtest, ToTensor_valtest
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from utils import AvgrageMeter, performances
# Dataset root: OULU-NPU crops (images), PRNet depth maps, and Protocol 1
# file lists for the train/dev/test splits.
# NOTE(review): machine-specific absolute paths; consider exposing these as
# CLI arguments alongside the other hyper-parameters.
train_image_dir = '/export2/home/wht/oulu_img_crop/train_file_flod/'
val_image_dir = '/export2/home/wht/oulu_img_crop/dev_file_flod/'
test_image_dir = '/export2/home/wht/oulu_img_crop/test_file_flod/'
train_map_dir = '/export2/home/wht/oulu_img_crop/train_depth_flod/'
val_map_dir = '/export2/home/wht/oulu_img_crop/dev_depth_flod/'
test_map_dir = '/export2/home/wht/oulu_img_crop/test_depth_flod/'
train_list = '/export2/home/wht/oulu_img_crop/protocols/Protocol_1/Train_g.txt'
val_list = '/export2/home/wht/oulu_img_crop/protocols/Protocol_1/Dev.txt'
test_list = '/export2/home/wht/oulu_img_crop/protocols/Protocol_1/Test.txt'
def contrast_depth_conv(input):
    """Compute the 8-direction contrast depth of a batch of depth maps.

    Each of the eight 3x3 kernels subtracts the center pixel from one of its
    eight neighbours; the kernels are applied with a grouped conv2d so every
    direction gets its own channel.

    Args:
        input: 3-D tensor of depth maps, shape (batch, H, W); moved to CUDA
            kernels internally, so a GPU is required.

    Returns:
        4-D tensor (batch, 8, H', W') of per-direction differences.
        NOTE(review): the original comment claimed 8x32x32 output for 32x32
        input, but conv2d is called without padding — confirm the expected
        spatial size downstream.
    """
    kernel_filter_list = [
        [[1, 0, 0], [0, -1, 0], [0, 0, 0]], [[0, 1, 0], [0, -1, 0], [0, 0, 0]], [[0, 0, 1], [0, -1, 0], [0, 0, 0]],
        [[0, 0, 0], [1, -1, 0], [0, 0, 0]], [[0, 0, 0], [0, -1, 1], [0, 0, 0]],
        [[0, 0, 0], [0, -1, 0], [1, 0, 0]], [[0, 0, 0], [0, -1, 0], [0, 1, 0]], [[0, 0, 0], [0, -1, 0], [0, 0, 1]]
    ]
    kernel_filter = np.array(kernel_filter_list, np.float32)
    # Fix: np.float is a removed alias (NumPy >= 1.24); np.float64 is what it meant.
    kernel_filter = torch.from_numpy(kernel_filter.astype(np.float64)).float().cuda()
    # weights laid out as (out_channels, in_channels/groups, kH, kW)
    kernel_filter = kernel_filter.unsqueeze(dim=1)
    # Replicate the single input channel 8 times so each grouped kernel sees its own copy.
    input = input.unsqueeze(dim=1).expand(input.shape[0], 8, input.shape[1], input.shape[2])
    contrast_depth = F.conv2d(input, weight=kernel_filter, groups=8)
    return contrast_depth
class Contrast_depth_loss(nn.Module):
    """MSE between the contrast-depth responses (see contrast_depth_conv)
    of a predicted depth map and its ground-truth label."""

    def __init__(self):
        super(Contrast_depth_loss, self).__init__()

    def forward(self, out, label):
        predicted_contrast = contrast_depth_conv(out)
        target_contrast = contrast_depth_conv(label)
        mse = nn.MSELoss().cuda()
        return mse(predicted_contrast, target_contrast)
def train_test():
    """Train CDCN_u on OULU-NPU Protocol 1 and periodically evaluate it.

    Reads hyper-parameters from the module-level ``args`` namespace. Writes a
    text log, per-evaluation score files, and model snapshots under args.log.
    Requires CUDA: the model and losses are moved to GPU unconditionally.
    """
    # Create the log/output directory and open the run log in append mode.
    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + '_log_P1.txt', 'a')
    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()
    print('train from scratch!\n')
    log_file.write('train from scratch!\n')
    log_file.write('lr:%.6f, lamda_kl:%.6f , batchsize:%d\n' % (args.lr, args.kl_lambda, args.batchsize))
    log_file.flush()
    # Model, optimizer and LR schedule.
    model = CDCN_u(basic_conv=Conv2d_cd, theta=0.7)
    # model = ResNet18_u()
    model = model.cuda()
    model = torch.nn.DataParallel(model)
    lr = args.lr
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    print(model)
    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()
    for epoch in range(args.epochs):
        # NOTE(review): this local `lr` is never fed back to the optimizer;
        # scheduler.step() below performs the actual decay, so this is
        # bookkeeping only.
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma
        # Running averages for the six loss components of this epoch.
        loss_absolute_real = AvgrageMeter()
        loss_absolute_fake = AvgrageMeter()
        loss_contra_real = AvgrageMeter()
        loss_contra_fake = AvgrageMeter()
        loss_kl_real = AvgrageMeter()
        loss_kl_fake = AvgrageMeter()
        ###########################################
        ''' train '''
        ###########################################
        model.train()
        # load random 16-frame clip data every epoch
        train_data = Spoofing_train_g(train_list, train_image_dir, train_map_dir,
                                      transform=transforms.Compose(
                                          [RandomErasing(), RandomHorizontalFlip(), ToTensor(), Cutout(),
                                           Normaliztion()]))
        train_real_idx, train_fake_idx = train_data.get_idx()
        batch_sampler = SeparateBatchSampler(train_real_idx, train_fake_idx, batch_size=args.batchsize, ratio=args.ratio)
        dataloader_train = DataLoader(train_data, num_workers=8, batch_sampler=batch_sampler)
        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(), sample_batched['map_x'].cuda(), \
                                             sample_batched['spoofing_label'].cuda()
            optimizer.zero_grad()
            # forward + backward + optimize
            mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(inputs)
            # Split each batch at ratio*batchsize: the sampler packs batches by
            # real/fake index lists, presumably real samples first — confirm
            # against SeparateBatchSampler.
            mu_real = mu[:int(args.batchsize * args.ratio), :, :]
            logvar_real = logvar[:int(args.batchsize * args.ratio), :, :]
            map_x_real = map_x[:int(args.batchsize * args.ratio), :, :]
            map_label_real = map_label[:int(args.batchsize * args.ratio), :, :]
            absolute_loss_real = criterion_absolute_loss(map_x_real, map_label_real)
            contrastive_loss_real = criterion_contrastive_loss(map_x_real, map_label_real)
            # Per-pixel KL divergence of N(mu, exp(logvar)) from N(map_label, 1),
            # summed over the map and averaged over the batch.
            kl_loss_real = -(1 + logvar_real - (mu_real - map_label_real).pow(2) - logvar_real.exp()) / 2
            kl_loss_real = kl_loss_real.sum(dim=1).sum(dim=1).mean()
            kl_loss_real = args.kl_lambda * kl_loss_real
            # Same three terms for the fake portion, down-weighted by 0.1.
            mu_fake = mu[int(args.batchsize * args.ratio):, :, :]
            logvar_fake = logvar[int(args.batchsize * args.ratio):, :, :]
            map_x_fake = map_x[int(args.batchsize * args.ratio):, :, :]
            map_label_fake = map_label[int(args.batchsize * args.ratio):, :, :]
            absolute_loss_fake = 0.1 * criterion_absolute_loss(map_x_fake, map_label_fake)
            contrastive_loss_fake = 0.1 * criterion_contrastive_loss(map_x_fake, map_label_fake)
            kl_loss_fake = -(1 + logvar_fake - (mu_fake - map_label_fake).pow(2) - logvar_fake.exp()) / 2
            kl_loss_fake = kl_loss_fake.sum(dim=1).sum(dim=1).mean()
            kl_loss_fake = 0.1 * args.kl_lambda * kl_loss_fake
            # Total loss = absolute (MSE) + contrastive-depth + KL terms.
            absolute_loss = absolute_loss_real + absolute_loss_fake
            contrastive_loss = contrastive_loss_real + contrastive_loss_fake
            kl_loss = kl_loss_real + kl_loss_fake
            loss = absolute_loss + contrastive_loss + kl_loss
            loss.backward()
            optimizer.step()
            n = inputs.size(0)
            loss_absolute_real.update(absolute_loss_real.data, n)
            loss_absolute_fake.update(absolute_loss_fake.data, n)
            loss_contra_real.update(contrastive_loss_real.data, n)
            loss_contra_fake.update(contrastive_loss_fake.data, n)
            loss_kl_real.update(kl_loss_real.data, n)
            loss_kl_fake.update(kl_loss_fake.data, n)
        scheduler.step()
        # whole epoch average
        print(
            'epoch:%d, Train: Absolute_loss: real=%.4f,fake=%.4f, '
            'Contrastive_loss: real=%.4f,fake=%.4f, kl_loss: real=%.4f,fake=%.4f' % (
                epoch + 1, loss_absolute_real.avg, loss_absolute_fake.avg, loss_contra_real.avg, loss_contra_fake.avg,
                loss_kl_real.avg, loss_kl_fake.avg))
        # validation/test cadence: every 200 epochs early on, then every 50.
        if epoch < 200:
            epoch_test = 200
        else:
            epoch_test = 50
        # epoch_test = 1
        if epoch % epoch_test == epoch_test - 1:
            model.eval()
            with torch.no_grad():
                ###########################################
                ''' val '''
                ###########################################
                # Dev split: scores written to file, used later for the threshold.
                val_data = Spoofing_valtest(val_list, val_image_dir, val_map_dir,
                                            transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_val = DataLoader(val_data, batch_size=1, shuffle=False, num_workers=4)
                map_score_list = []
                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['val_map_x'].cuda()  # binary map from PRNet
                    optimizer.zero_grad()
                    mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(inputs.squeeze(0))
                    # Mean predicted depth normalized by the face-region area.
                    score_norm = mu.sum(dim=1).sum(dim=1) / val_maps.squeeze(0).sum(dim=1).sum(dim=1)
                    map_score = score_norm.mean()
                    map_score_list.append('{} {}\n'.format(map_score, spoof_label[0][0]))
                map_score_val_filename = args.log + '/' + args.log + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)
                ###########################################
                ''' test '''
                ##########################################
                # Test split: same scoring, written to a second file.
                test_data = Spoofing_valtest(test_list, test_image_dir, test_map_dir,
                                             transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_test = DataLoader(test_data, batch_size=1, shuffle=False, num_workers=4)
                map_score_list = []
                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['val_map_x'].cuda()
                    optimizer.zero_grad()
                    mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(inputs.squeeze(0))
                    score_norm = mu.sum(dim=1).sum(dim=1) / test_maps.squeeze(0).sum(dim=1).sum(dim=1)
                    map_score = score_norm.mean()
                    map_score_list.append('{} {}\n'.format(map_score, spoof_label[0][0]))
                map_score_test_filename = args.log + '/' + args.log + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)
                #############################################################
                # performance measurement on both val and test score files
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)
                print('epoch:%d, Val: val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f' % (
                    epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write('\n epoch:%d, Val: val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n' % (
                    epoch + 1, val_threshold, val_ACC, val_ACER))
                print('epoch:%d, Test: ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f' % (
                    epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.write('epoch:%d, Test: ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n' % (
                    epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.flush()
        # Snapshot on the same cadence as evaluation (condition repeated here).
        if epoch % epoch_test == epoch_test - 1:
            # save the model until the next improvement
            torch.save(model.state_dict(), args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))
    print('Finished Training')
    log_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="save quality using landmarkpose model")
parser.add_argument('--gpus', type=str, default='0, 1, 2, 3', help='the gpu id used for predict')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--batchsize', type=int, default=64, help='initial batchsize')
parser.add_argument('--step_size', type=int, default=500, help='how many epochs lr decays once') # 500
parser.add_argument('--gamma', type=float, default=0.5, help='gamma of optim.lr_scheduler.StepLR, decay of lr')
parser.add_argument('--kl_lambda', type=float, default=0.001, help='')
parser.add_argument('--ratio', type=float, default=0.75, help='real and fake in batchsize ')
parser.add_argument('--echo_batches', type=int, default=50, help='how many batches display once') # 50
parser.add_argument('--epochs', type=int, default=1600, help='total training epochs')
parser.add_argument('--log', type=str, default="CDCN_U_P1", help='log and save model name')
parser.add_argument('--finetune', action='store_true', default=False, help='whether finetune other models')
args = parser.parse_args()
train_test()
| 47.77931 | 152 | 0.58444 | 403 | 0.029085 | 0 | 0 | 0 | 0 | 0 | 0 | 2,768 | 0.199769 |
c3c6dc209f06eeee94d2df7708439a9145986364 | 230 | py | Python | examples/test_python_folder_classify.py | TensorPy/TensorPy | d8715a843081c48cc090b7168e144f7db36faff9 | [
"MIT"
] | 45 | 2016-10-20T01:38:31.000Z | 2021-06-05T15:34:03.000Z | examples/test_python_folder_classify.py | mdmintz/TensorPy | d8715a843081c48cc090b7168e144f7db36faff9 | [
"MIT"
] | 9 | 2017-03-25T12:10:11.000Z | 2020-09-25T21:19:47.000Z | examples/test_python_folder_classify.py | mdmintz/TensorPy | d8715a843081c48cc090b7168e144f7db36faff9 | [
"MIT"
] | 33 | 2016-10-22T11:41:34.000Z | 2021-03-18T15:51:10.000Z | from tensorpy import image_base
classifications = image_base.classify_folder_images('./images')
print("*** Displaying Image Classification Results as a list: ***")
for classification in classifications:
print(classification)
| 32.857143 | 67 | 0.791304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.304348 |
c3c7e2985a05bdb8275ebaf24c3f0b3132457a34 | 4,201 | py | Python | deep_audio_features/bin/basic_test.py | tyiannak/deep_audio_features | 9cfaf4f12883752ffe7eaaa373c2667893a00e3b | [
"MIT"
] | 40 | 2020-07-24T17:09:44.000Z | 2022-02-26T10:22:12.000Z | deep_audio_features/bin/basic_test.py | tyiannak/deep_audio_features | 9cfaf4f12883752ffe7eaaa373c2667893a00e3b | [
"MIT"
] | 40 | 2020-07-20T17:21:20.000Z | 2022-01-28T23:02:07.000Z | deep_audio_features/bin/basic_test.py | tyiannak/deep_audio_features | 9cfaf4f12883752ffe7eaaa373c2667893a00e3b | [
"MIT"
] | 5 | 2020-08-20T09:19:00.000Z | 2022-01-05T18:29:37.000Z | import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.models.cnn import load_cnn
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
import deep_audio_features.bin.config
import numpy
def test_model(modelpath, ifile, layers_dropped,
               test_segmentation=False, verbose=True):
    """Load a stored CNN model and run it on a single wav file.

    Arguments:
        modelpath {str}: Path where the model was stored.
        ifile {str}: Path of the wav file to test.
        layers_dropped {int}: Number of final layers to cut; 0 keeps the
            full classifier (see drop_layers).
        test_segmentation {bool}: If True, extract segment-level
            predictions for the sequence.
        verbose {bool}: If True, print the predictions.

    Returns:
        y_pred {np.array}: Predicted class index per (segment of the) input.
        posteriors {np.array}: Unnormalized per-class posteriors.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Restore model (also recovers the feature-extraction parameters it was
    # trained with, so the test-time features match training).
    model, hop_length, window_length = load_cnn(modelpath)
    model = model.to(device)
    class_names = model.classes_mapping
    max_seq_length = model.max_sequence_length
    zero_pad = model.zero_pad
    spec_size = model.spec_size
    fuse = model.fuse
    # Apply layer drop
    model = drop_layers(model, layers_dropped)
    model.max_sequence_length = max_seq_length
    # print('Model:\n{}'.format(model))
    # Move to device
    model.to(device)
    # Create test set
    test_set = FeatureExtractorDataset(X=[ifile],
                                       # Random class -- does not matter at all
                                       y=[0],
                                       fe_method="MEL_SPECTROGRAM",
                                       oversampling=False,
                                       max_sequence_length=max_seq_length,
                                       zero_pad=zero_pad,
                                       forced_size=spec_size,
                                       fuse=fuse, show_hist=False,
                                       test_segmentation=test_segmentation,
                                       hop_length=hop_length, window_length=window_length)
    # Create test dataloader
    test_loader = DataLoader(dataset=test_set, batch_size=1,
                             num_workers=4, drop_last=False,
                             shuffle=False)
    # Forward a sample; when layers were dropped the model is a feature
    # extractor, not a classifier.
    posteriors, y_pred, _ = test(model=model, dataloader=test_loader,
                                 cnn=True,
                                 classifier=True if layers_dropped == 0
                                 else False)
    if verbose:
        print("--> Unormalized posteriors:\n {}\n".format(posteriors))
        print("--> Predictions:\n {}".format([class_names[yy] for yy in y_pred]))
    return y_pred, numpy.array(posteriors)
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run a single-file test.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True,
                        type=str, help='Model')
    parser.add_argument('-i', '--input', required=True,
                        type=str, help='Input file for testing')
    parser.add_argument('-s', '--segmentation', required=False,
                        action='store_true',
                        help='Return segment predictions')
    parser.add_argument('-L', '--layers', required=False, default=0,
                        help='Number of final layers to cut. Default is 0.')
    args = parser.parse_args()
    # Get arguments
    model = args.model
    ifile = args.input
    layers_dropped = int(args.layers)
    segmentation = args.segmentation
    # Test the model
    d, p = test_model(modelpath=model, ifile=ifile,
                      layers_dropped=layers_dropped,
                      test_segmentation=segmentation)
| 35.905983 | 90 | 0.583671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,180 | 0.280886 |
c3c861bcdbf10ce7a55f230e67e34074b5d82dda | 3,113 | py | Python | RoboticsLanguage/Tools/Exceptions.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 64 | 2018-05-15T14:36:44.000Z | 2022-03-09T05:00:31.000Z | RoboticsLanguage/Tools/Exceptions.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 9 | 2018-04-17T21:12:27.000Z | 2019-11-08T20:53:32.000Z | RoboticsLanguage/Tools/Exceptions.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 10 | 2018-03-27T12:09:12.000Z | 2021-02-16T08:07:26.000Z | #
# This is the Robotics Language compiler
#
# ErrorHandling.py: Implements Error Handling functions
#
# Created on: June 22, 2017
# Author: Gabriel A. D. Lopes
# Licence: Apache 2.0
# Copyright: 2014-2017 Robot Care Systems BV, The Hague, The Netherlands. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from contextlib import contextmanager
class ReturnException(Exception):
  # Internal sentinel raised by exception(action='return') and swallowed by
  # tryToProceed() to unwind out of the calling function. Not a real error.
  pass
@contextmanager
def tryToProceed():
  '''
  Attempts to proceed when an exception escapes the wrapped code. This
  function is coupled with the action 'return' of the exception function:
  the ReturnException sentinel is swallowed, everything else propagates.
  For example:

  from RoboticsLanguage.Tools import Exceptions

  def run_function():
    with Exceptions.exception('test'):
      a = 'a' + 1
    print 'reaches this point'

    with Exceptions.exception('test', action='return'):
      raise Exception('test')
    print 'does not reach this point'

  with Exceptions.tryToProceed():
    run_function()
    print 'does not reach this point'
  print 'reaches this point'
  '''
  try:
    yield
  except ReturnException:
    # Catch the sentinel by class instead of comparing type(e).__name__ to a
    # string: the name check also matched unrelated, same-named exception
    # classes, and re-raising with `raise e` mangled the traceback.
    pass
@contextmanager
def exception(key='default', code=None, parameters={}, **options):
  '''
  Generic exception function used in a 'with' context. Can be used for
  system/library exceptions, or to generate own exceptions. Usage:

  # system error
  with Exceptions.exception('test'):
    a = 'a' + 1

  # forced error
  with Exceptions.exception('forced', action='stop'):
    raise Exception('name')

  Options:
    level  -- logger level used when reporting (default 'error')
    action -- None, 'stop' (exit the RoL script) or 'return' (unwind the
              caller via ReturnException / tryToProceed)
  '''
  try:
    yield
  except Exception as e:
    # get the logger level and action if defined (dict.get instead of the
    # `x in options.keys()` dance).
    level = options.get('level', 'error')
    action = options.get('action')
    try:
      # try to identify who sent the exception from repr(type(e)),
      # e.g. "<class 'exceptions.TypeError'>" -> "exceptions.TypeError"
      emitter = re.search("<.*'([^']*)'>", str(type(e))).group(1)
    except Exception:
      # narrowed from a bare except: so KeyboardInterrupt/SystemExit pass
      emitter = 'unknown'

    # show the message
    showExceptionMessage(emitter, key, e, level, action)

    # apply actions
    if action == 'stop':
      # stop the RoL script
      sys.exit(1)
    elif action == 'return':
      # this will return the parent function (see tryToProceed)
      raise ReturnException
def showExceptionMessage(emitter, key, exception, level, action):
  """Print a human-readable summary of a handled exception.

  Converted from Python 2 print statements to print() calls, which behave
  identically in Python 2 and 3 for a single argument.
  """
  print('emitter: ' + emitter)
  print('key: ' + key)
  print('exception: ' + str(exception))
  print('level: ' + level)
  print('action: ' + str(action))
def raiseException(group, key, code=None, parameters={}):
  # Convenience wrapper: raise Exception(key) inside the generic handler so
  # it is reported/acted on by exception() above.
  with exception(group, code, parameters):
    raise Exception(key)
| 26.836207 | 99 | 0.680373 | 40 | 0.012849 | 1,765 | 0.566977 | 1,797 | 0.577257 | 0 | 0 | 2,049 | 0.658208 |
c3c8d411583e67c11896bf47299529f51cd0b641 | 5,765 | py | Python | idMatching_windowcards.py | CarnegieHall/metadata-matching | 8b3ccc3d0aea764f884cc20a723a6479d4edacf4 | [
"MIT"
] | 4 | 2016-04-29T05:07:38.000Z | 2021-07-30T19:18:50.000Z | idMatching_windowcards.py | CarnegieHall/metadata-matching | 8b3ccc3d0aea764f884cc20a723a6479d4edacf4 | [
"MIT"
] | 7 | 2016-04-26T20:16:19.000Z | 2019-01-28T19:47:09.000Z | idMatching_windowcards.py | CarnegieHall/metadata-matching | 8b3ccc3d0aea764f884cc20a723a6479d4edacf4 | [
"MIT"
] | null | null | null | # !/usr/local/bin/python3.4.2
# ----Copyright (c) 2016 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# run script with 5 arguments:
# argument 0 is the script name
# argument 1 is the path to the Isilon HDD volume containing the assets
# argument 2 is the path to the metadata spreadsheet [~/Carnegie_Hall_wcs.csv]
# argument 3 is the path ~/OPAS_ID_exports/OPAS_wcs_IDs_titles.csv
# argument 4 is the path to the folder you want to save your unmatched performance IDs to
# argument 5 is the harddrive ID/volume that will be added to the output filename (E.g. ABH_20150901)
import csv
import glob
import itertools
import json
import os
from os.path import isfile, join, split
import sys
# argv[1]: asset volume root, argv[2]: metadata CSV, argv[3]: OPAS ID/title
# export CSV, argv[4]: output folder for unmatched IDs.
filePath_1 = str(sys.argv[1])
filePath_2 = str(sys.argv[2])
filePath_3 = str(sys.argv[3])
filePath_4 = str(sys.argv[4])
fileDict = {}       # file name -> metadata record for each scanned TIFF
wcDict = {}         # window-card source ID -> OPAS/collection metadata
titleDict = {}      # 'CONC<event_id>' -> event title text
##matchedList = []
unmatchedIDs = []   # OPAS IDs present in metadata but missing from the export
#Set a variable to equal the harddrive volume number, which is extracted from the file path
volume = sys.argv[len(sys.argv)-1]
#Extract filenames from the full file path and build dictionary; the window
#card ID is the filename segment before the first underscore.
for full_filePath in glob.glob(os.path.join(filePath_1, '*.tif')):
    file_name = os.path.basename(full_filePath)
    file_wcID = os.path.basename(full_filePath).split('_')[0]
    fileDict[str(file_name)] = {}
    fileDict[str(file_name)]['File Name'] = file_name
    fileDict[str(file_name)]['Source Unique ID'] = file_wcID
# NOTE(review): mode 'rU' is deprecated (removed in Python 3.11); plain 'r'
# with newline handling is the modern equivalent.
with open(filePath_2, 'rU') as f:
    with open(filePath_3, encoding='utf-8') as g:
        wcData = csv.reader(f, dialect='excel', delimiter=',')
        next(wcData, None) # skip the headers
        titleData = csv.reader(g, dialect='excel', delimiter=',')
        # First pass: build the event-ID -> title lookup from the OPAS export.
        for row in titleData:
            event_id = row[0]
            titleMatch_id = ''.join(['CONC', event_id])
            text = row[1]
            if not text:
                text = '[No title available]'
            # event_date = ????
            # event_year = ????
            titleDict[titleMatch_id] = text
            # titleDict[titleMatch_id]['Text'] = text
            # # titleDict[titleMatch_id]['Full Date'] = event_date
            # # titleDict[titleMatch_id]['Year'] = event_year
        # Second pass: build wcDict from the metadata spreadsheet.
        for row in wcData:
            opas_id = row[0]
            source_unique_id = row[1].strip()
            collection = row[2]
            if 'Window Cards' in collection:
                # need to match any of these:
                # Main Hall Window Cards
                # Recital Hall Window Cards
                # Zankel Hall Window Cards
                cortexFolder = 'CH_WindowCards_01'
            event = row[3]
            entities = row[4]
            date_full = row[5]
            date_year = row[6]
            event_date_freetext = row[7]
            note = row[10]
            try:
                if opas_id:
                    opas_id = ''.join(['CONC', opas_id])
                    title = ''.join([titleDict[opas_id], ', ', date_year])
                    # date_full = titleDict[opas_id]['Full Date']
                    # date_year = titleDict[opas_id]['Year']
                else:
                    opas_id = ''.join([cortexFolder])
                    title = event
                    # date_full = ''
                    # date_year = ''
                wcDict[str(source_unique_id)] = {}
                wcDict[str(source_unique_id)]['OPAS ID'] = opas_id
                wcDict[str(source_unique_id)]['Collection'] = collection
                wcDict[str(source_unique_id)]['Date (Free text)'] = event_date_freetext
                wcDict[str(source_unique_id)]['Date (Year)'] = date_year
                wcDict[str(source_unique_id)]['Date (Full)'] = date_full
                wcDict[str(source_unique_id)]['Note'] = note
                wcDict[str(source_unique_id)]['Title'] = title
            #If OPAS ID from metadata spreadsheet is NOT in OPAS ID export, it will cause a KeyError
            #This exception catches those errors, and adds the IDs to a list of unmatched IDs
            #Since we added "CONC" to the OPAS ID above, we remove it here (opas_id[4:]) to allow for easier OPAS QC
            # NOTE(review): the membership test checks the full 'CONC...' id
            # but the list stores the sliced id, so the guard never matches
            # and duplicates can be appended -- verify intent.
            except KeyError:
                if opas_id not in unmatchedIDs:
                    unmatchedIDs.append(opas_id[4:])
##print (json.dumps(wcDict, indent=4))
# Join: enrich each scanned file's record with the window-card metadata.
for key in fileDict:
    file_wcID = fileDict[key]['Source Unique ID']
    if file_wcID in wcDict.keys():
        fileDict[key]['OPAS ID'] = wcDict[file_wcID]['OPAS ID']
        fileDict[key]['Collection'] = wcDict[file_wcID]['Collection']
        fileDict[key]['Date (Full)'] = wcDict[file_wcID]['Date (Full)']
        fileDict[key]['Date (Year)'] = wcDict[file_wcID]['Date (Year)']
        fileDict[key]['Date (Free text)'] = wcDict[file_wcID]['Date (Free text)']
        fileDict[key]['Note'] = wcDict[file_wcID]['Note']
        fileDict[key]['Title'] = wcDict[file_wcID]['Title']
matchedFiles_name = ''.join([str(filePath_1), '/Central_OPASmatchedFiles_WindowCards_', volume, '.csv'])
unmatchedIDs_name = ''.join([str(filePath_4), '/unmatched_WindowCards_IDs_', volume, '.txt'])
# This writes the nested dictionary to a CSV file
fields = ['OPAS ID', 'Source Unique ID', 'Collection', 'Title', 'Date (Full)', 'Date (Year)', 'Date (Free text)', 'Note', 'File Name']
with open(matchedFiles_name, 'w', newline='') as csvfile:
    w = csv.DictWriter(csvfile, fields)
    w.writeheader()
    for k in fileDict:
        w.writerow({field: fileDict[k].get(field) for field in fields})
#This saves the unmatched OPAS IDs as a text file, so you can check the issues in OPAS
with open(unmatchedIDs_name, 'w') as h:
h.write(','.join(str(opas_id) for opas_id in unmatchedIDs)) | 42.389706 | 134 | 0.612142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,324 | 0.403122 |
c3c8e031aee7868f8730d77f9fc2d1e3db73ac7c | 1,020 | py | Python | data_input/__init__.py | carlosvin/pricecalculator | 2c2c409e4a7f3e7d52001b19630a37a4e1a827ae | [
"Apache-2.0"
] | null | null | null | data_input/__init__.py | carlosvin/pricecalculator | 2c2c409e4a7f3e7d52001b19630a37a4e1a827ae | [
"Apache-2.0"
] | null | null | null | data_input/__init__.py | carlosvin/pricecalculator | 2c2c409e4a7f3e7d52001b19630a37a4e1a827ae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
from urllib.request import Request, urlopen
import logging
import parsing
__author__ = 'carlos'
class Downloader(object):
    """Fetches the raw body of a fixed URL over HTTP."""

    def __init__(self, url):
        self.url = url

    def read(self):
        """Download self.url and return the response body as bytes."""
        request = Request(self.url)
        request.add_header('Accept-encoding', 'text/html')
        # http.client.HTTPResponse is a context manager; closing it here
        # releases the socket deterministically (the original leaked it).
        with urlopen(request) as response:
            charset = response.headers.get('charset')
            data = response.read()
        logging.debug('Read %u bytes from %s (%s)' % (len(data), self.url, charset))
        return data
class StocksInfoUpdater(object):
    """Downloads stock data from a URL and parses it into stock objects."""

    def __init__(self, url):
        # Downloader fetches the raw page; parsing.StockParser extracts
        # stock entries from it (see the project `parsing` module).
        self.downloader = Downloader(url)
        self.parser = parsing.StockParser()

    def update(self):
        """Fetch the URL, feed it to the parser and return the stocks."""
        dataread = self.downloader.read()
        self.parser.feed(dataread)
        return self.parser.stocks

    @property
    def stocks(self):
        # Last parsed result; empty until update() has been called.
        return self.parser.stocks

    @property
    def url(self):
        return self.downloader.url
| 21.702128 | 84 | 0.594118 | 873 | 0.855882 | 0 | 0 | 128 | 0.12549 | 0 | 0 | 95 | 0.093137 |
c3caacf1b7d05e15e360c1c4d0096f1b7b8c5a0b | 1,403 | py | Python | ursina/string_utilities.py | jtiai/ursina | 6b424a7052c91e49aa3d19dae27fc3abe0f59e0e | [
"MIT"
] | 1 | 2020-09-04T14:32:33.000Z | 2020-09-04T14:32:33.000Z | ursina/string_utilities.py | Lewis7Lewis/ursina | 38fd34c820dcfe5be7e82db16323631570cdf96a | [
"MIT"
] | 1 | 2021-04-09T00:00:39.000Z | 2021-04-09T00:00:39.000Z | ursina/string_utilities.py | Lewis7Lewis/ursina | 38fd34c820dcfe5be7e82db16323631570cdf96a | [
"MIT"
] | 1 | 2021-04-09T00:02:59.000Z | 2021-04-09T00:02:59.000Z | import re
import traceback
from textwrap import dedent
def camel_to_snake(value):
    """Convert a CamelCase identifier to snake_case."""
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', value)
    second_pass = re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
def snake_to_camel(value):
    """Convert a snake_case identifier to CamelCase."""
    return ''.join(word.title() for word in value.split('_'))
def multireplace(string, replacements, ignore_case=False):
    """
    Given a string and a dict, replaces occurrences of the dict keys found in the
    string, with their corresponding values. The replacements will occur in "one pass",
    i.e. there should be no clashes.
    :param str string: string to perform replacements on
    :param dict replacements: replacement dictionary {str_to_find: str_to_replace_with}
    :param bool ignore_case: whether to ignore case when looking for matches
    :rtype: str the replaced string
    """
    if ignore_case:
        normalize = str.lower
        flags = re.IGNORECASE
    else:
        normalize = lambda s: s
        flags = 0
    # Normalize the keys once so lookups succeed whatever the matched casing
    # (previously ignore_case=True raised KeyError on case-mismatched hits).
    lookup = {normalize(key): value for key, value in replacements.items()}
    # Sort by full key length, longest first, so longer keys win when one key
    # is a prefix of another (the original sorted by len(s[0]) -- the length
    # of the first *character*, which is always 1).
    keys_sorted = sorted(lookup, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(key) for key in keys_sorted), flags)
    return pattern.sub(lambda match: lookup[normalize(match.group(0))], string)
def printvar(var):
    # Print "<expr> = <value>" for the caller's argument expression by reading
    # the caller's source line from the stack: [0][3] is the line text,
    # [9:][:-1] strips the "printvar(" prefix and ")" suffix.
    # NOTE(review): fragile -- breaks if the call is renamed, spans several
    # lines, or is nested in another expression.
    print(traceback.extract_stack(limit=2)[0][3][9:][:-1],"=", var)
if __name__ == '__main__':
    # Smoke-test the helpers when run as a script.
    print(camel_to_snake('CamelToSnake'))
    print(snake_to_camel('snake_to_camel'))
    printvar('test')
| 33.404762 | 87 | 0.684961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.417676 |
c3cd50d3bef2109a7394181b196687c2fce15100 | 24 | py | Python | catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | 2 | 2018-06-25T02:51:25.000Z | 2018-06-25T02:51:27.000Z | catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | 2 | 2018-09-04T06:44:21.000Z | 2018-10-15T02:30:50.000Z |
from .identity import * | 12 | 23 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c3ce5c789ffdabd456c4f50d5c4cea5acccf135f | 558 | py | Python | setup.py | whipper-team/morituri-eaclogger | 4cbdeb24d713ab8c9358ac90f4740d8cec76d3c4 | [
"0BSD"
] | 9 | 2018-10-18T13:33:01.000Z | 2022-01-17T19:25:38.000Z | setup.py | JoeLametta/morituri-eaclogger | 4cbdeb24d713ab8c9358ac90f4740d8cec76d3c4 | [
"0BSD"
] | 6 | 2016-07-03T20:47:05.000Z | 2018-02-09T14:58:43.000Z | setup.py | whipper-team/morituri-eaclogger | 4cbdeb24d713ab8c9358ac90f4740d8cec76d3c4 | [
"0BSD"
] | 3 | 2016-07-03T19:58:36.000Z | 2018-02-07T15:34:41.000Z | from setuptools import setup
from eaclogger import __version__ as plugin_version
# Package metadata for the EAC-style logger plugin; the entry point under
# "whipper.logger" is how whipper discovers the plugin at runtime.
setup(
    name="whipper-plugin-eaclogger",
    version=plugin_version,
    description="A plugin for whipper which provides EAC style log reports",
    author="JoeLametta, supermanvelo",
    maintainer="JoeLametta",
    license="ISC License",
    url="https://github.com/whipper-team/whipper-plugin-eaclogger",
    packages=["eaclogger", "eaclogger.logger"],
    entry_points={
        "whipper.logger": [
            "eac = eaclogger.logger.eac:EacLogger"
        ]
    }
)
| 29.368421 | 76 | 0.688172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.496416 |
c3cf716bf2646fc626aab11469e78c6e67817ef2 | 201 | py | Python | main.py | cankut/image-to-prime | d681ac0a6435f41662f638479ee7cfde2f203cb0 | [
"MIT"
] | null | null | null | main.py | cankut/image-to-prime | d681ac0a6435f41662f638479ee7cfde2f203cb0 | [
"MIT"
] | null | null | null | main.py | cankut/image-to-prime | d681ac0a6435f41662f638479ee7cfde2f203cb0 | [
"MIT"
] | null | null | null | from PrimeSearcher import PrimeSearcher
# Drive a PrimeSearcher over the Euler portrait: rescale to ~3600 "pixels"
# (keeping the original aspect ratio), then search for a prime rendering.
# NOTE(review): PrimeSearcher's API is project-local; parameter semantics
# assumed from names -- confirm against the PrimeSearcher module.
ps = PrimeSearcher("./images/euler.jpg")
ps.rescale(60*60, fit_to_original=True)
ps.search(max_iterations=1000, noise_count=1, break_on_find=False)
| 18.272727 | 66 | 0.761194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.114428 |
c3cf778a7c7e4eb0ab55391af44f975aea597584 | 2,432 | py | Python | Phyton/sinavKontrol.py | huseyinozdem/programlamaninTemelleri | 8aebc1aae40426f449c33165a1d1cb4600ee50bd | [
"MIT"
] | null | null | null | Phyton/sinavKontrol.py | huseyinozdem/programlamaninTemelleri | 8aebc1aae40426f449c33165a1d1cb4600ee50bd | [
"MIT"
] | null | null | null | Phyton/sinavKontrol.py | huseyinozdem/programlamaninTemelleri | 8aebc1aae40426f449c33165a1d1cb4600ee50bd | [
"MIT"
] | null | null | null | import sys
import os
import random
# Module-level exam state ("sinav" = exam). The answer grid `d` is keyed by
# (student, question); row 0 holds the answer key.
klasorAdi = os.path.dirname(sys.argv[0])   # script folder
dosyaIsmi = klasorAdi + "/test.txt"        # report file path
soruSayisi = 40                            # number of questions
ogrenciSayisi = 60                         # number of students
d = {}              # (student index, question index) -> answer letter
dogruSayisi = {}    # student -> number of correct answers
yalisSayisi = {}    # student -> number of wrong answers
bosSayisi = {}      # student -> number of blank answers
puan = {}           # student -> score (0..100)
def sinavHazirla():
    # Prepare the exam: fill row 0 of `d` with a random answer key (A..E),
    # then generate each student's answers.
    for j in range(1, soruSayisi + 1):
        r1 = random.randint(1, 5)
        d[0, j] = chr(64 + r1)
    for i in range(1, ogrenciSayisi + 1):
        for j in range(1, soruSayisi + 1):
            r1 = random.randint(1, 5)
            r2 = random.randint(0, 99)
            d[i, j] = chr(64 + r1)
            # ~20% of answers are left blank (space) ...
            if r2 in range(41, 61):
                d[i, j] = chr(32)
            # ... and ~39% are forced to be correct.
            if r2 in range(61, 100):
                d[i, j] = d[0, j]
def sinavDegerlendir():
    # Evaluate the exam: count correct/wrong/blank answers per student and
    # compute the score. Wrong answers are lower-cased so they stand out in
    # the written report.
    for i in range(1, ogrenciSayisi + 1):
        dogruSayisi[i] = 0
        yalisSayisi[i] = 0
        bosSayisi[i] = 0
        puan[i] = 0
    soruBasinaDusenPuan = 100 / soruSayisi   # points per question
    for i in range(1, ogrenciSayisi + 1):
        for j in range(1, soruSayisi + 1):
            if d[i, j] != chr(32):
                if d[i, j] == d[0, j]:
                    dogruSayisi[i] += 1
                else:
                    # mark wrong answer by converting to lowercase
                    d[i, j] = chr(ord(d[i, j]) + 32)
                    yalisSayisi[i] += 1
        bosSayisi[i] = soruSayisi - (dogruSayisi[i] + yalisSayisi[i])
        puan[i] = soruBasinaDusenPuan * dogruSayisi[i]
def sinavSirala():
    # Sort students by score, descending, with an O(n^2) exchange sort.
    # All parallel dicts (answers, counts, score) are swapped together so
    # row i always describes the same student.
    for i in range(1, ogrenciSayisi):
        for j in range(i + 1, ogrenciSayisi + 1):
            if puan[i] < puan[j]:
                for k in range(1, soruSayisi + 1):
                    g = d[i, k]
                    d[i, k] = d[j, k]
                    d[j, k] = g
                g = dogruSayisi[i] ; dogruSayisi[i] = dogruSayisi[j] ; dogruSayisi[j] = g
                g = yalisSayisi[i] ; yalisSayisi[i] = yalisSayisi[j] ; yalisSayisi[j] = g
                g = bosSayisi[i] ; bosSayisi[i] = bosSayisi[j] ; bosSayisi[j] = g
                g = puan[i] ; puan[i] = puan[j] ; puan[j] = g
def sinavYaz():
    # Write the report: first line is the answer key (padded 4 chars to line
    # up with the "%3d." student prefix), then one line per student with the
    # answers and correct/wrong/blank counts and the score.
    dosya = open(dosyaIsmi, "w")
    s = '    '
    for j in range(1, soruSayisi + 1):
        s += d[0 ,j]
    print(s, file=dosya)
    for i in range(1, ogrenciSayisi + 1):
        s = '%3d.' % i
        for j in range(1, soruSayisi + 1):
            s += d[i, j]
        s += ' ** Doğru Sayısı:%3d Yanlış Sayısı:%3d Boş Sayısı:%3d Puan:%6.2f' %\
            (dogruSayisi[i], yalisSayisi[i], bosSayisi[i], puan[i])
        print(s, file=dosya)
    dosya.close()
def sinavOku():
    """Print the saved exam report (dosyaIsmi) to stdout; warn if missing."""
    # `not ...` instead of `== False`, and `with` so the handle is closed
    # even if printing fails.
    if not os.path.isfile(dosyaIsmi):
        print("dosya diskte mevcut değil")
    else:
        with open(dosyaIsmi, "r") as dosya:
            for s in dosya:
                print(s, end="")
# Full pipeline: generate, grade, sort, write the report, then echo it back.
sinavHazirla()
sinavDegerlendir()
sinavSirala()
sinavYaz()
sinavOku()
| 24.565657 | 81 | 0.557155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.05526 |
c3cf7f9ab95399cee3acf2f3ba359c7fc5fb1065 | 5,639 | py | Python | heufybot/modules/commands/time_command.py | Heufneutje/PyHeufyBot | 9d26587c47a4ea75a3f4f1af6d40958bec2c9a87 | [
"MIT"
] | 3 | 2015-12-19T15:41:35.000Z | 2017-11-01T12:33:01.000Z | heufybot/modules/commands/time_command.py | Heufneutje/PyHeufyBot | 9d26587c47a4ea75a3f4f1af6d40958bec2c9a87 | [
"MIT"
] | 26 | 2015-01-10T10:51:24.000Z | 2019-03-07T10:51:46.000Z | heufybot/modules/commands/time_command.py | Heufneutje/PyHeufyBot | 9d26587c47a4ea75a3f4f1af6d40958bec2c9a87 | [
"MIT"
] | 8 | 2015-01-28T12:18:06.000Z | 2018-11-28T21:39:21.000Z | from twisted.plugin import IPlugin
from heufybot.moduleinterface import IBotModule
from heufybot.modules.commandinterface import BotCommand
from heufybot.utils.timeutils import now, timestamp
from zope.interface import implements
from datetime import datetime
class TimeCommand(BotCommand):
    """IRC bot command "time": report the local time for a latlon pair, a
    place name or a (possibly implicit) user nickname, using the Google
    Time Zone API for the timezone/DST offsets."""
    implements(IPlugin, IBotModule)

    name = "Time"
    timeBaseURL = "https://maps.googleapis.com/maps/api/timezone/json?"

    def triggers(self):
        return ["time"]

    def load(self):
        self.help = "Commands: time <lat> <lon>, time <place>, time <nickname> | Get the current local time for the " \
                    "given latlon, place or user."
        self.commandHelp = {}
        # Google API key is read from the bot's persistent storage.
        self.googleKey = None
        if "api-keys" not in self.bot.storage:
            self.bot.storage["api-keys"] = {}
        if "google" in self.bot.storage["api-keys"]:
            self.googleKey = self.bot.storage["api-keys"]["google"]

    def execute(self, server, source, command, params, data):
        if not self.googleKey:
            self.replyPRIVMSG(server, source, "No API key found.")
            return
        # Use the user's nickname as a parameter if none were given
        if len(params) == 0:
            params.append(data["user"].nick)
            selfSearch = True
        else:
            selfSearch = False
        # Try using latlon to get the location
        try:
            lat = float(params[0])
            lon = float(params[1])
            location = self.bot.moduleHandler.runActionUntilValue("geolocation-latlon", lat, lon)
            if not location:
                self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
                return
            if not location["success"]:
                self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
                return
            self._handleCommandWithLocation(server, source, location)
            return
        except (IndexError, ValueError):
            pass # The user did not give a latlon, so continue using other methods
        # Try to determine the user's location from a nickname
        if self.bot.config.serverItemWithDefault(server, "use_userlocation", False):
            userLoc = self.bot.moduleHandler.runActionUntilValue("userlocation", server, source, params[0], selfSearch)
            if selfSearch:
                if not userLoc:
                    return
                elif not userLoc["success"]:
                    return
            if userLoc and userLoc["success"]:
                if "lat" in userLoc:
                    location = self.bot.moduleHandler.runActionUntilValue("geolocation-latlon", userLoc["lat"],
                                                                          userLoc["lon"])
                else:
                    location = self.bot.moduleHandler.runActionUntilValue("geolocation-place", userLoc["place"])
                if not location:
                    self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
                    return
                if not location["success"]:
                    self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
                    return
                self._handleCommandWithLocation(server, source, location)
                return
        # Try to determine the location by the name of the place
        location = self.bot.moduleHandler.runActionUntilValue("geolocation-place", " ".join(params))
        if not location:
            self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
            return
        if not location["success"]:
            self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
            return
        self._handleCommandWithLocation(server, source, location)

    def _handleCommandWithLocation(self, server, source, location):
        formattedTime = self._getTime(location["latitude"], location["longitude"])
        self.replyPRIVMSG(server, source, "Location: {} | {}".format(location["locality"], formattedTime))

    def _getTime(self, lat, lon):
        # Query the Google Time Zone API for the DST and raw UTC offsets at
        # the given coordinates, then format the shifted current time.
        currentTime = timestamp(now())
        params = {
            "location": "{},{}".format(lat, lon),
            "timestamp": currentTime,
            "key": self.googleKey
        }
        result = self.bot.moduleHandler.runActionUntilValue("fetch-url", self.timeBaseURL, params)
        if not result:
            return "No time for this location could be found at this moment. Try again later."
        timeJSON = result.json()
        if timeJSON["status"] != "OK":
            if "error_message" in timeJSON:
                return timeJSON["error_message"]
            else:
                return "An unknown error occurred while requesting the time."
        resultDate = datetime.fromtimestamp(currentTime + int(timeJSON["dstOffset"]) + int(timeJSON["rawOffset"]))
        properDay = self._getProperDay(resultDate.day)
        formattedTime = resultDate.strftime("%H:%M (%I:%M %p) on %A, " + properDay + " of %B, %Y")
        return "Timezone: {} | Local time is {}".format(timeJSON["timeZoneName"], formattedTime)

    def _getProperDay(self, day):
        # Ordinal suffix for a day of the month (1st, 2nd, 3rd, 4th, ...).
        if day in [1, 21, 31]:
            return "{}st".format(day)
        elif day in [2, 22]:
            return "{}nd".format(day)
        # Fixed: the original tested [3, 33]; 33 is not a calendar day and
        # the 23rd was rendered "23th".
        elif day in [3, 23]:
            return "{}rd".format(day)
        else:
            return "{}th".format(day)

timeCommand = TimeCommand()
| 44.401575 | 119 | 0.592126 | 5,346 | 0.94804 | 0 | 0 | 0 | 0 | 0 | 0 | 1,484 | 0.263167 |
c3d03fc207ef78fc939d12e5c945e9251e9a8a37 | 2,132 | py | Python | problem11.py | Scitator/fivt_bioinfo17 | f0b861edc5ffc106f9802ed3ef6cd78b25570025 | [
"MIT"
] | null | null | null | problem11.py | Scitator/fivt_bioinfo17 | f0b861edc5ffc106f9802ed3ef6cd78b25570025 | [
"MIT"
] | null | null | null | problem11.py | Scitator/fivt_bioinfo17 | f0b861edc5ffc106f9802ed3ef6cd78b25570025 | [
"MIT"
] | 1 | 2019-12-05T20:47:29.000Z | 2019-12-05T20:47:29.000Z | import pandas as pd
import click
import collections
def kmer_suffix(kmer):
    # (k-1)-length suffix of a k-mer, e.g. "abc" -> "bc".
    return kmer[1:]
def kmer_prefix(kmer):
    # (k-1)-length prefix of a k-mer, e.g. "abc" -> "ab".
    return kmer[:-1]
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def build_graph(kmers):
    """Build an adjacency map: each k-mer's (k-1)-prefix points at the list
    of (k-1)-suffixes it leads to."""
    graph = collections.defaultdict(list)
    for kmer in kmers:
        graph[kmer[:-1]].append(kmer[1:])
    return graph
def find_start_vertex(graph):
    """Choose the vertex to start the Eulerian walk from.

    A vertex with no outgoing edges is returned immediately; otherwise the
    vertex with the smallest in-degree wins (ties broken by first
    appearance, matching stable-sort order).
    """
    in_degree = collections.defaultdict(int)
    for vertex, neighbours in graph.items():
        in_degree[vertex] += 0  # ensure every source vertex has an entry
        if not neighbours:
            return vertex
        for neighbour in neighbours:
            in_degree[neighbour] += 1
    ranked = sorted(in_degree.items(), key=lambda entry: entry[1])
    return ranked[0][0]
def find_eulerian_tour(graph):
    """Hierholzer's algorithm (iterative, stack-based):

    push the start vertex on a stack;
    while the stack is not empty:
        let v be the vertex on top of the stack;
        if v has no remaining outgoing edges,
            pop v and append it to the answer;
        otherwise
            remove one edge (v, w) from the graph and push w.

    The answer accumulates in reverse, so it is flipped before returning.
    """
    walk = []
    stack = [find_start_vertex(graph)]
    while stack:
        vertex = stack[-1]
        if graph[vertex]:
            # consume the last remaining edge out of `vertex`
            stack.append(graph[vertex].pop())
        else:
            walk.append(stack.pop())
    walk.reverse()
    return walk
def dna_reconstruction(k, dna):
    """Rebuild a string from its consecutive k-sized chunks by walking the
    Eulerian tour of their prefix->suffix graph."""
    kmers = list(chunks(dna, k))
    path = find_eulerian_tour(build_graph(kmers))
    pieces = [node[0] for node in path]
    pieces.append(path[-1][1:])
    return "".join(pieces)
# CLI entry point: read a TSV with columns "k" and "dna" and print the
# reconstruction for each row.
@click.command()
@click.option(
    "--fin",
    type=str,
    default="problem11_input.tsv")
def main(fin):
    df = pd.read_csv(fin, sep="\t")
    # Fail fast if the expected columns are missing.
    assert all(x in df.columns.values.tolist() for x in ["k", "dna"])
    for i, row in df.iterrows():
        print(dna_reconstruction(row["k"], row["dna"]))

if __name__ == '__main__':
    main()
| 23.688889 | 69 | 0.584897 | 0 | 0 | 126 | 0.05424 | 303 | 0.130435 | 0 | 0 | 685 | 0.294877 |
c3d17c2c456d36f8cb0a5fd4496941d685d48e93 | 328 | py | Python | ch22/import_test.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | ch22/import_test.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | ch22/import_test.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python3
import sys
import re
import time
import datetime
import os
# Dump every currently-imported module, a couple of environment variables
# and the module search path -- a quick import-environment sanity check.
for module in sorted(sys.modules):
    print("%-20s : %s" % (module, sys.modules[module]))
# NOTE(review): direct indexing raises KeyError when USER/PWD are unset
# (e.g. on Windows) -- consider os.environ.get as used for PYTHONPATH.
print('USER : ', os.environ['USER'])
print('PWD : ', os.environ['PWD'])
print('PYTHONPATH: ', os.environ.get('PYTHONPATH'))
print(sys.path)
| 19.294118 | 55 | 0.652439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.301829 |
c3d31d6dc22314e66346271479257cc51c92d100 | 997 | py | Python | tests/infrastructure/test_bpelearner.py | maximzubkov/codeprep | 807ee1ea33796b6853c45e9dcb4e866b3f09a5f2 | [
"Apache-2.0"
] | 33 | 2020-03-02T23:42:15.000Z | 2022-03-18T02:34:32.000Z | tests/infrastructure/test_bpelearner.py | maximzubkov/codeprep | 807ee1ea33796b6853c45e9dcb4e866b3f09a5f2 | [
"Apache-2.0"
] | 10 | 2020-02-27T13:43:00.000Z | 2021-04-21T12:11:44.000Z | tests/infrastructure/test_bpelearner.py | maximzubkov/codeprep | 807ee1ea33796b6853c45e9dcb4e866b3f09a5f2 | [
"Apache-2.0"
] | 9 | 2020-03-16T14:28:06.000Z | 2021-09-30T09:40:56.000Z | # SPDX-FileCopyrightText: 2020 Hlib Babii <hlibbabii@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
from unittest import mock
import pytest
from codeprep.bpepkg.bpe_config import BpeConfig, BpeParam, BpeConfigNotSupported
from codeprep.pipeline.bpelearner import run
@mock.patch('codeprep.pipeline.bpelearner.Dataset', autospec=True)
def test_run_word_end(mocked_dataset):
    # WORD_END=True is an unsupported BPE configuration: run() must reject
    # it with BpeConfigNotSupported instead of training.
    bpe_config = BpeConfig({
        BpeParam.BASE: 'code',
        BpeParam.WORD_END: True,
        BpeParam.UNICODE: 'yes',
        BpeParam.CASE: 'yes'
    })
    with pytest.raises(BpeConfigNotSupported):
        run(mocked_dataset, 1, bpe_config)
@mock.patch('codeprep.pipeline.bpelearner.Dataset', autospec=True)
def test_run_bytes_bpe(mocked_dataset):
bpe_config = BpeConfig({
BpeParam.BASE: 'code',
BpeParam.WORD_END: False,
BpeParam.UNICODE: 'bytes',
BpeParam.CASE: 'yes'
})
with pytest.raises(BpeConfigNotSupported):
run(mocked_dataset, 1, bpe_config) | 29.323529 | 81 | 0.713139 | 0 | 0 | 0 | 0 | 718 | 0.72016 | 0 | 0 | 211 | 0.211635 |
c3d376d5f7adc2553dd1e51178275f57c44c8d80 | 6,321 | py | Python | tcheck.py | zcm/tcheck | 0a0b1c362cd630875d725247e9bfda541880614d | [
"Apache-2.0"
] | null | null | null | tcheck.py | zcm/tcheck | 0a0b1c362cd630875d725247e9bfda541880614d | [
"Apache-2.0"
] | null | null | null | tcheck.py | zcm/tcheck | 0a0b1c362cd630875d725247e9bfda541880614d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import torrent_parser as tp
import asyncio
import contextlib
import pathlib
import argparse
import pprint
import hashlib
import concurrent.futures
import os.path
import logging
import tqdm
class TorrentChecker(object):
def __init__(self, datadir=pathlib.Path('.'), data_file_globs=["**"],
checkers=None, pieces=None):
self._data_file_globs = data_file_globs
self._datadir = datadir
self._checkers = checkers
self._pieces = pieces
self._logger = logging.getLogger("TorrentChecker")
self._cancelled = False
def _IsWantedDataFile(self, paths):
for glob in self._data_file_globs:
for path in paths:
if path.match(glob):
return True
return False
  def _RaiseIfCancelled(self):
    # Cooperative cancellation point: long-running hashing loops call this
    # so an externally-set _cancelled flag aborts them promptly.
    if self._cancelled:
      raise asyncio.CancelledError()
  def _GetPieceHash(self, datadir, piece_index, piece_len, paths, offset):
    """Return the hex SHA-1 of one torrent piece.

    A piece may span several files: `paths` lists them in order, and
    `offset` is where the piece starts inside the *first* file only.
    Raises ValueError if more paths are supplied than the piece can cover.
    """
    first_time = True
    bytes_remaining = piece_len
    hasher = hashlib.sha1()
    for path in paths:
      full_path = datadir.joinpath(path)
      #logging.debug("Hashing piece %d in file %s", piece_index, path)
      if bytes_remaining == 0:
        raise ValueError(
            "Too many paths passed into Check for piece size {}: {!r}".format(
                piece_len, paths))
      with open(full_path, "rb") as fobj:
        if first_time:
          # Only the first file is entered mid-way; subsequent files are
          # consumed from their beginning.
          fobj.seek(offset)
          first_time = False
        while bytes_remaining != 0:
          self._RaiseIfCancelled()
          data = fobj.read(bytes_remaining)
          if not data:
            # EOF: the rest of the piece continues in the next file.
            break
          hasher.update(data)
          bytes_remaining -= len(data)
    return hasher.hexdigest()
def _Check(self, datadir, piece_index, piece_sha1, piece_len, paths, offset):
if self._pieces and piece_index not in self._pieces:
#self._logger.warning('skipped %d', piece_index)
return
sha1 = self._GetPieceHash(datadir, piece_index, piece_len, paths, offset)
if piece_sha1 == sha1:
#logging.info(
# ("Piece %d (len %d) verifies correctly with hash %r, containing files\n"
# "%s"),
# piece_index, piece_len, sha1, paths)
pass
else:
self._logger.warning(
("Piece %d (len %d) containing files %r (offset %d) does not verify."
"\n expected: %r != actual: %r"),
piece_index, piece_len, paths, offset, piece_sha1, sha1)
def _CollectPieces(self, piece_len, pieces, file_infos):
file_infos_iter = iter(file_infos)
cur_file_info = next(file_infos_iter)
prev_offset = 0
#logging.debug("piece_len = %d", piece_len)
for piece_index, piece_sha1 in enumerate(pieces):
offset = prev_offset
bytes_covered_total = 0
piece_paths = []
while bytes_covered_total < piece_len:
#path = os.path.join(datadir, *cur_file_info['path'])
path = pathlib.PurePath(*cur_file_info['path'])
piece_paths.append(path)
size = cur_file_info['length']
effective_size = size - offset
newly_covered_bytes = min(piece_len - bytes_covered_total, effective_size)
bytes_covered_total += newly_covered_bytes
offset += newly_covered_bytes
#logging.debug("piece = %d, offset = %d, bct = %d, size = %d",
#piece_index, offset,
#bytes_covered_total, size)
if offset == size:
#logging.debug("resetting offset")
offset = 0
try:
cur_file_info = next(file_infos_iter)
except StopIteration:
break
#logging.debug("bct = %d", bytes_covered_total)
#logging.debug(
# "yielding (%d, %r, %r, %d)", piece_index, piece_sha1, piece_paths,
# prev_offset)
yield (piece_index, piece_sha1, piece_paths, prev_offset)
prev_offset = offset
def CheckTorrent(self, torrent_file):
parsed = tp.parse_torrent_file(torrent_file)
info = parsed['info']
piece_len = info['piece length']
pieces = info['pieces']
file_infos = None
torrent_name = info['name']
if 'files' in info:
file_infos = info['files']
else:
file_infos = [info]
info['path'] = [f'{self._datadir}/{torrent_name}']
datadir = pathlib.Path(self._datadir, torrent_name)
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._checkers) as executor:
futures = []
try:
for piece_index, piece_sha1, piece_paths, offset in self._CollectPieces(
piece_len, pieces, file_infos):
if not self._IsWantedDataFile(piece_paths):
#logging.debug(
# "Skipping files which matched no data_file_globs: %r",
# piece_paths)
continue
futures.append(
executor.submit(
TorrentChecker._Check, self, datadir, piece_index, piece_sha1,
piece_len, piece_paths, offset))
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures), total=len(futures),
unit='piece', dynamic_ncols=True, leave=False):
future.result()
except:
self._logger.warning("Cancelling pending work")
for future in futures:
future.cancel()
self._cancelled = True
raise
def parse_pieces_spec(spec):
  """Parse a ``--pieces`` value into a range of piece indices.

  Args:
    spec: ``None``/empty (meaning "all pieces"), a single index such as
      ``"7"``, or an end-exclusive span such as ``"3-6"`` (pieces 3, 4, 5;
      plain Python ``range`` semantics, as in the original code).

  Returns:
    A ``range`` of the requested piece indices, or ``None`` for all pieces.
  """
  if not spec:
    return None
  bounds = spec.split('-')
  if len(bounds) == 1:
    index = int(bounds[0])
    return range(index, index + 1)
  return range(int(bounds[0]), int(bounds[1]))
def main():
  """CLI entry point: parse arguments and verify a torrent's pieces."""
  parser = argparse.ArgumentParser(description='Verify downloaded torrents')
  parser.add_argument('torrent_file', type=str)
  parser.add_argument('data_file_globs', nargs='+', type=str, default=["**"])
  parser.add_argument('--checkers', default=None, type=int)
  # Bug fix: the default used to be None, which crashed below in
  # args.loglevel.upper() whenever --loglevel was omitted.
  parser.add_argument('--loglevel', default='WARNING', type=str)
  parser.add_argument('--datadir', default=pathlib.Path('.'), type=pathlib.Path)
  parser.add_argument('--pieces', default=None, type=str)
  args = parser.parse_args()
  logging.basicConfig(level=getattr(logging, args.loglevel.upper()))
  checker = TorrentChecker(
      data_file_globs=args.data_file_globs,
      datadir=args.datadir,
      checkers=args.checkers,
      pieces=parse_pieces_spec(args.pieces))
  checker.CheckTorrent(args.torrent_file)
# Script entry point: only run the CLI when executed directly.
if __name__ == '__main__':
  main()
# vim: set et ts=2 sw=2 sts=2
| 33.802139 | 83 | 0.641196 | 5,048 | 0.798608 | 1,409 | 0.222908 | 0 | 0 | 0 | 0 | 1,236 | 0.195539 |
c3d3a48562b302ec3d1c4f7d9f346e8c2423f4ac | 78 | py | Python | segmentation_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 3 | 2018-10-10T22:15:10.000Z | 2020-11-20T15:17:45.000Z | segmentation_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 2 | 2019-10-31T13:29:05.000Z | 2021-08-12T17:32:32.000Z | segmentation_tools/__init__.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 2 | 2020-06-04T18:40:52.000Z | 2022-03-18T15:53:05.000Z | # Functions to segment chromosomes
from . import chromosome
from . import cell | 26 | 34 | 0.807692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.435897 |
c3d419ee047550f261d26c3946541ba1b4cb36e0 | 3,127 | py | Python | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 2 | 2021-06-06T15:29:08.000Z | 2021-06-07T20:37:38.000Z | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | null | null | null | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 1 | 2021-05-31T16:48:08.000Z | 2021-05-31T16:48:08.000Z | """Scrape episodes from online sources."""
from datetime import datetime
import re
from typing import Dict, Iterable, Match, Optional, Tuple
import requests
from .episode import Episode
from .settings \
import WUNSCHLISTE_IMPLIED_TIMEZONE, \
WUNSCHLISTE_QUERY_PARAMETERS, WUNSCHLISTE_URL
WUNSCHLISTE_SELECT_EPISODE_PATTERN = r'(?ms)<li.*?</li>'
WUNSCHLISTE_PARSE_EPISODE_PATTERN = r"""(?msx)
(?:
heute|
morgen|
[A-Z][a-z],[^<]+ # Weekday
(?P<day>\d{2})\.
(?P<month>\d{2})\.
<.*?> # Multiple text nodes or tags
(?P<year>\d{4})
)
<.*?> # Multiple text nodes or tags
(?P<hour>\d{1,2}):
(?P<minute>\d{2})[^<]+h
<.*?"Episode"> # Multiple text nodes or tags
(?P<episode_number>[^<]+)
(?:<[^>]+>)+ # Multiple tags
(?P<name>[^<]+)
(?:<[^>]+>)+ # Multiple tags
(?P<rerun>(?:\s+\(Wdh.\))?)
"""
def scrape_wunschliste(html: Optional[str] = None) \
        -> Iterable[Episode]:
    """Scrape episodes from wunschliste.de

    Yields one :class:`Episode` per parsed listing entry. When ``html`` is
    None the listing page is fetched live. Entries that match but carry no
    explicit date (listed as "heute"/"morgen") are skipped; an entry that
    fails to parse at all raises ``RuntimeError``.
    """
    def get_html() -> str:
        # Fetch the live listing page; fail loudly on HTTP errors.
        response = requests.get(WUNSCHLISTE_URL,
                                params=WUNSCHLISTE_QUERY_PARAMETERS)
        response.raise_for_status()
        return response.text
    def parse_episodes(html_source: str) \
            -> Iterable[Tuple[str, Optional[Match[str]]]]:
        # Pair each raw <li> fragment with its (possibly failed) field match.
        return (
            (
                episode_html,
                re.search(WUNSCHLISTE_PARSE_EPISODE_PATTERN,
                          episode_html)
            )
            for episode_html
            in re.findall(WUNSCHLISTE_SELECT_EPISODE_PATTERN,
                          html_source)
        )
    def cleanup_html(html_dict: Dict[str, str]) -> Dict[str, str]:
        # Collapse runs of whitespace (or literal "\n" text) to a single one.
        return {
            key: re.sub(r'(?m)(?:\s|\\n)+(?=\s|\\n)', '', value)
            for key, value in html_dict.items()
        }
    def to_episode(raw_episode_dict: Dict[str, str]) -> Episode:
        # Build an Episode from the cleaned regex groups; airtime is
        # interpreted in the site's implied timezone.
        return Episode(
            int(raw_episode_dict['episode_number']),
            name=raw_episode_dict['name'],
            date_published=datetime(
                int(raw_episode_dict['year']),
                int(raw_episode_dict['month']),
                int(raw_episode_dict['day']),
                hour=int(raw_episode_dict['hour']),
                minute=int(raw_episode_dict['minute']),
                tzinfo=WUNSCHLISTE_IMPLIED_TIMEZONE,
            ),
            sd_date_published=datetime.now(),
            is_rerun=bool(raw_episode_dict['rerun']),
            # Heuristic: regular episodes are titled "Folge …"; anything
            # else is treated as a spin-off.
            is_spinoff=not raw_episode_dict['name'].startswith('Folge'),
            tz=WUNSCHLISTE_IMPLIED_TIMEZONE,
        )
    for episode_html, episode_match \
            in parse_episodes(html or get_html()):
        if not episode_match:
            raise RuntimeError(
                f'Unable to parse episode from {repr(episode_html)}')
        # "heute"/"morgen" entries have no captured date -> silently skipped.
        # NOTE(review): confirm skipping same/next-day airings is intended.
        if episode_match.groupdict()['day']:
            yield to_episode(
                cleanup_html(episode_match.groupdict())
            )
| 33.265957 | 72 | 0.532779 | 0 | 0 | 2,101 | 0.67189 | 0 | 0 | 0 | 0 | 891 | 0.284938 |
c3d49173df679851d73eb5d1c962ff92c378def9 | 1,824 | py | Python | locust/locustfile.py | FannySundlofSopra/locust-on-azure | 09a5fb7e928ffacf2f4422c9c5bd92cbb88ae80c | [
"MIT"
] | null | null | null | locust/locustfile.py | FannySundlofSopra/locust-on-azure | 09a5fb7e928ffacf2f4422c9c5bd92cbb88ae80c | [
"MIT"
] | null | null | null | locust/locustfile.py | FannySundlofSopra/locust-on-azure | 09a5fb7e928ffacf2f4422c9c5bd92cbb88ae80c | [
"MIT"
] | null | null | null | from locust import HttpUser, task, between
from locust.contrib.fasthttp import FastHttpUser
class TestUser(FastHttpUser):
    """Locust load-test user replaying one full page load.

    Uses FastHttpUser for a faster HTTP client than the default HttpUser.
    """
    @task
    def viewPage(self):
        """Fetch the campaign page plus every asset a browser would load."""
        # Page document and JS bundles (Gatsby/webpack chunk names).
        self.client.get('/insamlingar/varldshjalte')
        self.client.get('/webpack-runtime-72a2735cd8a1a24911f7.js')
        self.client.get('/framework-3f3b31f3b6fc5c344dca.js')
        self.client.get('/app-9cd3bdb66ddb863d5142.js')
        self.client.get('/styles-407fe62976dc5310c43e.js')
        self.client.get('/commons-16f36d497b002bdafac4.js')
        self.client.get('/9c31700cf97414fc836e3860377cce64191bc134-9c53b563cbb98335accd.js.js')
        self.client.get('/component---src-templates-page-template-tsx-6c62c930e383b3f3ce6b.js')
        # Gatsby page-data / static-query JSON payloads.
        self.client.get('/page-data/insamlingar/varldshjalte/page-data.json')
        self.client.get('/page-data/sq/d/1014302582.json')
        self.client.get('/page-data/sq/d/1203226985.json')
        self.client.get('/page-data/sq/d/1677386854.json')
        self.client.get('/page-data/sq/d/187643644.json')
        self.client.get('/page-data/sq/d/28066254.json')
        self.client.get('/page-data/sq/d/3200608417.json')
        self.client.get('/page-data/sq/d/3296809872.json')
        self.client.get('/page-data/sq/d/538779877.json')
        self.client.get('/page-data/app-data.json')
        # Images and icons referenced by the page.
        self.client.get('/logo.svg')
        self.client.get('/eng-flag.svg')
        self.client.get('/logo-rh-sm.svg')
        self.client.get('/logo-rh-md.svg')
        self.client.get('/icon-chevron.svg')
        self.client.get('/logo-90-konto.png')
        self.client.get('/icon-facebook.svg')
        self.client.get('/icon-instagram.svg')
        self.client.get('/icon-twitter.svg')
        self.client.get('/logo-svt.png')
        self.client.get('/logo-sr.png')
self.client.get('/logo-ur.png') | 49.297297 | 95 | 0.660088 | 1,731 | 0.949013 | 0 | 0 | 1,691 | 0.927083 | 0 | 0 | 882 | 0.483553 |
c3d6fc016b75ad1d8fdff7a053452786b343e77c | 251 | py | Python | lino_book/projects/lydia/tests/dumps/18.12.0/invoicing_plan.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 3 | 2016-08-25T05:58:09.000Z | 2019-12-05T11:13:45.000Z | lino_book/projects/lydia/tests/dumps/18.12.0/invoicing_plan.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 18 | 2016-11-12T21:38:58.000Z | 2019-12-03T17:54:38.000Z | lino_book/projects/lydia/tests/dumps/18.12.0/invoicing_plan.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 9 | 2016-10-15T11:12:33.000Z | 2021-09-22T04:37:37.000Z | # -*- coding: UTF-8 -*-
# Lino dump fixture: presumably executed by the dumps loader, which injects
# `logger`, `loader`, `create_invoicing_plan` and `date` -- TODO confirm.
logger.info("Loading 1 objects to table invoicing_plan...")
# fields: id, user, today, journal, max_date, partner, course
loader.save(create_invoicing_plan(1,6,date(2015,3,1),1,None,None,None))
# Flush objects whose saves were deferred (e.g. by FK ordering).
loader.flush_deferred_objects()
| 35.857143 | 71 | 0.737052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.517928 |
c3d72d083cc46a58431880b45d7e590b6d5dc93f | 4,943 | py | Python | PyYADL/redis_lock.py | PawelJ-PL/PyYADL | baa748200d75f8bdd8252d95e0296b2df933bc90 | [
"MIT"
] | 2 | 2018-02-20T22:08:00.000Z | 2018-05-29T22:02:03.000Z | PyYADL/redis_lock.py | PawelJ-PL/PyYADL | baa748200d75f8bdd8252d95e0296b2df933bc90 | [
"MIT"
] | 1 | 2018-01-14T20:03:18.000Z | 2018-02-20T22:07:49.000Z | PyYADL/redis_lock.py | PawelJ-PL/PyYADL | baa748200d75f8bdd8252d95e0296b2df933bc90 | [
"MIT"
] | null | null | null | from time import time
from json import dumps, loads
from redis import StrictRedis, ConnectionPool, WatchError
from PyYADL.distributed_lock import AbstractDistributedLock
class RedisLock(AbstractDistributedLock):
    """Exclusive distributed lock backed by a single Redis key.

    The lock state is a JSON payload stored under ``[<prefix>:]lock:<name>``;
    a per-instance secret proves ownership on release.
    """

    def __init__(self, name, prefix=None, ttl=-1, existing_connection_pool=None, redis_host='localhost', redis_port=6379,
                 redis_password=None, redis_db=0, **kwargs):
        super().__init__(name, prefix, ttl)
        # Reuse a caller-supplied pool when given; otherwise build our own.
        pool = existing_connection_pool or ConnectionPool(
            host=redis_host, port=redis_port, password=redis_password,
            db=redis_db, **kwargs)
        self._client = StrictRedis(connection_pool=pool)
        self.LOCK_KEY = self._build_lock_key()

    def _build_lock_key(self):
        """Return the Redis key for this lock: ``[<prefix>:]lock:<name>``."""
        segments = []
        if self.prefix:
            segments.append(self.prefix)
        segments.append('lock:' + self.name)
        return ':'.join(segments)

    def _write_lock_if_not_exists(self):
        """Atomically claim the lock (SET NX); True when it was acquired."""
        payload = dumps({'timestamp': int(time()), 'secret': self._secret, 'exclusive': True})
        expiry = None
        if self.ttl > 0:
            expiry = self.ttl
        acquired = self._client.set(name=self.LOCK_KEY, value=payload, ex=expiry, nx=True)
        return bool(acquired)

    def _verify_secret(self) -> bool:
        """True when this instance's secret matches the stored one.

        Raises RuntimeError when no lock (or no secret) is stored.
        """
        raw = self._client.get(self.LOCK_KEY)
        stored = None
        if raw is not None:
            stored = loads(raw.decode('utf-8')).get('secret')
        if stored is None:
            raise RuntimeError('release unlocked lock')
        return stored == self._secret

    def _delete_lock(self):
        """Delete the lock key; True when a key was actually removed."""
        return self._client.delete(self.LOCK_KEY) > 0
class RedisWriteLock(RedisLock):
    """Exclusive (writer) lock; all behaviour is inherited from RedisLock."""
class RedisReadLock(RedisLock):
    """Shared (reader) lock: many holders, each tracked by its secret.

    The stored payload marks ``exclusive: False`` and keeps a *list* of
    holder secrets. All mutations run as WATCH/MULTI transactions on the
    lock key; a concurrent change raises WatchError and the whole
    operation is retried.
    """
    def _write_lock_if_not_exists(self):
        """Join (or create) the shared lock; False if held exclusively."""
        with self._client.pipeline() as pipe:
            try:
                pipe.watch(self.LOCK_KEY)
                raw_lock_data = pipe.get(self.LOCK_KEY)
                lock_data = loads(raw_lock_data.decode('utf-8')) if raw_lock_data else self._generate_new_lock_data()
                # An exclusive (write) lock in place blocks read acquisition.
                if not self._is_valid_read_lock_data(lock_data):
                    return False
                # Add our secret to the holder set (deduplicated).
                lock_data['secret'] = list(set(lock_data['secret'] + [self._secret]))
                lock_data['timestamp'] = int(time())
                ttl = self.ttl if self.ttl > 0 else None
                pipe.multi()
                pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl)
                pipe.execute()
                return True
            except WatchError:
                # NOTE(review): retry is recursive; heavy contention could
                # recurse deeply -- confirm acceptable.
                self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
                return self._write_lock_if_not_exists()
    @staticmethod
    def _is_valid_read_lock_data(lock_data):
        """True when the payload describes a shared lock with a secret list."""
        return (lock_data.get('exclusive', True) is False) and (isinstance(lock_data.get('secret'), (list, set, tuple)))
    def _generate_new_lock_data(self):
        """Fresh shared-lock payload with this instance as sole holder."""
        return {'timestamp': int(time()), 'secret': [self._secret], 'exclusive': False}
    def _verify_secret(self) -> bool:
        """True when this instance is among the current holders."""
        with self._client.pipeline() as pipe:
            try:
                pipe.watch(self.LOCK_KEY)
                raw_lock_data = pipe.get(self.LOCK_KEY)
                if raw_lock_data is None:
                    return False
                lock_data = loads(raw_lock_data.decode('utf-8'))
                if not self._is_valid_read_lock_data(lock_data):
                    return False
                return self._secret in lock_data['secret']
            except WatchError:
                self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
                return self._verify_secret()
    def _delete_lock(self):
        """Remove this holder; delete the key once no holders remain."""
        with self._client.pipeline() as pipe:
            try:
                pipe.watch(self.LOCK_KEY)
                raw_lock_data = pipe.get(self.LOCK_KEY)
                if raw_lock_data is None:
                    return False
                lock_data = loads(raw_lock_data.decode('utf-8'))
                if not self._is_valid_read_lock_data(lock_data):
                    return False
                if self._secret not in lock_data['secret']:
                    return False
                secrets = lock_data['secret']
                secrets.remove(self._secret)
                # Preserve the key's remaining TTL when rewriting it.
                ttl = pipe.ttl(self.LOCK_KEY)
                if not secrets:
                    # Last holder gone: drop the key entirely.
                    pipe.multi()
                    pipe.delete(self.LOCK_KEY)
                    pipe.execute()
                    return True
                else:
                    lock_data['secret'] = secrets
                    pipe.multi()
                    pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl)
                    pipe.execute()
                    return True
            except WatchError:
                self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
                return self._delete_lock()
| 42.247863 | 121 | 0.580012 | 4,764 | 0.963787 | 0 | 0 | 179 | 0.036213 | 0 | 0 | 388 | 0.078495 |
c3d757831ced0c808c54a19099c1901ac199f8e6 | 68,660 | py | Python | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_leslie3d/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_leslie3d/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_leslie3d/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.064476,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.253331,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.335857,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.188561,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.32652,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.187268,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.70235,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.134893,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.73557,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0634506,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00683549,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0740694,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0505527,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.13752,
'Execution Unit/Register Files/Runtime Dynamic': 0.0573882,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.196646,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.52332,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.94177,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000460515,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000460515,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000398547,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000152883,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000726193,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00204577,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00450687,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0485976,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.09123,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.13364,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.165059,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.46206,
'Instruction Fetch Unit/Runtime Dynamic': 0.35385,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.105913,
'L2/Runtime Dynamic': 0.029468,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.17194,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.980098,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.062596,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0625961,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.46873,
'Load Store Unit/Runtime Dynamic': 1.3514,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.154351,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.308703,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0547797,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0563481,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.192201,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0219751,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.445403,
'Memory Management Unit/Runtime Dynamic': 0.0783231,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 19.7794,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.221364,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0123057,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0948945,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.328564,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.08337,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0264891,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.223494,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.136566,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0663464,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.107014,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0540172,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.227378,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.054944,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.17456,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0258003,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00278287,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0303044,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.020581,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0561047,
'Execution Unit/Register Files/Runtime Dynamic': 0.0233639,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0704667,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.187539,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.06715,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000190311,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000190311,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000166083,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.44697e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000295648,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000842353,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00181317,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0197851,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.2585,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0536059,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0671989,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.53809,
'Instruction Fetch Unit/Runtime Dynamic': 0.143245,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0437782,
'L2/Runtime Dynamic': 0.0122258,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.03053,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.401989,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0256686,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0256687,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.15174,
'Load Store Unit/Runtime Dynamic': 0.554247,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0632944,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.126589,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0224634,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0231115,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.078249,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00881622,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.272946,
'Memory Management Unit/Runtime Dynamic': 0.0319277,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.7706,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0678693,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00381932,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0328129,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.104502,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.9133,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.026525,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.223522,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.134947,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0653442,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.105398,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0532012,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.223943,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0540457,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.17099,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0254944,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00274083,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0300875,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0202701,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0555819,
'Execution Unit/Register Files/Runtime Dynamic': 0.0230109,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0700187,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.184639,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.06049,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000189419,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000189419,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000165268,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.41336e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000291182,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000835288,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00180597,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0194862,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.23949,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0535352,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0661838,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.51816,
'Instruction Fetch Unit/Runtime Dynamic': 0.141846,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0430708,
'L2/Runtime Dynamic': 0.0119442,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.01377,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.39345,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0251266,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0251265,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.13243,
'Load Store Unit/Runtime Dynamic': 0.542492,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.061958,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.123916,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0219891,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0226268,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.077067,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00880337,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.270949,
'Memory Management Unit/Runtime Dynamic': 0.0314302,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.7251,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0670636,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0037643,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0323038,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.103132,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.89134,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0267206,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.223676,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.135946,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.065922,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.10633,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0536717,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.225923,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.054553,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.17378,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0256832,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00276506,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0303382,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0204493,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0560214,
'Execution Unit/Register Files/Runtime Dynamic': 0.0232144,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0705957,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.186858,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.06505,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000185308,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000185308,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000161703,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.27614e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000293756,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000826076,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00176602,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0196585,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.25045,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0538894,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.066769,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.52965,
'Instruction Fetch Unit/Runtime Dynamic': 0.142909,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0448622,
'L2/Runtime Dynamic': 0.0125198,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.03051,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.40245,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0256681,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0256682,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.15172,
'Load Store Unit/Runtime Dynamic': 0.554705,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0632932,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.126587,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.022463,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0231278,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0777482,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00886089,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.272444,
'Memory Management Unit/Runtime Dynamic': 0.0319887,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.7619,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0675603,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00379641,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0326076,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.103964,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.91114,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 8.437790202507701,
'Runtime Dynamic': 8.437790202507701,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.377528,
'Runtime Dynamic': 0.14026,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 61.4145,
'Peak Power': 94.5267,
'Runtime Dynamic': 9.9394,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 61.037,
'Total Cores/Runtime Dynamic': 9.79914,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.377528,
'Total L3s/Runtime Dynamic': 0.14026,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.12035 | 124 | 0.682275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46,943 | 0.683702 |
c3d88ae30b5927313a3fbd970dd4a5f973d6b45f | 37,327 | py | Python | ros/devel/lib/python2.7/dist-packages/darknet_ros_msgs/msg/_CheckForObjectsAction.py | wutianze/ComP | 021440aa98aa03ee3b86ed3db196b95477b9f80b | [
"MIT"
] | 3 | 2021-08-20T03:25:37.000Z | 2022-03-31T02:47:28.000Z | ros/devel/lib/python2.7/dist-packages/darknet_ros_msgs/msg/_CheckForObjectsAction.py | wutianze/ComP | 021440aa98aa03ee3b86ed3db196b95477b9f80b | [
"MIT"
] | null | null | null | ros/devel/lib/python2.7/dist-packages/darknet_ros_msgs/msg/_CheckForObjectsAction.py | wutianze/ComP | 021440aa98aa03ee3b86ed3db196b95477b9f80b | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from darknet_ros_msgs/CheckForObjectsAction.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import darknet_ros_msgs.msg
import sensor_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class CheckForObjectsAction(genpy.Message):
_md5sum = "98095af4078a4c5df88f8e6a4db52e32"
_type = "darknet_ros_msgs/CheckForObjectsAction"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
CheckForObjectsActionGoal action_goal
CheckForObjectsActionResult action_result
CheckForObjectsActionFeedback action_feedback
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsActionGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
CheckForObjectsGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Check if objects in image
# Goal definition
int16 id
sensor_msgs/Image image
================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
# +x should point to the right in the image
# +y should point down in the image
# +z should point into to plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict
# the behavior is undefined
uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns
# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.
string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in include/sensor_msgs/image_encodings.h
uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsActionResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
CheckForObjectsResult result
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Result definition
int16 id
darknet_ros_msgs/BoundingBoxes bounding_boxes
================================================================================
MSG: darknet_ros_msgs/BoundingBoxes
Header header
Header image_header
BoundingBox[] bounding_boxes
================================================================================
MSG: darknet_ros_msgs/BoundingBox
float64 probability
int64 xmin
int64 ymin
int64 xmax
int64 ymax
int16 id
string Class
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsActionFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
CheckForObjectsFeedback feedback
================================================================================
MSG: darknet_ros_msgs/CheckForObjectsFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Feedback definition
"""
__slots__ = ['action_goal','action_result','action_feedback']
_slot_types = ['darknet_ros_msgs/CheckForObjectsActionGoal','darknet_ros_msgs/CheckForObjectsActionResult','darknet_ros_msgs/CheckForObjectsActionFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
action_goal,action_result,action_feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(CheckForObjectsAction, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.action_goal is None:
self.action_goal = darknet_ros_msgs.msg.CheckForObjectsActionGoal()
if self.action_result is None:
self.action_result = darknet_ros_msgs.msg.CheckForObjectsActionResult()
if self.action_feedback is None:
self.action_feedback = darknet_ros_msgs.msg.CheckForObjectsActionFeedback()
else:
self.action_goal = darknet_ros_msgs.msg.CheckForObjectsActionGoal()
self.action_result = darknet_ros_msgs.msg.CheckForObjectsActionResult()
self.action_feedback = darknet_ros_msgs.msg.CheckForObjectsActionFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer

    Fixed-width fields are packed with precompiled little-endian struct
    formats (the _get_struct_* helpers); every string is written as a
    uint32 length prefix followed by its UTF-8 bytes.

    :param buff: buffer, ``StringIO``
    """
    try:
      # --- action_goal: header, goal_id, goal (int16 id + sensor_msgs/Image) ---
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
      _x = self.action_goal.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
      _x = self.action_goal.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # goal.id (int16) packed together with the image header's 3 uint32s
      _x = self
      buff.write(_get_struct_h3I().pack(_x.action_goal.goal.id, _x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs))
      _x = self.action_goal.goal.image.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width))
      _x = self.action_goal.goal.image.encoding
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_BI().pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step))
      # image.data is a uint8[] field: length prefix then the raw payload
      _x = self.action_goal.goal.image.data
      length = len(_x)
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # --- action_result: header, GoalStatus, result (int16 id + BoundingBoxes) ---
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
      _x = self.action_result.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
      _x = self.action_result.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_result.status.status))
      _x = self.action_result.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_h3I().pack(_x.action_result.result.id, _x.action_result.result.bounding_boxes.header.seq, _x.action_result.result.bounding_boxes.header.stamp.secs, _x.action_result.result.bounding_boxes.header.stamp.nsecs))
      _x = self.action_result.result.bounding_boxes.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_result.result.bounding_boxes.image_header.seq, _x.action_result.result.bounding_boxes.image_header.stamp.secs, _x.action_result.result.bounding_boxes.image_header.stamp.nsecs))
      _x = self.action_result.result.bounding_boxes.image_header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # variable-length array: uint32 count, then each BoundingBox as
      # probability(float64), xmin/ymin/xmax/ymax(int64), id(int16), Class string
      length = len(self.action_result.result.bounding_boxes.bounding_boxes)
      buff.write(_struct_I.pack(length))
      for val1 in self.action_result.result.bounding_boxes.bounding_boxes:
        _x = val1
        buff.write(_get_struct_d4qh().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax, _x.id))
        _x = val1.Class
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # --- action_feedback: header + GoalStatus (the feedback msg itself has no fields) ---
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
      _x = self.action_feedback.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
      _x = self.action_feedback.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_feedback.status.status))
      _x = self.action_feedback.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    # report pack failures through genpy's type-checking helper for a clearer message
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance

    Walks the buffer with a (start, end) cursor pair, mirroring the
    field order written by serialize(): fixed-width groups via the
    precompiled _get_struct_* formats, strings as a uint32 length
    prefix followed by UTF-8 bytes.

    :param str: byte array of serialized message, ``str``
    """
    # NOTE(review): the parameter is named 'str' and shadows the builtin --
    # generated genpy API, kept as-is.
    try:
      # backfill None sub-messages so there is somewhere to unpack into
      if self.action_goal is None:
        self.action_goal = darknet_ros_msgs.msg.CheckForObjectsActionGoal()
      if self.action_result is None:
        self.action_result = darknet_ros_msgs.msg.CheckForObjectsActionResult()
      if self.action_feedback is None:
        self.action_feedback = darknet_ros_msgs.msg.CheckForObjectsActionFeedback()
      end = 0
      # --- action_goal: header, goal_id, goal (int16 id + sensor_msgs/Image) ---
      _x = self
      start = end
      end += 12
      (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal_id.id = str[start:end]
      _x = self
      start = end
      # 14 bytes: goal.id (int16) + image header seq/secs/nsecs (3 x uint32)
      end += 14
      (_x.action_goal.goal.id, _x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _get_struct_h3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal.image.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal.image.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal.image.encoding = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal.image.encoding = str[start:end]
      _x = self
      start = end
      # 5 bytes: is_bigendian (uint8) + step (uint32)
      end += 5
      (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _get_struct_BI().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      # uint8[] payload stays as a raw byte slice (no decode)
      self.action_goal.goal.image.data = str[start:end]
      # --- action_result: header, GoalStatus, result (int16 id + BoundingBoxes) ---
      _x = self
      start = end
      end += 12
      (_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_result.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_result.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.text = str[start:end].decode('utf-8')
      else:
        self.action_result.status.text = str[start:end]
      _x = self
      start = end
      end += 14
      (_x.action_result.result.id, _x.action_result.result.bounding_boxes.header.seq, _x.action_result.result.bounding_boxes.header.stamp.secs, _x.action_result.result.bounding_boxes.header.stamp.nsecs,) = _get_struct_h3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.result.bounding_boxes.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_result.result.bounding_boxes.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.action_result.result.bounding_boxes.image_header.seq, _x.action_result.result.bounding_boxes.image_header.stamp.secs, _x.action_result.result.bounding_boxes.image_header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.result.bounding_boxes.image_header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_result.result.bounding_boxes.image_header.frame_id = str[start:end]
      # variable-length array: uint32 count, then per-element fixed block + string
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.action_result.result.bounding_boxes.bounding_boxes = []
      for i in range(0, length):
        val1 = darknet_ros_msgs.msg.BoundingBox()
        _x = val1
        start = end
        # 42 bytes: probability(float64) + xmin/ymin/xmax/ymax(int64) + id(int16)
        end += 42
        (_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax, _x.id,) = _get_struct_d4qh().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.Class = str[start:end].decode('utf-8')
        else:
          val1.Class = str[start:end]
        self.action_result.result.bounding_boxes.bounding_boxes.append(val1)
      # --- action_feedback: header + GoalStatus (feedback msg itself has no fields) ---
      _x = self
      start = end
      end += 12
      (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.text = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.text = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # genpy-generated serializer for a CheckForObjectsAction message.
    # Fixed-size fields are packed little-endian via cached Structs; every
    # string is written as a uint32 length followed by its utf-8 bytes.
    # Field order must mirror deserialize_numpy exactly.
    try:
        # --- action_goal.header (seq, stamp.secs, stamp.nsecs) + frame_id ---
        _x = self
        buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
        _x = self.action_goal.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_goal.goal_id (stamp + id string) ---
        _x = self
        buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
        _x = self.action_goal.goal_id.id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_goal.goal: int16 id + image header + frame_id ---
        _x = self
        buff.write(_get_struct_h3I().pack(_x.action_goal.goal.id, _x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs))
        _x = self.action_goal.goal.image.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- image dimensions, encoding, endianness flag + row step ---
        _x = self
        buff.write(_get_struct_2I().pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width))
        _x = self.action_goal.goal.image.encoding
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _x = self
        buff.write(_get_struct_BI().pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step))
        # --- raw image payload ---
        _x = self.action_goal.goal.image.data
        length = len(_x)
        # - if encoded as a list instead, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
            buff.write(struct.pack('<I%sB'%length, length, *_x))
        else:
            buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_result.header + frame_id ---
        _x = self
        buff.write(_get_struct_3I().pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
        _x = self.action_result.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_result.status (goal_id stamp + id, status byte, text) ---
        _x = self
        buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
        _x = self.action_result.status.goal_id.id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        buff.write(_get_struct_B().pack(self.action_result.status.status))
        _x = self.action_result.status.text
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_result.result: int16 id + bounding_boxes headers ---
        _x = self
        buff.write(_get_struct_h3I().pack(_x.action_result.result.id, _x.action_result.result.bounding_boxes.header.seq, _x.action_result.result.bounding_boxes.header.stamp.secs, _x.action_result.result.bounding_boxes.header.stamp.nsecs))
        _x = self.action_result.result.bounding_boxes.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _x = self
        buff.write(_get_struct_3I().pack(_x.action_result.result.bounding_boxes.image_header.seq, _x.action_result.result.bounding_boxes.image_header.stamp.secs, _x.action_result.result.bounding_boxes.image_header.stamp.nsecs))
        _x = self.action_result.result.bounding_boxes.image_header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- variable-length BoundingBox array: uint32 count, then each box ---
        length = len(self.action_result.result.bounding_boxes.bounding_boxes)
        buff.write(_struct_I.pack(length))
        for val1 in self.action_result.result.bounding_boxes.bounding_boxes:
            _x = val1
            buff.write(_get_struct_d4qh().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax, _x.id))
            _x = val1.Class
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_feedback.header + frame_id ---
        _x = self
        buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
        _x = self.action_feedback.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # --- action_feedback.status (goal_id stamp + id, status byte, text) ---
        _x = self
        buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
        _x = self.action_feedback.status.goal_id.id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        buff.write(_get_struct_B().pack(self.action_feedback.status.status))
        _x = self.action_feedback.status.text
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # genpy-generated deserializer: advances a start/end cursor over the
    # buffer, mirroring serialize_numpy field-for-field.  NOTE: the
    # parameter named ``str`` shadows the builtin (generated-code convention).
    try:
        # Lazily create sub-messages so the attribute assignments below succeed.
        if self.action_goal is None:
            self.action_goal = darknet_ros_msgs.msg.CheckForObjectsActionGoal()
        if self.action_result is None:
            self.action_result = darknet_ros_msgs.msg.CheckForObjectsActionResult()
        if self.action_feedback is None:
            self.action_feedback = darknet_ros_msgs.msg.CheckForObjectsActionFeedback()
        end = 0
        # --- action_goal.header (3 x uint32) + length-prefixed frame_id ---
        _x = self
        start = end
        end += 12
        (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_goal.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_goal.header.frame_id = str[start:end]
        # --- action_goal.goal_id (stamp + id string) ---
        _x = self
        start = end
        end += 8
        (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_goal.goal_id.id = str[start:end].decode('utf-8')
        else:
            self.action_goal.goal_id.id = str[start:end]
        # --- action_goal.goal: int16 id + image header (14 bytes, '<h3I') ---
        _x = self
        start = end
        end += 14
        (_x.action_goal.goal.id, _x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _get_struct_h3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_goal.goal.image.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_goal.goal.image.header.frame_id = str[start:end]
        # --- image dimensions, encoding, endianness flag + row step ---
        _x = self
        start = end
        end += 8
        (_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_goal.goal.image.encoding = str[start:end].decode('utf-8')
        else:
            self.action_goal.goal.image.encoding = str[start:end]
        _x = self
        start = end
        end += 5
        (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _get_struct_BI().unpack(str[start:end])
        # --- raw image payload (kept as bytes; not decoded) ---
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        self.action_goal.goal.image.data = str[start:end]
        # --- action_result.header + frame_id ---
        _x = self
        start = end
        end += 12
        (_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_result.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_result.header.frame_id = str[start:end]
        # --- action_result.status (goal_id stamp + id, status byte, text) ---
        _x = self
        start = end
        end += 8
        (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
        else:
            self.action_result.status.goal_id.id = str[start:end]
        start = end
        end += 1
        (self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_result.status.text = str[start:end].decode('utf-8')
        else:
            self.action_result.status.text = str[start:end]
        # --- action_result.result: int16 id + bounding_boxes headers ---
        _x = self
        start = end
        end += 14
        (_x.action_result.result.id, _x.action_result.result.bounding_boxes.header.seq, _x.action_result.result.bounding_boxes.header.stamp.secs, _x.action_result.result.bounding_boxes.header.stamp.nsecs,) = _get_struct_h3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_result.result.bounding_boxes.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_result.result.bounding_boxes.header.frame_id = str[start:end]
        _x = self
        start = end
        end += 12
        (_x.action_result.result.bounding_boxes.image_header.seq, _x.action_result.result.bounding_boxes.image_header.stamp.secs, _x.action_result.result.bounding_boxes.image_header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_result.result.bounding_boxes.image_header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_result.result.bounding_boxes.image_header.frame_id = str[start:end]
        # --- variable-length BoundingBox array: uint32 count, then each box
        #     (42 bytes of '<d4qh' numerics + length-prefixed Class string) ---
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.action_result.result.bounding_boxes.bounding_boxes = []
        for i in range(0, length):
            val1 = darknet_ros_msgs.msg.BoundingBox()
            _x = val1
            start = end
            end += 42
            (_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax, _x.id,) = _get_struct_d4qh().unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.Class = str[start:end].decode('utf-8')
            else:
                val1.Class = str[start:end]
            self.action_result.result.bounding_boxes.bounding_boxes.append(val1)
        # --- action_feedback.header + frame_id ---
        _x = self
        start = end
        end += 12
        (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.action_feedback.header.frame_id = str[start:end]
        # --- action_feedback.status (goal_id stamp + id, status byte, text) ---
        _x = self
        start = end
        end += 8
        (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
        else:
            self.action_feedback.status.goal_id.id = str[start:end]
        start = end
        end += 1
        (self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.action_feedback.status.text = str[start:end].decode('utf-8')
        else:
            self.action_feedback.status.text = str[start:end]
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I  # shared genpy Struct for a little-endian uint32
def _get_struct_I():
    """Return the shared genpy Struct used for uint32 length prefixes."""
    global _struct_I
    return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_d4qh = None
def _get_struct_d4qh():
global _struct_d4qh
if _struct_d4qh is None:
_struct_d4qh = struct.Struct("<d4qh")
return _struct_d4qh
_struct_h3I = None
def _get_struct_h3I():
global _struct_h3I
if _struct_h3I is None:
_struct_h3I = struct.Struct("<h3I")
return _struct_h3I
_struct_BI = None
def _get_struct_BI():
global _struct_BI
if _struct_BI is None:
_struct_BI = struct.Struct("<BI")
return _struct_BI
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| 41.428413 | 246 | 0.63943 | 35,951 | 0.963137 | 0 | 0 | 0 | 0 | 0 | 0 | 9,070 | 0.242988 |
c3d941ff5907d9662b0cf2643809b6e39408eb71 | 612 | py | Python | datalabframework/paths.py | Quyenna/datalabframework | 918738f38438c0bb483e67602a022cf135f8d509 | [
"MIT"
] | null | null | null | datalabframework/paths.py | Quyenna/datalabframework | 918738f38438c0bb483e67602a022cf135f8d509 | [
"MIT"
] | null | null | null | datalabframework/paths.py | Quyenna/datalabframework | 918738f38438c0bb483e67602a022cf135f8d509 | [
"MIT"
] | null | null | null | import os
_rootdir = os.getcwd()

def find_rootdir(filenames=('__main__.py', 'main.ipynb')):
    """Walk upward from the current working directory looking for a project root.

    A directory is the root if it directly contains one of *filenames*.

    :param filenames: marker file names identifying the project root
    :return: absolute path of the first ancestor containing a marker, or the
             current working directory if no ancestor contains one

    Fix: the original appended '/..' forever; at the filesystem root
    '/..' is still a directory, so the loop never terminated when no
    marker file existed anywhere on the path.
    """
    path = os.path.abspath(os.getcwd())
    while os.path.isdir(path):
        ls = os.listdir(path)
        if any(f in ls for f in filenames):
            return path
        parent = os.path.dirname(path)
        if parent == path:  # reached filesystem root: stop searching
            break
        path = parent
    # nothing found: use the current working dir
    return os.getcwd()
def set_rootdir(path=None):
    """Set the cached project root directory.

    If *path* is a valid directory its absolute form is used; otherwise the
    root is discovered via find_rootdir().  Returns the new root.
    """
    global _rootdir
    use_given = bool(path) and os.path.isdir(path)
    _rootdir = os.path.abspath(path) if use_given else find_rootdir()
    return _rootdir
def rootdir():
    """Return the currently cached project root directory."""
    return _rootdir
| 20.4 | 60 | 0.601307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.124183 |
c3d9819c60978679ed12ac424127e68b0245461c | 1,292 | py | Python | python/dailycodingproblems/problem0003.py | smhnr27/code | fbb34db248d36ffdad104b4f8fe73d0a64b9e15a | [
"MIT"
] | null | null | null | python/dailycodingproblems/problem0003.py | smhnr27/code | fbb34db248d36ffdad104b4f8fe73d0a64b9e15a | [
"MIT"
] | null | null | null | python/dailycodingproblems/problem0003.py | smhnr27/code | fbb34db248d36ffdad104b4f8fe73d0a64b9e15a | [
"MIT"
] | null | null | null | # Given the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.
#
# For example, given the following Node class:
class Node:
    """A binary tree node holding a value and optional left/right children."""
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
# The following test should pass:
# node = Node('root', Node('left', Node('left.left')), Node('right'))
# assert deserialize(serialize(node)).left.left.val == 'left.left'
def serialize(root, string=''):
    """Append the preorder encoding ``val(left|right)`` of *root* to *string*.

    Each node is written as its value followed by a parenthesised pair of
    child encodings separated by '|'; absent children contribute nothing.
    Returns the accumulated string.
    """
    out = '{0}{1}('.format(string, root.val)
    if root.left is not None:
        out = serialize(root.left, out)
    out += '|'
    if root.right is not None:
        out = serialize(root.right, out)
    return out + ')'
def deserialize(string):
    """Parse a ``val(left|right)`` encoding back into a Node tree.

    Returns None for an empty encoding (an absent child).  Assumes node
    values contain none of the delimiter characters '(', '|', ')' --
    NOTE(review): values containing delimiters would be mis-parsed.
    """
    nestDepth = 0
    end = None
    for x in range(0,len(string)):
        if string[x] == ')':
            nestDepth -= 1
        if string[x] == '(':
            # The first '(' at depth 0 terminates this node's value.
            if nestDepth == 0:
                val = string[:x]
                argStart = x
            nestDepth += 1
        # The '|' at depth 1 separates this node's left and right encodings.
        if string[x] == '|' and nestDepth <= 1:
            left = deserialize(string[argStart + 1:x])
            right = deserialize(string[x + 1:-1])
            end = Node(val, left, right)
    return end
# Import-time round-trip smoke test: the deepest-left value must survive
# serialize -> deserialize.
node = Node('root', Node('left', Node('left.left')), Node('right'))
assert deserialize(serialize(node)).left.left.val == 'left.left'
c3da32e04dd68552d6766ba134d4dbed387f0a82 | 2,051 | py | Python | test.py | ndwuhuangwei/py-radio-autoencoder | 842cd1f14a17ee0798766dffcf132950a9e745bd | [
"CC0-1.0"
] | null | null | null | test.py | ndwuhuangwei/py-radio-autoencoder | 842cd1f14a17ee0798766dffcf132950a9e745bd | [
"CC0-1.0"
] | null | null | null | test.py | ndwuhuangwei/py-radio-autoencoder | 842cd1f14a17ee0798766dffcf132950a9e745bd | [
"CC0-1.0"
] | 1 | 2021-09-06T14:05:53.000Z | 2021-09-06T14:05:53.000Z | import math
import random
import numpy as np
# 先生成一个随机的信源
def random_sources():
random_sources = random.randint(0, 16)
print('这个随机数是', random_sources)
return hanming(random_sources)
# return bin(int(random_sources))
# 进行编码,使用异或规则生成有校验位的(7,4)汉明码字
# def hanming(code_0):
# # 把十进制的数字转变成二进制
# code1 = bin(int(code_0))
# code = str(code1)[2:]
# print('{0}变成二进制'.format(code_0), code)
# # # 判断待验证位数是否达到4位,不足位数前面补0
# while len(code) < 4:
# code = '0' + code
# # 将码字转变成列表格式,方便后面进行操作
# # print '补齐4位之后',code
# code_list = list(code)
# # 编码结构即码字,对于(7,4)线性分组码汉明码而言
# code_1 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3])
# code_2 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[2])
# code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3])
# code_list.insert(0, str(code_1))
# code_list.insert(1, str(code_2))
# code_list.insert(2, str(code_4))
# hanming_code = ''.join(code_list)
# print('生成的(7,4)汉明码字:' + hanming_code)
# return code_list
def hanming(code_0):
    """Encode a 4-bit integer (0..15) as an inverted-parity (7,4) Hamming codeword.

    :param code_0: source value; must fit in 4 bits
    :return: list of seven '0'/'1' characters with parity bits at indices
             0, 1 and 3 (Hamming positions 1, 2 and 4)
    :raises ValueError: if code_0 is outside 0..15 -- the original silently
        produced a malformed (wrong-length) codeword for such inputs

    NOTE(review): each parity bit is XORed with 1 (odd parity), unlike the
    classic even-parity (7,4) code shown commented out above; confirm which
    convention the decoder expects.
    """
    value = int(code_0)
    if not 0 <= value < 16:
        raise ValueError('hanming() encodes 4-bit values only, got {0}'.format(code_0))
    # Convert the decimal number to binary (strip the '0b' prefix).
    code1 = bin(value)
    code = str(code1)[2:]
    print('{0}变成二进制'.format(code_0), code)
    # Left-pad to exactly 4 data bits.
    while len(code) < 4:
        code = '0' + code
    # Work on a list of single characters for easy indexing and insertion.
    code_list = list(code)
    # Parity bits for Hamming positions 1, 2 and 4 (trailing ^ 1 inverts).
    code_1 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[3]) ^ 1
    code_2 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
    code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
    code_list.insert(0, str(code_1))
    code_list.insert(1, str(code_2))
    code_list.insert(3, str(code_4))
    hanming_code = ''.join(code_list)
    print('生成的(7,4)汉明码字:' + hanming_code)
    return code_list
if __name__ == '__main__':
    # x1 is the original signal: a (7,4) Hamming codeword.
    # x1 = random_sources()
    x1 = hanming(3)
    print(x1)
| 31.553846 | 100 | 0.592394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,637 | 0.644742 |
c3da95c06f9dee9d167e749a7cc66d5cb5c8f2b0 | 17,778 | py | Python | backend/app/bug_killer_app/test/api/test_bug.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | backend/app/bug_killer_app/test/api/test_bug.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | backend/app/bug_killer_app/test/api/test_bug.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | import json
from unittest import TestCase
from unittest.mock import patch
from bug_killer_api_interface.schemas.entities.bug import BugResolution
from bug_killer_api_interface.schemas.request.bug import CreateBugPayload, UpdateBugPayload
from bug_killer_api_interface.schemas.request.project import CreateProjectPayload
from bug_killer_api_interface.schemas.response.bug import BugResponse
from bug_killer_app.access.entities.bug import create_project_bug, resolve_project_bug
from bug_killer_app.access.entities.project import create_project
from bug_killer_app.api.bug import get_bug_handler, create_bug_handler, update_bug_handler, resolve_bug_handler, \
delete_bug_handler
from bug_killer_app.domain.response import HttpStatusCode, message_body
from bug_killer_app.test.helpers import create_event, assert_response, assert_dict_attributes_not_none, \
assert_dict_attributes_equals, create_cognito_authorizer_request_context
from bug_killer_app.test.test_doubles.db.transact_write import DummyTransactWrite
from bug_killer_utils.dates import to_utc_str
from bug_killer_utils.function import run_async
class TestGetBug(TestCase):
    """GET-bug handler tests: auth, validation, not-found, permission, success.

    setUpClass patches the DynamoDB TransactWrite with DummyTransactWrite and
    seeds one project owned by USER1 containing one bug.
    """
    TEST_NAME = 'GetBug'
    USER1 = f'{TEST_NAME}_USER1'

    @classmethod
    @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite)
    def setUpClass(cls):
        project_with_bug_future = create_project(
            TestGetBug.USER1,
            CreateProjectPayload.test_double()
        )
        cls.project_with_bug = run_async(project_with_bug_future)

        bug_to_get_future = create_project_bug(
            TestGetBug.USER1,
            CreateBugPayload.test_double(project_id=cls.project_with_bug.id)
        )
        cls.bug_to_get = run_async(bug_to_get_future)

    def test_error_when_missing_auth_header(self):
        # Given
        evt = create_event()

        # When
        rsp = get_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value'))

    def test_error_when_missing_id(self):
        # Given
        evt = create_event(request_context=create_cognito_authorizer_request_context('user'))

        # When
        rsp = get_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('Missing required pathParameters parameter "bugId" in request')
        )

    def test_error_when_bug_doesnt_exist(self):
        # Given
        bug_id = 'does_not_exist'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context('user'),
            path={'bugId': bug_id}
        )

        # When
        rsp = get_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"'))

    def test_error_when_user_lacks_permission(self):
        # Given: a user who is neither owner nor member of the project
        user = 'lacks_access_user'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(user),
            path={'bugId': self.bug_to_get.id}
        )

        # When
        rsp = get_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.FORBIDDEN_STATUS,
            message_body(f'{user} does not have permission to read project {self.project_with_bug.id}')
        )

    def test_gets_bug(self):
        # Given
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.bug_to_get.id}
        )

        # When
        rsp = get_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.OK_STATUS,
            BugResponse(project_id=self.project_with_bug.id, bug=self.bug_to_get).api_dict()
        )
class TestCreateBug(TestCase):
    """Create-bug handler tests: auth, validation, permission, not-found, success.

    setUpClass patches the DynamoDB TransactWrite with DummyTransactWrite and
    seeds one project owned by USER1.
    """
    TEST_NAME = 'CreateBug'
    USER1 = f'{TEST_NAME}_USER1'

    @classmethod
    @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite)
    def setUpClass(cls):
        project_future = create_project(
            TestCreateBug.USER1,
            CreateProjectPayload.test_double()
        )
        cls.project = run_async(project_future)

    def test_error_when_missing_auth_header(self):
        # Given
        evt = create_event()

        # When
        rsp = create_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value'))

    def test_error_when_missing_project_id(self):
        # Given
        evt = create_event(request_context=create_cognito_authorizer_request_context('user'))

        # When
        rsp = create_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('Missing required body parameter "projectId" in request')
        )

    def test_error_when_user_lacks_access(self):
        # Given: a user with no role on the target project
        user = 'lacks_access'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(user),
            body=CreateBugPayload.test_double(project_id=self.project.id).api_dict()
        )

        # When
        rsp = create_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.FORBIDDEN_STATUS,
            message_body(f'{user} does not have permission to update project {self.project.id}')
        )

    def test_error_when_project_not_found(self):
        # Given
        project_id = 'does_not_exist'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context('user'),
            body=CreateBugPayload.test_double(project_id=project_id).api_dict()
        )

        # When
        rsp = create_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.NOT_FOUND_STATUS,
            message_body(f'No project found with id: "{project_id}"')
        )

    def test_user_creates_bug(self):
        # Given
        payload = CreateBugPayload.test_double(project_id=self.project.id)
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            body=payload.api_dict()
        )

        # When
        rsp = create_bug_handler(evt, None)

        # Then: server-generated fields are set, payload fields echo back
        assert_response(rsp, HttpStatusCode.CREATED_STATUS)
        assert json.loads(rsp['body'])['projectId'] is not None

        bug = json.loads(rsp['body'])['bug']
        assert_dict_attributes_not_none(bug, ['id', 'createdOn', 'lastUpdatedOn'])
        assert_dict_attributes_equals(
            bug,
            {'title': payload.title, 'description': payload.description, 'tags': payload.tags, 'resolved': None}
        )
class TestUpdateBug(TestCase):
    """Update-bug handler tests: auth, validation, empty/no-op payloads,
    not-found, permission, success.

    setUpClass patches the DynamoDB TransactWrite with DummyTransactWrite and
    seeds one project owned by USER1 with two bugs: one to mutate and one
    used for the no-op-change case.
    """
    TEST_NAME = 'UpdateBug'
    USER1 = f'{TEST_NAME}_USER1'

    @classmethod
    @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite)
    def setUpClass(cls):
        project_future = create_project(
            TestUpdateBug.USER1,
            CreateProjectPayload.test_double()
        )
        cls.project = run_async(project_future)

        bug_to_update_future = create_project_bug(
            cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id))
        change_update_bug_future = create_project_bug(
            cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id))
        cls.bug_to_update = run_async(bug_to_update_future)
        cls.change_update_bug = run_async(change_update_bug_future)

    def test_error_when_missing_auth_header(self):
        # Given
        evt = create_event()

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value'))

    def test_error_when_missing_project_id(self):
        # Given
        evt = create_event(request_context=create_cognito_authorizer_request_context('user'))

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('Missing required pathParameters parameter "bugId" in request')
        )

    def test_error_when_empty_payload(self):
        # Given: a payload with no fields set
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.bug_to_update.id},
            body=UpdateBugPayload().api_dict()
        )

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('No changes provided in update payload')
        )

    def test_error_when_bug_not_found(self):
        # Given
        bug_id = 'does_not_exist'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context('user'),
            path={'bugId': bug_id},
            body=UpdateBugPayload(title='title update').api_dict()
        )

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.NOT_FOUND_STATUS,
            message_body(f'No bug found with id: "{bug_id}"')
        )

    def test_error_when_updates_match_existing_bug(self):
        # Given: an update whose values equal the stored record
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.change_update_bug.id},
            body=UpdateBugPayload(title=self.change_update_bug.title).api_dict()
        )

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('All changes in payload matches the existing record')
        )

    def test_error_when_user_lacks_permission_to_update(self):
        # Given
        user = 'user_lacks_access'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(user),
            path={'bugId': self.bug_to_update.id},
            body=UpdateBugPayload(title='some_edit').api_dict()
        )

        # When
        rsp = update_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.FORBIDDEN_STATUS,
            message_body(f'{user} does not have permission to read project {self.project.id}')
        )

    def test_user_updates_bug(self):
        # Given
        new_title = 'new_title'
        bug_before_update = self.bug_to_update
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.bug_to_update.id},
            body=UpdateBugPayload(title=new_title).api_dict()
        )

        # When
        rsp = update_bug_handler(evt, None)

        # Then: only the title changed; other fields are preserved
        assert_response(rsp, HttpStatusCode.OK_STATUS)
        assert json.loads(rsp['body'])['projectId'] is not None

        bug = json.loads(rsp['body'])['bug']
        assert_dict_attributes_equals(
            bug,
            {
                'id': bug_before_update.id,
                'createdOn': to_utc_str(bug_before_update.created_on),
                'title': new_title,
                'description': bug_before_update.description,
                'tags': bug_before_update.tags,
                'resolved': None
            }
        )
class TestResolveBug(TestCase):
    """Resolve-bug handler tests: auth, validation, not-found, permission,
    double-resolution, success.

    setUpClass patches the DynamoDB TransactWrite with DummyTransactWrite and
    seeds one project owned by USER1 with two bugs: one left open and one
    pre-resolved for the already-resolved error case.
    """
    TEST_NAME = 'ResolveBug'
    USER1 = f'{TEST_NAME}_USER1'

    @classmethod
    @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite)
    def setUpClass(cls):
        project_future = create_project(
            TestResolveBug.USER1,
            CreateProjectPayload.test_double()
        )
        cls.project = run_async(project_future)

        bug_to_resolve_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id))
        resolved_bug_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id))
        cls.bug_to_resolve = run_async(bug_to_resolve_future)
        resolved_bug = run_async(resolved_bug_future)

        # resolve_project_bug returns a pair; index 1 is the resolved bug
        resolved_bug_future = resolve_project_bug(cls.USER1, resolved_bug.id)
        cls.resolved_bug = run_async(resolved_bug_future)[1]

    def test_error_when_missing_auth_header(self):
        # Given
        evt = create_event()

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value'))

    def test_error_when_no_bug_id(self):
        # Given
        evt = create_event(request_context=create_cognito_authorizer_request_context('user'))

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('Missing required pathParameters parameter "bugId" in request')
        )

    def test_error_when_bug_not_found(self):
        # Given
        bug_id = 'does_not_exist'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context('user'),
            path={'bugId': bug_id}
        )

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.NOT_FOUND_STATUS,
            message_body(f'No bug found with id: "{bug_id}"')
        )

    def test_error_when_user_lacks_access(self):
        # Given
        user = 'lacks_access_user'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(user),
            path={'bugId': self.bug_to_resolve.id}
        )

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.FORBIDDEN_STATUS,
            message_body(f'{user} does not have permission to read project {self.project.id}')
        )

    def test_error_when_resolving_already_resolved_bug(self):
        # Given: a bug already resolved in setUpClass
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.resolved_bug.id}
        )

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body(
                f'Bug {self.resolved_bug.id} has already been resolved by {self.resolved_bug.resolved.resolver_id} '
                f'on {self.resolved_bug.resolved.resolved_on}'
            )
        )

    def test_user_resolves_bug(self):
        # Given
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.bug_to_resolve.id}
        )

        # When
        rsp = resolve_bug_handler(evt, None)

        # Then: the resolution records the caller and a timestamp
        assert_response(rsp, HttpStatusCode.OK_STATUS)

        bug_resolution = BugResolution.parse_obj(json.loads(rsp['body'])['bug']['resolved'])
        assert bug_resolution.resolver_id == self.USER1
        assert bug_resolution.resolved_on is not None
class TestDeleteBug(TestCase):
    """Delete-bug handler tests: auth, validation, not-found, success.

    setUpClass patches the DynamoDB TransactWrite with DummyTransactWrite and
    seeds one project owned by USER1 containing the bug to delete.
    """
    TEST_NAME = 'DeleteBug'
    USER1 = f'{TEST_NAME}_USER1'

    @classmethod
    @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite)
    def setUpClass(cls):
        project_future = create_project(
            TestDeleteBug.USER1,
            CreateProjectPayload.test_double()
        )
        cls.project = run_async(project_future)

        bug_to_delete_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id))
        cls.bug_to_delete = run_async(bug_to_delete_future)

    def test_error_when_missing_auth_header(self):
        # Given
        evt = create_event()

        # When
        rsp = delete_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value'))

    def test_error_when_bug_id_not_given(self):
        # Given
        evt = create_event(request_context=create_cognito_authorizer_request_context('user'))

        # When
        rsp = delete_bug_handler(evt, None)

        # Then
        assert_response(
            rsp,
            HttpStatusCode.BAD_REQUEST_STATUS,
            message_body('Missing required pathParameters parameter "bugId" in request')
        )

    def test_error_when_bug_not_found(self):
        # Given
        bug_id = 'Does not exist'
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': bug_id},
        )

        # When
        rsp = delete_bug_handler(evt, None)

        # Then
        assert_response(rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"'))

    def test_user_deletes_project(self):
        # Given
        evt = create_event(
            request_context=create_cognito_authorizer_request_context(self.USER1),
            path={'bugId': self.bug_to_delete.id},
        )

        # When
        rsp = delete_bug_handler(evt, None)

        # Then: the deleted bug is echoed back in the response
        assert_response(
            rsp,
            HttpStatusCode.OK_STATUS,
            BugResponse(project_id=self.project.id, bug=self.bug_to_delete).api_dict()
        )
| 32.56044 | 118 | 0.646754 | 16,650 | 0.936551 | 0 | 0 | 2,853 | 0.160479 | 0 | 0 | 2,661 | 0.149679 |
c3dabc6965dd2618eed250729b37fe4568407913 | 566 | py | Python | Module3/notes/imshow_example.py | FernanOrtega/DAT210x | bcafca952b2ca440acfd19e08764c5a150cc32a4 | [
"MIT"
] | null | null | null | Module3/notes/imshow_example.py | FernanOrtega/DAT210x | bcafca952b2ca440acfd19e08764c5a150cc32a4 | [
"MIT"
] | null | null | null | Module3/notes/imshow_example.py | FernanOrtega/DAT210x | bcafca952b2ca440acfd19e08764c5a150cc32a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 13:15:22 2017

@author: fernando

Visualizes the feature-correlation matrix of the concrete dataset as a
heat map with one labeled tick per column.
"""

import matplotlib
import matplotlib.pyplot as plt
import pandas as pd

matplotlib.style.use('ggplot')  # Look Pretty
# If the above line throws an error, use plt.style.use('ggplot') instead

df = pd.read_csv("concrete.csv")

plt.imshow(df.corr(), cmap=plt.cm.Blues, interpolation='nearest')
plt.colorbar()
# One tick per column; list(range(...)) replaces the redundant
# [i for i in range(...)] comprehension.
tick_marks = list(range(len(df.columns)))
plt.xticks(tick_marks, df.columns, rotation='vertical')
plt.yticks(tick_marks, df.columns)
plt.show() | 24.608696 | 72 | 0.724382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.411661 |
c3dac7924e9e075694dc1f3a5fa25da3cbacbc9b | 1,488 | py | Python | api/models.py | AnuragTimilsina/SchoolSystemAPI | 9ac55dc862953a075dbdc69d5c4176742d8da5b6 | [
"MIT"
] | null | null | null | api/models.py | AnuragTimilsina/SchoolSystemAPI | 9ac55dc862953a075dbdc69d5c4176742d8da5b6 | [
"MIT"
] | null | null | null | api/models.py | AnuragTimilsina/SchoolSystemAPI | 9ac55dc862953a075dbdc69d5c4176742d8da5b6 | [
"MIT"
] | 1 | 2021-07-16T11:28:36.000Z | 2021-07-16T11:28:36.000Z | from django.db import models
from users.models import User
class Assignment(models.Model):
    """An assignment created by a teacher."""

    # Human-readable assignment name.
    title = models.CharField(max_length=50)
    # The owning teacher; deleting the user cascades to the assignment.
    teacher = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.title
class GradedAssignment(models.Model):
    """A student's graded result for an assignment."""

    # The graded student; removed together with the user.
    student = models.ForeignKey(User, on_delete=models.CASCADE)
    # SET_NULL keeps the grade record even if the assignment is deleted.
    assignment = models.ForeignKey(Assignment,
                                   on_delete=models.SET_NULL,
                                   blank=True,
                                   null=True)
    # Numeric grade awarded.
    grade = models.FloatField()

    def __str__(self):
        return self.student.username
class Choice(models.Model):
    """A single answer option that questions reference."""

    # Display text for the option.
    title = models.CharField(max_length=50)

    def __str__(self):
        return self.title
class Question(models.Model):
    """A multiple-choice question belonging to an assignment."""

    # The question prompt.
    question = models.CharField(max_length=200)
    # All selectable options for this question.
    choices = models.ManyToManyField(Choice)
    # The correct option; related_name='answer' gives Choice a distinct
    # reverse accessor from the many-to-many `choices` relation.
    answer = models.ForeignKey(Choice,
                               on_delete=models.CASCADE,
                               related_name='answer',
                               blank=True,
                               null=True)
    # Owning assignment; reverse accessor is `questions`.
    assignment = models.ForeignKey(Assignment,
                                   on_delete=models.CASCADE,
                                   related_name='questions',
                                   blank=True,
                                   null=True)
    # Position of the question within the assignment.
    order = models.SmallIntegerField()

    def __str__(self):
        return self.question
| 30.367347 | 63 | 0.551075 | 1,417 | 0.952285 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.012769 |
c3daf8ed9bcb88200aa1371e56dc95da838977ae | 1,543 | py | Python | src/main/functions/poisson_regression.py | far2raf/method-optimization-resit | 75f87067942dbd0eafe092c1831d3267c01e3c3a | [
"MIT"
] | null | null | null | src/main/functions/poisson_regression.py | far2raf/method-optimization-resit | 75f87067942dbd0eafe092c1831d3267c01e3c3a | [
"MIT"
] | 1 | 2021-04-30T21:05:25.000Z | 2021-04-30T21:05:25.000Z | src/main/functions/poisson_regression.py | far2raf/method-optimization-resit | 75f87067942dbd0eafe092c1831d3267c01e3c3a | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse
from src.main.functions.interface_function import InterfaceFunction
class PoissonRegression(InterfaceFunction):
    """Poisson regression model: y ~ Poisson(exp(Xw)).

    Provides the sampling function, loss, gradient and Hessian hooks that
    the InterfaceFunction base class expects.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # In the assignment's formulas (and on the wiki) the loss is, for
        # some reason, maximized; this sign factor flips the expressions so
        # the optimizer can minimize instead.
        self._maximization_to_minimization = -1

    def _function(self, w, X):
        # Draw Poisson samples with rate exp(Xw).
        mean = np.exp(X.dot(w))
        return np.random.poisson(mean)

    def _loss(self, w, X, y):
        # Negated average Poisson log-likelihood plus regularization:
        # -(y^T Xw - 1^T exp(Xw)) / S + reg(w).
        S, F = X.shape
        xw = X.dot(w)
        first = y.T.dot(xw)
        exp_part = np.exp(xw)
        second = np.ones((S, 1)).T.dot(exp_part)
        main = (first - second) / S
        total = self._maximization_to_minimization * main + self._loss_regularization_part(w)
        return total

    def _loss_pure_gradient(self, w, X, y):
        # Gradient of the un-regularized loss: -(1/S) X^T (y - exp(Xw)).
        S, F = X.shape
        xw = X.dot(w)
        exp_part = np.exp(xw)
        diff = y - exp_part
        main = 1 / S * X.T.dot(diff)
        return self._maximization_to_minimization * main

    def _loss_hessian(self, w, X, y):
        # Hessian of the loss plus the regularizer's Hessian.
        # NOTE(review): unlike _loss/_loss_pure_gradient this term is not
        # divided by S — confirm whether the 1/S factor was dropped on purpose.
        F = w.shape[0]
        xw = X.dot(w)
        exp_part = np.exp(xw)
        # MOCK should be checked
        M = scipy.sparse.diags([exp_part.view().reshape(-1)], [0])  # (S, S)
        main = -X.T.dot(M).dot(X)
        assert main.shape == (F, F)
        total = self._maximization_to_minimization * main + self._loss_hessian_regularization_part(w)
        return total
| 31.489796 | 101 | 0.60661 | 1,547 | 0.933052 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.173703 |
c3dafcd023eb4196e2c03de73e78ae172fc56c0a | 105 | py | Python | fastapi version/src/fastapp/__init__.py | abhiWriteCode/TextSummarization | e2ed2dddc6afaa5a5106cfda19a3bd8d520f63a4 | [
"MIT"
] | null | null | null | fastapi version/src/fastapp/__init__.py | abhiWriteCode/TextSummarization | e2ed2dddc6afaa5a5106cfda19a3bd8d520f63a4 | [
"MIT"
] | null | null | null | fastapi version/src/fastapp/__init__.py | abhiWriteCode/TextSummarization | e2ed2dddc6afaa5a5106cfda19a3bd8d520f63a4 | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from . import api
app = FastAPI(debug=True)
app.include_router(api.router) | 15 | 30 | 0.780952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c3dbdd7fe56b64e44976be0048a19f5dd5080ab7 | 1,769 | py | Python | Onaeri/timekeeper.py | Lakitna/Onaeri | 7a851e39a06c2d6fdb44393a8be4ba851a9d51a6 | [
"MIT"
] | null | null | null | Onaeri/timekeeper.py | Lakitna/Onaeri | 7a851e39a06c2d6fdb44393a8be4ba851a9d51a6 | [
"MIT"
] | 7 | 2017-11-08T13:14:12.000Z | 2018-11-24T14:55:23.000Z | Onaeri/timekeeper.py | Lakitna/Onaeri | 7a851e39a06c2d6fdb44393a8be4ba851a9d51a6 | [
"MIT"
] | 1 | 2018-11-24T14:52:55.000Z | 2018-11-24T14:52:55.000Z | import time
import math
from . import settings
class TimeKeeper:
    """
    Handles timekeeping in timecodes.

    A timecode is the number of `_minPerTimeCode`-minute intervals elapsed
    since midnight, rounded down.
    """

    def __init__(self, minpertimecode=None,
                 runtime=0, update=True, latestcode=None):
        # `or` keeps the original fallback semantics: any falsy value
        # (including 0) defers to the global setting / the current time.
        self._minPerTimeCode = minpertimecode or settings.Global.minPerTimeCode
        self.latestCode = latestcode or self.code()
        self.update = update
        self.runtime = runtime

    def tick(self):
        """
        Progress the timekeeper and set the update flag on timeCode change.
        """
        # Left operand is read before code() runs (and refreshes latestCode),
        # so this compares the previous code against the current one.
        self.update = self.latestCode != self.code()
        self.runtime += 1

    def code(self, h=None, m=None, s=None, dry=False):
        """
        Calculate a timecode; defaults to the current wall-clock time.

        `h` may also be an (h, m[, s]) tuple. Unless `dry` is set, the
        result is stored as the latest code.
        """
        if h is None and m is None and s is None:
            now = time.localtime()
            h, m, s = now.tm_hour, now.tm_min, now.tm_sec
        h = 0 if h is None else h
        m = 0 if m is None else m
        s = 0 if s is None else s
        if isinstance(h, tuple):
            if len(h) > 2:
                s = h[2]
            if len(h) > 1:
                m = h[1]
            h = h[0]

        tc = math.floor(((h * 60) + m + (s / 60)) / self._minPerTimeCode)
        if not dry:
            self.latestCode = tc
        return tc

    def timestamp(self, code=None):
        """
        Return the HH:MM:SS timestring of a timecode (latest by default).
        """
        if code is None:
            code = self.latestCode
        total_minutes = code * self._minPerTimeCode
        hours = math.floor(total_minutes / 60)
        mins = math.floor(total_minutes % 60)
        secs = math.floor((total_minutes % 1) * 60)
        return "%02d:%02d:%02d" % (hours, mins, secs)
| 26.014706 | 79 | 0.50424 | 1,719 | 0.971735 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.145845 |
c3dc3299c8eb137f9e3ec5991afb8d2669fe74f8 | 5,223 | py | Python | storm_analysis/L1H/cs_analysis.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | null | null | null | storm_analysis/L1H/cs_analysis.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | null | null | null | storm_analysis/L1H/cs_analysis.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | 1 | 2021-04-19T18:17:06.000Z | 2021-04-19T18:17:06.000Z | #!/usr/bin/env python
"""
Perform compressed sensing analysis on a dax file using the
homotopy approach. Return the results in hres image format and
as a list of object locations.
Hazen 09/12
"""
import numpy
import storm_analysis.sa_library.datareader as datareader
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.sa_library.writeinsight3 as writeinsight3
import storm_analysis.L1H.setup_A_matrix as setup_A_matrix
import storm_analysis.L1H.homotopy_imagea_c as homotopy_imagea_c
def analyze(movie_name, settings_name, hres_name, bin_name):
    """
    Run compressed-sensing (homotopy) analysis on a movie.

    movie_name    - input movie file (.dax/.tiff/.spe).
    settings_name - analysis settings XML file.
    hres_name     - 'high resolution' output file (compressed final image).
    bin_name      - Insight3 binary localizations output file.
    """
    movie_data = datareader.inferReader(movie_name)

    #
    # FIXME:
    #
    # This should also start at the same frame as hres in the event of a restart.
    #
    i3_file = writeinsight3.I3Writer(bin_name)

    params = parameters.ParametersL1H().initFromFile(settings_name)

    # Load the a matrix and setup the homotopy image analysis class.
    a_mat_file = params.getAttr("a_matrix")
    print("Using A matrix file:", a_mat_file)
    a_mat = setup_A_matrix.loadAMatrix(a_mat_file)

    image = movie_data.loadAFrame(0)
    htia = homotopy_imagea_c.HomotopyIA(a_mat,
                                        params.getAttr("epsilon"),
                                        image.shape)

    # This opens the file. If it already exists, then it sets the file pointer
    # to the end of the file & returns the number of the last frame analyzed.
    curf = htia.openHRDataFile(hres_name)

    # Figure out which frame to start & stop at.
    [dax_x, dax_y, dax_l] = movie_data.filmSize()

    if params.hasAttr("start_frame"):
        if (params.getAttr("start_frame") >= curf) and (params.getAttr("start_frame") < dax_l):
            curf = params.getAttr("start_frame")

    if params.hasAttr("max_frame"):
        if (params.getAttr("max_frame") > 0) and (params.getAttr("max_frame") < dax_l):
            dax_l = params.getAttr("max_frame")

    print("Starting analysis at frame", curf)

    # Analyze the dax data.
    total_peaks = 0
    try:
        while curf < dax_l:
            # Load image, subtract baseline & remove negative values.
            # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
            # numpy.float64 is the equivalent concrete dtype.
            image = movie_data.loadAFrame(curf).astype(numpy.float64)

            # Convert to photo-electrons.
            image -= params.getAttr("camera_offset")
            image = image/params.getAttr("camera_gain")

            # Remove negative values.
            mask = (image < 0)
            image[mask] = 0

            # Analyze image.
            hres_image = htia.analyzeImage(image)
            # NOTE(review): saveHRFrame's return value is immediately
            # overwritten below — confirm it is called only for its side
            # effect of persisting the frame.
            peaks = htia.saveHRFrame(hres_image, curf + 1)
            [cs_x, cs_y, cs_a, cs_i] = htia.getPeaks(hres_image)
            i3_file.addMoleculesWithXYAItersFrame(cs_x, cs_y, cs_a, cs_i, curf+1)

            peaks = cs_x.size
            total_peaks += peaks
            print("Frame:", curf, peaks, total_peaks)
            curf += 1

    except KeyboardInterrupt:
        print("Analysis stopped.")

    # cleanup
    htia.closeHRDataFile()
    i3_file.close()
if __name__ == "__main__":
    import argparse

    # Command-line driver: parse the four required paths and run the analysis.
    cli = argparse.ArgumentParser(description='L1H analysis - Babcock, Optics Express, 2013')

    cli.add_argument('--movie', dest='movie', type=str, required=True,
                     help="The name of the movie to analyze, can be .dax, .tiff or .spe format.")
    cli.add_argument('--xml', dest='settings', type=str, required=True,
                     help="The name of the settings xml file.")
    cli.add_argument('--hres', dest='hres', type=str, required=True,
                     help="The name of 'high resolution' output file. This a compressed version of the final image.")
    cli.add_argument('--bin', dest='mlist', type=str, required=True,
                     help="The name of the localizations output file. This is a binary file in Insight3 format.")

    parsed = cli.parse_args()
    analyze(parsed.movie, parsed.settings, parsed.hres, parsed.mlist)
#
# The MIT License
#
# Copyright (c) 2012 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 35.290541 | 122 | 0.671262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,434 | 0.466016 |
c3dc8e9574bfd412866a0cadf2943bfad0114e52 | 3,620 | py | Python | ism/dal/sqlite3_dao.py | kaliklipper/python_state_machine | 386eb10113c621fbbac6be8d7afbd1b384b4bf47 | [
"MIT"
] | 1 | 2021-07-13T23:23:32.000Z | 2021-07-13T23:23:32.000Z | ism/dal/sqlite3_dao.py | kaliklipper/python_state_machine | 386eb10113c621fbbac6be8d7afbd1b384b4bf47 | [
"MIT"
] | 9 | 2021-03-30T15:04:02.000Z | 2021-04-08T18:28:38.000Z | ism/dal/sqlite3_dao.py | kaliklipper/python_state_machine | 386eb10113c621fbbac6be8d7afbd1b384b4bf47 | [
"MIT"
] | null | null | null | """
Methods for handling DB creation and CRUD operations in Sqlite3.
"""
# Standard library imports
import logging
import sqlite3
# Local application imports
from ism.exceptions.exceptions import UnrecognisedParameterisationCharacter
from ism.interfaces.dao_interface import DAOInterface
class Sqlite3DAO(DAOInterface):
    """SQLite3 implementation of the DAO interface.

    Handles database creation and CRUD operations against SQLITE3. Each
    statement opens a fresh connection and always releases it when done.
    """

    def __init__(self, *args):
        """Expects a properties dict as args[0] with a 'database' section."""
        self.db_path = args[0]['database']['db_path']
        # When true, SQL errors are re-raised after logging instead of
        # being swallowed.
        self.raise_on_sql_error = args[0].get('database', {}).get('raise_on_sql_error', False)
        self.logger = logging.getLogger('ism.sqlite3_dao.Sqlite3DAO')
        self.logger.info('Initialising Sqlite3DAO.')
        self.cnx = None

    def close_connection(self):
        """Close the active connection, if one exists."""
        if self.cnx:
            self.cnx.close()

    def create_database(self, *args):
        """Calling open_connection creates the database in SQLITE3.

        Seems redundant but is useful to honour the interface.
        """
        self.open_connection(*args)
        self.close_connection()

    def execute_sql_query(self, sql, params=()):
        """Execute a SQL query and return the fetched rows (None on error).

        :param sql: parameterised SQL string
        :param params: bind parameters for the statement
        """
        try:
            self.open_connection()
            cursor = self.cnx.cursor()
            cursor.execute(sql, params)
            return cursor.fetchall()
        except sqlite3.Error as e:
            # Use the instance logger with lazy %-args (the original called
            # the root logging module with an eager f-string).
            self.logger.error('Error executing sql query (%s) (%s): %s', sql, params, e)
            if self.raise_on_sql_error:
                raise e
        finally:
            # Previously the connection leaked whenever the statement raised.
            self.close_connection()

    def execute_sql_statement(self, sql, params=()):
        """Execute a SQL statement, committing on success."""
        try:
            self.open_connection()
            cursor = self.cnx.cursor()
            cursor.execute(sql, params)
            self.cnx.commit()
        except sqlite3.Error as e:
            # Message corrected: this path runs statements, not queries.
            self.logger.error('Error executing sql statement (%s) (%s): %s', sql, params, e)
            if self.raise_on_sql_error:
                raise e
        finally:
            self.close_connection()

    def open_connection(self, *args) -> sqlite3.Connection:
        """Creates a database connection.

        Opens a SQLITE3 database connection (creating the database file if
        needed) and returns the connector, or None when connecting fails.
        """
        try:
            self.cnx = sqlite3.connect(self.db_path)
            return self.cnx
        except sqlite3.Error as error:
            # The original passed `error` as a stray positional argument
            # with no placeholder, which broke the log record's formatting.
            self.logger.error("Error while connecting to Sqlite3 database: %s", error)

    @staticmethod
    def prepare_parameterised_statement(sql: str) -> str:
        """Prepare a parameterised sql statement for this RDBMS.

        Third party developers will want to use the DAO to run CRUD
        operations against the DB, but we support multiple RDBMS. e.g.

        MySql: INSERT INTO Employee
            (id, Name, Joining_date, salary) VALUES (%s,%s,%s,%s)

        Sqlite3: INSERT INTO Employee
            (id, Name, Joining_date, salary) VALUES (?,?,?,?)

        This method ensures that the parameterisation is set correctly
        for the RDBMS in use. Method doesn't use very vigorous checking but
        as this should only be an issue while developing a new action pack
        it should be sufficient for now.
        """
        if '%s' in sql:
            return sql.replace('%s', '?')
        elif '?' in sql:
            return sql
        else:
            raise UnrecognisedParameterisationCharacter(
                f'Parameterisation character not recognised / found in SQL string ({sql})'
            )
| 34.807692 | 94 | 0.614641 | 3,327 | 0.919061 | 0 | 0 | 1,068 | 0.295028 | 0 | 0 | 1,667 | 0.460497 |
c3ddb0f5cc3958cd4468c89bd47c157287e41cea | 7,373 | py | Python | src/content_selection/old_lda.py | elenakhas/summarization_system | c8da139daea768898bb1b32ff671d204bba3a9a7 | [
"MIT"
] | null | null | null | src/content_selection/old_lda.py | elenakhas/summarization_system | c8da139daea768898bb1b32ff671d204bba3a9a7 | [
"MIT"
] | null | null | null | src/content_selection/old_lda.py | elenakhas/summarization_system | c8da139daea768898bb1b32ff671d204bba3a9a7 | [
"MIT"
] | null | null | null | import os
import json
import argparse
from gensim import corpora
from gensim.utils import simple_preprocess
from gensim.models import LdaModel
# reaad json file
def parseJson(json_file):
    """
    Parse the JSON file produced by the pre-processing pipeline.

    :param json_file: path to the JSON file
    :return: dictionary with docID as key
    """
    with open(json_file) as handle:
        return json.load(handle)
def get_corpus_topics(text, lda_model):
    """
    Compute the per-document topic distribution for `text` under `lda_model`.

    :param text: iterable of strings, joined into a single document
    :param lda_model: a trained gensim LdaModel
    :return: list with one (topic id, probability) distribution per document
    """
    joined = [' '.join(text)]
    tokenized = [simple_preprocess(doc) for doc in joined]
    vocab = corpora.Dictionary(tokenized)
    bow = [vocab.doc2bow(doc) for doc in tokenized]
    # minimum_probability=0.0 keeps every topic in each distribution.
    return list(lda_model.get_document_topics(bow, minimum_probability=0.0))
def lda_analysis(input_data, num_topics=3, num_sentences=20):
    """
    Score every sentence in `input_data` against per-topic LDA term weights.

    Each top-level key of `input_data` is treated as its own corpus: an LDA
    model is trained on the sentences' lemmas, the top terms per topic are
    extracted, and each sufficiently long sentence is annotated IN PLACE
    with its best topic id ("lda_topic_id") and the summed weight of its
    topic terms ("LDAscore").

    :param input_data: {key: {sentence: {'lemmas': [...], 'doc_id': ...}}}
    :param num_topics: number of LDA topics to fit per corpus
    :param num_sentences: NOTE(review): unused in this function
    :return: `input_data`, mutated with the LDA annotations
    """
    picked_sentences = {}
    # treat each set of documents as a separate corpus and find topics?
    for key, value in input_data.items():
        _texts = []
        for k, v in input_data[key].items():
            _texts.append(' '.join(input_data[key][k]['lemmas']))
        texts = [simple_preprocess(doc) for doc in _texts]
        dictionary = corpora.Dictionary(texts)
        corpus = [dictionary.doc2bow(line) for line in texts]
        # build lda model:
        lda_model = LdaModel(corpus=corpus, id2word=dictionary, num_topics=num_topics)
        # get document topic distribution:
        doc_topic_dist = get_corpus_topics(_texts, lda_model)  # NOTE(review): unused below
        # print(lda_model.show_topics(num_words=20))
        topic_terms = lda_model.show_topics(num_words=50)

        # get top words for each topic:
        # Parse gensim's 'prob*"term" + prob*"term" + ...' strings into
        # {topic_id: {term: probability}}.
        topic_term_dict = {}
        rel_terms = []  # NOTE(review): never populated or read
        for topic_dist in topic_terms:
            topic_id = topic_dist[0]
            topic_term_dict[topic_id] = {}
            topic_terms = topic_dist[1]  # rebinds the source name; loop still iterates the original list
            for _split in topic_terms.split('+'):
                topic_term_prob = _split.split('*')[0]
                topic_term = str(_split.split('*')[1]).replace('"', '').strip()
                topic_term_dict[topic_id][topic_term] = float(topic_term_prob)
                # rel_terms.append(topic_term)

        picked_sentences[key] = {}
        # pick sentences from the corpus that have highest score for the topic terms according to some score
        summary_sentences = {}  # NOTE(review): unused
        sen_ranker = []
        # calculate rank for each sentence with respect to each topic:
        for k, v in input_data[key].items():
            sen = k
            # sen = sen.lower()
            sen_length = len(sen.split(' '))
            sen_id = input_data[key][sen]['doc_id']
            # Very short sentences are never scored.
            if sen_length < 10:
                continue
            sen_topic = []
            # compute score for each topic:
            for topic in range(num_topics):
                # Sum the weights of the sentence's lemmas that appear in
                # this topic's top-50 term list.
                rel_sen_terms = list(set(input_data[key][k]['lemmas']) & set(topic_term_dict[topic].keys()))
                sen_score = 0
                for term in rel_sen_terms:
                    sen_score += topic_term_dict[topic][term]
                sen_topic.append((topic, sen_score, sen, sen_id))
            # select top one from sen_topic and append to sen_ranker:
            top_sen_topic = sorted(sen_topic, key=lambda x: x[1], reverse=True)[0]
            sen_ranker.append(top_sen_topic)

        # Annotate each scored sentence with its winning topic and score.
        for _sen in sen_ranker:
            topic = _sen[0]
            sen_score = _sen[1]
            sen = _sen[2]
            sen_id = _sen[3]
            input_data[key][sen].update({"LDAscore": sen_score})
            input_data[key][sen].update({"lda_topic_id": topic})
    return input_data
def update_scores(dic):
    """
    Normalize each sentence's tf-idf, concreteness and LDA scores by the
    per-topic maxima and combine them into a single 'total' score.

    NOTE: the sentence info dicts inside `dic` are normalized IN PLACE; the
    returned dict holds copies of the surviving entries.

    :param dic: {topic_id: {sentence: {'tf_idf', 'concreteness', 'length',
                                       and optionally 'LDAscore', ...}}}
    :return: {topic_id: {sentence: info}} restricted to sentences longer
             than 7 tokens that carry an LDA score
    """
    new_dict = {}
    for topic_id, sent in dic.items():
        new_dict[topic_id] = dict()
        tf_idf = []
        concreteness = []
        lda = []
        for key, info in sent.items():
            tf_idf.append(info['tf_idf'])
            concreteness.append(info['concreteness'])
            try:
                lda.append(info['LDAscore'])
            except KeyError:
                continue
        # Guard against empty score lists and all-zero maxima: the original
        # max()/division calls crashed whenever a topic had no LDA-scored
        # sentences. Such topics simply yield no selected sentences.
        m_tf_idf = max(tf_idf, default=0)
        m_concrete = max(concreteness, default=0)
        m_lda = max(lda, default=0)
        if not (m_tf_idf and m_concrete and m_lda):
            continue
        for key, info in sent.items():
            if info['length'] > 7:
                info['tf_idf'] = info['tf_idf'] / m_tf_idf
                info['concreteness'] = info['concreteness'] / m_concrete
                try:
                    info['LDAscore'] = info['LDAscore'] / m_lda
                    # Length-penalized product of the normalized scores.
                    info['total'] = (info['tf_idf'] * info['concreteness'] * info['LDAscore']) / info['length']
                    sent_info = {k: v for k, v in info.items()}
                    new_dict[topic_id][key] = sent_info
                except KeyError:
                    # No LDAscore: tf_idf/concreteness were still normalized
                    # (matching the original behaviour) but the sentence is
                    # not selected.
                    continue
    return new_dict
def select_sent(data, num_sentences):
    """
    Pick the top-scoring sentences per topic, balanced across the three LDA
    topic groups (num_sentences / 3 from each, truncated).

    The unused `candidates` accumulator from the original was removed.

    :param data: {topic_id: {sentence: {'total', 'lda_topic_id', ...}}}
    :param num_sentences: overall selection budget, split evenly three ways
    :return: {topic_id: {sentence: info}} for the selected sentences
    """
    per_group = int(num_sentences / 3)
    picked_sent = {}
    for topic_id, sent in data.items():
        group_1 = []
        group_2 = []
        group_3 = []
        for key, info in sent.items():
            try:
                total = info['total']
                if info['lda_topic_id'] == 0:
                    group_1.append((key, total))
                elif info['lda_topic_id'] == 1:
                    group_2.append((key, total))
                else:
                    group_3.append((key, total))
            except KeyError:
                # Sentences missing 'total' or 'lda_topic_id' are ineligible.
                continue
        sorted_1 = sorted(group_1, key=lambda x: x[1], reverse=True)[:per_group]
        sorted_2 = sorted(group_2, key=lambda x: x[1], reverse=True)[:per_group]
        sorted_3 = sorted(group_3, key=lambda x: x[1], reverse=True)[:per_group]
        sorted_sentences = sorted_1 + sorted_2 + sorted_3
        picked_sent[topic_id] = dict()
        for sentence, score in sorted_sentences:
            sent_info = data[topic_id][sentence]
            sent_info['total'] = score
            picked_sent[topic_id][sentence] = sent_info
    return picked_sent
def sentence_selection_wrapper(input_data, selected_json_path, num_sentences=20, overwrite=False):
    """
    Run the full LDA scoring + selection pipeline, caching the result on disk.

    When `selected_json_path` already exists and `overwrite` is false, the
    cached selection is returned without re-running the analysis.
    """
    if os.path.exists(selected_json_path) and not overwrite:
        with open(selected_json_path) as cached:
            return json.load(cached)
    scored = lda_analysis(input_data, num_topics=3)
    normalized = update_scores(scored)
    selection = select_sent(normalized, num_sentences)
    with open(selected_json_path, "w") as out_file:
        json.dump(selection, out_file, indent=2)
    return selection
if __name__ == "__main__":
    # Test LDA module
    # Command-line entry point used when running this module directly.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="config.json")
    parser.add_argument("--deliverable", type=str, default="D2", help='deliverable number, i.e. D2')
    parser.add_argument("--split", type=str, default="training", choices=["devtest", "evaltest", "training"])
    parser.add_argument("--run_id", default=None)
    parser.add_argument("--test", default=False)
    args = parser.parse_args()
    # NOTE(review): `run` is not defined anywhere in this module, so this
    # call raises NameError — confirm whether sentence_selection_wrapper
    # (or a removed `run` helper) was intended here.
    run(args)
| 35.447115 | 111 | 0.595416 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,333 | 0.180795 |
c3de38c22cfda9f4f7342bce1d98e9209e645392 | 1,267 | py | Python | uva/247.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 3 | 2020-06-25T21:04:02.000Z | 2021-05-12T03:33:19.000Z | uva/247.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | null | null | null | uva/247.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 1 | 2020-06-25T21:04:06.000Z | 2020-06-25T21:04:06.000Z | # Author: btjanaka (Bryon Tjanaka)
# Problem: (UVa) 247
import sys
from collections import defaultdict
def kosaraju(g, g_rev):
    """
    Return the strongly connected components of a directed graph.

    `g` maps each vertex to its successors; `g_rev` is the same graph with
    every edge reversed. Result is a list of components, each a vertex list.
    """
    finish_order = []
    seen = set()

    # First pass: DFS on g, recording vertices in order of completion.
    def explore(node):
        seen.add(node)
        for nbr in g[node]:
            if nbr not in seen:
                explore(nbr)
        finish_order.append(node)

    for node in g:
        if node not in seen:
            explore(node)

    # Second pass: DFS on the reversed graph in decreasing finish order;
    # each tree found is one strongly connected component.
    sccs = []
    seen.clear()

    def collect(node):
        sccs[-1].append(node)
        seen.add(node)
        for nbr in g_rev[node]:
            if nbr not in seen:
                collect(nbr)

    for node in reversed(finish_order):
        if node not in seen:
            sccs.append([])
            collect(node)
    return sccs
def main():
    """
    Solve UVa 247 ("Calling Circles"): read call pairs from stdin until a
    "0 0" sentinel and print each strongly connected calling circle.
    """
    case = 1
    while True:
        # input: vertex count n (unused beyond the sentinel) and edge count m
        n, m = map(int, input().split())
        if n == 0 and m == 0:
            break
        g, g_rev = defaultdict(set), defaultdict(set)
        for _ in range(m):
            u, v = input().strip().split()
            g[u].add(v)
            g[v]          # touch the key so every vertex appears in g
            g_rev[v].add(u)
            g_rev[u]      # likewise for the reversed graph

        # output (blank line between consecutive data sets)
        if case != 1:
            print()
        print(f"Calling circles for data set {case}:")
        for c in kosaraju(g, g_rev):
            print(", ".join(c))
        case += 1


# Guarding the entry point lets the module be imported (e.g. for testing)
# without immediately reading stdin.
if __name__ == "__main__":
    main()
| 19.796875 | 54 | 0.481452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.088398 |
c3de8bb295f59593da837f846389cf50e4339c8b | 1,970 | py | Python | lib/dataset/utils.py | decisionforce/mmTransformer | be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b | [
"Apache-2.0"
] | 199 | 2021-03-23T06:10:50.000Z | 2022-03-31T08:23:00.000Z | lib/dataset/utils.py | yuzhouxianzhi/mmTransformer | be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b | [
"Apache-2.0"
] | 16 | 2021-04-12T12:48:46.000Z | 2022-03-10T14:11:26.000Z | lib/dataset/utils.py | yuzhouxianzhi/mmTransformer | be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b | [
"Apache-2.0"
] | 23 | 2021-03-29T01:37:56.000Z | 2022-03-30T01:48:41.000Z | import math
import numpy as np
from sklearn.linear_model import LinearRegression
def get_heading_angle(traj: np.ndarray):
    """
    Estimate a trajectory's heading angle, measured from the +y axis
    (positive clockwise), by fitting a line through its points.

    traj: [N, 2] array of (x, y) positions, N >= 6
    """
    _traj = traj.copy()
    traj = traj.copy()
    # NOTE(review): the second argsort re-sorts the already-sorted array, so
    # the points end up ordered by y only — confirm whether ordering by both
    # axes was intended.
    traj = traj[traj[:, 0].argsort()]
    traj = traj[traj[:, 1].argsort()]
    if traj.T[0].max()-traj.T[0].min() > traj.T[1].max()-traj.T[1].min():  # * dominated by x
        # Fit y = a*x + b and take the slope's arctangent.
        reg = LinearRegression().fit(traj[:, 0].reshape(-1, 1), traj[:, 1])
        traj_dir = _traj[-2:].mean(0) - _traj[:2].mean(0)
        reg_dir = np.array([1, reg.coef_[0]])
        angle = np.arctan(reg.coef_[0])
    else:
        # using y as sample and x as the target to fit a line
        reg = LinearRegression().fit(traj[:, 1].reshape(-1, 1), traj[:, 0])
        traj_dir = _traj[-2:].mean(0) - _traj[:2].mean(0)
        reg_dir = np.array([reg.coef_[0], 1])*np.sign(reg.coef_[0])
        if reg.coef_[0] == 0:
            # Zero slope here would make 1/coef overflow to inf, and
            # arctan(inf) == pi/2 — take pi/2 directly. This replaces a
            # leftover `import pdb; pdb.set_trace()` debugging breakpoint.
            angle = np.pi / 2
        else:
            angle = np.arctan(1/reg.coef_[0])
    if angle < 0:
        angle = 2*np.pi + angle
    # Flip by pi when the regression direction opposes the actual direction
    # of travel (endpoints minus start points).
    if (reg_dir*traj_dir).sum() < 0:  # not same direction
        angle = (angle+np.pi) % (2*np.pi)
    # angle from y
    angle_to_y = angle-np.pi/2
    angle_to_y = -angle_to_y
    return angle_to_y
def transform_coord(coords, angle):
    """Rotate (..., 2) coordinates by `angle` radians (standard rotation matrix)."""
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    x = coords[..., 0]
    y = coords[..., 1]
    rotated_x = cos_a * x - sin_a * y
    rotated_y = cos_a * y + sin_a * x
    return np.stack((rotated_x, rotated_y), axis=-1)
def transform_coord_flip(coords, angle):
    """Rotate Nx2 coordinates by `angle` radians, then mirror the x axis."""
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    x = coords[:, 0]
    y = coords[:, 1]
    flipped_x = -(cos_a * x - sin_a * y)  # rotate, then flip horizontally
    rotated_y = cos_a * y + sin_a * x
    # y_transform = -1*y_transform # flip  (y flip intentionally disabled)
    return np.stack((flipped_x, rotated_y), axis=-1)
| 31.269841 | 93 | 0.586294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.12335 |
c3dfc2511f343d7ceb9692a36dbb37532c595d9b | 1,637 | py | Python | src/utils/schema/jobserver/migrations/0013_migrate_job_completion.py | gravitationalwavedc/gwcloud_job_server | fb96ed1dc6baa240d1a38ac1adcd246577285294 | [
"MIT"
] | null | null | null | src/utils/schema/jobserver/migrations/0013_migrate_job_completion.py | gravitationalwavedc/gwcloud_job_server | fb96ed1dc6baa240d1a38ac1adcd246577285294 | [
"MIT"
] | 8 | 2020-06-06T08:39:37.000Z | 2021-09-22T18:01:47.000Z | src/utils/schema/jobserver/migrations/0013_migrate_job_completion.py | gravitationalwavedc/gwcloud_job_server | fb96ed1dc6baa240d1a38ac1adcd246577285294 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-05-17 22:34
from django.db import migrations, models
from django.utils import timezone
def migrate_job_completion(apps, schema_editor):
    """
    Backfill a synthetic '_job_completion_' JobHistory entry for every job
    that does not already have one: state 500 (success) when every step of
    the job has a successful record, state 400 (error) otherwise.
    """
    Job = apps.get_model("jobserver", "Job")
    JobHistory = apps.get_model("jobserver", "JobHistory")

    # Iterate over all jobs
    for job in Job.objects.all():
        # Check if there are any _job_completion_ job history instances for this job
        histories = JobHistory.objects.filter(job=job)
        if histories.filter(what='_job_completion_').exists():
            # Nothing more to do for this job
            continue

        job_error = False

        # Check that all job steps exist with a success value.
        # flat=True yields plain strings; without it values_list returns
        # 1-tuples and filter(what=<tuple>) can never match the CharField.
        job_steps = histories.values_list('what', flat=True).distinct()
        for step in job_steps:
            if not histories.filter(what=step, state=500).exists():
                job_error = True

        if job_error:
            JobHistory.objects.create(
                job=job,
                what="_job_completion_",
                state=400,  # ERROR
                timestamp=timezone.now(),
                details="Error state migrated forwards"
            )
        else:
            JobHistory.objects.create(
                job=job,
                what="_job_completion_",
                state=500,  # SUCCESS (the original comment wrongly said ERROR)
                timestamp=timezone.now(),
                details="Success state migrated forwards"
            )
class Migration(migrations.Migration):
    # Data migration: backfills '_job_completion_' history entries.
    # NOTE(review): RunPython is given no reverse function, so this
    # migration cannot be unapplied.

    dependencies = [
        ('jobserver', '0012_auto_20200517_2234'),
    ]

    operations = [
        migrations.RunPython(migrate_job_completion),
    ]
| 31.480769 | 84 | 0.592547 | 195 | 0.11912 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.271839 |
c3e1060bf2a185aa6c94956d406f3149c414b1ec | 8,603 | py | Python | tests/test.py | limited/Superior-Cache-ANalyzer | 0552cd10136b2bee953d22277fdc700ce7c6dd2d | [
"Apache-2.0"
] | null | null | null | tests/test.py | limited/Superior-Cache-ANalyzer | 0552cd10136b2bee953d22277fdc700ce7c6dd2d | [
"Apache-2.0"
] | null | null | null | tests/test.py | limited/Superior-Cache-ANalyzer | 0552cd10136b2bee953d22277fdc700ce7c6dd2d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2018 Comcast Cable Communications Management, LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains a suite of unit tests for the SCAN utility
"""
#pylint: disable=W0212
import typing
import struct
import os
import sys
import argparse
try:
from scan import utils, directory, span, config, stripe
except ImportError as e:
print("Tests should be run from the project's root directory (or while it's installed)! (%s)" % e, file=sys.stderr)
exit(1)
# Size in bytes of a packed on-disk span header: five uint32s plus one uint64.
DISK_HEADER_SIZE = struct.calcsize("5IQ")
# Size in bytes of a packed span block header record.
SPAN_BLOCK_HEADER_SIZE = struct.calcsize("4IiII")
# Expected byte length of one span block: 0x4000 store blocks.
SPAN_BLOCK_HEADER_LENGTH = 0x4000 * utils.STORE_BLOCK_SIZE

# A synthetic span block header used as a test fixture.
# offset: 4294967296
# length: 4294967296
rawSpanBlockHeader = struct.pack("4IiII", 0, 1, 0, 1, 1, 1, 0)
# A synthetic directory entry; 0xA000 is the offset-bits field and 0x2FFF
# presumably the size bits — see testDirEntry's expectations.
rawDirEntry = struct.pack("HHHHH", 0xA000, 0, 0x2FFF, 0, 0)
def testSpan() -> typing.List[str]:
    """
    Checks the loaded 'storage/cache.db' span against its expected values.

    Returns a list of failure messages, each prefixed with "(Span): ",
    followed by the failures reported for the span's first stripe.
    """
    if not config.spans():
        return ["(Span): No spans loaded!"]

    problems = []
    s = config.spans()['storage/cache.db'][1]

    # Disk Header tests
    if s.header.sizeof != DISK_HEADER_SIZE:
        problems.append("header size incorrect, is %d, should be %d" % (s.header.sizeof, DISK_HEADER_SIZE))
    if s.header.volumes != 1:
        problems.append("found %d volumes in header, expected 1" % (s.header.volumes,))
    if s.header.free:
        problems.append("header.free was %d, should've been 0" % (s.header.free,))
    if s.header.used != 1:
        problems.append("header.used was %d, should've been 1" % (s.header.used,))
    if s.header.diskvolBlocks != 1:
        problems.append("found %d diskvol_blocks in header, should've been 1" % (s.header.diskvolBlocks,))
    if s.header.blocks != 0x7fff:
        problems.append("found 0x%X blocks in header, should've been 0x7fff" % (s.header.blocks,))
    if len(s.header) != s.header.diskvolBlocks:
        problems.append("header length should be equal to diskvolBlocks (was %d, expected %d)" %
                        (len(s.header), s.header.diskvolBlocks))

    # Actual span tests
    if len(s.blocks) != 1:
        problems.append("found %d blocks, should've been 1" % (len(s.blocks),))
    if len(s) != len(s.blocks):
        problems.append("length '%d' doesn't match number of blocks '%d'" % (len(s), len(s.blocks)))

    tagged = ["(Span): %s" % problem for problem in problems]
    return tagged + testStripe(s[0])
def testSpanBlockHeader(sbh: stripe.SpanBlockHeader) -> typing.List[str]:
    """
    Tests various aspects of a stripe's span block header.

    Returns a list of human-readable descriptions of the tests that failed
    (empty when everything passes).
    """
    results = []
    if sbh.sizeof != SPAN_BLOCK_HEADER_SIZE:
        results.append("sizeof returns %d, should be %d!" %\
            (sbh.sizeof, SPAN_BLOCK_HEADER_SIZE))
    if sbh.number != 1:
        # BUGFIX: the message used to say "should've been 0", contradicting
        # the `!= 1` condition above; it now matches the actual expectation.
        results.append("number was %d, should've been 1" % (sbh.number,))
    if sbh.offset != 0x4000:
        results.append("offset was 0x%X, should've been 0x4000" % (sbh.offset,))
    if sbh.length != 0x4000:
        results.append("length was 0x%X, should've been 0x4000" % (sbh.length,))
    if len(sbh) != SPAN_BLOCK_HEADER_LENGTH:
        results.append("len() was 0x%X, should've been 0x%X" % (len(sbh), SPAN_BLOCK_HEADER_LENGTH))
    if sbh.Type is not utils.CacheType.HTTP:
        results.append("type was %r, should've been CacheType.HTTP" % (sbh.Type,))
    # if not sbh.free:
    #     results.append("reported it was unused, should have been used.")
    if sbh.avgObjSize != 8000:
        results.append("average object size was %d, should've been 8000" % (sbh.avgObjSize,))
    return ["(SpanBlockHeader): %s" % r for r in results]
def testDirEntry(dirent: typing.Optional[directory.DirEntry] = None) -> typing.List[str]:
    """
    Tests various aspects of a DirEntry.

    When no entry is given, one is constructed from the ``rawDirEntry``
    fixture. Returns a list of descriptions of the tests that failed
    (empty when everything passes).
    """
    results = []
    if dirent is None:
        dirent = directory.DirEntry(rawDirEntry)
    if dirent._offset != 0xA000:
        results.append("bad offset bits, expected 0xA000, got '0x%X'" % dirent._offset)
    if dirent.Offset != 0xA000 * config.INK_MD5_SIZE():
        results.append("bad offset, expected 0x%X, got '0x%X'" %\
            (0xA000*config.INK_MD5_SIZE(), dirent.Offset))
    if not dirent:
        results.append("__bool__ gave 'False' when 'True' was expected")
    if len(dirent) != 0x200:
        # BUGFIX: the message was missing the closing quote around %d
        # ("got '%d" -> "got '%d'").
        results.append("bad size, expected 512, got '%d'" % len(dirent))
    if dirent.sizeof != 10:
        results.append("sizeof gave wrong size, expected 10, got '%d'" % dirent.sizeof)
    if dirent.next != 0:
        results.append("bad next value, expected 0 got '%d'" % dirent.next)
    if dirent.token:
        results.append("token was set, but shouldn't be")
    if dirent.pinned:
        results.append("pinned was set, but shouldn't be")
    if dirent.phase:
        results.append("phase was set, but shouldn't be")
    if not dirent.head:
        results.append("head was not set, but should be")
    return ["(DirEntry): %s" % r for r in results]
def testDoc(doc: typing.Optional[directory.Doc] = None) -> typing.List[str]:
    """
    Tests various aspects of a Doc.
    Returns a list of the tests failed.
    """
    # TODO - figure out what Doc is and test it here.
    # Currently a stub: always reports zero failures.
    return []
def testStripe(s: stripe.Stripe) -> typing.List[str]:
    """
    Tests various aspects of a stripe.

    Returns a list of descriptions of the tests that failed, including the
    results of the stripe's span block header checks.
    """
    results = []
    # Load the stripe's directory before inspecting its fields.
    s.readDir()
    if s.writeCursor != 0x60000:
        results.append("write cursor at 0x%X, should've been at 0x60000" % (s.writeCursor,))
    if s.lastWritePos != 0x60000:
        results.append("last write position at 0x%X, should've been at 0x60000" % (s.lastWritePos,))
    if s.aggPos != 0x60000:
        results.append("agg. position at 0x%X, should've been at 0x60000" % (s.aggPos,))
    if s.generation:
        results.append("generation was %d, should've been 0" % (s.generation,))
    if s.phase:
        results.append("phase was %d, should've been 0" % (s.phase,))
    if s.cycle:
        results.append("cycle was %d, should've been 0" % (s.cycle,))
    if s.syncSerial:
        results.append("sync-serial was %d, should've been 0" % (s.syncSerial,))
    if s.writeSerial:
        results.append("write-serial was %d, should've been 0" % (s.writeSerial,))
    if s.dirty:
        results.append("dirty was %d, should've been 0" % (s.dirty,))
    if s.sectorSize != 0x1000:
        results.append("sector size was 0x%X, should've been 0x1000" % (s.sectorSize,))
    if s.unused:
        results.append("unused was %d, should've been 0" % (s.unused,))
    if s.numBuckets != 4182:
        results.append("contains %d buckets, but should have 4182" % (s.numBuckets,))
    if s.numSegs != 1:
        results.append("has %d segments, should be 1" % (s.numSegs,))
    if s.numDirEntries != 16728:
        results.append("contains %d DirEntrys, but should be 16728" % (s.numDirEntries,))
    if s.contentOffset != 0x60000:
        results.append("content starts at 0x%X, but should start at 0x60000" % (s.contentOffset,))
    if s.directoryOffset != 0x6000:
        results.append("directory (copy A) starts at 0x%X, but should start at 0x6000" % (s.directoryOffset,))
    # Tag our failures, then append the span-block-header failures.
    return ["(Stripe): %s" % r for r in results] + testSpanBlockHeader(s.spanBlockHeader)
def main() -> int:
    """
    Runs the tests and prints the failed tests to stdout followed by a count of passed/failed tests.
    Returns the number of failed tests.
    """
    parser = argparse.ArgumentParser(description="Testing Suite for the Superior Cache ANalyzer",
                                     epilog="NOTE: this test assumes that the cache is in the state defined "\
                                            "by scan.test.py, which is meant to run this test script through autest.")
    parser.add_argument("--ats_configs",
                        help="Specify the path to an ATS installation's config files to use for the tester."\
                             " (if --ats_root is also specified, this should be relative to that)",
                        type=str)
    parser.add_argument("--ats_root",
                        help="Specify the path to the root ATS installation (NOTE: Changes the pwd)",
                        type=str)
    opts = parser.parse_args()
    # Optionally re-root ourselves in the ATS installation before reading configs.
    if opts.ats_root:
        os.chdir(opts.ats_root)
    if opts.ats_configs:
        config.init(opts.ats_configs)
    failures = testSpan()
    for failure in failures:
        print(failure)
    print("Failed %d tests." % len(failures))
    return len(failures)
if __name__ == '__main__':
    # Once tests are stable, will exit with `main`'s return value.
    # (The exit status equals the number of failed tests, so 0 means success.)
    exit(main())
| 31.17029 | 116 | 0.676043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,942 | 0.458212 |
c3e1f510ef63ae835103c4ad8efe6e325362dcc1 | 8,810 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_powermgmt_policy.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_powermgmt_policy.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_powermgmt_policy.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module support/maturity metadata consumed by Ansible tooling (ansible-doc).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_powermgmt_policy
short_description: Manages the Power Management Policy of an ESXI host system
description:
- This module can be used to manage the Power Management Policy of ESXi host systems in given vCenter infrastructure.
version_added: 2.8
author:
- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
policy:
description:
- Set the Power Management Policy of the host system.
choices: [ 'high-performance', 'balanced', 'low-power', 'custom' ]
default: 'balanced'
type: str
esxi_hostname:
description:
- Name of the host system to work with.
- This is required parameter if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- This is required parameter if C(esxi_hostname) is not specified.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set the Power Management Policy of a host system to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_host }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
- name: Set the Power Management Policy of all host systems from cluster to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about host system's Power Management Policy
returned: always
type: dict
sample: {
"changed": true,
"result": {
"esxi01": {
"changed": true,
"current_state": "high-performance",
"desired_state": "high-performance",
"msg": "Power policy changed",
"previous_state": "balanced"
}
}
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
from ansible.module_utils._text import to_native
class VmwareHostPowerManagement(PyVmomi):
    """
    Manage the Power Management Policy of one or more ESXi host systems.
    """
    def __init__(self, module):
        """Collect the host systems selected via esxi_hostname/cluster_name."""
        super(VmwareHostPowerManagement, self).__init__(module)
        cluster_name = self.params.get('cluster_name')
        esxi_host_name = self.params.get('esxi_hostname')
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system with given configuration.")

    def ensure(self):
        """
        Manage power management policy of an ESXi host system.

        Applies the requested policy to every selected host (honoring check
        mode) and exits the module with per-host results via ``exit_json``,
        or fails via ``fail_json`` on an invalid/unsupported policy.
        """
        results = dict(changed=False, result=dict())
        policy = self.params.get('policy')
        host_change_list = []
        # Map the module's policy names to the vSphere numeric keys and the
        # "shortName" values reported by the host's PowerSystem API.
        power_policies = {
            'high-performance': {
                'key': 1,
                'short_name': 'static'
            },
            'balanced': {
                'key': 2,
                'short_name': 'dynamic'
            },
            'low-power': {
                'key': 3,
                'short_name': 'low'
            },
            'custom': {
                'key': 4,
                'short_name': 'custom'
            }
        }
        # Reverse lookup: API shortName -> module policy name.
        short_name_to_policy = dict((details['short_name'], name)
                                    for name, details in power_policies.items())
        for host in self.hosts:
            changed = False
            results['result'][host.name] = dict(msg='')
            power_system = host.configManager.powerSystem
            # get current power policy
            power_system_info = power_system.info
            current_host_power_policy = power_system_info.currentPolicy
            # The "name" and "description" parameters are pretty useless: they
            # only store localization keys ("PowerPolicy.<shortName>.name"),
            # so derive the module-level policy name from shortName instead.
            # BUGFIX: the previous if/elif chain had no fallback, leaving
            # `current_policy` unbound (UnboundLocalError) for an unknown
            # shortName; fall back to the raw shortName in that case.
            current_policy = short_name_to_policy.get(current_host_power_policy.shortName,
                                                      current_host_power_policy.shortName)
            results['result'][host.name]['desired_state'] = policy
            # Don't do anything if the power policy is already configured
            if current_host_power_policy.key == power_policies[policy]['key']:
                results['result'][host.name]['changed'] = changed
                results['result'][host.name]['previous_state'] = current_policy
                results['result'][host.name]['current_state'] = policy
                results['result'][host.name]['msg'] = "Power policy is already configured"
            else:
                # get available power policies and check if policy is included
                supported_policy = False
                power_system_capability = power_system.capability
                available_host_power_policies = power_system_capability.availablePolicy
                for available_policy in available_host_power_policies:
                    if available_policy.shortName == power_policies[policy]['short_name']:
                        supported_policy = True
                if supported_policy:
                    if not self.module.check_mode:
                        try:
                            power_system.ConfigurePowerPolicy(key=power_policies[policy]['key'])
                            changed = True
                            results['result'][host.name]['changed'] = True
                            results['result'][host.name]['msg'] = "Power policy changed"
                        except vmodl.fault.InvalidArgument:
                            self.module.fail_json(msg="Invalid power policy key provided for host '%s'" % host.name)
                        except vim.fault.HostConfigFault as host_config_fault:
                            self.module.fail_json(msg="Failed to configure power policy for host '%s': %s" %
                                                      (host.name, to_native(host_config_fault.msg)))
                    else:
                        # Check mode: report what would change without touching the host.
                        changed = True
                        results['result'][host.name]['changed'] = True
                        results['result'][host.name]['msg'] = "Power policy will be changed"
                    results['result'][host.name]['previous_state'] = current_policy
                    results['result'][host.name]['current_state'] = policy
                else:
                    changed = False
                    results['result'][host.name]['changed'] = changed
                    results['result'][host.name]['previous_state'] = current_policy
                    results['result'][host.name]['current_state'] = current_policy
                    self.module.fail_json(msg="Power policy '%s' isn't supported for host '%s'" %
                                              (policy, host.name))
            host_change_list.append(changed)
        if any(host_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
def main():
    """
    Entry point: build the argument spec, construct the module object and
    hand the work off to VmwareHostPowerManagement.
    """
    spec = vmware_argument_spec()
    spec.update(
        policy=dict(type='str', default='balanced',
                    choices=['high-performance', 'balanced', 'low-power', 'custom']),
        esxi_hostname=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )
    ansible_module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )
    # Delegate the actual policy reconciliation to the helper class.
    VmwareHostPowerManagement(ansible_module).ensure()
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| 37.330508 | 121 | 0.592168 | 5,152 | 0.58479 | 0 | 0 | 0 | 0 | 0 | 0 | 3,914 | 0.444268 |
c3e416fee43806fffc3e5957dc5258f61a408baa | 12,483 | py | Python | drizzlepac/run_hla_flag_filter.py | srodney/drizzlepac | c554523331a6204ce113d4317b7286ad39094f74 | [
"BSD-3-Clause"
] | 2 | 2020-02-10T16:15:58.000Z | 2021-03-24T20:08:03.000Z | drizzlepac/run_hla_flag_filter.py | srodney/drizzlepac | c554523331a6204ce113d4317b7286ad39094f74 | [
"BSD-3-Clause"
] | null | null | null | drizzlepac/run_hla_flag_filter.py | srodney/drizzlepac | c554523331a6204ce113d4317b7286ad39094f74 | [
"BSD-3-Clause"
] | 1 | 2020-09-02T18:08:39.000Z | 2020-09-02T18:08:39.000Z | #!/usr/bin/env python
"""This script simply calls drizzlepac/hlautils/hla_flag_filter.py for test purposes"""
import json
import glob
import os
import pdb
import sys
from astropy.table import Table
import drizzlepac
from drizzlepac.hlautils import config_utils
from drizzlepac.hlautils import poller_utils
def run_hla_flag_filter():
    """
    Drive hla_flag_filter.run_source_list_flaging over a canned test dataset.

    The dataset is identified by the single ``??????.out`` poller file in the
    current directory; inputs are first refreshed from the ``orig/``
    subdirectory, flag filtering is executed, the flagged catalog is written
    back out, and an automatic comparison against the original catalog is
    attempted when a comparison command was set up.
    """
    from drizzlepac.hlautils import hla_flag_filter
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    # All below lines are to get it working, not actual final code.
    out_file = glob.glob("??????.out")[0]
    # out_file = "j92c01.out" # acs_10265_01
    # #out_file = "j9es06.out" # acs_10595_06
    # Get parameter values
    if os.getcwd().endswith("orig"): sys.exit("Don't run in the orig dir! YOU'LL RUIN EVERYTHING!")
    # Reset the working directory contents from the pristine copies.
    for cmd in ['rm -f *.*', 'cp orig/* .']:
        print(cmd)
        os.system(cmd)
    obs_info_dict, total_list = poller_utils.interpret_obset_input(out_file)
    out_pars_file = "pars.json"
    for total_item in total_list:
        total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
                                                           use_defaults=True)
        for filter_item in total_item.fdp_list:
            filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
                                                                use_defaults=True)
        for expo_item in total_item.edp_list:
            expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
                                                              use_defaults=True)
    # * * * * hla_flag_filter.run_source_list_flagging inputs for HLA Classic test run* * * *
    if out_file == "j92c01.out":  # acs_10265_01
        # settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10265_01/flag_testing/hla
        mode = "dao"
        drizzled_image = "hst_10265_01_acs_wfc_f606w_drz.fits"
        flt_list = ["j92c01b4q_flc.fits", "j92c01b5q_flc.fits", "j92c01b7q_flc.fits", "j92c01b9q_flc.fits"]
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0  # force it to use the value from HLA classic
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force it to use the value from HLA classic
        exptime = 5060.0
        catalog_name = "hst_10265_01_acs_wfc_f606w_{}phot.txt".format(mode)
        catalog_data = Table.read(catalog_name, format='ascii')
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        # for filt_key in filter_sorted_flt_dict.keys(): flt_list = filter_sorted_flt_dict[filt_key]
        # os.remove("hst_10265_01_acs_wfc_f606w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file_old(all_drizzled_filelist[0].replace("drz.fits","wht.fits"))
        comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig/hst_10265_01_acs_wfc_f606w_{}phot_orig.txt hst_10265_01_acs_wfc_f606w_{}phot.txt -i hst_10265_01_acs_wfc_f606w_drz.fits hst_10265_01_acs_wfc_f606w_drz.fits -m absolute -p none".format(mode, mode)
    if out_file == "j9es06.out":  # acs_10595_06
        # settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10595_06_flag_testing/
        mode = "sex"
        drizzled_image = "hst_10595_06_acs_wfc_f435w_drz.fits"
        flt_list = ["j9es06rbq_flc.fits", "j9es06rcq_flc.fits", "j9es06req_flc.fits", "j9es06rgq_flc.fits"]
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0  # force it to use the value from HLA classic
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force it to use the value from HLA classic
        exptime = 710.0
        # BUGFIX: this branch previously read `catalog_name` before ever
        # assigning it, and a second Table.read referenced variables that only
        # exist in the HLA-Classic variant of this script -- both raised
        # NameError. Define the catalog name first, mirroring the j92c01
        # branch above.
        catalog_name = "hst_10595_06_acs_wfc_f435w_{}phot.txt".format(mode)
        catalog_data = Table.read(catalog_name, format='ascii')
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        # os.remove("hst_10595_06_acs_wfc_f435w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file("hst_10595_06_acs_wfc_f435w_wht.fits")
        comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig_cats/hst_10595_06_acs_wfc_f435w_{}phot.txt hst_10595_06_acs_wfc_f435w_{}phot.txt -i hst_10595_06_acs_wfc_f435w_drz.fits hst_10595_06_acs_wfc_f435w_drz.fits -m absolute -p none".format(mode, mode)
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    # Execute hla_flag_filter.run_source_list_flaging
    catalog_data = hla_flag_filter.run_source_list_flaging(drizzled_image, flt_list,
                                                           param_dict, exptime,
                                                           catalog_name, catalog_data,
                                                           proc_type, drz_root_dir, debug=True)
    catalog_data.write(catalog_name, delimiter=",", format='ascii', overwrite=True)
    print("Wrote {}".format(catalog_name))
    try:
        os.system(comp_cmd)
    except Exception:
        print("skipping automatic comparision run")
#=======================================================================================================================
def run_hla_flag_filter_HLAClassic():
    """
    Drive hla_flag_filter_HLAClassic.run_source_list_flaging over a canned
    test dataset, using the HLA Classic style input arguments.

    The dataset is identified by the single ``??????.out`` poller file in the
    current directory; inputs are first refreshed from the ``orig/``
    subdirectory, flag filtering is executed, the flagged catalog is written
    back out, and an automatic comparison against the original catalog is
    attempted when a comparison command was set up.
    """
    from drizzlepac.hlautils import hla_flag_filter_HLAClassic
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    # All below lines are to get it working, not actual final code.
    out_file = glob.glob("??????.out")[0]
    # out_file = "j92c01.out" # acs_10265_01
    # #out_file = "j9es06.out" # acs_10595_06
    # Get parameter values
    if os.getcwd().endswith("orig"): sys.exit("Don't run in the orig dir! YOU'LL RUIN EVERYTHING!")
    # Reset the working directory contents from the pristine copies.
    for cmd in ['rm -f *.*', 'cp orig/* .']:
        print(cmd)
        os.system(cmd)
    obs_info_dict, total_list = poller_utils.interpret_obset_input(out_file)
    out_pars_file = "pars.json"
    for total_item in total_list:
        total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
                                                           use_defaults=True)
        for filter_item in total_item.fdp_list:
            filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
                                                                use_defaults=True)
        for expo_item in total_item.edp_list:
            expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
                                                              use_defaults=True)
    # * * * * hla_flag_filter.run_source_list_flagging inputs for HLA Classic test run* * * *
    # NOTE(review): several of the variables below (readnoise/scale/zero point
    # dicts, detection_image, rms_dict, working_hla_red) are not passed to the
    # call at the bottom; presumably kept to mirror the full HLA Classic input
    # set -- confirm before removing.
    if out_file == "j92c01.out":  # acs_10265_01
        # settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10265_01/flag_testing/hla
        mode = "dao"
        all_drizzled_filelist = ["hst_10265_01_acs_wfc_f606w_drz.fits"]
        working_hla_red = os.getcwd()
        filter_sorted_flt_dict = {"f606w": ["j92c01b4q_flc.fits", "j92c01b5q_flc.fits", "j92c01b7q_flc.fits", "j92c01b9q_flc.fits"]}
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0  # force it to use the value from HLA classic
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force it to use the value from HLA classic
        readnoise_dictionary_drzs = {"hst_10265_01_acs_wfc_f606w_drz.fits": 4.97749985}
        scale_dict_drzs = {"hst_10265_01_acs_wfc_f606w_drz.fits": 0.05}
        zero_point_AB_dict = {"hst_10265_01_acs_wfc_f606w_drz.fits": 26.5136022236}
        exp_dictionary_scis = {"hst_10265_01_acs_wfc_f606w_drz.fits": 5060.0}
        detection_image = "hst_10265_01_acs_wfc_total_drz.fits"
        dict_newTAB_matched2drz = {"hst_10265_01_acs_wfc_f606w_drz.fits": "hst_10265_01_acs_wfc_f606w_{}phot.txt".format(mode)}
        phot_table_matched2cat = {all_drizzled_filelist[0]: Table.read(dict_newTAB_matched2drz[all_drizzled_filelist[0]], format='ascii')}
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        rms_dict = {"hst_10265_01_acs_wfc_f606w_drz.fits": "hst_10265_01_acs_wfc_f606w_rms.fits"}
        # for filt_key in filter_sorted_flt_dict.keys(): flt_list = filter_sorted_flt_dict[filt_key]
        # os.remove("hst_10265_01_acs_wfc_f606w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file_old(all_drizzled_filelist[0].replace("drz.fits","wht.fits"))
        comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig/hst_10265_01_acs_wfc_f606w_{}phot_orig.txt hst_10265_01_acs_wfc_f606w_{}phot.txt -i hst_10265_01_acs_wfc_f606w_drz.fits hst_10265_01_acs_wfc_f606w_drz.fits -m absolute -p none".format(mode,mode)
    if out_file == "j9es06.out":  # acs_10595_06
        # settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10595_06_flag_testing/
        mode = "sex"
        all_drizzled_filelist = ["hst_10595_06_acs_wfc_f435w_drz.fits"]
        working_hla_red = os.getcwd()
        filter_sorted_flt_dict = {"f435w": ["j9es06rbq_flc.fits", "j9es06rcq_flc.fits", "j9es06req_flc.fits", "j9es06rgq_flc.fits"]}
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0 #force it to use the value from HLA classic
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force it to use the value from HLA classic
        readnoise_dictionary_drzs = {"hst_10595_06_acs_wfc_f435w_drz.fits": 5.247499925}
        scale_dict_drzs = {"hst_10595_06_acs_wfc_f435w_drz.fits": 0.05}
        zero_point_AB_dict = {"hst_10595_06_acs_wfc_f435w_drz.fits": 25.6888167958}
        exp_dictionary_scis = {"hst_10595_06_acs_wfc_f435w_drz.fits": 710.0}
        detection_image = "hst_10595_06_acs_wfc_total_drz.fits"
        dict_newTAB_matched2drz = {"hst_10595_06_acs_wfc_f435w_drz.fits": "hst_10595_06_acs_wfc_f435w_{}phot.txt".format(mode)}
        phot_table_matched2cat = {all_drizzled_filelist[0]: Table.read(dict_newTAB_matched2drz[all_drizzled_filelist[0]], format='ascii')}
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        rms_dict = {"hst_10595_06_acs_wfc_f435w_drz.fits": "hst_10595_06_acs_wfc_f435w_rms.fits"}
        # os.remove("hst_10595_06_acs_wfc_f435w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file("hst_10595_06_acs_wfc_f435w_wht.fits")
        comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig_cats/hst_10595_06_acs_wfc_f435w_{}phot.txt hst_10595_06_acs_wfc_f435w_{}phot.txt -i hst_10595_06_acs_wfc_f435w_drz.fits hst_10595_06_acs_wfc_f435w_drz.fits -m absolute -p none".format(mode,mode)
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    # Execute hla_flag_filter.run_source_list_flaging
    catalog_data = hla_flag_filter_HLAClassic.run_source_list_flaging(all_drizzled_filelist, filter_sorted_flt_dict,
                                                                      param_dict, exp_dictionary_scis,
                                                                      dict_newTAB_matched2drz, phot_table_matched2cat,
                                                                      proc_type, drz_root_dir, debug = True)
    catalog_name = dict_newTAB_matched2drz[all_drizzled_filelist[0]]
    catalog_data.write(catalog_name, delimiter=",",format='ascii',overwrite=True)
    print("Wrote {}".format(catalog_name))
    try:
        os.system(comp_cmd)
    except:
        print("skipping automatic comparision run")
if __name__ == "__main__":
    # Runs the HLA-Classic-style variant by default.
    run_hla_flag_filter_HLAClassic()
| 61.79703 | 340 | 0.667868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,170 | 0.494272 |
c3e5a8f772560507fa6d343866762b6688837518 | 71 | py | Python | get_position.py | Zerschleuniger/fracture_mechanics-automate_franc2d | a16df615cd163ed8a573c000aad074c1387f6add | [
"MIT"
] | null | null | null | get_position.py | Zerschleuniger/fracture_mechanics-automate_franc2d | a16df615cd163ed8a573c000aad074c1387f6add | [
"MIT"
] | null | null | null | get_position.py | Zerschleuniger/fracture_mechanics-automate_franc2d | a16df615cd163ed8a573c000aad074c1387f6add | [
"MIT"
] | null | null | null | import pyautogui
import time
time.sleep(3)
print(pyautogui.position()) | 14.2 | 27 | 0.802817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c3e6442d2623708c8ac504af5f8c38b19fd3c0c5 | 4,182 | py | Python | tests/test_concatenated_brain_lstm.py | Dranero/NeuroEvolution-CTRNN_new | 19751b1511cebe59c7605ba97737530b69861088 | [
"MIT"
] | null | null | null | tests/test_concatenated_brain_lstm.py | Dranero/NeuroEvolution-CTRNN_new | 19751b1511cebe59c7605ba97737530b69861088 | [
"MIT"
] | null | null | null | tests/test_concatenated_brain_lstm.py | Dranero/NeuroEvolution-CTRNN_new | 19751b1511cebe59c7605ba97737530b69861088 | [
"MIT"
] | null | null | null | from tools.configurations import ConcatenatedBrainLSTMCfg
from brains.concatenated_brains import ConcatenatedLSTM
from brains.lstm import LSTMNumPy
from brains.ffnn import FeedForwardNumPy
import numpy as np
from gym.spaces import Box
class TestConcatenatedLSTM:
    """Tests for ConcatenatedLSTM (feed-forward front -> LSTM -> feed-forward back)."""
    def test_concatenated_lstm_output(self, concat_lstm_config: ConcatenatedBrainLSTMCfg):
        """
        Verify that a ConcatenatedLSTM produces the same output as manually
        chaining its three parts (FF front, LSTM, FF back) built from the
        corresponding slices of the same flat individual vector.
        """
        input_size = 28
        output_size = 8
        input_space = Box(-1, 1, (input_size,))
        output_space = Box(-1, 1, (output_size,))
        # Create random individual
        individual_size = ConcatenatedLSTM.get_individual_size(concat_lstm_config, input_space, output_space)
        individual = np.random.randn(individual_size).astype(np.float32)
        concatenated_lstm = ConcatenatedLSTM(input_space, output_space, individual, concat_lstm_config)
        # Basic assertion to test if the architecture of the concatenated brain matches the chosen configuration
        assert (concatenated_lstm.feed_forward_front.hidden_layers ==
                concat_lstm_config.feed_forward_front.hidden_layers)
        assert concatenated_lstm.lstm.input_space.shape[0] == concat_lstm_config.feed_forward_front.hidden_layers[-1]
        assert (concatenated_lstm.feed_forward_back.hidden_layers ==
                concat_lstm_config.feed_forward_back.hidden_layers)
        # To test the concatenated brain, construct the individual parts alone and later compare the results
        # First construct the leading Feed Forward part
        ff_front_cfg = concat_lstm_config.feed_forward_front
        ff_front_output_space = Box(-1, 1, (ff_front_cfg.hidden_layers[-1],))
        ff_front_individual_size = FeedForwardNumPy.get_individual_size(ff_front_cfg, input_space, ff_front_output_space)
        current_index = 0
        ff_front_individual = individual[current_index:current_index + ff_front_individual_size]
        current_index += ff_front_individual_size
        feed_forward_front = FeedForwardNumPy(input_space, ff_front_output_space, ff_front_individual, ff_front_cfg)
        # Create input space for Feed Forward part at the back here because it is the output space for the LSTM
        ff_back_cfg = concat_lstm_config.feed_forward_back
        ff_back_input_space = Box(-1, 1, (ff_back_cfg.hidden_layers[0],))
        # Create LSTM
        lstm_cfg = concat_lstm_config.lstm
        lstm_individual_size = LSTMNumPy.get_individual_size(lstm_cfg, ff_front_output_space, ff_back_input_space)
        lstm_individual = individual[current_index:current_index + lstm_individual_size]
        current_index += lstm_individual_size
        lstm = LSTMNumPy(ff_front_output_space, ff_back_input_space, lstm_individual, lstm_cfg)
        # Create Feed Forward at the back here
        ff_back_individual_size = FeedForwardNumPy.get_individual_size(ff_back_cfg, ff_back_input_space, output_space)
        ff_back_individual = individual[current_index:current_index + ff_back_individual_size]
        current_index += ff_back_individual_size
        feed_forward_back = FeedForwardNumPy(ff_back_input_space, output_space, ff_back_individual, ff_back_cfg)
        # The three parts together must consume the whole individual exactly.
        assert current_index == len(individual)
        # Hidden and cell states are random, initialize them to the same arrays
        hidden_concat = np.random.randn(*concatenated_lstm.lstm.hidden.shape)
        cell_concat = np.random.randn(*concatenated_lstm.lstm.cell_state.shape)
        hidden_single_step = hidden_concat.copy()
        cell_single_step = cell_concat.copy()
        concatenated_lstm.lstm.hidden = hidden_concat
        concatenated_lstm.lstm.cell_state = cell_concat
        lstm.hidden = hidden_single_step
        lstm.cell_state = cell_single_step
        # Construct random input and compare the results
        random_input_concat = np.random.randn(input_size)
        random_input_single_steps = np.copy(random_input_concat)
        output_concat = concatenated_lstm.step(random_input_concat)
        x = feed_forward_front.step(random_input_single_steps)
        x = lstm.step(x)
        output_single_steps = feed_forward_back.step(x)
        assert np.allclose(output_concat, output_single_steps)
| 45.956044 | 121 | 0.752989 | 3,943 | 0.94285 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.131516 |
c3e72c6c31d212fced0fce571e32ee5d5eba0f2d | 1,432 | py | Python | tests/pfmsoft/util/file/conftest.py | DonalChilde/Pfm-Util | 6be95278e61d3007da193742e089ea2ae7faa190 | [
"MIT"
] | 1 | 2021-09-25T22:03:01.000Z | 2021-09-25T22:03:01.000Z | tests/pfmsoft/util/file/conftest.py | DonalChilde/Pfm-Util | 6be95278e61d3007da193742e089ea2ae7faa190 | [
"MIT"
] | null | null | null | tests/pfmsoft/util/file/conftest.py | DonalChilde/Pfm-Util | 6be95278e61d3007da193742e089ea2ae7faa190 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
import pytest
@pytest.fixture(scope="module")
def test_path_root(tmp_path_factory):
    """Module-scoped temporary root directory for the file-utility tests."""
    return tmp_path_factory.mktemp("file_util")
@pytest.fixture(scope="module")
def json_data():
    """Small deterministic mapping used as the JSON payload in tests."""
    return {"key1": "value1", "key2": "value2", "key3": "value3"}
@pytest.fixture(scope="module")
def json_test_file_path(test_path_root: Path, json_data):
    """Write *json_data* to ``test_data.json`` under the test root and return its path."""
    target: Path = test_path_root / "test_data.json"
    with open(target, "w") as json_file:
        json.dump(json_data, json_file)
    return target
@pytest.fixture(scope="module")
def delta_root(test_path_root):
    """Create the ``test_files`` directory under the test root and return it."""
    root = test_path_root / Path("test_files")
    root.mkdir()
    return root
@pytest.fixture(scope="module")
def delta_files(delta_root: Path):
    """
    Build two directory branches under *delta_root* and touch three files in
    branch_2; return the created file paths in creation order
    (uno.txt, dos.txt, tres.txt).
    """
    (delta_root / Path("branch_1/alpha/bravo/charlie/delta/echo")).mkdir(parents=True)
    (delta_root / Path("branch_2/one/two/three")).mkdir(parents=True)
    created = []
    for subdir, name in (("branch_2/one", "uno.txt"),
                         ("branch_2/one/two", "dos.txt"),
                         ("branch_2/one/two/three", "tres.txt")):
        target = delta_root / Path(subdir) / Path(name)
        target.touch()
        created.append(target)
    return created
| 27.018868 | 75 | 0.697626 | 0 | 0 | 0 | 0 | 1,365 | 0.953212 | 0 | 0 | 273 | 0.190642 |
c3e9f4343126ec46f0c5a89073232da6448d66bd | 3,118 | py | Python | shell/core/help.py | dromero1452/shellsploit-framework | 38ce78542fd2dd2ac30f6567972d695ede1e4709 | [
"MIT"
] | 2 | 2019-12-23T15:47:02.000Z | 2020-01-06T09:51:57.000Z | shell/core/help.py | badfish5150/shellsploit-framework | 22bb910d33379ca29ddd10ba93a63e9ff1eab99d | [
"MIT"
] | null | null | null | shell/core/help.py | badfish5150/shellsploit-framework | 22bb910d33379ca29ddd10ba93a63e9ff1eab99d | [
"MIT"
] | 1 | 2021-12-23T16:35:24.000Z | 2021-12-23T16:35:24.000Z | # ------------------Bombermans Team---------------------------------#
# Author : B3mB4m
# Concat : b3mb4m@protonmail.com
# Project : https://github.com/b3mb4m/Shellsploit
# LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE
# ------------------------------------------------------------------#
from .color import *
def mainhelp():
    """Print the top-level shellsploit console menu (in green)."""
    # Menu text kept verbatim; only the surrounding code is restructured.
    menu = """
Usage Commands
===============
\tCommands              Description
\t------------          -------------
\thelp			Help menu
\tos			Command directly ur computer
\tuse			Select Module For Use
\tclear			Clear the menu
\tshow shellcodes		Show Shellcodes of Current Database
\tshow backdoors		Show Backdoors of Current Database
\tshow injectors		Show Injectors(Shellcode,dll,so etc..)
\tshow encoders		Show Encoders(Py,Ruby,PHP,Shellcode etc..)
"""
    print (bcolors.GREEN + menu)
def shellcodehelp():
    """Print the shellcode-module command menu (in green)."""
    # Menu text kept verbatim; only the surrounding code is restructured.
    menu = """
Shellcode Commands
===================
\tCommands              Description
\t------------          -------------
\tback			Exit Current Module
\tset			Set Value Of Options To Modules
\tunset			Unset Value Of Options To Modules
\tip			Get IP address(Requires net connection)
\tos			Command directly ur computer
\tclear			Clear the menu
\tdisas			Disassembly the shellcode(Support : x86/x64)
\twhatisthis		Learn which kind of shellcode it is
\titeration		Encoder iteration time
\tgenerate		Generate shellcode
\toutput			Save option to shellcode(txt,py,c,cpp,exe,raw,dll)
\tshow encoders		List all obfucscation encoders
\tshow options		Show Current Options Of Selected Module
"""
    print (bcolors.GREEN + menu)
def injectorhelp():
    """Print the injector-module command menu (in green)."""
    # Menu text kept verbatim; only the surrounding code is restructured.
    menu = """
Injector Commands
===================
\tCommands              Description
\t------------          -------------
\tset			Set Value Of Options To Modules
\tunset			Unset Value Of Options To Modules
\thelp			Help menu
\tback			Exit Current Module
\tos			Command directly ur computer
\tpids			Get PID list of computer
\tgetpid			Get specific PID on list(Ex. getpid Python)
\tclear			Clear the menu
\tinject			Start injector
\tshow options		Show current options of selected module
\tshow shellcode		Show current shellcode of selected module
"""
    print (bcolors.GREEN + menu)
def backdoorshelp():
    """Print the backdoor-module command menu (in green).

    Bug fix: the heading previously read "Injector Commands" — a copy/paste
    slip from injectorhelp(). The entries here are backdoor commands
    (e.g. ``generate  Generate backdoor``), so the heading is corrected.
    """
    print (bcolors.GREEN + """
Backdoor Commands
===================
\tCommands              Description
\t------------          -------------
\tset			Set Value Of Options To Modules
\tunset			Unset Value Of Options To Modules
\thelp			Help menu
\tback			Exit Current Module
\tos			Command directly ur computer
\tclear			Clear the menu
\tgenerate		Generate backdoor
\tshow options		Show current options of selected module
""")
| 36.255814 | 74 | 0.536562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,887 | 0.925914 |
c3ef2c796af21bf862af0de2fcb23a9f957c5de9 | 732 | py | Python | django_vcr/management/commands/download_tapes.py | areedtomlinson/django-vcr | 902f38346a01a8e6124e859e21d7acb9c97241fc | [
"MIT"
] | null | null | null | django_vcr/management/commands/download_tapes.py | areedtomlinson/django-vcr | 902f38346a01a8e6124e859e21d7acb9c97241fc | [
"MIT"
] | null | null | null | django_vcr/management/commands/download_tapes.py | areedtomlinson/django-vcr | 902f38346a01a8e6124e859e21d7acb9c97241fc | [
"MIT"
] | null | null | null | import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Django management command that downloads VCR tapes from a remote server.

    Currently a stub: ``handle`` only prints an announcement; the actual
    fetch, unpack, and install steps are still TODO (see comments below).
    """
    # Positional-args usage string shown in the command's help output.
    args = 'url'
    help = 'Download VCR tapes from remote server'
    # NOTE(review): optparse-style option_list was removed in Django 1.10;
    # this presumably targets an older Django — confirm, or port to
    # add_arguments()/argparse.
    option_list = BaseCommand.option_list + (
        make_option(
            '-u', '--url',
            dest='url',
            default=None,
            help='URL for zip/tar/gz file that contains all necessary tapes.'
        ),
    )
    # TODO: make an option to change overwrite behavior
    def handle(self, *args, **options):
        """Entry point; will fetch and unpack the tapes archive (not implemented yet)."""
        print("This command will download VCR tapes from a remote server.")
        # TODO: use urllib2 to fetch from URL
        # TODO: unzip/uncompress and move to VCR_CASSETTE_PATH
c3f037f9b6896ab3320e40dc02cc6755e166843f | 958 | py | Python | common/src/stack/command/stack/commands/dump/plugin_bootaction.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | common/src/stack/command/stack/commands/dump/plugin_bootaction.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | common/src/stack/command/stack/commands/dump/plugin_bootaction.py | shivanshs9/stacki | 258740748281dfe89b0f566261eaf23102f91aa4 | [
"BSD-3-Clause"
] | null | null | null | # @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
class Plugin(stack.commands.Plugin):
    """Dump plugin that exports bootaction definitions as a document fragment."""

    def provides(self):
        """Name the document section this plugin contributes."""
        return 'bootaction'

    def run(self, args):
        """Build and return the ``{'bootaction': [...]}`` fragment.

        Returns None (no fragment) when specific sections were requested
        and 'bootaction' is not among them.
        """
        if args and 'bootaction' not in args:
            return

        document_prep = {'bootaction': []}

        bootaction_data = self.owner.call('list.bootaction')
        # With no bootactions defined, the empty list acts as a placeholder.
        if not bootaction_data:
            return document_prep

        document_prep['bootaction'] = [
            {
                'name': entry['bootaction'],
                'kernel': entry['kernel'],
                'ramdisk': entry['ramdisk'],
                'type': entry['type'],
                # Kernel args are stored as one string; split into a list.
                'args': entry['args'].split() if entry['args'] else [],
                'os': entry['os'],
            }
            for entry in bootaction_data
        ]
        return document_prep
| 21.288889 | 60 | 0.662839 | 758 | 0.791232 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.398747 |
c3f051a0ba567a1bfa80d8a15622a56fe1837dca | 608 | py | Python | swcms_social/faq/migrations/0005_auto_20180419_1001.py | ivanff/swcms | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | [
"MIT"
] | null | null | null | swcms_social/faq/migrations/0005_auto_20180419_1001.py | ivanff/swcms | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | [
"MIT"
] | 1 | 2019-06-25T11:17:35.000Z | 2019-06-25T11:17:54.000Z | swcms_social/faq/migrations/0005_auto_20180419_1001.py | ivanff/swcms-social | 20d121003243abcc26e41409bc44f1c0ef3c6c2a | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-04-19 07:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: updates Meta options only.

    AlterModelOptions changes Django's model state (ordering, verbose
    names) without altering the database schema.
    """

    dependencies = [
        ('faq', '0004_auto_20180322_1330'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='faq',
            # FAQ articles sort by their subject's order, then their own order.
            options={'ordering': ('subject__order', 'order'), 'verbose_name': 'статья', 'verbose_name_plural': 'статьи'},
        ),
        migrations.AlterModelOptions(
            name='subject',
            options={'ordering': ('order',), 'verbose_name': 'тема помощи', 'verbose_name_plural': 'темы помощи'},
        ),
    ]
| 27.636364 | 121 | 0.597039 | 555 | 0.867188 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.445313 |