hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f50846638b0fd8d698d6237e05231b9a37225f4b | 22,357 | py | Python | device.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | device.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | device.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | # coding=UTF-8
"""
--------------------------------------------------------
Copyright (c) ****-2018 ESR, Inc. All rights reserved.
--------------------------------------------------------
Author: Mingdong Zhu
Date: 2019/03/07
Design Name: The user interface of the DDS software
Purpose: Design an UI and test function for DDS board
using Python 3.6.3
--------------------------------------------------------
"""
# _name_ = 'main_process'
import time
import numpy as np
import dds
def num_to_bytes(num, bytenum, high_head=True):
"""To get the bytes format of a given decimal number
(used for data_pro)
:param num: A given number
:type num: int
:param bytenum: The number of` bytes (or len()) of the return word
:type bytenum: int
:param high_head: True/False -- big/little-endian; eg:num_to_bytes(1, 2, True/False)-->b'\x00\x01' or b'\x01\x00'
:type high_head: bool
:returns: Bytes for num, len() = bytenum
:rtype: bytes
"""
if high_head:
return np.array([num], dtype='>u8').tobytes()[-bytenum:] # big-endian
else:
return np.array([num], dtype='<u8').tobytes()[:bytenum] # little-endian
def bytes_to_num(bytes_, signed_=True, big_=True):
"""To get the int format of a given bytes
(used for data_pro)
:param bytes_: A given bytes
:type bytes_: bytes
:param signed_: True for signed input
:type signed_: bool
:param big_: Same as the "high_head" in the function 'num_to_bytes'
:type big_: bool
:returns: Int for bytes
:rtype: int
"""
if not signed_:
if big_:
return int.from_bytes(bytes_, byteorder='big')
else:
return int.from_bytes(bytes_, byteorder='little')
else:
if big_:
return int.from_bytes(bytes_, byteorder='big', signed=True)
else:
return int.from_bytes(bytes_, byteorder='little', signed=True)
def bytes_to_hexstr(bytes_, space=True):
"""To get the string format of a given bytes
(used for print/debug)
:param bytes_: A given bytes
:type bytes_: bytes
:param space: True for insert a ' ' per byte
:type space: bool
:returns: String for bytes
:rtype: str
"""
# ss = s_str.encode('hex') # original solution in Python2
string = bytes_.hex() # original solution in Python2
if space:
string_with_space = [string[i:i + 2] for i in range(0, len(string), 2)]
return ' '.join(string_with_space)
else:
return string
class FPGA(dds.HardWare): # GenWave,
""" A class used for integration, in other word, the final application """
"""To clarify the user-defined scan-sign ******
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def __init__(self, dev_index=0, test_mode=False):
""" To launch the Instantiation of classes"""
# GenWave.__init__(self)
dds.HardWare.__init__(self, dev_index=dev_index, test_mode=test_mode)
def cw_play(self, ch_num, amp, freq, phase):
"""Single channel setting for DDS
(can be applied in spectrum test or non-sequence wave_play)
:param ch_num: The number ch to be set, [0,1,...,15] is available
:type ch_num: int
:param amp: Amplitude of DDS, range:[0,1]
:type amp: float
:param freq: Frequency of DDS, unit: MHz
:type freq: int or float
:param amp: Phase of DDS, unit: pi, range: [0,2)
:type amp: float
:returns: unit: MHz, Hz
:rtype: float, float
"""
hp_channel, reg_wr = self.ch2identify(ch_num)
ch_num_byte = num_to_bytes(2**ch_num, 2)
dds_data_list = self.dds_data_form(hp_channel, amp, freq, phase)
print(bytes_to_hexstr(dds_data_list[0]))
self.l_configure(ch_num_byte, reg_wr, dds_data_list[0])
"""
return specification:
1--the real digital freq (set)
2--the difference of freq (real - set)
"""
return dds_data_list[1], dds_data_list[2]
def ttl_set(self, ch_num, level):
"""To set the TTL manually
:param ch_num: channel number of TTL, [0,1] correspond to TTL9,10 and 0x5/6 0,1
:type ch_num: int
:param level: 0/1 for low and high
:type level: int
:returns:
:rtype:
"""
word_in_num = 5*16 + ch_num + 16*level
word_in_bytes = num_to_bytes(word_in_num % 256, 2)
print(bytes_to_hexstr(word_in_bytes))
self.write(word_in_bytes)
def ad5371_ini(self):
"""To initialize the AD5371 which is a 40-ch low-speed DAC
:param :
:type :
:returns:
:rtype:
"""
self.write(b'\x00\x34'+b'\x00'+b'\x02'+b'\x20\x00') # the b'\x02' can be b'\x03',b'\x04'
self.write(b'\x00\x34'+b'\x00'+b'\x03'+b'\x20\x00') # the OFS_g1 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x04'+b'\x20\x00') # the OFS_g2~4 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x80'+b'\x80\x00') # C
self.write(b'\x00\x34'+b'\x00'+b'\x40'+b'\xFF\xFC') # M
self.write(b'\x00\x34'+b'\x00'+b'\xC0'+b'\x80\x00') # X = +10
stamp_list = [0, 1, 3]
self.ad5371_wr_stamp_set(stamp_list) # To set the SPI rate
# self.ad5371_play_set(ch_num, [106, 59, 111])
print('AD5371 initial has been finished')
#################################################################
# integration-experiment function
# 以下都是支持多个通道的操作
#################################################################
def initial_dds(self):
"""To initialize and synchronize the 16 DDSs
:param :
:type :
:returns:
:rtype:
"""
ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
self.delay_para_set()
self.sync_on()
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.initial_AD9915(ch_num_list[index_1])
else:
self.initial_ad9910(ch_num_list[index_1])
self.mannual_sync_2g5()
self.mannual_sync_1g()
self.sync_off()
self.stamp_reset() # When there are some bugs, this one will be used
print('channel ', ch_num_list, ' initial has been finished')
def phase_clear_dds(self, ch_num_list):
"""To clear the phase of DDS in ch_num_list, after that the phase in accumulator will be 0
What's more, if a dds is play at a freq != 0, we need to stop it and clear the phase for "sequence play".
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:returns:
:rtype:
"""
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.phase_clear_2g5(ch_num_list[index_1])
else:
self.phase_clear_1g(ch_num_list[index_1])
# print 'phase of channel ',ch_num_list,' has been cleared'
def sequence_data_download(self, ch_num_list, raw_data_list_list, check_sign=False):
"""To download the sequence play data for multi channels
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:param raw_data_list_list: List of raw_data_list(for one channel)
:*** format of raw_data_list: [ [scan_sign,[A,f(MHz),fai(pi)],[level,time]], ...]
:*** eg: [ [scan_sign0,[A0, f0, fai0],[level0, time0]], [scan_sign1,[A1, f1, fai1],[level1, time1]], ... ]
: scan_sign: int, [0,1, .. ,4,5,..8]--["no scan", "amp"_0, "freq"_0, "phase"_0, "time"_0]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
:type raw_data_list_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
if len(ch_num_list) != len(raw_data_list_list):
print('mismatch of ch_num and data_list')
exit()
else:
play_address_word = b''
for index_1 in range(len(ch_num_list)):
raw_data_list_temp = raw_data_list_list[index_1]
play_address_word_temp = self.single_data_download(ch_num_list[index_1], raw_data_list_temp,
check_sign, print_sign=True)
play_address_word += play_address_word_temp
print('\ndata-download of channel ', ch_num_list, ' has been finished')
self.play_sequence_set(ch_num_list, play_address_word, print_sign=True)
# return play_address_word
"""
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def play(self, var_type, scan_para_list, check_sign=False):
"""To download the scan data and trigger the play
What's more ,a PMT counter receive function will also be carried
:param var_type: Int represents the variable type
:type var_type: int
:param scan_para_list: List of scan data
:*** format: [[N_0, para0, para1], [N_1, para0, para1],..]
:type scan_para_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
print('')
scan_para_gen = self.scan_data_gen(var_type, scan_para_list)
print(bytes_to_hexstr(scan_para_gen[0]))
self.scan_data_download(scan_para_gen[0], print_sign=True)
if check_sign:
if not self.scan_data_check(scan_para_gen[0]):
self.write(b'\x00\x00')
print('Scan_data download check failed!')
exit()
print('Play ins is ', bytes_to_hexstr(b'\x00\x01' + scan_para_gen[0][0:4]))
self.write(b'\x00\x01' + scan_para_gen[0][0:4])
print("total_play ", scan_para_gen[1])
return self.counter_receive(scan_para_gen[1])
def counter_receive(self, cnt_number):#PMT
"""To receive PMT counter's result for each single play
:param cnt_number: Total number of single play in current play
:type cnt_number: int
:returns: A list of PMT counter's result
:rtype: list
"""
readout_bytes = b''
cnt_result_list = []
counter_end_sign = True
print('')
# t1 = time.time()
while counter_end_sign:
temp = self.read()
readout_bytes += temp
while readout_bytes != b'':
# print('Current time consumed is ', time.time()-t1)
# print(bytes_to_hexstr(readout_bytes))
# print('')
if readout_bytes[0:2] == b'\xFF\xFA': # start sign
readout_bytes = readout_bytes[2:]
cnt_addr_start = bytes_to_num(readout_bytes[0:2])
elif readout_bytes[0:2] == b'\xFF\xF5': # stop sign(The end sign of this infinite loop)
readout_bytes = readout_bytes[2:]
cnt_addr_stop = bytes_to_num(readout_bytes[0:2])
counter_end_sign = False # To break from the whole while-loop
break
else:
if readout_bytes[0:2] == b'\xFF\xF8':
cnt_result_list.append('overflow')
else:
cnt_result_list.append(bytes_to_num(readout_bytes[0:2]))
readout_bytes = readout_bytes[2:]
# print('the start and stop of cnt_addr are %d, %d' % (cnt_addr_start, cnt_addr_stop))
# print('The length of result is %d' % len(cnt_result_list))
if cnt_number == (cnt_addr_stop-cnt_addr_start) + 1:
print('The cnt_number match the input scan number')
else:
print('The cnt_number miss match')
# print('Counter number is ', cnt_number)
print('The counter results is ', cnt_result_list)
return cnt_result_list
def ad5371_play(self, ch_num_list, raw_wave_list, play_sign=True, check_sign=False):#PMT
"""To receive PMT counter's result for each single play
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..39]
:type ch_num_list: list
:param raw_wave_list: List of raw_wave_data, len(raw_wave_list[0]) = len(ch_num_list)
:*** format : [[ch0_pt0, ch1_pt0, ...], [ch0_pt1, ch1_pt1, ...], ...]
:type raw_wave_list: list
:param play_sign: True/False -- Enable/Disable the play
:type play_sign: bool
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
addr_start, addr_stop = self.dac_ad5371_data_download(ch_num_list, raw_wave_list, check_sign)
if play_sign:
ch_num = len(ch_num_list)
self.ad5371_play_set(ch_num, [106, 59, 111]) # [106, 59, 111]
self.write(b'\x00\x31' + addr_start + addr_stop)
print(bytes_to_hexstr(b'\x00\x31' + addr_start + addr_stop))
time.sleep((bytes_to_num(addr_stop)-bytes_to_num(addr_start))*1e-6)
if __name__ == '__main__':
"""
var_type = [0, 1, 2, 3, 4]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num)
para_num = 0, 1...
"""
# # Part1
# """ DDS and TTL test modules """
# fpga = DDSTestClass(1)
# fpga.dll.flushInputBuffer() # To refresh the USB, just copy
# fpga.initial_device()
#
# var_type = 0
# play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# fpga.test_fun_basic(play_ch_num_list, var_type, check_sign=True)
# Part2 4
""" AD5371 test modules """
ad5371 = DacTestClass(1)
ad5371.dll.flushInputBuffer()
ad5371.ad5371_ini()
ad5371.ch_test_new(10)
# # Part3
# """ AD5371 test modules """
# fpga = DDSTestClass(1)
# fpga.cw_play(ch_num=5, amp=1, freq=0, phase=0)
# ch_num=5
# hp_channel, reg_wr = fpga.ch2identify(ch_num)
# ch_num_byte = num_to_bytes(2**ch_num, 2)
# print(fpga.l_read(ch_num_byte, reg_wr, right_rd=b'\x00\x00\x00\x00\x00\x00\x00\x00'))
| 40.428571 | 902 | 0.538534 | # coding=UTF-8
"""
--------------------------------------------------------
Copyright (c) ****-2018 ESR, Inc. All rights reserved.
--------------------------------------------------------
Author: Mingdong Zhu
Date: 2019/03/07
Design Name: The user interface of the DDS software
Purpose: Design an UI and test function for DDS board
using Python 3.6.3
--------------------------------------------------------
"""
# _name_ = 'main_process'
import time
import numpy as np
import dds
def num_to_bytes(num, bytenum, high_head=True):
"""To get the bytes format of a given decimal number
(used for data_pro)
:param num: A given number
:type num: int
:param bytenum: The number of` bytes (or len()) of the return word
:type bytenum: int
:param high_head: True/False -- big/little-endian; eg:num_to_bytes(1, 2, True/False)-->b'\x00\x01' or b'\x01\x00'
:type high_head: bool
:returns: Bytes for num, len() = bytenum
:rtype: bytes
"""
if high_head:
return np.array([num], dtype='>u8').tobytes()[-bytenum:] # big-endian
else:
return np.array([num], dtype='<u8').tobytes()[:bytenum] # little-endian
def bytes_to_num(bytes_, signed_=True, big_=True):
"""To get the int format of a given bytes
(used for data_pro)
:param bytes_: A given bytes
:type bytes_: bytes
:param signed_: True for signed input
:type signed_: bool
:param big_: Same as the "high_head" in the function 'num_to_bytes'
:type big_: bool
:returns: Int for bytes
:rtype: int
"""
if not signed_:
if big_:
return int.from_bytes(bytes_, byteorder='big')
else:
return int.from_bytes(bytes_, byteorder='little')
else:
if big_:
return int.from_bytes(bytes_, byteorder='big', signed=True)
else:
return int.from_bytes(bytes_, byteorder='little', signed=True)
def bytes_to_hexstr(bytes_, space=True):
"""To get the string format of a given bytes
(used for print/debug)
:param bytes_: A given bytes
:type bytes_: bytes
:param space: True for insert a ' ' per byte
:type space: bool
:returns: String for bytes
:rtype: str
"""
# ss = s_str.encode('hex') # original solution in Python2
string = bytes_.hex() # original solution in Python2
if space:
string_with_space = [string[i:i + 2] for i in range(0, len(string), 2)]
return ' '.join(string_with_space)
else:
return string
class FPGA(dds.HardWare): # GenWave,
""" A class used for integration, in other word, the final application """
"""To clarify the user-defined scan-sign ******
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def __init__(self, dev_index=0, test_mode=False):
""" To launch the Instantiation of classes"""
# GenWave.__init__(self)
dds.HardWare.__init__(self, dev_index=dev_index, test_mode=test_mode)
def cw_play(self, ch_num, amp, freq, phase):
"""Single channel setting for DDS
(can be applied in spectrum test or non-sequence wave_play)
:param ch_num: The number ch to be set, [0,1,...,15] is available
:type ch_num: int
:param amp: Amplitude of DDS, range:[0,1]
:type amp: float
:param freq: Frequency of DDS, unit: MHz
:type freq: int or float
:param amp: Phase of DDS, unit: pi, range: [0,2)
:type amp: float
:returns: unit: MHz, Hz
:rtype: float, float
"""
hp_channel, reg_wr = self.ch2identify(ch_num)
ch_num_byte = num_to_bytes(2**ch_num, 2)
dds_data_list = self.dds_data_form(hp_channel, amp, freq, phase)
print(bytes_to_hexstr(dds_data_list[0]))
self.l_configure(ch_num_byte, reg_wr, dds_data_list[0])
"""
return specification:
1--the real digital freq (set)
2--the difference of freq (real - set)
"""
return dds_data_list[1], dds_data_list[2]
def ttl_set(self, ch_num, level):
"""To set the TTL manually
:param ch_num: channel number of TTL, [0,1] correspond to TTL9,10 and 0x5/6 0,1
:type ch_num: int
:param level: 0/1 for low and high
:type level: int
:returns:
:rtype:
"""
word_in_num = 5*16 + ch_num + 16*level
word_in_bytes = num_to_bytes(word_in_num % 256, 2)
print(bytes_to_hexstr(word_in_bytes))
self.write(word_in_bytes)
def ad5371_ini(self):
"""To initialize the AD5371 which is a 40-ch low-speed DAC
:param :
:type :
:returns:
:rtype:
"""
self.write(b'\x00\x34'+b'\x00'+b'\x02'+b'\x20\x00') # the b'\x02' can be b'\x03',b'\x04'
self.write(b'\x00\x34'+b'\x00'+b'\x03'+b'\x20\x00') # the OFS_g1 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x04'+b'\x20\x00') # the OFS_g2~4 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x80'+b'\x80\x00') # C
self.write(b'\x00\x34'+b'\x00'+b'\x40'+b'\xFF\xFC') # M
self.write(b'\x00\x34'+b'\x00'+b'\xC0'+b'\x80\x00') # X = +10
stamp_list = [0, 1, 3]
self.ad5371_wr_stamp_set(stamp_list) # To set the SPI rate
# self.ad5371_play_set(ch_num, [106, 59, 111])
print('AD5371 initial has been finished')
#################################################################
# integration-experiment function
# 以下都是支持多个通道的操作
#################################################################
def initial_dds(self):
"""To initialize and synchronize the 16 DDSs
:param :
:type :
:returns:
:rtype:
"""
ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
self.delay_para_set()
self.sync_on()
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.initial_AD9915(ch_num_list[index_1])
else:
self.initial_ad9910(ch_num_list[index_1])
self.mannual_sync_2g5()
self.mannual_sync_1g()
self.sync_off()
self.stamp_reset() # When there are some bugs, this one will be used
print('channel ', ch_num_list, ' initial has been finished')
def phase_clear_dds(self, ch_num_list):
"""To clear the phase of DDS in ch_num_list, after that the phase in accumulator will be 0
What's more, if a dds is play at a freq != 0, we need to stop it and clear the phase for "sequence play".
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:returns:
:rtype:
"""
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.phase_clear_2g5(ch_num_list[index_1])
else:
self.phase_clear_1g(ch_num_list[index_1])
# print 'phase of channel ',ch_num_list,' has been cleared'
def sequence_data_download(self, ch_num_list, raw_data_list_list, check_sign=False):
"""To download the sequence play data for multi channels
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:param raw_data_list_list: List of raw_data_list(for one channel)
:*** format of raw_data_list: [ [scan_sign,[A,f(MHz),fai(pi)],[level,time]], ...]
:*** eg: [ [scan_sign0,[A0, f0, fai0],[level0, time0]], [scan_sign1,[A1, f1, fai1],[level1, time1]], ... ]
: scan_sign: int, [0,1, .. ,4,5,..8]--["no scan", "amp"_0, "freq"_0, "phase"_0, "time"_0]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
:type raw_data_list_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
if len(ch_num_list) != len(raw_data_list_list):
print('mismatch of ch_num and data_list')
exit()
else:
play_address_word = b''
for index_1 in range(len(ch_num_list)):
raw_data_list_temp = raw_data_list_list[index_1]
play_address_word_temp = self.single_data_download(ch_num_list[index_1], raw_data_list_temp,
check_sign, print_sign=True)
play_address_word += play_address_word_temp
print('\ndata-download of channel ', ch_num_list, ' has been finished')
self.play_sequence_set(ch_num_list, play_address_word, print_sign=True)
# return play_address_word
"""
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def play(self, var_type, scan_para_list, check_sign=False):
"""To download the scan data and trigger the play
What's more ,a PMT counter receive function will also be carried
:param var_type: Int represents the variable type
:type var_type: int
:param scan_para_list: List of scan data
:*** format: [[N_0, para0, para1], [N_1, para0, para1],..]
:type scan_para_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
print('')
scan_para_gen = self.scan_data_gen(var_type, scan_para_list)
print(bytes_to_hexstr(scan_para_gen[0]))
self.scan_data_download(scan_para_gen[0], print_sign=True)
if check_sign:
if not self.scan_data_check(scan_para_gen[0]):
self.write(b'\x00\x00')
print('Scan_data download check failed!')
exit()
print('Play ins is ', bytes_to_hexstr(b'\x00\x01' + scan_para_gen[0][0:4]))
self.write(b'\x00\x01' + scan_para_gen[0][0:4])
print("total_play ", scan_para_gen[1])
return self.counter_receive(scan_para_gen[1])
def counter_receive(self, cnt_number):#PMT
"""To receive PMT counter's result for each single play
:param cnt_number: Total number of single play in current play
:type cnt_number: int
:returns: A list of PMT counter's result
:rtype: list
"""
readout_bytes = b''
cnt_result_list = []
counter_end_sign = True
print('')
# t1 = time.time()
while counter_end_sign:
temp = self.read()
readout_bytes += temp
while readout_bytes != b'':
# print('Current time consumed is ', time.time()-t1)
# print(bytes_to_hexstr(readout_bytes))
# print('')
if readout_bytes[0:2] == b'\xFF\xFA': # start sign
readout_bytes = readout_bytes[2:]
cnt_addr_start = bytes_to_num(readout_bytes[0:2])
elif readout_bytes[0:2] == b'\xFF\xF5': # stop sign(The end sign of this infinite loop)
readout_bytes = readout_bytes[2:]
cnt_addr_stop = bytes_to_num(readout_bytes[0:2])
counter_end_sign = False # To break from the whole while-loop
break
else:
if readout_bytes[0:2] == b'\xFF\xF8':
cnt_result_list.append('overflow')
else:
cnt_result_list.append(bytes_to_num(readout_bytes[0:2]))
readout_bytes = readout_bytes[2:]
# print('the start and stop of cnt_addr are %d, %d' % (cnt_addr_start, cnt_addr_stop))
# print('The length of result is %d' % len(cnt_result_list))
if cnt_number == (cnt_addr_stop-cnt_addr_start) + 1:
print('The cnt_number match the input scan number')
else:
print('The cnt_number miss match')
# print('Counter number is ', cnt_number)
print('The counter results is ', cnt_result_list)
return cnt_result_list
def ad5371_play(self, ch_num_list, raw_wave_list, play_sign=True, check_sign=False):#PMT
"""To receive PMT counter's result for each single play
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..39]
:type ch_num_list: list
:param raw_wave_list: List of raw_wave_data, len(raw_wave_list[0]) = len(ch_num_list)
:*** format : [[ch0_pt0, ch1_pt0, ...], [ch0_pt1, ch1_pt1, ...], ...]
:type raw_wave_list: list
:param play_sign: True/False -- Enable/Disable the play
:type play_sign: bool
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
addr_start, addr_stop = self.dac_ad5371_data_download(ch_num_list, raw_wave_list, check_sign)
if play_sign:
ch_num = len(ch_num_list)
self.ad5371_play_set(ch_num, [106, 59, 111]) # [106, 59, 111]
self.write(b'\x00\x31' + addr_start + addr_stop)
print(bytes_to_hexstr(b'\x00\x31' + addr_start + addr_stop))
time.sleep((bytes_to_num(addr_stop)-bytes_to_num(addr_start))*1e-6)
class DDSTestClass(FPGA):
def __init__(self, dev_index=0, test_mode=False):
FPGA.__init__(self, dev_index=dev_index, test_mode=test_mode)
self.pulse_width = 5 # default value
# pulse_width1 = 0.1536
# pulse_width2 = 3.520
# pulse_width_ex = 3.1232 # 3.1168
def initial_device(self):
self.initial_dds()
self.phase_clear_dds([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
# self.stamp_reset()
def sequence_bool2int(self, var_type, raw_data_list_list):
"""To transfer the bool and para_num into scan_sign which can be applied in sequence generation
:param var_type: Int represents the variable type
:type var_type: int
:param raw_data_list_list: List of raw_wave_data
:type raw_data_list_list: list
:returns:
:rtype:
"""
for ch_index in range(len(raw_data_list_list)):
raw_data_list_pro = raw_data_list_list[ch_index]
for seq_index in range(len(raw_data_list_pro)):
if raw_data_list_pro[seq_index][0][0] and var_type > 0:
raw_data_list_pro[seq_index][0] = var_type + 4*raw_data_list_pro[seq_index][0][1]
else:
raw_data_list_pro[seq_index][0] = 0
def gen_fun_sync(self, raw_data_list_list, ch_num_len, cycles):
"""To generate a test_data in the empty list
:param raw_data_list_list:
:type raw_data_list_list: list
:param ch_num_len: the len of ch_num_list
:type ch_num_len: int
:param cycles: the len of ch_num_list
:type cycles: int
:returns:
:rtype:
"""
for index_ch in range(ch_num_len):
for index_cycle in range(cycles):
raw_data_list_list[index_ch].extend([[[True, 0], [1, 0.1, 0], ['high', self.pulse_width]],
[[False, 0], [0, 0.1, 0], ['low', self.pulse_width]],
[[True, 1], [1, 0.1, 0], ['high', self.pulse_width]],
[[False, 1], [0, 0, 0], ['low', self.pulse_width]]])
def scan_gen_basic(self, var_type):
"""To generate a test_scan_data
:param var_type:
:type var_type: int
:returns:
:rtype:
"""
scan_para_list = []
var_list = [[0, 0],
[1, 0.5], [100, .01], [0, 1], [5, 20],
[1, 0.5], [100, .01], [0, 1], [15, 20]]
# var_list = [[0, 0],
# [1, 0.5], [100, 10], [0, 1], [5, 20],
# [1, 0.5], [100, 10], [0, 1], [15, 20]]
n_list = [1, 2]
for loop_index in range(2):
for index in range(len(n_list)):
scan_para_list.append([n_list[index], var_list[var_type][index], var_list[var_type+4][index]])
print('scan_para_list is ', scan_para_list)
return scan_para_list
def test_fun_basic(self, play_ch_num_list, var_type, check_sign=False):
"""To carry out the test for DDS with scan
:type play_ch_num_list: list
:type var_type: int
:type check_sign: bool
:returns:
:rtype:
"""
# pulse_width = 4
ch_num_len = len(play_ch_num_list)
cycles = 2
loop_cycles = 1
# To generate raw_data_list_list
raw_data_list_list = []
for ii in range(ch_num_len): # To generate a list of lists
raw_data_list_list.append([])
self.gen_fun_sync(raw_data_list_list, ch_num_len, cycles)
self.sequence_bool2int(var_type, raw_data_list_list)
print(raw_data_list_list[0])
# To generate scan_para
scan_para_list = self.scan_gen_basic(var_type)
# To download the sequence data
t1 = time.time()
self.sequence_data_download(play_ch_num_list, raw_data_list_list, check_sign)
print(play_ch_num_list)
print(raw_data_list_list)
print(len(play_ch_num_list))
print(len(raw_data_list_list))
print('Time consumed in download is', time.time()-t1)
# To download the scan data and play
# t1 = time.time()
#scan_para_list = [[1, 1, 1], [2, 0.5, 1], [1, 1, 1], [2, 0.5, 1]]
scan_para_list = [[2, 1, 0]]
#for loop_index in range(1):
self.play(0, scan_para_list, check_sign)
print(var_type)
print(scan_para_list)
# print('Current time consumed is ', time.time()-t1)
# print('Time consumed in total is', time.time()-t1)
def spectrum_test(self):
"""A method to test the spectrum"""
ch_num = 0
freq_set = 600
a, b = self.cw_play(ch_num, 1, freq_set, 0) # amp = 1, phase = 0
print(a, ' ', b)
class DacTestClass(FPGA):
def __init__(self, dev_index=0, test_mode=False):
FPGA.__init__(self, dev_index=dev_index, test_mode=test_mode)
# Read!!
def ch_test_new(self, ch_number, sin_pts=50):
"""To make AD5371 play a 50-point sine waveform
:param ch_number: To set the number of channels enabled to play
:type ch_number: int
:returns:
:rtype:
"""
ch_list = []
raw_wave_list = []
for index in range(ch_number):
ch_list.append(index)
print(ch_list)
for x in range(sin_pts):
raw_wave_list.append([])
data_pts = np.sin((float(x)/sin_pts * 0.8 +0)*2*np.pi) * 10
for loop_index in range(ch_number):
raw_wave_list[x].append(data_pts)
print(raw_wave_list)
self.ad5371_play(ch_list, raw_wave_list, play_sign=True, check_sign=True)
if __name__ == '__main__':
"""
var_type = [0, 1, 2, 3, 4]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num)
para_num = 0, 1...
"""
# # Part1
# """ DDS and TTL test modules """
# fpga = DDSTestClass(1)
# fpga.dll.flushInputBuffer() # To refresh the USB, just copy
# fpga.initial_device()
#
# var_type = 0
# play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# fpga.test_fun_basic(play_ch_num_list, var_type, check_sign=True)
# Part2 4
""" AD5371 test modules """
ad5371 = DacTestClass(1)
ad5371.dll.flushInputBuffer()
ad5371.ad5371_ini()
ad5371.ch_test_new(10)
# # Part3
# """ AD5371 test modules """
# fpga = DDSTestClass(1)
# fpga.cw_play(ch_num=5, amp=1, freq=0, phase=0)
# ch_num=5
# hp_channel, reg_wr = fpga.ch2identify(ch_num)
# ch_num_byte = num_to_bytes(2**ch_num, 2)
# print(fpga.l_read(ch_num_byte, reg_wr, right_rd=b'\x00\x00\x00\x00\x00\x00\x00\x00'))
| 505 | 5,387 | 50 |
5302b93b77e85403459c6d3e9e7609e976336e0b | 403 | py | Python | listings/urls.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | listings/urls.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | listings/urls.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | from django.conf.urls import url
from .views import listings_listing_view, listings_api_view
# Two routes share the same hostname pattern (lowercase letters, digits,
# dots and dashes, optional trailing slash): an HTML listing page and the
# corresponding JSON API endpoint under /api/v1/.
urlpatterns = [
    url(
        r'^listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
        listings_listing_view,
        name='listings_listing_view',
    ),
    url(
        r'^api/v1/listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
        listings_api_view,
        name='listings_api_view',
    )
]
| 21.210526 | 66 | 0.610422 | from django.conf.urls import url
from .views import listings_listing_view, listings_api_view
urlpatterns = [
url(
r'^listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
listings_listing_view,
name='listings_listing_view',
),
url(
r'^api/v1/listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
listings_api_view,
name='listings_api_view',
)
]
| 0 | 0 | 0 |
10559bf17d1f5c778b0ad69a4bbaca775ed978fb | 641 | py | Python | code/begin/Session.py | redxyb/Flask | 4ee226501f16eb0fa5cb585dc6bf780005fa8a28 | [
"MIT"
] | null | null | null | code/begin/Session.py | redxyb/Flask | 4ee226501f16eb0fa5cb585dc6bf780005fa8a28 | [
"MIT"
] | null | null | null | code/begin/Session.py | redxyb/Flask | 4ee226501f16eb0fa5cb585dc6bf780005fa8a28 | [
"MIT"
] | null | null | null | '''
Author: xyb
Date: 2020-08-10 18:35:32
LastEditTime: 2020-08-10 18:52:50
'''
from flask import Flask, make_response, request
app = Flask(__name__)
app.secret_key = 'dfslkfjdlfsdkjfnskj' #直接设置
#间接设置
# class DefaultConfig(object):
# SECRET_KEY = 'dfslkfjdlfsdkjfnskj'
# app.config.from_object(DefaultConfig)
@app.route('/set_session')
@app.route('/get_session')
if __name__ == "__main__":
app.run(host='', port=5000, debug=False)
| 21.366667 | 53 | 0.700468 | '''
Author: xyb
Date: 2020-08-10 18:35:32
LastEditTime: 2020-08-10 18:52:50
'''
# Minimal Flask demo of cookie-backed sessions.
# FIX: `session` was used below without being imported, which raised
# NameError on the first request to either route.
from flask import Flask, make_response, request, session

app = Flask(__name__)

app.secret_key = 'dfslkfjdlfsdkjfnskj'  # set the secret key directly
# indirect alternative: load the secret key from a config object
# class DefaultConfig(object):
#     SECRET_KEY = 'dfslkfjdlfsdkjfnskj'
# app.config.from_object(DefaultConfig)


@app.route('/set_session')
def set_session():
    """Store a demo username in the signed session cookie."""
    session['username'] = 'xyb'
    # typo 'seccion' kept deliberately: it is a public response body
    return 'set seccion is ok'


@app.route('/get_session')
def get_session():
    """Read the username back from the session (None when unset)."""
    username = session.get('username')
    return 'get session username {}'.format(username)


if __name__ == "__main__":
    app.run(host='', port=5000, debug=False)
| 150 | 0 | 44 |
a20f29e99f0bfe18d2e7d7416b1c44845378a3e2 | 10,451 | py | Python | audio_pipeline/audio_processing/subtitle_utils.py | AlexWimpory/video-caption | 4252835bc69ecb54e6d0e0af49f2e77c76fd78ad | [
"MIT"
] | null | null | null | audio_pipeline/audio_processing/subtitle_utils.py | AlexWimpory/video-caption | 4252835bc69ecb54e6d0e0af49f2e77c76fd78ad | [
"MIT"
] | null | null | null | audio_pipeline/audio_processing/subtitle_utils.py | AlexWimpory/video-caption | 4252835bc69ecb54e6d0e0af49f2e77c76fd78ad | [
"MIT"
] | 1 | 2020-12-02T17:21:12.000Z | 2020-12-02T17:21:12.000Z | import tempfile
from pysubs2 import SSAFile, SSAStyle, Color, SSAEvent, make_time
from audio_pipeline import logging_config
from audio_pipeline.audio_processing.ffmpeg_processor import run_ffmpeg
logger = logging_config.get_logger(__name__)
def _adjust_for_clashing_subs(combined_subs, working_sub, exclude):
    """
    Helper function for the append code. Looking for overlapping subtitles and make adjustments

    Trims working_sub in place so it does not overlap any event in
    combined_subs whose style appears in `exclude`.  Returns a tuple
    (first, second): `first` is the possibly-trimmed working_sub (None when
    it was swallowed entirely) and `second` is a trailing fragment created
    when an existing event split working_sub in two (None otherwise).
    """
    # If we haven't got a set of subs to check against early return
    if not combined_subs or not exclude:
        return working_sub, None
    second_working_sub = None
    for sub in combined_subs:
        # Standard style exit
        # Only events whose style is listed in `exclude` may displace
        # working_sub; everything else is ignored.
        if exclude and sub.style not in exclude:
            continue
        if sub.start <= working_sub.start <= sub.end:
            # Drop the start of the working sub
            working_sub.start = sub.end
        elif working_sub.start <= sub.start <= working_sub.end:
            # Drop the end of the working sub
            if sub.end < working_sub.end:
                # We might need to split the sub
                # The existing event sits fully inside working_sub: keep the
                # part before it here and carry the part after it separately.
                second_working_sub = working_sub.copy()
                second_working_sub.start = sub.end
                second_working_sub.end = working_sub.end
            working_sub.end = sub.start
    # Check that we now have a sub that has no duration
    if working_sub.start >= working_sub.end:
        working_sub = None
    return working_sub, second_working_sub
def append_subs(combined_subs, new_subs, style=None, formatter=None, exclude=None):
    """
    Append new subtitle events onto an existing collection.

    Each incoming event is optionally restyled and reformatted, then trimmed
    (and possibly split) around existing events whose style is listed in
    `exclude`.  Returns a freshly sorted SSAFile.
    """
    exclude = [] if exclude is None else exclude
    result = SSAFile()
    if combined_subs:
        # Keep everything that was already present.
        result.extend(combined_subs)
    for event in new_subs:
        if style:
            event.style = style
        if formatter:
            event.text = formatter(event.text)
        # Trim the event around clashing (excluded-style) events.
        trimmed, tail = _adjust_for_clashing_subs(combined_subs, event, exclude)
        if trimmed:
            result.append(trimmed)
        if tail:
            result.append(tail)
    result.sort()
    return result
def flatten_subs(starting_subs, style=None):
    """
    Collapse runs of adjacent events carrying identical text into one event.

    With `style` set, only events of that style are flattened; all other
    events are copied through untouched.
    """
    flattened = SSAFile()
    for event in starting_subs:
        # Events of other styles are skipped here and re-added below.
        if style and event.style != style:
            continue
        last = flattened[-1] if flattened else None
        if last is not None and event.text == last.text and event.start <= last.end:
            # Same text, overlapping/touching: stretch the previous event.
            last.end = max(last.end, event.end)
        else:
            flattened.append(event)
    if style:
        for event in starting_subs:
            if event.style != style:
                flattened.append(event)
    flattened.sort()
    return flattened
def merge_subs(starting_subs, tolerance_millis=1000, style=None):
    """
    Close small silences between consecutive subtitle events.

    Whenever the gap between one event and the next is at most
    `tolerance_millis`, the earlier event is stretched so it ends exactly
    where the next one starts.  With `style` set, only events of that style
    are processed; the rest are copied through unchanged.
    """
    merged = SSAFile()
    for event in starting_subs:
        if style and event.style != style:
            continue
        # Stretch the previous event forward when the gap is small enough.
        if merged:
            previous = merged[-1]
            if previous.end + tolerance_millis >= event.start:
                previous.end = event.start
        merged.append(event)
    if style:
        for event in starting_subs:
            if event.style != style:
                merged.append(event)
    merged.sort()
    return merged
def compress_subs(subs, max_chars=30, max_stretch_millis=3000, max_oldest_millis=10000, style=None):
    """
    Mostly for the use of speech subtitles this will take individual words and create a running subtitle

    :param subs: source events (typically one word each)
    :param max_chars: reset the running caption once it would exceed this length
    :param max_stretch_millis: gap tolerance handed to merge_subs
    :param max_oldest_millis: reset once the oldest word has been on screen this long
    :param style: only process events of this style (others pass through)
    """
    # Phase 1 based on character count so that we dont overflow the screen
    # Phase 2 is to make sure that the oldest word on the screen has not been there for too long
    # First remove gaps where they exist
    merged_subs = merge_subs(subs, max_stretch_millis, style)
    char_count = 0
    oldest_start_time = 0
    compressed_subs = SSAFile()
    for sub in merged_subs:
        # FIX: compare styles by value -- the original used `is not`, whose
        # result on strings depends on interning and could wrongly skip or
        # include events.  Matches the `!=` used by the sibling functions.
        if style and sub.style != style:
            continue
        char_count += len(sub.text)
        # Check the character count and reset if needed
        if char_count > max_chars:
            char_count = len(sub.text)
            oldest_start_time = sub.start
        # Check if subtitle has been on screen for too long then reset
        elif sub.start - oldest_start_time > max_oldest_millis:
            char_count = len(sub.text)
            oldest_start_time = sub.start
        # If there is a gap in time between subtitles then reset
        elif len(compressed_subs) > 0 and sub.start != compressed_subs[-1].end:
            char_count = len(sub.text)
            oldest_start_time = sub.start
        # Add this sub
        elif len(compressed_subs) > 0:
            sub.text = compressed_subs[-1].text + ' ' + sub.text
            char_count += 1  # account for the joining space
        compressed_subs.append(sub)
    # Append all the other subs
    if style:
        for sub in merged_subs:
            if sub.style != style:
                compressed_subs.append(sub)
    compressed_subs.sort()
    return compressed_subs
def remove_tiny_subs(subs, duration_millis=1000, left_millis=2000, right_millis=2000, style=None):
    """
    Remove any subs that are out on their own or too short

    An event shorter than ``duration_millis`` is kept only when it has a
    neighbour within ``left_millis``/``right_millis``; pass None for both
    to drop every short event unconditionally.
    """
    copy_subs = SSAFile()
    new_subs = SSAFile()
    for sub in subs:
        # FIX: compare styles by value -- the original used `is`, whose
        # result on strings depends on interning.
        if (style and sub.style == style) or not style:
            copy_subs.append(sub)
    for i, sub in enumerate(copy_subs):
        # if it is longer it goes in
        if sub.duration >= duration_millis:
            new_subs.append(sub)
            continue
        # if its the first one then look right only
        # if its the last one then look left only
        # if its in the middle then look both ways
        if left_millis is None and right_millis is None:
            continue
        # FIX: a lone short event has no neighbours that could rescue it;
        # the original indexed copy_subs[1] here and raised IndexError.
        if len(copy_subs) == 1:
            continue
        if i == 0:
            if copy_subs[i + 1].start - sub.end < right_millis:
                new_subs.append(sub)
        elif i == len(copy_subs) - 1:
            if sub.start - copy_subs[i - 1].end < left_millis:
                new_subs.append(sub)
        elif copy_subs[i + 1].start - sub.end < right_millis or sub.start - copy_subs[i - 1].end < left_millis:
            new_subs.append(sub)
    if style:
        for sub in subs:
            if sub.style != style:
                new_subs.append(sub)
    new_subs.sort()
    return new_subs
def add_styles(subs, style_list=None):
    """
    Register SSA styles on `subs` derived from each style's name.

    Style names are scanned for tokens controlling alignment (numbered like
    a keypad), primary colour ('p' prefix), background colour ('b' prefix)
    and the bold/italic font flags.  Returns `subs` for convenience.
    """
    if style_list is None:
        style_list = []
    # Ordered token tables: the first matching token wins, so the compound
    # names (e.g. 'top_left') must be tested before their parts ('top').
    alignments = (
        ('top_left', 7), ('top_right', 9),
        ('bottom_left', 1), ('bottom_right', 3),
        ('left', 4), ('right', 6),
        ('top', 8), ('bottom', 2),
    )
    colour_specs = (
        ('red', (255, 0, 0, 0)),
        ('blue', (0, 0, 255, 0)),
        ('green', (0, 255, 0, 0)),
        ('white', (255, 255, 255, 0)),
    )
    for style in style_list:
        new_style = SSAStyle()
        for token, alignment in alignments:
            if token in style:
                new_style.alignment = alignment
                break
        for name, rgba in colour_specs:
            # Primary (text) colour tokens: 'pred', 'pblue', ...
            if 'p' + name in style:
                new_style.primarycolor = Color(*rgba)
                break
        for name, rgba in colour_specs:
            # Background colour tokens: 'bred', 'bblue', ...
            if 'b' + name in style:
                new_style.backcolor = Color(*rgba)
                break
        if 'bold' in style:
            new_style.bold = True
        if 'italic' in style:
            new_style.italic = True
        subs.styles[style] = new_style
    return subs
def save_to_subtitles(results, formatter):
    """
    Build a subtitle file from recognition results.

    :param results: iterable of dicts with 'start'/'end' times in seconds
                    and an optional truthy 'highlight' flag
    :param formatter: callable turning one result dict into the event text
    :return: populated SSAFile
    """
    subs = SSAFile()
    for item in results:
        event = SSAEvent(
            start=make_time(s=item['start']),
            end=make_time(s=item['end']),
            text=formatter(item),
        )
        # Highlighted results are rendered with the 'red' style.
        if item.get('highlight'):
            event.style = 'red'
        subs.append(event)
    logger.info(f'Processed {len(results)} results to subtitle events')
    return subs
def create_styles(subs):
    """
    Register a style object for every distinct style name used by `subs`.
    """
    add_styles(subs, {event.style for event in subs})
def burn_subtitles_into_video(video_path, subtitle_path, output_path):
    """
    Create new video with subtitles burned in
    :param video_path: input video path
    :param subtitle_path: subtitle input path (.ass, or .srt which is converted)
    :param output_path: directory in which the output video is created
    :return: File name that it has written to
    """
    temp_file_name = tempfile.mktemp(dir=output_path, prefix='output_with_hard_subtitles_', suffix='.mp4')
    # Handle srt files if needed.
    # FIX: the original tested endswith('.srt.') (trailing dot), so the
    # conversion branch could never fire for a real .srt file.
    if subtitle_path.endswith('.srt'):
        subtitle_ass_file = subtitle_path.replace(".srt", ".ass")
        run_ffmpeg(f'ffmpeg -y -i {subtitle_path} {subtitle_ass_file}')
    else:
        subtitle_ass_file = subtitle_path
    # NOTE(review): the paths are interpolated unquoted into the command
    # line; paths containing spaces will break -- consider quoting upstream.
    run_ffmpeg(f'ffmpeg -i {video_path} -vf "ass={subtitle_ass_file}" {temp_file_name}')
    logger.info(f'Burnt subtitles {subtitle_path} to {video_path} stored in {temp_file_name}')
    return temp_file_name
| 36.799296 | 111 | 0.62683 | import tempfile
from pysubs2 import SSAFile, SSAStyle, Color, SSAEvent, make_time
from audio_pipeline import logging_config
from audio_pipeline.audio_processing.ffmpeg_processor import run_ffmpeg
logger = logging_config.get_logger(__name__)
def _adjust_for_clashing_subs(combined_subs, working_sub, exclude):
"""
Helper function for the append code. Looking for overlapping subtitles and make adjustments
"""
# If we haven't got a set of subs to check against early return
if not combined_subs or not exclude:
return working_sub, None
second_working_sub = None
for sub in combined_subs:
# Standard style exit
if exclude and sub.style not in exclude:
continue
if sub.start <= working_sub.start <= sub.end:
# Drop the start of the working sub
working_sub.start = sub.end
elif working_sub.start <= sub.start <= working_sub.end:
# Drop the end of the working sub
if sub.end < working_sub.end:
# We might need to split the sub
second_working_sub = working_sub.copy()
second_working_sub.start = sub.end
second_working_sub.end = working_sub.end
working_sub.end = sub.start
# Check that we now have a sub that has no duration
if working_sub.start >= working_sub.end:
working_sub = None
return working_sub, second_working_sub
def append_subs(combined_subs, new_subs, style=None, formatter=None, exclude=None):
"""
Append a set of subs to a current set avoiding a clash if needed. Also allows for styling and formatting
"""
if exclude is None:
exclude = []
new_combined_subs = SSAFile()
if combined_subs:
# First add the subs we are keeping
new_combined_subs.extend(combined_subs)
for sub in new_subs:
# Add a style
if style:
sub.style = style
# Perform the formatting
if formatter:
sub.text = formatter(sub.text)
# See if we want to cater for clashes
sub, second_sub = _adjust_for_clashing_subs(combined_subs, sub, exclude)
# Prepare results
if sub:
new_combined_subs.append(sub)
if second_sub:
new_combined_subs.append(second_sub)
new_combined_subs.sort()
return new_combined_subs
def flatten_subs(starting_subs, style=None):
"""
Take some subs and merge them together (adjacent subtitle which are the same)
"""
new_subs = SSAFile()
for sub in starting_subs:
# Standard style exit
if style and sub.style != style:
continue
if not new_subs:
new_subs.append(sub)
elif sub.text == new_subs[-1].text and sub.start <= new_subs[-1].end:
if sub.end > new_subs[-1].end:
new_subs[-1].end = sub.end
else:
new_subs.append(sub)
# Copy in all the subs we skipped due to styling
if style:
for sub in starting_subs:
if sub.style != style:
new_subs.append(sub)
new_subs.sort()
return new_subs
def merge_subs(starting_subs, tolerance_millis=1000, style=None):
"""
Take some subs and eliminate any blank spots where they are less than a tolerance (default of 1 second)
"""
merged_subs = SSAFile()
for sub in starting_subs:
if style and sub.style != style:
continue
if merged_subs and merged_subs[-1].end + tolerance_millis >= sub.start:
merged_subs[-1].end = sub.start
merged_subs.append(sub)
if style:
for sub in starting_subs:
if sub.style != style:
merged_subs.append(sub)
merged_subs.sort()
return merged_subs
def compress_subs(subs, max_chars=30, max_stretch_millis=3000, max_oldest_millis=10000, style=None):
"""
Mostly for the use of speech subtitles this will take individual words and create a running subtitle
"""
# Phase 1 based on character count so that we dont overflow the screen
# Phase 2 is to make sure that the oldest word on the screen has not been there for too long
# First remove gaps where they exist
merged_subs = merge_subs(subs, max_stretch_millis, style)
char_count = 0
oldest_start_time = 0
compressed_subs = SSAFile()
for sub in merged_subs:
if style and sub.style is not style:
continue
char_count += len(sub.text)
# Check the character count and reset if needed
if char_count > max_chars:
char_count = len(sub.text)
oldest_start_time = sub.start
# Check if subtitle has been on screen for too long then reset
elif sub.start - oldest_start_time > max_oldest_millis:
char_count = len(sub.text)
oldest_start_time = sub.start
# If there is a gap in time between subtitles then reset
elif len(compressed_subs) > 0 and sub.start != compressed_subs[-1].end:
char_count = len(sub.text)
oldest_start_time = sub.start
# Add this sub
elif len(compressed_subs) > 0:
sub.text = compressed_subs[-1].text + ' ' + sub.text
char_count += 1
compressed_subs.append(sub)
# Append all the other subs
if style:
for sub in merged_subs:
if sub.style is not style:
compressed_subs.append(sub)
compressed_subs.sort()
return compressed_subs
def remove_tiny_subs(subs, duration_millis=1000, left_millis=2000, right_millis=2000, style=None):
"""
Remove any subs that are out on their own or too short
"""
copy_subs = SSAFile()
new_subs = SSAFile()
for sub in subs:
if (style and sub.style is style) or not style:
copy_subs.append(sub)
for i, sub in enumerate(copy_subs):
# if it is longer it goes in
if sub.duration >= duration_millis:
new_subs.append(sub)
continue
# if its the first one then look right only
# if its the last one then look left only
# if its in the middle then look both ways
if left_millis is None and right_millis is None:
continue
if i == 0:
if copy_subs[i + 1].start - sub.end < right_millis:
new_subs.append(sub)
elif i == len(copy_subs) - 1:
if sub.start - copy_subs[i - 1].end < left_millis:
new_subs.append(sub)
elif copy_subs[i + 1].start - sub.end < right_millis or sub.start - copy_subs[i - 1].end < left_millis:
new_subs.append(sub)
if style:
for sub in subs:
if sub.style is not style:
new_subs.append(sub)
new_subs.sort()
return new_subs
def add_styles(subs, style_list=None):
"""
Add styles to the subtitle file based on the style strings in each individual subtitle
"""
if style_list is None:
style_list = []
for style in style_list:
new_style = SSAStyle()
# Number for position refers to the number on a keypad
if 'top_left' in style:
new_style.alignment = 7
elif 'top_right' in style:
new_style.alignment = 9
elif 'bottom_left' in style:
new_style.alignment = 1
elif 'bottom_right' in style:
new_style.alignment = 3
elif 'left' in style:
new_style.alignment = 4
elif 'right' in style:
new_style.alignment = 6
elif 'top' in style:
new_style.alignment = 8
elif 'bottom' in style:
new_style.alignment = 2
# Setting the RGB values for the text
if 'pred' in style:
new_style.primarycolor = Color(255, 0, 0, 0)
elif 'pblue' in style:
new_style.primarycolor = Color(0, 0, 255, 0)
elif 'pgreen' in style:
new_style.primarycolor = Color(0, 255, 0, 0)
elif 'pwhite' in style:
new_style.primarycolor = Color(255, 255, 255, 0)
# Setting the RGB values for the text's background
if 'bred' in style:
new_style.backcolor = Color(255, 0, 0, 0)
elif 'bblue' in style:
new_style.backcolor = Color(0, 0, 255, 0)
elif 'bgreen' in style:
new_style.backcolor = Color(0, 255, 0, 0)
elif 'bwhite' in style:
new_style.backcolor = Color(255, 255, 255, 0)
# Setting different font types
if 'bold' in style:
new_style.bold = True
if 'italic' in style:
new_style.italic = True
subs.styles[style] = new_style
return subs
def save_to_subtitles(results, formatter):
"""
Save to subtitle file
:param results: Dictionary containing info and start/end times
:param formatter: Apply text formating to the subtitle
:return: New subtitle file
"""
subs = SSAFile()
for result in results:
event = SSAEvent(start=make_time(s=result['start']),
end=make_time(s=result['end']), text=formatter(result))
if 'highlight' in result and result['highlight']:
event.style = 'red'
subs.append(event)
logger.info(f'Processed {len(results)} results to subtitle events')
return subs
def create_styles(subs):
"""
Gather text from subtitles and call the subtitle adder
"""
styles = set()
for sub in subs:
styles.add(sub.style)
add_styles(subs, styles)
def burn_subtitles_into_video(video_path, subtitle_path, output_path):
"""
Create new video with subtitles burned in
:param video_path: input video path
:param subtitle_path: subtitle input path
:param output_path: video output path
:return: File name that it has written to
"""
temp_file_name = tempfile.mktemp(dir=output_path, prefix='output_with_hard_subtitles_', suffix='.mp4')
# Handle srt files if needed
if subtitle_path.endswith('.srt.'):
subtitle_ass_file = subtitle_path.replace(".srt", ".ass")
run_ffmpeg(f'ffmpeg -y -i {subtitle_path} {subtitle_ass_file}')
else:
subtitle_ass_file = subtitle_path
run_ffmpeg(f'ffmpeg -i {video_path} -vf "ass={subtitle_ass_file}" {temp_file_name}')
logger.info(f'Burnt subtitles {subtitle_path} to {video_path} stored in {temp_file_name}')
return temp_file_name
| 0 | 0 | 0 |
365aacb7e69ac288818c4cf46c2f47b217dbd9af | 962 | py | Python | drug/migrations/0004_auto_20190604_2300.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | null | null | null | drug/migrations/0004_auto_20190604_2300.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | 8 | 2021-03-18T22:27:44.000Z | 2022-02-10T09:18:50.000Z | drug/migrations/0004_auto_20190604_2300.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | 1 | 2021-09-20T06:37:41.000Z | 2021-09-20T06:37:41.000Z | # Generated by Django 2.2 on 2019-06-04 23:00
from django.db import migrations, models
| 29.151515 | 110 | 0.589397 | # Generated by Django 2.2 on 2019-06-04 23:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2) for the drug app.

    Removes the ``Drugs.name`` field -- presumably superseded by the
    brand/generic name pair below; confirm against the model history --
    and re-declares help texts and verbose names on ``brand_name``,
    ``des`` and ``generic_name``.
    """

    dependencies = [
        ('drug', '0003_auto_20190604_2233'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='drugs',
            name='name',
        ),
        migrations.AlterField(
            model_name='drugs',
            name='brand_name',
            field=models.CharField(help_text='brand name', max_length=100, verbose_name='Brand name'),
        ),
        migrations.AlterField(
            model_name='drugs',
            name='des',
            field=models.TextField(help_text='drug description', max_length=1000, verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='drugs',
            name='generic_name',
            field=models.CharField(help_text='scientific name', max_length=100, verbose_name='Generic name'),
        ),
    ]
| 0 | 850 | 23 |
c0e7ce2e4c3ab7a07d885ea07518fe7f67108216 | 3,006 | py | Python | butunleme/160401007/butunleme.py | gizemozgun/kriptografi | fa395ea7592f2e6cf0cbb44a20f876d30a1d502a | [
"Unlicense"
] | 8 | 2020-04-15T12:06:42.000Z | 2022-01-21T10:35:51.000Z | butunleme/160401007/butunleme.py | gizemozgun/kriptografi | fa395ea7592f2e6cf0cbb44a20f876d30a1d502a | [
"Unlicense"
] | 3 | 2020-05-13T20:41:27.000Z | 2020-06-11T00:45:27.000Z | butunleme/160401007/butunleme.py | gizemozgun/kriptografi | fa395ea7592f2e6cf0cbb44a20f876d30a1d502a | [
"Unlicense"
] | 54 | 2020-04-23T14:58:50.000Z | 2020-06-26T06:00:32.000Z | #Gizem Özgün / 160401007
# -*- coding: utf-8 -*-
import sys
if __name__ == "__main__":
menu()
| 28.093458 | 135 | 0.558217 | #Gizem Özgün / 160401007
# -*- coding: utf-8 -*-
import sys
def str_to_binary(string):  # converts a string to its binary representation
    """Return *string* as a concatenation of 8-bit binary character codes."""
    # join over a generator avoids the original quadratic `binary += ...`
    return "".join(f"{ord(ch):08b}" for ch in string)
def binary_to_dec(binary):  # converts a binary number to decimal
    """Convert a binary number (string or int of 0/1 digits) to its decimal value."""
    # int(..., 2) replaces the hand-rolled reversed-digit power loop; the
    # str() keeps the original's support for int inputs like 1101.
    return int(str(binary), 2)
def decimal_to_hex(decimal):  # converts a decimal number to hexadecimal
    """Return the '0x...'-prefixed hexadecimal string for *decimal* (thin wrapper over hex())."""
    return hex(decimal)
def hex_to_ascii(hex):  # ASCII code of each character of a hex string
    """Return the ASCII code of every character after the '0x' prefix.

    Note: the parameter shadows the builtin ``hex``; the name is kept for
    interface compatibility with existing callers.
    """
    return [ord(digit) for digit in hex[2:]]
def get_bit_length(n):  # number of bits needed to represent n
    """Return the bit length of *n* (0 -> 0).

    Uses int.bit_length(), which also terminates for negative inputs --
    the original right-shift loop never reached 0 for them and spun forever.
    """
    return n.bit_length()
def ozet(string):
    """Compute the 32-bit digest of *string*.

    Each character is run through char -> binary -> decimal -> hex, the
    ASCII codes of the resulting hex digits are multiplied together, and
    the product is shifted until it is exactly 32 bits wide.
    """
    codes = []
    for ch in string:
        hexa = decimal_to_hex(binary_to_dec(str_to_binary(ch)))
        codes.extend(hex_to_ascii(hexa))
    digest = 1
    for code in codes:
        digest *= code
    # Shift the product until it occupies exactly 32 bits.
    while get_bit_length(digest) < 32:
        digest = digest << 1
    while get_bit_length(digest) > 32:
        digest = digest >> 1
    return digest
def menu():
    """Interactive console menu: create a digest (1), verify a file against it (2), quit (3)."""
    secenek = input("Uygulamak istediginiz secenegin numarasini giriniz:\n Menu:\n1 - Ozet alma\n2 - Ozet dogrulama\n3 - Cikis\n ")
    if secenek == "1":
        value = input("Ozet degeri alinacak 6 karakterlik bir girdi giriniz:")
        if len(value) == 6:
            # Write the digest of the 6-character input to golge.txt.
            # The with-block guarantees the file is closed on every path.
            with open("golge.txt", "w") as golge:
                golge.write(str(ozet(value)))
            print("golge.txt olusturuldu")
        else:
            print("Lutfen 6 karakter uzunlugunda bir girdi giriniz.")
            menu()
    elif secenek == "2":
        value = input("Kontrol etmek istediginiz dosya adini giriniz:")
        try:
            # Context managers close both files even when no line matches
            # (the original only closed them on a successful match).
            with open(value, 'r') as new_file, open('golge.txt', 'r') as golge:
                golge_ozet = golge.read()
                for line in new_file:
                    # FIX: strip the trailing newline so the digest matches one
                    # written for the raw 6-character input, and compare as
                    # strings -- golge.txt stores the digest in text form (the
                    # original compared int to str, which never matched).
                    new_ozet = ozet(line.rstrip('\n'))
                    if str(new_ozet) == golge_ozet:
                        print("Eslesen deger: " + str(new_ozet))
                        break
                    else:
                        print("Eslesen deger bulunamadi")
        # Narrowed from a bare except: only file errors are expected here;
        # anything else now propagates instead of being silently swallowed.
        except OSError:
            print("dosya hatasi")
    elif secenek == "3":
        print("Cikis yapiliyor")
        sys.exit()
    else:
        print("Lutfen menude olan gecerli rakamlardan birini girin!")
        menu()
# Launch the interactive menu when executed as a script.
if __name__ == "__main__":
    menu()
| 2,742 | 0 | 161 |
37639967ffaa3c0210549f24f634d2e636218c9a | 2,561 | py | Python | BayOptPy/freesurfer_preprocess/uniform_distributed_dataset.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | 3 | 2020-04-09T16:53:54.000Z | 2020-04-21T16:49:52.000Z | BayOptPy/freesurfer_preprocess/uniform_distributed_dataset.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | null | null | null | BayOptPy/freesurfer_preprocess/uniform_distributed_dataset.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | null | null | null | # This script assumes taht the freesurfer csv for the BANC data has already been generated
import os
import pandas as pd
import numpy as np
import pdb
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from BayOptPy.helperfunctions import get_paths, get_data, drop_missing_features
def str_to_bool(s):
    '''
    Map the literal strings 'True'/'False' to booleans.

    argparse hands option values through as plain strings, so the
    conversion has to happen here.  Any other value yields None.
    '''
    mapping = {'True': True, 'False': False}
    return mapping.get(s)
#-----------------------------------------------------------------------------
# Settings
#-----------------------------------------------------------------------------
debug = False
dataset = 'freesurf_combined'
resamplefactor = 1
save_path = os.path.join('/code/BayOptPy', 'freesurfer_preprocess')
raw = 'False'  # kept as a string and converted via str_to_bool (argparse-style)
analysis = 'uniform'

project_wd, project_data, project_sink = get_paths(debug, dataset)
demographics, imgs, dataframe = get_data(project_data, dataset,
                                         debug, project_wd,
                                         resamplefactor,
                                         raw=str_to_bool(raw),
                                         analysis=analysis)

# transform age into ints
demographics['age_int'] = demographics['age'].astype('int32', copy=False)

# Select 14 subjects for all ages that have 14 representatives.
# NOTE(review): np.arange stops one short of the maximum age -- confirm the
# oldest age bin is meant to be excluded.
age_range = np.arange(demographics['age'].min(), demographics['age'].max())
# remove entry where you don't have 14 subjects
max_n = 14
age_to_remove = [35, 36, 39, 42, 78, 79, 80, 81, 82, 83, 85, 89]
age_range = np.setdiff1d(age_range, age_to_remove)

# iterate over the dataframe and select 14 subjects for each age range
ids_to_use = []
for age in age_range:
    ids_to_use.append(demographics.index[demographics['age_int'] ==
                                         age].tolist()[:max_n])
# flatten ids_to_use
ids_to_use = [item for sublist in ids_to_use for item in sublist]

# Filter the demographics dataframe
demographics = demographics[demographics.index.isin(ids_to_use)]
# set subject's id as index
demographics = demographics.set_index('id')
# filter dataset using index of the subjects
dataframe = dataframe.loc[demographics.index]

# Print some diagnosis
print('Shape of the new demographics:')
print(demographics.shape)
print('Oldest %d and youngest %d subject' %(demographics['age_int'].max(),
                                           demographics['age_int'].min()))
print('Number of age bins %d' %len(demographics['age_int'].unique()))

# Deliberate breakpoint: the script drops into the debugger for manual
# inspection before finishing.
import pdb
pdb.set_trace()
print('Done')
| 36.070423 | 90 | 0.629832 | # This script assumes taht the freesurfer csv for the BANC data has already been generated
import os
import pandas as pd
import numpy as np
import pdb
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from BayOptPy.helperfunctions import get_paths, get_data, drop_missing_features
def str_to_bool(s):
'''
As arg pass does not acess boolen, transfrom the string into
booleans
'''
if s == 'True':
return True
elif s == 'False':
return False
#-----------------------------------------------------------------------------
# Settings
#-----------------------------------------------------------------------------
debug = False
dataset = 'freesurf_combined'
resamplefactor = 1
save_path = os.path.join('/code/BayOptPy', 'freesurfer_preprocess')
raw = 'False'
analysis = 'uniform'
project_wd, project_data, project_sink = get_paths(debug, dataset)
demographics, imgs, dataframe = get_data(project_data, dataset,
debug, project_wd,
resamplefactor,
raw=str_to_bool(raw),
analysis=analysis)
# transform age into ints
demographics['age_int'] = demographics['age'].astype('int32', copy=False)
# Select 14 subjects for all ages that have 14 representatives.
age_range = np.arange(demographics['age'].min(), demographics['age'].max())
# remove entry where you don't have 14 subjects
max_n = 14
age_to_remove = [35, 36, 39, 42, 78, 79, 80, 81, 82, 83, 85, 89]
age_range = np.setdiff1d(age_range, age_to_remove)
# iterate over the dataframe and select 14 subjects for each age range
ids_to_use = []
for age in age_range:
ids_to_use.append(demographics.index[demographics['age_int'] ==
age].tolist()[:max_n])
# flatten ids_to_use
ids_to_use = [item for sublist in ids_to_use for item in sublist]
# Filter the demographics dataframe
demographics = demographics[demographics.index.isin(ids_to_use)]
# set subject's id as index
demographics = demographics.set_index('id')
# filter dataset using index of the subjects
dataframe = dataframe.loc[demographics.index]
# Print some diagnosis
print('Shape of the new demographics:')
print(demographics.shape)
print('Oldest %d and youngest %d subject' %(demographics['age_int'].max(),
demographics['age_int'].min()))
print('Number of age bins %d' %len(demographics['age_int'].unique()))
import pdb
pdb.set_trace()
print('Done')
| 0 | 0 | 0 |
8bbf2f6e7ce233dc89fddc7e425bf35285b8c1c1 | 584 | py | Python | leaderboard/migrations/0007_submission_is_public.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 6 | 2020-06-25T05:00:45.000Z | 2022-03-30T09:45:11.000Z | leaderboard/migrations/0007_submission_is_public.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 42 | 2020-06-24T08:48:48.000Z | 2021-09-08T14:36:11.000Z | leaderboard/migrations/0007_submission_is_public.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 3 | 2020-05-25T20:34:08.000Z | 2021-03-21T05:10:11.000Z | # pylint: disable=invalid-name,missing-docstring
# Generated by Django 2.2.1 on 2020-06-19 05:29
from django.db import migrations
from django.db import models
| 26.545455 | 64 | 0.580479 | # pylint: disable=invalid-name,missing-docstring
# Generated by Django 2.2.1 on 2020-06-19 05:29
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [('leaderboard', '0006_auto_20200618_2204')]
operations = [
migrations.AddField(
model_name='submission',
name='is_public',
field=models.BooleanField(
db_index=True,
default=False,
help_text='Is publicly visible?',
),
)
]
| 0 | 394 | 25 |
38c3d799d246d5ac683945ee7d8f3db96348c890 | 1,186 | py | Python | bitmovin/resources/models/manifests/dash/dash_mp4_representation.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | bitmovin/resources/models/manifests/dash/dash_mp4_representation.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | bitmovin/resources/models/manifests/dash/dash_mp4_representation.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | from .abstract_dash_mp4_representation import AbstractDashMP4Representation
| 45.615385 | 104 | 0.62226 | from .abstract_dash_mp4_representation import AbstractDashMP4Representation
class DashMP4Representation(AbstractDashMP4Representation):
def __init__(self, encoding_id, muxing_id, file_path, id_=None, custom_data=None):
super().__init__(id_=id_, custom_data=custom_data, encoding_id=encoding_id, muxing_id=muxing_id,
file_path=file_path)
@classmethod
def parse_from_json_object(cls, json_object):
representation = AbstractDashMP4Representation.parse_from_json_object(json_object=json_object)
id_ = representation.id
custom_data = representation.customData
encoding_id = representation.encodingId
muxing_id = representation.muxingId
file_path = representation.filePath
dash_mp4_representation = DashMP4Representation(id_=id_,
custom_data=custom_data,
encoding_id=encoding_id,
muxing_id=muxing_id,
file_path=file_path)
return dash_mp4_representation
| 977 | 109 | 23 |
aefd262b130e708bff022101d0ddcb9ba1871734 | 2,110 | py | Python | models.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | models.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | models.py | fanieblesat/proyectoMintic | 18e25caf4a077a67c0e83d82757dfdc167ef61f6 | [
"MIT"
] | null | null | null | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from __init__ import db
class User(db.Model):
"""Data model for user accounts."""
__tablename__ = 'usuario'
id = db.Column(
db.Integer,
primary_key=True
)
email = db.Column(
db.String(80),
index=True,
unique=True,
nullable=False
)
isadmin = db.Column(
db.Boolean,
index=False,
unique=False,
nullable=False
)
password_hash = db.Column(
db.String(128),
index=False,
unique=False,
nullable=False)
@staticmethod
@staticmethod
@property
def password(self):
"""
Prevent pasword from being accessed
"""
raise AttributeError('password is not a readable attribute.')
@password.setter
def password(self, password):
"""
Set password to a hashed password
"""
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
"""
Check if hashed password matches actual password
"""
return check_password_hash(self.password_hash, password)
| 26.375 | 94 | 0.592417 | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from __init__ import db
class User(db.Model):
"""Data model for user accounts."""
__tablename__ = 'usuario'
id = db.Column(
db.Integer,
primary_key=True
)
email = db.Column(
db.String(80),
index=True,
unique=True,
nullable=False
)
isadmin = db.Column(
db.Boolean,
index=False,
unique=False,
nullable=False
)
password_hash = db.Column(
db.String(128),
index=False,
unique=False,
nullable=False)
def get_reset_token(self, expires=500):
return jwt.encode({'reset_password': self.email, 'exp': time() + expires},
key=os.getenv('SECRET_KEY_FLASK'))
@staticmethod
def verify_reset_token(token):
try:
username = jwt.decode(token, key=os.getenv('SECRET_KEY_FLASK'))['reset_password']
print(username)
except Exception as e:
print(e)
return
return User.query.filter_by(username=username).first()
@staticmethod
def verify_email(email):
user = User.query.filter_by(email=email).first()
return user
@property
def password(self):
"""
Prevent pasword from being accessed
"""
raise AttributeError('password is not a readable attribute.')
@password.setter
def password(self, password):
"""
Set password to a hashed password
"""
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
"""
Check if hashed password matches actual password
"""
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User {}>'.format(self.username)
| 584 | 0 | 112 |
0b45197a2c899c4d28fd133ec00a125cd4845c21 | 15,393 | py | Python | models_all_solvable2/syn05m02h.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | 7 | 2019-05-08T19:14:34.000Z | 2021-12-24T00:00:40.000Z | models_all_solvable2/syn05m02h.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | null | null | null | models_all_solvable2/syn05m02h.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | 2 | 2020-05-21T22:15:51.000Z | 2020-06-02T23:02:08.000Z | # MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 152 71 6 75 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 105 85 20 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 352 334 18 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - m.x12 - m.x13 + 5*m.x24 + 10*m.x25 - 2*m.x34 - m.x35 + 80*m.x36 + 90*m.x37 + 285*m.x38
+ 390*m.x39 + 290*m.x40 + 405*m.x41 - 5*m.b96 - 4*m.b97 - 8*m.b98 - 7*m.b99 - 6*m.b100
- 9*m.b101 - 10*m.b102 - 9*m.b103 - 6*m.b104 - 10*m.b105, sense=maximize)
m.c2 = Constraint(expr= m.x12 - m.x14 - m.x16 == 0)
m.c3 = Constraint(expr= m.x13 - m.x15 - m.x17 == 0)
m.c4 = Constraint(expr= - m.x18 - m.x20 + m.x22 == 0)
m.c5 = Constraint(expr= - m.x19 - m.x21 + m.x23 == 0)
m.c6 = Constraint(expr= m.x22 - m.x24 - m.x26 == 0)
m.c7 = Constraint(expr= m.x23 - m.x25 - m.x27 == 0)
m.c8 = Constraint(expr= m.x26 - m.x28 - m.x30 - m.x32 == 0)
m.c9 = Constraint(expr= m.x27 - m.x29 - m.x31 - m.x33 == 0)
m.c10 = Constraint(expr=(m.x50/(1e-6 + m.b86) - log(1 + m.x42/(1e-6 + m.b86)))*(1e-6 + m.b86) <= 0)
m.c11 = Constraint(expr=(m.x51/(1e-6 + m.b87) - log(1 + m.x43/(1e-6 + m.b87)))*(1e-6 + m.b87) <= 0)
m.c12 = Constraint(expr= m.x44 == 0)
m.c13 = Constraint(expr= m.x45 == 0)
m.c14 = Constraint(expr= m.x52 == 0)
m.c15 = Constraint(expr= m.x53 == 0)
m.c16 = Constraint(expr= m.x14 - m.x42 - m.x44 == 0)
m.c17 = Constraint(expr= m.x15 - m.x43 - m.x45 == 0)
m.c18 = Constraint(expr= m.x18 - m.x50 - m.x52 == 0)
m.c19 = Constraint(expr= m.x19 - m.x51 - m.x53 == 0)
m.c20 = Constraint(expr= m.x42 - 40*m.b86 <= 0)
m.c21 = Constraint(expr= m.x43 - 40*m.b87 <= 0)
m.c22 = Constraint(expr= m.x44 + 40*m.b86 <= 40)
m.c23 = Constraint(expr= m.x45 + 40*m.b87 <= 40)
m.c24 = Constraint(expr= m.x50 - 3.71357206670431*m.b86 <= 0)
m.c25 = Constraint(expr= m.x51 - 3.71357206670431*m.b87 <= 0)
m.c26 = Constraint(expr= m.x52 + 3.71357206670431*m.b86 <= 3.71357206670431)
m.c27 = Constraint(expr= m.x53 + 3.71357206670431*m.b87 <= 3.71357206670431)
m.c28 = Constraint(expr=(m.x54/(1e-6 + m.b88) - 1.2*log(1 + m.x46/(1e-6 + m.b88)))*(1e-6 + m.b88) <= 0)
m.c29 = Constraint(expr=(m.x55/(1e-6 + m.b89) - 1.2*log(1 + m.x47/(1e-6 + m.b89)))*(1e-6 + m.b89) <= 0)
m.c30 = Constraint(expr= m.x48 == 0)
m.c31 = Constraint(expr= m.x49 == 0)
m.c32 = Constraint(expr= m.x56 == 0)
m.c33 = Constraint(expr= m.x57 == 0)
m.c34 = Constraint(expr= m.x16 - m.x46 - m.x48 == 0)
m.c35 = Constraint(expr= m.x17 - m.x47 - m.x49 == 0)
m.c36 = Constraint(expr= m.x20 - m.x54 - m.x56 == 0)
m.c37 = Constraint(expr= m.x21 - m.x55 - m.x57 == 0)
m.c38 = Constraint(expr= m.x46 - 40*m.b88 <= 0)
m.c39 = Constraint(expr= m.x47 - 40*m.b89 <= 0)
m.c40 = Constraint(expr= m.x48 + 40*m.b88 <= 40)
m.c41 = Constraint(expr= m.x49 + 40*m.b89 <= 40)
m.c42 = Constraint(expr= m.x54 - 4.45628648004517*m.b88 <= 0)
m.c43 = Constraint(expr= m.x55 - 4.45628648004517*m.b89 <= 0)
m.c44 = Constraint(expr= m.x56 + 4.45628648004517*m.b88 <= 4.45628648004517)
m.c45 = Constraint(expr= m.x57 + 4.45628648004517*m.b89 <= 4.45628648004517)
m.c46 = Constraint(expr= - 0.75*m.x58 + m.x74 == 0)
m.c47 = Constraint(expr= - 0.75*m.x59 + m.x75 == 0)
m.c48 = Constraint(expr= m.x60 == 0)
m.c49 = Constraint(expr= m.x61 == 0)
m.c50 = Constraint(expr= m.x76 == 0)
m.c51 = Constraint(expr= m.x77 == 0)
m.c52 = Constraint(expr= m.x28 - m.x58 - m.x60 == 0)
m.c53 = Constraint(expr= m.x29 - m.x59 - m.x61 == 0)
m.c54 = Constraint(expr= m.x36 - m.x74 - m.x76 == 0)
m.c55 = Constraint(expr= m.x37 - m.x75 - m.x77 == 0)
m.c56 = Constraint(expr= m.x58 - 4.45628648004517*m.b90 <= 0)
m.c57 = Constraint(expr= m.x59 - 4.45628648004517*m.b91 <= 0)
m.c58 = Constraint(expr= m.x60 + 4.45628648004517*m.b90 <= 4.45628648004517)
m.c59 = Constraint(expr= m.x61 + 4.45628648004517*m.b91 <= 4.45628648004517)
m.c60 = Constraint(expr= m.x74 - 3.34221486003388*m.b90 <= 0)
m.c61 = Constraint(expr= m.x75 - 3.34221486003388*m.b91 <= 0)
m.c62 = Constraint(expr= m.x76 + 3.34221486003388*m.b90 <= 3.34221486003388)
m.c63 = Constraint(expr= m.x77 + 3.34221486003388*m.b91 <= 3.34221486003388)
m.c64 = Constraint(expr=(m.x78/(1e-6 + m.b92) - 1.5*log(1 + m.x62/(1e-6 + m.b92)))*(1e-6 + m.b92) <= 0)
m.c65 = Constraint(expr=(m.x79/(1e-6 + m.b93) - 1.5*log(1 + m.x63/(1e-6 + m.b93)))*(1e-6 + m.b93) <= 0)
m.c66 = Constraint(expr= m.x64 == 0)
m.c67 = Constraint(expr= m.x65 == 0)
m.c68 = Constraint(expr= m.x80 == 0)
m.c69 = Constraint(expr= m.x81 == 0)
m.c70 = Constraint(expr= m.x30 - m.x62 - m.x64 == 0)
m.c71 = Constraint(expr= m.x31 - m.x63 - m.x65 == 0)
m.c72 = Constraint(expr= m.x38 - m.x78 - m.x80 == 0)
m.c73 = Constraint(expr= m.x39 - m.x79 - m.x81 == 0)
m.c74 = Constraint(expr= m.x62 - 4.45628648004517*m.b92 <= 0)
m.c75 = Constraint(expr= m.x63 - 4.45628648004517*m.b93 <= 0)
m.c76 = Constraint(expr= m.x64 + 4.45628648004517*m.b92 <= 4.45628648004517)
m.c77 = Constraint(expr= m.x65 + 4.45628648004517*m.b93 <= 4.45628648004517)
m.c78 = Constraint(expr= m.x78 - 2.54515263975353*m.b92 <= 0)
m.c79 = Constraint(expr= m.x79 - 2.54515263975353*m.b93 <= 0)
m.c80 = Constraint(expr= m.x80 + 2.54515263975353*m.b92 <= 2.54515263975353)
m.c81 = Constraint(expr= m.x81 + 2.54515263975353*m.b93 <= 2.54515263975353)
m.c82 = Constraint(expr= - m.x66 + m.x82 == 0)
m.c83 = Constraint(expr= - m.x67 + m.x83 == 0)
m.c84 = Constraint(expr= - 0.5*m.x70 + m.x82 == 0)
m.c85 = Constraint(expr= - 0.5*m.x71 + m.x83 == 0)
m.c86 = Constraint(expr= m.x68 == 0)
m.c87 = Constraint(expr= m.x69 == 0)
m.c88 = Constraint(expr= m.x72 == 0)
m.c89 = Constraint(expr= m.x73 == 0)
m.c90 = Constraint(expr= m.x84 == 0)
m.c91 = Constraint(expr= m.x85 == 0)
m.c92 = Constraint(expr= m.x32 - m.x66 - m.x68 == 0)
m.c93 = Constraint(expr= m.x33 - m.x67 - m.x69 == 0)
m.c94 = Constraint(expr= m.x34 - m.x70 - m.x72 == 0)
m.c95 = Constraint(expr= m.x35 - m.x71 - m.x73 == 0)
m.c96 = Constraint(expr= m.x40 - m.x82 - m.x84 == 0)
m.c97 = Constraint(expr= m.x41 - m.x83 - m.x85 == 0)
m.c98 = Constraint(expr= m.x66 - 4.45628648004517*m.b94 <= 0)
m.c99 = Constraint(expr= m.x67 - 4.45628648004517*m.b95 <= 0)
m.c100 = Constraint(expr= m.x68 + 4.45628648004517*m.b94 <= 4.45628648004517)
m.c101 = Constraint(expr= m.x69 + 4.45628648004517*m.b95 <= 4.45628648004517)
m.c102 = Constraint(expr= m.x70 - 30*m.b94 <= 0)
m.c103 = Constraint(expr= m.x71 - 30*m.b95 <= 0)
m.c104 = Constraint(expr= m.x72 + 30*m.b94 <= 30)
m.c105 = Constraint(expr= m.x73 + 30*m.b95 <= 30)
m.c106 = Constraint(expr= m.x82 - 15*m.b94 <= 0)
m.c107 = Constraint(expr= m.x83 - 15*m.b95 <= 0)
m.c108 = Constraint(expr= m.x84 + 15*m.b94 <= 15)
m.c109 = Constraint(expr= m.x85 + 15*m.b95 <= 15)
m.c110 = Constraint(expr= m.x2 + 5*m.b96 == 0)
m.c111 = Constraint(expr= m.x3 + 4*m.b97 == 0)
m.c112 = Constraint(expr= m.x4 + 8*m.b98 == 0)
m.c113 = Constraint(expr= m.x5 + 7*m.b99 == 0)
m.c114 = Constraint(expr= m.x6 + 6*m.b100 == 0)
m.c115 = Constraint(expr= m.x7 + 9*m.b101 == 0)
m.c116 = Constraint(expr= m.x8 + 10*m.b102 == 0)
m.c117 = Constraint(expr= m.x9 + 9*m.b103 == 0)
m.c118 = Constraint(expr= m.x10 + 6*m.b104 == 0)
m.c119 = Constraint(expr= m.x11 + 10*m.b105 == 0)
m.c120 = Constraint(expr= m.b86 - m.b87 <= 0)
m.c121 = Constraint(expr= m.b88 - m.b89 <= 0)
m.c122 = Constraint(expr= m.b90 - m.b91 <= 0)
m.c123 = Constraint(expr= m.b92 - m.b93 <= 0)
m.c124 = Constraint(expr= m.b94 - m.b95 <= 0)
m.c125 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c126 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c127 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c128 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c129 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c130 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c131 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c132 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c133 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c134 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c135 = Constraint(expr= m.b86 - m.b96 <= 0)
m.c136 = Constraint(expr= - m.b86 + m.b87 - m.b97 <= 0)
m.c137 = Constraint(expr= m.b88 - m.b98 <= 0)
m.c138 = Constraint(expr= - m.b88 + m.b89 - m.b99 <= 0)
m.c139 = Constraint(expr= m.b90 - m.b100 <= 0)
m.c140 = Constraint(expr= - m.b90 + m.b91 - m.b101 <= 0)
m.c141 = Constraint(expr= m.b92 - m.b102 <= 0)
m.c142 = Constraint(expr= - m.b92 + m.b93 - m.b103 <= 0)
m.c143 = Constraint(expr= m.b94 - m.b104 <= 0)
m.c144 = Constraint(expr= - m.b94 + m.b95 - m.b105 <= 0)
m.c145 = Constraint(expr= m.b86 + m.b88 == 1)
m.c146 = Constraint(expr= m.b87 + m.b89 == 1)
m.c147 = Constraint(expr= m.b86 + m.b88 - m.b90 >= 0)
m.c148 = Constraint(expr= m.b87 + m.b89 - m.b91 >= 0)
m.c149 = Constraint(expr= m.b86 + m.b88 - m.b92 >= 0)
m.c150 = Constraint(expr= m.b87 + m.b89 - m.b93 >= 0)
m.c151 = Constraint(expr= m.b86 + m.b88 - m.b94 >= 0)
m.c152 = Constraint(expr= m.b87 + m.b89 - m.b95 >= 0)
| 35.386207 | 112 | 0.626324 | # MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 152 71 6 75 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 105 85 20 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 352 334 18 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - m.x12 - m.x13 + 5*m.x24 + 10*m.x25 - 2*m.x34 - m.x35 + 80*m.x36 + 90*m.x37 + 285*m.x38
+ 390*m.x39 + 290*m.x40 + 405*m.x41 - 5*m.b96 - 4*m.b97 - 8*m.b98 - 7*m.b99 - 6*m.b100
- 9*m.b101 - 10*m.b102 - 9*m.b103 - 6*m.b104 - 10*m.b105, sense=maximize)
m.c2 = Constraint(expr= m.x12 - m.x14 - m.x16 == 0)
m.c3 = Constraint(expr= m.x13 - m.x15 - m.x17 == 0)
m.c4 = Constraint(expr= - m.x18 - m.x20 + m.x22 == 0)
m.c5 = Constraint(expr= - m.x19 - m.x21 + m.x23 == 0)
m.c6 = Constraint(expr= m.x22 - m.x24 - m.x26 == 0)
m.c7 = Constraint(expr= m.x23 - m.x25 - m.x27 == 0)
m.c8 = Constraint(expr= m.x26 - m.x28 - m.x30 - m.x32 == 0)
m.c9 = Constraint(expr= m.x27 - m.x29 - m.x31 - m.x33 == 0)
m.c10 = Constraint(expr=(m.x50/(1e-6 + m.b86) - log(1 + m.x42/(1e-6 + m.b86)))*(1e-6 + m.b86) <= 0)
m.c11 = Constraint(expr=(m.x51/(1e-6 + m.b87) - log(1 + m.x43/(1e-6 + m.b87)))*(1e-6 + m.b87) <= 0)
m.c12 = Constraint(expr= m.x44 == 0)
m.c13 = Constraint(expr= m.x45 == 0)
m.c14 = Constraint(expr= m.x52 == 0)
m.c15 = Constraint(expr= m.x53 == 0)
m.c16 = Constraint(expr= m.x14 - m.x42 - m.x44 == 0)
m.c17 = Constraint(expr= m.x15 - m.x43 - m.x45 == 0)
m.c18 = Constraint(expr= m.x18 - m.x50 - m.x52 == 0)
m.c19 = Constraint(expr= m.x19 - m.x51 - m.x53 == 0)
m.c20 = Constraint(expr= m.x42 - 40*m.b86 <= 0)
m.c21 = Constraint(expr= m.x43 - 40*m.b87 <= 0)
m.c22 = Constraint(expr= m.x44 + 40*m.b86 <= 40)
m.c23 = Constraint(expr= m.x45 + 40*m.b87 <= 40)
m.c24 = Constraint(expr= m.x50 - 3.71357206670431*m.b86 <= 0)
m.c25 = Constraint(expr= m.x51 - 3.71357206670431*m.b87 <= 0)
m.c26 = Constraint(expr= m.x52 + 3.71357206670431*m.b86 <= 3.71357206670431)
m.c27 = Constraint(expr= m.x53 + 3.71357206670431*m.b87 <= 3.71357206670431)
m.c28 = Constraint(expr=(m.x54/(1e-6 + m.b88) - 1.2*log(1 + m.x46/(1e-6 + m.b88)))*(1e-6 + m.b88) <= 0)
m.c29 = Constraint(expr=(m.x55/(1e-6 + m.b89) - 1.2*log(1 + m.x47/(1e-6 + m.b89)))*(1e-6 + m.b89) <= 0)
m.c30 = Constraint(expr= m.x48 == 0)
m.c31 = Constraint(expr= m.x49 == 0)
m.c32 = Constraint(expr= m.x56 == 0)
m.c33 = Constraint(expr= m.x57 == 0)
m.c34 = Constraint(expr= m.x16 - m.x46 - m.x48 == 0)
m.c35 = Constraint(expr= m.x17 - m.x47 - m.x49 == 0)
m.c36 = Constraint(expr= m.x20 - m.x54 - m.x56 == 0)
m.c37 = Constraint(expr= m.x21 - m.x55 - m.x57 == 0)
m.c38 = Constraint(expr= m.x46 - 40*m.b88 <= 0)
m.c39 = Constraint(expr= m.x47 - 40*m.b89 <= 0)
m.c40 = Constraint(expr= m.x48 + 40*m.b88 <= 40)
m.c41 = Constraint(expr= m.x49 + 40*m.b89 <= 40)
m.c42 = Constraint(expr= m.x54 - 4.45628648004517*m.b88 <= 0)
m.c43 = Constraint(expr= m.x55 - 4.45628648004517*m.b89 <= 0)
m.c44 = Constraint(expr= m.x56 + 4.45628648004517*m.b88 <= 4.45628648004517)
m.c45 = Constraint(expr= m.x57 + 4.45628648004517*m.b89 <= 4.45628648004517)
m.c46 = Constraint(expr= - 0.75*m.x58 + m.x74 == 0)
m.c47 = Constraint(expr= - 0.75*m.x59 + m.x75 == 0)
m.c48 = Constraint(expr= m.x60 == 0)
m.c49 = Constraint(expr= m.x61 == 0)
m.c50 = Constraint(expr= m.x76 == 0)
m.c51 = Constraint(expr= m.x77 == 0)
m.c52 = Constraint(expr= m.x28 - m.x58 - m.x60 == 0)
m.c53 = Constraint(expr= m.x29 - m.x59 - m.x61 == 0)
m.c54 = Constraint(expr= m.x36 - m.x74 - m.x76 == 0)
m.c55 = Constraint(expr= m.x37 - m.x75 - m.x77 == 0)
m.c56 = Constraint(expr= m.x58 - 4.45628648004517*m.b90 <= 0)
m.c57 = Constraint(expr= m.x59 - 4.45628648004517*m.b91 <= 0)
m.c58 = Constraint(expr= m.x60 + 4.45628648004517*m.b90 <= 4.45628648004517)
m.c59 = Constraint(expr= m.x61 + 4.45628648004517*m.b91 <= 4.45628648004517)
m.c60 = Constraint(expr= m.x74 - 3.34221486003388*m.b90 <= 0)
m.c61 = Constraint(expr= m.x75 - 3.34221486003388*m.b91 <= 0)
m.c62 = Constraint(expr= m.x76 + 3.34221486003388*m.b90 <= 3.34221486003388)
m.c63 = Constraint(expr= m.x77 + 3.34221486003388*m.b91 <= 3.34221486003388)
m.c64 = Constraint(expr=(m.x78/(1e-6 + m.b92) - 1.5*log(1 + m.x62/(1e-6 + m.b92)))*(1e-6 + m.b92) <= 0)
m.c65 = Constraint(expr=(m.x79/(1e-6 + m.b93) - 1.5*log(1 + m.x63/(1e-6 + m.b93)))*(1e-6 + m.b93) <= 0)
m.c66 = Constraint(expr= m.x64 == 0)
m.c67 = Constraint(expr= m.x65 == 0)
m.c68 = Constraint(expr= m.x80 == 0)
m.c69 = Constraint(expr= m.x81 == 0)
m.c70 = Constraint(expr= m.x30 - m.x62 - m.x64 == 0)
m.c71 = Constraint(expr= m.x31 - m.x63 - m.x65 == 0)
m.c72 = Constraint(expr= m.x38 - m.x78 - m.x80 == 0)
m.c73 = Constraint(expr= m.x39 - m.x79 - m.x81 == 0)
m.c74 = Constraint(expr= m.x62 - 4.45628648004517*m.b92 <= 0)
m.c75 = Constraint(expr= m.x63 - 4.45628648004517*m.b93 <= 0)
m.c76 = Constraint(expr= m.x64 + 4.45628648004517*m.b92 <= 4.45628648004517)
m.c77 = Constraint(expr= m.x65 + 4.45628648004517*m.b93 <= 4.45628648004517)
m.c78 = Constraint(expr= m.x78 - 2.54515263975353*m.b92 <= 0)
m.c79 = Constraint(expr= m.x79 - 2.54515263975353*m.b93 <= 0)
m.c80 = Constraint(expr= m.x80 + 2.54515263975353*m.b92 <= 2.54515263975353)
m.c81 = Constraint(expr= m.x81 + 2.54515263975353*m.b93 <= 2.54515263975353)
m.c82 = Constraint(expr= - m.x66 + m.x82 == 0)
m.c83 = Constraint(expr= - m.x67 + m.x83 == 0)
m.c84 = Constraint(expr= - 0.5*m.x70 + m.x82 == 0)
m.c85 = Constraint(expr= - 0.5*m.x71 + m.x83 == 0)
m.c86 = Constraint(expr= m.x68 == 0)
m.c87 = Constraint(expr= m.x69 == 0)
m.c88 = Constraint(expr= m.x72 == 0)
m.c89 = Constraint(expr= m.x73 == 0)
m.c90 = Constraint(expr= m.x84 == 0)
m.c91 = Constraint(expr= m.x85 == 0)
m.c92 = Constraint(expr= m.x32 - m.x66 - m.x68 == 0)
m.c93 = Constraint(expr= m.x33 - m.x67 - m.x69 == 0)
m.c94 = Constraint(expr= m.x34 - m.x70 - m.x72 == 0)
m.c95 = Constraint(expr= m.x35 - m.x71 - m.x73 == 0)
m.c96 = Constraint(expr= m.x40 - m.x82 - m.x84 == 0)
m.c97 = Constraint(expr= m.x41 - m.x83 - m.x85 == 0)
m.c98 = Constraint(expr= m.x66 - 4.45628648004517*m.b94 <= 0)
m.c99 = Constraint(expr= m.x67 - 4.45628648004517*m.b95 <= 0)
m.c100 = Constraint(expr= m.x68 + 4.45628648004517*m.b94 <= 4.45628648004517)
m.c101 = Constraint(expr= m.x69 + 4.45628648004517*m.b95 <= 4.45628648004517)
m.c102 = Constraint(expr= m.x70 - 30*m.b94 <= 0)
m.c103 = Constraint(expr= m.x71 - 30*m.b95 <= 0)
m.c104 = Constraint(expr= m.x72 + 30*m.b94 <= 30)
m.c105 = Constraint(expr= m.x73 + 30*m.b95 <= 30)
m.c106 = Constraint(expr= m.x82 - 15*m.b94 <= 0)
m.c107 = Constraint(expr= m.x83 - 15*m.b95 <= 0)
m.c108 = Constraint(expr= m.x84 + 15*m.b94 <= 15)
m.c109 = Constraint(expr= m.x85 + 15*m.b95 <= 15)
m.c110 = Constraint(expr= m.x2 + 5*m.b96 == 0)
m.c111 = Constraint(expr= m.x3 + 4*m.b97 == 0)
m.c112 = Constraint(expr= m.x4 + 8*m.b98 == 0)
m.c113 = Constraint(expr= m.x5 + 7*m.b99 == 0)
m.c114 = Constraint(expr= m.x6 + 6*m.b100 == 0)
m.c115 = Constraint(expr= m.x7 + 9*m.b101 == 0)
m.c116 = Constraint(expr= m.x8 + 10*m.b102 == 0)
m.c117 = Constraint(expr= m.x9 + 9*m.b103 == 0)
m.c118 = Constraint(expr= m.x10 + 6*m.b104 == 0)
m.c119 = Constraint(expr= m.x11 + 10*m.b105 == 0)
m.c120 = Constraint(expr= m.b86 - m.b87 <= 0)
m.c121 = Constraint(expr= m.b88 - m.b89 <= 0)
m.c122 = Constraint(expr= m.b90 - m.b91 <= 0)
m.c123 = Constraint(expr= m.b92 - m.b93 <= 0)
m.c124 = Constraint(expr= m.b94 - m.b95 <= 0)
m.c125 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c126 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c127 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c128 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c129 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c130 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c131 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c132 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c133 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c134 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c135 = Constraint(expr= m.b86 - m.b96 <= 0)
m.c136 = Constraint(expr= - m.b86 + m.b87 - m.b97 <= 0)
m.c137 = Constraint(expr= m.b88 - m.b98 <= 0)
m.c138 = Constraint(expr= - m.b88 + m.b89 - m.b99 <= 0)
m.c139 = Constraint(expr= m.b90 - m.b100 <= 0)
m.c140 = Constraint(expr= - m.b90 + m.b91 - m.b101 <= 0)
m.c141 = Constraint(expr= m.b92 - m.b102 <= 0)
m.c142 = Constraint(expr= - m.b92 + m.b93 - m.b103 <= 0)
m.c143 = Constraint(expr= m.b94 - m.b104 <= 0)
m.c144 = Constraint(expr= - m.b94 + m.b95 - m.b105 <= 0)
m.c145 = Constraint(expr= m.b86 + m.b88 == 1)
m.c146 = Constraint(expr= m.b87 + m.b89 == 1)
m.c147 = Constraint(expr= m.b86 + m.b88 - m.b90 >= 0)
m.c148 = Constraint(expr= m.b87 + m.b89 - m.b91 >= 0)
m.c149 = Constraint(expr= m.b86 + m.b88 - m.b92 >= 0)
m.c150 = Constraint(expr= m.b87 + m.b89 - m.b93 >= 0)
m.c151 = Constraint(expr= m.b86 + m.b88 - m.b94 >= 0)
m.c152 = Constraint(expr= m.b87 + m.b89 - m.b95 >= 0)
| 0 | 0 | 0 |
ada96d601a49d1e85041c045e4a7fca6ac4db9a3 | 2,689 | py | Python | inverse_covariance/tests/adaptive_graph_lasso_test.py | aldanor/skggm | d2e29d692d1654285653ab07fd24534628fcb076 | [
"MIT"
] | 199 | 2016-10-21T14:36:02.000Z | 2022-03-29T20:59:08.000Z | inverse_covariance/tests/adaptive_graph_lasso_test.py | aldanor/skggm | d2e29d692d1654285653ab07fd24534628fcb076 | [
"MIT"
] | 66 | 2016-10-17T01:47:28.000Z | 2022-03-06T11:02:56.000Z | inverse_covariance/tests/adaptive_graph_lasso_test.py | aldanor/skggm | d2e29d692d1654285653ab07fd24534628fcb076 | [
"MIT"
] | 36 | 2016-10-15T23:42:10.000Z | 2022-03-06T00:03:13.000Z | import numpy as np
import pytest
from inverse_covariance import (
QuicGraphicalLassoEBIC,
AdaptiveGraphicalLasso,
QuicGraphicalLassoCV,
)
from inverse_covariance.profiling import ClusterGraph
| 32.39759 | 88 | 0.479732 | import numpy as np
import pytest
from inverse_covariance import (
QuicGraphicalLassoEBIC,
AdaptiveGraphicalLasso,
QuicGraphicalLassoCV,
)
from inverse_covariance.profiling import ClusterGraph
class TestAdaptiveGraphicalLasso(object):
    """Integration tests for AdaptiveGraphicalLasso.

    Each parametrization pairs a base estimator (a cross-validated
    QuicGraphicalLassoCV or an EBIC-selected QuicGraphicalLassoEBIC)
    with one of the adaptive reweighting methods: "binary", "inverse",
    or "inverse_squared".
    """

    @pytest.mark.parametrize(
        "params_in",
        [
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "binary",
                }
            ),
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "inverse",
                }
            ),
            (
                {
                    "estimator": QuicGraphicalLassoCV(
                        cv=2,
                        n_refinements=6,
                        init_method="cov",
                        score_metric="log_likelihood",
                    ),
                    "method": "inverse_squared",
                }
            ),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "binary"}),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "inverse"}),
            ({"estimator": QuicGraphicalLassoEBIC(), "method": "inverse_squared"}),
        ],
    )
    def test_integration_adaptive_graphical_lasso(self, params_in):
        """
        Just tests inputs/outputs (not validity of result).
        """
        # Simulate a small single-block cluster graph and draw Gaussian samples
        # from its covariance (fixed seeds keep the test deterministic).
        n_features = 20
        n_samples = 25
        cov, prec, adj = ClusterGraph(n_blocks=1, chain_blocks=False, seed=1).create(
            n_features, 0.8
        )
        prng = np.random.RandomState(2)
        X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
        model = AdaptiveGraphicalLasso(**params_in)
        model.fit(X)
        # The fitted model must expose its inner estimator and the adaptive
        # penalty matrix; the penalty diagonal is always zero.
        assert model.estimator_ is not None
        assert model.lam_ is not None
        assert np.sum(model.lam_[np.diag_indices(n_features)]) == 0
        if params_in["method"] == "binary":
            # Binary reweighting yields a strict {0, 1} penalty matrix.
            uvals = set(model.lam_.flat)
            assert len(uvals) == 2
            assert 0 in uvals
            assert 1 in uvals
        elif (
            params_in["method"] == "inverse" or params_in["method"] == "inverse_squared"
        ):
            # Inverse-style reweighting produces some non-zero weights.
            uvals = set(model.lam_.flat[model.lam_.flat != 0])
            assert len(uvals) > 0
f2d7eb3ab3f908b1ca35e025e58b489235659469 | 64 | py | Python | custom/opm/opm_tasks/__init__.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | custom/opm/opm_tasks/__init__.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | custom/opm/opm_tasks/__init__.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | DEVELOPERS_EMAILS = ['esoergel@dimagi.com', 'sreddy@dimagi.com'] | 64 | 64 | 0.765625 | DEVELOPERS_EMAILS = ['esoergel@dimagi.com', 'sreddy@dimagi.com'] | 0 | 0 | 0 |
606cc358f9511c3f340751656877c62607d0a40f | 6,894 | py | Python | runner_with_threshold.py | dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon | 260bb49e859694d6a7c0dfb8cb13cd39d05ed597 | [
"MIT"
] | null | null | null | runner_with_threshold.py | dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon | 260bb49e859694d6a7c0dfb8cb13cd39d05ed597 | [
"MIT"
] | null | null | null | runner_with_threshold.py | dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon | 260bb49e859694d6a7c0dfb8cb13cd39d05ed597 | [
"MIT"
] | null | null | null | import os
import pickle
import time
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# Name of the prediction column and the probability cut-off above which a
# sample is labelled as the positive class.
TARGET = 'target'
THRESHOLD = 0.7
# Test features: the CSV's second column becomes the index; the leftover
# 'Unnamed: 0' column (written by a previous to_csv) is dropped.
df = pd.read_csv('data_test.csv', index_col=[1]) \
    .drop('Unnamed: 0', axis=1)
# Restore the fitted model/pipeline from disk.
with open('model.pkl', 'rb') as f:
    model = pickle.load(f)
# Binarise the positive-class probability at THRESHOLD and write the answers.
df[TARGET] = (model.predict_proba(df)[:, 1] > THRESHOLD).astype('int')
df.to_csv('answers_test.csv')
| 37.879121 | 120 | 0.555411 | import os
import pickle
import time

import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that keeps only a fixed set of columns.

    Raises KeyError (listing the missing columns) when the incoming
    DataFrame does not contain every requested column.
    """

    def __init__(self, columns):
        # Columns to keep, in the order they should appear in the output.
        self.columns = columns

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Return X restricted to self.columns."""
        assert isinstance(X, pd.DataFrame)
        cols_error = list(set(self.columns) - set(X.columns))
        if cols_error:
            raise KeyError(
                f'DataFrame does not contain the following columns: {cols_error}')
        return X[self.columns]
class AddFeatures(BaseEstimator, TransformerMixin):
    """Transformer that left-joins the newest feature snapshot onto X.

    ``features`` is a frame sharing X's id index and carrying a ``buy_time``
    column; for every id only the most recent (largest ``buy_time``) feature
    row is kept before merging.
    """

    def __init__(self, features, silent=True):
        self.features = features
        # When False, progress and timing messages are printed.
        self.silent = silent

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Return X merged (left join on its index name) with the latest feature row per id."""
        if not self.silent:
            start_t = time.time()
            print('Start adding features'.center(100, '*'))
        assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'

        # Keep only feature rows whose id occurs in X, then the latest per id.
        relevant = self.features.loc[self.features.index.isin(X.index.unique())]
        latest = relevant.sort_values('buy_time').groupby('id').last()

        merged = (
            X.reset_index()
            .merge(latest.reset_index(), on=X.index.name, how='left', suffixes=('_train', '_features'))
            .set_index(X.index.name)
        )

        assert merged.shape[0] == X.shape[
            0], f'Shapes of dataframe don\'t match: {merged.shape[0]} and {X.shape[0]}'
        assert (merged.index == X.index).all(), 'Index Sort Error'
        if not self.silent:
            print(
                f'End adding features, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
            print()
        return merged
class MemUseOptimizing(BaseEstimator, TransformerMixin):
    """Transformer that downcasts DataFrame columns to the smallest dtype
    whose range still fits each column's observed min/max values.

    Numeric columns are mapped (in order of increasing preference) to
    int64 -> int32 -> int16 -> int8 -> uint64..uint8 -> bool_ for integral
    values, or float64 -> float32 -> float16 otherwise; object columns with
    under 50% unique values become 'category'.
    """

    def __init__(self, silent=True):
        # When silent is False, before/after memory statistics are printed.
        self.silent = silent

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Return a copy of X with per-column downcast dtypes applied."""
        start_t = time.time()
        assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
        if not self.silent:
            print('Start of dataframe memory use optimizing'.center(100, '*'))
            start_memory_usage = X.memory_usage(deep=True).sum() / 1024**2
        # One row per column: its current dtype plus observed min/max and an
        # "is the column integral-valued" flag for numeric columns.
        X_dtype = pd.DataFrame(
            X.dtypes, columns=['dtype'], index=X.columns)
        X_dtype['min'] = X.select_dtypes(['int', 'float']).min()
        X_dtype['max'] = X.select_dtypes(['int', 'float']).max()
        # True when sum(astype(int)) == sum(), i.e. no fractional part.
        # NOTE(review): this assumes no NaN in the float columns (astype(int)
        # would raise) — confirm upstream cleaning guarantees that.
        X_dtype['is_int'] = ~(X.select_dtypes(['int', 'float']).astype(
            int).sum() - X.select_dtypes(['int', 'float']).sum()).astype('bool_')
        # Integral columns: progressively narrow signed ints, then (for
        # non-negative columns) unsigned ints, and finally bool_ for {0, 1}.
        # Later assignments overwrite earlier ones, so ordering matters here.
        X_dtype.loc[(X_dtype['is_int'] == True), 'dtype'] = 'int64'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'int32').min) & (X_dtype['max'] <= np.iinfo('int32').max), 'dtype'] = 'int32'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'int16').min) & (X_dtype['max'] <= np.iinfo('int16').max), 'dtype'] = 'int16'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'int8').min) & (X_dtype['max'] <= np.iinfo('int8').max), 'dtype'] = 'int8'
        X_dtype.loc[(X_dtype['is_int'] == True) & (
            X_dtype['min'] >= np.iinfo('uint64').min), 'dtype'] = 'uint64'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'uint32').min) & (X_dtype['max'] <= np.iinfo('uint32').max), 'dtype'] = 'uint32'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'uint16').min) & (X_dtype['max'] <= np.iinfo('uint16').max), 'dtype'] = 'uint16'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
            'uint8').min) & (X_dtype['max'] <= np.iinfo('uint8').max), 'dtype'] = 'uint8'
        X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] == 0) & (
            X_dtype['max'] == 1), 'dtype'] = 'bool_'
        # Fractional columns: narrow floats whose range fits the smaller type.
        X_dtype.loc[(X_dtype['is_int'] == False), 'dtype'] = 'float64'
        X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
            'float32').min) & (X_dtype['max'] <= np.finfo('float32').max), 'dtype'] = 'float32'
        X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
            'float16').min) & (X_dtype['max'] <= np.finfo('float16').max), 'dtype'] = 'float16'
        # Object columns become 'category' when mostly repeated values.
        for col in X.select_dtypes('object').columns:
            num_unique_values = len(X[col].unique())
            num_total_values = len(X[col])
            if num_unique_values / num_total_values < 0.5:
                X_dtype.loc[col, 'dtype'] = 'category'
        dtype = X_dtype['dtype'].to_dict()
        X = X.astype(dtype)
        if not self.silent:
            memory_usage = X.memory_usage(deep=True).sum() / 1024**2
            print('Memory use optimizing'.center(100, '*'))
            print(
                f'Memory usage of properties dataframe before optimizing: {start_memory_usage:.02f} MB')
            print(
                f'Memory usage of properties dataframe after optimizing: {memory_usage:.02f} MB')
            print(
                f'This is {100*memory_usage/start_memory_usage:.02f} % of the initial size')
            print(
                f'End of dataframe memory use optimizing, run time: {time_format(time.time()-start_t)}'.center(64, '*'))
            print()
        return X
class GetDate(BaseEstimator, TransformerMixin):
    """Transformer expanding unix-timestamp columns into date features.

    For each input column ``col`` the output frame gets integer columns
    ``col_day``, ``col_month`` and ``col_week`` (ISO week number).
    """

    def __init__(self, silent=True):
        # When silent is False, progress and timing messages are printed.
        self.silent = silent

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Return a new DataFrame of day/month/week features extracted from X.

        X may be a Series or a DataFrame of unix timestamps (seconds).
        """
        if not self.silent:
            start_t = time.time()
            print('Start geting date from timestamp'.center(100, '*'))
        if isinstance(X, pd.Series):
            X = pd.DataFrame(X)
        assert isinstance(
            X, pd.DataFrame), 'This is not a pandas dataframe or series'
        df = pd.DataFrame()
        for col in X.columns:
            # Convert once per column instead of once per derived feature.
            dates = pd.to_datetime(X[col], unit='s')
            df[f'{col}_day'] = dates.dt.day
            df[f'{col}_month'] = dates.dt.month
            # `.dt.week` was removed in pandas 2.0; `.dt.isocalendar().week`
            # is the equivalent ISO week number (cast back to int64 so the
            # output dtype matches what `.dt.week` used to produce).
            df[f'{col}_week'] = dates.dt.isocalendar().week.astype('int64')
        if not self.silent:
            print(
                f'End geting date from timestamp, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
            print()
        return df
# Prediction column name and probability cut-off for the positive class.
TARGET = 'target'
THRESHOLD = 0.7

# Load the test features (second CSV column becomes the index; the stray
# 'Unnamed: 0' column left over from a previous to_csv is dropped).
df = (
    pd.read_csv('data_test.csv', index_col=[1])
    .drop('Unnamed: 0', axis=1)
)

# Restore the fitted pipeline and binarise its positive-class probabilities.
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
positive_proba = model.predict_proba(df)[:, 1]
df[TARGET] = (positive_proba > THRESHOLD).astype('int')

df.to_csv('answers_test.csv')
| 5,953 | 124 | 412 |
a1b133030770735b4198a383c95dc2e1f77bd961 | 58,100 | py | Python | lattes_qualis/_Classes/Indicators.py | ellenjkr/LattesQualis | 4fa149ea9e1c58e12b03bd1b88474a0cc2c6d534 | [
"MIT"
] | null | null | null | lattes_qualis/_Classes/Indicators.py | ellenjkr/LattesQualis | 4fa149ea9e1c58e12b03bd1b88474a0cc2c6d534 | [
"MIT"
] | null | null | null | lattes_qualis/_Classes/Indicators.py | ellenjkr/LattesQualis | 4fa149ea9e1c58e12b03bd1b88474a0cc2c6d534 | [
"MIT"
] | null | null | null | from _Funções_e_Valores.verify_authors import treat_exceptions
from _Funções_e_Valores.values import ND
import pandas as pd
# Proceedings and Journals separated
| 58.041958 | 352 | 0.718709 | from _Funções_e_Valores.verify_authors import treat_exceptions
from _Funções_e_Valores.values import ND
import pandas as pd
class Indicators():
def __init__(self, egress_list, students_list, info, qualis_year, general=False):
super(Indicators, self).__init__()
self.egress_list = egress_list
self.students_list = students_list
self.info = info
self.qualis_year = qualis_year
self.general = general
def get_SE(self, data_frame): # Get the amount of publications that contains students or egress as authors
# Get students and egress names
egress_names = []
for egress in self.egress_list:
egress_names.append(treat_exceptions(egress.name.strip()))
students_names = []
for student in self.students_list:
students_names.append(treat_exceptions(student.name.strip()))
# Calculate the amount of students and egress who appear as authors
amount_SE = 0
for index, row in data_frame.iterrows():
SE = False
for column in row.index:
if "Autor" in str(column):
if data_frame[column][index] != "": # If the value isn't null
# Verify if the author's name is on the egress list and if it's a valid publication year
for pos_egress, egress in enumerate(egress_names):
if data_frame[column][index] == egress:
if self.egress_list[pos_egress].period[str(int(data_frame["Ano"][index]))[2:4]] is True:
SE = True
# Verify if the author's name is on the students list and if it's a valid publication year
for pos_student, student in enumerate(students_names):
if data_frame[column][index] == student:
if self.students_list[pos_student].period[str(data_frame["Ano"][index])[2:4]] is True:
SE = True
# If there's an egress or a student as an author for that publication it increases the amount of SE
if SE == True:
amount_SE += 1
return amount_SE
def calculate_amount(self, data_frame, perc_aux):
amount_SE = self.get_SE(data_frame) # Get the amount of publications that contains students or egress as authors
amount = len(data_frame.index) # Amount of publications
perc = f"{perc_aux * amount:.2f}%" # Percentage of this type of publication
try:
perc_SE = f"{100/amount * amount_SE:.2f}%" # Percentage with students or egress
except ZeroDivisionError:
perc_SE = "0%"
return (amount, amount_SE, perc, perc_SE)
	def build_table_2016_general(self, journals, proceedings, a1_b1, a1, a2, b1,
		b2_b5, b2, b3, b4, b5, others, Irestrito, Irestrito_journals, Irestrito_proceedings,
		Igeral, Igeral_journals, Igeral_proceedings, SE_journals, SE_proceedings, SE_a1_b1,
		SE_a1, SE_a2, SE_b1, SE_b2_b5, SE_b2, SE_b3, SE_b4, SE_b5, SE_others, percentages_SE,
		percentages, Irestrito_medio, Irestrito_medio_journals, Irestrito_medio_proceedings,
		Igeral_medio, Igeral_medio_journals, Igeral_medio_proceedings):
		"""Build the combined (journals + proceedings) Qualis-2016 summary table.

		Returns a dict of five equal-length (20 rows) columns ready for
		DataFrame construction: publication counts per stratum, their
		percentages, the student/egress counts and percentages, and the
		accumulated Irestrito/Igeral indices appended as extra rows.

		NOTE: `percentages_SE` and `percentages` are padded IN PLACE via
		append; callers see the mutated lists.
		"""
		type_qualis = ["Periódicos", "Anais", "A1-B1", "A1", "A2", "B1", "B2-B5", "B2", "B3", "B4", "B5", "Outros"]
		table = {f"Tipo/Qualis {self.qualis_year}": type_qualis, "Quantidade": [], "Porcentagem": [], 'Quantidade com alunos/egressos':[], "% Alunos/Egressos":[]}
		# Extra label rows for the index section that follows the strata.
		table[f"Tipo/Qualis {self.qualis_year}"].append(None)
		table[f"Tipo/Qualis {self.qualis_year}"].append("Índice")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito Periódicos")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral Periódicos")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito Anais")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral Anais")
		# Counts per row of type_qualis, then the accumulated indices.
		table["Quantidade"].append(journals)
		table["Quantidade"].append(proceedings)
		table["Quantidade"].append(a1_b1)
		table["Quantidade"].append(a1)
		table["Quantidade"].append(a2)
		table["Quantidade"].append(b1)
		table["Quantidade"].append(b2_b5)
		table["Quantidade"].append(b2)
		table["Quantidade"].append(b3)
		table["Quantidade"].append(b4)
		table["Quantidade"].append(b5)
		table["Quantidade"].append(others)
		table["Quantidade"].append(None)
		table["Quantidade"].append("Acumulado")
		table["Quantidade"].append(Irestrito)
		table["Quantidade"].append(Igeral)
		table["Quantidade"].append(Irestrito_journals)
		table["Quantidade"].append(Igeral_journals)
		table["Quantidade"].append(Irestrito_proceedings)
		table["Quantidade"].append(Igeral_proceedings)
		# Student/egress counts per stratum; the index rows get no values.
		table['Quantidade com alunos/egressos'].append(SE_journals)
		table['Quantidade com alunos/egressos'].append(SE_proceedings)
		table['Quantidade com alunos/egressos'].append(SE_a1_b1)
		table['Quantidade com alunos/egressos'].append(SE_a1)
		table['Quantidade com alunos/egressos'].append(SE_a2)
		table['Quantidade com alunos/egressos'].append(SE_b1)
		table['Quantidade com alunos/egressos'].append(SE_b2_b5)
		table['Quantidade com alunos/egressos'].append(SE_b2)
		table['Quantidade com alunos/egressos'].append(SE_b3)
		table['Quantidade com alunos/egressos'].append(SE_b4)
		table['Quantidade com alunos/egressos'].append(SE_b5)
		table['Quantidade com alunos/egressos'].append(SE_others)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		# The caller-supplied percentage lists are reused (and mutated) here.
		table["% Alunos/Egressos"] = percentages_SE
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["Porcentagem"] = percentages
		table["Porcentagem"].append(None)
		if self.general:
			# Per-professor averages are only shown in the "general" report.
			table["Porcentagem"].append("Média por docente")
			table["Porcentagem"].append(Irestrito_medio)
			table["Porcentagem"].append(Igeral_medio)
			table["Porcentagem"].append(Irestrito_medio_journals)
			table["Porcentagem"].append(Igeral_medio_journals)
			table["Porcentagem"].append(Irestrito_medio_proceedings)
			table["Porcentagem"].append(Igeral_medio_proceedings)
		else:
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
		return table
# Proceedings and Journals separated
def build_table_2016_separated(self, a1_b1, a1, a2, b1, b2_b5, b2, b3, b4, b5, others,
Irestrito, Igeral, SE_a1_b1, SE_a1, SE_a2, SE_b1, SE_b2_b5, SE_b2, SE_b3, SE_b4,
SE_b5, SE_others, percentages_SE, percentages, Irestrito_medio, Igeral_medio):
type_qualis = ["A1-B1", "A1", "A2", "B1", "B2-B5", "B2", "B3", "B4", "B5", "Outros"]
table = {f"Tipo/Qualis {self.qualis_year}": type_qualis, "Quantidade": [], "Porcentagem": [], 'Quantidade com alunos/egressos':[], "% Alunos/Egressos":[]}
table[f"Tipo/Qualis {self.qualis_year}"].append(None)
table[f"Tipo/Qualis {self.qualis_year}"].append("Índice")
table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito")
table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral")
table["Quantidade"].append(a1_b1)
table["Quantidade"].append(a1)
table["Quantidade"].append(a2)
table["Quantidade"].append(b1)
table["Quantidade"].append(b2_b5)
table["Quantidade"].append(b2)
table["Quantidade"].append(b3)
table["Quantidade"].append(b4)
table["Quantidade"].append(b5)
table["Quantidade"].append(others)
table["Quantidade"].append(None)
table["Quantidade"].append("Acumulado")
table["Quantidade"].append(Irestrito)
table["Quantidade"].append(Igeral)
table['Quantidade com alunos/egressos'].append(SE_a1_b1)
table['Quantidade com alunos/egressos'].append(SE_a1)
table['Quantidade com alunos/egressos'].append(SE_a2)
table['Quantidade com alunos/egressos'].append(SE_b1)
table['Quantidade com alunos/egressos'].append(SE_b2_b5)
table['Quantidade com alunos/egressos'].append(SE_b2)
table['Quantidade com alunos/egressos'].append(SE_b3)
table['Quantidade com alunos/egressos'].append(SE_b4)
table['Quantidade com alunos/egressos'].append(SE_b5)
table['Quantidade com alunos/egressos'].append(SE_others)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table["% Alunos/Egressos"] = percentages_SE
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["Porcentagem"] = percentages
table["Porcentagem"].append(None)
if self.general:
table["Porcentagem"].append("Média por docente")
table["Porcentagem"].append(Irestrito_medio)
table["Porcentagem"].append(Igeral_medio)
else:
table["Porcentagem"].append(None)
table["Porcentagem"].append(None)
table["Porcentagem"].append(None)
return table
	def build_table_2019_general(self, journals, proceedings, a1_a4, a1, a2, a3, a4,
		b1_b4, b1, b2, b3, b4, others, Irestrito, Igeral, Irestrito_journals, Igeral_journals,
		Irestrito_proceedings, Igeral_proceedings, SE_journals, SE_proceedings, SE_a1_a4, SE_a1,
		SE_a2, SE_a3, SE_a4, SE_b1_b4, SE_b1, SE_b2, SE_b3, SE_b4, SE_others, percentages_SE,
		percentages, Irestrito_medio, Igeral_medio, Irestrito_medio_journals, Igeral_medio_journals,
		Irestrito_medio_proceedings, Igeral_medio_proceedings):
		"""Build the combined (journals + proceedings) Qualis-2019 summary table.

		Returns a dict of five equal-length (21 rows) columns ready for
		DataFrame construction: counts and percentages per Qualis stratum,
		student/egress counts and percentages, and the accumulated
		Irestrito/Igeral indices appended as extra rows.

		NOTE: `percentages_SE` and `percentages` are padded IN PLACE via
		append; callers see the mutated lists.
		"""
		# Build table
		type_qualis = ["Periódicos", "Anais", "A1-A4", "A1", "A2", "A3", "A4", "B1-B4", "B1", "B2", "B3", "B4", "Outros"]
		table = {f"Tipo/Qualis {self.qualis_year}": type_qualis, "Quantidade": [], "Porcentagem": [], 'Quantidade com alunos/egressos':[], "% Alunos/Egressos":[]}
		# Extra label rows for the index section that follows the strata.
		table[f"Tipo/Qualis {self.qualis_year}"].append(None)
		table[f"Tipo/Qualis {self.qualis_year}"].append("Índice")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito Periódicos")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral Periódicos")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito Anais")
		table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral Anais")
		# Counts per row of type_qualis, then the accumulated indices.
		table["Quantidade"].append(journals)
		table["Quantidade"].append(proceedings)
		table["Quantidade"].append(a1_a4)
		table["Quantidade"].append(a1)
		table["Quantidade"].append(a2)
		table["Quantidade"].append(a3)
		table["Quantidade"].append(a4)
		table["Quantidade"].append(b1_b4)
		table["Quantidade"].append(b1)
		table["Quantidade"].append(b2)
		table["Quantidade"].append(b3)
		table["Quantidade"].append(b4)
		table["Quantidade"].append(others)
		table["Quantidade"].append(None)
		table["Quantidade"].append("Acumulado")
		table["Quantidade"].append(Irestrito)
		table["Quantidade"].append(Igeral)
		table["Quantidade"].append(Irestrito_journals)
		table["Quantidade"].append(Igeral_journals)
		table["Quantidade"].append(Irestrito_proceedings)
		table["Quantidade"].append(Igeral_proceedings)
		# Student/egress counts per stratum; the index rows get no values.
		table['Quantidade com alunos/egressos'].append(SE_journals)
		table['Quantidade com alunos/egressos'].append(SE_proceedings)
		table['Quantidade com alunos/egressos'].append(SE_a1_a4)
		table['Quantidade com alunos/egressos'].append(SE_a1)
		table['Quantidade com alunos/egressos'].append(SE_a2)
		table['Quantidade com alunos/egressos'].append(SE_a3)
		table['Quantidade com alunos/egressos'].append(SE_a4)
		table['Quantidade com alunos/egressos'].append(SE_b1_b4)
		table['Quantidade com alunos/egressos'].append(SE_b1)
		table['Quantidade com alunos/egressos'].append(SE_b2)
		table['Quantidade com alunos/egressos'].append(SE_b3)
		table['Quantidade com alunos/egressos'].append(SE_b4)
		table['Quantidade com alunos/egressos'].append(SE_others)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		table['Quantidade com alunos/egressos'].append(None)
		# The caller-supplied percentage lists are reused (and mutated) here.
		table["% Alunos/Egressos"] = percentages_SE
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["% Alunos/Egressos"].append(None)
		table["Porcentagem"] = percentages
		table["Porcentagem"].append(None)
		if self.general:
			# Per-professor averages are only shown in the "general" report.
			table["Porcentagem"].append("Média por docente")
			table["Porcentagem"].append(Irestrito_medio)
			table["Porcentagem"].append(Igeral_medio)
			table["Porcentagem"].append(Irestrito_medio_journals)
			table["Porcentagem"].append(Igeral_medio_journals)
			table["Porcentagem"].append(Irestrito_medio_proceedings)
			table["Porcentagem"].append(Igeral_medio_proceedings)
		else:
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
			table["Porcentagem"].append(None)
		return table
def build_table_2019_separated(self, a1_a4, a1, a2, a3, a4, b1_b4, b1, b2, b3, b4, others,
Irestrito, Igeral, SE_a1_a4, SE_a1, SE_a2, SE_a3, SE_a4, SE_b1_b4, SE_b1, SE_b2, SE_b3, SE_b4,
SE_others, percentages_SE, percentages, Irestrito_medio, Igeral_medio):
# Build table
type_qualis = ["A1-A4", "A1", "A2", "A3", "A4", "B1-B4", "B1", "B2", "B3", "B4", "Outros"]
table = {f"Tipo/Qualis {self.qualis_year}": type_qualis, "Quantidade": [], "Porcentagem": [], 'Quantidade com alunos/egressos':[], "% Alunos/Egressos":[]}
table[f"Tipo/Qualis {self.qualis_year}"].append(None)
table[f"Tipo/Qualis {self.qualis_year}"].append("Índice")
table[f"Tipo/Qualis {self.qualis_year}"].append("Irestrito")
table[f"Tipo/Qualis {self.qualis_year}"].append("Igeral")
table["Quantidade"].append(a1_a4)
table["Quantidade"].append(a1)
table["Quantidade"].append(a2)
table["Quantidade"].append(a3)
table["Quantidade"].append(a4)
table["Quantidade"].append(b1_b4)
table["Quantidade"].append(b1)
table["Quantidade"].append(b2)
table["Quantidade"].append(b3)
table["Quantidade"].append(b4)
table["Quantidade"].append(others)
table["Quantidade"].append(None)
table["Quantidade"].append("Acumulado")
table["Quantidade"].append(Irestrito)
table["Quantidade"].append(Igeral)
table['Quantidade com alunos/egressos'].append(SE_a1_a4)
table['Quantidade com alunos/egressos'].append(SE_a1)
table['Quantidade com alunos/egressos'].append(SE_a2)
table['Quantidade com alunos/egressos'].append(SE_a3)
table['Quantidade com alunos/egressos'].append(SE_a4)
table['Quantidade com alunos/egressos'].append(SE_b1_b4)
table['Quantidade com alunos/egressos'].append(SE_b1)
table['Quantidade com alunos/egressos'].append(SE_b2)
table['Quantidade com alunos/egressos'].append(SE_b3)
table['Quantidade com alunos/egressos'].append(SE_b4)
table['Quantidade com alunos/egressos'].append(SE_others)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table['Quantidade com alunos/egressos'].append(None)
table["% Alunos/Egressos"] = percentages_SE
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["% Alunos/Egressos"].append(None)
table["Porcentagem"] = percentages
table["Porcentagem"].append(None)
if self.general:
table["Porcentagem"].append("Média por docente")
table["Porcentagem"].append(Irestrito_medio)
table["Porcentagem"].append(Igeral_medio)
else:
table["Porcentagem"].append(None)
table["Porcentagem"].append(None)
table["Porcentagem"].append(None)
return table
def get_irestrito_igeral_2016(self, a1, a2, b1, b2, b3, b4, b5):
Irestrito = (a1 + a2*0.85 + b1*0.7)
if Irestrito != 0:
Irestrito = round(Irestrito, 2)
Igeral = (a1 + a2*0.85 + b1*0.7 + b2*0.5 + b3*0.2 + b4*0.1 + b5*0.05)
if Igeral != 0:
Igeral = round(Igeral, 2)
return (Irestrito, Igeral)
def get_irestrito_igeral_2019(self, a1, a2, a3, a4, b1, b2, b3, b4):
Irestrito = a1 + (a2 * 0.875) + (a3 * 0.75) + (a4 * 0.625)
if Irestrito != 0:
Irestrito = round(Irestrito, 2)
Igeral = Irestrito + (b1 * 0.5) + (b2 * 0.2) + (b3 * 0.1) + (b4 * 0.05)
if Igeral != 0:
Igeral = round(Igeral, 2)
return (Irestrito, Igeral)
def apply_3x1_2016(self, a1_journals, a2_journals, b1_journals, b2_journals, b3_journals, b4_journals, b5_journals,
a1_proceedings, a2_proceedings, b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings, b5_proceedings):
slots = {'EA1':a1_journals*3, 'EA2':a2_journals*3, 'EB1':b1_journals*3, 'EB2':b2_journals*3,
'EB3':b3_journals*3, 'EB4':b4_journals*3, 'EB5':b5_journals*3}
events_qualis = {'EA1':a1_proceedings, 'EA2':a2_proceedings, 'EB1':b1_proceedings, 'EB2':b2_proceedings,
'EB3':b3_proceedings, 'EB4':b4_proceedings, 'EB5':b5_proceedings}
remainder = 0
for key in slots.keys():
slots[key] += remainder
remainder = 0
if events_qualis[key] >= slots[key]:
events_qualis[key] = slots[key]
else:
remainder += slots[key] - events_qualis[key]
a1_total = a1_journals + events_qualis['EA1']
a2_total = a2_journals + events_qualis['EA2']
b1_total = b1_journals + events_qualis['EB1']
b2_total = b2_journals + events_qualis['EB2']
b3_total = b3_journals + events_qualis['EB3']
b4_total = b4_journals + events_qualis['EB4']
b5_total = b5_journals + events_qualis['EB5']
Irestrito_3x1_proceedings, Igeral_3x1_proceedings = self.get_irestrito_igeral_2016(events_qualis['EA1'], events_qualis['EA2'], events_qualis['EB1'], events_qualis['EB2'], events_qualis['EB3'], events_qualis['EB4'], events_qualis['EB5'])
Irestrito_3x1_total, Igeral_3x1_total = self.get_irestrito_igeral_2016(a1_total, a2_total, b1_total, b2_total, b3_total, b4_total, b5_total)
return (Irestrito_3x1_proceedings, Igeral_3x1_proceedings, Irestrito_3x1_total, Igeral_3x1_total)
def apply_3x1_2019(self, a1_journals, a2_journals, a3_journals, a4_journals, b1_journals, b2_journals, b3_journals, b4_journals,
a1_proceedings, a2_proceedings, a3_proceedings, a4_proceedings, b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings):
slots = {'EA1':a1_journals*3, 'EA2':a2_journals*3, 'EA3':a3_journals*3, 'EA4':a4_journals*3,
'EB1':b1_journals*3, 'EB2':b2_journals*3, 'EB3':b3_journals*3, 'EB4':b4_journals*3}
events_qualis = {'EA1':a1_proceedings, 'EA2':a2_proceedings, 'EA3':a3_proceedings, 'EA4':a4_proceedings,
'EB1':b1_proceedings, 'EB2':b2_proceedings, 'EB3':b3_proceedings, 'EB4':b4_proceedings}
remainder = 0
for key in slots.keys():
slots[key] += remainder
remainder = 0
if events_qualis[key] >= slots[key]:
events_qualis[key] = slots[key]
else:
remainder += slots[key] - events_qualis[key]
a1_total = a1_journals + events_qualis['EA1']
a2_total = a2_journals + events_qualis['EA2']
a3_total = a3_journals + events_qualis['EA3']
a4_total = a4_journals + events_qualis['EA4']
b1_total = b1_journals + events_qualis['EB1']
b2_total = b2_journals + events_qualis['EB2']
b3_total = b3_journals + events_qualis['EB3']
b4_total = b4_journals + events_qualis['EB4']
Irestrito_3x1_proceedings, Igeral_3x1_proceedings = self.get_irestrito_igeral_2019(events_qualis['EA1'], events_qualis['EA2'], events_qualis['EA3'], events_qualis['EA4'], events_qualis['EB1'], events_qualis['EB2'], events_qualis['EB3'], events_qualis['EB4'])
Irestrito_3x1_total, Igeral_3x1_total = self.get_irestrito_igeral_2019(a1_total, a2_total, a3_total, a4_total, b1_total, b2_total, b3_total, b4_total)
return (Irestrito_3x1_proceedings, Igeral_3x1_proceedings, Irestrito_3x1_total, Igeral_3x1_total)
def get_irestritos(self, Irestrito, Irestrito_journals, Irestrito_proceedings, Irestrito_3x1_proceedings, Irestrito_3x1_total):
self.irestritos = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.irestritos['Total com trava'] = Irestrito_3x1_total
self.irestritos['Total sem trava'] = Irestrito
self.irestritos['Anais com trava'] = Irestrito_3x1_proceedings
self.irestritos['Anais sem trava'] = Irestrito_proceedings
self.irestritos['Periódicos'] = Irestrito_journals
def get_igerais(self, Igeral, Igeral_journals, Igeral_proceedings, Igeral_3x1_proceedings, Igeral_3x1_total):
self.igerais = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.igerais['Total com trava'] = Igeral_3x1_total
self.igerais['Total sem trava'] = Igeral
self.igerais['Anais com trava'] = Igeral_3x1_proceedings
self.igerais['Anais sem trava'] = Igeral_proceedings
self.igerais['Periódicos'] = Igeral_journals
    def get_indicators_2016(self):
        """Compute publication indicators using the 2016 Qualis strata (A1..B5).

        Builds a DataFrame from ``self.info``, counts publications per Qualis
        stratum for all publications, journals ("Periódico") and proceedings
        ("Anais"), derives percentages and student/egress ("SE") counts via
        ``self.calculate_amount``, computes Irestrito/Igeral via
        ``self.get_irestrito_igeral_2016``, and assembles three report tables.
        When ``self.general`` is True it also applies the 3x1 cap
        (``self.apply_3x1_2016``) and stores results via ``self.get_irestritos``
        / ``self.get_igerais``.

        Returns:
            tuple: (general table, journals table, proceedings table), each a
            ``pd.DataFrame``.
        """
        data_frame = pd.DataFrame(self.info)
        # Get total of publications that are not books or chapters
        total_articles = 0
        for i in data_frame["Tipo"]:
            if i != "Livros" and i != "Capítulos":
                total_articles += 1
        if total_articles != 0:
            perc_aux = 100/total_articles
        else:
            perc_aux = 0
        journals_df = data_frame.loc[data_frame["Tipo"] == "Periódico"] # Get all publications on journals
        journals, SE_journals, perc_journals, perc_SE_journals = self.calculate_amount(journals_df, perc_aux) # Perform calculations
        # (amount of journals, amount of journals with students or egress as authors, percentage of publications on journals, percentage of publications on journals with students or egress as authors)
        if journals != 0:
            perc_aux_journals = 100/journals
        else:
            perc_aux_journals = 0
        proceedings_df = data_frame.loc[data_frame["Tipo"] == "Anais"] # Get all publications on events
        proceedings, SE_proceedings, perc_proceedings, perc_SE_proceedings = self.calculate_amount(proceedings_df, perc_aux) # Perform calculations
        if proceedings != 0:
            perc_aux_proceedings = 100/proceedings
        else:
            perc_aux_proceedings = 0
        # ==========================================================================================================
        a1 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A1"] # Get all publications with "A1" Qualis
        a1, SE_a1, perc_a1, perc_SE_a1 = self.calculate_amount(a1, perc_aux) # Perform calculations
        a1_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A1"] # Get all journals with "A1" Qualis
        a1_journals, SE_a1_journals, perc_a1_journals, perc_SE_a1_journals = self.calculate_amount(a1_journals, perc_aux_journals) # Perform calculations
        a1_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A1"] # Get all proceedings with "A1" Qualis
        a1_proceedings, SE_a1_proceedings, perc_a1_proceedings, perc_SE_a1_proceedings = self.calculate_amount(a1_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        a2 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A2"] # Get all publications with "A2" Qualis
        a2, SE_a2, perc_a2, perc_SE_a2 = self.calculate_amount(a2, perc_aux) # Perform calculations
        a2_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A2"] # Get all journals with "A2" Qualis
        a2_journals, SE_a2_journals, perc_a2_journals, perc_SE_a2_journals = self.calculate_amount(a2_journals, perc_aux_journals) # Perform calculations
        a2_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A2"] # Get all proceedings with "A2" Qualis
        a2_proceedings, SE_a2_proceedings, perc_a2_proceedings, perc_SE_a2_proceedings = self.calculate_amount(a2_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b1 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B1"] # Get all publications with "B1" Qualis
        b1, SE_b1, perc_b1, perc_SE_b1 = self.calculate_amount(b1, perc_aux) # Perform calculations
        b1_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B1"] # Get all journals with "B1" Qualis
        b1_journals, SE_b1_journals, perc_b1_journals, perc_SE_b1_journals = self.calculate_amount(b1_journals, perc_aux_journals) # Perform calculations
        b1_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B1"] # Get all proceedings with "B1" Qualis
        b1_proceedings, SE_b1_proceedings, perc_b1_proceedings, perc_SE_b1_proceedings = self.calculate_amount(b1_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b2 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B2"] # Get all publications with "B2" Qualis
        b2, SE_b2, perc_b2, perc_SE_b2 = self.calculate_amount(b2, perc_aux) # Perform calculations
        b2_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B2"] # Get all journals with "B2" Qualis
        b2_journals, SE_b2_journals, perc_b2_journals, perc_SE_b2_journals = self.calculate_amount(b2_journals, perc_aux_journals) # Perform calculations
        b2_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B2"] # Get all proceedings with "B2" Qualis
        b2_proceedings, SE_b2_proceedings, perc_b2_proceedings, perc_SE_b2_proceedings = self.calculate_amount(b2_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b3 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B3"] # Get all publications with "B3" Qualis
        b3, SE_b3, perc_b3, perc_SE_b3 = self.calculate_amount(b3, perc_aux) # Perform calculations
        b3_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B3"] # Get all journals with "B3" Qualis
        b3_journals, SE_b3_journals, perc_b3_journals, perc_SE_b3_journals = self.calculate_amount(b3_journals, perc_aux_journals) # Perform calculations
        b3_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B3"] # Get all proceedings with "B3" Qualis
        b3_proceedings, SE_b3_proceedings, perc_b3_proceedings, perc_SE_b3_proceedings = self.calculate_amount(b3_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b4 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B4"] # Get all publications with "B4" Qualis
        b4, SE_b4, perc_b4, perc_SE_b4 = self.calculate_amount(b4, perc_aux) # Perform calculations
        b4_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B4"] # Get all journals with "B4" Qualis
        b4_journals, SE_b4_journals, perc_b4_journals, perc_SE_b4_journals = self.calculate_amount(b4_journals, perc_aux_journals) # Perform calculations
        b4_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B4"] # Get all proceedings with "B4" Qualis
        b4_proceedings, SE_b4_proceedings, perc_b4_proceedings, perc_SE_b4_proceedings = self.calculate_amount(b4_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b5 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B5"] # Get all publications with "B5" Qualis
        b5, SE_b5, perc_b5, perc_SE_b5 = self.calculate_amount(b5, perc_aux) # Perform calculations
        b5_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B5"] # Get all journals with "B5" Qualis
        b5_journals, SE_b5_journals, perc_b5_journals, perc_SE_b5_journals = self.calculate_amount(b5_journals, perc_aux_journals) # Perform calculations
        b5_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B5"] # Get all proceedings with "B5" Qualis
        b5_proceedings, SE_b5_proceedings, perc_b5_proceedings, perc_SE_b5_proceedings = self.calculate_amount(b5_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        # A1-B1 (all merged)
        a1_b1 = a1 + a2 + b1
        SE_a1_b1 = SE_a1 + SE_a2 + SE_b1
        perc_a1_b1 = f"{perc_aux * a1_b1:.2f}%"
        try:
            perc_SE_a1_b1 = f"{100/a1_b1 * SE_a1_b1:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_b1 = "0%"
        # A1-B1 (all merged) - Journals
        a1_b1_journals = a1_journals + a2_journals + b1_journals
        SE_a1_b1_journals = SE_a1_journals + SE_a2_journals + SE_b1_journals
        perc_a1_b1_journals = f"{perc_aux_journals * a1_b1_journals:.2f}%"
        try:
            perc_SE_a1_b1_journals = f"{100/a1_b1_journals * SE_a1_b1_journals:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_b1_journals = "0%"
        # A1-B1 (all merged) - Proceedings
        a1_b1_proceedings = a1_proceedings + a2_proceedings + b1_proceedings
        SE_a1_b1_proceedings = SE_a1_proceedings + SE_a2_proceedings + SE_b1_proceedings
        perc_a1_b1_proceedings = f"{perc_aux_proceedings * a1_b1_proceedings:.2f}%"
        try:
            perc_SE_a1_b1_proceedings = f"{100/a1_b1_proceedings * SE_a1_b1_proceedings:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_b1_proceedings = "0%"
        # ==========================================================================================================
        # B2-B5 (all merged)
        b2_b5 = b2 + b3 + b4 + b5
        SE_b2_b5 = SE_b2 + SE_b3 + SE_b4 + SE_b5
        perc_b2_b5 = f"{perc_aux * b2_b5:.2f}%"
        try:
            perc_SE_b2_b5 = f"{100/b2_b5 * SE_b2_b5:.2f}%"
        except ZeroDivisionError:
            perc_SE_b2_b5 = "0%"
        # B2-B5 (all merged) - Journals
        b2_b5_journals = b2_journals + b3_journals + b4_journals + b5_journals
        SE_b2_b5_journals = SE_b2_journals + SE_b3_journals + SE_b4_journals + SE_b5_journals
        perc_b2_b5_journals = f"{perc_aux_journals * b2_b5_journals:.2f}%"
        try:
            perc_SE_b2_b5_journals = f"{100/b2_b5_journals * SE_b2_b5_journals:.2f}%"
        except ZeroDivisionError:
            perc_SE_b2_b5_journals = "0%"
        # B2-B5 (all merged) - Proceedings
        b2_b5_proceedings = b2_proceedings + b3_proceedings + b4_proceedings + b5_proceedings
        SE_b2_b5_proceedings = SE_b2_proceedings + SE_b3_proceedings + SE_b4_proceedings + SE_b5_proceedings
        perc_b2_b5_proceedings = f"{perc_aux_proceedings * b2_b5_proceedings:.2f}%"
        try:
            perc_SE_b2_b5_proceedings = f"{100/b2_b5_proceedings * SE_b2_b5_proceedings:.2f}%"
        except ZeroDivisionError:
            perc_SE_b2_b5_proceedings = "0%"
        # ==========================================================================================================
        # Other - Not in A1-B1 or B2-B5
        # NOTE(review): the filter also excludes A3/A4, which do not exist in the
        # 2016 Qualis scheme — harmless here, apparently copied from the 2019 variant.
        others = data_frame.loc[((data_frame[f"Qualis {self.qualis_year}"] != "A1") & (data_frame[f"Qualis {self.qualis_year}"] != "A2") & (data_frame[f"Qualis {self.qualis_year}"] != "A3") & (data_frame[f"Qualis {self.qualis_year}"] != "A4") & (data_frame["Tipo"] != "Livros") & (data_frame["Tipo"] != "Capítulos"))]
        others = others.loc[((others[f"Qualis {self.qualis_year}"] != "B1") & (others[f"Qualis {self.qualis_year}"] != "B2") & (others[f"Qualis {self.qualis_year}"] != "B3") & (others[f"Qualis {self.qualis_year}"] != "B4") & (others[f"Qualis {self.qualis_year}"] != "B5"))]
        others, SE_others, perc_others, perc_SE_others = self.calculate_amount(others, perc_aux) # Perform calculations
        # Other - Not in A1-B1 or B2-B5 - Journals
        others_journals = journals_df.loc[((journals_df[f"Qualis {self.qualis_year}"] != "A1") & (journals_df[f"Qualis {self.qualis_year}"] != "A2") & (journals_df[f"Qualis {self.qualis_year}"] != "A3") & (journals_df[f"Qualis {self.qualis_year}"] != "A4") & (journals_df["Tipo"] != "Livros") & (journals_df["Tipo"] != "Capítulos"))]
        others_journals = others_journals.loc[((others_journals[f"Qualis {self.qualis_year}"] != "B1") & (others_journals[f"Qualis {self.qualis_year}"] != "B2") & (others_journals[f"Qualis {self.qualis_year}"] != "B3") & (others_journals[f"Qualis {self.qualis_year}"] != "B4") & (others_journals[f"Qualis {self.qualis_year}"] != "B5"))]
        others_journals, SE_others_journals, perc_others_journals, perc_SE_others_journals = self.calculate_amount(others_journals, perc_aux_journals) # Perform calculations
        # Other - Not in A1-B1 or B2-B5 - Proceedings
        others_proceedings = proceedings_df.loc[((proceedings_df[f"Qualis {self.qualis_year}"] != "A1") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A2") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A3") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A4") & (proceedings_df["Tipo"] != "Livros") & (proceedings_df["Tipo"] != "Capítulos"))]
        others_proceedings = others_proceedings.loc[((others_proceedings[f"Qualis {self.qualis_year}"] != "B1") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B2") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B3") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B4") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B5"))]
        others_proceedings, SE_others_proceedings, perc_others_proceedings, perc_SE_others_proceedings = self.calculate_amount(others_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        percentages = [perc_journals, perc_proceedings, perc_a1_b1, perc_a1, perc_a2, perc_b1, perc_b2_b5, perc_b2, perc_b3, perc_b4, perc_b5, perc_others]
        percentages_SE = [perc_SE_journals, perc_SE_proceedings, perc_SE_a1_b1, perc_SE_a1, perc_SE_a2, perc_SE_b1, perc_SE_b2_b5, perc_SE_b2, perc_SE_b3, perc_SE_b4, perc_SE_b5, perc_SE_others]
        percentages_journals = [perc_a1_b1_journals, perc_a1_journals, perc_a2_journals, perc_b1_journals, perc_b2_b5_journals, perc_b2_journals, perc_b3_journals, perc_b4_journals, perc_b5_journals, perc_others_journals]
        percentages_SE_journals = [perc_SE_a1_b1_journals, perc_SE_a1_journals, perc_SE_a2_journals, perc_SE_b1_journals, perc_SE_b2_b5_journals, perc_SE_b2_journals, perc_SE_b3_journals, perc_SE_b4_journals, perc_SE_b5_journals, perc_SE_others_journals]
        percentages_proceedings = [perc_a1_b1_proceedings, perc_a1_proceedings, perc_a2_proceedings, perc_b1_proceedings, perc_b2_b5_proceedings, perc_b2_proceedings, perc_b3_proceedings, perc_b4_proceedings, perc_b5_proceedings, perc_others_proceedings]
        percentages_SE_proceedings = [perc_SE_a1_b1_proceedings, perc_SE_a1_proceedings, perc_SE_a2_proceedings, perc_SE_b1_proceedings, perc_SE_b2_b5_proceedings, perc_SE_b2_proceedings, perc_SE_b3_proceedings, perc_SE_b4_proceedings, perc_SE_b5_proceedings, perc_SE_others_proceedings]
        # ==========================================================================================================
        # Calculate Irestrito and Igeral; the "medio" variants divide by ND.
        # NOTE(review): ND is not defined in this method — presumably a
        # module-level count of faculty members ("docentes"); confirm it is set
        # (and non-zero) before this method runs.
        Irestrito, Igeral = self.get_irestrito_igeral_2016(a1, a2, b1, b2, b3, b4, b5)
        if Irestrito != 0:
            Irestrito_medio = round((Irestrito/ND), 2)
        else:
            Irestrito_medio = 0
        if Igeral != 0:
            Igeral_medio = round((Igeral/ND), 2)
        else:
            Igeral_medio = 0
        Irestrito_journals, Igeral_journals = self.get_irestrito_igeral_2016(a1_journals, a2_journals, b1_journals, b2_journals, b3_journals, b4_journals, b5_journals)
        if Irestrito_journals != 0:
            Irestrito_medio_journals = round((Irestrito_journals/ND), 2)
        else:
            Irestrito_medio_journals = 0
        if Igeral_journals != 0:
            Igeral_medio_journals = round((Igeral_journals/ND), 2)
        else:
            Igeral_medio_journals = 0
        Irestrito_proceedings, Igeral_proceedings = self.get_irestrito_igeral_2016(a1_proceedings, a2_proceedings, b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings, b5_proceedings)
        if Irestrito_proceedings != 0:
            Irestrito_medio_proceedings = round((Irestrito_proceedings/ND), 2)
        else:
            Irestrito_medio_proceedings = 0
        if Igeral_proceedings != 0:
            Igeral_medio_proceedings = round((Igeral_proceedings/ND), 2)
        else:
            Igeral_medio_proceedings = 0
        # ==========================================================================================================
        # Assemble the three report tables (general, journals-only, proceedings-only).
        table_general = self.build_table_2016_general(journals, proceedings, a1_b1, a1, a2, b1,
            b2_b5, b2, b3, b4, b5, others, Irestrito, Irestrito_journals, Irestrito_proceedings,
            Igeral, Igeral_journals, Igeral_proceedings, SE_journals, SE_proceedings, SE_a1_b1,
            SE_a1, SE_a2, SE_b1, SE_b2_b5, SE_b2, SE_b3, SE_b4, SE_b5, SE_others, percentages_SE,
            percentages, Irestrito_medio, Irestrito_medio_journals, Irestrito_medio_proceedings,
            Igeral_medio, Igeral_medio_journals, Igeral_medio_proceedings)
        table_journals = self.build_table_2016_separated(a1_b1_journals, a1_journals, a2_journals, b1_journals,
            b2_b5_journals, b2_journals, b3_journals, b4_journals, b5_journals, others_journals, Irestrito_journals,
            Igeral_journals, SE_a1_b1_journals, SE_a1_journals, SE_a2_journals, SE_b1_journals, SE_b2_b5_journals,
            SE_b2_journals, SE_b3_journals, SE_b4_journals, SE_b5_journals, SE_others_journals, percentages_SE_journals,
            percentages_journals, Irestrito_medio_journals, Igeral_medio_journals)
        table_proceedings = self.build_table_2016_separated(a1_b1_proceedings, a1_proceedings, a2_proceedings, b1_proceedings,
            b2_b5_proceedings, b2_proceedings, b3_proceedings, b4_proceedings, b5_proceedings, others_proceedings, Irestrito_proceedings,
            Igeral_proceedings, SE_a1_b1_proceedings, SE_a1_proceedings, SE_a2_proceedings, SE_b1_proceedings, SE_b2_b5_proceedings,
            SE_b2_proceedings, SE_b3_proceedings, SE_b4_proceedings, SE_b5_proceedings, SE_others_proceedings, percentages_SE_proceedings,
            percentages_proceedings, Irestrito_medio_proceedings, Igeral_medio_proceedings)
        # Only the "general" run applies the 3x1 cap and publishes the summary dicts.
        if self.general == True:
            Irestrito_3x1_proceedings, Igeral_3x1_proceedings, Irestrito_3x1_total, Igeral_3x1_total = self.apply_3x1_2016(a1_journals, a2_journals,
                b1_journals, b2_journals, b3_journals, b4_journals, b5_journals, a1_proceedings, a2_proceedings,
                b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings, b5_proceedings)
            self.get_irestritos(Irestrito, Irestrito_journals, Irestrito_proceedings, Irestrito_3x1_proceedings, Irestrito_3x1_total)
            self.get_igerais(Igeral, Igeral_journals, Igeral_proceedings, Igeral_3x1_proceedings, Igeral_3x1_total)
        return (pd.DataFrame(table_general), pd.DataFrame(table_journals), pd.DataFrame(table_proceedings))
    def get_indicators_2019(self):
        """Compute publication indicators using the 2019 Qualis strata (A1..B4).

        Mirrors :meth:`get_indicators_2016` but for the 2019 classification:
        counts per stratum for all publications, journals ("Periódico") and
        proceedings ("Anais"), percentages and student/egress ("SE") counts via
        ``self.calculate_amount``, Irestrito/Igeral via
        ``self.get_irestrito_igeral_2019``, plus merged A1-A4 and B1-B4 rows.
        When ``self.general`` is True it also applies the 3x1 cap
        (``self.apply_3x1_2019``) and stores results via ``self.get_irestritos``
        / ``self.get_igerais``.

        Returns:
            tuple: (general table, journals table, proceedings table), each a
            ``pd.DataFrame``.
        """
        data_frame = pd.DataFrame(self.info)
        # Get total of publications that are not books or chapters
        total_articles = 0
        for i in data_frame["Tipo"]:
            if i != "Livros" and i != "Capítulos":
                total_articles += 1
        if total_articles != 0:
            perc_aux = 100/total_articles
        else:
            perc_aux = 0
        journals_df = data_frame.loc[data_frame["Tipo"] == "Periódico"] # Get all publications on journals
        journals, SE_journals, perc_journals, perc_SE_journals = self.calculate_amount(journals_df, perc_aux) # Perform calculations
        # (amount of journals, amount of journals with students or egress as authors, percentage of publications on journals, percentage of publications on journals with students or egress as authors)
        if journals != 0:
            perc_aux_journals = 100/journals
        else:
            perc_aux_journals = 0
        proceedings_df = data_frame.loc[data_frame["Tipo"] == "Anais"] # Get all publications on events
        proceedings, SE_proceedings, perc_proceedings, perc_SE_proceedings = self.calculate_amount(proceedings_df, perc_aux) # Perform calculations
        if proceedings != 0:
            perc_aux_proceedings = 100/proceedings
        else:
            perc_aux_proceedings = 0
        # ==========================================================================================================
        a1 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A1"] # Get all publications with "A1" Qualis
        a1, SE_a1, perc_a1, perc_SE_a1 = self.calculate_amount(a1, perc_aux) # Perform calculations
        a1_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A1"] # Get all journals with "A1" Qualis
        a1_journals, SE_a1_journals, perc_a1_journals, perc_SE_a1_journals = self.calculate_amount(a1_journals, perc_aux_journals) # Perform calculations
        a1_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A1"] # Get all proceedings with "A1" Qualis
        a1_proceedings, SE_a1_proceedings, perc_a1_proceedings, perc_SE_a1_proceedings = self.calculate_amount(a1_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        a2 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A2"] # Get all publications with "A2" Qualis
        a2, SE_a2, perc_a2, perc_SE_a2 = self.calculate_amount(a2, perc_aux) # Perform calculations
        a2_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A2"] # Get all journals with "A2" Qualis
        a2_journals, SE_a2_journals, perc_a2_journals, perc_SE_a2_journals = self.calculate_amount(a2_journals, perc_aux_journals) # Perform calculations
        a2_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A2"] # Get all proceedings with "A2" Qualis
        a2_proceedings, SE_a2_proceedings, perc_a2_proceedings, perc_SE_a2_proceedings = self.calculate_amount(a2_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        a3 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A3"] # Get all publications with "A3" Qualis
        a3, SE_a3, perc_a3, perc_SE_a3 = self.calculate_amount(a3, perc_aux) # Perform calculations
        a3_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A3"] # Get all journals with "A3" Qualis
        a3_journals, SE_a3_journals, perc_a3_journals, perc_SE_a3_journals = self.calculate_amount(a3_journals, perc_aux_journals) # Perform calculations
        a3_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A3"] # Get all proceedings with "A3" Qualis
        a3_proceedings, SE_a3_proceedings, perc_a3_proceedings, perc_SE_a3_proceedings = self.calculate_amount(a3_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        a4 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "A4"] # Get all publications with "A4" Qualis
        a4, SE_a4, perc_a4, perc_SE_a4 = self.calculate_amount(a4, perc_aux) # Perform calculations
        a4_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "A4"] # Get all journals with "A4" Qualis
        a4_journals, SE_a4_journals, perc_a4_journals, perc_SE_a4_journals = self.calculate_amount(a4_journals, perc_aux_journals) # Perform calculations
        a4_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "A4"] # Get all proceedings with "A4" Qualis
        a4_proceedings, SE_a4_proceedings, perc_a4_proceedings, perc_SE_a4_proceedings = self.calculate_amount(a4_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b1 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B1"] # Get all publications with "B1" Qualis
        b1, SE_b1, perc_b1, perc_SE_b1 = self.calculate_amount(b1, perc_aux) # Perform calculations
        b1_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B1"] # Get all journals with "B1" Qualis
        b1_journals, SE_b1_journals, perc_b1_journals, perc_SE_b1_journals = self.calculate_amount(b1_journals, perc_aux_journals) # Perform calculations
        b1_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B1"] # Get all proceedings with "B1" Qualis
        b1_proceedings, SE_b1_proceedings, perc_b1_proceedings, perc_SE_b1_proceedings = self.calculate_amount(b1_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b2 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B2"] # Get all publications with "B2" Qualis
        b2, SE_b2, perc_b2, perc_SE_b2 = self.calculate_amount(b2, perc_aux) # Perform calculations
        b2_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B2"] # Get all journals with "B2" Qualis
        b2_journals, SE_b2_journals, perc_b2_journals, perc_SE_b2_journals = self.calculate_amount(b2_journals, perc_aux_journals) # Perform calculations
        b2_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B2"] # Get all proceedings with "B2" Qualis
        b2_proceedings, SE_b2_proceedings, perc_b2_proceedings, perc_SE_b2_proceedings = self.calculate_amount(b2_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b3 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B3"] # Get all publications with "B3" Qualis
        b3, SE_b3, perc_b3, perc_SE_b3 = self.calculate_amount(b3, perc_aux) # Perform calculations
        b3_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B3"] # Get all journals with "B3" Qualis
        b3_journals, SE_b3_journals, perc_b3_journals, perc_SE_b3_journals = self.calculate_amount(b3_journals, perc_aux_journals) # Perform calculations
        b3_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B3"] # Get all proceedings with "B3" Qualis
        b3_proceedings, SE_b3_proceedings, perc_b3_proceedings, perc_SE_b3_proceedings = self.calculate_amount(b3_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        b4 = data_frame.loc[data_frame[f"Qualis {self.qualis_year}"] == "B4"] # Get all publications with "B4" Qualis
        b4, SE_b4, perc_b4, perc_SE_b4 = self.calculate_amount(b4, perc_aux) # Perform calculations
        b4_journals = journals_df.loc[journals_df[f"Qualis {self.qualis_year}"] == "B4"] # Get all journals with "B4" Qualis
        b4_journals, SE_b4_journals, perc_b4_journals, perc_SE_b4_journals = self.calculate_amount(b4_journals, perc_aux_journals) # Perform calculations
        b4_proceedings = proceedings_df.loc[proceedings_df[f"Qualis {self.qualis_year}"] == "B4"] # Get all proceedings with "B4" Qualis
        b4_proceedings, SE_b4_proceedings, perc_b4_proceedings, perc_SE_b4_proceedings = self.calculate_amount(b4_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        # A1-A4 (all merged)
        a1_a4 = a1 + a2 + a3 + a4
        SE_a1_a4 = SE_a1 + SE_a2 + SE_a3 + SE_a4
        perc_a1_a4 = f"{perc_aux * a1_a4:.2f}%"
        try:
            perc_SE_a1_a4 = f"{100/a1_a4 * SE_a1_a4:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_a4 = "0%"
        # A1-A4 (all merged) - Journals
        a1_a4_journals = a1_journals + a2_journals + a3_journals + a4_journals
        SE_a1_a4_journals = SE_a1_journals + SE_a2_journals + SE_a3_journals + SE_a4_journals
        perc_a1_a4_journals = f"{perc_aux_journals * a1_a4_journals:.2f}%"
        try:
            perc_SE_a1_a4_journals = f"{100/a1_a4_journals * SE_a1_a4_journals:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_a4_journals = "0%"
        # A1-A4 (all merged) - Proceedings
        a1_a4_proceedings = a1_proceedings + a2_proceedings + a3_proceedings + a4_proceedings
        SE_a1_a4_proceedings = SE_a1_proceedings + SE_a2_proceedings + SE_a3_proceedings + SE_a4_proceedings
        perc_a1_a4_proceedings = f"{perc_aux_proceedings * a1_a4_proceedings:.2f}%"
        try:
            perc_SE_a1_a4_proceedings = f"{100/a1_a4_proceedings * SE_a1_a4_proceedings:.2f}%"
        except ZeroDivisionError:
            perc_SE_a1_a4_proceedings = "0%"
        # ==========================================================================================================
        # B1-B4 (all merged)
        b1_b4 = b1 + b2 + b3 + b4
        SE_b1_b4 = SE_b1 + SE_b2 + SE_b3 + SE_b4
        perc_b1_b4 = f"{perc_aux * b1_b4:.2f}%"
        try:
            perc_SE_b1_b4 = f"{100/b1_b4 * SE_b1_b4:.2f}%"
        except ZeroDivisionError:
            perc_SE_b1_b4 = "0%"
        # B1-B4 (all merged) - Journals
        b1_b4_journals = b1_journals + b2_journals + b3_journals + b4_journals
        SE_b1_b4_journals = SE_b1_journals + SE_b2_journals + SE_b3_journals + SE_b4_journals
        perc_b1_b4_journals = f"{perc_aux_journals * b1_b4_journals:.2f}%"
        try:
            perc_SE_b1_b4_journals = f"{100/b1_b4_journals * SE_b1_b4_journals:.2f}%"
        except ZeroDivisionError:
            perc_SE_b1_b4_journals = "0%"
        # B1-B4 (all merged) - Proceedings
        b1_b4_proceedings = b1_proceedings + b2_proceedings + b3_proceedings + b4_proceedings
        SE_b1_b4_proceedings = SE_b1_proceedings + SE_b2_proceedings + SE_b3_proceedings + SE_b4_proceedings
        perc_b1_b4_proceedings = f"{perc_aux_proceedings * b1_b4_proceedings:.2f}%"
        try:
            perc_SE_b1_b4_proceedings = f"{100/b1_b4_proceedings * SE_b1_b4_proceedings:.2f}%"
        except ZeroDivisionError:
            perc_SE_b1_b4_proceedings = "0%"
        # ==========================================================================================================
        # Other - Not in A1-A4 or B1-B4
        # NOTE(review): the filter also excludes "B5", which does not exist in
        # the 2019 Qualis scheme — harmless, shared with the 2016 variant.
        others = data_frame.loc[((data_frame[f"Qualis {self.qualis_year}"] != "A1") & (data_frame[f"Qualis {self.qualis_year}"] != "A2") & (data_frame[f"Qualis {self.qualis_year}"] != "A3") & (data_frame[f"Qualis {self.qualis_year}"] != "A4") & (data_frame["Tipo"] != "Livros") & (data_frame["Tipo"] != "Capítulos"))]
        others = others.loc[((others[f"Qualis {self.qualis_year}"] != "B1") & (others[f"Qualis {self.qualis_year}"] != "B2") & (others[f"Qualis {self.qualis_year}"] != "B3") & (others[f"Qualis {self.qualis_year}"] != "B4") & (others[f"Qualis {self.qualis_year}"] != "B5"))]
        others, SE_others, perc_others, perc_SE_others = self.calculate_amount(others, perc_aux) # Perform calculations
        # Other - Not in A1-A4 or B1-B4 - Journals
        others_journals = journals_df.loc[((journals_df[f"Qualis {self.qualis_year}"] != "A1") & (journals_df[f"Qualis {self.qualis_year}"] != "A2") & (journals_df[f"Qualis {self.qualis_year}"] != "A3") & (journals_df[f"Qualis {self.qualis_year}"] != "A4") & (journals_df["Tipo"] != "Livros") & (journals_df["Tipo"] != "Capítulos"))]
        others_journals = others_journals.loc[((others_journals[f"Qualis {self.qualis_year}"] != "B1") & (others_journals[f"Qualis {self.qualis_year}"] != "B2") & (others_journals[f"Qualis {self.qualis_year}"] != "B3") & (others_journals[f"Qualis {self.qualis_year}"] != "B4") & (others_journals[f"Qualis {self.qualis_year}"] != "B5"))]
        others_journals, SE_others_journals, perc_others_journals, perc_SE_others_journals = self.calculate_amount(others_journals, perc_aux_journals) # Perform calculations
        # Other - Not in A1-A4 or B1-B4 - Proceedings
        others_proceedings = proceedings_df.loc[((proceedings_df[f"Qualis {self.qualis_year}"] != "A1") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A2") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A3") & (proceedings_df[f"Qualis {self.qualis_year}"] != "A4") & (proceedings_df["Tipo"] != "Livros") & (proceedings_df["Tipo"] != "Capítulos"))]
        others_proceedings = others_proceedings.loc[((others_proceedings[f"Qualis {self.qualis_year}"] != "B1") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B2") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B3") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B4") & (others_proceedings[f"Qualis {self.qualis_year}"] != "B5"))]
        others_proceedings, SE_others_proceedings, perc_others_proceedings, perc_SE_others_proceedings = self.calculate_amount(others_proceedings, perc_aux_proceedings) # Perform calculations
        # ==========================================================================================================
        percentages = [perc_journals, perc_proceedings, perc_a1_a4, perc_a1, perc_a2, perc_a3, perc_a4, perc_b1_b4, perc_b1, perc_b2, perc_b3, perc_b4, perc_others]
        percentages_SE = [perc_SE_journals, perc_SE_proceedings, perc_SE_a1_a4, perc_SE_a1, perc_SE_a2, perc_SE_a3, perc_SE_a4, perc_SE_b1_b4, perc_SE_b1, perc_SE_b2, perc_SE_b3, perc_SE_b4, perc_SE_others]
        percentages_journals = [perc_a1_a4_journals, perc_a1_journals, perc_a2_journals, perc_a3_journals, perc_a4_journals, perc_b1_b4_journals, perc_b1_journals, perc_b2_journals, perc_b3_journals, perc_b4_journals, perc_others_journals]
        percentages_SE_journals = [perc_SE_a1_a4_journals, perc_SE_a1_journals, perc_SE_a2_journals, perc_SE_a3_journals, perc_SE_a4_journals, perc_SE_b1_b4_journals, perc_SE_b1_journals, perc_SE_b2_journals, perc_SE_b3_journals, perc_SE_b4_journals, perc_SE_others_journals]
        percentages_proceedings = [perc_a1_a4_proceedings, perc_a1_proceedings, perc_a2_proceedings, perc_a3_proceedings, perc_a4_proceedings, perc_b1_b4_proceedings, perc_b1_proceedings, perc_b2_proceedings, perc_b3_proceedings, perc_b4_proceedings, perc_others_proceedings]
        percentages_SE_proceedings = [perc_SE_a1_a4_proceedings, perc_SE_a1_proceedings, perc_SE_a2_proceedings, perc_SE_a3_proceedings, perc_SE_a4_proceedings, perc_SE_b1_b4_proceedings, perc_SE_b1_proceedings, perc_SE_b2_proceedings, perc_SE_b3_proceedings, perc_SE_b4_proceedings, perc_SE_others_proceedings]
        # ==========================================================================================================
        # Calculate Irestrito and Igeral
        # NOTE(review): ND is not defined in this method — presumably a
        # module-level count of faculty members ("docentes"); confirm it is set
        # (and non-zero) before this method runs.
        Irestrito, Igeral = self.get_irestrito_igeral_2019(a1, a2, a3, a4, b1, b2, b3, b4)
        if Irestrito != 0:
            Irestrito_medio = round((Irestrito/ND), 2)
        else:
            Irestrito_medio = 0
        if Igeral != 0:
            Igeral_medio = round((Igeral/ND), 2)
        else:
            Igeral_medio = 0
        Irestrito_journals, Igeral_journals = self.get_irestrito_igeral_2019(a1_journals, a2_journals, a3_journals, a4_journals, b1_journals, b2_journals, b3_journals, b4_journals)
        if Irestrito_journals != 0:
            Irestrito_medio_journals = round((Irestrito_journals/ND), 2)
        else:
            Irestrito_medio_journals = 0
        if Igeral_journals != 0:
            Igeral_medio_journals = round((Igeral_journals/ND), 2)
        else:
            Igeral_medio_journals = 0
        Irestrito_proceedings, Igeral_proceedings = self.get_irestrito_igeral_2019(a1_proceedings, a2_proceedings, a3_proceedings, a4_proceedings, b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings)
        if Irestrito_proceedings != 0:
            Irestrito_medio_proceedings = round((Irestrito_proceedings/ND), 2)
        else:
            Irestrito_medio_proceedings = 0
        if Igeral_proceedings != 0:
            Igeral_medio_proceedings = round((Igeral_proceedings/ND), 2)
        else:
            Igeral_medio_proceedings = 0
        # ==========================================================================================================
        # Assemble the three report tables (general, journals-only, proceedings-only).
        table_general = self.build_table_2019_general(journals, proceedings, a1_a4, a1, a2, a3, a4,
            b1_b4, b1, b2, b3, b4, others, Irestrito, Igeral, Irestrito_journals, Igeral_journals,
            Irestrito_proceedings, Igeral_proceedings, SE_journals, SE_proceedings, SE_a1_a4, SE_a1,
            SE_a2, SE_a3, SE_a4, SE_b1_b4, SE_b1, SE_b2, SE_b3, SE_b4, SE_others, percentages_SE,
            percentages, Irestrito_medio, Igeral_medio, Irestrito_medio_journals, Igeral_medio_journals,
            Irestrito_medio_proceedings, Igeral_medio_proceedings)
        table_journals = self.build_table_2019_separated(a1_a4_journals, a1_journals, a2_journals, a3_journals, a4_journals,
            b1_b4_journals, b1_journals, b2_journals, b3_journals, b4_journals, others_journals, Irestrito_journals,
            Igeral_journals, SE_a1_a4_journals, SE_a1_journals, SE_a2_journals, SE_a3_journals, SE_a4_journals,
            SE_b1_b4_journals, SE_b1_journals, SE_b2_journals, SE_b3_journals, SE_b4_journals, SE_others_journals,
            percentages_SE_journals, percentages_journals, Irestrito_medio_journals, Igeral_medio_journals)
        table_proceedings = self.build_table_2019_separated(a1_a4_proceedings, a1_proceedings, a2_proceedings, a3_proceedings, a4_proceedings,
            b1_b4_proceedings, b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings, others_proceedings, Irestrito_proceedings,
            Igeral_proceedings, SE_a1_a4_proceedings, SE_a1_proceedings, SE_a2_proceedings, SE_a3_proceedings, SE_a4_proceedings,
            SE_b1_b4_proceedings, SE_b1_proceedings, SE_b2_proceedings, SE_b3_proceedings, SE_b4_proceedings, SE_others_proceedings,
            percentages_SE_proceedings, percentages_proceedings, Irestrito_medio_proceedings, Igeral_medio_proceedings)
        # Only the "general" run applies the 3x1 cap and publishes the summary dicts.
        if self.general == True:
            Irestrito_3x1_proceedings, Igeral_3x1_proceedings, Irestrito_3x1_total, Igeral_3x1_total = self.apply_3x1_2019(a1_journals, a2_journals, a3_journals, a4_journals,
                b1_journals, b2_journals, b3_journals, b4_journals, a1_proceedings, a2_proceedings, a3_proceedings, a4_proceedings,
                b1_proceedings, b2_proceedings, b3_proceedings, b4_proceedings)
            self.get_irestritos(Irestrito, Irestrito_journals, Irestrito_proceedings, Irestrito_3x1_proceedings, Irestrito_3x1_total)
            self.get_igerais(Igeral, Igeral_journals, Igeral_proceedings, Igeral_3x1_proceedings, Igeral_3x1_total)
        return (pd.DataFrame(table_general), pd.DataFrame(table_journals), pd.DataFrame(table_proceedings))
| 57,576 | -2 | 383 |
f9df096ba45086d24778903b52ed2aaffddeaa80 | 920 | py | Python | 02_script.py | michalskop/cz-covid-predictive-data | 42b7a4557d8b3c40ab4d2cf194efabb3b16db8be | [
"Apache-2.0"
] | null | null | null | 02_script.py | michalskop/cz-covid-predictive-data | 42b7a4557d8b3c40ab4d2cf194efabb3b16db8be | [
"Apache-2.0"
] | null | null | null | 02_script.py | michalskop/cz-covid-predictive-data | 42b7a4557d8b3c40ab4d2cf194efabb3b16db8be | [
"Apache-2.0"
] | null | null | null | """Split sorted modely_02."""
import pandas as pd
url = "https://onemocneni-aktualne.mzcr.cz/api/account/mifLSHU2re3GAmiotOkdYExeoQ/file/modely%252Fmodely_02_efektivita_testovani.csv"
df = pd.read_csv(url, delimiter=';')
df = df.sort_values(['datum_hlaseni', 'datum_prvniho_priznaku', 'orp', 'vek_kat', 'pohlavi'])
df[df['datum_hlaseni'] < '2021'].to_csv('modely_02_efektivita_testovani_sorted_2020_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] < '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_1_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] >= '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_2_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2022')].to_csv('modely_02_efektivita_testovani_sorted_2022_v1.csv')
df[(df['datum_hlaseni'] >= '2023') | df['datum_hlaseni'].isnull()].to_csv('modely_02_efektivita_testovani_sorted_null_v1.csv') | 48.421053 | 138 | 0.753261 | """Split sorted modely_02."""
import pandas as pd
url = "https://onemocneni-aktualne.mzcr.cz/api/account/mifLSHU2re3GAmiotOkdYExeoQ/file/modely%252Fmodely_02_efektivita_testovani.csv"
df = pd.read_csv(url, delimiter=';')
df = df.sort_values(['datum_hlaseni', 'datum_prvniho_priznaku', 'orp', 'vek_kat', 'pohlavi'])
df[df['datum_hlaseni'] < '2021'].to_csv('modely_02_efektivita_testovani_sorted_2020_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] < '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_1_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] >= '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_2_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2022')].to_csv('modely_02_efektivita_testovani_sorted_2022_v1.csv')
df[(df['datum_hlaseni'] >= '2023') | df['datum_hlaseni'].isnull()].to_csv('modely_02_efektivita_testovani_sorted_null_v1.csv') | 0 | 0 | 0 |
ac20fea2bd287e5e54a8c519873777a66011c100 | 1,048 | py | Python | btplotting-master/btplotting/tab.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null | btplotting-master/btplotting/tab.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null | btplotting-master/btplotting/tab.py | fredryce/stocker | 041fbe8348f7a035a607a214477cf423c4259171 | [
"MIT"
] | null | null | null | from bokeh.models.widgets import Panel
class BacktraderPlottingTab:
'''
Abstract class for tabs
This class needs to be extended from when creating custom tabs.
It is required to overwrite the _is_useable and _get_panel method.
The _get_panel method needs to return a panel child and a title.
'''
def is_useable(self):
'''
Returns if the tab is useable within the current environment
'''
return self._is_useable()
def get_panel(self):
'''
Returns the panel to show as a tab
'''
child, title = self._get_panel()
self._panel = Panel(child=child, title=title)
return self._panel
| 27.578947 | 70 | 0.644084 | from bokeh.models.widgets import Panel
class BacktraderPlottingTab:
'''
Abstract class for tabs
This class needs to be extended from when creating custom tabs.
It is required to overwrite the _is_useable and _get_panel method.
The _get_panel method needs to return a panel child and a title.
'''
def __init__(self, app, figurepage, client=None):
self._app = app
self._figurepage = figurepage
self._client = client
self._panel = None
def _is_useable(self):
raise Exception('_is_useable needs to be implemented.')
def _get_panel(self):
raise Exception('_get_panel needs to be implemented.')
def is_useable(self):
'''
Returns if the tab is useable within the current environment
'''
return self._is_useable()
def get_panel(self):
'''
Returns the panel to show as a tab
'''
child, title = self._get_panel()
self._panel = Panel(child=child, title=title)
return self._panel
| 275 | 0 | 81 |
832acd9db96614ccc2d38b291080f4460de203bc | 8,375 | py | Python | functions/sampling.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | [
"MIT"
] | null | null | null | functions/sampling.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | [
"MIT"
] | null | null | null | functions/sampling.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | [
"MIT"
] | null | null | null | """Functions that create samples."""
import chaospy as cp
import numpy as np
import respy as rp
import pandas as pd
from pathlib import Path
PROJECT_ROOT = Path(__file__).resolve().parents[1]
DATA_PATH = PROJECT_ROOT / "data"
CHAOSPY_SAMPLING_METHODS = {
"random",
"grid",
"chebyshev",
"korobov",
"sobol",
"halton",
"hammersley",
"latin_hypercube",
}
def create_sample(
n_samples=30,
seed=123,
M="None",
sampling_method="random",
MC_method="Brute force",
):
"""Simulate samples of qoi.
Parameters
----------
n_samples : int
Number of samples to draw.
seed : int
Seed for the random number generators.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
Returns
-------
input_x_respy: list
A list of input parameters that are ready to be passed into the
`respy` function.
input_x_mix_respy: list
A list of conditional input parameters that are ready to be passed
into the `respy` function.
"""
# load mean and cov
mean, cov = load_mean_and_cov()
# get unconditioal samples
sample_x, sample_x_prime = unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
)
# fix parameters of interest
x_3 = subset_params(sample_x)
x_prime_3 = subset_params(sample_x_prime)
x = fix_true_params(x_3, mean)
# get conditional samples
x_mix_3 = conditional_samples(x_3, x_prime_3, MC_method, M)
# fix parameters of interest
x_mix = fix_true_params_mix(x_mix_3, mean, MC_method)
input_x_respy = [(params_to_respy)(i) for i in x]
input_x_mix_respy = [(params_to_respy)(z) for x in x_mix for y in x for z in y]
return input_x_respy, input_x_mix_respy
def load_mean_and_cov():
"""Return mean and covariance for Keane and Wolpin (1994) model."""
# load model specifications
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
# mean and cov for sampling
mean = base_params["value"].to_numpy()[:27]
cov = pd.read_pickle(DATA_PATH / "covariance_kw_94_one.pkl").to_numpy()
return mean, cov
def unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
):
"""Generate two independent groups of sample points.
Parameters
----------
mean : pd.DataFrame or np.ndarray
The mean, of shape (k, ).
cov : pd.DataFrame or np.ndarrary
The covariance, has to be of shape (k, k).
n_samples : int
Number of samples to draw.
seed : int, optional
Random number generator seed.
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
Returns
-------
sample_x, sample_x_prime : np.ndarray
Two arrays of shape (n_draws, n_params) with i.i.d draws from a
given joint distribution.
"""
distribution = cp.MvNormal(loc=mean, scale=cov)
if sampling_method in CHAOSPY_SAMPLING_METHODS:
np.random.seed(seed)
sample_x = np.array(distribution.sample(size=n_samples, rule=sampling_method).T)
np.random.seed(seed + 1)
sample_x_prime = np.array(
distribution.sample(size=n_samples, rule=sampling_method).T
)
else:
raise ValueError(f"Argument 'method' is not in {CHAOSPY_SAMPLING_METHODS}.")
return sample_x, sample_x_prime
def subset_params(x):
"""Pick a subset of samples from the sampled parameters.
Parameters
----------
x : np.ndarray
Array of shape (n_draws, n_params).
Returns
-------
params_interests : np.ndarray
Array of shape (n_draws, 3) contains only 3 seleted parameters.
"""
n_draws = x.shape[0]
indices = [2, 14, 16]
params_interests = np.zeros((n_draws, 3))
for i in range(n_draws):
params_interests[i] = np.take(x[i], indices)
return params_interests
def conditional_samples(x_3, x_prime_3, MC_method, M):
"""Generate mixed sample sets of interest distributed accroding to a conditional PDF.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
x_prime : np.ndarray
Array with shape (n_draws, 3).
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
Returns
-------
x_mix : np.ndarray
Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3).
"""
n_draws, n_params = x_3.shape
if MC_method == "Brute force":
x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
for i in range(n_params):
for j in range(n_draws):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = x_prime_3[j, i]
if MC_method == "DLR":
conditional_bin = x_3[:M]
x_3_mix = np.zeros((M, n_params, n_draws, n_params))
# subdivide unconditional samples into M eaually bins,
# within each bin x_i being fixed.
for i in range(n_params):
for j in range(M):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = conditional_bin[j, i]
return x_3_mix
def fix_true_params(x_3, true_values):
"""Replace the 3 selected point estimates with the sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
n_draws = x_3.shape[0]
true_params_fix = np.tile(true_values, (n_draws, 1))
for i in range(n_draws):
np.put(true_params_fix[i], [2, 14, 16], x_3[i])
return true_params_fix
def fix_true_params_mix(x_3, true_values, MC_method):
"""Replace the 3 selected point estimates with the conditional sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
if MC_method == "Brute force":
n_draws, n_3_parmas = x_3.shape[:2]
true_params_fix = np.tile(true_values, (n_draws, n_3_parmas, n_draws, 1))
for i in range(n_draws):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
if MC_method == "DLR":
M, n_3_parmas, n_draws = x_3.shape[:3]
true_params_fix = np.tile(true_values, (M, n_3_parmas, n_draws, 1))
for i in range(M):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
return true_params_fix
def params_to_respy(input_params, *args):
"""transfer sampled paramters to respy format."""
# baseline options and params for the indices.
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
params_idx = pd.Series(data=input_params, index=base_params.index[0:27])
assert len(params_idx) == 27, "Length of KW94 vector must be 27."
part_1 = params_idx
rp_params, _ = rp.get_example_model("kw_94_one", with_data=False)
part_2 = rp_params.iloc[27:31, 0]
parts = [part_1, part_2]
rp_params_series = pd.concat(parts)
input_params_respy = pd.DataFrame(rp_params_series, columns=["value"])
return input_params_respy
| 27.459016 | 89 | 0.628776 | """Functions that create samples."""
import chaospy as cp
import numpy as np
import respy as rp
import pandas as pd
from pathlib import Path
PROJECT_ROOT = Path(__file__).resolve().parents[1]
DATA_PATH = PROJECT_ROOT / "data"
CHAOSPY_SAMPLING_METHODS = {
"random",
"grid",
"chebyshev",
"korobov",
"sobol",
"halton",
"hammersley",
"latin_hypercube",
}
def create_sample(
n_samples=30,
seed=123,
M="None",
sampling_method="random",
MC_method="Brute force",
):
"""Simulate samples of qoi.
Parameters
----------
n_samples : int
Number of samples to draw.
seed : int
Seed for the random number generators.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
Returns
-------
input_x_respy: list
A list of input parameters that are ready to be passed into the
`respy` function.
input_x_mix_respy: list
A list of conditional input parameters that are ready to be passed
into the `respy` function.
"""
# load mean and cov
mean, cov = load_mean_and_cov()
# get unconditioal samples
sample_x, sample_x_prime = unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
)
# fix parameters of interest
x_3 = subset_params(sample_x)
x_prime_3 = subset_params(sample_x_prime)
x = fix_true_params(x_3, mean)
# get conditional samples
x_mix_3 = conditional_samples(x_3, x_prime_3, MC_method, M)
# fix parameters of interest
x_mix = fix_true_params_mix(x_mix_3, mean, MC_method)
input_x_respy = [(params_to_respy)(i) for i in x]
input_x_mix_respy = [(params_to_respy)(z) for x in x_mix for y in x for z in y]
return input_x_respy, input_x_mix_respy
def load_mean_and_cov():
"""Return mean and covariance for Keane and Wolpin (1994) model."""
# load model specifications
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
# mean and cov for sampling
mean = base_params["value"].to_numpy()[:27]
cov = pd.read_pickle(DATA_PATH / "covariance_kw_94_one.pkl").to_numpy()
return mean, cov
def unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
):
"""Generate two independent groups of sample points.
Parameters
----------
mean : pd.DataFrame or np.ndarray
The mean, of shape (k, ).
cov : pd.DataFrame or np.ndarrary
The covariance, has to be of shape (k, k).
n_samples : int
Number of samples to draw.
seed : int, optional
Random number generator seed.
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
Returns
-------
sample_x, sample_x_prime : np.ndarray
Two arrays of shape (n_draws, n_params) with i.i.d draws from a
given joint distribution.
"""
distribution = cp.MvNormal(loc=mean, scale=cov)
if sampling_method in CHAOSPY_SAMPLING_METHODS:
np.random.seed(seed)
sample_x = np.array(distribution.sample(size=n_samples, rule=sampling_method).T)
np.random.seed(seed + 1)
sample_x_prime = np.array(
distribution.sample(size=n_samples, rule=sampling_method).T
)
else:
raise ValueError(f"Argument 'method' is not in {CHAOSPY_SAMPLING_METHODS}.")
return sample_x, sample_x_prime
def subset_params(x):
"""Pick a subset of samples from the sampled parameters.
Parameters
----------
x : np.ndarray
Array of shape (n_draws, n_params).
Returns
-------
params_interests : np.ndarray
Array of shape (n_draws, 3) contains only 3 seleted parameters.
"""
n_draws = x.shape[0]
indices = [2, 14, 16]
params_interests = np.zeros((n_draws, 3))
for i in range(n_draws):
params_interests[i] = np.take(x[i], indices)
return params_interests
def conditional_samples(x_3, x_prime_3, MC_method, M):
"""Generate mixed sample sets of interest distributed accroding to a conditional PDF.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
x_prime : np.ndarray
Array with shape (n_draws, 3).
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
Returns
-------
x_mix : np.ndarray
Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3).
"""
n_draws, n_params = x_3.shape
if MC_method == "Brute force":
x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
for i in range(n_params):
for j in range(n_draws):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = x_prime_3[j, i]
if MC_method == "DLR":
conditional_bin = x_3[:M]
x_3_mix = np.zeros((M, n_params, n_draws, n_params))
# subdivide unconditional samples into M eaually bins,
# within each bin x_i being fixed.
for i in range(n_params):
for j in range(M):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = conditional_bin[j, i]
return x_3_mix
def fix_true_params(x_3, true_values):
"""Replace the 3 selected point estimates with the sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
n_draws = x_3.shape[0]
true_params_fix = np.tile(true_values, (n_draws, 1))
for i in range(n_draws):
np.put(true_params_fix[i], [2, 14, 16], x_3[i])
return true_params_fix
def fix_true_params_mix(x_3, true_values, MC_method):
"""Replace the 3 selected point estimates with the conditional sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
if MC_method == "Brute force":
n_draws, n_3_parmas = x_3.shape[:2]
true_params_fix = np.tile(true_values, (n_draws, n_3_parmas, n_draws, 1))
for i in range(n_draws):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
if MC_method == "DLR":
M, n_3_parmas, n_draws = x_3.shape[:3]
true_params_fix = np.tile(true_values, (M, n_3_parmas, n_draws, 1))
for i in range(M):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
return true_params_fix
def params_to_respy(input_params, *args):
"""transfer sampled paramters to respy format."""
# baseline options and params for the indices.
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
params_idx = pd.Series(data=input_params, index=base_params.index[0:27])
assert len(params_idx) == 27, "Length of KW94 vector must be 27."
part_1 = params_idx
rp_params, _ = rp.get_example_model("kw_94_one", with_data=False)
part_2 = rp_params.iloc[27:31, 0]
parts = [part_1, part_2]
rp_params_series = pd.concat(parts)
input_params_respy = pd.DataFrame(rp_params_series, columns=["value"])
return input_params_respy
| 0 | 0 | 0 |
eaff22981bf52da6d78148d2d28c27ef6dce2a67 | 523 | py | Python | app/auth.py | leandcesar/bobotinho-api | 7a3ce31fb2220e00b4b1fabf10e1c32afde314a9 | [
"MIT"
] | null | null | null | app/auth.py | leandcesar/bobotinho-api | 7a3ce31fb2220e00b4b1fabf10e1c32afde314a9 | [
"MIT"
] | null | null | null | app/auth.py | leandcesar/bobotinho-api | 7a3ce31fb2220e00b4b1fabf10e1c32afde314a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from http import HTTPStatus
from typing import Optional
from flask_httpauth import HTTPTokenAuth
from app.config import config
auth = HTTPTokenAuth(scheme="Bearer", header="Authorization")
@auth.verify_token
@auth.error_handler
| 24.904762 | 76 | 0.74761 | # -*- coding: utf-8 -*-
from http import HTTPStatus
from typing import Optional
from flask_httpauth import HTTPTokenAuth
from app.config import config
auth = HTTPTokenAuth(scheme="Bearer", header="Authorization")
@auth.verify_token
def verify_token(token: str) -> Optional[str]:
users = {config.AUTH_TOKEN: "admin"}
return users.get(token)
@auth.error_handler
def default_error_handler(status: int) -> tuple[dict[str, str], HTTPStatus]:
return {"message": "Unauthorized Access"}, HTTPStatus.UNAUTHORIZED
| 220 | 0 | 44 |
6724a980f6d6c43d369b9a46c968d83b68bf114e | 2,335 | py | Python | devsechan/irc.py | Woomymy/devse-chan | 8964e0a34d299b39bd244f17e8564f0fa003f2e0 | [
"BSD-3-Clause"
] | 9 | 2020-11-19T12:55:06.000Z | 2021-08-13T19:11:23.000Z | devsechan/irc.py | Woomymy/devse-chan | 8964e0a34d299b39bd244f17e8564f0fa003f2e0 | [
"BSD-3-Clause"
] | 15 | 2021-09-01T09:16:05.000Z | 2022-03-15T17:48:14.000Z | devsechan/irc.py | Woomymy/devse-chan | 8964e0a34d299b39bd244f17e8564f0fa003f2e0 | [
"BSD-3-Clause"
] | 8 | 2020-12-21T16:03:52.000Z | 2021-08-31T19:40:23.000Z | from re import M
import bottom
import asyncio
import platform
| 38.916667 | 111 | 0.532334 | from re import M
import bottom
import asyncio
import platform
class IRC:
def __init__(self, parent, config):
self.config = config
self.irc = bottom.Client(host=config['host'].get(), port=config['port'].get(), ssl=config['ssl'].get())
@self.irc.on('CLIENT_CONNECT')
async def connect(**kwargs):
self.irc.send('NICK', nick=config['nick'].get())
self.irc.send('USER', user=config['username'].get(), realname='https://devse.wiki/')
done, pending = await asyncio.wait(
[self.irc.wait('RPL_ENDOFMOTD'), self.irc.wait('ERR_NOMOTD')],
return_when=asyncio.FIRST_COMPLETED)
for future in pending:
future.cancel()
# FIXME: maybe a cleaner way to do this with confuse (maybe I'll just drop confuse)
try:
self.irc.send('PRIVMSG', target="nickserv", message=f"IDENTIFY {config['nickserv'].get()}")
except BaseException:
pass
self.irc.send('JOIN', channel=config['channel'].get())
@self.irc.on('privmsg')
async def irc_message(nick, target, message, **kwargs):
if nick == config['nick'].get():
return
if target == config['nick'].get():
if message == '\001VERSION\001':
def gnuify(x): return 'GNU/Linux' if x == 'Linux' else x
self.irc.send(
'NOTICE',
target=nick,
message=f"\001VERSION devse-chan on {gnuify(platform.system())}\001")
elif message == '\001SOURCE\001':
self.irc.send(
'NOTICE',
target=nick,
message='\001SOURCE https://github.com/d0p1s4m4/devse-chan\001')
elif target != config['channel'].get():
return
await parent.to_discord(nick, message)
@self.irc.on('PING')
async def irc_ping(message, **kwargs):
self.irc.send('PONG', message=message)
def send(self, nick, message):
self.irc.send('PRIVMSG', target=self.config['channel'].get(), message=f"<\x036{nick}\x0F> {message}")
async def start(self):
return await self.irc.connect()
| 2,179 | -11 | 104 |
826b024a79bb72e12c1b5294e4bfa65c557b57a9 | 3,118 | py | Python | lib/cirrocumulus/parquet_output.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | null | null | null | lib/cirrocumulus/parquet_output.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | 1 | 2021-04-13T14:52:39.000Z | 2021-04-13T15:53:34.000Z | lib/cirrocumulus/parquet_output.py | klarman-cell-observatory/cirrocumulus-app-engine | 52997ae790773364591ab8d7c747e4505700373b | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
import numpy as np
import pandas._libs.json as ujson
import pyarrow as pa
import pyarrow.parquet as pq
import scipy.sparse
from cirrocumulus.anndata_util import DataType
logger = logging.getLogger("cirro")
| 37.566265 | 107 | 0.661642 | import logging
import os
import numpy as np
import pandas._libs.json as ujson
import pyarrow as pa
import pyarrow.parquet as pq
import scipy.sparse
from cirrocumulus.anndata_util import DataType
logger = logging.getLogger("cirro")
def write_pq(d, output_dir, name, filesystem, write_statistics=True, row_group_size=None):
filesystem.makedirs(output_dir, exist_ok=True)
pq.write_table(pa.Table.from_pydict(d), os.path.join(output_dir, name + '.parquet'),
write_statistics=write_statistics, row_group_size=row_group_size, filesystem=filesystem)
def save_datasets_pq(datasets, schema, output_directory, filesystem, whitelist):
X_dir = os.path.join(output_directory, 'X')
module_dir = os.path.join(output_directory, 'X_module')
obs_dir = os.path.join(output_directory, 'obs')
obsm_dir = os.path.join(output_directory, 'obsm')
filesystem.makedirs(X_dir, exist_ok=True)
filesystem.makedirs(obs_dir, exist_ok=True)
filesystem.makedirs(obsm_dir, exist_ok=True)
with filesystem.open(os.path.join(output_directory, 'index.json.gz'), 'wt', compression='gzip') as f:
f.write(ujson.dumps(schema, double_precision=2, orient='values'))
for dataset in datasets:
if dataset.uns.get('data_type') == DataType.MODULE:
filesystem.makedirs(module_dir, exist_ok=True)
if whitelist is None or 'X' in whitelist:
save_adata_X(dataset, module_dir, filesystem)
elif whitelist is None or 'X' in whitelist:
save_adata_X(dataset, X_dir, filesystem)
if whitelist is None or 'obs' in whitelist:
save_data_obs(dataset, obs_dir, filesystem)
if whitelist is None or 'obsm' in whitelist:
save_data_obsm(dataset, obsm_dir, filesystem)
def save_adata_X(adata, X_dir, filesystem):
adata_X = adata.X
names = adata.var.index
is_sparse = scipy.sparse.issparse(adata_X)
output_dir = X_dir
for j in range(adata_X.shape[1]):
X = adata_X[:, j]
if is_sparse:
X = X.toarray().flatten()
filename = names[j]
if is_sparse:
indices = np.where(X != 0)[0]
values = X[indices]
write_pq(dict(index=indices, value=values), output_dir, filename, filesystem)
else:
write_pq(dict(value=X), output_dir, filename, filesystem)
if j > 0 and (j + 1) % 1000 == 0:
logger.info('Wrote adata X {}/{}'.format(j + 1, adata_X.shape[1]))
def save_data_obsm(adata, obsm_dir, filesystem):
logger.info('writing adata obsm')
for name in adata.obsm.keys():
m = adata.obsm[name]
dim = m.shape[1]
d = {}
for i in range(dim):
d[name + '_' + str(i + 1)] = m[:, i].astype('float32')
write_pq(d, obsm_dir, name, filesystem)
def save_data_obs(adata, obs_dir, filesystem):
logger.info('writing adata obs')
for name in adata.obs:
value = adata.obs[name]
write_pq(dict(value=value), obs_dir, name, filesystem)
write_pq(dict(value=adata.obs.index.values), obs_dir, 'index', filesystem)
| 2,764 | 0 | 115 |
51c0e6bee2e820d7b01f2280b58a270ac9515f4c | 14,005 | py | Python | usaspending_api/search/tests/test_spending_by_award_type.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | usaspending_api/search/tests/test_spending_by_award_type.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | 1 | 2021-11-15T17:53:27.000Z | 2021-11-15T17:53:27.000Z | usaspending_api/search/tests/test_spending_by_award_type.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | import json
import pytest
from rest_framework import status
from usaspending_api.common.helpers.unit_test_helper import add_to_mock_objects
from usaspending_api.search.tests.test_mock_data_search import all_filters
from django_mock_queries.query import MockModel
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
def test_spending_by_award_pop_zip_filter(client, mock_matviews_qs):
""" Test that filtering by pop zips works"""
mock_model_1 = MockModel(pop_zip5="00501", pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None,
type='B', pulled_from="AWARD")
mock_model_2 = MockModel(pop_zip5="00502", pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None,
type='B', pulled_from="AWARD")
mock_model_3 = MockModel(pop_zip5="00503", pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None,
type='B', pulled_from="AWARD")
add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
# test simple, single zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
# test that adding a zip that has no results doesn't remove the results from the first zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "10000"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
# test that we get 2 results with 2 valid zips
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "00502"}]
}
}))
possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00501'},
{'internal_id': 2, 'Place of Performance Zip5': '00502'})
assert len(resp.data['results']) == 2
assert resp.data['results'][0] in possible_results
assert resp.data['results'][1] in possible_results
# Just to make sure it isn't returning the same thing twice somehow
assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_recipient_zip_filter(client, mock_matviews_qs):
""" Test that filtering by recipient zips works"""
mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD")
mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD")
mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD")
add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
# test simple, single zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
# test that adding a zip that has no results doesn't remove the results from the first zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "10000"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
# test that we get 2 results with 2 valid zips
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "00502"}]
}
}))
possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00001'},
{'internal_id': 2, 'Place of Performance Zip5': '00002'})
assert len(resp.data['results']) == 2
assert resp.data['results'][0] in possible_results
assert resp.data['results'][1] in possible_results
# Just to make sure it isn't returning the same thing twice somehow
assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_both_zip_filter(client, mock_matviews_qs):
    """Test that filtering by recipient and place-of-performance zips together works."""
    # Each mock award pairs a distinct recipient zip (005xx) with a distinct
    # place-of-performance zip (0000x), so the two filters are distinguishable.
    mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
                             pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None, type='B',
                             pulled_from="AWARD")
    mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
                             pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None, type='B',
                             pulled_from="AWARD")
    mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
                             pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None, type='B',
                             pulled_from="AWARD")
    add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
    # test simple, single pair of zips that both match
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00001"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
    # test simple, single pair of zips that don't match
    # (recipient zip belongs to award 1, pop zip belongs to award 2 -> no award satisfies both)
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00002"}]
            }
        }))
    assert len(resp.data['results']) == 0
    # test 2 pairs (only one pair can be made from this)
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"},
                                        {"country": "USA", "zip": "00502"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00001"},
                                                   {"country": "USA", "zip": "00003"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
@pytest.mark.django_db
def test_spending_by_award_foreign_filter(client, mock_matviews_qs):
    """ Verify that foreign country filter is returning the correct results """
    # Mix of country name / country code combinations: awards 0-2 are "USA"
    # by name, code, or both; award 3 (Gibraltar/GIB) is the only foreign one.
    mock_model_0 = MockModel(award_id=0, piid=None, fain='aaa', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="UNITED STATES", recipient_location_country_code="USA")
    mock_model_1 = MockModel(award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="", recipient_location_country_code="USA")
    mock_model_2 = MockModel(award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="UNITED STATES", recipient_location_country_code="")
    mock_model_3 = MockModel(award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="Gibraltar", recipient_location_country_code="GIB")
    add_to_mock_objects(mock_matviews_qs, [mock_model_0, mock_model_1, mock_model_2, mock_model_3])
    # add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_3])
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                # "recipient_locations": [{"country": "USA"}]
                "recipient_scope": "domestic"
            },
            "fields": ["Award ID"]
        }))
    # Three results are returned when searching for "USA"-based recipients
    # e.g. "USA"; "UNITED STATES"; "USA" and "UNITED STATES";
    assert len(resp.data['results']) == 3
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_scope": "foreign"
            },
            "fields": ["Award ID"],
        }))
    # One result is returned when searching for "Foreign" recipients
    assert len(resp.data['results']) == 1
| 45.470779 | 118 | 0.581078 | import json
import pytest
from rest_framework import status
from usaspending_api.common.helpers.unit_test_helper import add_to_mock_objects
from usaspending_api.search.tests.test_mock_data_search import all_filters
from django_mock_queries.query import MockModel
@pytest.mark.django_db
def test_spending_by_award_type_success(client, refresh_matviews):
    """Smoke-test valid spending_by_award requests; only the HTTP status is checked."""
    # test small request
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Award ID", "Recipient Name"],
            "filters": {
                "award_type_codes": ["A", "B", "C"]
            }
        }))
    assert resp.status_code == status.HTTP_200_OK
    # test IDV award types
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Award ID", "Recipient Name"],
            "filters": {
                "award_type_codes": ["IDV_A", "IDV_B", "IDV_B_A", "IDV_B_B", "IDV_B_C", "IDV_C", "IDV_D", "IDV_E"]
            }
        }))
    assert resp.status_code == status.HTTP_200_OK
    # test all features
    # NOTE(review): the two URLs below omit the trailing slash -- presumably
    # the route resolves either way; confirm a redirect is not masking failures.
    resp = client.post(
        '/api/v2/search/spending_by_award',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Award ID", "Recipient Name"],
            "filters": all_filters()
        }))
    assert resp.status_code == status.HTTP_200_OK
    # test subawards
    resp = client.post(
        '/api/v2/search/spending_by_award',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Sub-Award ID"],
            "filters": all_filters(),
            "subawards": True
        }))
    assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_spending_by_award_type_failure(client, refresh_matviews):
    """Verify that invalid spending_by_award requests are rejected with 4xx codes."""
    # test incomplete IDV award types
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Award ID", "Recipient Name"],
            "filters": {
                "award_type_codes": ["IDV_A", "IDV_B_A", "IDV_C", "IDV_D", "IDV_A_A"]
            }
        }))
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    # test bad autocomplete request for budget function
    # (an empty filters object fails request validation -> 422)
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({'filters': {}}))
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_spending_by_award_pop_zip_filter(client, mock_matviews_qs):
    """Test that filtering by place-of-performance (pop) zip codes works."""
    # Three mock awards, each with a distinct place-of-performance zip.
    mock_model_1 = MockModel(pop_zip5="00501", pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None,
                             type='B', pulled_from="AWARD")
    mock_model_2 = MockModel(pop_zip5="00502", pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None,
                             type='B', pulled_from="AWARD")
    mock_model_3 = MockModel(pop_zip5="00503", pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None,
                             type='B', pulled_from="AWARD")
    add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
    # test simple, single zip
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "place_of_performance_locations": [{"country": "USA", "zip": "00501"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
    # test that adding a zip that has no results doesn't remove the results from the first zip
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "place_of_performance_locations": [{"country": "USA", "zip": "00501"},
                                                   {"country": "USA", "zip": "10000"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
    # test that we get 2 results with 2 valid zips
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "place_of_performance_locations": [{"country": "USA", "zip": "00501"},
                                                   {"country": "USA", "zip": "00502"}]
            }
        }))
    # result ordering is not guaranteed, so accept either order
    possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00501'},
                        {'internal_id': 2, 'Place of Performance Zip5': '00502'})
    assert len(resp.data['results']) == 2
    assert resp.data['results'][0] in possible_results
    assert resp.data['results'][1] in possible_results
    # Just to make sure it isn't returning the same thing twice somehow
    assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_recipient_zip_filter(client, mock_matviews_qs):
    """Test that filtering by recipient zip codes works."""
    # Recipient zips (005xx) differ from pop zips (0000x), so the returned
    # "Place of Performance Zip5" field proves the recipient filter matched.
    mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
                             award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD")
    mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
                             award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD")
    mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
                             award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD")
    add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
    # test simple, single zip
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
    # test that adding a zip that has no results doesn't remove the results from the first zip
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"},
                                        {"country": "USA", "zip": "10000"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
    # test that we get 2 results with 2 valid zips
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"},
                                        {"country": "USA", "zip": "00502"}]
            }
        }))
    # result ordering is not guaranteed, so accept either order
    possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00001'},
                        {'internal_id': 2, 'Place of Performance Zip5': '00002'})
    assert len(resp.data['results']) == 2
    assert resp.data['results'][0] in possible_results
    assert resp.data['results'][1] in possible_results
    # Just to make sure it isn't returning the same thing twice somehow
    assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_both_zip_filter(client, mock_matviews_qs):
    """Test that filtering by recipient and place-of-performance zips together works."""
    # Each mock award pairs a distinct recipient zip (005xx) with a distinct
    # place-of-performance zip (0000x), so the two filters are distinguishable.
    mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
                             pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None, type='B',
                             pulled_from="AWARD")
    mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
                             pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None, type='B',
                             pulled_from="AWARD")
    mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
                             pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None, type='B',
                             pulled_from="AWARD")
    add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
    # test simple, single pair of zips that both match
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00001"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
    # test simple, single pair of zips that don't match
    # (recipient zip belongs to award 1, pop zip belongs to award 2 -> no award satisfies both)
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00002"}]
            }
        }))
    assert len(resp.data['results']) == 0
    # test 2 pairs (only one pair can be made from this)
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "fields": ["Place of Performance Zip5"],
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_locations": [{"country": "USA", "zip": "00501"},
                                        {"country": "USA", "zip": "00502"}],
                "place_of_performance_locations": [{"country": "USA", "zip": "00001"},
                                                   {"country": "USA", "zip": "00003"}]
            }
        }))
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
@pytest.mark.django_db
def test_spending_by_award_foreign_filter(client, mock_matviews_qs):
    """ Verify that foreign country filter is returning the correct results """
    # Mix of country name / country code combinations: awards 0-2 are "USA"
    # by name, code, or both; award 3 (Gibraltar/GIB) is the only foreign one.
    mock_model_0 = MockModel(award_id=0, piid=None, fain='aaa', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="UNITED STATES", recipient_location_country_code="USA")
    mock_model_1 = MockModel(award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="", recipient_location_country_code="USA")
    mock_model_2 = MockModel(award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="UNITED STATES", recipient_location_country_code="")
    mock_model_3 = MockModel(award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD",
                             recipient_location_country_name="Gibraltar", recipient_location_country_code="GIB")
    add_to_mock_objects(mock_matviews_qs, [mock_model_0, mock_model_1, mock_model_2, mock_model_3])
    # add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_3])
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                # "recipient_locations": [{"country": "USA"}]
                "recipient_scope": "domestic"
            },
            "fields": ["Award ID"]
        }))
    # Three results are returned when searching for "USA"-based recipients
    # e.g. "USA"; "UNITED STATES"; "USA" and "UNITED STATES";
    assert len(resp.data['results']) == 3
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_scope": "foreign"
            },
            "fields": ["Award ID"],
        }))
    # One result is returned when searching for "Foreign" recipients
    assert len(resp.data['results']) == 1
| 2,235 | 0 | 44 |
bfdf11c58987e1527ebc503d8721980c3affd9ed | 17,105 | py | Python | abei/implements/procedure_basic.py | mind-bricks/abei | 5e364d5200111793073a0a3d64f556b5207a8734 | [
"MIT"
] | null | null | null | abei/implements/procedure_basic.py | mind-bricks/abei | 5e364d5200111793073a0a3d64f556b5207a8734 | [
"MIT"
] | null | null | null | abei/implements/procedure_basic.py | mind-bricks/abei | 5e364d5200111793073a0a3d64f556b5207a8734 | [
"MIT"
] | null | null | null | from abei.interfaces import (
IProcedure,
IProcedureClass,
IProcedureFactory,
IProcedureData,
IProcedureLink,
)
from .procedure_joint_basic import (
joint_validate,
joint_run,
)
# native_function = staticmethod(lambda x, y: x)
# composite procedure class ------------------------------
procedure_class_composite = ProcedureClassComposite()
# bool procedure classes ----------------------------------
procedure_class_not = ProcedureClassBasic(
signature='not',
docstring='logic not',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: not x,
)
procedure_class_and = ProcedureClassBasic(
signature='and',
docstring='logic and',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x and y,
)
procedure_class_or = ProcedureClassBasic(
signature='or',
docstring='logic or',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x or y,
)
# calculation procedure classes ---------------------------
procedure_class_negate = ProcedureClassBasic(
    signature='neg',
    docstring='negate operator',
    procedure_type=ProcedureUnaryOperator,
    # BUGFIX: arithmetic negation was implemented as logical `not x`
    # (copy-paste from the 'not' class above), yielding a bool, not -x.
    native_function=lambda x: -x,
)
procedure_class_add = ProcedureClassBasic(
signature='add',
docstring='add operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x + y,
)
procedure_class_subtract = ProcedureClassBasic(
signature='sub',
docstring='subtract operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x - y,
)
procedure_class_multiply = ProcedureClassBasic(
signature='mul',
docstring='multiply operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x * y,
)
procedure_class_divide = ProcedureClassBasic(
signature='div',
docstring='divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x / y,
)
procedure_class_modulo = ProcedureClassBasic(
signature='mod',
docstring='modulo operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x % y,
)
procedure_class_mod_divide = ProcedureClassBasic(
signature='modDiv',
docstring='modulo divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x // y,
)
procedure_class_square = ProcedureClassBasic(
signature='sq',
docstring='square operator',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: x * x,
)
procedure_class_power = ProcedureClassBasic(
signature='pow',
docstring='power operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x ** y,
)
# comparision procedure classes ---------------------------
procedure_class_equal = ProcedureClassBasic(
signature='eq',
docstring='equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x == y,
)
procedure_class_not_equal = ProcedureClassBasic(
signature='ne',
docstring='not equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x != y,
)
procedure_class_less_than = ProcedureClassBasic(
signature='lt',
docstring='less than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x < y,
)
procedure_class_less_than_or_equal = ProcedureClassBasic(
signature='lte',
docstring='less than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x <= y,
)
procedure_class_greater_than = ProcedureClassBasic(
signature='gt',
docstring='greater than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x > y,
)
procedure_class_greater_than_or_equal = ProcedureClassBasic(
signature='gte',
docstring='greater than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x >= y,
)
# probe class --------------------------------------------
procedure_class_probe = ProcedureClassBasic(
signature='probe',
docstring='probe',
procedure_type=ProcedureProbe,
)
# data class cast -----------------------------------------
procedure_class_cast_2_bool = ProcedureClassBasic(
signature='castToBool',
docstring='cast to bool',
procedure_type=ProcedureCast,
native_function=lambda x: bool(x),
)
procedure_class_cast_2_int = ProcedureClassBasic(
signature='castToInt',
docstring='cast to int',
procedure_type=ProcedureCast,
native_function=lambda x: int(x),
)
procedure_class_cast_2_float = ProcedureClassBasic(
signature='castToFloat',
docstring='cast to float',
procedure_type=ProcedureCast,
native_function=lambda x: float(x),
)
# data flow control ---------------------------------------
procedure_class_diverge = ProcedureClassBasic(
signature='diverge2',
docstring='diverge 1 branch to 2',
procedure_type=ProcedureDiverge2,
)
procedure_class_converge = ProcedureClassBasic(
signature='converge2',
docstring='converge 2 branches to 1',
procedure_type=ProcedureConverge2,
)
# implement procedure class factory -----------------------
class ProcedureFactory(IProcedureFactory):
"""
basic procedure class factory
"""
| 29.696181 | 77 | 0.635194 | from abei.interfaces import (
IProcedure,
IProcedureClass,
IProcedureFactory,
IProcedureData,
IProcedureLink,
)
from .procedure_joint_basic import (
joint_validate,
joint_run,
)
class ProcedureBasic(IProcedure):
    """Default IProcedure implementation shared by the concrete procedures.

    Subclasses override ``run_normally`` (all inputs present) and, when a
    missing input is meaningful to them, ``run_exceptionally``.
    """

    signature = 'NA'
    docstring = 'NA'
    input_signatures = []
    output_signatures = []

    def __init__(self, signature=None, docstring=None,
                 input_signatures=None, output_signatures=None, **kwargs):
        # fall back to the class-level defaults when an argument is omitted
        self.signature = signature or self.signature
        self.docstring = docstring or self.docstring
        self.input_signatures = input_signatures or self.input_signatures
        self.output_signatures = output_signatures or self.output_signatures

    def get_signature(self):
        return self.signature

    def get_input_signatures(self):
        return self.input_signatures

    def get_output_signatures(self):
        return self.output_signatures

    def get_docstring(self):
        return self.docstring

    def set_docstring(self, docstring):
        self.docstring = docstring

    def run(self, procedure_data_list, **kwargs):
        # dispatch on whether every expected input datum is present
        if self.run_validation(procedure_data_list, self.input_signatures):
            return self.run_normally(procedure_data_list, **kwargs)
        return self.run_exceptionally(procedure_data_list, **kwargs)

    @staticmethod
    def run_validation(procedure_data_list, signatures):
        """Validate data against signatures; return False when any datum is None."""
        if len(procedure_data_list) != len(signatures):
            raise AssertionError('invalid data list')
        any_missing = False
        for datum, expected in zip(procedure_data_list, signatures):
            if datum is None:
                any_missing = True
                continue
            if not isinstance(datum, IProcedureData):
                raise AssertionError('invalid data list')
            if datum.get_class().get_signature() != expected:
                raise AssertionError('data signature miss match')
        return not any_missing

    def run_normally(self, procedure_data_list, **kwargs):
        # default behavior: produce an all-missing output list
        return [None] * len(self.output_signatures)

    def run_exceptionally(self, procedure_data_list, **kwargs):
        return [None] * len(self.output_signatures)
class ProcedureClassBasic(IProcedureClass):
    """Procedure class wrapping a fixed procedure type.

    Extra keyword arguments supplied at construction time (for example a
    ``native_function``) are merged into every ``instantiate`` call.
    """

    def __init__(self, signature, docstring, procedure_type, **kwargs):
        self.signature = signature
        self.docstring = docstring
        self.procedure_type = procedure_type
        self.kwargs = kwargs

    def get_signature(self):
        return self.signature

    def get_docstring(self):
        return self.docstring

    def instantiate(self, *args, **kwargs):
        # merge construction-time kwargs, then derive the specialized
        # signature/docstring before building the procedure instance
        kwargs.update(self.kwargs)
        kwargs.update(
            signature=self.generate_signature(**kwargs),
            docstring=self.generate_docstring(**kwargs),
        )
        return self.procedure_type(*args, **kwargs)

    def generate_signature(self, data_class=None, **kwargs):
        # e.g. 'add[<label>]' once specialized for a data class
        return (
            '{}[{}]'.format(self.signature, data_class.get_label())
            if data_class else self.signature
        )

    def generate_docstring(self, data_class=None, **kwargs):
        return (
            '{} for {}'.format(self.docstring, data_class.get_signature())
            if data_class else self.docstring
        )
class ProcedureComposite(IProcedureLink, ProcedureBasic):
    """Procedure whose outputs are produced by wired-up joints.

    Each output slot holds a (joint, index) pair; a falsy joint means the
    output is forwarded straight from the input at the same index.
    """

    output_joints = []
    output_indices = []

    def get_joints(self):
        return list(zip(self.output_joints, self.output_indices))

    def set_joints(self, joints, indices):
        # reject inconsistent wiring before storing it
        joint_validate(joints, indices, self, self.output_signatures)
        self.output_joints = joints
        self.output_indices = indices

    def run_normally(self, procedure_data_list, **kwargs):
        outputs = []
        for joint, index in self.get_joints():
            if joint:
                outputs.append(
                    joint_run(joint, procedure_data_list, **kwargs)[index])
            else:
                # unwired slot: pass the corresponding input through
                outputs.append(procedure_data_list[index])
        return outputs
class ProcedureClassComposite(IProcedureClass):
    """Procedure class for user-assembled composite procedures."""

    def get_signature(self):
        return 'composite'

    def get_docstring(self):
        return 'composite procedure class'

    def instantiate(self, *args, **kwargs):
        # arguments are forwarded verbatim to the ProcedureComposite ctor
        return ProcedureComposite(*args, **kwargs)
class ProcedureUnaryOperator(ProcedureBasic):
    """Procedure applying a one-argument native function to a single datum."""

    def __init__(self, *args, native_function=None, data_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        signature = data_class.get_signature()
        # input and output share the operand's data class
        self.input_signatures = [signature]
        self.output_signatures = [signature]
        self.native_function = native_function

    def run_normally(self, procedure_data_list, **kwargs):
        operand = procedure_data_list[0]
        result = operand.clone()
        result.set_value(self.native_function(operand.get_value()))
        return [result]
class ProcedureBinaryOperator(ProcedureBasic):
    """Procedure applying a two-argument native function to two same-class data."""

    def __init__(self, *args, native_function=None, data_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        signature = data_class.get_signature()
        self.input_signatures = [signature, signature]
        self.output_signatures = [signature]
        self.native_function = native_function

    def run_normally(self, procedure_data_list, **kwargs):
        # validation guarantees exactly two present inputs here
        lhs, rhs = procedure_data_list
        result = lhs.clone()
        result.set_value(
            self.native_function(lhs.get_value(), rhs.get_value()))
        return [result]
class ProcedureComparator(ProcedureBasic):
    """Procedure comparing two same-class data, producing a bool datum."""

    def __init__(self, *args, native_function=None,
                 data_class=None, bool_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        assert bool_class
        signature = data_class.get_signature()
        self.input_signatures = [signature, signature]
        self.output_signatures = [bool_class.get_signature()]
        self.bool_class = bool_class
        self.native_function = native_function

    def run_normally(self, procedure_data_list, **kwargs):
        # validation guarantees exactly two present inputs here
        lhs, rhs = procedure_data_list
        verdict = self.native_function(lhs.get_value(), rhs.get_value())
        return [self.bool_class.instantiate(verdict)]
class ProcedureProbe(ProcedureBasic):
    """Probe procedure: reports, as a bool datum, whether its input holds a value.

    A probe runs even when its input is missing -- that is exactly what it
    probes for -- so ``run_exceptionally`` delegates to ``run_normally``.
    """

    def __init__(
            self,
            *args,
            data_class=None,
            bool_class=None,
            **kwargs,
    ):
        super().__init__(*args, **kwargs)
        assert data_class
        assert bool_class
        self.input_signatures = [
            data_class.get_signature(),
        ]
        self.output_signatures = [
            bool_class.get_signature(),
        ]
        self.bool_class = bool_class

    def run_normally(self, procedure_data_list, **kwargs):
        data = procedure_data_list[0]
        # BUGFIX: on the exceptional path the input datum may be None
        # (that is what makes the path exceptional), and calling
        # get_value() on None raised AttributeError instead of
        # reporting "no value present".
        present = data is not None and data.get_value() is not None
        return [self.bool_class.instantiate(present)]

    def run_exceptionally(self, procedure_data_list, **kwargs):
        # probing a missing value is a normal use case for this procedure
        return self.run_normally(procedure_data_list, **kwargs)
class ProcedureDiverge2(ProcedureBasic):
    """Route one datum to one of two output branches according to a bool flag."""

    def __init__(self, *args, data_class=None, bool_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        assert bool_class
        self.input_signatures = [
            bool_class.get_signature(),
            data_class.get_signature(),
        ]
        self.output_signatures = [
            data_class.get_signature(),
            data_class.get_signature(),
        ]

    def run_normally(self, procedure_data_list, **kwargs):
        # NOTE(review): on the exceptional path procedure_data_list[0] may be
        # None, in which case get_value() raises -- confirm the flag datum is
        # always supplied by callers.
        flag = procedure_data_list[0].get_value()
        payload = procedure_data_list[1]
        if flag:
            return [payload, None]
        return [None, payload]

    def run_exceptionally(self, procedure_data_list, **kwargs):
        # diverging still works when the payload itself is missing
        return self.run_normally(procedure_data_list, **kwargs)
class ProcedureConverge2(ProcedureBasic):
    """Select one of two input branches according to a bool flag."""

    def __init__(self, *args, data_class=None, bool_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        assert bool_class
        self.input_signatures = [
            bool_class.get_signature(),
            data_class.get_signature(),
            data_class.get_signature(),
        ]
        self.output_signatures = [
            data_class.get_signature(),
        ]

    def run_normally(self, procedure_data_list, **kwargs):
        # NOTE(review): on the exceptional path procedure_data_list[0] may be
        # None, in which case get_value() raises -- confirm the flag datum is
        # always supplied by callers.
        flag = procedure_data_list[0].get_value()
        chosen = procedure_data_list[1] if flag else procedure_data_list[2]
        return [chosen]

    def run_exceptionally(self, procedure_data_list, **kwargs):
        # converging still works when an unselected branch is missing
        return self.run_normally(procedure_data_list, **kwargs)
class ProcedureCast(ProcedureBasic):
    """Procedure converting a datum of one data class into another."""

    def __init__(self, *args, data_class=None, data_class_to=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert data_class
        self.input_signatures = [data_class.get_signature()]
        self.output_signatures = [data_class_to.get_signature()]
        self.data_class_to = data_class_to

    def run_normally(self, procedure_data_list, **kwargs):
        source_value = procedure_data_list[0].get_value()
        return [self.data_class_to.instantiate(source_value)]
# composite procedure class ------------------------------
procedure_class_composite = ProcedureClassComposite()
# bool procedure classes ----------------------------------
procedure_class_not = ProcedureClassBasic(
signature='not',
docstring='logic not',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: not x,
)
procedure_class_and = ProcedureClassBasic(
signature='and',
docstring='logic and',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x and y,
)
procedure_class_or = ProcedureClassBasic(
signature='or',
docstring='logic or',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x or y,
)
# calculation procedure classes ---------------------------
procedure_class_negate = ProcedureClassBasic(
    signature='neg',
    docstring='negate operator',
    procedure_type=ProcedureUnaryOperator,
    # BUGFIX: arithmetic negation was implemented as logical `not x`
    # (copy-paste from the 'not' class above), yielding a bool, not -x.
    native_function=lambda x: -x,
)
procedure_class_add = ProcedureClassBasic(
signature='add',
docstring='add operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x + y,
)
procedure_class_subtract = ProcedureClassBasic(
signature='sub',
docstring='subtract operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x - y,
)
procedure_class_multiply = ProcedureClassBasic(
signature='mul',
docstring='multiply operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x * y,
)
procedure_class_divide = ProcedureClassBasic(
signature='div',
docstring='divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x / y,
)
procedure_class_modulo = ProcedureClassBasic(
signature='mod',
docstring='modulo operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x % y,
)
procedure_class_mod_divide = ProcedureClassBasic(
signature='modDiv',
docstring='modulo divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x // y,
)
procedure_class_square = ProcedureClassBasic(
signature='sq',
docstring='square operator',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: x * x,
)
procedure_class_power = ProcedureClassBasic(
signature='pow',
docstring='power operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x ** y,
)
# comparision procedure classes ---------------------------
procedure_class_equal = ProcedureClassBasic(
signature='eq',
docstring='equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x == y,
)
procedure_class_not_equal = ProcedureClassBasic(
signature='ne',
docstring='not equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x != y,
)
procedure_class_less_than = ProcedureClassBasic(
signature='lt',
docstring='less than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x < y,
)
procedure_class_less_than_or_equal = ProcedureClassBasic(
signature='lte',
docstring='less than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x <= y,
)
procedure_class_greater_than = ProcedureClassBasic(
signature='gt',
docstring='greater than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x > y,
)
procedure_class_greater_than_or_equal = ProcedureClassBasic(
signature='gte',
docstring='greater than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x >= y,
)
# probe class --------------------------------------------
procedure_class_probe = ProcedureClassBasic(
signature='probe',
docstring='probe',
procedure_type=ProcedureProbe,
)
# data class cast -----------------------------------------
procedure_class_cast_2_bool = ProcedureClassBasic(
signature='castToBool',
docstring='cast to bool',
procedure_type=ProcedureCast,
native_function=lambda x: bool(x),
)
procedure_class_cast_2_int = ProcedureClassBasic(
signature='castToInt',
docstring='cast to int',
procedure_type=ProcedureCast,
native_function=lambda x: int(x),
)
procedure_class_cast_2_float = ProcedureClassBasic(
signature='castToFloat',
docstring='cast to float',
procedure_type=ProcedureCast,
native_function=lambda x: float(x),
)
# data flow control ---------------------------------------
procedure_class_diverge = ProcedureClassBasic(
signature='diverge2',
docstring='diverge 1 branch to 2',
procedure_type=ProcedureDiverge2,
)
procedure_class_converge = ProcedureClassBasic(
signature='converge2',
docstring='converge 2 branches to 1',
procedure_type=ProcedureConverge2,
)
# implement procedure class factory -----------------------
class ProcedureFactory(IProcedureFactory):
    """
    Factory for the built-in basic procedure classes.

    Maps each class signature to its procedure class and supports
    lookup, registration and instantiation by signature.
    """
    def __init__(self, service_site, **kwargs):
        builtin_classes = [
            procedure_class_composite,
            procedure_class_or,
            procedure_class_and,
            procedure_class_not,
            procedure_class_negate,
            procedure_class_add,
            procedure_class_subtract,
            procedure_class_multiply,
            procedure_class_divide,
            procedure_class_modulo,
            procedure_class_mod_divide,
            procedure_class_square,
            procedure_class_power,
            procedure_class_equal,
            procedure_class_not_equal,
            procedure_class_greater_than,
            procedure_class_greater_than_or_equal,
            procedure_class_less_than,
            procedure_class_less_than_or_equal,
            procedure_class_probe,
            procedure_class_cast_2_bool,
            procedure_class_cast_2_int,
            procedure_class_cast_2_float,
            procedure_class_diverge,
            procedure_class_converge,
        ]
        # Index every built-in class by its signature, preserving order.
        self.procedure_classes = {}
        for procedure_class in builtin_classes:
            self.procedure_classes[procedure_class.get_signature()] = procedure_class
    def create(self, class_signature, *args, **kwargs):
        """Instantiate a procedure of the class named by *class_signature*."""
        return self.get_class(class_signature).instantiate(*args, **kwargs)
    def get_class(self, class_signature):
        """Return the registered class; raise LookupError when absent."""
        found = self.query_class(class_signature)
        if not found:
            raise LookupError('procedure class not found')
        return found
    def query_class(self, class_signature):
        """Return the registered class for the signature, or None."""
        return self.procedure_classes.get(class_signature)
    def register_class(self, class_signature, procedure_class, **kwargs):
        """Register *procedure_class* under *class_signature* (must be new)."""
        assert isinstance(procedure_class, IProcedureClass)
        if class_signature in self.procedure_classes:
            raise AssertionError(
                '{} already registered'.format(class_signature))
        self.procedure_classes[class_signature] = procedure_class
    def iterate_classes(self):
        """Iterate over all registered class signatures."""
        return self.procedure_classes.keys()
| 10,028 | 747 | 1,113 |
21c9f79920b697cfa6ac2f04a0ea24b5b317a312 | 5,109 | py | Python | tests/handlers/test_base_handler_with_different_storage_config.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 1 | 2021-12-24T02:01:52.000Z | 2021-12-24T02:01:52.000Z | tests/handlers/test_base_handler_with_different_storage_config.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 2 | 2022-03-17T06:53:16.000Z | 2022-03-31T19:42:00.000Z | tests/handlers/test_base_handler_with_different_storage_config.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from json import loads
from shutil import which
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.engines.pil import Engine
from thumbor.importer import Importer
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.storages.no_storage import Storage as NoStorage
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
| 34.755102 | 115 | 0.671169 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from json import loads
from shutil import which
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.engines.pil import Engine
from thumbor.importer import Importer
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.storages.no_storage import Storage as NoStorage
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
class StorageOverrideTestCase(BaseImagingTestCase):
    # Verifies that when the storage module is swapped at engine-load time
    # for NoStorage, thumbor never calls FileStorage.put for the request.
    def get_context(self):
        # Context configured with file loader + file storage; the test then
        # overrides the storage from inside Engine.load.
        cfg = Config(SECURITY_KEY="ACME-SEC")
        cfg.LOADER = "thumbor.loaders.file_loader"
        cfg.FILE_LOADER_ROOT_PATH = self.loader_path
        cfg.STORAGE = "thumbor.storages.file_storage"
        cfg.FILE_STORAGE_ROOT_PATH = self.root_path
        importer = Importer(cfg)
        importer.import_modules()
        server = ServerParameters(
            8889, "localhost", "thumbor.conf", None, "info", None
        )
        server.security_key = "ACME-SEC"
        return Context(server, cfg, importer)
    @gen_test
    async def test_shouldnt_call_put_when_storage_overridden_to_nostorage(
        self,
    ): # NOQA
        # Monkeypatch Engine.load to force NoStorage, and FileStorage.put to
        # fail the test if it is ever reached.
        old_load = Engine.load
        old_put = FileStorage.put
        def load_override(self, arg, arg2):
            self.context.modules.storage = NoStorage(None)
            return old_load(self, arg, arg2)
        def put_override(*_):
            expect.not_to_be_here()
        Engine.load = load_override
        FileStorage.put = put_override
        response = await self.async_fetch("/unsafe/image.jpg")
        # Restore the patched class attributes before asserting.
        Engine.load = old_load
        FileStorage.put = old_put
        expect(response.code).to_equal(200)
class ImageOperationsWithoutStorage(BaseImagingTestCase):
    # End-to-end /unsafe requests against a context configured with
    # no_storage: meta output, frame counts, max_bytes filter, and EXIF
    # orientation handling.
    def get_context(self):
        cfg = Config(SECURITY_KEY="ACME-SEC")
        cfg.LOADER = "thumbor.loaders.file_loader"
        cfg.FILE_LOADER_ROOT_PATH = self.loader_path
        cfg.STORAGE = "thumbor.storages.no_storage"
        cfg.AUTO_WEBP = True
        cfg.USE_GIFSICLE_ENGINE = True
        cfg.RESPECT_ORIENTATION = True
        importer = Importer(cfg)
        importer.import_modules()
        server = ServerParameters(
            8889, "localhost", "thumbor.conf", None, "info", None
        )
        server.security_key = "ACME-SEC"
        ctx = Context(server, cfg, importer)
        # gifsicle binary path resolved from PATH (may be None if missing).
        ctx.server.gifsicle_path = which("gifsicle")
        return ctx
    @gen_test
    async def test_meta(self):
        response = await self.async_fetch("/unsafe/meta/800x400/image.jpg")
        expect(response.code).to_equal(200)
    @gen_test
    async def test_meta_with_unicode(self):
        # Percent-encoded UTF-8 filename must round-trip through the URL.
        response = await self.async_fetch(
            "/unsafe/meta/200x300/alabama1_ap620%C3%A9.jpg"
        )
        expect(response.code).to_equal(200)
        obj = loads(response.body.decode("utf-8"))
        expect(obj["thumbor"]["target"]["width"]).to_equal(200)
        expect(obj["thumbor"]["target"]["height"]).to_equal(300)
    @gen_test
    async def test_meta_frame_count(self):
        response = await self.async_fetch("/unsafe/meta/800x400/image.jpg")
        expect(response.code).to_equal(200)
        obj = loads(response.body.decode("utf-8"))
        expect(obj["thumbor"]["source"]["frameCount"]).to_equal(1)
    @gen_test
    async def test_meta_frame_count_with_gif(self):
        response = await self.async_fetch("/unsafe/meta/animated.gif")
        expect(response.code).to_equal(200)
        obj = loads(response.body.decode("utf-8"))
        expect(obj["thumbor"]["source"]["frameCount"]).to_equal(2)
    @gen_test
    async def test_max_bytes(self):
        # Achievable byte budget: output must be shrunk to fit.
        response = await self.async_fetch(
            "/unsafe/filters:max_bytes(35000)/Giunchedi%2C_"
            "Filippo_January_2015_01.jpg"
        )
        expect(response.code).to_equal(200)
        expect(len(response.body)).to_be_lesser_or_equal_to(35000)
    @gen_test
    async def test_max_bytes_impossible(self):
        # Unachievable budget: request still succeeds, budget exceeded.
        response = await self.async_fetch(
            "/unsafe/filters:max_bytes(1000)/Giunchedi%2C_Filippo_"
            "January_2015_01.jpg"
        )
        expect(response.code).to_equal(200)
        expect(len(response.body)).to_be_greater_than(1000)
    @gen_test
    async def test_meta_with_exif_orientation(self):
        # RESPECT_ORIENTATION=True: dimensions reflect the EXIF rotation.
        response = await self.async_fetch(
            "/unsafe/meta/0x0/Giunchedi%2C_Filippo_January_2015_01-"
            "cmyk-orientation-exif.jpg"
        )
        expect(response.code).to_equal(200)
        obj = loads(response.body.decode("utf-8"))
        expect(obj["thumbor"]["target"]["width"]).to_equal(533)
        expect(obj["thumbor"]["target"]["height"]).to_equal(800)
| 3,744 | 446 | 46 |
69f96b0f73d164eab7447db6d9b5280090b7a144 | 784 | py | Python | server/migrations/versions/69858d32aaff_.py | morganrconnolly/billingPlatform | 9323b3af5a906cac0a0966943d8cf6d9fb1b656c | [
"MIT"
] | null | null | null | server/migrations/versions/69858d32aaff_.py | morganrconnolly/billingPlatform | 9323b3af5a906cac0a0966943d8cf6d9fb1b656c | [
"MIT"
] | null | null | null | server/migrations/versions/69858d32aaff_.py | morganrconnolly/billingPlatform | 9323b3af5a906cac0a0966943d8cf6d9fb1b656c | [
"MIT"
] | null | null | null | """empty message
Revision ID: 69858d32aaff
Revises: 160db434d139
Create Date: 2016-07-20 16:08:00.219265
"""
# revision identifiers, used by Alembic.
revision = '69858d32aaff'
down_revision = '160db434d139'
from alembic import op
import sqlalchemy as sa
| 27.034483 | 113 | 0.706633 | """empty message
Revision ID: 69858d32aaff
Revises: 160db434d139
Create Date: 2016-07-20 16:08:00.219265
"""
# revision identifiers, used by Alembic.
revision = '69858d32aaff'
down_revision = '160db434d139'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace session_action.Type with a lowercase `type` column.

    NOTE(review): this add+drop does not copy existing values from the old
    column into the new one — confirm the data loss is acceptable.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('session_action', sa.Column('type', sa.String(length=24), nullable=True))
    op.drop_column('session_action', 'Type')
    ### end Alembic commands ###
def downgrade():
    """Revert to the capitalized `Type` column on session_action.

    NOTE(review): like upgrade(), values are not migrated between columns.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('session_action', sa.Column('Type', sa.VARCHAR(length=24), autoincrement=False, nullable=True))
    op.drop_column('session_action', 'type')
    ### end Alembic commands ###
| 478 | 0 | 46 |
7c02cdfa5a27d248d3b277c93e5fab5aa330d11d | 639 | py | Python | hiargparse/file_protocols/dict_writers/null_writer.py | KKawamura1/hiargparse | 4525003997807c97cf25ee3e0a26c029b553d155 | [
"MIT"
] | 4 | 2018-04-30T02:47:14.000Z | 2020-12-20T13:44:02.000Z | hiargparse/file_protocols/dict_writers/null_writer.py | KKawamura1/hiargparse | 4525003997807c97cf25ee3e0a26c029b553d155 | [
"MIT"
] | 1 | 2022-01-16T17:59:53.000Z | 2022-01-16T17:59:53.000Z | hiargparse/file_protocols/dict_writers/null_writer.py | KKawamura1/hiargparse | 4525003997807c97cf25ee3e0a26c029b553d155 | [
"MIT"
] | null | null | null | from .abstract_dict_writer import AbstractDictWriter
from typing import Union, Sequence
| 18.794118 | 52 | 0.527387 | from .abstract_dict_writer import AbstractDictWriter
from typing import Union, Sequence
class NullWriter(AbstractDictWriter):
    """Dict writer that silently discards everything.

    Satisfies the AbstractDictWriter protocol while recording nothing;
    write_out always renders an empty document.
    """
    def __init__(
            self
    ) -> None:
        pass

    def begin_section(self, name: str) -> None:
        """Ignore the start of a section."""
        pass

    def end_section(self) -> None:
        """Ignore the end of a section."""
        pass

    def add_comment(
            self,
            comment: str
    ) -> None:
        """Ignore a comment."""
        pass

    def add_value(
            self,
            name: str,
            values: Union[str, Sequence[str]],
            comment: str,
            comment_outs: bool
    ) -> None:
        """Ignore a value entry."""
        pass

    def write_out(self) -> str:
        """Return the (empty) rendered document."""
        return ''
| 350 | 16 | 184 |
07f275f04459b59e56771f458e8dbc8d729ad137 | 3,070 | py | Python | autogluon/utils/tabular/ml/models/abstract/model_trial.py | tlienart/autogluon | d02e37f41cd947dd1281bb1296cd12a8187ec441 | [
"Apache-2.0"
] | 6 | 2020-06-16T19:17:36.000Z | 2021-07-07T14:50:31.000Z | autogluon/utils/tabular/ml/models/abstract/model_trial.py | tlienart/autogluon | d02e37f41cd947dd1281bb1296cd12a8187ec441 | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/models/abstract/model_trial.py | tlienart/autogluon | d02e37f41cd947dd1281bb1296cd12a8187ec441 | [
"Apache-2.0"
] | 2 | 2020-12-13T16:40:04.000Z | 2021-03-08T09:14:16.000Z | import os
import time
import logging
from ....utils.loaders import load_pkl
from ....utils.exceptions import TimeLimitExceeded
from ......core import args
from ......scheduler.reporter import LocalStatusReporter
logger = logging.getLogger(__name__)
@args()
def model_trial(args, reporter: LocalStatusReporter):
    """ Training script for hyperparameter evaluation of an arbitrary model that subclasses AbstractModel.
    Notes:
        - Model object itself must be passed as kwarg: model
        - All model hyperparameters must be stored in model.params dict that may contain special keys such as:
            'seed_value' to ensure reproducibility
            'num_threads', 'num_gpus' to set specific resources in model.fit()
        - model.save() must have return_filename, file_prefix, directory options
    """
    try:
        # Separate scheduler bookkeeping (task_id, util_args) from the
        # hyperparameter dict and relocate the model into a per-trial folder.
        model, args, util_args = prepare_inputs(args=args)
        # Load the pre-pickled train/validation splits for this search.
        X_train, y_train = load_pkl.load(util_args.directory + util_args.dataset_train_filename)
        X_val, y_val = load_pkl.load(util_args.directory + util_args.dataset_val_filename)
        fit_model_args = dict(X_train=X_train, Y_train=y_train, X_test=X_val, Y_test=y_val)
        predict_proba_args = dict(X=X_val)
        model = fit_and_save_model(model=model, params=args, fit_args=fit_model_args, predict_proba_args=predict_proba_args, y_test=y_val,
                                   time_start=util_args.time_start, time_limit=util_args.get('time_limit', None), reporter=None)
    except Exception as e:
        # Hitting the time limit is expected and not logged as an error;
        # any other failure is. Either way the trial is terminated.
        if not isinstance(e, TimeLimitExceeded):
            logger.exception(e, exc_info=True)
        reporter.terminate()
    else:
        # Report the trial's validation score back to the scheduler.
        reporter(epoch=1, validation_performance=model.val_score)
| 41.486486 | 138 | 0.708143 | import os
import time
import logging
from ....utils.loaders import load_pkl
from ....utils.exceptions import TimeLimitExceeded
from ......core import args
from ......scheduler.reporter import LocalStatusReporter
logger = logging.getLogger(__name__)
@args()
def model_trial(args, reporter: LocalStatusReporter):
    """ Training script for hyperparameter evaluation of an arbitrary model that subclasses AbstractModel.
    Notes:
        - Model object itself must be passed as kwarg: model
        - All model hyperparameters must be stored in model.params dict that may contain special keys such as:
            'seed_value' to ensure reproducibility
            'num_threads', 'num_gpus' to set specific resources in model.fit()
        - model.save() must have return_filename, file_prefix, directory options
    """
    try:
        # Separate scheduler bookkeeping (task_id, util_args) from the
        # hyperparameter dict and relocate the model into a per-trial folder.
        model, args, util_args = prepare_inputs(args=args)
        # Load the pre-pickled train/validation splits for this search.
        X_train, y_train = load_pkl.load(util_args.directory + util_args.dataset_train_filename)
        X_val, y_val = load_pkl.load(util_args.directory + util_args.dataset_val_filename)
        fit_model_args = dict(X_train=X_train, Y_train=y_train, X_test=X_val, Y_test=y_val)
        predict_proba_args = dict(X=X_val)
        model = fit_and_save_model(model=model, params=args, fit_args=fit_model_args, predict_proba_args=predict_proba_args, y_test=y_val,
                                   time_start=util_args.time_start, time_limit=util_args.get('time_limit', None), reporter=None)
    except Exception as e:
        # Hitting the time limit is expected and not logged as an error;
        # any other failure is. Either way the trial is terminated.
        if not isinstance(e, TimeLimitExceeded):
            logger.exception(e, exc_info=True)
        reporter.terminate()
    else:
        # Report the trial's validation score back to the scheduler.
        reporter(epoch=1, validation_performance=model.val_score)
def prepare_inputs(args):
    """Pop scheduler bookkeeping out of *args* and namespace the model.

    Removes 'task_id' and 'util_args' from the hyperparameter dict, moves
    the model into a per-trial subdirectory, and returns
    (model, remaining_hyperparameters, util_args).
    """
    task_number = args.pop('task_id')
    util_args = args.pop('util_args')
    trial_prefix = "trial_{}".format(task_number)  # append to all file names created during this trial. Do NOT change!
    model = util_args.model  # the model object must be passed into model_trial() here
    model.name = "{}{}{}".format(model.name, os.path.sep, trial_prefix)
    model.set_contexts(path_context=model.path_root + model.name + os.path.sep)
    return model, args, util_args
def fit_and_save_model(model, params, fit_args, predict_proba_args, y_test, time_start, time_limit=None, reporter=None):
    """Fit *model* with *params*, score it on validation data, and save it.

    Raises TimeLimitExceeded when the time budget is already exhausted
    before fitting begins. Records fit/predict wall times and the
    validation score on the model, then returns it.
    """
    elapsed = time.time() - time_start
    time_left = None
    if time_limit is not None:
        time_left = time_limit - elapsed
        if time_left <= 0:
            raise TimeLimitExceeded
    model.params.update(params)
    fit_start = time.time()
    model.fit(**fit_args, time_limit=time_left, reporter=reporter)
    fit_end = time.time()
    y_pred_proba = model.predict_proba(**predict_proba_args)
    pred_end = time.time()
    model.val_score = model.score_with_y_pred_proba(y=y_test, y_pred_proba=y_pred_proba)
    model.fit_time = fit_end - fit_start
    model.predict_time = pred_end - fit_end
    model.save()
    return model
| 1,276 | 0 | 46 |
d16aabbf7b1ab02f9447e4f9dd0bdabe4fe556c4 | 718 | py | Python | aioftx/payments/api.py | metta-team/aioftx | f5bd028e8bf40c55c1d4632802b792be113e0978 | [
"MIT"
] | null | null | null | aioftx/payments/api.py | metta-team/aioftx | f5bd028e8bf40c55c1d4632802b792be113e0978 | [
"MIT"
] | null | null | null | aioftx/payments/api.py | metta-team/aioftx | f5bd028e8bf40c55c1d4632802b792be113e0978 | [
"MIT"
] | null | null | null | from typing import Optional
from aioftx.session import FTXClientSession
from .schemas import (
FundingPayment,
GetFundingPaymentsRequest,
GetFundingPaymentsResponse,
)
async def get_funding_payments(
    session: FTXClientSession,
    *,
    future: Optional[str] = None,
    start_time: Optional[int] = None,
    end_time: Optional[int] = None,
) -> list[FundingPayment]:
    """
    Fetch the account's funding payments from the FTX REST API.
    """
    req = GetFundingPaymentsRequest(
        future=future,
        start_time=start_time,
        end_time=end_time,
    )
    async with session.get(req.url) as response:
        payload = await response.json()
        return GetFundingPaymentsResponse(**payload).data()
| 23.933333 | 56 | 0.681058 | from typing import Optional
from aioftx.session import FTXClientSession
from .schemas import (
FundingPayment,
GetFundingPaymentsRequest,
GetFundingPaymentsResponse,
)
async def get_funding_payments(
    session: FTXClientSession,
    *,
    future: Optional[str] = None,
    start_time: Optional[int] = None,
    end_time: Optional[int] = None,
) -> list[FundingPayment]:
    """
    Fetch the account's funding payments from the FTX REST API.
    """
    req = GetFundingPaymentsRequest(
        future=future,
        start_time=start_time,
        end_time=end_time,
    )
    async with session.get(req.url) as response:
        payload = await response.json()
        return GetFundingPaymentsResponse(**payload).data()
| 0 | 0 | 0 |
b91ce7f9815c40c314f3094cadd8acb2c4cda526 | 2,874 | py | Python | examples/server-ssh-keys.py | pschelle/pyonepassword | 2258c0fa851ad6a63c4f959982a66c715706b654 | [
"MIT"
] | 12 | 2019-08-11T09:08:47.000Z | 2022-03-18T22:10:12.000Z | examples/server-ssh-keys.py | pschelle/pyonepassword | 2258c0fa851ad6a63c4f959982a66c715706b654 | [
"MIT"
] | 23 | 2019-09-13T20:16:12.000Z | 2022-01-15T18:29:52.000Z | examples/server-ssh-keys.py | pschelle/pyonepassword | 2258c0fa851ad6a63c4f959982a66c715706b654 | [
"MIT"
] | 10 | 2020-03-10T19:49:35.000Z | 2022-01-18T14:09:10.000Z | import os
import getpass
from pathlib import Path
from argparse import ArgumentParser
from pyonepassword import OP, OPServerItem
if __name__ == "__main__":
main()
| 32.292135 | 101 | 0.675713 | import os
import getpass
from pathlib import Path
from argparse import ArgumentParser
from pyonepassword import OP, OPServerItem
class ServerWithSSHKeys:
    """Wraps a 1Password server item and writes its stored SSH key pair
    to disk with conventional key-file permissions."""
    SSH_KEYS_SECTION = "SSH Keys"  # 1Password section title holding the keys
    PRIV_PERMS = 0o600   # private key: owner read/write only
    PUB_PERMS = 0o644    # public key: world-readable
    DIR_PERMS = 0o755

    def __init__(self, server_item: "OPServerItem"):
        # Annotation is quoted so the class can be defined without the
        # pyonepassword import being resolvable at class-creation time.
        self._server = server_item

    def ssh_key_pair(self, identity_name, pub_only):
        """Return (private_key, public_key) for *identity_name*.

        private_key is None when pub_only is True.
        """
        identity_name_pub = f"{identity_name}.pub"
        priv_key = None
        if not pub_only:
            # Consistency fix: use the SSH_KEYS_SECTION constant instead of
            # repeating the section-title literal.
            priv_key = self._server.field_value_by_section_title(
                self.SSH_KEYS_SECTION, identity_name)
        pub_key = self._server.field_value_by_section_title(
            self.SSH_KEYS_SECTION, identity_name_pub)
        return (priv_key, pub_key)

    def write_ssh_keys(self, outdir, identity_name, pub_only=False):
        """Write the identity's key files under *outdir*, creating it if needed."""
        priv, pub = self.ssh_key_pair(identity_name, pub_only)
        self._mkdir(outdir)
        if not pub_only:
            fpath = Path(outdir, identity_name)
            self._write_with_octal_perms(fpath, self.PRIV_PERMS, priv)
        fpath = Path(outdir, f"{identity_name}.pub")
        self._write_with_octal_perms(fpath, self.PUB_PERMS, pub)

    def _mkdir(self, dirpath):
        """Create *dirpath* (and parents) with DIR_PERMS; no-op if present."""
        dirpath.mkdir(mode=self.DIR_PERMS, parents=True, exist_ok=True)

    def _write_with_octal_perms(self, fpath, octal_perms: int, data):
        """Create *fpath* with exactly *octal_perms* and write str or bytes."""
        if isinstance(data, bytes):
            mode = "wb"
        elif isinstance(data, str):
            mode = "w"
        else:
            # TypeError is more precise than the old bare Exception and is
            # still caught by any `except Exception` caller.
            raise TypeError("Unknown data type for writing")
        # os.open applies the permission bits atomically at creation time,
        # avoiding a window where the file exists with default permissions.
        with open(os.open(fpath, os.O_CREAT | os.O_WRONLY, octal_perms), mode) as f:
            f.write(data)
def do_signin(vault="Machine Credentials"):
    """Prompt for the 1Password master password and return a signed-in OP."""
    master_password = getpass.getpass(prompt="1Password master password:\n")
    return OP(vault=vault, password=master_password)
def do_parse_args():
    """Define and evaluate this example's command-line interface."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("server_name", help="Name of server to fetch SSH keys for")
    arg_parser.add_argument("key_name", help="Name of SSH identity file")
    arg_parser.add_argument("--pub-only", help="Only fetch public key for identity", action="store_true")
    arg_parser.add_argument("--outdir", help="Optional directory to write keys to. Default is CWD")
    arg_parser.add_argument("--vault", help="Optional name of 1Password vault to search")
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI args, sign in to 1Password, fetch the named
    server item, and write its SSH identity file(s) to disk."""
    args = do_parse_args()
    vault = args.vault
    server_name = args.server_name
    key_name = args.key_name
    # Only pass the vault through when the user supplied one, so the
    # default in do_signin applies otherwise.
    if vault:
        op = do_signin(vault=vault)
    else:
        op = do_signin()
    if args.outdir:
        outdir = Path(args.outdir)
    else:
        outdir = Path(".")
    server: OPServerItem = op.get_item(server_name)
    # Re-binding `server` wraps the raw 1Password item in the helper class.
    server: ServerWithSSHKeys = ServerWithSSHKeys(server)
    server.write_ssh_keys(outdir, key_name, args.pub_only)
if __name__ == "__main__":
    main()
| 2,371 | 239 | 92 |
635341ad576004ff02052ecc64b5cd12d53ccc8e | 1,001 | py | Python | simply/simplyRPClient.py | sergsb/simply | e1bea1a3a1f0d71e5ac97ffec4964738aa43cbf3 | [
"MIT"
] | null | null | null | simply/simplyRPClient.py | sergsb/simply | e1bea1a3a1f0d71e5ac97ffec4964738aa43cbf3 | [
"MIT"
] | null | null | null | simply/simplyRPClient.py | sergsb/simply | e1bea1a3a1f0d71e5ac97ffec4964738aa43cbf3 | [
"MIT"
] | null | null | null | import uuid
import msgpack
import redis
| 38.5 | 103 | 0.583417 | import uuid
import msgpack
import redis
class SimplyRedisClient():
    """RPC-over-Redis client.

    Pushes a msgpack-encoded call description onto the '<name>:<plugin>'
    list and blocks on '<name>:general:<call id>' for the reply.
    """
    def __init__(self,url,name,plugin):
        self.redis = redis.from_url(url)
        self.name = name
        self.plugin = plugin
    def call(self,function,args,kwargs,type='instant'):
        """Invoke *function* remotely and return its result.

        Raises Exception when the server reports an error or replies with
        an unrecognized status. type='delayed' waits for a second reply
        (presumably an immediate ack followed by the real result —
        confirm against the server protocol).
        """
        idx = str(uuid.uuid4())
        # Call envelope; 'id' tells the server which reply list to push to.
        run = {'method': function, 'type': type, 'args': args, 'kwargs': kwargs, 'id': idx}
        self.redis.rpush('{}:{}'.format(self.name,self.plugin), msgpack.packb(run, use_bin_type=True))
        # blpop blocks until the server pushes a reply for this call id;
        # [1] selects the payload from the (key, value) pair.
        res = msgpack.unpackb(self.redis.blpop('{}:general:{}'.format(self.name,idx))[1],raw=False)
        #print('first ',res)
        if type == 'delayed':
            # print(res)
            # Delayed calls: discard the first reply and wait for the final one.
            res = msgpack.unpackb(self.redis.blpop('{}:general:{}'.format(self.name,idx))[1],raw=False)
        if res['status'] == 'error':
            raise Exception(res['exception'])
        elif res['status'] == 'ok':
            return res['result']
        else:
            raise Exception("Unknown error: {}".format(res))
| 880 | 5 | 76 |
df90b5084ec078dc608d22914913d694a5f7c40b | 21,282 | py | Python | scraper.py | dancing-rain/HackIllinois-2022-RedditUsrInfoDiscordBot | 65462267f5a282e68b5714c0a1e09a6ded939f1a | [
"MIT"
] | 2 | 2022-02-26T07:27:10.000Z | 2022-02-26T22:41:06.000Z | scraper.py | dancing-rain/HackIllinois-2022-RedditUsrInfoDiscordBot | 65462267f5a282e68b5714c0a1e09a6ded939f1a | [
"MIT"
] | null | null | null | scraper.py | dancing-rain/HackIllinois-2022-RedditUsrInfoDiscordBot | 65462267f5a282e68b5714c0a1e09a6ded939f1a | [
"MIT"
] | 1 | 2022-02-26T07:21:01.000Z | 2022-02-26T07:21:01.000Z | #Dependencies
from array import array
from operator import mod
from statistics import mode
from unicodedata import name
import praw
import os
from datetime import datetime
import time
from prawcore.exceptions import NotFound
import json
from dotenv import load_dotenv
import scraper as scrape
load_dotenv("./.env")
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
PASSWORD = os.getenv("PASS")
USER_AGENT = os.getenv("USER_AGENT")
USERNAME = os.getenv("USERNAME")
abs_path = os.path.abspath(__file__)
dir_name = os.path.dirname(abs_path)
os.chdir(dir_name)
if __name__ == '__main__':
    # Build a read-only praw client from the credentials loaded from .env.
    reddit = praw.Reddit( #instance of praw reddit for API access
        client_id = CLIENT_ID,
        client_secret = CLIENT_SECRET,
        password = PASSWORD,
        user_agent = USER_AGENT,
        username = USERNAME,
    )
    reddit.read_only = True;
    print()
    user_name = GetUsernameInput(reddit)
    print()
    # Reset the output file to an empty JSON list; each Convert*ToTxt call
    # below appends its own section.
    with open("scraper_output.json", mode='w') as outfile:
        json.dump([], outfile, indent=2)
    user_as_redditor = reddit.redditor(user_name)
    user_info = UserInfo()
    user_comments_list = list(user_as_redditor.comments.new(limit=99)).copy() #Limited to 100 historical submissions by Reddit API
    user_submissions_list = list(user_as_redditor.submissions.new(limit=99)).copy() #Limited to 100 historical submissions by Reddit API
    if user_info.IsSuspended(): #todo issuspended status needs to be updated accurately prior
        print("User is shadowbanned - only contains name and is_suspended attributes")
    else:
        # NOTE(review): SetBasicInfo is called here with no argument, but the
        # UserInfo.SetBasicInfo definition takes the redditor as a parameter —
        # confirm the intended signature.
        user_info.SetBasicInfo()
        user_info.PrintBasicInfo()
        user_info.ConvertBasicInfoToTxt()
        u1 = TopFiveVotedSubmissionsData()
        u1.FindFiveMostVotedSubmissions(user_submissions_list)
        u1.PrintFiveMostVotedSubmissions()
        u1.ConvertFiveMostVotedSubmissionsToTxt()
        u2 = TopFiveVotedCommentsData()
        u2.FindFiveMostVotedComments(user_comments_list)
        u2.PrintFiveMostVotedComments()
        u2.ConvertFiveMostVotedCommentsToTxt()
        u3 = VoteDistribution()
        u3.FindVoteDistribution(user_comments_list, user_submissions_list)
        u3.PrintVoteDistribution()
        u3.ConvertVoteDistributionToTxt()
        u4 = MostActiveSubs()
        u4.FindMostActive(user_comments_list, user_submissions_list)
        u4.PrintActiveSubs()
        u4.ConvertActiveSubsToTxt()
    #test json reader
    '''print("")
    temp = GetUserFromJson("scraper_output.json")
    temp["UserInfo"].PrintBasicInfo()
    temp["FiveMostVotedSubmissions"].PrintFiveMostVotedSubmissions()
    temp["FiveMostVotedComments"].PrintFiveMostVotedComments()
    temp["VoteDistribution"].PrintVoteDistribution()
    temp["MostActiveSubreddits"].PrintActiveSubs()'''
    print("")
print("") | 43.971074 | 282 | 0.619303 | #Dependencies
from array import array
from operator import mod
from statistics import mode
from unicodedata import name
import praw
import os
from datetime import datetime
import time
from prawcore.exceptions import NotFound
import json
from dotenv import load_dotenv
import scraper as scrape
load_dotenv("./.env")
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
PASSWORD = os.getenv("PASS")
USER_AGENT = os.getenv("USER_AGENT")
USERNAME = os.getenv("USERNAME")
abs_path = os.path.abspath(__file__)
dir_name = os.path.dirname(abs_path)
os.chdir(dir_name)
def UserExists(name: str, reddit: praw.models.Redditor):
    """Return True when *name* belongs to an existing redditor, else False."""
    try:
        # Accessing .id forces the lazy redditor object to hit the API.
        _ = reddit.redditor(name).id
    except NotFound:
        return False
    else:
        return True
def GetUsernameInput(reddit: praw.models.Redditor):
    """Prompt repeatedly until the user enters an existing reddit username."""
    while True:
        candidate = input("Enter username (eg _dancingrain_): ")
        if UserExists(candidate, reddit):
            return candidate
        print("\nUsername not found, try again\n")
class UserInfo:
    """Basic profile information for a reddit user.

    Field values are stored as display-ready strings; ``info_map`` mirrors
    them as a single dict used for printing and for the JSON export.
    """
    id: str                # user's id - short series of alphanumeric characters
    name: str              # user's name
    cake_day: str          # month/day/year creation timestamp (UTC)
    age: str               # account age in days
    karma_comments: str    # comment karma, may be slightly off
    karma_overall: str     # comment karma + post karma, may be slightly off
    moderator: str         # "True" if user is a subreddit moderator
    suspended: str         # "True" if user is suspended from reddit
    five_most_voted_submissions: str
    five_most_voted_comments: str
    vote_distribution: str
    most_active_subs: str
    info_map: map

    def __init__(self, id="", name="", cake_day="", age="", karma_comments="", karma_overall="", moderator="False", suspended="False", txt_delimiter = "UserInfo_delim"):
        """Initialize with optional pre-known values and build info_map."""
        self.id = id
        self.name = name
        self.cake_day = cake_day
        self.age = age
        self.karma_comments = karma_comments
        self.karma_overall = karma_overall
        self.moderator = moderator
        self.suspended = suspended
        self._rebuild_info_map()

    def _rebuild_info_map(self):
        """Synchronize info_map with the current attribute values."""
        self.info_map = {"Username":self.name, "Cake Day":self.cake_day, "Age":self.age, "User Comment Karma":self.karma_comments, "User Overall Karma":self.karma_overall, "User is a moderator":self.moderator, "User is suspended":self.suspended, "User ID":self.id}

    def SetBasicInfo(self, user_as_redditor):
        """Populate the fields from a praw Redditor object.

        A suspended/shadowbanned account exposes an ``is_suspended``
        attribute; for such accounts only name and suspended are filled.
        """
        #Username
        self.name = user_as_redditor.name
        #Is user suspended
        self.suspended = "True"
        shadowbanned = True
        try:
            # BUG FIX: previously probed self.user_as_redditor (an attribute
            # that is never set), so the AttributeError always fired, every
            # account was reported as not suspended, and truly suspended
            # accounts then crashed on the attribute reads below.
            user_as_redditor.is_suspended
        except AttributeError:
            # Normal (non-suspended) accounts do not expose is_suspended.
            self.suspended = "False"
            shadowbanned = False
        if not shadowbanned:
            #ID
            self.id = user_as_redditor.id
            #UTC
            self.cake_day = datetime.utcfromtimestamp(int(user_as_redditor.created_utc)).strftime("%m/%d/%Y, %H:%M:%S") + " UTC"
            #Days
            self.age = str(int((time.time()-user_as_redditor.created_utc)/86400)) + " days"
            #PRAW Karma may vary from actual
            self.karma_comments = str(user_as_redditor.comment_karma) + " karma"
            self.karma_overall = str(user_as_redditor.link_karma + user_as_redditor.comment_karma) + " karma"
            #Is user a moderator
            self.moderator = "True" if user_as_redditor.is_mod else "False"
        self._rebuild_info_map()

    def SetUserInfo(self, data:map):
        """Overwrite info_map entries from *data* (e.g. parsed JSON)."""
        for k, v in data.items():
            self.info_map[k] = v

    def IsSuspended(self):
        """Return True when the user is flagged as suspended."""
        return self.suspended == "True"

    def ConvertBasicInfoToTxt(self):
        """Append this user's info_map to scraper_output.json."""
        with open("scraper_output.json", "r") as f:
            feed = json.load(f)
        with open("scraper_output.json", "w") as outfile:
            feed.append({"UserInfo":self.info_map})
            json.dump(list(feed), outfile, indent=2)

    def PrintBasicInfo(self):
        """Print each info_map entry as 'key: value'."""
        for k, v in self.info_map.items():
            print(str(k) + ": " + str(v))

    def BasicInfoAsString(self):
        """Return info_map rendered as newline-terminated 'key: value' lines."""
        return "".join(str(k) + ": " + str(v) + "\n" for k, v in self.info_map.items())
class TopFiveVotedSubmissionsData:
    """Collects a user's five highest-scoring submissions for display/export."""
    descriptive_header: str
    info_list_of_maps: list  # one dict of display fields per ranked submission

    def __init__(self, descriptive_header="\nTop 5 most upvoted posts (Out of last 99 posts):\n", txt_delimiter = "TopFiveVotedSubmissionsData_delim"):
        self.descriptive_header = descriptive_header
        self.info_list_of_maps = []

    def FindFiveMostVotedSubmissions(self, user_submissions_list:list):
        """Fill info_list_of_maps with up to five submissions sorted by score."""
        ranked = sorted(user_submissions_list, key=lambda s: s.score, reverse=True)
        for rank, submission in enumerate(ranked[:5], start=1):
            self.info_list_of_maps.append({
                "Rank": str(rank),
                "Score": str(submission.score),
                # NOTE: key kept as "Time:" (with trailing colon) so existing
                # scraper_output.json consumers keep working.
                "Time:": str(datetime.utcfromtimestamp(int(submission.created_utc)).strftime("%m/%d/%Y, %H:%M:%S")),
                "Comments": str(submission.num_comments),
                "Title": str(submission.title),
            })

    def _format_entry(self, entry):
        """Render one entry as 'key: value' fields joined with ' | '.

        BUG FIX: the old separator condition (idx < len(entry)) was always
        true, leaving a dangling ' | ' at the end of every line.
        """
        return " | ".join(str(k) + ": " + str(v) for k, v in entry.items())

    def PrintFiveMostVotedSubmissions(self):
        """Print the header followed by one formatted line per submission."""
        print(self.descriptive_header)
        for entry in self.info_list_of_maps:
            print(self._format_entry(entry))

    def GetFiveMostVotedSubmissions(self):
        """Return all entries joined into one newline-separated string."""
        return "\n".join(self._format_entry(entry) for entry in self.info_list_of_maps)

    def ConvertFiveMostVotedSubmissionsToTxt(self):
        """Append the entries (keyed by rank) to scraper_output.json."""
        with open("scraper_output.json", "r") as f:
            feed = json.load(f)
        with open("scraper_output.json", "w") as outfile:
            info_map = {i + 1: dict(entry) for i, entry in enumerate(self.info_list_of_maps)}
            feed.append({"FiveMostVotedSubmissions": info_map})
            json.dump(list(feed), outfile, indent=2)

    def SetFiveMostVotedSubmissionsFromJsonMap(self, data:map):
        """Rebuild entries from a parsed JSON map of rank -> field dict."""
        for k, v in data.items():
            self.info_list_of_maps.append({k: v})
class TopFiveVotedCommentsData:
    """Collects a user's five highest-scoring comments for display/export."""
    descriptive_header: str
    info_list_of_maps: list  # one dict of display fields per ranked comment

    def __init__(self, descriptive_header="\nTop 5 most upvoted comments (Out of last 99 posts):\n", txt_delimiter = "TopFiveVotedCommentsData_delim"):
        self.descriptive_header = descriptive_header
        self.info_list_of_maps = []

    def FindFiveMostVotedComments(self, user_comments_list: list):
        """Fill info_list_of_maps with up to five comments sorted by score."""
        ranked = sorted(user_comments_list, key=lambda c: c.score, reverse=True)
        for rank, comment in enumerate(ranked[:5], start=1):
            self.info_list_of_maps.append({
                "Rank": str(rank),
                "Score": str(comment.score),
                "Time": str(datetime.utcfromtimestamp(int(comment.created_utc)).strftime("%m/%d/%Y, %H:%M:%S")),
                "Replies": str(len(comment.replies)),
                # First 35 characters of the flattened body as a preview.
                "Body": (comment.body.replace("\n", "")[0:35] + "..."),
            })

    def _format_entry(self, entry):
        """Render one entry as 'key: value' fields joined with ' | '.

        BUG FIX: the old separator condition (idx < len(entry)) was always
        true, leaving a dangling ' | ' at the end of every line.
        """
        return " | ".join(str(k) + ": " + str(v) for k, v in entry.items())

    def PrintFiveMostVotedComments(self):
        """Print the header followed by one formatted line per comment."""
        print(self.descriptive_header)
        for entry in self.info_list_of_maps:
            print(self._format_entry(entry))

    def GetFiveMostVotedComments(self):
        """Return all entries joined into one newline-separated string."""
        return "\n".join(self._format_entry(entry) for entry in self.info_list_of_maps)

    def ConvertFiveMostVotedCommentsToTxt(self):
        """Append the entries (keyed by rank) to scraper_output.json."""
        with open("scraper_output.json", "r") as f:
            feed = json.load(f)
        with open("scraper_output.json", "w") as outfile:
            info_map = {i + 1: dict(entry) for i, entry in enumerate(self.info_list_of_maps)}
            feed.append({"FiveMostVotedComments": info_map})
            json.dump(list(feed), outfile, indent=2)

    def SetFiveMostVotedCommentsFromJsonMap(self, data:map):
        """Rebuild entries from a parsed JSON map of rank -> field dict."""
        for k, v in data.items():
            self.info_list_of_maps.append({k: v})
class VoteDistribution:
    """Ranks the subreddits where a user's comments/submissions earned votes."""

    descriptive_header: str
    info_list_of_maps: list

    def __init__(self, descriptive_header="\nUser's top subreddits ranked by comment/submission upvotes (Out of last 198 interactions):\n", txt_delimiter = "VoteDistribution_delim"):
        # txt_delimiter kept only for signature compatibility; never read.
        self.descriptive_header = descriptive_header
        self.info_list_of_maps = []

    def FindVoteDistribution(self, user_comments_list:list, user_submissions_list:list):
        """Tally upvotes per subreddit and store rows, highest total first."""
        totals = {}
        # Comments first, then submissions — preserves the original's
        # insertion order, so ties keep the same relative order too.
        for item in list(user_comments_list) + list(user_submissions_list):
            name = item.subreddit.display_name
            totals[name] = totals.get(name, 0) + item.score
        ordered = sorted(totals.items(), key=lambda pair: pair[1], reverse=True)
        for rank, (name, votes) in enumerate(ordered, start=1):
            self.info_list_of_maps.append(
                {"Rank": str(rank), "Subreddit": name, "Vote Count": str(votes)})

    def _format_row(self, row):
        # " | " follows every field including the last — the original's
        # separator test (idx1 < len(row)) is always true.
        return "".join(str(key) + ": " + str(value) + " | " for key, value in row.items())

    def PrintVoteDistribution(self):
        """Print the header followed by one formatted line per stored row."""
        print(self.descriptive_header)
        for row in self.info_list_of_maps:
            print(self._format_row(row))

    def GetVoteDistribution(self):
        """Return all stored rows as one newline-separated string."""
        return "\n".join(self._format_row(row) for row in self.info_list_of_maps)

    def GetDistributionAsList(self):
        """Return ([vote-count strings], [subreddit names]) in stored order."""
        counts = [row["Vote Count"] for row in self.info_list_of_maps if "Vote Count" in row]
        labels = [row["Subreddit"] for row in self.info_list_of_maps if "Subreddit" in row]
        return counts, labels

    def ConvertVoteDistributionToTxt(self):
        """Append {"VoteDistribution": {rank: row}} to scraper_output.json."""
        with open("scraper_output.json", "r") as infile:
            feed = json.load(infile)
        ranked = {
            rank: dict(fields)
            for rank, fields in enumerate(self.info_list_of_maps, start=1)
        }
        feed.append({"VoteDistribution": ranked})
        with open("scraper_output.json", "w") as outfile:
            json.dump(list(feed), outfile, indent=2)

    def SetVoteDistributionFromJsonMap(self,data:map):
        """Reload rows previously serialized by ConvertVoteDistributionToTxt."""
        for key, value in data.items():
            self.info_list_of_maps.append({key: value})
class MostActiveSubs:
    """Ranks subreddits by how many of the user's comments/submissions they hold."""

    descriptive_header: str
    info_list_of_maps: list

    def __init__(self, descriptive_header="\nTop active subreddits ranked by quantity of comments and submissions (Out of last 198 interactions):\n", txt_delimiter = "MostActiveSubs_delim"):
        # txt_delimiter kept only for signature compatibility; never read.
        self.descriptive_header = descriptive_header
        self.info_list_of_maps = []

    def FindMostActive(self, user_comments_list:list, user_submissions_list:list):
        """Count interactions per subreddit and store rows, busiest first."""
        tally = {}
        for item in list(user_comments_list) + list(user_submissions_list):
            name = item.subreddit.display_name
            tally[name] = tally.get(name, 0) + 1
        ordered = sorted(tally.items(), key=lambda pair: pair[1], reverse=True)
        for rank, (name, count) in enumerate(ordered, start=1):
            self.info_list_of_maps.append(
                {"Rank": str(rank), "Subreddit": name, "Post/Repl Count": str(count)})

    def _format_row(self, row):
        # " | " follows every field including the last — the original's
        # separator test (idx1 < len(row)) is always true.
        return "".join(str(key) + ": " + str(value) + " | " for key, value in row.items())

    def PrintActiveSubs(self):
        """Print the header followed by one formatted line per stored row."""
        print(self.descriptive_header)
        for row in self.info_list_of_maps:
            print(self._format_row(row))

    def GetActiveSubs(self):
        """Return all stored rows as one newline-separated string."""
        return "\n".join(self._format_row(row) for row in self.info_list_of_maps)

    def GetActiveSubsAsList(self):
        """Return ([interaction-count strings], [subreddit names]) in stored order."""
        counts = [row["Post/Repl Count"] for row in self.info_list_of_maps if "Post/Repl Count" in row]
        names = [row["Subreddit"] for row in self.info_list_of_maps if "Subreddit" in row]
        return counts, names

    def ConvertActiveSubsToTxt(self):
        """Append {"MostActiveSubreddits": {rank: row}} to scraper_output.json."""
        with open("scraper_output.json", "r") as infile:
            feed = json.load(infile)
        ranked = {
            rank: dict(fields)
            for rank, fields in enumerate(self.info_list_of_maps, start=1)
        }
        feed.append({"MostActiveSubreddits": ranked})
        with open("scraper_output.json", "w") as outfile:
            json.dump(list(feed), outfile, indent=2)

    def SetMostActiveFromJsonMap(self,data:map):
        """Reload rows previously serialized by ConvertActiveSubsToTxt."""
        for key, value in data.items():
            self.info_list_of_maps.append({key: value})
def GetUserFromJson(file_name:str):
    """Rebuild analysis objects from a scraper_output.json-style file.

    Each element of the file's top-level array is a single-key record such
    as {"UserInfo": {...}}. Returns a dict mapping the record type name to
    a populated instance of the matching class; unknown types are skipped.
    """
    loaded = {}
    with open(file_name, mode='r') as outfile:
        records = json.load(outfile)
    for record in records:
        record_type = str(list(record.keys())[0])
        payload = list(record.values())[0]
        if record_type == "UserInfo":
            instance = UserInfo()
            instance.SetUserInfo(payload)
        elif record_type == "FiveMostVotedSubmissions":
            instance = TopFiveVotedSubmissionsData()
            instance.SetFiveMostVotedSubmissionsFromJsonMap(payload)
        elif record_type == "FiveMostVotedComments":
            instance = TopFiveVotedCommentsData()
            instance.SetFiveMostVotedCommentsFromJsonMap(payload)
        elif record_type == "VoteDistribution":
            instance = VoteDistribution()
            instance.SetVoteDistributionFromJsonMap(payload)
        elif record_type == "MostActiveSubreddits":
            instance = MostActiveSubs()
            instance.SetMostActiveFromJsonMap(payload)
        else:
            continue  # silently ignore unrecognized record types
        loaded[record_type] = instance
    return loaded
if __name__ == '__main__':
    # Build a PRAW client from module-level credentials (CLIENT_ID etc.
    # are defined earlier in this file, outside this view).
    reddit = praw.Reddit( # instance of praw reddit for API access
        client_id = CLIENT_ID,
        client_secret = CLIENT_SECRET,
        password = PASSWORD,
        user_agent = USER_AGENT,
        username = USERNAME,
    )
    reddit.read_only = True;
    print()
    user_name = GetUsernameInput(reddit)
    print()
    # Start the output file as an empty JSON array; each Convert*ToTxt call
    # below re-reads this file and appends one record to it.
    with open("scraper_output.json", mode='w') as outfile:
        json.dump([], outfile, indent=2)
    user_as_redditor = reddit.redditor(user_name)
    user_info = UserInfo()
    user_comments_list = list(user_as_redditor.comments.new(limit=99)).copy() # Reddit API caps history at 100 comments
    user_submissions_list = list(user_as_redditor.submissions.new(limit=99)).copy() # Reddit API caps history at 100 submissions
    if user_info.IsSuspended(): # todo: issuspended status needs to be updated accurately prior to this check
        print("User is shadowbanned - only contains name and is_suspended attributes")
    else:
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # copy — confirm the full analysis below is meant to run only for
        # non-suspended users.
        user_info.SetBasicInfo()
        user_info.PrintBasicInfo()
        user_info.ConvertBasicInfoToTxt()
        u1 = TopFiveVotedSubmissionsData()
        u1.FindFiveMostVotedSubmissions(user_submissions_list)
        u1.PrintFiveMostVotedSubmissions()
        u1.ConvertFiveMostVotedSubmissionsToTxt()
        u2 = TopFiveVotedCommentsData()
        u2.FindFiveMostVotedComments(user_comments_list)
        u2.PrintFiveMostVotedComments()
        u2.ConvertFiveMostVotedCommentsToTxt()
        u3 = VoteDistribution()
        u3.FindVoteDistribution(user_comments_list, user_submissions_list)
        u3.PrintVoteDistribution()
        u3.ConvertVoteDistributionToTxt()
        u4 = MostActiveSubs()
        u4.FindMostActive(user_comments_list, user_submissions_list)
        u4.PrintActiveSubs()
        u4.ConvertActiveSubsToTxt()
    # test json reader (kept disabled)
    '''print("")
temp = GetUserFromJson("scraper_output.json")
temp["UserInfo"].PrintBasicInfo()
temp["FiveMostVotedSubmissions"].PrintFiveMostVotedSubmissions()
temp["FiveMostVotedComments"].PrintFiveMostVotedComments()
temp["VoteDistribution"].PrintVoteDistribution()
temp["MostActiveSubreddits"].PrintActiveSubs()'''
    print("")
253e580a3773a11365a717fa1945cf25ba110650 | 554 | py | Python | accounting/blueprints/account_type/forms.py | alvin-c-cruz/accounting | f16ef16ded3cab36eee7227008ae40856680034d | [
"MIT"
] | 1 | 2022-02-05T13:57:40.000Z | 2022-02-05T13:57:40.000Z | accounting/blueprints/account_type/forms.py | alvin-c-cruz/accounting | f16ef16ded3cab36eee7227008ae40856680034d | [
"MIT"
] | null | null | null | accounting/blueprints/account_type/forms.py | alvin-c-cruz/accounting | f16ef16ded3cab36eee7227008ae40856680034d | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired
from .models import AccountType
| 36.933333 | 80 | 0.752708 | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired
from .models import AccountType
class AccountTypeForm(FlaskForm):
    """WTForms form for creating or editing an AccountType record."""
    # Free-text description of the account type; required.
    account_type = StringField(label="Description", validators=[DataRequired()])
    # Classification select; options come from AccountType.classification_choices().
    classification = SelectField(
        label="Classification",
        validators=[DataRequired()],
        choices=AccountType.classification_choices()
    )
    # Display/sort order; required. NOTE(review): this is a plain string
    # field — confirm numeric validation happens elsewhere.
    priority = StringField(label="Order", validators=[DataRequired()])
    submit = SubmitField(label="Save")
5d6c9190d3e7576fe518ec7b1ef02456361a1ace | 507 | py | Python | tests/bench_mark/func_exec_time_decorator.py | apache/incubator-sdap-in-situ-data-services | 4e65e0e2eb178461baba61e2204e5a97f701d8ed | [
"Apache-2.0"
] | 1 | 2021-11-07T20:27:13.000Z | 2021-11-07T20:27:13.000Z | tests/bench_mark/func_exec_time_decorator.py | apache/incubator-sdap-in-situ-data-services | 4e65e0e2eb178461baba61e2204e5a97f701d8ed | [
"Apache-2.0"
] | null | null | null | tests/bench_mark/func_exec_time_decorator.py | apache/incubator-sdap-in-situ-data-services | 4e65e0e2eb178461baba61e2204e5a97f701d8ed | [
"Apache-2.0"
] | 2 | 2021-11-07T20:27:05.000Z | 2021-11-15T15:40:40.000Z | import logging
from datetime import datetime
from functools import wraps
LOGGER = logging.getLogger(__name__)
| 28.166667 | 82 | 0.682446 | import logging
import logging
from datetime import datetime
from functools import wraps

LOGGER = logging.getLogger(__name__)


def func_exec_time_decorator(f):
    """Decorator that times *f* and returns (result, elapsed_seconds).

    The elapsed wall-clock time is also logged at INFO level. Note the
    wrapped callable no longer returns f's value directly — callers get
    a (result, seconds) tuple.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        started = datetime.now()
        outcome = f(*args, **kwargs)
        elapsed = (datetime.now() - started).total_seconds()
        LOGGER.info(f'duration: {elapsed} s. name: {f.__name__}')
        return outcome, elapsed
    return decorated_function
| 372 | 0 | 23 |
c3a5056c918dabba6cfeccc2f5e35a381a297809 | 678 | py | Python | falconn/src/examples/glove/convert.py | bobpoekert/ocamlfalconn | 678976064077ca2a4bc6ced3e84042ac1751669a | [
"MIT"
] | 1,068 | 2015-12-10T18:03:11.000Z | 2022-03-29T09:05:38.000Z | falconn/src/examples/glove/convert.py | bobpoekert/ocamlfalconn | 678976064077ca2a4bc6ced3e84042ac1751669a | [
"MIT"
] | 108 | 2015-12-10T21:14:41.000Z | 2022-03-15T17:51:17.000Z | falconn/src/examples/glove/convert.py | bobpoekert/ocamlfalconn | 678976064077ca2a4bc6ced3e84042ac1751669a | [
"MIT"
] | 224 | 2015-12-17T02:35:21.000Z | 2022-03-29T09:05:40.000Z | #!/usr/bin/python
import sys
import struct
import numpy as np
matrix = []
with open('dataset/glove.840B.300d.txt', 'r') as inf:
with open('dataset/glove.840B.300d.dat', 'wb') as ouf:
counter = 0
for line in inf:
row = [float(x) for x in line.split()[1:]]
assert len(row) == 300
ouf.write(struct.pack('i', len(row)))
ouf.write(struct.pack('%sf' % len(row), *row))
counter += 1
matrix.append(np.array(row, dtype=np.float32))
if counter % 10000 == 0:
sys.stdout.write('%d points processed...\n' % counter)
np.save('dataset/glove.840B.300d', np.array(matrix))
| 32.285714 | 70 | 0.558997 | #!/usr/bin/python
import sys
import struct
import numpy as np
matrix = []
with open('dataset/glove.840B.300d.txt', 'r') as inf:
with open('dataset/glove.840B.300d.dat', 'wb') as ouf:
counter = 0
for line in inf:
row = [float(x) for x in line.split()[1:]]
assert len(row) == 300
ouf.write(struct.pack('i', len(row)))
ouf.write(struct.pack('%sf' % len(row), *row))
counter += 1
matrix.append(np.array(row, dtype=np.float32))
if counter % 10000 == 0:
sys.stdout.write('%d points processed...\n' % counter)
np.save('dataset/glove.840B.300d', np.array(matrix))
| 0 | 0 | 0 |
54c89ad19cbe5956de571d43a26e1b16cbff6748 | 18,826 | py | Python | iso3166/__init__.py | briangmaddox/QGISSOLR | e98e98f89265b7d0b6b8a760f6233c990ce368c3 | [
"MIT"
] | null | null | null | iso3166/__init__.py | briangmaddox/QGISSOLR | e98e98f89265b7d0b6b8a760f6233c990ce368c3 | [
"MIT"
] | null | null | null | iso3166/__init__.py | briangmaddox/QGISSOLR | e98e98f89265b7d0b6b8a760f6233c990ce368c3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from builtins import object
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
str
except NameError:
str = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czechia", "CZ", "CZE", "203", u"Czechia"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom of Great Britain and Northern Ireland",
"GB", "GBR", "826",
u"United Kingdom of Great Britain and Northern Ireland"),
Country(u"United States of America", "US", "USA", "840",
u"United States of America"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
# Internal country indexes.
# NOTE(review): _build_index and _CountryLookup were referenced below but
# missing from this copy of the module (the file raised NameError on
# import). Both are reconstructed here; confirm against the upstream
# iso3166 package before release.

def _build_index(idx):
    """Return a dict mapping the upper-cased *idx*-th field of every
    record in _records to its Country tuple."""
    return dict((r[idx].upper(), r) for r in _records)


_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)

# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name

# Sentinel distinguishing "no default supplied" from a None default.
NOT_FOUND = object()


class _CountryLookup(object):
    """Unified lookup over every index: alpha2, alpha3, numeric (int or
    zero-padded string) and full/apolitical name, case-insensitively.
    Also iterable over all Country records."""

    def get(self, key, default=NOT_FOUND):
        """Return the Country for *key*, or *default* if given; raise
        KeyError when *key* is unknown and no default was supplied."""
        if isinstance(key, Integral):
            result = _by_numeric.get("%03d" % key, default)
        elif re.match(r"^[0-9]{3}$", key):
            result = _by_numeric.get(key, default)
        elif re.match(r"(?i)^[a-z]{2}$", key):
            result = _by_alpha2.get(key.upper(), default)
        elif re.match(r"(?i)^[a-z]{3}$", key):
            result = _by_alpha3.get(key.upper(), default)
        else:
            result = _by_name.get(key.upper(), NOT_FOUND)
            if result is NOT_FOUND:
                result = _by_apolitical_name.get(key.upper(), default)
        if result is NOT_FOUND:
            raise KeyError(key)
        return result

    __getitem__ = get

    def __len__(self):
        return len(_records)

    def __iter__(self):
        return iter(_records)

    def __contains__(self, key):
        try:
            self.get(key)
            return True
        except KeyError:
            return False


countries = _CountryLookup()
| 49.412073 | 78 | 0.565973 | # -*- coding: utf-8 -*-
from builtins import object
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
str
except NameError:
str = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czechia", "CZ", "CZE", "203", u"Czechia"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom of Great Britain and Northern Ireland",
"GB", "GBR", "826",
u"United Kingdom of Great Britain and Northern Ireland"),
Country(u"United States of America", "US", "USA", "840",
u"United States of America"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
def _build_index(idx):
    """Return a lookup dict mapping field *idx* (upper-cased) of each record
    to the record itself.

    Used below to build the alpha-2, alpha-3, numeric and name indexes over
    ``_records``.
    """
    # Dict comprehension instead of dict() over a generator expression
    # (clearer and avoids the unnecessary intermediate generator, C402).
    return {record[idx].upper(): record for record in _records}
# Internal country indexes
_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)
# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
NOT_FOUND = object()
class _CountryLookup(object):
    """Read-only, dict-like lookup over the ISO 3166-1 country records.

    A key may be an integer (numeric code), a 3-digit string (numeric code),
    a 2-letter alpha-2 code, a 3-letter alpha-3 code, or a country name /
    apolitical name.  String lookups are case-insensitive.
    """

    def get(self, key, default=NOT_FOUND):
        """Return the ``Country`` record for *key*.

        If *key* is unknown, return *default* when one was supplied,
        otherwise raise ``KeyError``.
        """
        if isinstance(key, Integral):
            # Numeric codes are stored as zero-padded 3-character strings.
            r = _by_numeric.get("%03d" % key, default)
        elif isinstance(key, str):
            k = key.upper()
            if len(k) == 2:
                r = _by_alpha2.get(k, default)
            elif len(k) == 3 and re.match(r"[0-9]{3}", k):
                # Exactly three characters and all digits: numeric code.
                r = _by_numeric.get(k, default)
            elif len(k) == 3:
                r = _by_alpha3.get(k, default)
            elif k in _by_name:
                r = _by_name.get(k, default)
            else:
                r = _by_apolitical_name.get(k, default)
        else:
            r = default
        # Fix: NOT_FOUND is a sentinel object(), so identity ("is") is the
        # correct test.  "==" invoked namedtuple equality on every hit and
        # would misbehave for defaults that compare equal to records.
        if r is NOT_FOUND:
            raise KeyError(key)
        return r

    __getitem__ = get

    def __len__(self):
        return len(_records)

    def __iter__(self):
        return iter(_records)

    def __contains__(self, item):
        # Delegate to get(): any key form accepted there is "contained".
        try:
            self.get(item)
            return True
        except KeyError:
            return False
countries = _CountryLookup()
| 931 | 139 | 46 |
eeffb6fed77812d6b8dcfa922e04b9b21a87db1d | 637 | py | Python | tracim/migration/versions/2cd20ff3d23a_user_timezone.py | lebouquetin/tracim | dc3485f92b07ced3230834a5852c9f9574477c1c | [
"MIT"
] | 1 | 2016-09-27T12:16:05.000Z | 2016-09-27T12:16:05.000Z | tracim/migration/versions/2cd20ff3d23a_user_timezone.py | lebouquetin/tracim | dc3485f92b07ced3230834a5852c9f9574477c1c | [
"MIT"
] | null | null | null | tracim/migration/versions/2cd20ff3d23a_user_timezone.py | lebouquetin/tracim | dc3485f92b07ced3230834a5852c9f9574477c1c | [
"MIT"
] | null | null | null | """user_timezone
Revision ID: 2cd20ff3d23a
Revises: b4b8d57b54e5
Create Date: 2016-11-08 11:32:00.903232
"""
# revision identifiers, used by Alembic.
revision = '2cd20ff3d23a'
down_revision = 'b4b8d57b54e5'
from alembic import op
import sqlalchemy as sa
| 23.592593 | 108 | 0.700157 | """user_timezone
Revision ID: 2cd20ff3d23a
Revises: b4b8d57b54e5
Create Date: 2016-11-08 11:32:00.903232
"""
# revision identifiers, used by Alembic.
revision = '2cd20ff3d23a'
down_revision = 'b4b8d57b54e5'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add a non-nullable ``timezone`` column (default
    empty string) to the ``users`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('timezone', sa.Unicode(length=255), server_default='', nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``timezone`` column from ``users``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'timezone')
    ### end Alembic commands ###
| 331 | 0 | 46 |
adfdf8fb280e0e87d5424c93032f99b388e9671d | 1,280 | py | Python | setup.py | CraazzzyyFoxx/lavacord.py | 5974644b2ceb814b8ad3e253e9328d22c5e17921 | [
"MIT"
] | null | null | null | setup.py | CraazzzyyFoxx/lavacord.py | 5974644b2ceb814b8ad3e253e9328d22c5e17921 | [
"MIT"
] | null | null | null | setup.py | CraazzzyyFoxx/lavacord.py | 5974644b2ceb814b8ad3e253e9328d22c5e17921 | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='lavacord.py',
version='1.0.4a1',
description='Its a lavalink nodes manger to make a music bots for discord with python.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/CraazzzyyFoxx/lavacord.py',
author='CraazzzyyFoxx',
author_email='38073783+CraazzzyyFoxx@users.noreply.github.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.9',
"Programming Language :: Python :: 3.10",
'Programming Language :: Python :: 3 :: Only',
],
keywords='lavalink, discord, discord-lavalink, lavacord.py',
packages=["lavacord", "lavacord.types"],
install_requires=["aiohttp", "hikari", "yarl", "tekore", "pydantic"],
project_urls={
'Bug Reports': 'https://github.com/CraazzzyyFoxx/lavacord.py/issues',
'Source': 'https://github.com/CraazzzyyFoxx/lavacord.py/',
},
)
| 36.571429 | 92 | 0.663281 | import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='lavacord.py',
version='1.0.4a1',
description='Its a lavalink nodes manger to make a music bots for discord with python.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/CraazzzyyFoxx/lavacord.py',
author='CraazzzyyFoxx',
author_email='38073783+CraazzzyyFoxx@users.noreply.github.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.9',
"Programming Language :: Python :: 3.10",
'Programming Language :: Python :: 3 :: Only',
],
keywords='lavalink, discord, discord-lavalink, lavacord.py',
packages=["lavacord", "lavacord.types"],
install_requires=["aiohttp", "hikari", "yarl", "tekore", "pydantic"],
project_urls={
'Bug Reports': 'https://github.com/CraazzzyyFoxx/lavacord.py/issues',
'Source': 'https://github.com/CraazzzyyFoxx/lavacord.py/',
},
)
| 0 | 0 | 0 |
aae0e362a38f56ce2cc7a1385f0822cd2db7ed86 | 2,823 | py | Python | examples/wavelets/chirp_cwt_mexh.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 42 | 2021-06-11T17:11:29.000Z | 2022-03-29T11:51:44.000Z | examples/wavelets/chirp_cwt_mexh.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 19 | 2021-06-04T11:36:11.000Z | 2022-01-22T20:13:39.000Z | examples/wavelets/chirp_cwt_mexh.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 5 | 2021-11-21T21:01:11.000Z | 2022-02-28T07:20:03.000Z | """
Chirp CWT with Ricker
=======================
In this example, we analyze a chirp signal with a Ricker (a.k.a. Mexican Hat wavelet)
"""
# Configure JAX to work with 64-bit floating point precision.
from jax.config import config
config.update("jax_enable_x64", True)
# %%
# Let's import necessary libraries
import jax
import numpy as np
import jax.numpy as jnp
# CR.Sparse libraries
import cr.sparse as crs
import cr.sparse.wt as wt
# Utilty functions to construct sinusoids
import cr.sparse.dsp.signals as signals
# Plotting
import matplotlib.pyplot as plt
# %%
# Test signal generation
# ------------------------------
# Sampling frequency in Hz
fs = 100
# Signal duration in seconds
T = 10
# Initial instantaneous frequency for the chirp
f0 = 1
# Final instantaneous frequency for the chirp
f1 = 4
# Construct the chirp signal
t, x = signals.chirp(fs, T, f0, f1, initial_phase=0)
# Plot the chirp signal
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t, x)
ax.grid('on')
# %%
# Power spectrum
# ------------------------------
# Compute the power spectrum
f, sxx = crs.power_spectrum(x, dt=1/fs)
# Plot the power spectrum
fig, ax = plt.subplots(1, figsize=(12,4))
ax.plot(f, sxx)
ax.grid('on')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power')
# %%
# As expected, the power spectrum is able to identify the
# frequencies in the zone 1Hz to 4Hz in the chirp.
# However, the spectrum is unable to localize the
# changes in frequency over time.
# %%
# Ricker/Mexican Hat Wavelet
# ------------------------------
wavelet = wt.build_wavelet('mexh')
# generate the wavelet function for the range of time [-8, 8]
psi, t_psi = wavelet.wavefun()
# plot the wavelet
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t_psi, psi)
ax.grid('on')
# %%
# Wavelet Analysis
# ------------------------------
# select a set of scales for wavelet analysis
# voices per octave
nu = 8
scales = wt.scales_from_voices_per_octave(nu, jnp.arange(32))
scales = jax.device_get(scales)
# Compute the wavelet analysis
output = wt.cwt(x, scales, wavelet)
# Identify the frequencies for the analysis
frequencies = wt.scale2frequency(wavelet, scales) * fs
# Plot the analysis
cmap = plt.cm.seismic
fig, ax = plt.subplots(1, figsize=(10,10))
title = 'Wavelet Transform (Power Spectrum) of signal'
ylabel = 'Frequency (Hz)'
xlabel = 'Time'
power = (abs(output)) ** 2
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8]
contourlevels = np.log2(levels)
im = ax.contourf(t, jnp.log2(frequencies), jnp.log2(power), contourlevels, extend='both',cmap=cmap)
ax.set_title(title, fontsize=20)
ax.set_ylabel(ylabel, fontsize=18)
ax.set_xlabel(xlabel, fontsize=18)
yticks = 2**np.arange(np.ceil(np.log2(frequencies.min())), np.ceil(np.log2(frequencies.max())))
ax.set_yticks(np.log2(yticks))
ax.set_yticklabels(yticks)
ylim = ax.get_ylim()
| 26.383178 | 99 | 0.685087 | """
Chirp CWT with Ricker
=======================
In this example, we analyze a chirp signal with a Ricker (a.k.a. Mexican Hat wavelet)
"""
# Configure JAX to work with 64-bit floating point precision.
from jax.config import config
config.update("jax_enable_x64", True)
# %%
# Let's import necessary libraries
import jax
import numpy as np
import jax.numpy as jnp
# CR.Sparse libraries
import cr.sparse as crs
import cr.sparse.wt as wt
# Utilty functions to construct sinusoids
import cr.sparse.dsp.signals as signals
# Plotting
import matplotlib.pyplot as plt
# %%
# Test signal generation
# ------------------------------
# Sampling frequency in Hz
fs = 100
# Signal duration in seconds
T = 10
# Initial instantaneous frequency for the chirp
f0 = 1
# Final instantaneous frequency for the chirp
f1 = 4
# Construct the chirp signal
t, x = signals.chirp(fs, T, f0, f1, initial_phase=0)
# Plot the chirp signal
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t, x)
ax.grid('on')
# %%
# Power spectrum
# ------------------------------
# Compute the power spectrum
f, sxx = crs.power_spectrum(x, dt=1/fs)
# Plot the power spectrum
fig, ax = plt.subplots(1, figsize=(12,4))
ax.plot(f, sxx)
ax.grid('on')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power')
# %%
# As expected, the power spectrum is able to identify the
# frequencies in the zone 1Hz to 4Hz in the chirp.
# However, the spectrum is unable to localize the
# changes in frequency over time.
# %%
# Ricker/Mexican Hat Wavelet
# ------------------------------
wavelet = wt.build_wavelet('mexh')
# generate the wavelet function for the range of time [-8, 8]
psi, t_psi = wavelet.wavefun()
# plot the wavelet
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t_psi, psi)
ax.grid('on')
# %%
# Wavelet Analysis
# ------------------------------
# select a set of scales for wavelet analysis
# voices per octave
nu = 8
scales = wt.scales_from_voices_per_octave(nu, jnp.arange(32))
scales = jax.device_get(scales)
# Compute the wavelet analysis
output = wt.cwt(x, scales, wavelet)
# Identify the frequencies for the analysis
frequencies = wt.scale2frequency(wavelet, scales) * fs
# Plot the analysis
cmap = plt.cm.seismic
fig, ax = plt.subplots(1, figsize=(10,10))
title = 'Wavelet Transform (Power Spectrum) of signal'
ylabel = 'Frequency (Hz)'
xlabel = 'Time'
power = (abs(output)) ** 2
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8]
contourlevels = np.log2(levels)
im = ax.contourf(t, jnp.log2(frequencies), jnp.log2(power), contourlevels, extend='both',cmap=cmap)
ax.set_title(title, fontsize=20)
ax.set_ylabel(ylabel, fontsize=18)
ax.set_xlabel(xlabel, fontsize=18)
yticks = 2**np.arange(np.ceil(np.log2(frequencies.min())), np.ceil(np.log2(frequencies.max())))
ax.set_yticks(np.log2(yticks))
ax.set_yticklabels(yticks)
ylim = ax.get_ylim()
| 0 | 0 | 0 |
40f8a4494b9bdf239b895320690f0d81b2f6c458 | 6,971 | py | Python | cogs/maps.py | lifehackerhansol/Sycamore | 39b4574cd8224c2b4927992cadf22e4c4c368bd1 | [
"0BSD"
] | null | null | null | cogs/maps.py | lifehackerhansol/Sycamore | 39b4574cd8224c2b4927992cadf22e4c4c368bd1 | [
"0BSD"
] | 4 | 2021-05-25T06:48:00.000Z | 2022-02-03T18:41:57.000Z | cogs/maps.py | lifehackerhansol/Sycamore | 39b4574cd8224c2b4927992cadf22e4c4c368bd1 | [
"0BSD"
] | null | null | null | #
# ISC License
#
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
from discord.ext import commands
class Maps(commands.Cog):
    """
    Map commands

    Each command replies with an embed of the corresponding Kalos-region
    map image via ``self.simple_embed(ctx, filename, title)``.
    NOTE(review): ``simple_embed`` is not defined in this class; it is
    presumably provided by a helper/base elsewhere in the project — confirm.
    """

    @commands.command()
    async def kalos(self, ctx):
        """Kalos map"""
        await self.simple_embed(ctx, "kalos.png", "Kalos Region Map")

    @commands.command()
    async def r1(self, ctx):
        """Route 1"""
        await self.simple_embed(ctx, "r1.png", "Route 1")

    @commands.command()
    async def r2(self, ctx):
        """Route 2"""
        await self.simple_embed(ctx, "r2.png", "Route 2")

    @commands.command()
    async def r3(self, ctx):
        """Route 3"""
        await self.simple_embed(ctx, "r3.png", "Route 3")

    @commands.command()
    async def r4(self, ctx):
        """Route 4"""
        await self.simple_embed(ctx, "r4.png", "Route 4")

    @commands.command()
    async def r5(self, ctx):
        """Route 5"""
        await self.simple_embed(ctx, "r5.png", "Route 5")

    @commands.command()
    async def r6(self, ctx):
        """Route 6"""
        await self.simple_embed(ctx, "r6.png", "Route 6")

    @commands.command()
    async def r7(self, ctx):
        """Route 7"""
        await self.simple_embed(ctx, "r7.png", "Route 7")

    @commands.command()
    async def r8(self, ctx):
        """Route 8"""
        await self.simple_embed(ctx, "r8.png", "Route 8")

    @commands.command()
    async def r9(self, ctx):
        """Route 9"""
        await self.simple_embed(ctx, "r9.png", "Route 9")

    @commands.command()
    async def r10(self, ctx):
        """Route 10"""
        await self.simple_embed(ctx, "r10.png", "Route 10")

    @commands.command()
    async def r11(self, ctx):
        """Route 11"""
        await self.simple_embed(ctx, "r11.png", "Route 11")

    @commands.command()
    async def r12(self, ctx):
        """Route 12"""
        await self.simple_embed(ctx, "r12.png", "Route 12")

    @commands.command()
    async def r13(self, ctx):
        """Route 13"""
        await self.simple_embed(ctx, "r13.png", "Route 13")

    @commands.command()
    async def r14(self, ctx):
        """Route 14"""
        await self.simple_embed(ctx, "r14.png", "Route 14")

    @commands.command()
    async def r15(self, ctx):
        """Route 15"""
        await self.simple_embed(ctx, "r15.png", "Route 15")

    @commands.command()
    async def r16(self, ctx):
        """Route 16"""
        await self.simple_embed(ctx, "r16.png", "Route 16")

    @commands.command()
    async def r17(self, ctx):
        """Route 17"""
        await self.simple_embed(ctx, "r17.png", "Route 17")

    @commands.command()
    async def r18(self, ctx):
        """Route 18"""
        await self.simple_embed(ctx, "r18.png", "Route 18")

    @commands.command()
    async def r19(self, ctx):
        """Route 19"""
        await self.simple_embed(ctx, "r19.png", "Route 19")

    @commands.command()
    async def r20(self, ctx):
        """Route 20"""
        await self.simple_embed(ctx, "r20.png", "Route 20")

    @commands.command()
    async def r21(self, ctx):
        """Route 21"""
        await self.simple_embed(ctx, "r21.png", "Route 21")

    @commands.command()
    async def r22(self, ctx):
        """Route 22"""
        await self.simple_embed(ctx, "r22.png", "Route 22")

    @commands.command()
    async def vaniville(self, ctx):
        """Vaniville Town"""
        await self.simple_embed(ctx, "vaniville.png", "Vaniville Town")

    @commands.command()
    async def aquacorde(self, ctx):
        """Aquacorde Town"""
        await self.simple_embed(ctx, "aquacorde.png", "Aquacorde Town")

    @commands.command()
    async def santalune(self, ctx):
        """Santalune City"""
        await self.simple_embed(ctx, "santalune.png", "Santalune City")

    @commands.command()
    async def lumiosesouth(self, ctx):
        """Lumiose City South"""
        await self.simple_embed(ctx, "lumiosesouth.png", "Lumiose City - South Boulevard")

    @commands.command()
    async def lumiosenorth(self, ctx):
        """Lumiose City North"""
        await self.simple_embed(ctx, "lumiosenorth.png", "Lumiose City - North Boulevard")

    @commands.command()
    async def camphrier(self, ctx):
        """Camphrier Town"""
        await self.simple_embed(ctx, "camphrier.png", "Camphrier Town")

    @commands.command()
    async def cyllage(self, ctx):
        """Cyllage City"""
        await self.simple_embed(ctx, "cyllage.png", "Cyllage City")

    @commands.command()
    async def ambrette(self, ctx):
        """Ambrette Town"""
        await self.simple_embed(ctx, "ambrette.png", "Ambrette Town")

    # Fix: the decorator below was missing, so "geosenge" was a plain
    # coroutine method and never registered as a bot command.
    @commands.command()
    async def geosenge(self, ctx):
        """Geosenge Town"""
        await self.simple_embed(ctx, "geosenge.png", "Geosenge Town")

    @commands.command()
    async def shalour(self, ctx):
        """Shalour City"""
        await self.simple_embed(ctx, "shalour.png", "Shalour City")

    @commands.command()
    async def coumarine(self, ctx):
        """Coumarine City"""
        await self.simple_embed(ctx, "coumarine.png", "Coumarine City")

    @commands.command()
    async def laverre(self, ctx):
        """Laverre City"""
        await self.simple_embed(ctx, "laverre.png", "Laverre City")

    @commands.command()
    async def dendemille(self, ctx):
        """Dendemille Town"""
        await self.simple_embed(ctx, "dendemille.png", "Dendemille Town")

    @commands.command()
    async def anistar(self, ctx):
        """Anistar City"""
        await self.simple_embed(ctx, "anistar.png", "Anistar City")

    @commands.command()
    async def couriway(self, ctx):
        """Couriway Town"""
        await self.simple_embed(ctx, "couriway.png", "Couriway Town")

    @commands.command()
    async def kiloude(self, ctx):
        """Kiloude City"""
        await self.simple_embed(ctx, "kiloude.png", "Kiloude City")
| 29.918455 | 114 | 0.611103 | #
# ISC License
#
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
from discord.ext import commands
class Maps(commands.Cog):
"""
Map commands
"""
def __init__(self, bot):
self.bot = bot
async def simple_embed(self, ctx, location, title=""):
embed = discord.Embed(title=title)
embed.set_image(url="https://raw.githubusercontent.com/hansoljin/sycamore-assets/master/maps/" + location)
await ctx.send(embed=embed)
@commands.command()
async def kalos(self, ctx):
"""Kalos map"""
await self.simple_embed(ctx, "kalos.png", "Kalos Region Map")
@commands.command()
async def r1(self, ctx):
"""Route 1"""
await self.simple_embed(ctx, "r1.png", "Route 1")
@commands.command()
async def r2(self, ctx):
"""Route 2"""
await self.simple_embed(ctx, "r2.png", "Route 2")
@commands.command()
async def r3(self, ctx):
"""Route 3"""
await self.simple_embed(ctx, "r3.png", "Route 3")
@commands.command()
async def r4(self, ctx):
"""Route 4"""
await self.simple_embed(ctx, "r4.png", "Route 4")
@commands.command()
async def r5(self, ctx):
"""Route 5"""
await self.simple_embed(ctx, "r5.png", "Route 5")
@commands.command()
async def r6(self, ctx):
"""Route 6"""
await self.simple_embed(ctx, "r6.png", "Route 6")
@commands.command()
async def r7(self, ctx):
"""Route 7"""
await self.simple_embed(ctx, "r7.png", "Route 7")
@commands.command()
async def r8(self, ctx):
"""Route 8"""
await self.simple_embed(ctx, "r8.png", "Route 8")
@commands.command()
async def r9(self, ctx):
"""Route 9"""
await self.simple_embed(ctx, "r9.png", "Route 9")
@commands.command()
async def r10(self, ctx):
"""Route 10"""
await self.simple_embed(ctx, "r10.png", "Route 10")
@commands.command()
async def r11(self, ctx):
"""Route 11"""
await self.simple_embed(ctx, "r11.png", "Route 11")
@commands.command()
async def r12(self, ctx):
"""Route 12"""
await self.simple_embed(ctx, "r12.png", "Route 12")
@commands.command()
async def r13(self, ctx):
"""Route 13"""
await self.simple_embed(ctx, "r13.png", "Route 13")
@commands.command()
async def r14(self, ctx):
"""Route 14"""
await self.simple_embed(ctx, "r14.png", "Route 14")
@commands.command()
async def r15(self, ctx):
"""Route 15"""
await self.simple_embed(ctx, "r15.png", "Route 15")
@commands.command()
async def r16(self, ctx):
"""Route 16"""
await self.simple_embed(ctx, "r16.png", "Route 16")
@commands.command()
async def r17(self, ctx):
"""Route 17"""
await self.simple_embed(ctx, "r17.png", "Route 17")
@commands.command()
async def r18(self, ctx):
"""Route 18"""
await self.simple_embed(ctx, "r18.png", "Route 18")
@commands.command()
async def r19(self, ctx):
"""Route 19"""
await self.simple_embed(ctx, "r19.png", "Route 19")
@commands.command()
async def r20(self, ctx):
"""Route 20"""
await self.simple_embed(ctx, "r20.png", "Route 20")
@commands.command()
async def r21(self, ctx):
"""Route 21"""
await self.simple_embed(ctx, "r21.png", "Route 21")
@commands.command()
async def r22(self, ctx):
"""Route 22"""
await self.simple_embed(ctx, "r22.png", "Route 22")
@commands.command()
async def vaniville(self, ctx):
"""Vaniville Town"""
await self.simple_embed(ctx, "vaniville.png", "Vaniville Town")
@commands.command()
async def aquacorde(self, ctx):
"""Aquacorde Town"""
await self.simple_embed(ctx, "aquacorde.png", "Aquacorde Town")
@commands.command()
async def santalune(self, ctx):
"""Santalune City"""
await self.simple_embed(ctx, "santalune.png", "Santalune City")
@commands.command()
async def lumiosesouth(self, ctx):
"""Lumiose City South"""
await self.simple_embed(ctx, "lumiosesouth.png", "Lumiose City - South Boulevard")
@commands.command()
async def lumiosenorth(self, ctx):
"""Lumiose City North"""
await self.simple_embed(ctx, "lumiosenorth.png", "Lumiose City - North Boulevard")
@commands.command()
async def camphrier(self, ctx):
"""Camphrier Town"""
await self.simple_embed(ctx, "camphrier.png", "Camphrier Town")
@commands.command()
async def cyllage(self, ctx):
"""Cyllage City"""
await self.simple_embed(ctx, "cyllage.png", "Cyllage City")
@commands.command()
async def ambrette(self, ctx):
"""Ambrette Town"""
await self.simple_embed(ctx, "ambrette.png", "Ambrette Town")
async def geosenge(self, ctx):
"""Geosenge Town"""
await self.simple_embed(ctx, "geosenge.png", "Geosenge Town")
@commands.command()
async def shalour(self, ctx):
"""Shalour City"""
await self.simple_embed(ctx, "shalour.png", "Shalour City")
@commands.command()
async def coumarine(self, ctx):
"""Coumarine City"""
await self.simple_embed(ctx, "coumarine.png", "Coumarine City")
@commands.command()
async def laverre(self, ctx):
"""Laverre City"""
await self.simple_embed(ctx, "laverre.png", "Laverre City")
@commands.command()
async def dendemille(self, ctx):
"""Dendemille Town"""
await self.simple_embed(ctx, "dendemille.png", "Dendemille Town")
@commands.command()
async def anistar(self, ctx):
"""Anistar City"""
await self.simple_embed(ctx, "anistar.png", "Anistar City")
@commands.command()
async def couriway(self, ctx):
"""Couriway Town"""
await self.simple_embed(ctx, "couriway.png", "Couriway Town")
@commands.command()
async def kiloude(self, ctx):
"""Kiloude City"""
await self.simple_embed(ctx, "kiloude.png", "Kiloude City")
def setup(bot):
bot.add_cog(Maps(bot))
| 274 | 0 | 76 |
6a03a71123c452d2d58aa64cd34f2cc6ff76c80b | 155 | py | Python | mysite/blog/admin.py | sakshikhachane/Blogger | a1a6f2fc1843b83b47f1ba8b3c88c5c478f5d6ac | [
"MIT"
] | 52 | 2020-07-01T10:06:34.000Z | 2021-09-30T18:23:23.000Z | mysite/blog/admin.py | sakshikhachane/Blogger | a1a6f2fc1843b83b47f1ba8b3c88c5c478f5d6ac | [
"MIT"
] | 206 | 2020-07-25T08:48:05.000Z | 2022-03-12T00:43:35.000Z | mysite/blog/admin.py | sakshikhachane/Blogger | a1a6f2fc1843b83b47f1ba8b3c88c5c478f5d6ac | [
"MIT"
] | 124 | 2020-08-07T11:22:44.000Z | 2021-10-16T05:39:17.000Z | from django.contrib import admin
from .models import Post, TagDict
# Register your models here.
admin.site.register(Post)
admin.site.register(TagDict)
| 15.5 | 33 | 0.787097 | from django.contrib import admin
from .models import Post, TagDict
# Register your models here.
admin.site.register(Post)
admin.site.register(TagDict)
| 0 | 0 | 0 |
bd76b059a85838004b73efadfe04b0077dbae495 | 1,985 | py | Python | tests/test_day4.py | fullybaked/advent-of-code | def5fa21574536465fe13ed2ec8de1e4c7cdf856 | [
"MIT"
] | null | null | null | tests/test_day4.py | fullybaked/advent-of-code | def5fa21574536465fe13ed2ec8de1e4c7cdf856 | [
"MIT"
] | null | null | null | tests/test_day4.py | fullybaked/advent-of-code | def5fa21574536465fe13ed2ec8de1e4c7cdf856 | [
"MIT"
] | null | null | null | from src.day4 import Board, Game, load_data
from unittest.mock import patch, mock_open
EXAMPLE_IN = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
"""
| 20.463918 | 86 | 0.58539 | from src.day4 import Board, Game, load_data
from unittest.mock import patch, mock_open
EXAMPLE_IN = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
"""
def test_play_boards_for_first_win():
with patch("builtins.open", mock_open(read_data=EXAMPLE_IN)):
calls, boards = load_data()
game = Game(boards, calls)
game.play()
assert game.winner() == 4512
def test_play_boards_for_last_win():
with patch("builtins.open", mock_open(read_data=EXAMPLE_IN)):
calls, boards = load_data()
game = Game(boards, calls)
game.play()
assert game.looser() == 1924
def test_record_win_row():
board_data = """22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
"""
board = Board(board_data)
assert board.check_win(0, 0) is False
assert board.check_win(0, 1) is False
assert board.check_win(0, 2) is False
assert board.check_win(0, 3) is False
assert board.check_win(0, 4) is True
def test_record_win_col():
board_data = """22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
"""
board = Board(board_data)
assert board.check_win(0, 0) is False
assert board.check_win(1, 0) is False
assert board.check_win(2, 0) is False
assert board.check_win(3, 0) is False
assert board.check_win(4, 0) is True
def test_cols():
board_data = """22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
"""
cols = [
[22, 8, 21, 6, 1],
[13, 2, 9, 10, 12],
[17, 23, 14, 3, 20],
[11, 4, 16, 18, 15],
[0, 24, 7, 5, 19],
]
board = Board(board_data)
assert board._cols == cols
| 1,458 | 0 | 115 |
22f6030013bcd837394c0207f0adfee79e6d965d | 4,633 | py | Python | lib/rucio/web/rest/webpy/v1/credential.py | ijjorama/rucio | 69391847117cf3567081814fbc30f476ada88853 | [
"Apache-2.0"
] | null | null | null | lib/rucio/web/rest/webpy/v1/credential.py | ijjorama/rucio | 69391847117cf3567081814fbc30f476ada88853 | [
"Apache-2.0"
] | null | null | null | lib/rucio/web/rest/webpy/v1/credential.py | ijjorama/rucio | 69391847117cf3567081814fbc30f476ada88853 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from __future__ import print_function
from traceback import format_exc
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
from web import application, ctx, OK, header, InternalError
from rucio.api.authentication import validate_auth_token
from rucio.api.credential import get_signed_url
from rucio.common.exception import RucioException
from rucio.common.utils import generate_http_error
from rucio.web.rest.common import RucioController, check_accept_header_wrapper
URLS = (
'/signurl?$', 'SignURL',
)
class SignURL(RucioController):
"""
Request a signed URL.
"""
def OPTIONS(self):
"""
HTTP Success:
200 OK
Allow cross-site scripting. Explicit for Authorisation.
"""
header('Access-Control-Allow-Origin', ctx.env.get('HTTP_ORIGIN'))
header('Access-Control-Allow-Headers', ctx.env.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS'))
header('Access-Control-Allow-Methods', '*')
header('Access-Control-Allow-Credentials', 'true')
header('Access-Control-Expose-Headers', 'X-Rucio-Auth-Token')
raise OK
@check_accept_header_wrapper(['application/octet-stream'])
def GET(self):
"""
HTTP Success:
200 OK
HTTP Error:
400 Bad Request
401 Unauthorized
406 Not Acceptable
500 Internal Server Error
:param Rucio-VO: VO name as a string (Multi-VO only).
:param Rucio-Account: Account identifier as a string.
:param Rucio-AppID: Application identifier as a string.
:returns: Signed URL.
"""
vo = ctx.env.get('HTTP_X_RUCIO_VO')
account = ctx.env.get('HTTP_X_RUCIO_ACCOUNT')
appid = ctx.env.get('HTTP_X_RUCIO_APPID')
if appid is None:
appid = 'unknown'
ip = ctx.env.get('HTTP_X_FORWARDED_FOR')
if ip is None:
ip = ctx.ip
try:
validate_auth_token(ctx.env.get('HTTP_X_RUCIO_AUTH_TOKEN'))
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0][0])
except Exception as e:
print(format_exc())
raise InternalError(e)
svc, operation, url = None, None, None
try:
params = parse_qs(ctx.query[1:])
lifetime = params.get('lifetime', [600])[0]
service = params.get('svc', ['gcs'])[0]
operation = params.get('op', ['read'])[0]
url = params.get('url', [None])[0]
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
if service not in ['gcs', 's3', 'swift']:
raise generate_http_error(400, 'ValueError', 'Parameter "svc" must be either empty(=gcs), gcs, s3 or swift')
if url is None:
raise generate_http_error(400, 'ValueError', 'Parameter "url" not found')
if operation not in ['read', 'write', 'delete']:
raise generate_http_error(400, 'ValueError', 'Parameter "op" must be either empty(=read), read, write, or delete.')
try:
result = get_signed_url(account, appid, ip, service=service, operation=operation, url=url, lifetime=lifetime, vo=vo)
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0])
except Exception as e:
print(format_exc())
raise InternalError(e)
if not result:
raise generate_http_error(401, 'CannotAuthenticate', 'Cannot generate signed URL for account %(account)s' % locals())
return result
"""----------------------
Web service startup
----------------------"""
APP = application(URLS, globals())
application = APP.wsgifunc()
| 34.066176 | 129 | 0.64278 | #!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from __future__ import print_function
from traceback import format_exc
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
from web import application, ctx, OK, header, InternalError
from rucio.api.authentication import validate_auth_token
from rucio.api.credential import get_signed_url
from rucio.common.exception import RucioException
from rucio.common.utils import generate_http_error
from rucio.web.rest.common import RucioController, check_accept_header_wrapper
URLS = (
'/signurl?$', 'SignURL',
)
class SignURL(RucioController):
"""
Request a signed URL.
"""
def OPTIONS(self):
"""
HTTP Success:
200 OK
Allow cross-site scripting. Explicit for Authorisation.
"""
header('Access-Control-Allow-Origin', ctx.env.get('HTTP_ORIGIN'))
header('Access-Control-Allow-Headers', ctx.env.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS'))
header('Access-Control-Allow-Methods', '*')
header('Access-Control-Allow-Credentials', 'true')
header('Access-Control-Expose-Headers', 'X-Rucio-Auth-Token')
raise OK
@check_accept_header_wrapper(['application/octet-stream'])
def GET(self):
"""
HTTP Success:
200 OK
HTTP Error:
400 Bad Request
401 Unauthorized
406 Not Acceptable
500 Internal Server Error
:param Rucio-VO: VO name as a string (Multi-VO only).
:param Rucio-Account: Account identifier as a string.
:param Rucio-AppID: Application identifier as a string.
:returns: Signed URL.
"""
vo = ctx.env.get('HTTP_X_RUCIO_VO')
account = ctx.env.get('HTTP_X_RUCIO_ACCOUNT')
appid = ctx.env.get('HTTP_X_RUCIO_APPID')
if appid is None:
appid = 'unknown'
ip = ctx.env.get('HTTP_X_FORWARDED_FOR')
if ip is None:
ip = ctx.ip
try:
validate_auth_token(ctx.env.get('HTTP_X_RUCIO_AUTH_TOKEN'))
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0][0])
except Exception as e:
print(format_exc())
raise InternalError(e)
svc, operation, url = None, None, None
try:
params = parse_qs(ctx.query[1:])
lifetime = params.get('lifetime', [600])[0]
service = params.get('svc', ['gcs'])[0]
operation = params.get('op', ['read'])[0]
url = params.get('url', [None])[0]
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
if service not in ['gcs', 's3', 'swift']:
raise generate_http_error(400, 'ValueError', 'Parameter "svc" must be either empty(=gcs), gcs, s3 or swift')
if url is None:
raise generate_http_error(400, 'ValueError', 'Parameter "url" not found')
if operation not in ['read', 'write', 'delete']:
raise generate_http_error(400, 'ValueError', 'Parameter "op" must be either empty(=read), read, write, or delete.')
try:
result = get_signed_url(account, appid, ip, service=service, operation=operation, url=url, lifetime=lifetime, vo=vo)
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0])
except Exception as e:
print(format_exc())
raise InternalError(e)
if not result:
raise generate_http_error(401, 'CannotAuthenticate', 'Cannot generate signed URL for account %(account)s' % locals())
return result
"""----------------------
Web service startup
----------------------"""
APP = application(URLS, globals())
application = APP.wsgifunc()
| 0 | 0 | 0 |
e6ff18f98511f3a89b06bbe9cae4cef30086dde0 | 2,048 | py | Python | server/apps/utils/aws/kinesis.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/aws/kinesis.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/aws/kinesis.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | __author__ = 'dkarchmer'
import datetime
import json
import logging
import pprint
import boto3
from django.conf import settings
from .common import AWS_REGION
# Get an instance of a logger
logger = logging.getLogger(__name__)
FIREHOSE_STREAM_NAME = getattr(settings, 'FIREHOSE_STREAM_NAME')
firehose_client = boto3.client('firehose', region_name=AWS_REGION)
| 27.306667 | 101 | 0.663086 | __author__ = 'dkarchmer'
import datetime
import json
import logging
import pprint
import boto3
from django.conf import settings
from .common import AWS_REGION
# Get an instance of a logger
logger = logging.getLogger(__name__)
FIREHOSE_STREAM_NAME = getattr(settings, 'FIREHOSE_STREAM_NAME')
firehose_client = boto3.client('firehose', region_name=AWS_REGION)
def _write_stream(stream, firehose_client):
try:
response = firehose_client.put_record(
DeliveryStreamName=FIREHOSE_STREAM_NAME,
Record={
'Data': json.dumps(stream)
}
)
logging.info(response)
except Exception:
logging.exception('Problem pushing to firehose')
def _write_stream_batch(records, firehose_client):
try:
response = firehose_client.put_record_batch(
DeliveryStreamName=FIREHOSE_STREAM_NAME,
Records=records
)
if 'FailedPutCount' in response and response['FailedPutCount']:
logger.error('Firehose: {0} upload failures detected'.format(response['FailedPutCount']))
except Exception as e:
logging.debug(e)
logging.exception('Firehose: upload failures detected. {}'.format(str(e)[0:50]))
def datetime_handler(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
raise TypeError("Unknown type")
def send_to_firehose(data, batch_num):
batch_payload = []
count = 1
for item in data:
# print(str(stream_payload))
batch_item = {
'Data': json.dumps(item, default=datetime_handler)
}
batch_payload.append(batch_item)
count += 1
if count == batch_num:
logger.info('Uploading {0} records'.format(batch_num))
_write_stream_batch(batch_payload, firehose_client)
batch_payload = []
count = 1
if len(batch_payload):
logger.info('Uploading final {0} records'.format(len(batch_payload)))
_write_stream_batch(batch_payload, firehose_client)
| 1,587 | 0 | 92 |
453972bee5e4b38dcaee26d48c6dcec6950939dd | 821 | py | Python | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 2 | 2022-02-13T19:13:16.000Z | 2022-02-17T14:52:05.000Z | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | null | null | null | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 1 | 2022-02-16T20:17:38.000Z | 2022-02-16T20:17:38.000Z | import sys
from PySide6 import QtGui
| 25.65625 | 72 | 0.576127 | import sys
from PySide6 import QtGui
class OutLog:
def __init__(self, edit, out=None, color=None):
"""(edit, out=None, color=None) -> can write stdout, stderr to a
QTextEdit.
edit = QTextEdit
out = alternate stream ( can be the original sys.stdout )
color = alternate color (i.e. color stderr a different color)
"""
self.edit = edit
self.out = None
self.color = color
def write(self, m):
if self.color:
tc = self.edit.textColor()
self.edit.setTextColor(self.color)
self.edit.moveCursor(QtGui.QTextCursor.End)
self.edit.insertPlainText( m )
if self.color:
self.edit.setTextColor(tc)
if self.out:
self.out.write(m)
def flush(self):
pass
| 322 | 439 | 23 |
57401c5732fe62caa7393d19d927adac65849582 | 101 | py | Python | lang/py/cookbook/v2/source/cb2_2_2_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_2_2_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_2_2_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | file_object.writelines(list_of_text_strings)
open('abinfile', 'wb').writelines(list_of_data_strings)
| 33.666667 | 55 | 0.841584 | file_object.writelines(list_of_text_strings)
open('abinfile', 'wb').writelines(list_of_data_strings)
| 0 | 0 | 0 |
53a0848066228da1110c7becd0df032beaa6b4c8 | 922 | py | Python | saec/core/models.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | saec/core/models.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | saec/core/models.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | from django.db import models
| 22.487805 | 44 | 0.553145 | from django.db import models
class ComunicacaoAgendada(models.Model):
data = models.DateTimeField()
mensagem = models.TextField()
class Via(models.TextChoices):
EMAIL = 'email', 'Email'
SMS = 'sms', 'SMS'
PUSH = 'push', 'Push'
WHATSAPP = 'whatsapp', 'WhatsApp'
via = models.CharField(
max_length=10,
choices=Via.choices,
)
# email, telefone, token...
para = models.CharField(max_length=255)
class Status(models.TextChoices):
AGENDADA = 'AGENDADA', 'Agendada'
ENVIADA = 'ENVIADA', 'Enviada'
CANCELADA = 'CANCELADA', 'Cancelada'
status = models.CharField(
max_length=10,
choices=Status.choices,
default=Status.AGENDADA
)
class Meta:
unique_together = [
'data',
'mensagem',
'via',
'para',
'status',
]
| 0 | 869 | 23 |
46022dba439632662d356578c1c51146aecefe0f | 24,554 | py | Python | tools/perf/core/bot_platforms.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-10-18T02:33:40.000Z | 2020-10-18T02:33:40.000Z | tools/perf/core/bot_platforms.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2021-05-17T16:28:52.000Z | 2021-05-21T22:42:22.000Z | tools/perf/core/bot_platforms.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import six.moves.urllib.parse # pylint: disable=import-error
from core import benchmark_finders
from core import benchmark_utils
from telemetry.story import story_filter
_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')
_ALL_BENCHMARKS_BY_NAMES = dict(
(b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())
OFFICIAL_BENCHMARKS = frozenset(
b for b in benchmark_finders.GetOfficialBenchmarks()
if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
GTEST_STORY_NAME = '_gtest_'
# Global |benchmarks| is convenient way to keep BenchmarkConfig objects
# unique, which allows us to use set subtraction below.
benchmarks = {b.Name(): {True: BenchmarkConfig(b, abridged=True),
False: BenchmarkConfig(b, abridged=False)}
for b in ALL_SCHEDULEABLE_BENCHMARKS}
OFFICIAL_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig(b.Name()) for b in OFFICIAL_BENCHMARKS])
# power.mobile requires special hardware.
# only run blink_perf.sanitizer-api on linux-perf.
OFFICIAL_BENCHMARK_CONFIGS = OFFICIAL_BENCHMARK_CONFIGS.Remove([
'power.mobile',
'blink_perf.sanitizer-api',
])
# TODO(crbug.com/965158): Remove OFFICIAL_BENCHMARK_NAMES once sharding
# scripts are no longer using it.
OFFICIAL_BENCHMARK_NAMES = frozenset(
b.name for b in OFFICIAL_BENCHMARK_CONFIGS.Frozenset())
# TODO(crbug.com/1030840): Stop using these 'OFFICIAL_EXCEPT' suites and instead
# define each benchmarking config separately as is already done for many of the
# suites below.
_OFFICIAL_EXCEPT_DISPLAY_LOCKING = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking'])
_OFFICIAL_EXCEPT_JETSTREAM2 = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['jetstream2'])
_OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking', 'jetstream2'])
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP = PerfSuite([
_GetBenchmarkConfig('system_health.common_desktop')
])
_LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
]).Add([
'blink_perf.sanitizer-api',
])
_LINUX_EXECUTABLE_CONFIGS = frozenset([
# TODO(crbug.com/811766): Add views_perftests.
_base_perftests(200),
_load_library_perf_tests(),
_performance_browser_tests(165),
_tracing_perftests(5),
])
_MAC_HIGH_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_HIGH_END_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_MAC_LOW_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'jetstream2',
'v8.runtime_stats.top_25',
])
_MAC_LOW_END_EXECUTABLE_CONFIGS = frozenset([
_load_library_perf_tests(),
_performance_browser_tests(210),
])
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_WIN_10_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_WIN_10_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(200),
_components_perftests(125),
_dawn_perf_tests(600),
_views_perftests(),
])
_WIN_10_LOW_END_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
])
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_desktop'),
_GetBenchmarkConfig('rendering.desktop', abridged=True),
])
_WIN_10_AMD_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('jetstream2'),
_GetBenchmarkConfig('kraken'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('system_health.common_desktop'),
])
_WIN_7_BENCHMARK_CONFIGS = PerfSuite([
'loading.desktop',
]).Abridge([
'loading.desktop',
])
_WIN_7_GPU_BENCHMARK_CONFIGS = PerfSuite(['rendering.desktop']).Abridge(
['rendering.desktop'])
_ANDROID_GO_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.webview_startup'),
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('speedometer'),
_GetBenchmarkConfig('speedometer2')])
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS = _ANDROID_GO_BENCHMARK_CONFIGS
# Note that Nexus 5 bot capacity is very low, so we must severely limit
# the benchmarks that we run on it and abridge large benchmarks in order
# to run them on it. See crbug.com/1030840 for details.
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS = PerfSuite([
'loading.mobile',
'startup.mobile',
'system_health.common_mobile',
'system_health.webview_startup',
]).Abridge(['loading.mobile', 'startup.mobile', 'system_health.common_mobile'])
_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(100),
_gpu_perftests(45),
_tracing_perftests(55),
])
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL2_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL4_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.mobile'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('system_health.scroll_jank_mobile')])
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('startup.mobile')])
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('rendering.mobile'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('rendering.desktop')])
_LACROS_EVE_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_LINUX_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.desktop'),
_GetBenchmarkConfig('rendering.desktop'),
_GetBenchmarkConfig('system_health.common_desktop')
])
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_desktop'),
_GetBenchmarkConfig('media.mobile')
])
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('blink_perf.shadow_dom'),
])
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
])
# Linux
LINUX = PerfPlatform(
'linux-perf',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_BENCHMARK_CONFIGS,
26,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
LINUX_REL = PerfPlatform(
'linux-perf-rel',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP,
2,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
# Mac
MAC_HIGH_END = PerfPlatform(
'mac-10_13_laptop_high_end-perf',
'MacBook Pro, Core i7 2.8 GHz, 16GB RAM, 256GB SSD, Radeon 55',
_MAC_HIGH_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_HIGH_END_EXECUTABLE_CONFIGS)
MAC_LOW_END = PerfPlatform(
'mac-10_12_laptop_low_end-perf',
'MacBook Air, Core i5 1.8 GHz, 8GB RAM, 128GB SSD, HD Graphics',
_MAC_LOW_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_LOW_END_EXECUTABLE_CONFIGS)
MAC_M1_MINI_2020 = PerfPlatform(
'mac-m1_mini_2020-perf',
'Mac M1 Mini 2020',
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS)
# Win
WIN_10_LOW_END = PerfPlatform(
'win-10_laptop_low_end-perf',
'Low end windows 10 HP laptops. HD Graphics 5500, x86-64-i3-5005U, '
'SSD, 4GB RAM.',
_WIN_10_LOW_END_BENCHMARK_CONFIGS,
# TODO(crbug.com/998161): Increase the number of shards once you
# have enough test data to make a shard map and when more devices
# are added to the data center.
46,
'win')
WIN_10 = PerfPlatform(
'win-10-perf',
'Windows Intel HD 630 towers, Core i7-7700 3.6 GHz, 16GB RAM,'
' Intel Kaby Lake HD Graphics 630', _WIN_10_BENCHMARK_CONFIGS,
26, 'win', executables=_WIN_10_EXECUTABLE_CONFIGS)
WIN_10_AMD = PerfPlatform('win-10_amd-perf', 'Windows AMD chipset',
_WIN_10_AMD_BENCHMARK_CONFIGS, 2, 'win')
WIN_7 = PerfPlatform('Win 7 Perf', 'N/A', _WIN_7_BENCHMARK_CONFIGS, 2, 'win')
WIN_7_GPU = PerfPlatform('Win 7 Nvidia GPU Perf', 'N/A',
_WIN_7_GPU_BENCHMARK_CONFIGS, 3, 'win')
# Android
ANDROID_GO = PerfPlatform(
'android-go-perf', 'Android O (gobo)', _ANDROID_GO_BENCHMARK_CONFIGS,
19, 'android')
ANDROID_GO_WEBVIEW = PerfPlatform('android-go_webview-perf',
'Android OPM1.171019.021 (gobo)',
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS, 13,
'android')
ANDROID_NEXUS_5 = PerfPlatform('Android Nexus5 Perf',
'Android KOT49H',
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS,
10,
'android',
executables=_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS)
ANDROID_NEXUS_5X_WEBVIEW = PerfPlatform(
'Android Nexus5X WebView Perf', 'Android AOSP MOB30K',
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS, 16, 'android')
ANDROID_PIXEL2 = PerfPlatform('android-pixel2-perf',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL2_EXECUTABLE_CONFIGS)
ANDROID_PIXEL2_WEBVIEW = PerfPlatform(
'android-pixel2_webview-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL2_WEBLAYER = PerfPlatform(
'android-pixel2_weblayer-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4 = PerfPlatform('android-pixel4-perf',
'Android R',
_ANDROID_PIXEL4_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL4_EXECUTABLE_CONFIGS)
ANDROID_PIXEL4_WEBVIEW = PerfPlatform(
'android-pixel4_webview-perf', 'Android R',
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL4_WEBLAYER = PerfPlatform(
'android-pixel4_weblayer-perf', 'Android R',
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4A_POWER = PerfPlatform('android-pixel4a_power-perf',
'Android QD4A.200102.001.A1',
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS,
1, 'android')
# Cros/Lacros
LACROS_EVE_PERF = PerfPlatform('lacros-eve-perf', '',
_LACROS_EVE_BENCHMARK_CONFIGS, 10, 'chromeos')
# FYI bots
WIN_10_LOW_END_HP_CANDIDATE = PerfPlatform(
'win-10_laptop_low_end-perf_HP-Candidate', 'HP 15-BS121NR Laptop Candidate',
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS,
1, 'win', is_fyi=True)
ANDROID_NEXUS5X_PERF_FYI = PerfPlatform('android-nexus5x-perf-fyi',
'Android MMB29Q',
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS,
2,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_AAB_FYI = PerfPlatform(
'android-pixel2-perf-aab-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS,
1,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_FYI = PerfPlatform('android-pixel2-perf-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS,
4,
'android',
is_fyi=True)
CHROMEOS_KEVIN_PERF_FYI = PerfPlatform('chromeos-kevin-perf-fyi',
'',
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS,
4,
'chromeos',
is_fyi=True)
LINUX_PERF_FYI = PerfPlatform('linux-perf-fyi',
'',
_LINUX_PERF_FYI_BENCHMARK_CONFIGS,
1,
'linux',
is_fyi=True)
FUCHSIA_PERF_FYI = PerfPlatform('fuchsia-perf-fyi',
'',
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS,
7,
'fuchsia',
is_fyi=True)
# Calibration bots
LINUX_PERF_CALIBRATION = PerfPlatform(
'linux-perf-calibration',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS,
28,
'linux',
is_calibration=True)
ANDROID_PIXEL2_PERF_CALIBRATION = PerfPlatform(
'android-pixel2-perf-calibration',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS,
42,
'android',
is_calibration=True)
ALL_PLATFORMS = {
p for p in locals().values() if isinstance(p, PerfPlatform)
}
PLATFORMS_BY_NAME = {p.name: p for p in ALL_PLATFORMS}
FYI_PLATFORMS = {
p for p in ALL_PLATFORMS if p.is_fyi
}
CALIBRATION_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_calibration}
OFFICIAL_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_official}
ALL_PLATFORM_NAMES = {
p.name for p in ALL_PLATFORMS
}
OFFICIAL_PLATFORM_NAMES = {
p.name for p in OFFICIAL_PLATFORMS
}
| 35.688953 | 80 | 0.691008 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import six.moves.urllib.parse # pylint: disable=import-error
from core import benchmark_finders
from core import benchmark_utils
from telemetry.story import story_filter
_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')
_ALL_BENCHMARKS_BY_NAMES = dict(
(b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())
OFFICIAL_BENCHMARKS = frozenset(
b for b in benchmark_finders.GetOfficialBenchmarks()
if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
GTEST_STORY_NAME = '_gtest_'
def _IsPlatformSupported(benchmark, platform):
supported = benchmark.GetSupportedPlatformNames(benchmark.SUPPORTED_PLATFORMS)
return 'all' in supported or platform in supported
class PerfPlatform(object):
  """One perf waterfall builder (bot) plus the work assigned to it.

  Holds the benchmark/executable configs scheduled on the bot, the swarming
  shard count, and the derived paths of the bot's shard-map and timing-data
  files under the shard_maps directory.
  """
  def __init__(self,
               name,
               description,
               benchmark_configs,
               num_shards,
               platform_os,
               is_fyi=False,
               is_calibration=False,
               run_reference_build=False,
               executables=None):
    """Filters |benchmark_configs| to this platform and derives file paths.

    Args:
      name: the builder name, e.g. 'linux-perf'.
      description: human-readable hardware description of the bot.
      benchmark_configs: a PerfSuite of BenchmarkConfig objects; entries
        whose benchmark does not support |platform_os| are dropped.
      num_shards: number of swarming shards; must be truthy.
      platform_os: OS name such as 'linux', 'mac', 'win', 'android',
        'chromeos' or 'fuchsia'.
      is_fyi: True for FYI (non-official) bots.
      is_calibration: True for calibration bots.
      run_reference_build: whether the reference build is also run.
      executables: optional frozenset of ExecutableConfig objects.
    """
    benchmark_configs = benchmark_configs.Frozenset()
    self._name = name
    self._description = description
    self._platform_os = platform_os
    # For sorting ignore case and "segments" in the bot name.
    self._sort_key = name.lower().replace('-', ' ')
    self._is_fyi = is_fyi
    self._is_calibration = is_calibration
    self.run_reference_build = run_reference_build
    self.executables = executables or frozenset()
    assert num_shards
    self._num_shards = num_shards
    # Keep only the benchmarks this platform can actually run.
    # pylint: disable=redefined-outer-name
    self._benchmark_configs = frozenset([
        b for b in benchmark_configs if
        _IsPlatformSupported(b.benchmark, self._platform_os)])
    # pylint: enable=redefined-outer-name
    benchmark_names = [config.name for config in self._benchmark_configs]
    assert len(set(benchmark_names)) == len(benchmark_names), (
        'Make sure that a benchmark does not appear twice.')
    # File names are derived from the bot name, lowercased and with
    # spaces replaced so they are filesystem-safe.
    base_file_name = name.replace(' ', '_').lower()
    self._timing_file_path = os.path.join(
        _SHARD_MAP_DIR, 'timing_data', base_file_name + '_timing.json')
    self.shards_map_file_name = base_file_name + '_map.json'
    self._shards_map_file_path = os.path.join(
        _SHARD_MAP_DIR, self.shards_map_file_name)
  def __lt__(self, other):
    """Orders platforms by the normalized bot name (for stable listings)."""
    if not isinstance(other, type(self)):
      return NotImplemented
    # pylint: disable=protected-access
    return self._sort_key < other._sort_key
  @property
  def num_shards(self):
    return self._num_shards
  @property
  def shards_map_file_path(self):
    return self._shards_map_file_path
  @property
  def timing_file_path(self):
    return self._timing_file_path
  @property
  def name(self):
    return self._name
  @property
  def description(self):
    return self._description
  @property
  def platform(self):
    return self._platform_os
  @property
  def benchmarks_to_run(self):
    """The plain benchmark classes of this bot's configs."""
    # TODO(crbug.com/965158): Deprecate this in favor of benchmark_configs
    # as part of change to make sharding scripts accommodate abridged
    # benchmarks.
    return frozenset({b.benchmark for b in self._benchmark_configs})
  @property
  def benchmark_configs(self):
    """Frozenset of BenchmarkConfig objects scheduled on this bot."""
    return self._benchmark_configs
  @property
  def is_fyi(self):
    return self._is_fyi
  @property
  def is_calibration(self):
    return self._is_calibration
  @property
  def is_official(self):
    """True for regular waterfall bots (neither FYI nor calibration)."""
    return not self._is_fyi and not self.is_calibration
  @property
  def builder_url(self):
    """URL of this builder on the Chrome CI console."""
    return ('https://ci.chromium.org/p/chrome/builders/ci/%s' %
            six.moves.urllib.parse.quote(self._name))
class BenchmarkConfig(object):
  """A configuration for a benchmark that helps decide how to shard it."""

  def __init__(self, benchmark, abridged):
    """A configuration for a benchmark that helps decide how to shard it.

    Args:
      benchmark: the benchmark.Benchmark object.
      abridged: True if the benchmark should be abridged so fewer stories
        are run, and False if the whole benchmark should be run.
    """
    self.benchmark = benchmark
    self.abridged = abridged
    self._stories = None  # Lazily computed cache; see the stories property.
    self.is_telemetry = True

  @property
  def name(self):
    """The benchmark's registered name."""
    return self.benchmark.Name()

  @property
  def repeat(self):
    """Pageset repeat count for this benchmark (defaults to 1)."""
    return self.benchmark.options.get('pageset_repeat', 1)

  @property
  def stories(self):
    """Names of the stories this config runs; computed once and cached."""
    # PEP 8: compare to None with `is`/`is not`, not `!=` (the original
    # used `!= None`, which also invokes user-defined __ne__).
    if self._stories is None:
      story_set = benchmark_utils.GetBenchmarkStorySet(self.benchmark())
      abridged_story_set_tag = (
          story_set.GetAbridgedStorySetTagFilter() if self.abridged else None)
      story_filter_obj = story_filter.StoryFilter(
          abridged_story_set_tag=abridged_story_set_tag)
      stories = story_filter_obj.FilterStories(story_set)
      self._stories = [story.name for story in stories]
    return self._stories
class ExecutableConfig(object):
  """Shard-planning description of a standalone (gtest-style) executable.

  Mirrors the BenchmarkConfig surface (name/abridged/stories/repeat) so
  sharding code can treat Telemetry benchmarks and bare executables alike.
  """

  def __init__(self, name, path=None, flags=None, estimated_runtime=60):
    self.name = name
    # The on-disk binary defaults to the config name.
    self.path = path if path else name
    self.flags = flags if flags else []
    self.estimated_runtime = estimated_runtime
    # Executables are never abridged and expose one synthetic story.
    self.abridged = False
    self.stories = [GTEST_STORY_NAME]
    self.is_telemetry = False
    self.repeat = 1
class PerfSuite(object):
  """A mutable, name-keyed collection of benchmark/executable configs.

  Add/Remove/Abridge return self so calls can be chained, matching the
  declarative suite definitions below.
  """

  def __init__(self, configs):
    """Builds a suite from an iterable of configs or another PerfSuite."""
    self._configs = dict()
    self.Add(configs)

  def Frozenset(self):
    """Returns the suite's configs as a frozenset."""
    return frozenset(self._configs.values())

  def Add(self, configs):
    """Adds configs; bare strings are resolved via _GetBenchmarkConfig.

    Raises:
      ValueError: if a config with the same name is already in the suite.
    """
    if isinstance(configs, PerfSuite):
      configs = configs.Frozenset()
    for config in configs:
      if isinstance(config, str):
        config = _GetBenchmarkConfig(config)
      if config.name in self._configs:
        raise ValueError('Cannot have duplicate benchmarks/executables.')
      self._configs[config.name] = config
    return self

  def Remove(self, configs):
    """Removes configs given either by name or as config objects.

    Raises:
      KeyError: if a named config is not present in the suite.
    """
    for config in configs:
      name = config
      # Accept BenchmarkConfig/ExecutableConfig objects as well as names.
      # (The previous check was `isinstance(config, PerfSuite)`, but
      # PerfSuite has no .name attribute, so removal by config object
      # always failed with a KeyError on the object itself.)
      if not isinstance(config, str):
        name = config.name
      del self._configs[name]
    return self

  def Abridge(self, config_names):
    """Replaces each named config with its abridged variant (moved to end)."""
    for name in config_names:
      del self._configs[name]
      self._configs[name] = _GetBenchmarkConfig(
          name, abridged=True)
    return self
# Global |benchmarks| is convenient way to keep BenchmarkConfig objects
# unique, which allows us to use set subtraction below.
benchmarks = {b.Name(): {True: BenchmarkConfig(b, abridged=True),
False: BenchmarkConfig(b, abridged=False)}
for b in ALL_SCHEDULEABLE_BENCHMARKS}
def _GetBenchmarkConfig(benchmark_name, abridged=False):
  """Returns the shared BenchmarkConfig for |benchmark_name|.

  Looks up the module-level |benchmarks| cache so each
  (benchmark, abridged) pair maps to a single config instance.
  """
  configs_by_abridged = benchmarks[benchmark_name]
  return configs_by_abridged[abridged]
OFFICIAL_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig(b.Name()) for b in OFFICIAL_BENCHMARKS])
# power.mobile requires special hardware.
# only run blink_perf.sanitizer-api on linux-perf.
OFFICIAL_BENCHMARK_CONFIGS = OFFICIAL_BENCHMARK_CONFIGS.Remove([
'power.mobile',
'blink_perf.sanitizer-api',
])
# TODO(crbug.com/965158): Remove OFFICIAL_BENCHMARK_NAMES once sharding
# scripts are no longer using it.
OFFICIAL_BENCHMARK_NAMES = frozenset(
b.name for b in OFFICIAL_BENCHMARK_CONFIGS.Frozenset())
# TODO(crbug.com/1030840): Stop using these 'OFFICIAL_EXCEPT' suites and instead
# define each benchmarking config separately as is already done for many of the
# suites below.
_OFFICIAL_EXCEPT_DISPLAY_LOCKING = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking'])
_OFFICIAL_EXCEPT_JETSTREAM2 = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['jetstream2'])
_OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking', 'jetstream2'])
def _base_perftests(estimated_runtime=270):
  """Config for base_perftests: one launcher job, no retries."""
  launcher_flags = ['--test-launcher-jobs=1', '--test-launcher-retry-limit=0']
  return ExecutableConfig('base_perftests',
                          flags=launcher_flags,
                          estimated_runtime=estimated_runtime)
def _components_perftests(estimated_runtime=110):
  """Config for components_perftests, run under a virtual X server."""
  display_flags = ['--xvfb']
  return ExecutableConfig('components_perftests',
                          flags=display_flags,
                          estimated_runtime=estimated_runtime)
def _dawn_perf_tests(estimated_runtime=270):
  """Config for dawn_perf_tests: one launcher job, no retries."""
  launcher_flags = ['--test-launcher-jobs=1', '--test-launcher-retry-limit=0']
  return ExecutableConfig('dawn_perf_tests',
                          flags=launcher_flags,
                          estimated_runtime=estimated_runtime)
def _gpu_perftests(estimated_runtime=60):
  """Config for gpu_perftests with default flags."""
  return ExecutableConfig(
      'gpu_perftests', estimated_runtime=estimated_runtime)
def _load_library_perf_tests(estimated_runtime=3):
  """Config for load_library_perf_tests with default flags."""
  return ExecutableConfig(
      'load_library_perf_tests', estimated_runtime=estimated_runtime)
def _performance_browser_tests(estimated_runtime=67):
  """Config for the performance subset of browser_tests.

  The binary on disk is the plain browser_tests target; only the
  tab-capture and Cast v2 performance suites are selected via
  --gtest_filter.
  """
  browser_test_flags = [
      '--full-performance-run',
      '--test-launcher-jobs=1',
      '--test-launcher-retry-limit=0',
      # Allow the full performance runs to take up to 60 seconds (rather
      # than the default of 30 for normal CQ browser test runs).
      '--ui-test-action-timeout=60000',
      '--ui-test-action-max-timeout=60000',
      '--test-launcher-timeout=60000',
      '--gtest_filter=*/TabCapturePerformanceTest.*:'
      '*/CastV2PerformanceTest.*',
  ]
  return ExecutableConfig('performance_browser_tests',
                          path='browser_tests',
                          flags=browser_test_flags,
                          estimated_runtime=estimated_runtime)
def _tracing_perftests(estimated_runtime=50):
  """Config for tracing_perftests with default flags."""
  return ExecutableConfig(
      'tracing_perftests', estimated_runtime=estimated_runtime)
def _views_perftests(estimated_runtime=7):
  """Config for views_perftests, run under a virtual X server."""
  display_flags = ['--xvfb']
  return ExecutableConfig('views_perftests',
                          flags=display_flags,
                          estimated_runtime=estimated_runtime)
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP = PerfSuite([
_GetBenchmarkConfig('system_health.common_desktop')
])
_LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
]).Add([
'blink_perf.sanitizer-api',
])
_LINUX_EXECUTABLE_CONFIGS = frozenset([
# TODO(crbug.com/811766): Add views_perftests.
_base_perftests(200),
_load_library_perf_tests(),
_performance_browser_tests(165),
_tracing_perftests(5),
])
_MAC_HIGH_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_HIGH_END_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_MAC_LOW_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'jetstream2',
'v8.runtime_stats.top_25',
])
_MAC_LOW_END_EXECUTABLE_CONFIGS = frozenset([
_load_library_perf_tests(),
_performance_browser_tests(210),
])
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_WIN_10_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_WIN_10_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(200),
_components_perftests(125),
_dawn_perf_tests(600),
_views_perftests(),
])
_WIN_10_LOW_END_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
])
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_desktop'),
_GetBenchmarkConfig('rendering.desktop', abridged=True),
])
_WIN_10_AMD_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('jetstream2'),
_GetBenchmarkConfig('kraken'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('system_health.common_desktop'),
])
_WIN_7_BENCHMARK_CONFIGS = PerfSuite([
'loading.desktop',
]).Abridge([
'loading.desktop',
])
_WIN_7_GPU_BENCHMARK_CONFIGS = PerfSuite(['rendering.desktop']).Abridge(
['rendering.desktop'])
_ANDROID_GO_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.webview_startup'),
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('speedometer'),
_GetBenchmarkConfig('speedometer2')])
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS = _ANDROID_GO_BENCHMARK_CONFIGS
# Note that Nexus 5 bot capacity is very low, so we must severely limit
# the benchmarks that we run on it and abridge large benchmarks in order
# to run them on it. See crbug.com/1030840 for details.
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS = PerfSuite([
'loading.mobile',
'startup.mobile',
'system_health.common_mobile',
'system_health.webview_startup',
]).Abridge(['loading.mobile', 'startup.mobile', 'system_health.common_mobile'])
_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(100),
_gpu_perftests(45),
_tracing_perftests(55),
])
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL2_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL4_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.mobile'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('system_health.scroll_jank_mobile')])
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('startup.mobile')])
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('rendering.mobile'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('rendering.desktop')])
_LACROS_EVE_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_LINUX_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.desktop'),
_GetBenchmarkConfig('rendering.desktop'),
_GetBenchmarkConfig('system_health.common_desktop')
])
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_desktop'),
_GetBenchmarkConfig('media.mobile')
])
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('blink_perf.shadow_dom'),
])
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
])
# Linux
LINUX = PerfPlatform(
'linux-perf',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_BENCHMARK_CONFIGS,
26,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
LINUX_REL = PerfPlatform(
'linux-perf-rel',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP,
2,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
# Mac
MAC_HIGH_END = PerfPlatform(
'mac-10_13_laptop_high_end-perf',
'MacBook Pro, Core i7 2.8 GHz, 16GB RAM, 256GB SSD, Radeon 55',
_MAC_HIGH_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_HIGH_END_EXECUTABLE_CONFIGS)
MAC_LOW_END = PerfPlatform(
'mac-10_12_laptop_low_end-perf',
'MacBook Air, Core i5 1.8 GHz, 8GB RAM, 128GB SSD, HD Graphics',
_MAC_LOW_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_LOW_END_EXECUTABLE_CONFIGS)
MAC_M1_MINI_2020 = PerfPlatform(
'mac-m1_mini_2020-perf',
'Mac M1 Mini 2020',
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS)
# Win
WIN_10_LOW_END = PerfPlatform(
'win-10_laptop_low_end-perf',
'Low end windows 10 HP laptops. HD Graphics 5500, x86-64-i3-5005U, '
'SSD, 4GB RAM.',
_WIN_10_LOW_END_BENCHMARK_CONFIGS,
# TODO(crbug.com/998161): Increase the number of shards once you
# have enough test data to make a shard map and when more devices
# are added to the data center.
46,
'win')
WIN_10 = PerfPlatform(
'win-10-perf',
'Windows Intel HD 630 towers, Core i7-7700 3.6 GHz, 16GB RAM,'
' Intel Kaby Lake HD Graphics 630', _WIN_10_BENCHMARK_CONFIGS,
26, 'win', executables=_WIN_10_EXECUTABLE_CONFIGS)
WIN_10_AMD = PerfPlatform('win-10_amd-perf', 'Windows AMD chipset',
_WIN_10_AMD_BENCHMARK_CONFIGS, 2, 'win')
WIN_7 = PerfPlatform('Win 7 Perf', 'N/A', _WIN_7_BENCHMARK_CONFIGS, 2, 'win')
WIN_7_GPU = PerfPlatform('Win 7 Nvidia GPU Perf', 'N/A',
_WIN_7_GPU_BENCHMARK_CONFIGS, 3, 'win')
# Android
ANDROID_GO = PerfPlatform(
'android-go-perf', 'Android O (gobo)', _ANDROID_GO_BENCHMARK_CONFIGS,
19, 'android')
ANDROID_GO_WEBVIEW = PerfPlatform('android-go_webview-perf',
'Android OPM1.171019.021 (gobo)',
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS, 13,
'android')
ANDROID_NEXUS_5 = PerfPlatform('Android Nexus5 Perf',
'Android KOT49H',
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS,
10,
'android',
executables=_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS)
ANDROID_NEXUS_5X_WEBVIEW = PerfPlatform(
'Android Nexus5X WebView Perf', 'Android AOSP MOB30K',
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS, 16, 'android')
ANDROID_PIXEL2 = PerfPlatform('android-pixel2-perf',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL2_EXECUTABLE_CONFIGS)
ANDROID_PIXEL2_WEBVIEW = PerfPlatform(
'android-pixel2_webview-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL2_WEBLAYER = PerfPlatform(
'android-pixel2_weblayer-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4 = PerfPlatform('android-pixel4-perf',
'Android R',
_ANDROID_PIXEL4_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL4_EXECUTABLE_CONFIGS)
ANDROID_PIXEL4_WEBVIEW = PerfPlatform(
'android-pixel4_webview-perf', 'Android R',
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL4_WEBLAYER = PerfPlatform(
'android-pixel4_weblayer-perf', 'Android R',
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4A_POWER = PerfPlatform('android-pixel4a_power-perf',
'Android QD4A.200102.001.A1',
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS,
1, 'android')
# Cros/Lacros
LACROS_EVE_PERF = PerfPlatform('lacros-eve-perf', '',
_LACROS_EVE_BENCHMARK_CONFIGS, 10, 'chromeos')
# FYI bots
WIN_10_LOW_END_HP_CANDIDATE = PerfPlatform(
'win-10_laptop_low_end-perf_HP-Candidate', 'HP 15-BS121NR Laptop Candidate',
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS,
1, 'win', is_fyi=True)
ANDROID_NEXUS5X_PERF_FYI = PerfPlatform('android-nexus5x-perf-fyi',
'Android MMB29Q',
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS,
2,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_AAB_FYI = PerfPlatform(
'android-pixel2-perf-aab-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS,
1,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_FYI = PerfPlatform('android-pixel2-perf-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS,
4,
'android',
is_fyi=True)
CHROMEOS_KEVIN_PERF_FYI = PerfPlatform('chromeos-kevin-perf-fyi',
'',
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS,
4,
'chromeos',
is_fyi=True)
LINUX_PERF_FYI = PerfPlatform('linux-perf-fyi',
'',
_LINUX_PERF_FYI_BENCHMARK_CONFIGS,
1,
'linux',
is_fyi=True)
FUCHSIA_PERF_FYI = PerfPlatform('fuchsia-perf-fyi',
'',
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS,
7,
'fuchsia',
is_fyi=True)
# Calibration bots
LINUX_PERF_CALIBRATION = PerfPlatform(
'linux-perf-calibration',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS,
28,
'linux',
is_calibration=True)
ANDROID_PIXEL2_PERF_CALIBRATION = PerfPlatform(
'android-pixel2-perf-calibration',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS,
42,
'android',
is_calibration=True)
ALL_PLATFORMS = {
p for p in locals().values() if isinstance(p, PerfPlatform)
}
PLATFORMS_BY_NAME = {p.name: p for p in ALL_PLATFORMS}
FYI_PLATFORMS = {
p for p in ALL_PLATFORMS if p.is_fyi
}
CALIBRATION_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_calibration}
OFFICIAL_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_official}
ALL_PLATFORM_NAMES = {
p.name for p in ALL_PLATFORMS
}
OFFICIAL_PLATFORM_NAMES = {
p.name for p in OFFICIAL_PLATFORMS
}
def find_bot_platform(builder_name):
    """Return the PerfPlatform registered under *builder_name*.

    Returns None when no platform matches — the same behaviour as the
    previous linear scan, which fell through without returning.
    """
    # PLATFORMS_BY_NAME already indexes every platform by name, so use the
    # O(1) dict lookup instead of re-scanning ALL_PLATFORMS on every call.
    return PLATFORMS_BY_NAME.get(builder_name)
| 6,252 | 1,070 | 493 |
1c463b00bcc93f690abe0126cebd12479e2b2c5d | 1,568 | py | Python | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | 1 | 2020-05-20T00:08:33.000Z | 2020-05-20T00:08:33.000Z | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | cirq/optimizers/drop_negligible.py | sleichen/Cirq | 02f715203406d1f2af2d86e7561af09a2cdd4d45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that removes operations with tiny effects."""
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
"""An optimization pass that removes operations with tiny effects."""
| 37.333333 | 78 | 0.714286 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that removes operations with tiny effects."""
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
    """An optimization pass that removes operations with tiny effects."""

    def __init__(self, tolerance: float = 1e-8) -> None:
        self.tolerance = tolerance

    def optimize_circuit(self, circuit: _circuit.Circuit) -> None:
        """Batch-remove every operation whose trace distance bound is
        within the configured tolerance."""
        deletions = [
            (moment_index, op)
            for moment_index, moment in enumerate(circuit)
            for op in moment.operations
            if op is not None
            and protocols.trace_distance_bound(op) <= self.tolerance
        ]  # type: List[Tuple[int, ops.Operation]]
        circuit.batch_remove(deletions)
| 483 | 0 | 54 |
5831bcb34288a143649c99a14e072f15de7aa45d | 792 | py | Python | dapodik/sekolah/yayasan.py | hexatester/dapodik | d89c0fb899c89e866527f6b7b57f741abd6444ea | [
"MIT"
] | 4 | 2021-02-01T15:19:35.000Z | 2022-01-26T02:47:21.000Z | dapodik/sekolah/yayasan.py | hexatester/dapodik | d89c0fb899c89e866527f6b7b57f741abd6444ea | [
"MIT"
] | 3 | 2020-01-08T17:07:15.000Z | 2020-01-08T18:05:12.000Z | dapodik/sekolah/yayasan.py | hexatester/dapodik | d89c0fb899c89e866527f6b7b57f741abd6444ea | [
"MIT"
] | 2 | 2021-08-04T13:48:08.000Z | 2021-12-25T02:36:49.000Z | from datetime import datetime
from typing import Optional
import attr
@attr.dataclass
| 20.842105 | 41 | 0.70202 | from datetime import datetime
from typing import Optional
import attr
@attr.dataclass
class Yayasan:
    """A yayasan (school foundation) data record."""

    # Record identity.
    yayasan_id: str
    nama: str
    # Address components.
    alamat_jalan: str
    rt: str
    rw: str
    nama_dusun: str
    desa_kelurahan: str
    kode_wilayah: str
    kode_pos: str
    # Coordinates (lintang = latitude, bujur = longitude), kept as strings.
    lintang: str
    bujur: str
    # Contact details; may be absent.
    nomor_telepon: Optional[str]
    nomor_fax: Optional[str]
    email: Optional[str]
    website: Optional[str]
    npyp: Optional[str]
    # Founding / legal information.
    nama_pimpinan_yayasan: str
    no_pendirian_yayasan: str
    tanggal_pendirian_yayasan: str
    nomor_pengesahan_pn_ln: Optional[str]
    nomor_sk_bn: Optional[str]
    tanggal_sk_bn: str
    # Bookkeeping / synchronisation metadata.
    create_date: datetime
    last_update: datetime
    soft_delete: str
    last_sync: datetime
    updater_id: str
    kode_wilayah_str: str
    vld_count: int
| 0 | 681 | 22 |
915cd60e3606a8124de1feb87deb1d79540401cf | 13,438 | py | Python | transcriptic/util.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | [
"BSD-3-Clause"
] | 32 | 2015-10-27T22:51:05.000Z | 2020-03-26T00:43:32.000Z | transcriptic/util.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | [
"BSD-3-Clause"
] | 95 | 2015-10-27T15:30:46.000Z | 2020-03-30T00:38:05.000Z | transcriptic/util.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | [
"BSD-3-Clause"
] | 10 | 2015-10-27T06:35:30.000Z | 2019-09-26T15:18:49.000Z | import itertools
import json
import re
from collections import OrderedDict, defaultdict
from os.path import abspath, dirname, join
import click
def ascii_encode(non_compatible_string):
    """Primarily used for ensuring terminal display compatibility"""
    # Empty or None input maps to ""; otherwise drop every non-ASCII char.
    if not non_compatible_string:
        return ""
    return non_compatible_string.encode("ascii", errors="ignore").decode("ascii")
def regex_manifest(protocol, input):
    """Special input types, gets updated as more input types are added"""
    # Only "choice" inputs require validation; anything else passes through.
    if "type" not in input or input["type"] != "choice":
        return
    if "options" not in input:
        click.echo(
            f"Must have options for 'choice' input type. Error in: {protocol['name']}"
        )
        raise RuntimeError
    # The options must serialize to a bracketed list of value/label dicts.
    if not re.search(r"\[(.*?)\]", str(input["options"])):
        click.echo(
            'Error in %s: input type "choice" options must '
            'be in the form of: \n[\n {\n "value": '
            '<choice value>, \n "label": <choice label>\n '
            "},\n ...\n]" % protocol["name"]
        )
        raise RuntimeError
def makedirs(name, mode=None, exist_ok=False):
    """Forward ports `exist_ok` flag for Py2 makedirs. Retains mode defaults"""
    from os import makedirs as _os_makedirs

    if mode is None:
        mode = 0o777
    _os_makedirs(name, mode, exist_ok)
class PreviewParameters:
"""
A PreviewParameters object modifies web browser quick launch parameters and
modifies them for application protocol testing and debugging.
Attributes
------
api : object
the Connection object to provide session for using api endpoints
quick_launch_params: dict
web browser generated inputs for quick launch
selected_samples: defaultdict
all aliquots selected through the web quick launch manifest
modified_params: dict
the modified quick launch launch parameters, converts quick launch
aliquot objects into strings for debugging
refs: dict
all unique refs seen in the quick launch parameters
preview: dict
the combination of refs and modified_params for scientific
application debugging
protocol_obj: dict
the protocol object from the manifest
"""
def __init__(self, api, quick_launch_params, protocol_obj):
"""
Initialize TestParameter by providing a web generated params dict.
Parameters
----------
quick_launch_params: dict
web browser generated inputs for quick launch
"""
self.api = api
self.protocol_obj = protocol_obj
self.container_cache = {}
self.selected_samples = {}
self.csv_templates = {}
self.quick_launch_params = quick_launch_params
self.preview = self.build_preview()
def build_preview(self):
"""Builds preview parameters"""
self.modify_preview_parameters()
self.refs = self.generate_refs()
preview = defaultdict(lambda: defaultdict(dict))
preview["preview"]["parameters"].update(self.modified_params)
preview["preview"].update(self.refs)
return preview
def adjust_csv_table_input_type(self):
"""
Traverses the protocol object from the manifest to find any csv-table
input types. If it finds one it creates the headers and modifies the
modified_params that eventually will be the preview parameters for
autoprotocol testing.
"""
self.traverse_protocol_obj(self.protocol_obj["inputs"])
def modify_preview_parameters(self):
"""
This method will traverse the quick launch 'raw_inputs' and modify
container ids and aliquot dicts into a preview parameter container
string for autoprotocol generation debugging.
"""
self.modified_params = self.traverse_quick_launch(
obj=self.quick_launch_params, callback=self.create_preview_string
)
self.adjust_csv_table_input_type()
def generate_refs(self):
"""
This method takes the aggregated containers and aliquots to produce
the refs aliquot values
"""
ref_dict = defaultdict(lambda: defaultdict(dict))
ref_dict["refs"] = {}
for cid, index_arr in self.selected_samples.items():
container = self.container_cache.get(cid)
cont_name = PreviewParameters.format_container_name(container)
ref_dict["refs"][cont_name] = {
"label": cont_name,
"type": container.get("container_type").get("id"),
"store": container.get("storage_condition"),
"cover": container.get("cover", None),
"properties": container.get("properties"),
"aliquots": {},
}
if None not in index_arr:
ref_dict["refs"][cont_name]["aliquots"] = self.get_selected_aliquots(
container, index_arr
)
elif container.get("aliquots", None):
for ali in container.get("aliquots"):
ref_dict["refs"][cont_name]["aliquots"][ali["well_idx"]] = {
"name": ali["name"],
"volume": ali["volume_ul"] + ":microliter",
"properties": ali["properties"],
}
return ref_dict
def traverse_quick_launch(self, obj, callback=None):
"""
Will traverse quick launch object and send value to a callback
action method.
"""
if isinstance(obj, dict):
# If object has 'containerId' and 'wellIndex', then it is an aliquot
if "containerId" and "wellIndex" in obj.keys():
return self.create_string_from_aliquot(value=obj)
else:
value = {
k: self.traverse_quick_launch(v, callback) for k, v in obj.items()
}
elif isinstance(obj, list):
return [self.traverse_quick_launch(elem, callback) for elem in obj]
else:
value = obj
if callback is None:
return value
else:
return callback(value)
def add_to_cache(self, container_id):
"""Adds requested container to cache for later use"""
if container_id in self.container_cache:
container = self.container_cache[container_id]
else:
container = self.api.get_container(container_id)
self.container_cache[container_id] = container
return container
def create_string_from_aliquot(self, value):
"""Creates preview aliquot representation"""
well_idx = value.get("wellIndex")
container_id = value.get("containerId")
container = self.add_to_cache(container_id)
cont_name = PreviewParameters.format_container_name(container)
self.add_to_selected(container_id, well_idx)
return "{}/{}".format(cont_name, well_idx)
def create_preview_string(self, value):
"""Creates preview parameters string representation"""
if isinstance(value, str):
if value[:2] == "ct":
container_id = value
container = self.add_to_cache(container_id)
cont_name = PreviewParameters.format_container_name(container)
self.add_to_selected(container_id)
return cont_name
else:
return value
else:
return value
def add_to_selected(self, container_id, well_idx=None):
"""Saves which containers were selected."""
if container_id in self.selected_samples:
self.selected_samples[container_id].append(well_idx)
else:
self.selected_samples[container_id] = [well_idx]
def get_selected_aliquots(self, container, index_arr):
"""Grabs the properties from the selected aliquots"""
ref_aliquots = dict()
container_aliquots = {
ali.get("well_idx"): ali for ali in container.get("aliquots")
}
for i in index_arr:
ali = container_aliquots.get(i, container)
ref_aliquots[i] = {
"name": ali.get("name"),
"volume": "{}:microliter".format(ali.get("volume_ul", 10)),
"properties": ali.get("properties"),
}
return ref_aliquots
@classmethod
| 36.417344 | 90 | 0.598527 | import itertools
import json
import re
from collections import OrderedDict, defaultdict
from os.path import abspath, dirname, join
import click
def natural_sort(l):
    """Sort strings so embedded digit runs order numerically (a2 < a10)."""

    def _token(text):
        # Digit runs compare as ints, everything else case-insensitively.
        return int(text) if text.isdigit() else text.lower()

    def _key(entry):
        return [_token(chunk) for chunk in re.split("([0-9]+)", entry)]

    return sorted(l, key=_key)
def flatmap(func, items):
    """Lazily apply *func* to each item and flatten the resulting iterables."""
    return (element for item in items for element in func(item))
def ascii_encode(non_compatible_string):
    """Primarily used for ensuring terminal display compatibility"""
    # Empty or None input maps to ""; otherwise drop every non-ASCII char.
    if not non_compatible_string:
        return ""
    return non_compatible_string.encode("ascii", errors="ignore").decode("ascii")
def pull(nested_dict):
    """Recursively flatten manifest inputs.

    A node carrying both "type" and "inputs" is replaced by its
    (recursively pulled) inputs mapping, keyed by stringified parameter
    name; any other node is returned unchanged.
    """
    if "type" in nested_dict and "inputs" in nested_dict:
        return {
            str(name): pull(child) for name, child in nested_dict["inputs"].items()
        }
    return nested_dict
def regex_manifest(protocol, input):
    """Special input types, gets updated as more input types are added"""
    # Only "choice" inputs require validation; anything else passes through.
    if "type" not in input or input["type"] != "choice":
        return
    if "options" not in input:
        click.echo(
            f"Must have options for 'choice' input type. Error in: {protocol['name']}"
        )
        raise RuntimeError
    # The options must serialize to a bracketed list of value/label dicts.
    if not re.search(r"\[(.*?)\]", str(input["options"])):
        click.echo(
            'Error in %s: input type "choice" options must '
            'be in the form of: \n[\n {\n "value": '
            '<choice value>, \n "label": <choice label>\n '
            "},\n ...\n]" % protocol["name"]
        )
        raise RuntimeError
def iter_json(manifest):
    """Validate a manifest dict and collect the pulled input types of every
    protocol, keyed by protocol name.

    Raises RuntimeError when *manifest* is not subscriptable (not valid
    JSON-derived data) or when a "choice" input is malformed
    (see regex_manifest).
    """
    all_types = {}
    try:
        protocol = manifest["protocols"]
    except TypeError:
        raise RuntimeError(
            "Error: Your manifest.json file doesn't contain "
            "valid JSON and cannot be formatted."
        )
    for protocol in manifest["protocols"]:
        types = {}
        for param, input in list(protocol["inputs"].items()):
            types[param] = pull(input)
            if isinstance(input, dict):
                if input["type"] == "group" or input["type"] == "group+":
                    # Group inputs nest one level deeper; validate each
                    # dict-valued child entry.
                    for i, j in list(input.items()):
                        if isinstance(j, dict):
                            for k, l in list(j.items()):
                                regex_manifest(protocol, l)
                else:
                    regex_manifest(protocol, input)
        all_types[protocol["name"]] = types
    return all_types
def by_well(datasets, well):
    """Collect the first data point recorded for *well* across every reading."""
    return [dataset.props["data"][well][0] for dataset in datasets.values()]
def makedirs(name, mode=None, exist_ok=False):
    """Forward ports `exist_ok` flag for Py2 makedirs. Retains mode defaults"""
    from os import makedirs as _os_makedirs

    if mode is None:
        mode = 0o777
    _os_makedirs(name, mode, exist_ok)
def is_valid_jwt_token(token: str):
    """True when *token* is a 'Bearer <header>.<payload>.<signature>' string."""
    return bool(
        re.fullmatch(
            r"Bearer ([a-zA-Z0-9_=]+)\.([a-zA-Z0-9_=]+)\.([a-zA-Z0-9_\-\+\/=]*)",
            token,
        )
    )
def load_sampledata_json(filename: str) -> dict:
    """Read and deserialize the bundled sample-data JSON file *filename*."""
    with open(sampledata_path(filename)) as handle:
        return json.load(handle)
def sampledata_path(filename: str) -> str:
    """Absolute path of *filename* inside the bundled sample-data directory."""
    return join(sampledata_dir(), filename)
def sampledata_dir() -> str:
    """Absolute path of the ``sampledata/_data`` directory beside this module."""
    module_dir = dirname(__file__)
    return abspath(join(module_dir, "sampledata", "_data"))
class PreviewParameters:
    """
    A PreviewParameters object modifies web browser quick launch parameters and
    modifies them for application protocol testing and debugging.
    Attributes
    ------
    api : object
        the Connection object to provide session for using api endpoints
    quick_launch_params: dict
        web browser generated inputs for quick launch
    selected_samples: defaultdict
        all aliquots selected through the web quick launch manifest
    modified_params: dict
        the modified quick launch launch parameters, converts quick launch
        aliquot objects into strings for debugging
    refs: dict
        all unique refs seen in the quick launch parameters
    preview: dict
        the combination of refs and modified_params for scientific
        application debugging
    protocol_obj: dict
        the protocol object from the manifest
    """
    def __init__(self, api, quick_launch_params, protocol_obj):
        """
        Initialize TestParameter by providing a web generated params dict.
        Parameters
        ----------
        quick_launch_params: dict
            web browser generated inputs for quick launch
        """
        self.api = api
        self.protocol_obj = protocol_obj
        # Containers fetched from the API, keyed by container id, so each
        # container is requested at most once.
        self.container_cache = {}
        # container_id -> list of selected well indices (or [None] when the
        # whole container was selected).
        self.selected_samples = {}
        self.csv_templates = {}
        self.quick_launch_params = quick_launch_params
        self.preview = self.build_preview()
    def build_preview(self):
        """Builds preview parameters"""
        self.modify_preview_parameters()
        self.refs = self.generate_refs()
        preview = defaultdict(lambda: defaultdict(dict))
        preview["preview"]["parameters"].update(self.modified_params)
        preview["preview"].update(self.refs)
        return preview
    def adjust_csv_table_input_type(self):
        """
        Traverses the protocol object from the manifest to find any csv-table
        input types. If it finds one it creates the headers and modifies the
        modified_params that eventually will be the preview parameters for
        autoprotocol testing.
        """
        self.traverse_protocol_obj(self.protocol_obj["inputs"])
    def modify_preview_parameters(self):
        """
        This method will traverse the quick launch 'raw_inputs' and modify
        container ids and aliquot dicts into a preview parameter container
        string for autoprotocol generation debugging.
        """
        self.modified_params = self.traverse_quick_launch(
            obj=self.quick_launch_params, callback=self.create_preview_string
        )
        self.adjust_csv_table_input_type()
    def generate_refs(self):
        """
        This method takes the aggregated containers and aliquots to produce
        the refs aliquot values
        """
        ref_dict = defaultdict(lambda: defaultdict(dict))
        ref_dict["refs"] = {}
        for cid, index_arr in self.selected_samples.items():
            container = self.container_cache.get(cid)
            cont_name = PreviewParameters.format_container_name(container)
            ref_dict["refs"][cont_name] = {
                "label": cont_name,
                "type": container.get("container_type").get("id"),
                "store": container.get("storage_condition"),
                "cover": container.get("cover", None),
                "properties": container.get("properties"),
                "aliquots": {},
            }
            # [None] means the whole container was selected (no specific
            # wells); in that case include every aliquot the container has.
            if None not in index_arr:
                ref_dict["refs"][cont_name]["aliquots"] = self.get_selected_aliquots(
                    container, index_arr
                )
            elif container.get("aliquots", None):
                for ali in container.get("aliquots"):
                    ref_dict["refs"][cont_name]["aliquots"][ali["well_idx"]] = {
                        "name": ali["name"],
                        "volume": ali["volume_ul"] + ":microliter",
                        "properties": ali["properties"],
                    }
        return ref_dict
    def traverse_quick_launch(self, obj, callback=None):
        """
        Will traverse quick launch object and send value to a callback
        action method.
        """
        if isinstance(obj, dict):
            # If object has 'containerId' and 'wellIndex', then it is an aliquot
            if "containerId" and "wellIndex" in obj.keys():
                return self.create_string_from_aliquot(value=obj)
            else:
                value = {
                    k: self.traverse_quick_launch(v, callback) for k, v in obj.items()
                }
        elif isinstance(obj, list):
            return [self.traverse_quick_launch(elem, callback) for elem in obj]
        else:
            value = obj
        if callback is None:
            return value
        else:
            return callback(value)
    def add_to_cache(self, container_id):
        """Adds requested container to cache for later use"""
        if container_id in self.container_cache:
            container = self.container_cache[container_id]
        else:
            container = self.api.get_container(container_id)
            self.container_cache[container_id] = container
        return container
    def create_string_from_aliquot(self, value):
        """Creates preview aliquot representation"""
        well_idx = value.get("wellIndex")
        container_id = value.get("containerId")
        container = self.add_to_cache(container_id)
        cont_name = PreviewParameters.format_container_name(container)
        self.add_to_selected(container_id, well_idx)
        return "{}/{}".format(cont_name, well_idx)
    def create_preview_string(self, value):
        """Creates preview parameters string representation"""
        if isinstance(value, str):
            # Container ids start with "ct"; replace them with the
            # container's formatted label.
            if value[:2] == "ct":
                container_id = value
                container = self.add_to_cache(container_id)
                cont_name = PreviewParameters.format_container_name(container)
                self.add_to_selected(container_id)
                return cont_name
            else:
                return value
        else:
            return value
    def add_to_selected(self, container_id, well_idx=None):
        """Saves which containers were selected."""
        if container_id in self.selected_samples:
            self.selected_samples[container_id].append(well_idx)
        else:
            self.selected_samples[container_id] = [well_idx]
    def get_selected_aliquots(self, container, index_arr):
        """Grabs the properties from the selected aliquots"""
        ref_aliquots = dict()
        container_aliquots = {
            ali.get("well_idx"): ali for ali in container.get("aliquots")
        }
        for i in index_arr:
            ali = container_aliquots.get(i, container)
            ref_aliquots[i] = {
                "name": ali.get("name"),
                "volume": "{}:microliter".format(ali.get("volume_ul", 10)),
                "properties": ali.get("properties"),
            }
        return ref_aliquots
    def update_nested(self, in_dict, key, value):
        """Recursively find *key* anywhere inside *in_dict* and replace its
        value v with the pair [value, v]."""
        for k, v in in_dict.items():
            if key == k:
                in_dict[k] = [value, v]
            elif isinstance(v, dict):
                self.update_nested(v, key, value)
            elif isinstance(v, list):
                for o in v:
                    if isinstance(o, dict):
                        self.update_nested(o, key, value)
    def traverse_protocol_obj(self, obj, parentkey=None):
        """Walk the manifest input tree; for each ``csv-table`` input build a
        {key: col_type} header mapping from its template and splice it into
        modified_params under the same parent key."""
        if isinstance(obj, dict):
            if obj.get("type") == "csv-table":
                t = obj.get("template")
                headers = {k: c for k, c in zip(t.get("keys"), t.get("col_type"))}
                self.update_nested(self.modified_params, parentkey, headers)
                return obj
            else:
                value = {
                    pkey: self.traverse_protocol_obj(v, pkey) for pkey, v in obj.items()
                }
        elif isinstance(obj, list):
            return [self.traverse_protocol_obj(elem, parentkey) for elem in obj]
        else:
            value = obj
        return value
    def merge(self, manifest):
        """Replace the matching protocol entry in *manifest* with a
        re-ordered copy whose ``preview`` field is the generated preview,
        and store the resulting manifest on ``self.merged_manifest``."""
        # Get selected protocol
        selected_protocol = next(
            p
            for p in manifest["protocols"]
            if p["name"] == self.protocol_obj.get("name")
        )
        # Get the index of the protocol in the protocols list
        protocol_idx = manifest["protocols"].index(selected_protocol)
        updated_protocol = OrderedDict()
        # Ensure that the merged protocol object has the same key order
        updated_protocol["name"] = self.protocol_obj["name"]
        updated_protocol["display_name"] = self.protocol_obj["display_name"]
        updated_protocol["categories"] = self.protocol_obj.get("categories", [])
        updated_protocol["description"] = self.protocol_obj["description"]
        updated_protocol["version"] = self.protocol_obj["version"]
        updated_protocol["command_string"] = self.protocol_obj["command_string"]
        updated_protocol["inputs"] = self.protocol_obj["inputs"]
        updated_protocol["preview"] = self.preview.get("preview")
        # Place modified protocol in the appropriate index
        manifest["protocols"][protocol_idx] = updated_protocol
        # Ensure that manifest has correct order
        self.merged_manifest = OrderedDict()
        self.merged_manifest["format"] = "python"
        self.merged_manifest["license"] = "MIT"
        self.merged_manifest["protocols"] = manifest["protocols"]
    @classmethod
    def format_container_name(cls, container):
        """Container label with spaces replaced by underscores."""
        return container.get("label").replace(" ", "_")
| 4,525 | 0 | 314 |
da4d6902696eeeab8fc42d5ee76fbedcae018a29 | 1,062 | py | Python | src/scripts/metodos_painel_administrativo.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | 3 | 2019-11-28T22:58:50.000Z | 2020-08-20T12:23:38.000Z | src/scripts/metodos_painel_administrativo.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | null | null | null | src/scripts/metodos_painel_administrativo.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | 1 | 2019-03-21T20:13:51.000Z | 2019-03-21T20:13:51.000Z | import os
from dotenv import load_dotenv, find_dotenv
from pymongo import MongoClient
load_dotenv(find_dotenv())
mongo_url = os.getenv("mongo_url")
myclient = MongoClient(mongo_url)
mydb_master = myclient["SCDF"]
col = mydb_master["investigacoes"] | 33.1875 | 72 | 0.764595 | import os
from dotenv import load_dotenv, find_dotenv
from pymongo import MongoClient
load_dotenv(find_dotenv())
mongo_url = os.getenv("mongo_url")
myclient = MongoClient(mongo_url)
mydb_master = myclient["SCDF"]
col = mydb_master["investigacoes"]
def usuarios_ativos():
    """Return the distinct ids of users owning at least one investigation."""
    return {doc["id_responsavel"] for doc in col.find({})}
def investigacoes_usuario(id_responsavel):
    """Return the distinct investigation ids owned by *id_responsavel*."""
    cursor = col.find({"id_responsavel": id_responsavel})
    return {doc["id_investigacao"] for doc in cursor}
def deletar_investigacao(id_investigacao):
    """Remove every per-investigation collection for *id_investigacao*.

    NOTE(review): as in the original code, ``myclient.db.command(<name>)``
    sends the bare collection name as a database command; it looks like
    ``drop_collection`` may have been intended — confirm before relying on
    this behaviour.
    """
    collection_prefixes = (
        "SCDF_",
        "indice_palavras_documentos_",
        "palavras_interesse_",
        "relatorios_indice_arquivos_",
    )
    for prefix in collection_prefixes:
        myclient.db.command(prefix + id_investigacao)
def deletar_usuario(id_responsavel):
    """Delete every investigation owned by *id_responsavel*."""
    for investigacao in investigacoes_usuario(id_responsavel):
        deletar_investigacao(investigacao)
13a3177441684c7e57faf556b63af77fa9647257 | 9,185 | py | Python | eotile/eotile_cli.py | CS-SI/eotile | af395a0804af79ed1e7f25eb2cf3d875fcd85108 | [
"Apache-2.0"
] | 7 | 2021-09-21T09:08:13.000Z | 2021-09-30T13:16:51.000Z | eotile/eotile_cli.py | CS-SI/eotile | af395a0804af79ed1e7f25eb2cf3d875fcd85108 | [
"Apache-2.0"
] | 2 | 2021-11-16T15:20:46.000Z | 2022-02-11T17:12:52.000Z | eotile/eotile_cli.py | CS-SI/eotile | af395a0804af79ed1e7f25eb2cf3d875fcd85108 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021 CS GROUP - France.
#
# This file is part of EOTile.
# See https://github.com/CS-SI/eotile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EO tile
:author: mgerma
:organization: CS GROUP - France
:copyright: 2021 CS GROUP - France. All rights reserved.
:license: see LICENSE file.
"""
import argparse
import logging
import sys
from pathlib import Path
from geopy.geocoders import Nominatim
from eotile import eotile_module
from eotile.eotiles.eotiles import write_tiles_bb
def build_parser():
    """Creates a parser suitable for parsing a command line invoking this program.
    :return: An argument parser.
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser()
    # Input arguments
    parser.add_argument(
        "input",
        help="Choose amongst : a file, a tile_id, a location, a wkt, a bbox",
    )
    parser.add_argument("-epsg", help="Specify the epsg of the input")
    parser.add_argument("-no_l8", action="store_true", help="output L8 tiles")
    parser.add_argument("-no_s2", action="store_true", help="Disable S2 tiles")
    parser.add_argument("-dem", action="store_true", help='Use DEM 1" tiles as well')
    parser.add_argument(
        "-srtm5x5", action="store_true", help="Use specific srtm 5x5 tiles as well"
    )
    # Output arguments
    parser.add_argument("-to_file", help="Write tiles to a file")
    parser.add_argument(
        "-to_wkt",
        action="store_true",
        help="Output the geometry of matching tiles with wkt format on standard output",
    )
    parser.add_argument(
        "-to_bbox",
        action="store_true",
        help="Output the bounding box of matching tiles on standard output",
    )
    parser.add_argument(
        "-to_tile_id",
        action="store_true",
        help="Output the id(s) of matching tiles on standard output",
    )
    parser.add_argument(
        "-to_location",
        action="store_true",
        help="Output the location of the centroid of matching tiles "
        "on standard output",
    )
    # Behaviour / logging arguments
    parser.add_argument(
        "-s2_overlap",
        action="store_true",
        help="Do you want to have overlaps on S2 tiles ?",
    )
    parser.add_argument(
        "-v", "--verbose", action="count", help="Increase output verbosity"
    )
    parser.add_argument(
        "-logger_file", help="Redirect information from standard output to a file"
    )
    parser.add_argument(
        "-location_type",
        help="If needed, specify the location type that is requested (city, county, state, country)",
    )
    parser.add_argument(
        "-threshold",
        help="For large polygons at high resolution, you might want to simplify them using a threshold"
        "(0 to 1)",
    )
    parser.add_argument(
        "-min_overlap",
        help="Minimum percentage of overlap to consider a tile (0 to 1)",
    )
    return parser
def build_output(source, tile_list, user_logger, message, args):
    """
    Sub-function of the main
    Formats an output depending on a specified message & arguments over a dataframe pandas of tiles.

    :param source: Type of the source (DEM, S2, L8)
    :type source: str
    :param user_logger: LOGGER to log the message to
    :type user_logger: logging.LOGGER
    :param tile_list: pandas dataframe of the tiles to format
    :type tile_list: pandas DataFrame
    :param message: The message to format
    :type message: str
    :param args: fields to look in
    :type args: list
    """

    def _columns_for(fields):
        # "bounds" is derived from the geometry column; every other field
        # maps to a dataframe column of the same name.
        return ["geometry" if field == "bounds" else field for field in fields]

    def _format_args(row):
        # Positional format arguments for one dataframe row. This was
        # previously duplicated verbatim in both branches below.
        formatted = []
        for arg in args:
            if arg == "geometry":
                formatted.append(row["geometry"].wkt)
            elif arg == "bounds":
                formatted.append(row["geometry"].bounds)
            else:
                formatted.append(str(row[arg]))
        return formatted

    if source != "DEM":
        for _, row in tile_list[_columns_for(args)].iterrows():
            user_logger.info(message.format(source, *_format_args(row)))
    else:
        # DEM tiles additionally advertise which elevation datasets exist;
        # the availability list replaces the source name in the message.
        dem_flags = (
            ("EXIST_SRTM", "SRTM"),
            ("EXIST_COP30", "Copernicus 30"),
            ("EXIST_COP90", "Copernicus 90"),
        )
        columns = [flag for flag, _ in dem_flags] + _columns_for(args)
        for _, row in tile_list[columns].iterrows():
            availability = [label for flag, label in dem_flags if row[flag]]
            user_logger.info(
                message.format(", ".join(availability), *_format_args(row))
            )
def main(arguments=None):
    """
    Command line interface: look up the tiles matching the input and output
    them in the requested format (file, wkt, bbox, id, or location).

    :param list arguments: list of arguments
    """
    arg_parser = build_parser()
    args = arg_parser.parse_args(args=arguments)
    # Delegate the actual tile lookup to the library entry point; one tile
    # dataframe per source.
    [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5] = eotile_module.main(
        args.input,
        args.logger_file,
        args.no_l8,
        args.no_s2,
        args.dem,
        args.srtm5x5,
        args.location_type,
        args.min_overlap,
        args.epsg,
        args.threshold,
        args.verbose,
        args.s2_overlap,
    )
    tile_sources = ["S2", "L8", "DEM", "SRTM 5x5"]
    user_logger = logging.getLogger("user_logger")
    # Outputting the result
    tile_lists = [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5]
    if args.to_file is not None:
        output_path = Path(args.to_file)
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                if output_path.suffix == ".gpkg":
                    # Using layers method to combine sources if geopackage
                    write_tiles_bb(tile_list, output_path, source=source)
                else:
                    # Else, we split into several files
                    write_tiles_bb(
                        tile_list,
                        output_path.with_name(
                            output_path.stem + "_" + source + output_path.suffix
                        ),
                    )
    elif args.to_wkt:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile: {}", ["geometry"]
                )
    elif args.to_bbox:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile Bounds: {}", ["bounds"]
                )
    elif args.to_tile_id:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(source, tile_list, user_logger, "[{}] Tile id: {}", ["id"])
    elif args.to_location:
        # Reverse-geocode each tile centroid; coordinates are reversed from
        # (x, y) to (lat, lon) before querying Nominatim.
        geolocator = Nominatim(user_agent="EOTile")
        for tile_list in tile_lists:
            if len(tile_list) > 0:
                for elt in tile_list["geometry"]:
                    centroid = list(list(elt.centroid.coords)[0])
                    centroid.reverse()
                    location = geolocator.reverse(centroid, language="en")
                    if location is not None:
                        user_logger.info(str(location))
    else:
        # Default output: id and geometry for every matching tile.
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source,
                    tile_list,
                    user_logger,
                    "[{} tile]\n {}\n {}",
                    ["id", "geometry"],
                )
    # counts
    user_logger.info("--- Summary ---")
    for i, tile_list in enumerate(tile_lists):
        source = tile_sources[i]
        if len(tile_list) > 0:
            user_logger.info("- %s %s Tiles", len(tile_list), source)
if __name__ == "__main__":
    sys.exit(main())
| 33.892989 | 103 | 0.583669 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021 CS GROUP - France.
#
# This file is part of EOTile.
# See https://github.com/CS-SI/eotile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EO tile
:author: mgerma
:organization: CS GROUP - France
:copyright: 2021 CS GROUP - France. All rights reserved.
:license: see LICENSE file.
"""
import argparse
import logging
import sys
from pathlib import Path
from geopy.geocoders import Nominatim
from eotile import eotile_module
from eotile.eotiles.eotiles import write_tiles_bb
def build_parser():
    """Creates a parser suitable for parsing a command line invoking this program.

    :return: An parser.
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser()

    # Positional input and its optional projection.
    parser.add_argument(
        "input",
        help="Choose amongst : a file, a tile_id, a location, a wkt, a bbox",
    )
    parser.add_argument("-epsg", help="Specify the epsg of the input")

    # Tile-source toggles, registered data-driven to avoid repetition.
    source_flags = (
        ("-no_l8", "output L8 tiles"),
        ("-no_s2", "Disable S2 tiles"),
        ("-dem", 'Use DEM 1" tiles as well'),
        ("-srtm5x5", "Use specific srtm 5x5 tiles as well"),
    )
    for flag, flag_help in source_flags:
        parser.add_argument(flag, action="store_true", help=flag_help)

    # Output selection arguments.
    parser.add_argument("-to_file", help="Write tiles to a file")
    output_flags = (
        (
            "-to_wkt",
            "Output the geometry of matching tiles with wkt format on standard output",
        ),
        (
            "-to_bbox",
            "Output the bounding box of matching tiles on standard output",
        ),
        (
            "-to_tile_id",
            "Output the id(s) of matching tiles on standard output",
        ),
        (
            "-to_location",
            "Output the location of the centroid of matching tiles on standard output",
        ),
        (
            "-s2_overlap",
            "Do you want to have overlaps on S2 tiles ?",
        ),
    )
    for flag, flag_help in output_flags:
        parser.add_argument(flag, action="store_true", help=flag_help)

    # Miscellaneous tuning / logging options.
    parser.add_argument(
        "-v", "--verbose", action="count", help="Increase output verbosity"
    )
    parser.add_argument(
        "-logger_file", help="Redirect information from standard output to a file"
    )
    parser.add_argument(
        "-location_type",
        help="If needed, specify the location type that is requested (city, county, state, country)",
    )
    parser.add_argument(
        "-threshold",
        help="For large polygons at high resolution, you might want to simplify them using a threshold"
        "(0 to 1)",
    )
    parser.add_argument(
        "-min_overlap",
        help="Minimum percentage of overlap to consider a tile (0 to 1)",
    )
    return parser
def build_output(source, tile_list, user_logger, message, args):
    """
    Sub-function of the main
    Formats an output depending on a specified message & arguments over a dataframe pandas of tiles.

    For the "DEM" source the first format placeholder receives the list of
    datasets available for the tile (SRTM / Copernicus 30 / Copernicus 90)
    instead of the source name.

    :param source: Type of the source (DEM, S2, L8)
    :type source: str
    :param user_logger: LOGGER to log the message to
    :type user_logger: logging.LOGGER
    :param tile_list: pandas dataframe of the tiles to format
    :type tile_list: pandas DataFrame
    :param message: The message to format
    :type message: str
    :param args: fields to look in
    :type args: list
    """
    # The argument-building logic used to be duplicated in both branches;
    # it is factored out into _interesting_columns / _row_arguments below.
    if source != "DEM":
        columns = _interesting_columns(args)
        for _, row in tile_list[columns].iterrows():
            user_logger.info(message.format(source, *_row_arguments(row, args)))
    else:
        # DEM tiles also carry availability flags for each elevation dataset.
        columns = ["EXIST_SRTM", "EXIST_COP30", "EXIST_COP90"] + _interesting_columns(args)
        for _, row in tile_list[columns].iterrows():
            availability = []
            if row["EXIST_SRTM"]:
                availability.append("SRTM")
            if row["EXIST_COP30"]:
                availability.append("Copernicus 30")
            if row["EXIST_COP90"]:
                availability.append("Copernicus 90")
            user_logger.info(
                message.format(", ".join(availability), *_row_arguments(row, args))
            )


def _interesting_columns(args):
    """Map requested output fields to DataFrame columns ("bounds" is derived
    from the "geometry" column)."""
    return ["geometry" if arg == "bounds" else arg for arg in args]


def _row_arguments(row, args):
    """Build the ordered list of format arguments for one tile row.

    "geometry" yields the WKT string, "bounds" the geometry's bounding box,
    anything else the stringified column value.
    """
    arguments = []
    for arg in args:
        if arg == "geometry":
            arguments.append(row["geometry"].wkt)
        elif arg == "bounds":
            arguments.append(row["geometry"].bounds)
        else:
            arguments.append(str(row[arg]))
    return arguments
def main(arguments=None):
    """
    Command line interface to perform

    Parses the CLI arguments, asks eotile_module for the matching tile lists
    (S2 / L8 / DEM / SRTM 5x5) and renders them on the requested output:
    file, WKT, bbox, tile id or reverse-geocoded location.

    :param list arguments: list of arguments
    """
    arg_parser = build_parser()
    args = arg_parser.parse_args(args=arguments)

    # Resolve the input into one tile list per source.
    [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5] = eotile_module.main(
        args.input,
        args.logger_file,
        args.no_l8,
        args.no_s2,
        args.dem,
        args.srtm5x5,
        args.location_type,
        args.min_overlap,
        args.epsg,
        args.threshold,
        args.verbose,
        args.s2_overlap,
    )
    tile_sources = ["S2", "L8", "DEM", "SRTM 5x5"]

    user_logger = logging.getLogger("user_logger")

    # Outputting the result
    tile_lists = [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5]
    if args.to_file is not None:
        output_path = Path(args.to_file)
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                if output_path.suffix == ".gpkg":
                    # Using layers method to combine sources if geopackage
                    write_tiles_bb(tile_list, output_path, source=source)
                else:
                    # Else, we split into several files (one per source)
                    write_tiles_bb(
                        tile_list,
                        output_path.with_name(
                            output_path.stem + "_" + source + output_path.suffix
                        ),
                    )
    elif args.to_wkt:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile: {}", ["geometry"]
                )
    elif args.to_bbox:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile Bounds: {}", ["bounds"]
                )
    elif args.to_tile_id:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(source, tile_list, user_logger, "[{}] Tile id: {}", ["id"])
    elif args.to_location:
        geolocator = Nominatim(user_agent="EOTile")
        for tile_list in tile_lists:
            if len(tile_list) > 0:
                for elt in tile_list["geometry"]:
                    # reverse the (lon, lat) centroid ordering before handing it
                    # to the geocoder's reverse lookup
                    centroid = list(list(elt.centroid.coords)[0])
                    centroid.reverse()
                    location = geolocator.reverse(centroid, language="en")
                    if location is not None:
                        user_logger.info(str(location))
    else:
        # Default output: id and geometry of every matching tile.
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source,
                    tile_list,
                    user_logger,
                    "[{} tile]\n {}\n {}",
                    ["id", "geometry"],
                )

    # counts
    user_logger.info("--- Summary ---")
    for i, tile_list in enumerate(tile_lists):
        source = tile_sources[i]
        if len(tile_list) > 0:
            user_logger.info("- %s %s Tiles", len(tile_list), source)
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| 0 | 0 | 0 |
cf1d39991d50302fa1db47ade5d5dd38ded6bc27 | 3,812 | py | Python | backend/views/aws_model_view_set.py | crosspower/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 17 | 2019-01-23T04:37:43.000Z | 2019-10-15T01:42:31.000Z | backend/views/aws_model_view_set.py | snickerjp/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 1 | 2019-01-23T08:04:44.000Z | 2019-01-23T08:44:33.000Z | backend/views/aws_model_view_set.py | snickerjp/naruko | 4c524e2ef955610a711830bc86d730ffe4fc2bd8 | [
"MIT"
] | 6 | 2019-01-23T09:10:59.000Z | 2020-12-02T04:15:41.000Z | from rest_framework.viewsets import ViewSet
from backend.models import AwsEnvironmentModel, TenantModel
from backend.serializers.aws_environment_model_serializer import (AwsEnvironmentModelGetDetailSerializer,
AwsEnvironmentModelCreateSerializer,
AwsEnvironmentModelUpdateSerializer)
from backend.usecases.control_aws_environment import ControlAwsEnvironment
from rest_framework.response import Response
from rest_framework import status
from backend.logger import NarukoLogging
from django.db import transaction
from rest_framework.decorators import action
| 48.871795 | 116 | 0.695435 | from rest_framework.viewsets import ViewSet
from backend.models import AwsEnvironmentModel, TenantModel
from backend.serializers.aws_environment_model_serializer import (AwsEnvironmentModelGetDetailSerializer,
AwsEnvironmentModelCreateSerializer,
AwsEnvironmentModelUpdateSerializer)
from backend.usecases.control_aws_environment import ControlAwsEnvironment
from rest_framework.response import Response
from rest_framework import status
from backend.logger import NarukoLogging
from django.db import transaction
from rest_framework.decorators import action
class AwsEnvironmentModelViewSet(ViewSet):
    """REST endpoints for managing a tenant's AWS environments.

    Provides list/create/update/destroy plus a ``billing`` action. Every
    handler logs through NarukoLogging and delegates the business rules to
    ControlAwsEnvironment; write operations run inside a transaction.
    """

    queryset = AwsEnvironmentModel.objects.all()
    serializer_class = AwsEnvironmentModelGetDetailSerializer

    def list(self, request, tenant_pk=None, detail=True):
        """Return every AWS environment of the tenant visible to the user."""
        naruko_log = NarukoLogging(request)
        logger = naruko_log.get_logger(__name__)
        logger.info("START: list")
        tenant = TenantModel.objects.get(id=tenant_pk)
        environments = ControlAwsEnvironment(naruko_log).fetch_aws_environments(request.user, tenant)
        serialized = [
            AwsEnvironmentModelGetDetailSerializer(environment).data
            for environment in environments
        ]
        return Response(data={"aws_environments": serialized})

    @transaction.atomic
    def create(self, request, tenant_pk=None):
        """Register a new AWS environment under the tenant."""
        naruko_log = NarukoLogging(request)
        logger = naruko_log.get_logger(__name__)
        logger.info("START: create")
        request.data['tenant'] = tenant_pk
        serializer = AwsEnvironmentModelCreateSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        environment = serializer.save()
        ControlAwsEnvironment(naruko_log).save_aws_environment(request.user, environment)
        payload = AwsEnvironmentModelGetDetailSerializer(environment).data
        logger.info("END: create")
        return Response(data=payload, status=status.HTTP_201_CREATED)

    @transaction.atomic
    def update(self, request, tenant_pk=None, pk=None):
        """Partially update an existing AWS environment."""
        naruko_log = NarukoLogging(request)
        logger = naruko_log.get_logger(__name__)
        logger.info("START: update")
        environment = AwsEnvironmentModel.objects.get(id=pk, tenant_id=tenant_pk)
        serializer = AwsEnvironmentModelUpdateSerializer(
            instance=environment,
            data=request.data,
            partial=True)
        serializer.is_valid(raise_exception=True)
        environment = serializer.save()
        ControlAwsEnvironment(naruko_log).save_aws_environment(request.user, environment)
        payload = AwsEnvironmentModelGetDetailSerializer(environment).data
        logger.info("END: update")
        return Response(data=payload, status=status.HTTP_200_OK)

    @transaction.atomic
    def destroy(self, request, tenant_pk=None, pk=None):
        """Delete an AWS environment of the tenant."""
        naruko_log = NarukoLogging(request)
        logger = naruko_log.get_logger(__name__)
        logger.info("START: destroy")
        environment = AwsEnvironmentModel.objects.get(id=pk, tenant_id=tenant_pk)
        ControlAwsEnvironment(naruko_log).delete_aws_environment(request.user, environment)
        logger.info("END: destroy")
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(methods=['post'], detail=True)
    def billing(self, request, tenant_pk=None, pk=None):
        """Return billing-graph data for one AWS environment."""
        naruko_log = NarukoLogging(request)
        logger = naruko_log.get_logger(__name__)
        logger.info("START: billing")
        environment = AwsEnvironmentModel.objects.get(id=pk, tenant_id=tenant_pk)
        billing_graph = ControlAwsEnvironment(naruko_log).billing_graph(request.user, environment, **request.data)
        logger.info("END: billing")
        return Response(data=billing_graph, status=status.HTTP_200_OK)
| 2,686 | 399 | 24 |
106c341a5d8629033f9f396ac74dcbbce0511048 | 458 | py | Python | setup.py | openabis/openabis-fingerjetfx | 869eadd23a21a34dad6da69e26e2993495ddc7ba | [
"Apache-2.0"
] | 2 | 2021-09-13T18:34:33.000Z | 2021-10-30T19:18:32.000Z | setup.py | openabis/openabis-fingerjetfx | 869eadd23a21a34dad6da69e26e2993495ddc7ba | [
"Apache-2.0"
] | 2 | 2021-06-08T20:35:40.000Z | 2022-01-13T01:48:52.000Z | setup.py | openabis/openabis-fingerjetfx | 869eadd23a21a34dad6da69e26e2993495ddc7ba | [
"Apache-2.0"
] | null | null | null | from distutils.core import setup
# Distribution metadata for the openabis-fingerjetfx plugin package.
setup(
    name='openabis-fingerjetfx',
    version='0.0.1',
    packages=['openabis_fingerjetfx'],
    url='https://github.com/newlogic42/openabis-fingerjetfx',
    license='Apache License',
    author='newlogic42',
    author_email='',
    description='OpenAbis\' plugin implementation of FingerJetFXOSE/FingerJetFXOSE.',
    install_requires=[
        'pillow==6.2.1'
    ],
    # Ship every non-Python file found inside the package.
    package_data={
        '': ['*'],
    }
)
| 24.105263 | 85 | 0.641921 | from distutils.core import setup
# Distribution metadata for the openabis-fingerjetfx plugin package.
setup(
    name='openabis-fingerjetfx',
    version='0.0.1',
    packages=['openabis_fingerjetfx'],
    url='https://github.com/newlogic42/openabis-fingerjetfx',
    license='Apache License',
    author='newlogic42',
    author_email='',
    description='OpenAbis\' plugin implementation of FingerJetFXOSE/FingerJetFXOSE.',
    install_requires=[
        'pillow==6.2.1'
    ],
    # Ship every non-Python file found inside the package.
    package_data={
        '': ['*'],
    }
)
| 0 | 0 | 0 |
857d84388f45bda63af9b6545aeac3ca456dc9df | 4,050 | py | Python | cpu.py | WorldsApartDevTeam/py-snes | fc0c5be44a0ac8ef560f94eb5b0a29823666c4af | [
"MIT"
] | null | null | null | cpu.py | WorldsApartDevTeam/py-snes | fc0c5be44a0ac8ef560f94eb5b0a29823666c4af | [
"MIT"
] | null | null | null | cpu.py | WorldsApartDevTeam/py-snes | fc0c5be44a0ac8ef560f94eb5b0a29823666c4af | [
"MIT"
] | null | null | null | import memory
import instructions
# Bit masks for the processor status register (P), keyed by flag letter.
cpu_flags = {
    "N": 0x80,  # negative
    "V": 0x40,  # overflow
    "M": 0x20,  # accumulator size (set => 8bits)
    "X": 0x10,  # index size (set => 8bits)
    "D": 0x08,  # decimal flag (does nothing on SNES, I think)
    "I": 0x04,  # IRQ disabled when set
    "Z": 0x02,  # zero
    "C": 0x01   # carry (can be copied to the emulation flag)
}
# Run the demo program when executed as a script.
if __name__ == "__main__":
    main()
| 28.125 | 85 | 0.53037 | import memory
import instructions
# Bit masks for the processor status register (P), keyed by flag letter.
cpu_flags = {
    "N": 0x80,  # negative
    "V": 0x40,  # overflow
    "M": 0x20,  # accumulator size (set => 8bits)
    "X": 0x10,  # index size (set => 8bits)
    "D": 0x08,  # decimal flag (does nothing on SNES, I think)
    "I": 0x04,  # IRQ disabled when set
    "Z": 0x02,  # zero
    "C": 0x01   # carry (can be copied to the emulation flag)
}
class CPU:
    """65C816-style (SNES) CPU core.

    Holds the register file, flag and stack helpers, and a cycle() driver
    that fetches, decodes and executes one instruction from mapped memory
    using the table returned by instructions.getAllInstructions().
    """

    def __init__(self, mem):
        """Create a CPU wired to *mem* (the system address space)."""
        self.mem = mem
        self.reset = True  # run the reset sequence on the next cycle()
        self.halt = False  # set on illegal opcode; stops execution
        self.cycle_count = 0  # total cycles consumed so far
        self.A = 0 # Accumulator
        self.B = 0 # backup copy of the high byte in 8-bit mode
        self.X = 0 # X index
        self.Y = 0 # Y index
        self.S = 0 # Stack pointer
        self.DB = 0 # Default bank
        self.DP = 0 # Direct page
        self.PB = 0 # Program bank
        self.P = 0 # Status flags
        self.PC = 0 # Program counter
        self.EMU = True # does nothing, just a debug info
        self.instructions = instructions.getAllInstructions()  # opcode -> instruction class

    def set_flag(self, flag):
        """Set the status bit named *flag* (a cpu_flags key)."""
        self.P |= cpu_flags[flag]

    def clear_flag(self, flag):
        """Clear the status bit named *flag*."""
        self.P &= ~cpu_flags[flag]

    def get_flag(self, flag):
        """Return the masked (non-shifted) value of status bit *flag*; truthy when set."""
        return self.P & cpu_flags[flag]

    def get_pc(self):
        """Return the 24-bit program counter (program bank in the top byte)."""
        return (self.PB << 16) | self.PC

    def stack_push(self, b):
        """Write byte *b* at S, then post-decrement S with 16-bit wrap."""
        self.mem.write(self.S, b)
        self.S = (self.S - 1) & 0xFFFF

    def stack_pop(self):
        """Pre-increment S with 16-bit wrap and return the byte at the new top."""
        self.S = (self.S + 1) & 0xFFFF
        return self.mem.read(self.S)

    def get_full_a(self):
        """Return the 16-bit accumulator (B supplies the high byte in 8-bit mode)."""
        return (self.B << 8) | self.A

    def set_full_a(self, a):
        """Store *a* into the accumulator, honouring the M (8/16-bit) flag."""
        if self.get_flag("M"):
            self.A = a & 0xFF
        else:
            self.A = a & 0xFFFF
            self.B = (a>>8) & 0xFF

    def cycle(self):
        """
        Parse an instruction. May take several cycles. Exits when the PC changes
        """
        if self.halt:
            return
        if self.reset:
            # Do reset sequence.
            print("[reset]")
            self.set_flag("I")
            self.clear_flag("D")
            self.EMU = True
            self.set_flag("M")
            self.set_flag("X")
            self.DB = 0
            self.PB = 0
            self.S = 0x01FF
            # Read reset vector
            self.PC = self.mem.read(0xFFFC) | (self.mem.read(0xFFFD) << 8)
            # Reset cycle counter
            self.cycle_count = 0
            self.reset = False
        opcode = self.mem.read(self.get_pc())
        if not opcode in self.instructions:
            print("ILLEGAL OPCODE %02x @ $%06x -- halting" % (opcode, self.get_pc()))
            self.halt = True
            return
        # print("%02x -- %s" % (opcode, self.instructions[opcode]))
        instr = self.instructions[opcode](opcode)
        old_m = self.get_flag("M")
        step = instr.fetch(self)
        print("[$%02x:%04x]: %s" % (self.PB, self.PC, instr))
        cycles = instr.execute(self)
        if not self.get_flag("M"):
            # Back up high byte in B
            if old_m:
                self.A |= self.B << 8
            else:
                self.B = (self.A >> 8) & 0xFF
        if self.get_flag("X"):
            # Force X and Y to 0
            self.X &= 0xFF
            self.Y &= 0xFF
        self.PC = instructions.nextAddr(self.PC, step+1)
        self.cycle_count += cycles
def main():
    """Smoke-test the CPU core: map 64K of RAM, point the reset vector at a
    tiny two-instruction ADC program and run until the core halts."""
    wram = memory.RAM(0x10000)  # 64K of RAM
    bus = memory.AddressSpace()
    bus.map(0x0000, 0x10000, 0x0000, wram)
    core = CPU(bus)
    # Reset vector -> 0x0800
    wram.write(0xFFFC, 0x00)
    wram.write(0xFFFD, 0x08)
    # Program: ADC #$05 ; ADC $FD00 (both 8-bit, since we don't have REP)
    program = (0x69, 0x05, 0x6D, 0x00, 0xFD)
    for offset, byte in enumerate(program):
        wram.write(0x0800 + offset, byte)
    # Operand at $FD00: -2 as an unsigned byte
    wram.write(0xFD00, 0xFE)
    while not core.halt:
        core.cycle()
    print("A = %d" % core.A)
    print("cycle count = %d" % core.cycle_count)
# Run the demo program when executed as a script.
if __name__ == "__main__":
    main()
| 1,747 | 1,838 | 46 |
68036449168a00a08f919bb1b733ec487866094f | 3,778 | py | Python | camera_calibration.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | 8 | 2019-12-17T15:07:17.000Z | 2021-08-19T09:13:58.000Z | camera_calibration.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | null | null | null | camera_calibration.py | zyfccc/Spectral-Illumination-Correction-Achieving-Relative-Color-Constancy-Under-the-Spectral-Domain | 051af9662dbe53deaf2d493fe8dbf0c9adce7ccb | [
"MIT"
] | 3 | 2020-01-06T04:20:55.000Z | 2020-01-25T08:42:30.000Z | import cv2
import json
import statistics
import matplotlib.pyplot as plt
import numpy as np
import libs.method.QcImage as QcImage
import libs.method.SICCalibrationRegression_MB3 as SICCalibrationRegression_MB3
from libs.model.TrainingSet import TrainingSet
# Paths to the annotated training data and the annotation coordinate scale.
JSON_PATH = 'Dataset/data_color_chart/tags.json'
IMAGE_PATH = 'Dataset/data_color_chart/'
RECT_SCALE = 1000  # annotation rects are stored scaled by 1/1000 of the image size

# Calibration driver: collects per-image BGR samples from annotated patches,
# then fits the SIC calibration regression for the selected channel(s).
if __name__ == "__main__":
    jsonPath = JSON_PATH
    imagePath = IMAGE_PATH
    vis = False  # set True to draw/show the annotated patches for debugging
    channel = 'green'  # NOTE(review): matched by substring below, so e.g. 'bluegreen' runs both

    # train
    with open(jsonPath) as json_data:
        objs = json.load(json_data)

    images_b = None
    images_g = None
    images_r = None
    for obj in objs:
        colors_b = []
        colors_g = []
        colors_r = []
        trainingSet = TrainingSet(obj)
        cv_image = cv2.imread(
            imagePath + trainingSet.imagePath, cv2.IMREAD_COLOR)
        if cv_image is None:
            print('Training image: ' + trainingSet.imagePath + ' cannot be found.')
            continue
        dis_image = cv_image.copy()
        height, width, channels = cv_image.shape

        # Background patch first, so index 0 of each color list is the background.
        background_anno = trainingSet.background
        background_area = QcImage.crop_image_by_position_and_rect(
            cv_image, background_anno.position, background_anno.rect)
        background_bgr = QcImage.get_average_rgb(background_area)
        colors_b.append(background_bgr[0])
        colors_g.append(background_bgr[1])
        colors_r.append(background_bgr[2])

        # One averaged BGR sample per annotated reference patch.
        for anno in trainingSet.references:
            colour_area = QcImage.crop_image_by_position_and_rect(
                cv_image, anno.position, anno.rect)
            sample_bgr = QcImage.get_average_rgb(colour_area)
            colors_b.append(sample_bgr[0])
            colors_g.append(sample_bgr[1])
            colors_r.append(sample_bgr[2])

            # draw training label
            if vis:
                pos_x = int(width * anno.position.x)
                pos_y = int(height * anno.position.y)
                dim_x = int(width * anno.rect.x / RECT_SCALE) + pos_x
                dim_y = int(height * anno.rect.y / RECT_SCALE) + pos_y
                cv2.rectangle(dis_image,
                              (pos_x, pos_y),
                              (dim_x, dim_y),
                              (0, 255, 0), 1)

        # Stack this image's samples as one row per image.
        images_b = np.array([colors_b]) if images_b is None else np.append(
            images_b, [colors_b], axis=0)
        images_g = np.array([colors_g]) if images_g is None else np.append(
            images_g, [colors_g], axis=0)
        images_r = np.array([colors_r]) if images_r is None else np.append(
            images_r, [colors_r], axis=0)

        # display training image and label
        if vis:
            dis_image = cv2.cvtColor(dis_image, cv2.COLOR_BGR2RGB)
            plt.imshow(dis_image)
            plt.title(trainingSet.imagePath)
            plt.show()

    if 'blue' in channel:
        # blue channel
        print('blue============')
        M_b, B_b, err_b = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_b)
        print('a, b and error for blue channel: %s,%s, %s' %
              (M_b, B_b, err_b))

    if 'green' in channel:
        # green channel
        print('green============')
        M_g, B_g, err_g = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_g)
        print('a, b and error for green channel: %s,%s, %s' %
              (M_g, B_g, err_g))

    if 'red' in channel:
        # red channel
        print('red============')
        M_r, B_r, err_r = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_r)
        print('a, b and error for red channel: %s,%s, %s' %
              (M_r, B_r, err_r))

    input("Press Enter to exit...")
| 30.967213 | 83 | 0.593965 | import cv2
import json
import statistics
import matplotlib.pyplot as plt
import numpy as np
import libs.method.QcImage as QcImage
import libs.method.SICCalibrationRegression_MB3 as SICCalibrationRegression_MB3
from libs.model.TrainingSet import TrainingSet
JSON_PATH = 'Dataset/data_color_chart/tags.json'
IMAGE_PATH = 'Dataset/data_color_chart/'
RECT_SCALE = 1000
if __name__ == "__main__":
jsonPath = JSON_PATH
imagePath = IMAGE_PATH
vis = False
channel = 'green'
# train
with open(jsonPath) as json_data:
objs = json.load(json_data)
images_b = None
images_g = None
images_r = None
for obj in objs:
colors_b = []
colors_g = []
colors_r = []
trainingSet = TrainingSet(obj)
cv_image = cv2.imread(
imagePath + trainingSet.imagePath, cv2.IMREAD_COLOR)
if cv_image is None:
print('Training image: ' + trainingSet.imagePath + ' cannot be found.')
continue
dis_image = cv_image.copy()
height, width, channels = cv_image.shape
background_anno = trainingSet.background
background_area = QcImage.crop_image_by_position_and_rect(
cv_image, background_anno.position, background_anno.rect)
background_bgr = QcImage.get_average_rgb(background_area)
colors_b.append(background_bgr[0])
colors_g.append(background_bgr[1])
colors_r.append(background_bgr[2])
for anno in trainingSet.references:
colour_area = QcImage.crop_image_by_position_and_rect(
cv_image, anno.position, anno.rect)
sample_bgr = QcImage.get_average_rgb(colour_area)
colors_b.append(sample_bgr[0])
colors_g.append(sample_bgr[1])
colors_r.append(sample_bgr[2])
# draw training label
if vis:
pos_x = int(width * anno.position.x)
pos_y = int(height * anno.position.y)
dim_x = int(width * anno.rect.x / RECT_SCALE) + pos_x
dim_y = int(height * anno.rect.y / RECT_SCALE) + pos_y
cv2.rectangle(dis_image,
(pos_x, pos_y),
(dim_x, dim_y),
(0, 255, 0), 1)
images_b = np.array([colors_b]) if images_b is None else np.append(
images_b, [colors_b], axis=0)
images_g = np.array([colors_g]) if images_g is None else np.append(
images_g, [colors_g], axis=0)
images_r = np.array([colors_r]) if images_r is None else np.append(
images_r, [colors_r], axis=0)
# display training image and label
if vis:
dis_image = cv2.cvtColor(dis_image, cv2.COLOR_BGR2RGB)
plt.imshow(dis_image)
plt.title(trainingSet.imagePath)
plt.show()
if 'blue' in channel:
# blue channel
print('blue============')
M_b, B_b, err_b = SICCalibrationRegression_MB3.sic_calibration_regression(
images_b)
print('a, b and error for blue channel: %s,%s, %s' %
(M_b, B_b, err_b))
if 'green' in channel:
# green channel
print('green============')
M_g, B_g, err_g = SICCalibrationRegression_MB3.sic_calibration_regression(
images_g)
print('a, b and error for green channel: %s,%s, %s' %
(M_g, B_g, err_g))
if 'red' in channel:
# red channel
print('red============')
M_r, B_r, err_r = SICCalibrationRegression_MB3.sic_calibration_regression(
images_r)
print('a, b and error for red channel: %s,%s, %s' %
(M_r, B_r, err_r))
input("Press Enter to exit...")
| 0 | 0 | 0 |
50e801d52e406df4ca9071d550d4975d7ffab046 | 621 | py | Python | setup.py | im-na02/melke | f25a08aafb52c596ff839799ac05b3dd336afc42 | [
"MIT"
] | 2 | 2020-10-10T07:05:37.000Z | 2020-11-26T08:31:07.000Z | setup.py | im-na02/melke | f25a08aafb52c596ff839799ac05b3dd336afc42 | [
"MIT"
] | null | null | null | setup.py | im-na02/melke | f25a08aafb52c596ff839799ac05b3dd336afc42 | [
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# Distribution metadata for the MELKE package.
# NOTE(review): find_packages is imported at the top of the file but the
# package list is given explicitly here.
setup(name = 'MELKE',
      version = '1',
      description = 'Extract entities and relations from BIO-text',
      long_description = 'You can read brief description of MELKE here: \nhttps://github.com/im-na02/melke/',
      url = 'https://github.com/im-na02/melke/',
      license = 'MIT',
      packages = ['melke'],
      keywords = ['bio', 'text', 'NER', 'entity', 'relation'],
      py_modules = ['EntityRelation'],
      python_requires = '>=3',
      include_package_data = True,
      # Ship every non-Python file found inside the melke package.
      package_data = {'melke':['*']},
      zip_safe = False
      )
| 32.684211 | 110 | 0.587762 |
from setuptools import setup, find_packages
# Distribution metadata for the MELKE package.
# NOTE(review): find_packages is imported at the top of the file but the
# package list is given explicitly here.
setup(name = 'MELKE',
      version = '1',
      description = 'Extract entities and relations from BIO-text',
      long_description = 'You can read brief description of MELKE here: \nhttps://github.com/im-na02/melke/',
      url = 'https://github.com/im-na02/melke/',
      license = 'MIT',
      packages = ['melke'],
      keywords = ['bio', 'text', 'NER', 'entity', 'relation'],
      py_modules = ['EntityRelation'],
      python_requires = '>=3',
      include_package_data = True,
      # Ship every non-Python file found inside the melke package.
      package_data = {'melke':['*']},
      zip_safe = False
      )
| 0 | 0 | 0 |
28d0f1dc82637d31f1d9cbb5b22536dc42f77318 | 45 | py | Python | tests/__init__.py | krahabb/motion_frontend | 57576cc95d5105b604b8b270d449b6bf9be54356 | [
"MIT"
] | null | null | null | tests/__init__.py | krahabb/motion_frontend | 57576cc95d5105b604b8b270d449b6bf9be54356 | [
"MIT"
] | null | null | null | tests/__init__.py | krahabb/motion_frontend | 57576cc95d5105b604b8b270d449b6bf9be54356 | [
"MIT"
] | null | null | null | """Tests for motion_frontend integration."""
| 22.5 | 44 | 0.755556 | """Tests for motion_frontend integration."""
| 0 | 0 | 0 |
93541fed16a76521b9e6ed4cde781c0fa86a2f6c | 398 | py | Python | ex104.py | felipesch92/PythonExercicios | 73edcbde6beaabcfc86af3dd6e58473f1eecabd3 | [
"MIT"
] | null | null | null | ex104.py | felipesch92/PythonExercicios | 73edcbde6beaabcfc86af3dd6e58473f1eecabd3 | [
"MIT"
] | null | null | null | ex104.py | felipesch92/PythonExercicios | 73edcbde6beaabcfc86af3dd6e58473f1eecabd3 | [
"MIT"
] | null | null | null | # Crie um programa que tenha a função leiaInt(), que vai funcionar
# de forma semelhante ‘a função input() do Python, só que fazendo a
# validação para aceitar apenas um valor numérico. Ex: n = leiaInt(‘Digite um n: ‘)
n = leiaInt('Número: ')
print(n) | 30.615385 | 83 | 0.663317 | # Crie um programa que tenha a função leiaInt(), que vai funcionar
# de forma semelhante ‘a função input() do Python, só que fazendo a
# validação para aceitar apenas um valor numérico. Ex: n = leiaInt(‘Digite um n: ‘)
def leiaInt(msg):
    """Prompt with *msg* until the user types a valid integer; return it.

    Works like input() but validates the answer: any string int() accepts
    (including a leading +/- sign and surrounding spaces) is returned as an
    int. On invalid input an error message is printed and the question is
    asked again. The previous version printed the error once and fell
    through, returning None.
    """
    while True:
        num = input(msg)
        try:
            # int() itself is the validator; it rejects anything non-integer.
            return int(num.strip())
        except ValueError:
            print('ERRO, digite um número válido!')
n = leiaInt('Número: ')
print(n) | 125 | 0 | 22 |
c6c6ce5278640388d7790acb0f21dac193c29b5a | 24,111 | py | Python | DataHandler.py | COE420Group4/Donation-Nation | 58d62bc3a28aba0ce2b484ad68329ac0bd0680f2 | [
"MIT"
] | null | null | null | DataHandler.py | COE420Group4/Donation-Nation | 58d62bc3a28aba0ce2b484ad68329ac0bd0680f2 | [
"MIT"
] | null | null | null | DataHandler.py | COE420Group4/Donation-Nation | 58d62bc3a28aba0ce2b484ad68329ac0bd0680f2 | [
"MIT"
] | null | null | null | # Import our database and initialize it
from db import DB
import send_email
import re
import hashlib
import uuid
import traceback
from datetime import datetime
from base64 import standard_b64encode
# Module-level database handle shared by every function below; the schema is
# rebuilt and seeded at import time.
# NOTE(review): clear_db() wipes all existing data on every import — confirm
# this is intended outside development.
sql = DB()
sql.clear_db()
sql.init_db()
sql.populate()
# Checker function to check all form variables
# Checker function to check that all form variables are alphabetic
# Checker function to check that all form variables are alphanum
# Checker function to check that all form variables are alphanum
# Get user information by supplying their UUID
| 36.42145 | 380 | 0.690017 | # Import our database and initialize it
from db import DB
import send_email
import re
import hashlib
import uuid
import traceback
from datetime import datetime
from base64 import standard_b64encode
# Module-level database handle shared by every function below; the schema is
# rebuilt and seeded at import time.
# NOTE(review): clear_db() wipes all existing data on every import — confirm
# this is intended outside development.
sql = DB()
sql.clear_db()
sql.init_db()
sql.populate()
# Checker function to check all form variables
def check_form(form, paramters):
    """Return True when every listed parameter is present in *form* with a
    non-empty value; False otherwise.

    Bug fix: the previous guard ``if form[param] and len(form[param]) < 1``
    short-circuited on empty strings (falsy), so empty values passed
    validation. Now a missing key or a zero-length value fails the check.
    """
    for param in paramters:
        try:
            if len(form[param]) < 1:
                return False
        except Exception:
            # Missing key (or an unsized value) counts as invalid input.
            return False
    return True
# Checker function to check that all form variables are alphabetic
def is_all_alpha(form, paramters):
    """Ensure every listed field contains only letters and spaces.

    Raises UserException naming the first offending field; returns True
    when all fields pass.
    """
    for param in paramters:
        for character in form[param]:
            if not (character.isalpha() or character.isspace()):
                raise UserException(f'{param.capitalize()} must consist of only alphabetic characters.')
    return True
# Checker function to check that all form variables are alphanum
def is_all_alnum(form, paramters):
    """Ensure every listed field is purely alphanumeric.

    Raises UserException naming the first offending field; returns None on
    success (matching the original contract).
    """
    for param in paramters:
        value = form[param]
        if value.isalnum():
            continue
        raise UserException(f'{param.capitalize()} must consist of only alphanumeric characters.')
    return
# Checker function to check that all form variables are alphanum
def is_all_numeric(form, paramters):
    """Ensure every listed field contains only numeric characters.

    Raises UserException naming the first offending field; returns True
    when all fields pass.
    """
    for param in paramters:
        value = form[param]
        if value.isnumeric():
            continue
        raise UserException(f'{param.capitalize()} must consist of only numeric characters.')
    return True
def is_email(form, parameter):
    """Validate that form[parameter] looks like a simple e-mail address.

    Raises UserException when the value does not match the pattern; returns
    None on success.
    """
    pattern = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    if not re.search(pattern, form[parameter]):
        raise UserException(f'{parameter.capitalize()} must be a valid email.')
def allowed_file(filename):
    """Return whether *filename* has an allowed image extension
    (jpg/jpeg/png, case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in {'jpg', 'png', 'jpeg'}
class User:
def insert(form):
    """Validate the signup *form*, create the user row and e-mail a
    verification link.

    Raises UserException on missing/invalid fields, duplicate phone or
    email, mismatched passwords, or any database/mail failure.
    """
    # Check that all information is here
    if check_form(form, ['firstName', 'lastName', 'dob', 'city', 'emirate', 'POBox', 'address1', 'address2', 'phone', 'email', 'password', 'confirmPassword']):
        # Per-field format validation; each helper raises UserException on failure.
        is_all_alpha(form, ['firstName', 'lastName', 'city', 'emirate'])
        is_all_alnum(form, ['POBox'])
        is_all_numeric(form, ['phone'])
        is_email(form, 'email')
        # Uniqueness checks against existing users.
        User.check_phone_exists(form['phone'])
        User.check_email_exists(form['email'])
        hash = ''
        if form['password'] != form['confirmPassword']:
            raise UserException('Both password fields must be the same.')
        else:
            # NOTE(review): unsalted SHA-256 password hashing — consider a salted KDF.
            hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
        user_uuid = str(uuid.uuid4())
        try:
            dbcon = sql.connect()
            # New users start unverified (isVerified=0) and non-admin (isAdmin=0).
            dbcon.execute("INSERT INTO users (UUID, first_name, last_name, dob, city, emirate, po_box, address_1, address_2, phone, email, password, isAdmin, isVerified) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,0,0)", (user_uuid,form['firstName'], form['lastName'], form['dob'], form['city'], form['emirate'], form['POBox'], form['address1'], form['address2'], form['phone'], form['email'], hash))
            verification_uuid = str(uuid.uuid4())
            dbcon.execute("INSERT INTO verifications VALUES (?,?)", (user_uuid, verification_uuid))
            # Send email to user for verification
            send_email.send('Email Verification', f'Hi {form["firstName"].strip()}!\n\n\nThank you for signing up for Donation Nation!\n\nTo complete your registration and enable your account, please verify your email by visiting the link: http://127.0.0.1:5000/verify_user/{verification_uuid}\n\nRegards,\nDonation Nation', [form['email'],])
            # Commit changes and close the db connection
            dbcon.commit()
            dbcon.close()
        except Exception:
            traceback.print_exc()
            raise UserException("Something went wrong. Contact an admin.")
    else:
        raise UserException("Invalid or missing information!")
def check_phone_exists(value):
    """Raise UserException if a user with phone number *value* already exists."""
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT id FROM users WHERE phone=?", (value,))
        if cur.fetchone() is not None:
            # Duplicate found: release DB resources before signalling the caller.
            cur.close()
            dbcon.close()
            raise UserException("A user with that phone number already exists.")
        else:
            cur.close()
            dbcon.close()
    except UserException as e:
        # Re-raise validation errors untouched so they aren't masked below.
        raise e
    except Exception:
        traceback.print_exc()
        raise UserException("Something went wrong. Contact an admin.")
def check_email_exists(value):
    """Raise UserException if a user with e-mail address *value* already exists."""
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT id FROM users WHERE email=?", (value,))
        if cur.fetchone() is not None:
            # Duplicate found: release DB resources before signalling the caller.
            cur.close()
            dbcon.close()
            raise UserException("A user with that email already exists.")
        else:
            cur.close()
            dbcon.close()
    except UserException as e:
        # Re-raise validation errors untouched so they aren't masked below.
        raise e
    except Exception:
        traceback.print_exc()
        raise UserException("Something went wrong. Contact an admin.")
def fetchByUUID(user_uuid):
    """Look up a user row by UUID.

    Returns the full row when found; False when the user is missing or the
    query fails (the failure is printed via traceback).
    """
    try:
        connection = sql.connect()
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM users WHERE UUID=?", (user_uuid,))
        row = cursor.fetchone()
        cursor.close()
        connection.close()
        return row if row is not None else False
    except Exception:
        traceback.print_exc()
        return False
def login(form):
    """Authenticate with the 'email'/'password' fields of *form*.

    Returns the matching user row on success. Raises UserException for
    missing fields, bad credentials, or an unverified account.
    """
    if check_form(form, ['email', 'password']):
        # Hash the candidate password the same way insert() stored it.
        hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
        try:
            dbcon = sql.connect()
            cur = dbcon.cursor()
            # NOTE(review): cursor/connection are never closed on this path — confirm the leak is acceptable.
            cur.execute("SELECT * FROM users WHERE email=? AND password=?", (form['email'], hash))
            data = cur.fetchone()
            if data is not None:
                # Index 14 is the isVerified column (id plus the 14 columns inserted by insert()).
                if data[14] == 0:
                    raise UserException("You haven't verified your email yet! Please verify it then try again.")
                return data
            else:
                raise UserException("Invalid email or password. Please try again.")
        except UserException as e:
            raise e
        except Exception:
            traceback.print_exc()
            raise UserException("Something went wrong. Contact an admin.")
    else:
        raise UserException("Invalid or missing information!")
def verify(verify_uuid):
    """Mark the user behind verification UUID `verify_uuid` as verified.

    Flips users.isVerified, deletes the verification row and commits.
    Raises UserException("NotFound") for unknown UUIDs (generic name so the
    flask layer can catch it); re-raises other database errors.
    """
    dbcon = None
    cur = None
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT user_uuid FROM verifications WHERE verification_uuid=?", (verify_uuid,))
        uuid = cur.fetchone()
        if uuid is None:
            raise UserException("NotFound")
        # The verification exists: verify the user and consume the token.
        cur.execute("UPDATE users SET isVerified=1 WHERE UUID=?", (uuid[0],))
        cur.execute("DELETE FROM verifications WHERE user_uuid=?", (uuid[0],))
        dbcon.commit()
    except UserException as e:
        raise e
    except Exception as e:
        # We raise any exception so that the flask app can handle it
        traceback.print_exc()
        raise e
    finally:
        # Close on every path; the original skipped cleanup when raising
        # UserException("NotFound").
        if cur is not None:
            cur.close()
        if dbcon is not None:
            dbcon.close()
def addItem(form, session, files):
    """Record a newly donated item and notify the chosen organization.

    Stores the uploaded image base64-encoded in the items row (status 0 ==
    newly offered, judging by the status values used elsewhere in this
    module).  Raises UserException on missing fields or database failure.
    """
    if check_form(form, ['name','category','condition','description','organization','time']) and 'image' in files:
        item_uuid = str(uuid.uuid4())
        user_uuid = session['isLoggedIn'][1]
        current_time = datetime.now().strftime("%d/%m/%Y - %H:%M:%S")
        # Image bytes are embedded directly in the DB row as base64.
        image = standard_b64encode(files['image'].read())
        try:
            org = Organization.fetchByUUID(form['organization'])
            send_email.send('New Item Offered!', f'Hi {org[2].strip()}!\n\n\nYou have been offered a new item ({form["name"]}) [{form["category"]}]! Log into the application to approve or reject this item!\n\nRegards,\nDonation Nation', [org[12],])
            dbcon = sql.connect()
            cur = dbcon.cursor()
            cur.execute("INSERT INTO items (UUID,item_name,category,condition,description,org_id,user_id,time_submitted,pickup_time,image,status) VALUES (?,?,?,?,?,?,?,?,?,?,0)",(item_uuid,form['name'],form['category'],form['condition'],form['description'],form['organization'],user_uuid,current_time,form['time'],image))
            dbcon.commit()
            cur.close()
            dbcon.close()
        except Exception as e:
            # We raise any exception so that the flask app can handle it
            traceback.print_exc()
            raise UserException('Something went wrong. Contact an admin.')
    else:
        raise UserException('Missing or invalid information!')
def removeItem(uuid):
    """Delete the items row identified by `uuid`.

    Raises UserException on any database failure.
    """
    try:
        connection = sql.connect()
        cursor = connection.cursor()
        cursor.execute('DELETE FROM items WHERE UUID=?', (uuid,))
        connection.commit()
        cursor.close()
        connection.close()
    except UserException as err:
        raise err
    except Exception:
        traceback.print_exc()
        raise UserException('An issue has occurred. Please contact an admin.')
def changePickupTime(form, uuid):
    """Donor proposes a new pickup time for an item and the owning
    organization is notified by email.

    Sets items.status to 3 — presumably "donor proposed new time"; compare
    Organization.changePickupTime, which uses status 2.  Raises
    UserException on database failure.
    """
    try:
        item_data = User.fetchItemByUUID(uuid)
        org_data = Organization.fetchByUUID(item_data[6])
        send_email.send('Item Pickup Date Changed', f'Hi {org_data[2]}!\n\n\nThe item ({item_data[2]}) [UUID: {item_data[1]}] has been suggested a new pickup time by the donator. Log in to the application to view and accept or reject the new pickup time.\n\nRegards,\nDonation Nation', [org_data[12],])
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute('UPDATE items SET pickup_time=?, status=? WHERE UUID=?', (form['time'], 3, uuid))
        dbcon.commit()
        cur.close()
        dbcon.close()
    except UserException as ue:
        raise ue
    except Exception:
        traceback.print_exc()
        raise UserException('An issue has occurred. Please contact an admin.')
def accept(uuid):
    """Donor accepts the (possibly re-proposed) pickup arrangement.

    Sets items.status to 1 (accepted — the same value
    Organization.acceptItem uses) and emails the organization.  Raises
    UserException on database failure.
    """
    try:
        item_data = User.fetchItemByUUID(uuid)
        org_data = Organization.fetchByUUID(item_data[6])
        send_email.send('Item Accepted', f'Hi {org_data[2]}!\n\n\nThe item ({item_data[2]}) [UUID: {item_data[1]}] has been accepted by the user for pickup. Contact the user for further details.\n\nRegards,\nDonation Nation', [org_data[12],])
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute('UPDATE items SET status=? WHERE UUID=?', (1, uuid))
        dbcon.commit()
        cur.close()
        dbcon.close()
    except UserException as ue:
        raise ue
    except Exception:
        traceback.print_exc()
        raise UserException('An issue has occurred. Please contact an admin.')
def getAllItems(user_uuid):
    """Return every items row donated by `user_uuid`, joined with the
    organization's name.

    Raises UserException when the user has no items or on database failure.
    """
    # Connect to the database
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT items.id, items.UUID, item_name, category, condition, description, org_id, user_id, time_submitted, pickup_time, image, items.status, organizations.name FROM items, organizations WHERE organizations.UUID=items.org_id AND user_id=?", (user_uuid,))
        items = cur.fetchall()
        cur.close()
        dbcon.close()
        if len(items) > 0:
            return items
        else:
            raise UserException("No items exist for this user.")
    except UserException as ue:
        raise ue
    except Exception as e:
        raise UserException("Something went wrong. Please contact an admin.")
def changePassword(form, session):
    """Set a new password for the logged-in user.

    The password is stored as an unsalted SHA-256 hex digest, matching how
    login() hashes it.  Raises UserException on mismatched/missing fields
    or database failure.
    """
    if check_form(form, ['password', 'confirmPassword']):
        hash = ''
        if form['password'] != form['confirmPassword']:
            raise UserException('Both password fields must be the same.')
        else:
            hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
        try:
            dbcon = sql.connect()
            dbcon.execute("UPDATE users set password = ? where UUID = ?", (hash, session['isLoggedIn'][1]))
            # Commit changes and close the db connection
            dbcon.commit()
            dbcon.close()
        except Exception:
            traceback.print_exc()
            raise UserException("Something went wrong. Contact an admin.")
    else:
        raise UserException("Invalid or missing information!")
def editInformation(form, session):
    """Update the logged-in user's contact details and return the fresh row.

    Validates all fields, writes them to the users table, then re-reads and
    returns the updated row.  Raises UserException on invalid input or
    database failure.
    """
    # Check that all information is here
    if not check_form(form, ['city', 'emirate', 'POBox', 'address1', 'address2', 'phone']):
        # The original silently returned None here, unlike every sibling
        # method, which raises on an invalid form.
        raise UserException("Invalid or missing information!")
    is_all_alpha(form, ['city', 'emirate'])
    is_all_alnum(form, ['POBox'])
    is_all_numeric(form, ['phone'])
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("UPDATE users SET city = ?, emirate = ?, po_box = ?, address_1 = ?, address_2 = ?, phone = ? WHERE UUID = ?", (form['city'], form['emirate'], form['POBox'], form['address1'], form['address2'], form['phone'], session['isLoggedIn'][1]))
        dbcon.commit()
        cur.execute("SELECT * FROM users WHERE UUID=?", (session['isLoggedIn'][1],))
        data = cur.fetchone()
        cur.close()
        dbcon.close()
        return data
    except Exception:
        traceback.print_exc()
        raise UserException("Something went wrong. Contact an admin.")
def fetchItemByUUID(item_uuid):
    """Return the full items row for `item_uuid` (None when absent).

    Raises UserException on database failure.
    """
    try:
        connection = sql.connect()
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM items WHERE UUID=?', (item_uuid,))
        row = cursor.fetchone()
        cursor.close()
        connection.close()
        return row
    except Exception:
        traceback.print_exc()
        raise UserException("Something went wrong. Contact an admin.")
class UserException(Exception):
    """Domain error for User operations; `reason` carries the message."""
    def __init__(self, message):
        self.reason = message
        # Pass only the message to Exception.  The original also passed
        # `self`, so e.args[0] was the exception object itself — callers
        # read error.args[0] expecting the text.
        super().__init__(self.reason)
class Organization:
def insert(form, files):
    """Register a new organization (status 0 == email not yet verified).

    Validates the form, stores the base64-encoded logo in the row, creates
    a verification token and emails its link.  Raises OrgException on
    invalid input or database failure.
    """
    if check_form(form, ['name', 'registrationNumber', 'city', 'emirate', 'POBox', 'address1', 'address2', 'phone', 'email', 'password', 'confirmPassword']) and (files['logo'] is not None):
        is_all_alpha(form, ['name', 'city', 'emirate'])
        is_all_alnum(form, ['POBox'])
        is_all_numeric(form, ['phone', 'registrationNumber'])
        is_email(form, 'email')
        Organization.check_phone_exists(form['phone'])
        Organization.check_email_exists(form['email'])
        hash = ''
        logo = standard_b64encode(files['logo'].read())
        if form['password'] != form['confirmPassword']:
            raise OrgException('Both password fields must be the same.')
        else:
            # NOTE(review): unsalted SHA-256; consider a real KDF.
            hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
        org_uuid = str(uuid.uuid4())
        try:
            dbcon = sql.connect()
            dbcon.execute("INSERT INTO organizations (UUID, name, status, license_no, city, emirate, po_box, address_1, address_2, phone, logo, email, password) VALUES (?,?,0,?,?,?,?,?,?,?,?,?,?)", (org_uuid, form['name'], form['registrationNumber'], form['city'], form['emirate'], form['POBox'], form['address1'], form['address2'], form['phone'], logo, form['email'], hash))
            verification_uuid = str(uuid.uuid4())
            dbcon.execute("INSERT INTO verifications VALUES (?,?)", (org_uuid, verification_uuid))
            # Send email to user for verification
            send_email.send('Email Verification', f'Hi {form["name"].strip()}!\n\n\nThank you for signing up for Donation Nation!\n\nTo complete your registration and enable your account, please verify your email by visiting the link: http://127.0.0.1:5000/verify_org/{verification_uuid}\n\nRegards,\nDonation Nation', [form['email'],])
            # Commit changes and close the db connection
            dbcon.commit()
            dbcon.close()
        except Exception:
            traceback.print_exc()
            raise OrgException("Something went wrong. Contact an admin.")
    else:
        raise OrgException("Invalid or missing information!")
def check_phone_exists(value):
    """Raise OrgException if an organizations row already has phone `value`.

    Returns None when the phone number is free; raises OrgException on
    duplicates or on any database failure.
    """
    dbcon = None
    cur = None
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT id FROM organizations WHERE phone=?", (value,))
        if cur.fetchone() is not None:
            raise OrgException("An organization with that phone number already exists.")
    except OrgException as e:
        raise e
    except Exception:
        traceback.print_exc()
        raise OrgException("Something went wrong. Contact an admin.")
    finally:
        # Close on every path; the original leaked the cursor/connection
        # whenever execute() itself raised.
        if cur is not None:
            cur.close()
        if dbcon is not None:
            dbcon.close()
def check_email_exists(value):
    """Raise OrgException if an organizations row already has email `value`.

    Returns None when the email is free; raises OrgException on duplicates
    or on any database failure.
    """
    dbcon = None
    cur = None
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT id FROM organizations WHERE email=?", (value,))
        if cur.fetchone() is not None:
            raise OrgException("An organization with that email already exists.")
    except OrgException as e:
        raise e
    except Exception:
        traceback.print_exc()
        raise OrgException("Something went wrong. Contact an admin.")
    finally:
        # Close on every path; the original leaked the cursor/connection
        # whenever execute() itself raised.
        if cur is not None:
            cur.close()
        if dbcon is not None:
            dbcon.close()
def login(form):
    """Authenticate an organization and return its row on success.

    Status values (per verify()/accept() below): 0 = email unverified,
    1 = email verified awaiting admin, 2 = fully approved.  Raises
    OrgException for missing fields, bad credentials, or unapproved
    accounts.
    """
    try:
        if check_form(form, ['email', 'password']):
            hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
            # Verify the creds
            dbcon = sql.connect()
            cur = dbcon.cursor()
            cur.execute("SELECT id,UUID,name,status,license_no,city,emirate,po_box,address_1,address_2,phone,1,email,password FROM organizations WHERE email=? AND password=?", (form['email'], hash))
            org_data = cur.fetchone()
            if org_data is not None:
                # Check that the user is verified
                if org_data[3] == 2:
                    # This means the credentials are correct and we do nothing
                    return org_data
                elif org_data[3] == 1:
                    raise OrgException("An administrator has not verified your account yet. Please wait and try later.")
                else:
                    raise OrgException("Please verify your email so that an admin can review your account.")
            else:
                raise OrgException("Invalid email or password.")
        else:
            raise OrgException("Missing or invalid information!")
    except OrgException as e:
        raise e
    except Exception:
        traceback.print_exc()
        raise OrgException("Something went wrong. Contact an admin.")
def verify(verify_uuid):
    """Move an organization to status 1 (email verified, awaiting admin).

    Looks up the pending verification row, updates the organization, and
    consumes the verification.  Raises OrgException("NotFound") for
    unknown verification UUIDs.
    """
    try:
        # Check if this verification_uuid exists
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT user_uuid FROM verifications WHERE verification_uuid=?", (verify_uuid,))
        uuid = cur.fetchone()
        if uuid is None:
            raise OrgException("NotFound") # Generic name so that we can catch it in flask
        # If we're here, then the verification exists and we should verify the org
        cur.execute("UPDATE organizations SET status=1 WHERE UUID=?", (uuid[0],))
        # Remove the verification from the database
        cur.execute("DELETE FROM verifications WHERE user_uuid=?", (uuid[0],))
        # Commit the changes and close connections
        dbcon.commit()
        cur.close()
        dbcon.close()
    except OrgException as e:
        raise e
    except Exception:
        traceback.print_exc()
        raise OrgException("Something went wrong. Contact an admin.")
def accept(org_uuid):
    """Admin approval: move an organization to status 2 and email it.

    Raises OrgException when the organization is already accepted;
    re-raises database errors.
    """
    try:
        org_data = Organization.fetchByUUID(org_uuid)
        # Check their status
        if org_data[3] == 2:
            raise OrgException("Organization already accepted!")
        else:
            send_email.send('Application Accepted', f'Hi {org_data[2].strip()}!\n\n\nWe are pleased to inform you that your application ({org_data[1]}) for being an organization registered with us has been accepted. You can now log in to the application and begin accepting donations.\n\nRegards,\nDonation Nation', [org_data[12],])
            dbcon = sql.connect()
            cur = dbcon.cursor()
            cur.execute("UPDATE organizations SET status=2 WHERE UUID=?", (org_data[1],))
            cur.close()
            dbcon.commit()
            dbcon.close()
    except OrgException as e:
        raise e
    except Exception as e:
        traceback.print_exc()
        raise e
def reject(org_uuid):
    """Admin rejection: delete a pending organization and email it.

    Raises OrgException when the organization is already accepted;
    re-raises database errors.
    """
    try:
        org_data = Organization.fetchByUUID(org_uuid)
        # Check their status
        if org_data[3] == 2:
            raise OrgException("Organization already accepted!")
        else:
            send_email.send('Application Rejected', f'Hi {org_data[2].strip()}!\n\n\nWe regret to inform you that your application ({org_data[1]}) for being an organization registered with us has been rejected. You can contact us at tips@fbi.gov to repeal your rejection.\n\nRegards,\nDonation Nation', [org_data[12],])
            dbcon = sql.connect()
            cur = dbcon.cursor()
            cur.execute("DELETE FROM organizations WHERE UUID=?", (org_data[1],))
            # (Removed a stray cur.fetchone() after the DELETE — a DELETE
            # statement produces no result set to fetch.)
            cur.close()
            dbcon.commit()
            dbcon.close()
    except OrgException as e:
        raise e
    except Exception as e:
        traceback.print_exc()
        raise e
def getAll():
    """Return every organizations row (any status).

    NOTE(review): fetchall() never returns None, so the "no organizations"
    branch below is dead — an empty database yields [] rather than the
    intended OrgException.  Left unchanged because callers may rely on the
    empty-list behaviour.
    """
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT * FROM organizations")
        data = cur.fetchall()
        cur.close()
        dbcon.close()
        if data is not None:
            return data
        else:
            raise OrgException("There are no organizations registered yet.")
    except Exception as e:
        raise e
def fetchByUUID(org_uuid):
    """Return the organizations row for `org_uuid`, or False when the row
    is missing or any database error occurs (errors are swallowed)."""
    try:
        connection = sql.connect()
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM organizations WHERE UUID=?", (org_uuid,))
        row = cursor.fetchone()
        cursor.close()
        connection.close()
        return row if row is not None else False
    except Exception:
        return False
def getAllVerified():
    """Return every fully approved organization (status 2).

    NOTE(review): fetchall() never returns None — the else branch is dead
    and an empty result yields [] instead of the intended OrgException.
    """
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT * FROM organizations WHERE status=2")
        data = cur.fetchall()
        cur.close()
        dbcon.close()
        if data is not None:
            return data
        else:
            raise OrgException("There are no organizations registered yet.")
    except Exception as e:
        raise e
def getAllPending():
    """Return every organization not yet fully approved (status != 2).

    NOTE(review): fetchall() never returns None — the else branch is dead
    and an empty result yields [] instead of the intended OrgException.
    """
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT * FROM organizations WHERE status!=2")
        data = cur.fetchall()
        cur.close()
        dbcon.close()
        if data is not None:
            return data
        else:
            raise OrgException("There are no organizations registered yet.")
    except Exception as e:
        raise e
def changePassword(form, session):
    """Set a new password (unsalted SHA-256 hex digest, matching login())
    for the logged-in organization.

    Raises OrgException on mismatched/missing fields or database failure.
    """
    if not check_form(form, ['password', 'confirmPassword']):
        raise OrgException("Invalid or missing information!")
    if form['password'] != form['confirmPassword']:
        raise OrgException('Both password fields must be the same.')
    hash = hashlib.sha256(form['password'].encode('utf-8')).hexdigest()
    # (Removed an unused `user_uuid = str(uuid.uuid4())` copy/paste
    # leftover from the user variant of this method.)
    try:
        dbcon = sql.connect()
        dbcon.execute("UPDATE organizations set password = ? where UUID = ?", (hash, session['isLoggedIn'][1]))
        # Commit changes and close the db connection
        dbcon.commit()
        dbcon.close()
    except Exception:
        traceback.print_exc()
        raise OrgException("Something went wrong. Contact an admin.")
def getAllItems(org_uuid):
    """Return every non-rejected item (status != -1) offered to this
    organization, joined with the donor's first/last name.

    Raises OrgException when no items exist or on database failure.
    """
    try:
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute("SELECT items.id, items.UUID, item_name, category, condition, description, org_id, user_id, time_submitted, pickup_time, image, items.status, users.first_name, users.last_name FROM items, users WHERE users.UUID=items.user_id AND org_id=? AND items.status!=-1", (org_uuid,))
        items = cur.fetchall()
        cur.close()
        dbcon.close()
        if len(items) > 0:
            return items
        else:
            raise OrgException("No items exist for this organization.")
    except OrgException as ue:
        raise ue
    except Exception as e:
        raise OrgException("Something went wrong. Please contact an admin.")
def acceptItem(uuid):
    """Organization accepts a donated item: set status 1 and email the donor.

    Raises OrgException on database failure.
    """
    try:
        item_data = User.fetchItemByUUID(uuid)
        user_data = User.fetchByUUID(item_data[7])
        send_email.send('Item Accepted', f'Hi {user_data[2]}!\n\n\nYour item ({item_data[2]}) [UUID: {item_data[1]}] has been accepted for pickup. The organization you donated to should contact you shortly.\n\nRegards,\nDonation Nation', [user_data[11],])
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute('UPDATE items SET status=? WHERE UUID=?', (1, uuid))
        dbcon.commit()
        cur.close()
        dbcon.close()
    except OrgException as ue:
        raise ue
    except Exception:
        traceback.print_exc()
        raise OrgException('An issue has occurred. Please contact an admin.')
def removeItem(uuid):
    """Organization rejects a donated item: flag it with status -1
    (rather than deleting the row) and email the donor.

    Raises OrgException on database failure.
    """
    try:
        item_data = User.fetchItemByUUID(uuid)
        user_data = User.fetchByUUID(item_data[7])
        send_email.send('Item Rejected', f'Hi {user_data[2]}!\n\n\nYour item ({item_data[2]}) [UUID: {item_data[1]}] has been rejected by the organization. Either try again or contact the organization for more details.\n\nRegards,\nDonation Nation', [user_data[11],])
        connection = sql.connect()
        cursor = connection.cursor()
        cursor.execute('UPDATE items SET status=-1 WHERE UUID=?', (uuid,))
        connection.commit()
        cursor.close()
        connection.close()
    except OrgException as err:
        raise err
    except Exception:
        traceback.print_exc()
        raise OrgException('An issue has occurred. Please contact an admin.')
def changePickupTime(form, uuid):
    """Organization proposes a new pickup time (status 2) and emails the
    donor so they can approve or reject it.

    Raises OrgException on database failure.
    """
    try:
        item_data = User.fetchItemByUUID(uuid)
        user_data = User.fetchByUUID(item_data[7])
        send_email.send('Item Pickup Time Changed', f'Hi {user_data[2]}!\n\n\nYour item ({item_data[2]}) [UUID: {item_data[1]}] has been suggested a new pickup time. Log into the application to approve or reject this new time.\n\nRegards,\nDonation Nation', [user_data[11],])
        dbcon = sql.connect()
        cur = dbcon.cursor()
        cur.execute('UPDATE items SET pickup_time=?, status=? WHERE UUID=?', (form['time'], 2, uuid))
        dbcon.commit()
        cur.close()
        dbcon.close()
    except OrgException as ue:
        raise ue
    except Exception:
        traceback.print_exc()
        raise OrgException('An issue has occurred. Please contact an admin.')
class OrgException(Exception):
    """Domain error for Organization operations; `reason` carries the message."""
    def __init__(self, message):
        self.reason = message
        # Pass only the message to Exception.  The original also passed
        # `self`, which made args[0] the exception object itself instead of
        # the message text.
        super().__init__(self.reason)
db78e595d47bb20e71fcc8360f4c0e8c6f29044c | 327 | py | Python | js/json2/__init__.py | fanstatic/js.json | 907c75b0867930fefba839cdaad3de22286d279d | [
"BSD-3-Clause"
] | null | null | null | js/json2/__init__.py | fanstatic/js.json | 907c75b0867930fefba839cdaad3de22286d279d | [
"BSD-3-Clause"
] | null | null | null | js/json2/__init__.py | fanstatic/js.json | 907c75b0867930fefba839cdaad3de22286d279d | [
"BSD-3-Clause"
] | null | null | null | from fanstatic import Library, Resource
from fanstatic.core import render_js
library = Library('json2', 'resources')
def earlier_than_ie8(url):
    """Wrap the rendered script tag in an IE conditional comment.

    IE8 introduced native JSON support, so the json2.js polyfill is only
    served to IE versions older than 8.
    """
    script_tag = render_js(url)
    return '<!--[if lt IE 8]>{0}<![endif]-->'.format(script_tag)
json2 = Resource(library, 'json2.js', renderer=earlier_than_ie8)
| 29.727273 | 64 | 0.712538 | from fanstatic import Library, Resource
from fanstatic.core import render_js
library = Library('json2', 'resources')
def earlier_than_ie8(url):
    """Native JSON support was introduced in IE8."""
    # Wrap the rendered <script> tag in an IE conditional comment so only
    # IE 7 and older download the json2.js polyfill.
    return '<!--[if lt IE 8]>%s<![endif]-->' % render_js(url)
json2 = Resource(library, 'json2.js', renderer=earlier_than_ie8)
| 0 | 0 | 0 |
74ced1dbbb7b86d316e596ebb0e42efcb2687c49 | 45 | py | Python | gears/compilers/__init__.py | gears/gears | 5729c2525a8c04c185e998bd9a86233708972921 | [
"0BSD"
] | 9 | 2015-03-23T15:34:04.000Z | 2021-03-19T03:03:48.000Z | gears/compilers/__init__.py | gears/gears | 5729c2525a8c04c185e998bd9a86233708972921 | [
"0BSD"
] | 2 | 2015-08-31T03:19:27.000Z | 2016-01-20T09:54:01.000Z | gears/compilers/__init__.py | gears/gears | 5729c2525a8c04c185e998bd9a86233708972921 | [
"0BSD"
] | 3 | 2015-02-01T06:21:24.000Z | 2015-07-30T02:31:31.000Z | from .base import BaseCompiler, ExecCompiler
| 22.5 | 44 | 0.844444 | from .base import BaseCompiler, ExecCompiler
| 0 | 0 | 0 |
ea39ce6ed581ea5749f8a481a06db78673172cb8 | 1,148 | py | Python | sim_swiss.py | geordanr/tourneysim | cea8ee3ea60b9e622b2338d46b98b673d05bc0a2 | [
"MIT"
] | null | null | null | sim_swiss.py | geordanr/tourneysim | cea8ee3ea60b9e622b2338d46b98b673d05bc0a2 | [
"MIT"
] | null | null | null | sim_swiss.py | geordanr/tourneysim | cea8ee3ea60b9e622b2338d46b98b673d05bc0a2 | [
"MIT"
] | null | null | null | '''Swiss pairing simulation'''
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
import numpy as np
from tournament import SwissTournament, PairingError
# Script entry point.
if __name__ == '__main__':
    main()
| 31.027027 | 95 | 0.680314 | '''Swiss pairing simulation'''
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
import numpy as np
from tournament import SwissTournament, PairingError
def main():
    """Simulate many Swiss tournaments and plot mean points per skill rank.

    Tournaments whose pairing fails are skipped, leaving their row as NaN
    so the nan-aware aggregations below ignore them.
    """
    # SwissTournament.performance_sigma = 2
    num_iterations = 1000
    num_players = 64
    # One row per simulated tournament, one column per skill-ranked player.
    tournament_points = np.empty([num_iterations, num_players])
    tournament_points[:] = np.NAN
    for i in range(num_iterations):
        try:
            t = SwissTournament(num_players).run()
        except PairingError:
            # Unpairable round: leave this iteration's row as NaN.
            pass
        else:
            # Column j holds the points of the j-th most skilled player.
            for j, player in enumerate(sorted(t.players, key=lambda p: p.skill, reverse=True)):
                tournament_points[i, j] = player.tournamentPoints
    means = np.nanmean(tournament_points, axis=0)
    # Error bars span two standard deviations.
    error = 2 * np.nanstd(tournament_points, axis=0)
    plt.errorbar(range(num_players), means, yerr=error)
    plt.title('Swiss Tournament, sigma=%f' % SwissTournament.performance_sigma)
    plt.xlabel('Player (sorted by skill)')
    plt.ylabel('Tournament points')
    plt.savefig('swiss_%s' % str(SwissTournament.performance_sigma).replace('.', '_'))
if __name__ == '__main__':
    main()
| 910 | 0 | 23 |
973f09129ea0344a429f65f36474f379bbe8c43b | 638 | py | Python | LabCalc/Ex02/plot.py | giuuliorusso/uni-physics | 11939b34cb09ca579d9e45fa224b23db0fb7e4f9 | [
"MIT"
] | 2 | 2020-11-06T15:45:46.000Z | 2020-11-08T15:52:15.000Z | LabCalc/Ex02/plot.py | giuuliorusso/uni-physics | 11939b34cb09ca579d9e45fa224b23db0fb7e4f9 | [
"MIT"
] | null | null | null | LabCalc/Ex02/plot.py | giuuliorusso/uni-physics | 11939b34cb09ca579d9e45fa224b23db0fb7e4f9 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Load the simulated trajectory: time, x and y columns (header row skipped).
t, x, y = np.loadtxt("out/traiettoria.dat", skiprows=1, unpack=True)
# Traiettoria
plt.figure(figsize=(5, 5))
plt.plot(x, y, "-o", color="tab:blue", markersize=3)
plt.title("Traiettoria")
plt.xlabel("x(t)")
plt.ylabel("y(t)")
plt.savefig("out/traiettoria")
# x
plt.figure(figsize=(5, 5))
plt.plot(t, x, "-o", color="tab:green", markersize=3)
plt.title("x")
plt.xlabel("t")
plt.ylabel("x(t)")
plt.savefig("out/x")
# y
plt.figure(figsize=(5, 5))
plt.plot(t, y, "-o", color="tab:red", markersize=3)
plt.title("y")
plt.xlabel("t")
plt.ylabel("y(t)")
plt.savefig("out/y")
# Show all three figures interactively after saving them.
plt.show()
| 18.228571 | 68 | 0.652038 | import matplotlib.pyplot as plt
import numpy as np
# Load the simulated trajectory: time, x and y columns (header row skipped).
t, x, y = np.loadtxt("out/traiettoria.dat", skiprows=1, unpack=True)
# Traiettoria
plt.figure(figsize=(5, 5))
plt.plot(x, y, "-o", color="tab:blue", markersize=3)
plt.title("Traiettoria")
plt.xlabel("x(t)")
plt.ylabel("y(t)")
plt.savefig("out/traiettoria")
# x
plt.figure(figsize=(5, 5))
plt.plot(t, x, "-o", color="tab:green", markersize=3)
plt.title("x")
plt.xlabel("t")
plt.ylabel("x(t)")
plt.savefig("out/x")
# y
plt.figure(figsize=(5, 5))
plt.plot(t, y, "-o", color="tab:red", markersize=3)
plt.title("y")
plt.xlabel("t")
plt.ylabel("y(t)")
plt.savefig("out/y")
# Show all three figures interactively after saving them.
plt.show()
| 0 | 0 | 0 |
860a5571e29f5dffb1eb769b525b0bee732faa7d | 9,185 | py | Python | src/main.py | igor97100/tf2up | d45d449f4f0cf325b758b0023fc2654c5232fe70 | [
"MIT"
] | null | null | null | src/main.py | igor97100/tf2up | d45d449f4f0cf325b758b0023fc2654c5232fe70 | [
"MIT"
] | null | null | null | src/main.py | igor97100/tf2up | d45d449f4f0cf325b758b0023fc2654c5232fe70 | [
"MIT"
] | null | null | null | """Simple wrapper to upgrade the files by github URL"""
import json
import logging
import os
import re
import shutil
import subprocess
import urllib
from hashlib import md5
from typing import Tuple, List
import requests
import tensorflow as tf
# TODO: install file properly with `pip install -e .`
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from storage import FileStorage
from flask import (
Flask, redirect, request, render_template, send_from_directory)
app = Flask(__name__)
class NotebookDownloadException(Exception):
    """Raised when a requested GitHub file cannot be downloaded."""
class ConvertionException(Exception):
    """Raised when tf_upgrade_v2 fails to convert the downloaded file."""
def download_file(requested_url: str) -> str:
    """Fetch the raw file behind a GitHub "blob" URL and return its text.

    `requested_url` is the path part of a github.com URL; swapping ``blob``
    for ``raw`` points at the raw-content endpoint.

    Raises NotebookDownloadException when the file cannot be fetched.
    """
    url = f"https://github.com/{requested_url.replace('blob', 'raw')}"
    try:
        # requests has no default timeout; without one a stalled GitHub
        # connection would hang the request handler forever.
        resp = requests.get(url, timeout=30)
    except requests.RequestException:
        logging.info(f"Can not download {url}")
        raise NotebookDownloadException("Can not download the file. Please, check the URL")
    logging.info(f"Requested URL: {requested_url}")
    if resp.status_code != 200:
        logging.info(f"Can not download {url}")
        raise NotebookDownloadException("Can not download the file. Please, check the URL")
    return resp.text
# TODO: Run conversion in temp folder,
# so we do not have issues with concurrent conversion
def convert_file(in_file: str, out_file: str) -> List[str]:
    """Upgrade a file with ``tf_upgrade_v2``.

    Returns the tool's combined stdout/stderr as a list of decoded lines.
    Raises ConvertionException (details = "<br>"-joined output) when the
    tool exits non-zero.
    """
    # Argument-list form instead of shell=True: avoids shell quoting /
    # injection issues when the file paths are interpolated.
    command = ["tf_upgrade_v2", "--infile", in_file, "--outfile", out_file]
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    result_bytes = process.stdout.readlines()
    process.wait()
    result = [line.decode('utf-8') for line in result_bytes]
    if process.returncode:
        details = "<br>".join(result)
        raise ConvertionException("Can not convert the file", details)
    return result
def save_ipynb_from_py(folder: str, py_filename: str) -> str:
    """Convert a plain ``.py`` file into an ``.ipynb`` notebook.

    Reads ``folder/py_filename``, JSON-escapes every source line, splices
    them into the ``template.ipynb`` skeleton (replacing ``{{TEMPLATE}}``)
    and writes the notebook next to the original file.

    Returns the notebook's file name (not its path), e.g. ``code.ipynb``.
    """
    full_filename = f"{folder}/{py_filename}"
    with open(full_filename) as pyfile:
        # json.dumps produces a fully escaped JSON string literal; strip
        # its surrounding quotes to get just the escaped content.  The
        # previous hand-rolled escaping handled only newlines and double
        # quotes, so any backslash or control character in the source
        # (e.g. a "\d" regex or a tab) produced a corrupt notebook.
        code_lines = [json.dumps(line)[1:-1] for line in pyfile.readlines()]
    pycode = '",\n"'.join(code_lines)
    with open('template.ipynb') as template:
        template_body = ''.join(template.readlines())
    ipynb_code = template_body.replace('{{TEMPLATE}}', pycode)
    new_filename = full_filename.replace('.py', '.ipynb')
    with open(new_filename, "w") as ipynb_file:
        ipynb_file.write(ipynb_code)
    return py_filename.replace('.py', '.ipynb')
def process_file(file_url: str) -> Tuple[str, Tuple[str, ...]]:
    """Download, cache and upgrade a GitHub file.

    The cache folder name is the MD5 of the URL.  Returns
    ``(path, (original_name, converted_name))`` — for ``.py`` inputs the
    names are the generated ``.ipynb`` companions.
    """
    _, file_ext = os.path.splitext(file_url)
    folder_hash = md5(file_url.encode('utf-8')).hexdigest()
    path = f"/notebooks/{folder_hash}"
    original = f"original{file_ext}"
    converted = f"converted{file_ext}"
    # TODO: delete the folder completely if `force`
    # Cache miss: download, convert, persist the report.
    if not os.path.exists(path):
        file_content = download_file(file_url)
        os.mkdir(path)
        with open(f"{path}/{original}", "w") as original_file:
            original_file.write(file_content)
        try:
            output = convert_file(f"{path}/{original}", f"{path}/{converted}")
        except ConvertionException as error:
            # Drop the half-built cache folder so a retry starts clean.
            shutil.rmtree(path)
            raise error
        with open(f"{path}/output", "w") as summary_output:
            summary_output.write('\n'.join(output))
        shutil.copy('report.txt', f"{path}/report")
        # persist `report.txt` to GCS
        storage = FileStorage()
        storage.save_file('report.txt', folder_hash)
        # found a python file, need to encode separately
        # NOTE(review): indentation reconstructed — this .py branch is
        # assumed to live inside the cache-miss block (notebooks are
        # generated once, on first conversion); confirm against upstream.
        if original.endswith('.py'):
            result_filenames = []
            for py_file in [original, converted]:
                result_filenames.append(save_ipynb_from_py(path, py_file))
            assert len(result_filenames) == 2
            return path, tuple(result_filenames[:2])
    # Cache hit for .py inputs: the .ipynb files already exist.
    if original.endswith('.py'):
        return path, (original.replace('.py', '.ipynb'),
                      converted.replace('.py', '.ipynb'))
    return path, (original, converted)
def inject_nbdime(content: str, folder_hash: str) -> str:
    """Inject the upgrade-report lines into nbdime's diff page.

    Splits the proxied HTML at the "Notebook Diff" heading and re-renders
    it through nbdime_inject.html with the report inserted between the two
    halves.  Returns `content` untouched when the marker is absent.
    """
    replace_token = "<h3>Notebook Diff</h3>"
    position = content.find(replace_token)
    # nothing to inject here, just return the content
    if position == -1:
        return content
    path = f"/notebooks/{folder_hash}"
    with open(f"{path}/report") as summary_output:
        # Skip blank lines from the persisted report.
        report_lines = [line for line in summary_output.readlines()
                        if line.strip() != '']
    return render_template("nbdime_inject.html",
                           before=content[:position],
                           report_lines=report_lines,
                           after=content[position:],
                           folder=folder_hash,
                           file='converted.ipynb',
                           tf_version=tf.version.VERSION)
@app.route("/")
def hello():
"""Index page with intro info."""
return render_template('index.html',
tf_version=tf.version.VERSION)
@app.route('/download/<path:folder>/<path:filename>')
def download(folder, filename):
    """Serve a cached notebook file for download."""
    # TODO: move all /notebooks to a single config
    uploads = os.path.join('/notebooks/', folder)
    # send_from_directory refuses paths escaping the directory.
    return send_from_directory(directory=uploads, filename=filename)
@app.route("/d/<path:path>", methods=['GET'])
def proxy(path):
"""Proxy request to index of `nbdime`"""
nbdime_url = os.environ.get('NBDIME_URL')
params = '&'.join([f"{k}={v}" for k, v in request.values.items()])
url = f"{nbdime_url}{path}?{params}"
logging.info(f"URL: {url}")
try:
response = urllib.request.urlopen(url)
content = response.read()
if b'notebooks' in content:
folder_hash = re.findall(r"/notebooks\/([^\/]+)/", url)[0]
try:
content = inject_nbdime(content.decode('utf-8'), folder_hash)
return content
except FileNotFoundError:
return ("The cache was invalidated meanwhile. "
"Please start by submitting the URL again.")
else:
return content
except urllib.error.URLError:
logging.error(f"Can not proxy nbdime for GET: {url}")
message = "Something went wrong, can not proxy nbdime"
return render_template('error.html', message=message), 502
@app.route("/d/<path:path>", methods=['POST'])
def proxy_api(path):
"""Proxy request to `nbdime` API"""
nbdime_url = os.environ.get('NBDIME_URL')
url = f"{nbdime_url}{path}"
try:
payload = json.dumps(request.json).encode()
headers = {'content-type': 'application/json'}
# dirty hack: seems like sometimes nbdime looses `content type`
# from `application/json` to `text/plain;charset=UTF-8`
if not request.json:
logging.warning(f"WARNING: somehow lost json payload {request.json}")
base = re.findall(r"base=([^\&]+)", request.referrer)[0]
remote = re.findall(r"remote=([^\&]+)", request.referrer)[0]
payload = json.dumps({'base': base, 'remote': remote})
payload = payload.replace('%2F', '/').encode('utf-8')
req = urllib.request.Request(url,
data=payload,
headers=headers)
resp = urllib.request.urlopen(req)
return resp.read()
except urllib.error.URLError:
logging.error(f"Can not proxy nbdime for POST: {url}")
message = "Something went wrong, can not proxy nbdime"
return render_template('error.html', message=message), 502
# TODO force refresh
@app.route('/<path:path>')
def catch_all(path):
    """Entry point for GitHub file paths: convert, then redirect to the diff."""
    if not (path.endswith('.py') or path.endswith('.ipynb')):
        message = "Currently we only support `.py` and `.ipynb` files."
        return render_template('error.html', message=message), 501
    try:
        folder, files = process_file(path)
        # Hand the original/converted pair to the nbdime diff proxy.
        url = f"/d/diff?base={folder}/{files[0]}&remote={folder}/{files[1]}"
        return redirect(url, code=302)
    except NotebookDownloadException as error:
        message = error.args[0]
        return render_template('error.html', message=message), 400
    except ConvertionException as error:
        logging.error(f"Can not convert for path {path}: {error.details}")
        return render_template('error.html',
                               message=error.message,
                               details=error.details), 400
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| 31.892361 | 91 | 0.615133 | """Simple wrapper to upgrade the files by github URL"""
import json
import logging
import os
import re
import shutil
import subprocess
import urllib
from hashlib import md5
from typing import Tuple, List
import requests
import tensorflow as tf
# TODO: install file properly with `pip install -e .`
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from storage import FileStorage
from flask import (
Flask, redirect, request, render_template, send_from_directory)
app = Flask(__name__)
class NotebookDownloadException(Exception):
    """Raised when the requested GitHub file cannot be downloaded."""

    def __init__(self, message):
        # Zero-argument super() (Python 3); keeps args == (message,).
        super().__init__(message)
        self.message = message
class ConvertionException(Exception):
    """Raised when tf_upgrade_v2 fails; `details` carries the tool output."""

    def __init__(self, message, details):
        # Zero-argument super() (Python 3); keeps args == (message,).
        super().__init__(message)
        self.message = message
        self.details = details
def download_file(requested_url: str) -> str:
    """Fetch the raw file behind a GitHub "blob" URL and return its text.

    Raises NotebookDownloadException on non-200 responses.
    NOTE(review): requests.get has no timeout here — a stalled connection
    hangs the request handler.
    """
    url = f"https://github.com/{requested_url.replace('blob', 'raw')}"
    resp = requests.get(url)
    logging.info(F"Requested URL: {requested_url}")
    if resp.status_code != 200:
        logging.info(f"Can not download {url}")
        raise NotebookDownloadException("Can not download the file. Please, check the URL")
    return resp.text
# TODO: Run conversion in temp folder,
# so we do not have issues with concurrent conversion
def convert_file(in_file: str, out_file: str) -> List[str]:
    """Upgrade file with tf_upgrade_v2.

    Returns the tool's combined stdout/stderr as decoded lines; raises
    ConvertionException (details = "<br>"-joined output) on non-zero exit.
    NOTE(review): shell=True with interpolated paths — prefer the
    argument-list form.
    """
    comand = f"tf_upgrade_v2 --infile {in_file} --outfile {out_file}"
    process = subprocess.Popen(comand,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    result_bytes = process.stdout.readlines()
    process.wait()
    result = [line.decode('utf-8') for line in result_bytes]
    if process.returncode:
        details = "<br>".join(result)
        raise ConvertionException("Can not convert the file", details)
    return result
def save_ipynb_from_py(folder: str, py_filename: str) -> str:
"""Save ipynb file based on python file"""
full_filename = f"{folder}/{py_filename}"
with open(full_filename) as pyfile:
code_lines = [line.replace("\n", "\\n").replace('"', '\\"')
for line in pyfile.readlines()]
pycode = '",\n"'.join(code_lines)
with open('template.ipynb') as template:
template_body = ''.join(template.readlines())
ipynb_code = template_body.replace('{{TEMPLATE}}', pycode)
new_filename = full_filename.replace('.py', '.ipynb')
with open(new_filename, "w") as ipynb_file:
ipynb_file.write(ipynb_code)
return py_filename.replace('.py', '.ipynb')
def process_file(file_url: str) -> Tuple[str, Tuple[str, ...]]:
"""Process file with download, cache and upgrade."""
_, file_ext = os.path.splitext(file_url)
folder_hash = md5(file_url.encode('utf-8')).hexdigest()
path = f"/notebooks/{folder_hash}"
original = f"original{file_ext}"
converted = f"converted{file_ext}"
# TODO: delete the folder completely if `force`
if not os.path.exists(path):
file_content = download_file(file_url)
os.mkdir(path)
with open(f"{path}/{original}", "w") as original_file:
original_file.write(file_content)
try:
output = convert_file(f"{path}/{original}", f"{path}/{converted}")
except ConvertionException as error:
shutil.rmtree(path)
raise error
with open(f"{path}/output", "w") as summary_output:
summary_output.write('\n'.join(output))
shutil.copy('report.txt', f"{path}/report")
# persist `report.txt` to GCS
storage = FileStorage()
storage.save_file('report.txt', folder_hash)
# found a python file, need to encode separately
if original.endswith('.py'):
result_filenames = []
for py_file in [original, converted]:
result_filenames.append(save_ipynb_from_py(path, py_file))
assert len(result_filenames) == 2
return path, tuple(result_filenames[:2])
if original.endswith('.py'):
return path, (original.replace('.py', '.ipynb'),
converted.replace('.py', '.ipynb'))
return path, (original, converted)
def inject_nbdime(content: str, folder_hash: str) -> str:
"""Inject report strings before `nbdime`' diff"""
replace_token = "<h3>Notebook Diff</h3>"
position = content.find(replace_token)
# nothing to inject here, just return the content
if position == -1:
return content
path = f"/notebooks/{folder_hash}"
with open(f"{path}/report") as summary_output:
report_lines = [line for line in summary_output.readlines()
if line.strip() != '']
return render_template("nbdime_inject.html",
before=content[:position],
report_lines=report_lines,
after=content[position:],
folder=folder_hash,
file='converted.ipynb',
tf_version=tf.version.VERSION)
@app.route("/")
def hello():
"""Index page with intro info."""
return render_template('index.html',
tf_version=tf.version.VERSION)
@app.route('/download/<path:folder>/<path:filename>')
def download(folder, filename):
"""Allow to download files."""
# TODO: move all /notebooks to a single config
uploads = os.path.join('/notebooks/', folder)
return send_from_directory(directory=uploads, filename=filename)
@app.route("/d/<path:path>", methods=['GET'])
def proxy(path):
"""Proxy request to index of `nbdime`"""
nbdime_url = os.environ.get('NBDIME_URL')
params = '&'.join([f"{k}={v}" for k, v in request.values.items()])
url = f"{nbdime_url}{path}?{params}"
logging.info(f"URL: {url}")
try:
response = urllib.request.urlopen(url)
content = response.read()
if b'notebooks' in content:
folder_hash = re.findall(r"/notebooks\/([^\/]+)/", url)[0]
try:
content = inject_nbdime(content.decode('utf-8'), folder_hash)
return content
except FileNotFoundError:
return ("The cache was invalidated meanwhile. "
"Please start by submitting the URL again.")
else:
return content
except urllib.error.URLError:
logging.error(f"Can not proxy nbdime for GET: {url}")
message = "Something went wrong, can not proxy nbdime"
return render_template('error.html', message=message), 502
@app.route("/d/<path:path>", methods=['POST'])
def proxy_api(path):
"""Proxy request to `nbdime` API"""
nbdime_url = os.environ.get('NBDIME_URL')
url = f"{nbdime_url}{path}"
try:
payload = json.dumps(request.json).encode()
headers = {'content-type': 'application/json'}
# dirty hack: seems like sometimes nbdime looses `content type`
# from `application/json` to `text/plain;charset=UTF-8`
if not request.json:
logging.warning(f"WARNING: somehow lost json payload {request.json}")
base = re.findall(r"base=([^\&]+)", request.referrer)[0]
remote = re.findall(r"remote=([^\&]+)", request.referrer)[0]
payload = json.dumps({'base': base, 'remote': remote})
payload = payload.replace('%2F', '/').encode('utf-8')
req = urllib.request.Request(url,
data=payload,
headers=headers)
resp = urllib.request.urlopen(req)
return resp.read()
except urllib.error.URLError:
logging.error(f"Can not proxy nbdime for POST: {url}")
message = "Something went wrong, can not proxy nbdime"
return render_template('error.html', message=message), 502
# TODO force refresh
@app.route('/<path:path>')
def catch_all(path):
"""Endpoint for all URLs from Github"""
if not (path.endswith('.py') or path.endswith('.ipynb')):
message = "Currently we only support `.py` and `.ipynb` files."
return render_template('error.html', message=message), 501
try:
folder, files = process_file(path)
url = f"/d/diff?base={folder}/{files[0]}&remote={folder}/{files[1]}"
return redirect(url, code=302)
except NotebookDownloadException as error:
message = error.args[0]
return render_template('error.html', message=message), 400
except ConvertionException as error:
logging.error(f"Can not convert for path {path}: {error.details}")
return render_template('error.html',
message=error.message,
details=error.details), 400
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| 241 | 0 | 54 |
3117f09b419d9cef11d0b9cd97028c681b2e4929 | 177 | py | Python | A/resolve.py | staguchi0703/ABC174 | 7afa7c72cb26653808947538dbeaa9cb386f16af | [
"MIT"
] | null | null | null | A/resolve.py | staguchi0703/ABC174 | 7afa7c72cb26653808947538dbeaa9cb386f16af | [
"MIT"
] | null | null | null | A/resolve.py | staguchi0703/ABC174 | 7afa7c72cb26653808947538dbeaa9cb386f16af | [
"MIT"
] | null | null | null | def resolve():
'''
code here
'''
X = int(input())
if X >= 30:
print('Yes')
else:
print('No')
if __name__ == "__main__":
resolve()
| 11.8 | 26 | 0.429379 | def resolve():
'''
code here
'''
X = int(input())
if X >= 30:
print('Yes')
else:
print('No')
if __name__ == "__main__":
resolve()
| 0 | 0 | 0 |
2c59b084e6e7d905719a8cb30cf5382be7e6db57 | 405 | py | Python | twistedbot/plugins/core/chat_follow.py | lukleh/TwistedBot | 310509c037335845838e699f9f9d56af117e03c9 | [
"MIT"
] | 12 | 2015-01-21T00:24:06.000Z | 2021-07-01T03:06:39.000Z | twistedbot/plugins/core/chat_follow.py | lukleh/TwistedBot | 310509c037335845838e699f9f9d56af117e03c9 | [
"MIT"
] | 1 | 2015-01-21T00:23:24.000Z | 2015-01-21T20:21:09.000Z | twistedbot/plugins/core/chat_follow.py | lukleh/TwistedBot | 310509c037335845838e699f9f9d56af117e03c9 | [
"MIT"
] | 2 | 2015-01-20T21:31:10.000Z | 2018-06-19T09:12:04.000Z |
from twistedbot.plugins.base import PluginChatBase
from twistedbot.behavior_tree import FollowPlayer
plugin = Follow
| 20.25 | 62 | 0.718519 |
from twistedbot.plugins.base import PluginChatBase
from twistedbot.behavior_tree import FollowPlayer
class Follow(PluginChatBase):
@property
def command_verb(self):
return "follow"
@property
def help(self):
return "bot starts following you"
def command(self, sender, command, args):
self.world.bot.behavior_tree.new_command(FollowPlayer)
plugin = Follow
| 145 | 116 | 23 |
d93a810f176bb70d03d48e1e3ea61908e1fdda6d | 914 | py | Python | tests/db/data/system_data.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 10 | 2018-10-25T13:07:42.000Z | 2022-02-08T20:49:07.000Z | tests/db/data/system_data.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 80 | 2019-04-06T23:01:44.000Z | 2022-02-05T23:35:54.000Z | tests/db/data/system_data.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 3 | 2021-05-07T16:43:39.000Z | 2021-07-15T18:06:07.000Z | import pytest
from transiter.db import models
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 20.772727 | 87 | 0.601751 | import pytest
from transiter.db import models
@pytest.fixture
def system_1(add_model):
return add_model(
models.System(
pk=1, id="2", status=models.System.SystemStatus.ACTIVE, name="System 1"
)
)
@pytest.fixture
def system_2(add_model):
return add_model(
models.System(
pk=3, id="4", status=models.System.SystemStatus.ACTIVE, name="System 2"
)
)
@pytest.fixture
def agency_1_1(add_model, system_1, feed_1_1_update_1):
return add_model(
models.Agency(
id="6",
name="Agency",
timezone="America/New York",
system=system_1,
source=feed_1_1_update_1,
)
)
@pytest.fixture
def installing_system(add_model):
return add_model(
models.System(
pk=5, id="6", status=models.System.SystemStatus.INSTALLING, name="System 3"
)
)
| 707 | 0 | 88 |
8ce2e728b9e0427b8494a7449751f3f4fb8788cf | 4,934 | py | Python | zincbase/graph/Edge.py | complexdb/zincbase | 0c8ce46bc392dfa8ee99414877adb3b41648451e | [
"MIT"
] | 174 | 2020-02-04T08:36:09.000Z | 2022-01-03T15:53:05.000Z | zincbase/graph/Edge.py | complexdb/zincbase | 0c8ce46bc392dfa8ee99414877adb3b41648451e | [
"MIT"
] | 6 | 2020-02-08T18:11:36.000Z | 2021-03-07T20:00:20.000Z | zincbase/graph/Edge.py | complexdb/zincbase | 0c8ce46bc392dfa8ee99414877adb3b41648451e | [
"MIT"
] | 22 | 2020-02-07T03:17:17.000Z | 2022-03-08T15:02:18.000Z | from collections import defaultdict
import copy
import networkx as nx
from zincbase import context
class Edge:
"""Class representing an edge in the KB.
"""
@property
def nodes(self):
"""Return the nodes that this edge is connected to as tuple of (subject, object)
"""
return [context.kb.node(self._sub), context.kb.node(self._ob)]
@property
def attrs(self):
"""Returns attributes of the edge stored in the KB
"""
attributes = None
for _, edge in self._edge.items():
if edge['pred'] == self._pred:
attributes = copy.deepcopy(edge)
if attributes is None:
return False
try:
del attributes['pred']
del attributes['_watches']
except:
pass
return attributes
def watch(self, attribute, fn):
"""Execute user-defined function when the value of attribute changes.
Function takes two args: `edge` which has access to all
its own attributes, and the second
arg is the previous value of the attribute that changed.
As cycles are possible in the graph, changes to an edge attribute, that
change the attributes of the nodes it's connected to, etc,
may eventually propagate back to change the original edge's attribute again,
ad infinitum until the stack explodes. To prevent this, in one "update cycle", more
than `kb._MAX_RECURSION` updates will be rejected.
:returns int: id of the watch
:Example:
>>> from zincbase import KB
>>> kb = KB()
>>> kb.store('edge(a,b)')
0
>>> edge = kb.edge('a', 'edge', 'b')
>>> edge.resistance = 3
>>> print(edge.resistance)
3
>>> edge.watch('resistance', lambda x, prev_val: print('resistance changed to ' + str(x.resistance)))
('resistance', 0)
>>> edge.resistance += 1
resistance changed to 4
"""
self._watches[attribute].append(fn)
return (attribute, len(self._watches) - 1)
def remove_watch(self, attribute_or_watch_id):
"""Stop watching `attribute_or_watch_id`.
If it is a string, delete all watches for that attribute.
If it is a tuple of (attribute, watch_id): delete that specific watch.
"""
if isinstance(attribute_or_watch_id, tuple):
self._watches[attribute_or_watch_id[0]].pop(attribute_or_watch_id[1])
else:
self._watches[attribute_or_watch_id] = [] | 34.992908 | 109 | 0.585732 | from collections import defaultdict
import copy
import networkx as nx
from zincbase import context
class Edge:
"""Class representing an edge in the KB.
"""
def __init__(self, sub, pred, ob, data={}, watches=[]):
super().__setattr__('_name', str(sub) + '___' + str(pred) + '___' + str(ob))
super().__setattr__('_sub', str(sub))
super().__setattr__('_pred', str(pred))
super().__setattr__('_ob', str(ob))
super().__setattr__('_recursion_depth', 0)
super().__setattr__('_watches', defaultdict(list))
super().__setattr__('_edge', context.kb.G[self._sub][self._ob])
for watch in watches:
self._watches[watch[0]].append(watch[1])
def __repr__(self):
return self._name
def __eq__(self, comparator):
return self._name == str(comparator)
def __ne__(self, comparator):
return self._name != str(comparator)
def __iter__(self):
for attr in self.attrs:
yield(attr)
def __getattr__(self, key):
try:
for _, edge in self._edge.items():
if edge['pred'] == self._pred:
return edge[key]
except KeyError as e:
return None
def __setattr__(self, key, value):
if context.kb._global_propagations > context.kb._PROPAGATION_LIMIT:
return False
if self._recursion_depth > context.kb._MAX_RECURSION:
return False
context.kb._global_propagations += 1
super().__setattr__('_recursion_depth', self._recursion_depth + 1)
for _, attrs in self._edge.items():
if attrs['pred'] == self._pred:
prev_val = attrs.get(key, None)
attrs.update({key: value})
if not context.kb._dont_propagate:
for watch_fn in self._watches.get(key, []):
watch_fn(self, prev_val)
super().__setattr__('_recursion_depth', self._recursion_depth - 1)
context.kb._global_propagations -= 1
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
def __delitem__(self, attr):
for _, attrs in self._edge.items():
if attrs['pred'] == self._pred:
del attrs[attr]
def get(self, attr, default):
try:
return self.attrs[attr]
except:
return default
@property
def nodes(self):
"""Return the nodes that this edge is connected to as tuple of (subject, object)
"""
return [context.kb.node(self._sub), context.kb.node(self._ob)]
@property
def attrs(self):
"""Returns attributes of the edge stored in the KB
"""
attributes = None
for _, edge in self._edge.items():
if edge['pred'] == self._pred:
attributes = copy.deepcopy(edge)
if attributes is None:
return False
try:
del attributes['pred']
del attributes['_watches']
except:
pass
return attributes
def watch(self, attribute, fn):
"""Execute user-defined function when the value of attribute changes.
Function takes two args: `edge` which has access to all
its own attributes, and the second
arg is the previous value of the attribute that changed.
As cycles are possible in the graph, changes to an edge attribute, that
change the attributes of the nodes it's connected to, etc,
may eventually propagate back to change the original edge's attribute again,
ad infinitum until the stack explodes. To prevent this, in one "update cycle", more
than `kb._MAX_RECURSION` updates will be rejected.
:returns int: id of the watch
:Example:
>>> from zincbase import KB
>>> kb = KB()
>>> kb.store('edge(a,b)')
0
>>> edge = kb.edge('a', 'edge', 'b')
>>> edge.resistance = 3
>>> print(edge.resistance)
3
>>> edge.watch('resistance', lambda x, prev_val: print('resistance changed to ' + str(x.resistance)))
('resistance', 0)
>>> edge.resistance += 1
resistance changed to 4
"""
self._watches[attribute].append(fn)
return (attribute, len(self._watches) - 1)
def remove_watch(self, attribute_or_watch_id):
"""Stop watching `attribute_or_watch_id`.
If it is a string, delete all watches for that attribute.
If it is a tuple of (attribute, watch_id): delete that specific watch.
"""
if isinstance(attribute_or_watch_id, tuple):
self._watches[attribute_or_watch_id[0]].pop(attribute_or_watch_id[1])
else:
self._watches[attribute_or_watch_id] = [] | 2,037 | 0 | 320 |
86c69a45f72b481968e7937e112e92137f543764 | 53,829 | py | Python | dataloader/dataset.py | XiaoJake/DS-Net | 8400da1bd7c7b1ccf4d5c6782b86372957e79a6b | [
"MIT"
] | null | null | null | dataloader/dataset.py | XiaoJake/DS-Net | 8400da1bd7c7b1ccf4d5c6782b86372957e79a6b | [
"MIT"
] | null | null | null | dataloader/dataset.py | XiaoJake/DS-Net | 8400da1bd7c7b1ccf4d5c6782b86372957e79a6b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
import pickle
from torch.utils import data
from tqdm import tqdm
from scipy import stats as s
from os.path import join
# load Semantic KITTI class info
with open("semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
# things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
# stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
# things_ids = []
# for i in sorted(list(semkittiyaml['labels'].keys())):
# if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in things:
# things_ids.append(i)
# print(things_ids)
# transformation between Cartesian coordinates and polar coordinates
things_ids = set([10, 11, 13, 15, 16, 18, 20, 30, 31, 32, 252, 253, 254, 255, 256, 257, 258, 259])
# @nb.jit #TODO: why jit would lead to offsets all zero?
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])',nopython=True,cache=True,parallel = False)
if __name__ == '__main__':
dataset = SemKITTI('./sequences', 'train')
dataset.count_box_size()
| 41.598918 | 208 | 0.582734 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
import pickle
from torch.utils import data
from tqdm import tqdm
from scipy import stats as s
from os.path import join
# load Semantic KITTI class info
with open("semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
# things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
# stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
# things_ids = []
# for i in sorted(list(semkittiyaml['labels'].keys())):
# if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in things:
# things_ids.append(i)
# print(things_ids)
class SemKITTI(data.Dataset):
def __init__(self, data_path, imageset = 'train', return_ref = False, return_ins = False):
self.return_ref = return_ref
self.return_ins = return_ins
with open("semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
self.learning_map = semkittiyaml['learning_map']
self.imageset = imageset
if imageset == 'train':
split = semkittiyaml['split']['train']
elif imageset == 'val':
split = semkittiyaml['split']['valid']
elif imageset == 'test':
split = semkittiyaml['split']['test']
else:
raise Exception('Split must be train/val/test')
self.sequences = sorted(split)
self.data_path = data_path
self.im_idx = []
for i_folder in split:
self.im_idx += absoluteFilePaths('/'.join([data_path,str(i_folder).zfill(2),'velodyne']))
self.im_idx.sort()
self.load_calib_poses()
self.im_idx_ind = []
for im in self.im_idx:
frame_path = im.split('/')
frame_id = im.split('/')[-1].split('.')[0]
assert len(frame_id) == 6
frame_id = int(frame_id)
seq = frame_path[-3]
seq_ind = self.seq2ind[seq]
self.im_idx_ind.append((seq_ind, frame_id))
self.things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
self.stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
self.things_ids = []
for i in sorted(list(semkittiyaml['labels'].keys())):
if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in self.things:
self.things_ids.append(i)
def load_calib_poses(self):
"""
load calib poses and times.
"""
###########
# Load data
###########
self.calibrations = []
self.times = []
self.poses = []
self.seq2ind = {}
for i, seq in enumerate(self.sequences):
self.seq2ind[str(seq).zfill(2)] = i
seq_folder = join(self.data_path, str(seq).zfill(2))
# Read Calib
self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))
# Read times
self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32))
# Read poses
poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
self.poses.append([pose.astype(np.float32) for pose in poses_f64])
def parse_calibration(self, filename):
""" read calibration file with given filename
Returns
-------
dict
Calibration matrices as 4x4 numpy arrays.
"""
calib = {}
calib_file = open(filename)
for line in calib_file:
key, content = line.strip().split(":")
values = [float(v) for v in content.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
calib[key] = pose
calib_file.close()
return calib
def parse_poses(self, filename, calibration):
""" read poses file with per-scan poses from given filename
Returns
-------
list
list of poses as 4x4 numpy arrays.
"""
file = open(filename)
poses = []
Tr = calibration["Tr"]
Tr_inv = np.linalg.inv(Tr)
for line in file:
values = [float(v) for v in line.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
return poses
def __len__(self):
'Denotes the total number of samples'
return len(self.im_idx)
def __getitem__(self, index):
raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
# print("loading {}, shape {}".format(self.im_idx[index], raw_data.shape))
if self.imageset == 'test':
annotated_data = np.expand_dims(np.zeros_like(raw_data[:,0],dtype=int),axis=1)
sem_labels = annotated_data
ins_labels = annotated_data
valid = annotated_data
else:
annotated_data = np.fromfile(self.im_idx[index].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
# ins_labels = (annotated_data & 0xFFFF0000) >> 16 # different classes could use same ins ids
ins_labels = annotated_data
# valid = (((ins_labels & 0xFFFF0000) >> 16) != 0).reshape(-1) # TODO: maybe this is not ok
valid = np.isin(sem_labels, self.things_ids).reshape(-1) # use 0 to filter out valid indexes is enough
# print(np.sum(valid) - np.sum((((ins_labels & 0xFFFF0000) >> 16) != 0)))
sem_labels = np.vectorize(self.learning_map.__getitem__)(sem_labels)
data_tuple = (raw_data[:,:3], sem_labels.astype(np.uint8))
if self.return_ref:
data_tuple += (raw_data[:,3],)
if self.return_ins:
data_tuple += (ins_labels, valid)
data_tuple += (self.im_idx[index], self.poses[self.im_idx_ind[index][0]][self.im_idx_ind[index][1]])
return data_tuple
def count_ins(self):
pbar = tqdm(total=len(self.im_idx), dynamic_ncols=True)
counter = np.zeros([9], dtype=np.int32)
min_valid_pn = 10000086
max_valid_pn = -1
for i in range(len(self.im_idx)):
# raw_data = np.fromfile(self.im_idx[i], dtype=np.float32).reshape((-1, 4))
annotated_data = np.fromfile(self.im_idx[i].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
_sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
ins_labels = annotated_data
sem_labels = np.vectorize(self.learning_map.__getitem__)(_sem_labels)
for j in range(1,9):
j_ind = (sem_labels == j)
j_ins_labels = ins_labels[j_ind]
counter[j] += np.unique(j_ins_labels).reshape(-1).shape[0]
pbar.update(1)
valid_pn = np.sum(np.isin(_sem_labels, self.things_ids).reshape(-1))
if valid_pn > max_valid_pn:
max_valid_pn = valid_pn
if valid_pn < min_valid_pn:
min_valid_pn = valid_pn
print(valid_pn, sem_labels.shape[0])
pbar.close()
counter = counter[1:]
print("Counting results: ")
print(counter)
counter = counter.astype(np.float32)
counter /= (np.min(counter) if np.min(counter) != 0 else 1.0)
print("Weights: ")
print(counter)
print("max_valid_pn: {}".format(max_valid_pn))
print("min_valid_pn: {}".format(min_valid_pn))
def count_box_size(self):
pbar = tqdm(total=len(self.im_idx), dynamic_ncols=True)
counter = np.zeros([9], dtype=np.float32)
mean_size = np.zeros([9, 2], dtype=np.float32)
max_size = np.zeros([9, 2], dtype=np.float32)
min_size = np.zeros([9, 2], dtype=np.float32) + 10086
for i in range(len(self.im_idx)):
#if i % 10 != 0:
# pbar.update(1)
# continue
raw_data = np.fromfile(self.im_idx[i], dtype=np.float32).reshape((-1, 4))
annotated_data = np.fromfile(self.im_idx[i].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
_sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
ins_labels = annotated_data
sem_labels = np.vectorize(self.learning_map.__getitem__)(_sem_labels)
pbar.update(1)
for j in range(1, 9):
j_ind = (sem_labels == j)
j_ins_labels = ins_labels[j_ind]
for j_ins_lab in np.unique(j_ins_labels):
j_pcd = raw_data[(ins_labels == j_ins_lab).reshape(-1)]
if j_pcd.shape[0] < 50:
continue
x = j_pcd[:, 0].max() - j_pcd[:, 0].min()
y = j_pcd[:, 1].max() - j_pcd[:, 1].min()
if x < y:
tmp = x
x = y
y = tmp
mean_size[j, 0] += x
mean_size[j, 1] += y
counter[j] += 1
if x > max_size[j, 0]:
max_size[j, 0] = x
if y > max_size[j, 1]:
max_size[j, 1] = y
if x < min_size[j, 0]:
min_size[j, 0] = x
if y < min_size[j, 1]:
min_size[j, 1] = y
pbar.close()
counter[0] = 1
print("Mean Size: {}".format(mean_size / counter.reshape(-1, 1)))
print("Max Size: {}".format(max_size))
print("Min Size: {}".format(min_size))
class SemKITTI_tracking(data.Dataset):
def __init__(self, data_path, imageset = 'train', return_ref = False, return_ins = False):
self.return_ref = return_ref
self.return_ins = return_ins
with open("semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
self.learning_map = semkittiyaml['learning_map']
self.imageset = imageset
if imageset == 'train':
split = semkittiyaml['split']['train']
elif imageset == 'val':
split = semkittiyaml['split']['valid']
elif imageset == 'test':
split = semkittiyaml['split']['test']
else:
raise Exception('Split must be train/val/test')
self.sequences = sorted(split)
self.data_path = data_path
self.im_idx = []
for i_folder in split:
self.im_idx += absoluteFilePaths('/'.join([data_path,str(i_folder).zfill(2),'velodyne']))
self.im_idx.sort()
self.im_pair = []
self.im_pair_ind = []
self.findNext()
self.things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
self.stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
self.things_ids = []
for i in sorted(list(semkittiyaml['labels'].keys())):
if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in self.things:
self.things_ids.append(i)
self.load_calib_poses()
def load_calib_poses(self):
"""
load calib poses and times.
"""
###########
# Load data
###########
self.calibrations = []
self.times = []
self.poses = []
self.seq2ind = {}
for i, seq in enumerate(self.sequences):
self.seq2ind[str(seq).zfill(2)] = i
seq_folder = join(self.data_path, str(seq).zfill(2))
# Read Calib
self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))
# Read times
self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32))
# Read poses
poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
self.poses.append([pose.astype(np.float32) for pose in poses_f64])
def parse_calibration(self, filename):
""" read calibration file with given filename
Returns
-------
dict
Calibration matrices as 4x4 numpy arrays.
"""
calib = {}
calib_file = open(filename)
for line in calib_file:
key, content = line.strip().split(":")
values = [float(v) for v in content.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
calib[key] = pose
calib_file.close()
return calib
def parse_poses(self, filename, calibration):
""" read poses file with per-scan poses from given filename
Returns
-------
list
list of poses as 4x4 numpy arrays.
"""
file = open(filename)
poses = []
Tr = calibration["Tr"]
Tr_inv = np.linalg.inv(Tr)
for line in file:
values = [float(v) for v in line.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
return poses
def __len__(self):
'Denotes the total number of samples'
# return len(self.im_idx)
return len(self.im_pair)
def findNext(self):
for i in self.im_idx:
frame_path = i.split('/')
frame_id = i.split('/')[-1].split('.')[0]
assert len(frame_id) == 6
frame_id = int(frame_id)
im_list = [i]
seq = frame_path[-3]
seq_ind = self.seq2ind[seq]
frame_ind = frame_id
next_frame = str(frame_id + 1).zfill(6) + '.bin'
frame_path[-1] = next_frame
next_frame_path = '/'.join(frame_path)
if os.path.exists(next_frame_path):
self.im_pair.append((i, next_frame_path))
self.im_pair_ind.append((seq_ind, frame_ind, frame_ind + 1))
def __getitem__(self, index):
raw_data = np.fromfile(self.im_pair[index][0], dtype=np.float32).reshape((-1, 4))
next_raw_data = np.fromfile(self.im_pair[index][1], dtype=np.float32).reshape((-1, 4))
if self.imageset == 'test':
raise NotImplementedError
else:
annotated_data = np.fromfile(self.im_pair[index][0].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
# ins_labels = (annotated_data & 0xFFFF0000) >> 16 # different classes could use same ins ids
ins_labels = annotated_data
valid = np.isin(sem_labels, self.things_ids).reshape(-1)
sem_labels = np.vectorize(self.learning_map.__getitem__)(sem_labels)
next_annotated_data = np.fromfile(self.im_pair[index][1].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
next_sem_labels = next_annotated_data & 0xFFFF
next_ins_labels = next_annotated_data
next_valid = np.isin(next_sem_labels, self.things_ids).reshape(-1)
next_sem_labels = np.vectorize(self.learning_map.__getitem__)(next_sem_labels)
data_tuple = (raw_data[:,:3], sem_labels.astype(np.uint8))
next_data_tuple = (next_raw_data[:,:3], next_sem_labels.astype(np.uint8))
if self.return_ref:
data_tuple += (raw_data[:,3],)
next_data_tuple += (next_raw_data[:,3],)
if self.return_ins:
data_tuple += (ins_labels, valid)
next_data_tuple += (next_ins_labels, next_valid)
data_tuple += (self.im_pair[index][0], self.poses[self.im_pair[index][0]][self.im_pair[index][1]])
next_data_tuple += (self.im_pair[index][1], self.poses[self.im_pair[index][0]][self.im_pair[index][2]])
return (next_data_tuple, data_tuple)
class SemKITTI_multi_frames(data.Dataset):
    """SemanticKITTI dataset that returns each scan together with up to
    ``n_frames - 1`` *preceding* scans, merged into a single point cloud
    expressed in the coordinate frame of the most recent scan.

    Per-point semantic/instance labels, a thing-class validity flag and a
    frame-index mask (which source scan each point came from) are merged
    alongside the points.
    """
    def __init__(self, data_path, imageset = 'train', return_ref = False, return_ins = False, n_frames = 3):
        """Index the velodyne scans of the requested split and precompute,
        for every scan, the list of up to ``n_frames`` consecutive scans
        (current plus previous) used by ``__getitem__``.
        """
        self.return_ref = return_ref
        self.return_ins = return_ins
        # Dataset metadata: class names, learning map, split definition.
        with open("semantic-kitti.yaml", 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        if imageset == 'train':
            split = semkittiyaml['split']['train']
        elif imageset == 'val':
            split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            split = semkittiyaml['split']['test']
        else:
            raise Exception('Split must be train/val/test')
        self.sequences = sorted(split)
        self.data_path = data_path
        self.im_idx = []
        # Collect every .bin scan path of the split's sequences.
        for i_folder in split:
            self.im_idx += absoluteFilePaths('/'.join([data_path,str(i_folder).zfill(2),'velodyne']))
        self.im_idx.sort()
        self.things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
        self.stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
        # Raw label ids whose mapped class is a "thing" (has instances).
        self.things_ids = []
        for i in sorted(list(semkittiyaml['labels'].keys())):
            if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in self.things:
                self.things_ids.append(i)
        self.load_calib_poses()
        self.n_frames = n_frames
        # Parallel lists: scan paths per sample / (sequence, frame) indices per sample.
        self.multi_im_list = []
        self.multi_im_list_ind = []
        self.findNFrames()
    def load_calib_poses(self):
        """
        load calib poses and times.

        Fills ``self.calibrations`` / ``self.times`` / ``self.poses`` (one
        entry per sequence) and ``self.seq2ind`` mapping a zero-padded
        sequence string (e.g. "08") to its index in those lists.
        """
        ###########
        # Load data
        ###########
        self.calibrations = []
        self.times = []
        self.poses = []
        self.seq2ind = {}
        for i, seq in enumerate(self.sequences):
            self.seq2ind[str(seq).zfill(2)] = i
            seq_folder = join(self.data_path, str(seq).zfill(2))
            # Read Calib
            self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))
            # Read times
            self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32))
            # Read poses (converted to float32 to match the point data).
            poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
            self.poses.append([pose.astype(np.float32) for pose in poses_f64])
    def parse_calibration(self, filename):
        """ read calibration file with given filename

        Each line is "<key>: <12 floats>" describing a 3x4 transform; a
        bottom row [0, 0, 0, 1] is appended to make it homogeneous.

        Returns
        -------
        dict
            Calibration matrices as 4x4 numpy arrays.
        """
        calib = {}
        calib_file = open(filename)
        for line in calib_file:
            key, content = line.strip().split(":")
            values = [float(v) for v in content.strip().split()]
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0
            calib[key] = pose
        calib_file.close()
        return calib
    def parse_poses(self, filename, calibration):
        """ read poses file with per-scan poses from given filename

        Poses are given in the camera frame; each is conjugated with the
        "Tr" calibration (Tr^-1 * pose * Tr) to express it in the lidar frame.

        Returns
        -------
        list
            list of poses as 4x4 numpy arrays.
        """
        file = open(filename)
        poses = []
        Tr = calibration["Tr"]
        Tr_inv = np.linalg.inv(Tr)
        for line in file:
            values = [float(v) for v in line.strip().split()]
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0
            poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
        file.close()
        return poses
    def findNFrames(self):
        # looking past self.n_frames frames
        # if not enough existing self.n_frames frames, then just find as much as possible
        # e.g. the first frame will only contain one frame
        for i in self.im_idx:
            frame_path = i.split('/')
            frame_id = i.split('/')[-1].split('.')[0]
            assert len(frame_id) == 6
            frame_id = int(frame_id)
            im_list = [i]
            seq = frame_path[-3]
            seq_ind = self.seq2ind[seq]
            frame_ind = frame_id
            im_ind_list = [(seq_ind, frame_ind)]
            # Walk backwards in time; stop at the start of the sequence.
            for j in range(self.n_frames - 1):
                if frame_id - j - 1 >= 0:
                    cur_frame = str(frame_id - j - 1).zfill(6) + '.bin'
                    frame_path[-1] = cur_frame
                    cur_frame_path = '/'.join(frame_path)
                    im_list.append(cur_frame_path)
                    frame_ind -= 1
                    im_ind_list.append((seq_ind, frame_ind))
                else:
                    break
            self.multi_im_list.append(im_list)
            self.multi_im_list_ind.append(im_ind_list)
    def __len__(self):
        # One sample per scan of the split (each sample bundles its history).
        return len(self.multi_im_list)
    def __getitem__(self, index):
        """Load the sample's scans, transform past scans into the newest
        scan's coordinate frame, and return the concatenated arrays:
        (points, sem labels, reflectance, instance labels, valid flags,
        per-point frame index, list of source filenames).
        """
        cur_im_list = self.multi_im_list[index]
        cur_im_ind = self.multi_im_list_ind[index]
        merged_pts = np.zeros([0, 3], dtype=np.float32)
        merged_ref = np.zeros([0, 1], dtype=np.float32)
        merged_sem = np.zeros([0, 1], dtype=np.uint8)
        merged_ins = np.zeros([0, 1], dtype=np.int32)
        merged_valid = np.zeros([0, 1], dtype=np.int32)
        merged_mask = np.zeros([0, 1], dtype=np.uint8)
        merged_fnames = []
        for i, im in enumerate(cur_im_list):
            raw_data = np.fromfile(im, dtype=np.float32).reshape((-1, 4))
            if self.imageset == 'test':
                # No annotations at test time: emit all-zero placeholders.
                annotated_data = np.expand_dims(np.zeros_like(raw_data[:,0],dtype=int),axis=1)
                sem_labels = annotated_data
                ins_labels = annotated_data
                valid = annotated_data
            else:
                annotated_data = np.fromfile(im.replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
                sem_labels = annotated_data & 0xFFFF #delete high 16 digits binary
                ins_labels = annotated_data
                valid = np.isin(sem_labels, self.things_ids).reshape(-1) # use 0 to filter out valid indexes is enough
                sem_labels = np.vectorize(self.learning_map.__getitem__)(sem_labels)
            seq_ind, frame_ind = cur_im_ind[i]
            cur_pose = self.poses[seq_ind][frame_ind]
            if i == 0:
                # First (newest) scan defines the reference frame.
                p_origin = np.zeros((1, 4))
                p_origin[0, 3] = 1
                pose0 = cur_pose
                p0 = p_origin.dot(pose0.T)[:, :3]
                p0 = np.squeeze(p0)
                points = raw_data[:, :3]
            else:
                # to global coor
                hpoints = np.hstack((raw_data[:, :3], np.ones_like(raw_data[:, :1])))
                new_points = np.sum(np.expand_dims(hpoints, 2) * cur_pose.T, axis=1)[:, :3]
                # to first frame coor
                new_coords = new_points - pose0[:3, 3]
                new_coords = np.sum(np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1)
                points = new_coords
            merged_pts = np.vstack((merged_pts, points))
            merged_ref = np.vstack((merged_ref, raw_data[:, 3].reshape(-1, 1)))
            merged_sem = np.vstack((merged_sem, sem_labels))
            merged_ins = np.vstack((merged_ins, ins_labels))
            merged_valid = np.vstack((merged_valid, valid.reshape(-1, 1)))
            # Frame index per point: 0 = current frame, 1..n = older frames.
            merged_mask = np.vstack((merged_mask, np.zeros_like(sem_labels) + i))
            merged_fnames.append(im)
        return (
            merged_pts,
            merged_sem,
            merged_ref,
            merged_ins,
            merged_valid,
            merged_mask,
            merged_fnames,
        )
def absoluteFilePaths(directory):
    """Yield the absolute path of every file under ``directory`` (recursive)."""
    for root, _dirs, names in os.walk(directory):
        for name in names:
            yield os.path.abspath(os.path.join(root, name))
class voxel_dataset(data.Dataset):
    """Voxelize samples of a wrapped point-cloud dataset on a regular
    Cartesian grid.

    ``__getitem__`` returns (voxel positions, per-voxel majority semantic
    label, per-point grid index, per-point labels, per-point features
    [, sample index when ``return_test``]).

    Fixes vs. original: ``astype(np.int)`` -> ``astype(int)`` (``np.int``
    was removed in NumPy 1.24; it was always an alias of the builtin) and
    ``np.matrix`` -> ``np.array`` (deprecated; numerically identical here).
    """
    def __init__(self, in_dataset, grid_size, rotate_aug = False, flip_aug = False, ignore_label = 255, return_test = False,
                 fixed_volume_space= False, max_volume_space = [50,50,1.5], min_volume_space = [-50,-50,-3]):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.flip_aug = flip_aug
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)
    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 4:
            raise Exception('Not implement instance label for voxel_dataset')
        else: raise Exception('Return invalid data tuple')
        # Random data augmentation by rotation around z.
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.array([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # Random data augmentation by flipping x, y or x+y.
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        max_bound = np.percentile(xyz, 100, axis=0)
        min_bound = np.percentile(xyz, 0, axis=0)
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # Per-point grid index; (size-1) makes indices start at 0.
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        # Process voxel positions (center of each voxel in metric space).
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        # Per-voxel semantic label via majority vote over contained points.
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # Center data on each voxel for PTnet.
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) == 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        return data_tuple
# transformation between Cartesian coordinates and polar coordinates
def cart2polar(input_xyz):
    """Convert (N, 3) Cartesian points to cylindrical coordinates.

    Returns an (N, 3) array of (rho, phi, z) with phi in (-pi, pi].
    """
    x, y, z = input_xyz[:, 0], input_xyz[:, 1], input_xyz[:, 2]
    radius = np.sqrt(x ** 2 + y ** 2)
    azimuth = np.arctan2(y, x)
    return np.stack((radius, azimuth, z), axis=1)
def polar2cat(input_xyz_polar):
    """Inverse of ``cart2polar`` for arrays whose leading axis indexes
    (rho, phi, z); returns (x, y, z) stacked on axis 0."""
    rho, phi, z = input_xyz_polar[0], input_xyz_polar[1], input_xyz_polar[2]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), z), axis=0)
class spherical_dataset(data.Dataset):
    """Voxelize samples of a wrapped point-cloud dataset on a cylindrical
    (rho, phi, z) grid, with optional rotation/flip/scale/translation
    augmentation and per-point instance-center offsets for panoptic heads.

    Fixes vs. original: ``astype(np.int)`` -> ``astype(int)`` (``np.int``
    was removed in NumPy 1.24) and ``np.matrix`` -> ``np.array``
    (deprecated; numerically identical here).
    """
    def __init__(self, in_dataset, grid_size, rotate_aug = False, flip_aug = False,
                 scale_aug =False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi/4, max_rad=np.pi/4, ignore_label = 255,
                 return_test = False, fixed_volume_space= False,
                 max_volume_space = [50,np.pi,1.5], min_volume_space = [3,-np.pi,-3],
                 center_type='Axis_center'):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.scale_aug = scale_aug
        self.transform = transform_aug
        self.trans_std = trans_std
        # NOTE(review): sampled once at construction and not used below —
        # kept for interface compatibility.
        self.noise_rotation = np.random.uniform(min_rad, max_rad)
        assert center_type in ['Axis_center', 'Mass_center']
        self.center_type = center_type
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)
    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, ins_labels, valid, pcd_fname = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 7:
            xyz, labels, sig, ins_labels, valid, pcd_fname, pose = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else: raise Exception('Return invalid data tuple')
        # Random data augmentation by rotation around z.
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.array([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # Random data augmentation by flipping x, y or x+y.
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        # Random global scale on x/y.
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        # Random global translation.
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate
        # Convert coordinates into the polar (cylindrical) system.
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # Per-point grid index.
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)  # (size-1) could directly get index starting from 0, very convenient
        if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)  # point-wise grid index
        # Process voxel positions (converted back to Cartesian).
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # Per-voxel semantic label via majority vote over contained points.
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # Center data on each voxel for PTnet.
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers  # TODO: calculate relative coordinate using polar system?
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)  # (grid-wise coor, grid-wise sem label, point-wise grid index, point-wise sem label, [relative polar coor(3), polar coor(3), cat coor(2), ref signal(1)])
        if len(data) == 6:
            offsets = np.zeros([xyz.shape[0], 3], dtype=np.float32)
            offsets = nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, self.center_type)
            data_tuple += (ins_labels, offsets, valid, xyz, pcd_fname)  # plus (point-wise instance label, point-wise center offset)
        if len(data) == 7:
            offsets = np.zeros([xyz.shape[0], 3], dtype=np.float32)
            offsets = nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, self.center_type)
            data_tuple += (ins_labels, offsets, valid, xyz, pcd_fname, pose)  # plus (point-wise instance label, point-wise center offset)
        return data_tuple
class spherical_dataset_tracking(data.Dataset):
    """Cylindrical voxelization for tracking: each sample is a pair of
    frames (current, previous). One set of augmentation parameters is
    sampled per sample and applied identically to both frames so they stay
    geometrically consistent.

    Fixes vs. original: ``astype(np.int)`` -> ``astype(int)`` (``np.int``
    was removed in NumPy 1.24) and ``np.matrix`` -> ``np.array``
    (deprecated; numerically identical here).
    """
    def __init__(self, in_dataset, grid_size, rotate_aug = False, flip_aug = False,
                 scale_aug =False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi/4, max_rad=np.pi/4, ignore_label = 255,
                 return_test = False, fixed_volume_space= False,
                 max_volume_space = [50,np.pi,1.5], min_volume_space = [3,-np.pi,-3],
                 center_type='Axis_center'):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.scale_aug = scale_aug
        self.transform = transform_aug
        self.trans_std = trans_std
        # NOTE(review): sampled once at construction and not used below —
        # kept for interface compatibility.
        self.noise_rotation = np.random.uniform(min_rad, max_rad)
        assert center_type in ['Axis_center', 'Mass_center']
        self.center_type = center_type
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)
    def __getitem__(self, index):
        'Generates one sample of data'
        data, before_data = self.point_cloud_dataset[index]
        xyz, labels, sig, ins_labels, valid, pcd_fname, pose = data
        before_xyz, before_labels, before_sig, before_ins_labels, before_valid, before_pcd_fname, before_pose = before_data
        if len(sig.shape) == 2: sig = np.squeeze(sig)
        if len(before_sig.shape) == 2: before_sig = np.squeeze(before_sig)
        # Sample augmentation parameters ONCE; process_one_frame applies the
        # same transform to both frames.
        aug_info = {}
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            aug_info['j'] = np.array([[c, s], [-s, c]])
        if self.flip_aug:
            aug_info['flip_type'] = np.random.choice(4, 1)
        if self.scale_aug:
            aug_info['noise_scale'] = np.random.uniform(0.95, 1.05)
        if self.transform:
            aug_info['noise_translate'] = np.array([np.random.normal(0, self.trans_std[0], 1),
                                                    np.random.normal(0, self.trans_std[1], 1),
                                                    np.random.normal(0, self.trans_std[2], 1)]).T
        data_tuple = self.process_one_frame(xyz, labels, sig, ins_labels, valid, pcd_fname, aug_info, pose)
        before_data_tuple = self.process_one_frame(before_xyz, before_labels, before_sig, before_ins_labels, before_valid, before_pcd_fname, aug_info, before_pose)
        return data_tuple + before_data_tuple
    def process_one_frame(self, xyz, labels, sig, ins_labels, valid, pcd_fname, aug_info, pose):
        """Apply the shared augmentation and voxelize one frame; returns the
        same per-frame tuple layout as ``spherical_dataset.__getitem__``."""
        # Random data augmentation by rotation around z.
        if self.rotate_aug:
            xyz[:, :2] = np.dot(xyz[:, :2], aug_info['j'])
        # Random data augmentation by flipping x, y or x+y.
        if self.flip_aug:
            if aug_info['flip_type'] == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif aug_info['flip_type'] == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif aug_info['flip_type'] == 3:
                xyz[:, :2] = -xyz[:, :2]
        if self.scale_aug:
            xyz[:, 0] = aug_info['noise_scale'] * xyz[:, 0]
            xyz[:, 1] = aug_info['noise_scale'] * xyz[:, 1]
        if self.transform:
            xyz[:, 0:3] += aug_info['noise_translate']
        # Convert coordinates into the polar (cylindrical) system.
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # Per-point grid index.
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)  # (size-1) could directly get index starting from 0, very convenient
        if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)  # point-wise grid index
        # Process voxel positions (converted back to Cartesian).
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # Per-voxel semantic label via majority vote over contained points.
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # Center data on each voxel for PTnet.
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers  # TODO: calculate relative coordinate using polar system?
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        data_tuple += (grid_ind, labels, return_fea)  # (grid-wise coor, grid-wise sem label, point-wise grid index, point-wise sem label, [relative polar coor(3), polar coor(3), cat coor(2), ref signal(1)])
        offsets = np.zeros([xyz.shape[0], 3], dtype=np.float32)
        offsets = nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, self.center_type)
        data_tuple += (ins_labels, offsets, valid, xyz, pcd_fname, pose)  # plus (point-wise instance label, point-wise center offset)
        return data_tuple
class spherical_dataset_multi_frames(data.Dataset):
    """Cylindrical voxelization for merged multi-frame samples (as produced
    by ``SemKITTI_multi_frames``); each sample additionally carries the
    per-point frame-index mask.

    Fixes vs. original: ``astype(np.int)`` -> ``astype(int)`` (``np.int``
    was removed in NumPy 1.24), ``np.matrix`` -> ``np.array`` (deprecated),
    and the unreachable ``len(data) == 2`` feature branch (dead after
    ``assert len(data) == 7``) removed.
    """
    def __init__(self, in_dataset, grid_size, rotate_aug = False, flip_aug = False,
                 scale_aug =False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi/4, max_rad=np.pi/4, ignore_label = 255,
                 return_test = False, fixed_volume_space= False,
                 max_volume_space = [50,np.pi,1.5], min_volume_space = [3,-np.pi,-3],
                 center_type='Axis_center'):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.scale_aug = scale_aug
        self.transform = transform_aug
        self.trans_std = trans_std
        # NOTE(review): sampled once at construction and not used below —
        # kept for interface compatibility.
        self.noise_rotation = np.random.uniform(min_rad, max_rad)
        assert center_type in ['Axis_center', 'Mass_center']
        self.center_type = center_type
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)
    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        assert len(data) == 7
        xyz, labels, sig, ins_labels, valid, mask, pcd_fname = data
        if len(sig.shape) == 2: sig = np.squeeze(sig)
        # Random data augmentation by rotation around z.
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.array([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # Random data augmentation by flipping x, y or x+y.
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate
        # Convert coordinates into the polar (cylindrical) system.
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # Per-point grid index.
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)  # (size-1) could directly get index starting from 0, very convenient
        if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)  # point-wise grid index
        # Process voxel positions (converted back to Cartesian).
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # Per-voxel semantic label via majority vote over contained points.
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # Center data on each voxel for PTnet.
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        # len(data) == 7 is asserted above, so the reflectance channel is always present.
        return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)  # (grid-wise coor, grid-wise sem label, point-wise grid index, point-wise sem label, [relative polar coor(3), polar coor(3), cat coor(2), ref signal(1)])
        offsets = np.zeros([xyz.shape[0], 3], dtype=np.float32)
        offsets = nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, self.center_type)
        data_tuple += (ins_labels, offsets, valid, xyz, mask, pcd_fname)  # plus (point-wise instance label, point-wise center offset)
        return data_tuple
def calc_xyz_middle(xyz):
    """Center of the axis-aligned bounding box of an (N, >=3) point array.

    Returns a float32 array ``[cx, cy, cz]``.
    """
    lo = np.min(xyz[:, :3], axis=0)
    hi = np.max(xyz[:, :3], axis=0)
    return ((hi + lo) / 2.0).astype(np.float32)
# Raw SemanticKITTI label ids of "thing" (instance-bearing) classes; a
# panoptic label's low 16 bits hold one of these when the point belongs to
# an instance. Set literal instead of set([...]) — same contents, idiomatic.
things_ids = {10, 11, 13, 15, 16, 18, 20, 30, 31, 32, 252, 253, 254, 255, 256, 257, 258, 259}
# @nb.jit  # TODO: why jit would lead to offsets all zero?
def nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, center_type):
    """Fill per-point offsets pointing from each point to the center of its
    "thing" instance; points of non-thing instances keep their input value.

    ``offsets`` is modified in place and also returned.
    """
    for inst_id in np.unique(ins_labels):
        # Low 16 bits of the panoptic label are the semantic class;
        # skip instances whose class is not a "thing".
        if (inst_id & 0xFFFF) not in things_ids:
            continue
        selector = (ins_labels == inst_id).reshape(-1)
        inst_pts = xyz[selector]
        if inst_pts.shape[0] <= 0:
            continue
        if center_type == 'Axis_center':
            center = calc_xyz_middle(inst_pts)
        elif center_type == 'Mass_center':
            center = np.mean(inst_pts, axis=0)
        else:
            raise NotImplementedError
        offsets[selector] = center - inst_pts
    return offsets
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])',nopython=True,cache=True,parallel = False)
def nb_process_label(processed_label,sorted_label_voxel_pair):
    """Majority-vote one semantic label per voxel.

    ``sorted_label_voxel_pair`` rows are [ix, iy, iz, label], pre-sorted so
    that rows of the same voxel are contiguous; ``processed_label`` is the
    uint8 label grid (pre-filled with the ignore label) updated in place
    and returned. Compiled with numba in nopython mode.
    """
    label_size = 256
    # Histogram of labels seen for the voxel currently being scanned.
    counter = np.zeros((label_size,),dtype = np.uint16)
    counter[sorted_label_voxel_pair[0,3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0,:3]
    for i in range(1,sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i,:3]
        if not np.all(np.equal(cur_ind,cur_sear_ind)):
            # Voxel changed: commit the argmax label and reset the histogram.
            processed_label[cur_sear_ind[0],cur_sear_ind[1],cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,),dtype = np.uint16)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i,3]] += 1
    # Commit the final voxel's majority label.
    processed_label[cur_sear_ind[0],cur_sear_ind[1],cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
def collate_fn_BEV(data):
    """Collate per-sample tuples from ``spherical_dataset`` into a batch dict.

    Voxel grids are stacked into tensors along a new batch dimension; the
    variable-length per-point entries stay as Python lists. ``'pose'`` is a
    list only when the samples carry a pose (tuple length > 10), else None.
    """
    grid_coords = np.stack([sample[0] for sample in data]).astype(np.float32)
    grid_labels = np.stack([sample[1] for sample in data])
    batch = {
        'vox_coor': torch.from_numpy(grid_coords),
        'vox_label': torch.from_numpy(grid_labels),
        'grid': [sample[2] for sample in data],            # point-wise grid index
        'pt_labs': [sample[3] for sample in data],         # point-wise sem label
        'pt_fea': [sample[4] for sample in data],          # point-wise features
        'pt_ins_labels': [sample[5] for sample in data],   # point-wise instance label
        'pt_offsets': [sample[6] for sample in data],      # point-wise center offset
        'pt_valid': [sample[7] for sample in data],        # foreground indicator
        'pt_cart_xyz': [sample[8] for sample in data],     # point-wise cart coor
        'pcd_fname': [sample[9] for sample in data],
    }
    batch['pose'] = [sample[10] for sample in data] if len(data[0]) > 10 else None
    return batch
def collate_fn_BEV_multi_frames(data):
    """Collate multi-frame samples (``spherical_dataset_multi_frames``)
    into a batch dict; also stacks the per-point frame-index mask.

    ``'pcd_fname'`` is each sample's newest scan path (first of the list),
    ``'pcd_list_fname'`` the full list of merged scan paths.
    """
    grid_coords = np.stack([sample[0] for sample in data]).astype(np.float32)
    grid_labels = np.stack([sample[1] for sample in data])
    frame_mask = np.stack([sample[9] for sample in data]).astype(np.uint8)
    return {
        'vox_coor': torch.from_numpy(grid_coords),
        'vox_label': torch.from_numpy(grid_labels),
        'grid': [sample[2] for sample in data],            # point-wise grid index
        'pt_labs': [sample[3] for sample in data],         # point-wise sem label
        'pt_fea': [sample[4] for sample in data],          # point-wise features
        'pt_ins_labels': [sample[5] for sample in data],   # point-wise instance label
        'pt_offsets': [sample[6] for sample in data],      # point-wise center offset
        'pt_valid': [sample[7] for sample in data],        # foreground indicator
        'pt_cart_xyz': [sample[8] for sample in data],     # point-wise cart coor
        'pcd_fname': [sample[10][0] for sample in data],
        'pcd_list_fname': [sample[10] for sample in data],
        'mask': torch.from_numpy(frame_mask),
        'mask_np': frame_mask,
    }
def collate_fn_BEV_test(data):
    """Collate for inference: returns a 6-tuple instead of a dict —
    (vox coords, vox labels, grid indices, point labels, point features,
    sample indices)."""
    vox_coords = torch.from_numpy(np.stack([sample[0] for sample in data]).astype(np.float32))
    vox_labels = torch.from_numpy(np.stack([sample[1] for sample in data]))
    grids = [sample[2] for sample in data]
    pt_labels = [sample[3] for sample in data]
    pt_features = [sample[4] for sample in data]
    indices = [sample[5] for sample in data]
    return vox_coords, vox_labels, grids, pt_labels, pt_features, indices
def collate_fn_BEV_tracking(_data):
    """Collate paired (current, previous) tracking samples.

    Each element of ``_data`` is the 22-tuple returned by
    ``spherical_dataset_tracking``: the first 11 entries are the current
    frame, the rest the previous one. Previous-frame keys get a
    ``'before_'`` prefix in the returned dict.
    """
    current = collate_fn_BEV([pair[:11] for pair in _data])
    previous = collate_fn_BEV([pair[11:] for pair in _data])
    current.update({'before_' + key: value for key, value in previous.items()})
    return current
if __name__ == '__main__':
    # Manual smoke test: build the training split from ./sequences and run
    # its box-size accounting (requires the dataset to be present on disk).
    dataset = SemKITTI('./sequences', 'train')
    dataset.count_box_size()
| 25,717 | 26,275 | 388 |
53798844621efdda39001d96aea1bde606980017 | 2,151 | py | Python | tests/test_utils_serialized.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
] | null | null | null | tests/test_utils_serialized.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
] | null | null | null | tests/test_utils_serialized.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: ascii -*-
"""
Serialized usage test.
:date: 2021
:author: Christian Wiche
:contact: cwichel@gmail.com
:license: The MIT License (MIT)
"""
import unittest
from examples.stream_setup import SimplePacket
from embutils.utils import CRC
# -->> Definitions <<------------------
# -->> Test API <<---------------------
class TestSerialized(unittest.TestCase):
    """
    Basic reference tests using the SimplePacket example.
    """

    @staticmethod
    def _reference_bytes():
        """Hand-built serialized form of the reference packet
        (source=0x01, destination=0x02, payload=[0xDD, 0x07]) with its
        little-endian CRC appended — previously duplicated in two tests.
        """
        raw = bytearray([0x01, 0x02, 0x02, 0xDD, 0x07])
        raw.extend(CRC().compute(data=raw).to_bytes(length=2, byteorder='little', signed=False))
        return raw

    def test_01_serialize(self):
        """
        Check if the serialization is being done correctly.
        """
        raw = self._reference_bytes()
        # Frame implementation
        item = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
        # Compare
        assert raw == item.serialize()

    def test_02_deserialize(self):
        """
        Check if the deserialization is being done correctly.
        """
        raw = self._reference_bytes()
        # Frame creation from the hand-built bytes must round-trip.
        item = SimplePacket.deserialize(data=raw)
        assert item is not None
        assert raw == item.serialize()

    def test_03_comparison(self):
        """
        Check if the comparison is being done correctly.
        """
        # Two equal frames and one differing frame.
        item_1 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
        item_2 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
        item_3 = SimplePacket(source=0x02, destination=0x01, payload=bytearray([0xDD, 0x08]))
        # Equality is by value, not identity; serialization must agree.
        assert item_1 is not item_2
        assert item_1 == item_2
        assert item_1.serialize() == item_2.serialize()
        assert item_1 != item_3
        assert item_1.serialize() != item_3.serialize()
# -->> Test Execution <<---------------
if __name__ == '__main__':
    # Discover and run the tests above when the module is executed directly.
    unittest.main()
| 27.935065 | 96 | 0.609484 | #!/usr/bin/python
# -*- coding: ascii -*-
"""
Serialized usage test.
:date: 2021
:author: Christian Wiche
:contact: cwichel@gmail.com
:license: The MIT License (MIT)
"""
import unittest
from examples.stream_setup import SimplePacket
from embutils.utils import CRC
# -->> Definitions <<------------------
# -->> Test API <<---------------------
class TestSerialized(unittest.TestCase):
"""
Basic reference tests using the SimplePacket example.
"""
def test_01_serialize(self):
"""
Check if the serialization is being done correctly.
"""
# By hand
raw = bytearray([0x01, 0x02, 0x02, 0xDD, 0x07])
raw.extend(CRC().compute(data=raw).to_bytes(length=2, byteorder='little', signed=False))
# Frame implementation
item = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
# Compare
assert raw == item.serialize()
def test_02_deserialize(self):
"""
Check if the deserialization is being done correctly.
"""
# By hand
raw = bytearray([0x01, 0x02, 0x02, 0xDD, 0x07])
raw.extend(CRC().compute(data=raw).to_bytes(length=2, byteorder='little', signed=False))
# Frame creation
item = SimplePacket.deserialize(data=raw)
# Compare
assert item is not None
assert raw == item.serialize()
def test_03_comparison(self):
"""
Check if the comparison is being done correctly.
"""
# Create frames
item_1 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
item_2 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
item_3 = SimplePacket(source=0x02, destination=0x01, payload=bytearray([0xDD, 0x08]))
# Compare
assert item_1 is not item_2
assert item_1 == item_2
assert item_1.serialize() == item_2.serialize()
assert item_1 != item_3
assert item_1.serialize() != item_3.serialize()
# -->> Test Execution <<---------------
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
7cc74888d6101a1254757e95a5e30b2406237e2a | 22,720 | py | Python | src/sentry/models/release.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/models/release.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/models/release.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, print_function
import logging
import re
import six
import itertools
from django.db import models, IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from time import time
from sentry.app import locks
from sentry.db.models import (
ArrayField,
BoundedPositiveIntegerField,
FlexibleForeignKey,
JSONField,
Model,
sane_repr,
)
from sentry.constants import BAD_RELEASE_CHARS, COMMIT_RANGE_DELIMITER
from sentry.models import CommitFileChange
from sentry.signals import issue_resolved, release_commits_updated
from sentry.utils import metrics
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.retries import TimedRetryPolicy
logger = logging.getLogger(__name__)
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
_dotted_path_prefix_re = re.compile(r"^([a-zA-Z][a-zA-Z0-9-]+)(\.[a-zA-Z][a-zA-Z0-9-]+)+-")
DB_VERSION_LENGTH = 250
class Release(Model):
"""
A release is generally created when a new version is pushed into a
production state.
"""
__core__ = False
organization = FlexibleForeignKey("sentry.Organization")
projects = models.ManyToManyField(
"sentry.Project", related_name="releases", through=ReleaseProject
)
# DEPRECATED
project_id = BoundedPositiveIntegerField(null=True)
version = models.CharField(max_length=DB_VERSION_LENGTH)
# ref might be the branch name being released
ref = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
# DEPRECATED - not available in UI or editable from API
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = JSONField(default={})
new_groups = BoundedPositiveIntegerField(default=0)
# generally the release manager, or the person initiating the process
owner = FlexibleForeignKey("sentry.User", null=True, blank=True, on_delete=models.SET_NULL)
# materialized stats
commit_count = BoundedPositiveIntegerField(null=True, default=0)
last_commit_id = BoundedPositiveIntegerField(null=True)
authors = ArrayField(null=True)
total_deploys = BoundedPositiveIntegerField(null=True, default=0)
last_deploy_id = BoundedPositiveIntegerField(null=True)
__repr__ = sane_repr("organization_id", "version")
@staticmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
def add_project(self, project):
"""
Add a project to this release.
Returns True if the project was added and did not already exist.
"""
from sentry.models import Project
try:
with transaction.atomic():
ReleaseProject.objects.create(project=project, release=self)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(flags=F("flags").bitor(Project.flags.has_releases))
except IntegrityError:
return False
else:
return True
def handle_commit_ranges(self, refs):
"""
Takes commit refs of the form:
[
{
'previousCommit': None,
'commit': 'previous_commit..commit',
}
]
Note: Overwrites 'previousCommit' and 'commit'
"""
for ref in refs:
if COMMIT_RANGE_DELIMITER in ref["commit"]:
ref["previousCommit"], ref["commit"] = ref["commit"].split(COMMIT_RANGE_DELIMITER)
def set_commits(self, commit_list):
"""
Bind a list of commits to this release.
This will clear any existing commit log and replace it with the given
commits.
"""
# Sort commit list in reverse order
commit_list.sort(key=lambda commit: commit.get("timestamp"), reverse=True)
# TODO(dcramer): this function could use some cleanup/refactoring as its a bit unwieldly
from sentry.models import (
Commit,
CommitAuthor,
Group,
GroupLink,
GroupResolution,
GroupStatus,
ReleaseCommit,
ReleaseHeadCommit,
Repository,
PullRequest,
)
from sentry.plugins.providers.repository import RepositoryProvider
from sentry.tasks.integrations import kick_off_status_syncs
# todo(meredith): implement for IntegrationRepositoryProvider
commit_list = [
c
for c in commit_list
if not RepositoryProvider.should_ignore_commit(c.get("message", ""))
]
lock_key = type(self).get_lock_key(self.organization_id, self.id)
lock = locks.get(lock_key, duration=10)
with TimedRetryPolicy(10)(lock.acquire):
start = time()
with transaction.atomic():
# TODO(dcramer): would be good to optimize the logic to avoid these
# deletes but not overly important
initial_commit_ids = set(
ReleaseCommit.objects.filter(release=self).values_list("commit_id", flat=True)
)
ReleaseCommit.objects.filter(release=self).delete()
authors = {}
repos = {}
commit_author_by_commit = {}
head_commit_by_repo = {}
latest_commit = None
for idx, data in enumerate(commit_list):
repo_name = data.get("repository") or u"organization-{}".format(
self.organization_id
)
if repo_name not in repos:
repos[repo_name] = repo = Repository.objects.get_or_create(
organization_id=self.organization_id, name=repo_name
)[0]
else:
repo = repos[repo_name]
author_email = data.get("author_email")
if author_email is None and data.get("author_name"):
author_email = (
re.sub(r"[^a-zA-Z0-9\-_\.]*", "", data["author_name"]).lower()
+ "@localhost"
)
if not author_email:
author = None
elif author_email not in authors:
author_data = {"name": data.get("author_name")}
author, created = CommitAuthor.objects.create_or_update(
organization_id=self.organization_id,
email=author_email,
values=author_data,
)
if not created:
author = CommitAuthor.objects.get(
organization_id=self.organization_id, email=author_email
)
authors[author_email] = author
else:
author = authors[author_email]
commit_data = {}
defaults = {}
# Update/set message and author if they are provided.
if author is not None:
commit_data["author"] = author
if "message" in data:
commit_data["message"] = data["message"]
if "timestamp" in data:
commit_data["date_added"] = data["timestamp"]
else:
defaults["date_added"] = timezone.now()
commit, created = Commit.objects.create_or_update(
organization_id=self.organization_id,
repository_id=repo.id,
key=data["id"],
defaults=defaults,
values=commit_data,
)
if not created:
commit = Commit.objects.get(
organization_id=self.organization_id,
repository_id=repo.id,
key=data["id"],
)
if author is None:
author = commit.author
commit_author_by_commit[commit.id] = author
patch_set = data.get("patch_set", [])
for patched_file in patch_set:
try:
with transaction.atomic():
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit=commit,
filename=patched_file["path"],
type=patched_file["type"],
)
except IntegrityError:
pass
try:
with transaction.atomic():
ReleaseCommit.objects.create(
organization_id=self.organization_id,
release=self,
commit=commit,
order=idx,
)
except IntegrityError:
pass
if latest_commit is None:
latest_commit = commit
head_commit_by_repo.setdefault(repo.id, commit.id)
self.update(
commit_count=len(commit_list),
authors=[
six.text_type(a_id)
for a_id in ReleaseCommit.objects.filter(
release=self, commit__author_id__isnull=False
)
.values_list("commit__author_id", flat=True)
.distinct()
],
last_commit_id=latest_commit.id if latest_commit else None,
)
metrics.timing("release.set_commits.duration", time() - start)
# fill any missing ReleaseHeadCommit entries
for repo_id, commit_id in six.iteritems(head_commit_by_repo):
try:
with transaction.atomic():
ReleaseHeadCommit.objects.create(
organization_id=self.organization_id,
release_id=self.id,
repository_id=repo_id,
commit_id=commit_id,
)
except IntegrityError:
pass
release_commits = list(
ReleaseCommit.objects.filter(release=self)
.select_related("commit")
.values("commit_id", "commit__key")
)
final_commit_ids = set(rc["commit_id"] for rc in release_commits)
removed_commit_ids = initial_commit_ids - final_commit_ids
added_commit_ids = final_commit_ids - initial_commit_ids
if removed_commit_ids or added_commit_ids:
release_commits_updated.send_robust(
release=self,
removed_commit_ids=removed_commit_ids,
added_commit_ids=added_commit_ids,
sender=self.__class__,
)
commit_resolutions = list(
GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.commit,
linked_id__in=[rc["commit_id"] for rc in release_commits],
).values_list("group_id", "linked_id")
)
commit_group_authors = [
(cr[0], commit_author_by_commit.get(cr[1])) for cr in commit_resolutions # group_id
]
pr_ids_by_merge_commit = list(
PullRequest.objects.filter(
merge_commit_sha__in=[rc["commit__key"] for rc in release_commits],
organization_id=self.organization_id,
).values_list("id", flat=True)
)
pull_request_resolutions = list(
GroupLink.objects.filter(
relationship=GroupLink.Relationship.resolves,
linked_type=GroupLink.LinkedType.pull_request,
linked_id__in=pr_ids_by_merge_commit,
).values_list("group_id", "linked_id")
)
pr_authors = list(
PullRequest.objects.filter(
id__in=[prr[1] for prr in pull_request_resolutions]
).select_related("author")
)
pr_authors_dict = {pra.id: pra.author for pra in pr_authors}
pull_request_group_authors = [
(prr[0], pr_authors_dict.get(prr[1])) for prr in pull_request_resolutions
]
user_by_author = {None: None}
commits_and_prs = list(itertools.chain(commit_group_authors, pull_request_group_authors))
group_project_lookup = dict(
Group.objects.filter(id__in=[group_id for group_id, _ in commits_and_prs]).values_list(
"id", "project_id"
)
)
for group_id, author in commits_and_prs:
if author not in user_by_author:
try:
user_by_author[author] = author.find_users()[0]
except IndexError:
user_by_author[author] = None
actor = user_by_author[author]
with transaction.atomic():
GroupResolution.objects.create_or_update(
group_id=group_id,
values={
"release": self,
"type": GroupResolution.Type.in_release,
"status": GroupResolution.Status.resolved,
"actor_id": actor.id if actor else None,
},
)
group = Group.objects.get(id=group_id)
group.update(status=GroupStatus.RESOLVED)
metrics.incr("group.resolved", instance="in_commit", skip_internal=True)
issue_resolved.send_robust(
organization_id=self.organization_id,
user=actor,
group=group,
project=group.project,
resolution_type="with_commit",
sender=type(self),
)
kick_off_status_syncs.apply_async(
kwargs={"project_id": group_project_lookup[group_id], "group_id": group_id}
)
| 37.553719 | 99 | 0.548636 | from __future__ import absolute_import, print_function
import logging
import re
import six
import itertools
from django.db import models, IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from time import time
from sentry.app import locks
from sentry.db.models import (
ArrayField,
BoundedPositiveIntegerField,
FlexibleForeignKey,
JSONField,
Model,
sane_repr,
)
from sentry.constants import BAD_RELEASE_CHARS, COMMIT_RANGE_DELIMITER
from sentry.models import CommitFileChange
from sentry.signals import issue_resolved, release_commits_updated
from sentry.utils import metrics
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.retries import TimedRetryPolicy
logger = logging.getLogger(__name__)
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
_dotted_path_prefix_re = re.compile(r"^([a-zA-Z][a-zA-Z0-9-]+)(\.[a-zA-Z][a-zA-Z0-9-]+)+-")
DB_VERSION_LENGTH = 250
class ReleaseProject(Model):
    """Through-model joining a Release to a Project.

    Used as the ``through=`` table of ``Release.projects`` below.
    """
    __core__ = False

    project = FlexibleForeignKey("sentry.Project")
    release = FlexibleForeignKey("sentry.Release")
    # Per-project counterpart of Release.new_groups.
    new_groups = BoundedPositiveIntegerField(null=True, default=0)

    class Meta:
        app_label = "sentry"
        db_table = "sentry_release_project"
        # Each (project, release) pair may be linked at most once; callers
        # (e.g. Release.add_project) rely on the IntegrityError this raises.
        unique_together = (("project", "release"),)
class Release(Model):
    """
    A release is generally created when a new version is pushed into a
    production state.

    Releases are scoped to an organization (unique per (organization,
    version)) and may be attached to many projects via the ReleaseProject
    through-model.
    """

    __core__ = False

    organization = FlexibleForeignKey("sentry.Organization")
    projects = models.ManyToManyField(
        "sentry.Project", related_name="releases", through=ReleaseProject
    )
    # DEPRECATED
    project_id = BoundedPositiveIntegerField(null=True)
    version = models.CharField(max_length=DB_VERSION_LENGTH)
    # ref might be the branch name being released
    ref = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
    url = models.URLField(null=True, blank=True)
    date_added = models.DateTimeField(default=timezone.now)
    # DEPRECATED - not available in UI or editable from API
    date_started = models.DateTimeField(null=True, blank=True)
    date_released = models.DateTimeField(null=True, blank=True)
    # arbitrary data recorded with the release
    data = JSONField(default={})
    new_groups = BoundedPositiveIntegerField(default=0)
    # generally the release manager, or the person initiating the process
    owner = FlexibleForeignKey("sentry.User", null=True, blank=True, on_delete=models.SET_NULL)
    # materialized stats; commit_count / last_commit_id / authors are
    # refreshed by set_commits() below.
    commit_count = BoundedPositiveIntegerField(null=True, default=0)
    last_commit_id = BoundedPositiveIntegerField(null=True)
    authors = ArrayField(null=True)
    total_deploys = BoundedPositiveIntegerField(null=True, default=0)
    last_deploy_id = BoundedPositiveIntegerField(null=True)

    class Meta:
        app_label = "sentry"
        db_table = "sentry_release"
        unique_together = (("organization", "version"),)

    __repr__ = sane_repr("organization_id", "version")
@staticmethod
def is_valid_version(value):
    """Return True when *value* is acceptable as a release version string."""
    # Same checks as before, expressed as guard clauses; evaluation order
    # is preserved so error behavior on odd inputs is unchanged.
    if any(c in value for c in BAD_RELEASE_CHARS):
        return False
    if value in (".", ".."):
        return False
    if not value:
        return False
    return value.lower() != "latest"
@classmethod
def get_cache_key(cls, organization_id, version):
    # Key namespace carries a schema version ("release:3"); the version
    # string is md5-hashed so arbitrary characters/lengths are cache-safe.
    return "release:3:%s:%s" % (organization_id, md5_text(version).hexdigest())
@classmethod
def get_lock_key(cls, organization_id, release_id):
    """Build the lock name used by set_commits() to serialize commit updates."""
    return u"releasecommits:%s:%s" % (organization_id, release_id)
@classmethod
def get(cls, project, version):
    """Return the Release for (project, version), or None if unknown.

    Results are cached for 300 seconds; misses are cached too, stored as
    the sentinel value -1 so repeated lookups of unknown versions do not
    hit the database.
    """
    cache_key = cls.get_cache_key(project.organization_id, version)

    release = cache.get(cache_key)
    if release is None:
        try:
            release = cls.objects.get(
                organization_id=project.organization_id, projects=project, version=version
            )
        except cls.DoesNotExist:
            release = -1  # negative-cache marker: "known to not exist"
        cache.set(cache_key, release, 300)

    if release == -1:
        return
    return release
@classmethod
def get_or_create(cls, project, version, date_added=None):
    """Fetch or create the Release for (project, version).

    Checks the short-lived cache first, then the database (also matching
    the legacy "<project-slug>-<version>" form), and finally creates the
    row, tolerating concurrent creators via IntegrityError.  On the
    creation path the project is linked to the release and its
    has_releases flag is set.
    """
    from sentry.models import Project

    if date_added is None:
        date_added = timezone.now()

    cache_key = cls.get_cache_key(project.organization_id, version)

    release = cache.get(cache_key)
    if release in (None, -1):
        # TODO(dcramer): if the cache result is -1 we could attempt a
        # default create here instead of default get
        project_version = ("%s-%s" % (project.slug, version))[:DB_VERSION_LENGTH]
        releases = list(
            cls.objects.filter(
                organization_id=project.organization_id,
                version__in=[version, project_version],
                projects=project,
            )
        )
        if releases:
            # Prefer the project-prefixed (legacy) version when both exist.
            try:
                release = [r for r in releases if r.version == project_version][0]
            except IndexError:
                release = releases[0]
        else:
            try:
                with transaction.atomic():
                    release = cls.objects.create(
                        organization_id=project.organization_id,
                        version=version,
                        date_added=date_added,
                        total_deploys=0,
                    )
            except IntegrityError:
                # Lost the creation race: another writer inserted this row.
                release = cls.objects.get(
                    organization_id=project.organization_id, version=version
                )
            release.add_project(project)
            if not project.flags.has_releases:
                project.flags.has_releases = True
                project.update(flags=F("flags").bitor(Project.flags.has_releases))

        # TODO(dcramer): upon creating a new release, check if it should be
        # the new "latest release" for this project
        cache.set(cache_key, release, 3600)

    return release
@classmethod
def merge(cls, to_release, from_releases):
    """Fold *from_releases* into *to_release*.

    Re-points every model that references each source release to the
    target, then deletes the source release.  Bulk updates that collide
    with rows already present on the target (unique-constraint violation)
    are retried one row at a time, and true duplicates are dropped.
    """
    # The following models reference release:
    # ReleaseCommit.release
    # ReleaseEnvironment.release_id
    # ReleaseProject.release
    # GroupRelease.release_id
    # GroupResolution.release
    # Group.first_release
    # ReleaseFile.release
    from sentry.models import (
        ReleaseCommit,
        ReleaseEnvironment,
        ReleaseFile,
        ReleaseProject,
        ReleaseProjectEnvironment,
        Group,
        GroupRelease,
        GroupResolution,
    )

    model_list = (
        ReleaseCommit,
        ReleaseEnvironment,
        ReleaseFile,
        ReleaseProject,
        ReleaseProjectEnvironment,
        GroupRelease,
        GroupResolution,
    )
    for release in from_releases:
        for model in model_list:
            # Some models expose a "release" FK attribute, others only a
            # raw release_id column; build the update kwargs accordingly.
            if hasattr(model, "release"):
                update_kwargs = {"release": to_release}
            else:
                update_kwargs = {"release_id": to_release.id}
            try:
                with transaction.atomic():
                    model.objects.filter(release_id=release.id).update(**update_kwargs)
            except IntegrityError:
                # Bulk re-point collided with existing target rows; retry
                # row-by-row and delete genuine duplicates.
                for item in model.objects.filter(release_id=release.id):
                    try:
                        with transaction.atomic():
                            model.objects.filter(id=item.id).update(**update_kwargs)
                    except IntegrityError:
                        item.delete()

        Group.objects.filter(first_release=release).update(first_release=to_release)

        release.delete()
def add_dist(self, name, date_added=None):
    """Get or create the Distribution named *name* for this release."""
    from sentry.models import Distribution

    when = timezone.now() if date_added is None else date_added
    dist, _created = Distribution.objects.get_or_create(
        release=self,
        name=name,
        defaults={"date_added": when, "organization_id": self.organization_id},
    )
    return dist
def get_dist(self, name):
    """Return the Distribution named *name* for this release, or None."""
    from sentry.models import Distribution

    try:
        return Distribution.objects.get(name=name, release=self)
    except Distribution.DoesNotExist:
        return None
def add_project(self, project):
    """Associate *project* with this release.

    Returns True if the link was created, False if it already existed
    (the unique constraint on ReleaseProject raises IntegrityError).
    """
    from sentry.models import Project

    created = True
    try:
        with transaction.atomic():
            ReleaseProject.objects.create(project=project, release=self)
            if not project.flags.has_releases:
                project.flags.has_releases = True
                project.update(flags=F("flags").bitor(Project.flags.has_releases))
    except IntegrityError:
        created = False
    return created
def handle_commit_ranges(self, refs):
    """Split commit-range refs in place.

    Each ref is a dict like ``{'previousCommit': None, 'commit':
    'previous_commit..commit'}``.  When the 'commit' value contains the
    range delimiter, it is split so that 'previousCommit' and 'commit'
    are overwritten with the two endpoints.
    """
    for entry in refs:
        commit_spec = entry["commit"]
        if COMMIT_RANGE_DELIMITER not in commit_spec:
            continue
        entry["previousCommit"], entry["commit"] = commit_spec.split(COMMIT_RANGE_DELIMITER)
def set_refs(self, refs, user, fetch=False):
    """Record head commits for this release from a list of ref dicts.

    Each ref looks like ``{"repository": <name>, "commit": <sha or
    "prev..head" range>, ...}``; ranges are split by
    handle_commit_ranges().  Raises InvalidRepository if any referenced
    repository name is unknown to the organization.  When *fetch* is
    True, an async fetch_commits task is kicked off to pull the actual
    commit log (diffed against the previous release found below).
    """
    from sentry.api.exceptions import InvalidRepository
    from sentry.models import Commit, ReleaseHeadCommit, Repository
    from sentry.tasks.commits import fetch_commits

    # TODO: this does the wrong thing unless you are on the most
    # recent release.  Add a timestamp compare?
    prev_release = (
        type(self)
        .objects.filter(organization_id=self.organization_id, projects__in=self.projects.all())
        .extra(select={"sort": "COALESCE(date_released, date_added)"})
        .exclude(version=self.version)
        .order_by("-sort")
        .first()
    )

    names = {r["repository"] for r in refs}
    repos = list(
        Repository.objects.filter(organization_id=self.organization_id, name__in=names)
    )
    repos_by_name = {r.name: r for r in repos}
    invalid_repos = names - set(repos_by_name.keys())
    if invalid_repos:
        raise InvalidRepository("Invalid repository names: %s" % ",".join(invalid_repos))

    self.handle_commit_ranges(refs)

    for ref in refs:
        repo = repos_by_name[ref["repository"]]

        commit = Commit.objects.get_or_create(
            organization_id=self.organization_id, repository_id=repo.id, key=ref["commit"]
        )[0]
        # update head commit for repo/release if exists
        ReleaseHeadCommit.objects.create_or_update(
            organization_id=self.organization_id,
            repository_id=repo.id,
            release=self,
            values={"commit": commit},
        )
    if fetch:
        fetch_commits.apply_async(
            kwargs={
                "release_id": self.id,
                "user_id": user.id,
                "refs": refs,
                "prev_release_id": prev_release and prev_release.id,
            }
        )
def set_commits(self, commit_list):
    """
    Bind a list of commits to this release.

    This will clear any existing commit log and replace it with the given
    commits.

    Under a per-release lock this:
      * rewrites the ReleaseCommit log and records head commits per repo,
      * refreshes the materialized stats (commit_count, authors,
        last_commit_id),
      * emits release_commits_updated when the commit set changed,
      * resolves groups linked (via GroupLink) to these commits, or to
        pull requests merged by them, emitting issue_resolved and kicking
        off integration status syncs.
    """
    # Sort commit list in reverse order
    commit_list.sort(key=lambda commit: commit.get("timestamp"), reverse=True)

    # TODO(dcramer): this function could use some cleanup/refactoring as its a bit unwieldly
    from sentry.models import (
        Commit,
        CommitAuthor,
        Group,
        GroupLink,
        GroupResolution,
        GroupStatus,
        ReleaseCommit,
        ReleaseHeadCommit,
        Repository,
        PullRequest,
    )
    from sentry.plugins.providers.repository import RepositoryProvider
    from sentry.tasks.integrations import kick_off_status_syncs

    # todo(meredith): implement for IntegrationRepositoryProvider
    commit_list = [
        c
        for c in commit_list
        if not RepositoryProvider.should_ignore_commit(c.get("message", ""))
    ]

    # Serialize concurrent commit updates for this release.
    lock_key = type(self).get_lock_key(self.organization_id, self.id)
    lock = locks.get(lock_key, duration=10)
    with TimedRetryPolicy(10)(lock.acquire):
        start = time()
        with transaction.atomic():
            # TODO(dcramer): would be good to optimize the logic to avoid these
            # deletes but not overly important
            initial_commit_ids = set(
                ReleaseCommit.objects.filter(release=self).values_list("commit_id", flat=True)
            )
            ReleaseCommit.objects.filter(release=self).delete()

            authors = {}
            repos = {}
            commit_author_by_commit = {}
            head_commit_by_repo = {}
            latest_commit = None
            for idx, data in enumerate(commit_list):
                # Resolve (and memoize) the repository row for this commit.
                repo_name = data.get("repository") or u"organization-{}".format(
                    self.organization_id
                )
                if repo_name not in repos:
                    repos[repo_name] = repo = Repository.objects.get_or_create(
                        organization_id=self.organization_id, name=repo_name
                    )[0]
                else:
                    repo = repos[repo_name]

                author_email = data.get("author_email")
                if author_email is None and data.get("author_name"):
                    # Synthesize a stable placeholder email from the name.
                    author_email = (
                        re.sub(r"[^a-zA-Z0-9\-_\.]*", "", data["author_name"]).lower()
                        + "@localhost"
                    )

                # Resolve (and memoize) the CommitAuthor row.
                if not author_email:
                    author = None
                elif author_email not in authors:
                    author_data = {"name": data.get("author_name")}
                    author, created = CommitAuthor.objects.create_or_update(
                        organization_id=self.organization_id,
                        email=author_email,
                        values=author_data,
                    )
                    if not created:
                        # create_or_update does not return the instance on
                        # the update path, so fetch the real row.
                        author = CommitAuthor.objects.get(
                            organization_id=self.organization_id, email=author_email
                        )
                    authors[author_email] = author
                else:
                    author = authors[author_email]

                commit_data = {}
                defaults = {}

                # Update/set message and author if they are provided.
                if author is not None:
                    commit_data["author"] = author
                if "message" in data:
                    commit_data["message"] = data["message"]
                if "timestamp" in data:
                    commit_data["date_added"] = data["timestamp"]
                else:
                    defaults["date_added"] = timezone.now()

                commit, created = Commit.objects.create_or_update(
                    organization_id=self.organization_id,
                    repository_id=repo.id,
                    key=data["id"],
                    defaults=defaults,
                    values=commit_data,
                )
                if not created:
                    # Same create_or_update caveat as for CommitAuthor above.
                    commit = Commit.objects.get(
                        organization_id=self.organization_id,
                        repository_id=repo.id,
                        key=data["id"],
                    )

                if author is None:
                    author = commit.author

                commit_author_by_commit[commit.id] = author

                # Record per-file changes; duplicates are simply skipped.
                patch_set = data.get("patch_set", [])
                for patched_file in patch_set:
                    try:
                        with transaction.atomic():
                            CommitFileChange.objects.create(
                                organization_id=self.organization.id,
                                commit=commit,
                                filename=patched_file["path"],
                                type=patched_file["type"],
                            )
                    except IntegrityError:
                        pass

                try:
                    with transaction.atomic():
                        ReleaseCommit.objects.create(
                            organization_id=self.organization_id,
                            release=self,
                            commit=commit,
                            order=idx,
                        )
                except IntegrityError:
                    pass

                # commit_list is sorted newest-first, so the first commit
                # seen is the latest one overall and per repository.
                if latest_commit is None:
                    latest_commit = commit

                head_commit_by_repo.setdefault(repo.id, commit.id)

            # Refresh materialized stats on the release row.
            self.update(
                commit_count=len(commit_list),
                authors=[
                    six.text_type(a_id)
                    for a_id in ReleaseCommit.objects.filter(
                        release=self, commit__author_id__isnull=False
                    )
                    .values_list("commit__author_id", flat=True)
                    .distinct()
                ],
                last_commit_id=latest_commit.id if latest_commit else None,
            )
        metrics.timing("release.set_commits.duration", time() - start)

    # fill any missing ReleaseHeadCommit entries
    for repo_id, commit_id in six.iteritems(head_commit_by_repo):
        try:
            with transaction.atomic():
                ReleaseHeadCommit.objects.create(
                    organization_id=self.organization_id,
                    release_id=self.id,
                    repository_id=repo_id,
                    commit_id=commit_id,
                )
        except IntegrityError:
            pass

    release_commits = list(
        ReleaseCommit.objects.filter(release=self)
        .select_related("commit")
        .values("commit_id", "commit__key")
    )

    # Announce the delta between the old and the new commit log.
    final_commit_ids = set(rc["commit_id"] for rc in release_commits)
    removed_commit_ids = initial_commit_ids - final_commit_ids
    added_commit_ids = final_commit_ids - initial_commit_ids
    if removed_commit_ids or added_commit_ids:
        release_commits_updated.send_robust(
            release=self,
            removed_commit_ids=removed_commit_ids,
            added_commit_ids=added_commit_ids,
            sender=self.__class__,
        )

    # Groups marked as resolved by one of these commits...
    commit_resolutions = list(
        GroupLink.objects.filter(
            linked_type=GroupLink.LinkedType.commit,
            linked_id__in=[rc["commit_id"] for rc in release_commits],
        ).values_list("group_id", "linked_id")
    )

    commit_group_authors = [
        (cr[0], commit_author_by_commit.get(cr[1])) for cr in commit_resolutions  # group_id
    ]

    # ...plus groups resolved by a pull request whose merge commit is in
    # this release.
    pr_ids_by_merge_commit = list(
        PullRequest.objects.filter(
            merge_commit_sha__in=[rc["commit__key"] for rc in release_commits],
            organization_id=self.organization_id,
        ).values_list("id", flat=True)
    )

    pull_request_resolutions = list(
        GroupLink.objects.filter(
            relationship=GroupLink.Relationship.resolves,
            linked_type=GroupLink.LinkedType.pull_request,
            linked_id__in=pr_ids_by_merge_commit,
        ).values_list("group_id", "linked_id")
    )

    pr_authors = list(
        PullRequest.objects.filter(
            id__in=[prr[1] for prr in pull_request_resolutions]
        ).select_related("author")
    )

    pr_authors_dict = {pra.id: pra.author for pra in pr_authors}

    pull_request_group_authors = [
        (prr[0], pr_authors_dict.get(prr[1])) for prr in pull_request_resolutions
    ]

    user_by_author = {None: None}

    commits_and_prs = list(itertools.chain(commit_group_authors, pull_request_group_authors))

    group_project_lookup = dict(
        Group.objects.filter(id__in=[group_id for group_id, _ in commits_and_prs]).values_list(
            "id", "project_id"
        )
    )

    for group_id, author in commits_and_prs:
        # Map the commit/PR author onto a Sentry user (memoized); missing
        # matches resolve to None.
        if author not in user_by_author:
            try:
                user_by_author[author] = author.find_users()[0]
            except IndexError:
                user_by_author[author] = None
        actor = user_by_author[author]

        with transaction.atomic():
            GroupResolution.objects.create_or_update(
                group_id=group_id,
                values={
                    "release": self,
                    "type": GroupResolution.Type.in_release,
                    "status": GroupResolution.Status.resolved,
                    "actor_id": actor.id if actor else None,
                },
            )
            group = Group.objects.get(id=group_id)
            group.update(status=GroupStatus.RESOLVED)
            metrics.incr("group.resolved", instance="in_commit", skip_internal=True)

        issue_resolved.send_robust(
            organization_id=self.organization_id,
            user=actor,
            group=group,
            project=group.project,
            resolution_type="with_commit",
            sender=type(self),
        )

        kick_off_status_syncs.apply_async(
            kwargs={"project_id": group_project_lookup[group_id], "group_id": group_id}
        )
)
| 7,134 | 452 | 287 |
99f7d22741a69a05f92fb1e23cbfa5c23a93ecba | 992 | py | Python | 02/01/startswith.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | 02/01/startswith.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | 39 | 2017-07-31T22:54:01.000Z | 2017-08-31T00:19:03.000Z | 02/01/startswith.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | print('abc'.startswith('a'))
# --- str.startswith: a single-string prefix must match at position 0 ---
print('abc'.startswith('b'))    # False
print('abc'.startswith('c'))    # False
print('abc'.startswith('bc'))   # False: substring, but not a prefix
print('abc'.startswith('abc'))  # True: the whole string is its own prefix
# startswith() also accepts a tuple of prefixes (True if any matches);
# casefold() makes the comparison caseless ('aBc' -> 'abc').
print('aBc'.casefold().startswith(('b', 'c')))    # False: 'abc' starts with 'a'
print('aBc'.casefold().startswith(('x', 'y')))    # False
print('aBc'.casefold().startswith('A'.casefold()))    # True
print('aBc'.casefold().startswith('b'.casefold()))    # False
print('aBc'.casefold().startswith('C'.casefold()))    # False
print('aBc'.casefold().startswith('bC'.casefold()))   # False
print('aBc'.casefold().startswith('AbC'.casefold()))  # True
print()
# --- str.endswith: mirror image, matching at the end of the string ---
print('abc'.endswith('a'))    # False
print('abc'.endswith('b'))    # False
print('abc'.endswith('c'))    # True
print('abc'.endswith('bc'))   # True
print('abc'.endswith('abc'))  # True
print('aBc'.casefold().endswith(('b', 'c')))    # True: 'abc' ends with 'c'
print('aBc'.casefold().endswith(('x', 'y')))    # False
print('aBc'.casefold().endswith('A'.casefold()))    # False
print('aBc'.casefold().endswith('b'.casefold()))    # False
print('aBc'.casefold().endswith('C'.casefold()))    # True
print('aBc'.casefold().endswith('bC'.casefold()))   # True
print('aBc'.casefold().endswith('AbC'.casefold()))  # True
| 29.176471 | 52 | 0.650202 | print('abc'.startswith('a'))
print('abc'.startswith('b'))
print('abc'.startswith('c'))
print('abc'.startswith('bc'))
print('abc'.startswith('abc'))
print('aBc'.casefold().startswith(('b', 'c')))
print('aBc'.casefold().startswith(('x', 'y')))
print('aBc'.casefold().startswith('A'.casefold()))
print('aBc'.casefold().startswith('b'.casefold()))
print('aBc'.casefold().startswith('C'.casefold()))
print('aBc'.casefold().startswith('bC'.casefold()))
print('aBc'.casefold().startswith('AbC'.casefold()))
print()
print('abc'.endswith('a'))
print('abc'.endswith('b'))
print('abc'.endswith('c'))
print('abc'.endswith('bc'))
print('abc'.endswith('abc'))
print('aBc'.casefold().endswith(('b', 'c')))
print('aBc'.casefold().endswith(('x', 'y')))
print('aBc'.casefold().endswith('A'.casefold()))
print('aBc'.casefold().endswith('b'.casefold()))
print('aBc'.casefold().endswith('C'.casefold()))
print('aBc'.casefold().endswith('bC'.casefold()))
print('aBc'.casefold().endswith('AbC'.casefold()))
| 0 | 0 | 0 |
cfe1dfed89332ce52c13620ed1c784c81c5d3d5c | 398 | py | Python | Esercizio 1/Codice/ex1.py | SymonLM/LabCalc1 | f30d5b37678e2b4ef15e8dea536aef6df30e08b4 | [
"Unlicense"
] | 7 | 2021-12-10T23:56:03.000Z | 2022-01-03T19:20:45.000Z | Esercizio 1/Codice/ex1.py | SymonLM/LabCalc1 | f30d5b37678e2b4ef15e8dea536aef6df30e08b4 | [
"Unlicense"
] | 4 | 2021-12-19T08:02:16.000Z | 2021-12-19T21:52:17.000Z | Esercizio 1/Codice/ex1.py | SymonLM/LabCalc1 | f30d5b37678e2b4ef15e8dea536aef6df30e08b4 | [
"Unlicense"
] | 1 | 2021-12-19T11:02:50.000Z | 2021-12-19T11:02:50.000Z | import matplotlib.pyplot as plt
import numpy as np

plt.title('Un primo plot con Python')
# Load the two data columns (Celsius, Fahrenheit) from the data file.
x, y = np.loadtxt('ex1.dat', unpack=True)
plt.plot(x, y, 'o-.b', label='Temperature Convertite')
plt.xlim((-10,130))  # range along the x axis
plt.ylim((10,250))  # range along the y axis
plt.xlabel('Temperature Celsius')
plt.ylabel('Temperature Fahrenheit')
# Bug fix: draw the legend BEFORE saving, otherwise the saved temp.png
# has no legend even though the plot call sets a label.
plt.legend()
plt.savefig('temp.png')
plt.show() | 33.166667 | 55 | 0.723618 | import matplotlib.pyplot as plt
import numpy as np
plt.title('Un primo plot con Python')
x, y = np.loadtxt('ex1.dat', unpack=True)
plt.plot(x ,y, 'o-.b', label='Temperature Convertite')
plt.xlim((-10,130)) # intervallo lungo asse x
plt.ylim((10,250)) # intervallo lungo asse y
plt.xlabel('Temperature Celsius')
plt.ylabel('Temperature Fahrenheit')
plt.savefig('temp.png')
plt.legend()
plt.show() | 0 | 0 | 0 |
d3f9dd1da4670ee11e553ff68eef69aec911d3ad | 2,591 | py | Python | tests/system/web/api_1_0/resources/test_software.py | dimensigon/dimensigon | 079d7c91a66e10f13510d89844fbadb27e005b40 | [
"Apache-2.0"
] | 2 | 2020-11-20T10:27:14.000Z | 2021-02-21T13:57:56.000Z | tests/system/web/api_1_0/resources/test_software.py | dimensigon/dimensigon | 079d7c91a66e10f13510d89844fbadb27e005b40 | [
"Apache-2.0"
] | null | null | null | tests/system/web/api_1_0/resources/test_software.py | dimensigon/dimensigon | 079d7c91a66e10f13510d89844fbadb27e005b40 | [
"Apache-2.0"
] | null | null | null | import os
from flask import url_for
from dimensigon.domain.entities import Software, Server
from dimensigon.utils.helpers import md5
from dimensigon.web import db
from tests.base import TestResourceBase
| 43.183333 | 121 | 0.642995 | import os
from flask import url_for
from dimensigon.domain.entities import Software, Server
from dimensigon.utils.helpers import md5
from dimensigon.web import db
from tests.base import TestResourceBase
class TestSoftwareList(TestResourceBase):
    """System tests for the api_1_0 'softwarelist' resource (GET filtering and POST upload)."""

    def fill_database(self):
        """Seed three Software rows (two Dimensigon versions plus python) used by every test."""
        specs = (
            ('11111111-2222-3333-4444-555555550001', 'Dimensigon', '0.0.1', 'Dimensigon_0.0.1.tar.gz'),
            ('11111111-2222-3333-4444-555555550002', 'Dimensigon', '0.0.2', 'Dimensigon_0.0.2.tar.gz'),
            ('11111111-2222-3333-4444-555555550003', 'python', '3.6.8', 'python_3.6.8.x64.tar.gz'),
        )
        self.soft1, self.soft2, self.soft3 = (
            Software(id=soft_id, name=name, version=version, filename=filename)
            for soft_id, name, version, filename in specs
        )
        db.session.add_all([self.soft1, self.soft2, self.soft3])

    def test_get(self):
        """GET without filters returns every software row."""
        resp = self.client.get(url_for('api_1_0.softwarelist'), headers=self.auth.header)
        expected = [soft.to_json(no_delete=False)
                    for soft in (self.soft1, self.soft2, self.soft3)]
        self.assertListEqual(expected, resp.get_json())

    def test_get_with_filter(self):
        """GET with filter[name] returns only the rows whose name matches."""
        resp = self.client.get(
            url_for('api_1_0.softwarelist', **{'filter[name]': 'Dimensigon'}),
            headers=self.auth.header)
        expected = [soft.to_json(no_delete=False) for soft in (self.soft1, self.soft2)]
        self.assertListEqual(expected, resp.get_json())

    def test_get_with_filter2(self):
        """GET with a comma-separated filter[version] returns each matching version."""
        resp = self.client.get(
            url_for('api_1_0.softwarelist', **{'filter[version]': '0.0.1,3.6.8'}),
            headers=self.auth.header)
        expected = [soft.to_json(no_delete=False) for soft in (self.soft1, self.soft3)]
        self.assertListEqual(expected, resp.get_json())

    def test_post(self):
        """POST registers a software whose size/checksum/filename match the uploaded file."""
        size = os.path.getsize(__file__)
        checksum = md5(__file__)
        filename = os.path.basename(__file__)
        data = dict(name="Dimensigon", version="0.0.3", family='middleware', file=__file__)
        resp = self.client.post(url_for('api_1_0.softwarelist'), headers=self.auth.header, json=data)
        self.assertEqual(201, resp.status_code)
        soft = Software.query.filter_by(name="Dimensigon", version="0.0.3").one()
        self.assertEqual(size, soft.size)
        self.assertEqual(checksum, soft.checksum)
        self.assertEqual(filename, soft.filename)
        self.assertEqual(1, len(soft.ssas))
        ssa = soft.ssas[0]
        self.assertEqual(os.path.dirname(__file__), ssa.path)
| 2,207 | 20 | 158 |
d4e2aaf92bc444dd9c87d874c3f7b979927592ea | 183 | py | Python | detection_spam_project/clustering/urls.py | Altraya/detection_spam | 92404ab9fad5398ac17df885d559a6d96630db1d | [
"MIT"
] | null | null | null | detection_spam_project/clustering/urls.py | Altraya/detection_spam | 92404ab9fad5398ac17df885d559a6d96630db1d | [
"MIT"
] | null | null | null | detection_spam_project/clustering/urls.py | Altraya/detection_spam | 92404ab9fad5398ac17df885d559a6d96630db1d | [
"MIT"
] | null | null | null | from django.conf.urls import url, patterns
from . import views
urlpatterns = patterns('clustering.views',
url(r'^accueil$', 'home'),
url(r'^screen/(\d+)$', views.view_screen),
)
| 22.875 | 44 | 0.688525 | from django.conf.urls import url, patterns
from . import views
urlpatterns = patterns('clustering.views',
url(r'^accueil$', 'home'),
url(r'^screen/(\d+)$', views.view_screen),
)
| 0 | 0 | 0 |
0c1ae7368dcabf173521979dc546a65b378c1f59 | 1,829 | py | Python | setup.py | STARS4ALL/tessdb-import | 424569d66f2ff6f04f2b172d92278524aa0d0c12 | [
"MIT"
] | null | null | null | setup.py | STARS4ALL/tessdb-import | 424569d66f2ff6f04f2b172d92278524aa0d0c12 | [
"MIT"
] | null | null | null | setup.py | STARS4ALL/tessdb-import | 424569d66f2ff6f04f2b172d92278524aa0d0c12 | [
"MIT"
] | null | null | null | import os
import os.path
import io
from setuptools import setup, Extension
import versioneer

# Long description published on PyPI, taken verbatim from the README (markdown).
# Read it with an explicit UTF-8 encoding and close the handle deterministically
# (the original left the file object open and decoded with the locale default).
# io.open is used because the classifiers below still advertise Python 2.7.
with io.open('README.md', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

# Package metadata.
PKG_NAME = 'tessdb-cmdline'
AUTHOR = 'Rafael Gonzalez'
AUTHOR_EMAIL = 'astrorafael@yahoo.es'
# The original ended this assignment with a stray trailing comma, which made
# DESCRIPTION a 1-element tuple; setuptools expects a plain string here.
DESCRIPTION = 'tessdb command line tool to manage tessdb database'
LICENSE = 'MIT'
KEYWORDS = 'Astronomy Python RaspberryPi LightPollution'
URL = 'http://github.com/stars4all/tessdb-comdline/'
PACKAGES = ["tess"]

# Runtime requirements installed alongside the package.
DEPENDENCIES = [
    'tabulate',
    'matplotlib'
]

# PyPI trove classifiers.
CLASSIFIERS = [
    'Environment :: Console',
    'Intended Audience :: Science/Research',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: SQL',
    'Topic :: Scientific/Engineering :: Astronomy',
    'Topic :: Scientific/Engineering :: Atmospheric Science',
    'Development Status :: 4 - Beta',
]

# Command-line entry points shipped as plain scripts.
SCRIPTS = [
    'files/usr/local/bin/tess'
]

if os.name == "posix":
    setup(name=PKG_NAME,
          version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(),
          author=AUTHOR,
          author_email=AUTHOR_EMAIL,
          description=DESCRIPTION,
          long_description_content_type="text/markdown",
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          keywords=KEYWORDS,
          url=URL,
          classifiers=CLASSIFIERS,
          packages=PACKAGES,
          install_requires=DEPENDENCIES,
          scripts=SCRIPTS
          )
else:
    print("Not supported OS")
| 27.298507 | 68 | 0.598688 | import os
import os.path
import io
from setuptools import setup, Extension
import versioneer

# Long description published on PyPI, taken verbatim from the README (markdown).
# Read it with an explicit UTF-8 encoding and close the handle deterministically
# (the original left the file object open and decoded with the locale default).
# io.open is used because the classifiers below still advertise Python 2.7.
with io.open('README.md', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

# Package metadata.
PKG_NAME = 'tessdb-cmdline'
AUTHOR = 'Rafael Gonzalez'
AUTHOR_EMAIL = 'astrorafael@yahoo.es'
# The original ended this assignment with a stray trailing comma, which made
# DESCRIPTION a 1-element tuple; setuptools expects a plain string here.
DESCRIPTION = 'tessdb command line tool to manage tessdb database'
LICENSE = 'MIT'
KEYWORDS = 'Astronomy Python RaspberryPi LightPollution'
URL = 'http://github.com/stars4all/tessdb-comdline/'
PACKAGES = ["tess"]

# Runtime requirements installed alongside the package.
DEPENDENCIES = [
    'tabulate',
    'matplotlib'
]

# PyPI trove classifiers.
CLASSIFIERS = [
    'Environment :: Console',
    'Intended Audience :: Science/Research',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: SQL',
    'Topic :: Scientific/Engineering :: Astronomy',
    'Topic :: Scientific/Engineering :: Atmospheric Science',
    'Development Status :: 4 - Beta',
]

# Command-line entry points shipped as plain scripts.
SCRIPTS = [
    'files/usr/local/bin/tess'
]

if os.name == "posix":
    setup(name=PKG_NAME,
          version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(),
          author=AUTHOR,
          author_email=AUTHOR_EMAIL,
          description=DESCRIPTION,
          long_description_content_type="text/markdown",
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          keywords=KEYWORDS,
          url=URL,
          classifiers=CLASSIFIERS,
          packages=PACKAGES,
          install_requires=DEPENDENCIES,
          scripts=SCRIPTS
          )
else:
    print("Not supported OS")
| 0 | 0 | 0 |
33956487d1a7473e4e9523d340a69d50a0cca0c9 | 5,303 | py | Python | replace_text_with_number.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | 3 | 2019-07-03T13:10:21.000Z | 2020-01-09T10:34:12.000Z | replace_text_with_number.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | null | null | null | replace_text_with_number.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | 1 | 2021-11-08T18:53:12.000Z | 2021-11-08T18:53:12.000Z | #-*-coding:utf8;-*-
import re
from random import choice
class sub(object):
    """Convert numbers spelled out as English words into digit strings."""

    @staticmethod
    def _ordered_rules():
        """Yield (pattern, replacement) pairs in substitution order.

        Order matters: the teen words come before the bare digit words they
        contain, and every compound ten ("twenty one" ... "twenty nine") is
        rewritten before its bare decade word ("twenty").
        """
        ones = ("one", "two", "three", "four", "five",
                "six", "seven", "eight", "nine")
        yield "ten", "10"
        teens = ("eleven", "twelve", "thirteen", "fourteen", "fifteen",
                 "sixteen", "seventeen", "eighteen", "nineteen")
        for offset, teen in enumerate(teens):
            yield teen, str(11 + offset)
        decades = ("twenty", "thirty", "forty", "fifty",
                   "sixty", "seventy", "eighty", "ninety")
        for tens_value, decade in enumerate(decades, start=2):
            for units_value, unit in enumerate(ones, start=1):
                yield "{} {}".format(decade, unit), str(10 * tens_value + units_value)
            yield decade, str(10 * tens_value)
        for units_value, unit in enumerate(ones, start=1):
            yield unit, "0{}".format(units_value)
        yield "hundred", "00"
        yield "thousand", "000"
        yield "million", "000000"
        yield "billion", "000000000"

    def text_to_number(self, text):
        """Lower-case *text* and replace every spelled-out number word with digits."""
        result = text.lower()
        for pattern, replacement in self._ordered_rules():
            result = re.sub(pattern, replacement, result)
        return result
| 44.191667 | 77 | 0.51518 | #-*-coding:utf8;-*-
import re
from random import choice
class sub(object):
    """Convert numbers spelled out as English words into digit strings."""

    @staticmethod
    def _ordered_rules():
        """Yield (pattern, replacement) pairs in substitution order.

        Order matters: the teen words come before the bare digit words they
        contain, and every compound ten ("twenty one" ... "twenty nine") is
        rewritten before its bare decade word ("twenty").
        """
        ones = ("one", "two", "three", "four", "five",
                "six", "seven", "eight", "nine")
        yield "ten", "10"
        teens = ("eleven", "twelve", "thirteen", "fourteen", "fifteen",
                 "sixteen", "seventeen", "eighteen", "nineteen")
        for offset, teen in enumerate(teens):
            yield teen, str(11 + offset)
        decades = ("twenty", "thirty", "forty", "fifty",
                   "sixty", "seventy", "eighty", "ninety")
        for tens_value, decade in enumerate(decades, start=2):
            for units_value, unit in enumerate(ones, start=1):
                yield "{} {}".format(decade, unit), str(10 * tens_value + units_value)
            yield decade, str(10 * tens_value)
        for units_value, unit in enumerate(ones, start=1):
            yield unit, "0{}".format(units_value)
        yield "hundred", "00"
        yield "thousand", "000"
        yield "million", "000000"
        yield "billion", "000000000"

    def text_to_number(self, text):
        """Lower-case *text* and replace every spelled-out number word with digits."""
        result = text.lower()
        for pattern, replacement in self._ordered_rules():
            result = re.sub(pattern, replacement, result)
        return result
| 0 | 0 | 0 |
2fb3b7760ce16f04dae9b780bc957d0768081080 | 1,106 | py | Python | examples/messaging_interactions_transcripts_example.py | estvar19x84/liveperson-api-python-wrapper | 27d8575f542ba029521e7d995bbabb5c4b90d131 | [
"MIT"
] | 1 | 2020-04-06T04:47:18.000Z | 2020-04-06T04:47:18.000Z | examples/messaging_interactions_transcripts_example.py | estvar19x84/liveperson-api-python-wrapper | 27d8575f542ba029521e7d995bbabb5c4b90d131 | [
"MIT"
] | null | null | null | examples/messaging_interactions_transcripts_example.py | estvar19x84/liveperson-api-python-wrapper | 27d8575f542ba029521e7d995bbabb5c4b90d131 | [
"MIT"
] | null | null | null | """
This example shows how to create a Messaging Interactions transcripts CSV flat file from the lp_api_wrapper library.
"""
from lp_api_wrapper import MessagingInteractions, UserLogin
from datetime import datetime, timedelta
import pandas as pd
# For User Login
auth = UserLogin(account_id='1234', username='YOURUSERNAME', password='YOURPASSWORD')
# Create MI Connections
mi_conn = MessagingInteractions(auth=auth)
# Creates Epoch Time from 1 day ago. (If your volume is low, or none. Consider increasing days)
start_from = int((datetime.now() - timedelta(days=1)).timestamp() * 1000)
# Creates Epoch Time right now.
start_to = int(datetime.now().timestamp() * 1000)
# Conversations from date range created above
body = {'start': {'from': start_from, 'to': start_to}}
# Get data!
conversations = mi_conn.conversations(body=body)
# Convert into Pandas DataFrame
df = pd.DataFrame(conversations.message_record)
# File path with file name.
file_path = './transcripts.csv'
# Export into CSV with no index column
df.to_csv(path_or_buf=file_path, index=False)
# Now you have a Transcripts Flat File!
| 29.891892 | 116 | 0.764919 | """
This example shows how to create a Messaging Interactions transcripts CSV flat file from the lp_api_wrapper library.
"""
from lp_api_wrapper import MessagingInteractions, UserLogin
from datetime import datetime, timedelta
import pandas as pd
# For User Login
auth = UserLogin(account_id='1234', username='YOURUSERNAME', password='YOURPASSWORD')
# Create MI Connections
mi_conn = MessagingInteractions(auth=auth)
# Creates Epoch Time from 1 day ago. (If your volume is low, or none. Consider increasing days)
start_from = int((datetime.now() - timedelta(days=1)).timestamp() * 1000)
# Creates Epoch Time right now.
start_to = int(datetime.now().timestamp() * 1000)
# Conversations from date range created above
body = {'start': {'from': start_from, 'to': start_to}}
# Get data!
conversations = mi_conn.conversations(body=body)
# Convert into Pandas DataFrame
df = pd.DataFrame(conversations.message_record)
# File path with file name.
file_path = './transcripts.csv'
# Export into CSV with no index column
df.to_csv(path_or_buf=file_path, index=False)
# Now you have a Transcripts Flat File!
| 0 | 0 | 0 |
e4943d0cbf2264ece34b5f12fed6a79240a520d8 | 855 | py | Python | main.py | kagaya25/How-to-Auto-Login-to-Zoom-using-python- | d2f0d2025f143256edaaef4392dc5c5f653961d0 | [
"MIT"
] | 1 | 2020-11-18T03:51:16.000Z | 2020-11-18T03:51:16.000Z | main.py | kagaya25/How-to-Auto-Login-to-Zoom-using-python- | d2f0d2025f143256edaaef4392dc5c5f653961d0 | [
"MIT"
] | null | null | null | main.py | kagaya25/How-to-Auto-Login-to-Zoom-using-python- | d2f0d2025f143256edaaef4392dc5c5f653961d0 | [
"MIT"
] | null | null | null | from selenium import webdriver
from time import sleep
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from getpass import getpass

# Credentials are asked interactively; getpass keeps the password out of the
# terminal echo (the original used input(), which displayed it in clear text).
usr = input('Enter Email Address :')
pwd = getpass('Enter Password:')

# webdriver-manager downloads a chromedriver matching the locally installed Chrome.
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://zoom.us/signin')
print("Opened Zoom")
sleep(1)  # crude render wait; selenium explicit waits would be more robust

# Fill in the e-mail field.
username_box = driver.find_element_by_css_selector("#email")
username_box.send_keys(usr)
print("Email Id entered")
sleep(1)

# Fill in the password field.
password_box = driver.find_element_by_css_selector('#password')
password_box.send_keys(pwd)
print("Password entered")
sleep(1)

# Submit the sign-in form.
login_box = driver.find_element_by_css_selector("#login-form > div:nth-child(4) > div > div.signin > button")
login_box.click()
print("Done")

input('Press anything to quit')
driver.quit()
print("Finished") | 27.580645 | 110 | 0.749708 | from selenium import webdriver
from time import sleep
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from getpass import getpass

# Credentials are asked interactively; getpass keeps the password out of the
# terminal echo (the original used input(), which displayed it in clear text).
usr = input('Enter Email Address :')
pwd = getpass('Enter Password:')

# webdriver-manager downloads a chromedriver matching the locally installed Chrome.
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://zoom.us/signin')
print("Opened Zoom")
sleep(1)  # crude render wait; selenium explicit waits would be more robust

# Fill in the e-mail field.
username_box = driver.find_element_by_css_selector("#email")
username_box.send_keys(usr)
print("Email Id entered")
sleep(1)

# Fill in the password field.
password_box = driver.find_element_by_css_selector('#password')
password_box.send_keys(pwd)
print("Password entered")
sleep(1)

# Submit the sign-in form.
login_box = driver.find_element_by_css_selector("#login-form > div:nth-child(4) > div > div.signin > button")
login_box.click()
print("Done")

input('Press anything to quit')
driver.quit()
print("Finished") | 0 | 0 | 0 |
0a3917d97fa2bfb17855ede4f3e057d098a6cc11 | 339 | py | Python | resumes/migrations/0004_remove_contactdetails_address_2.py | USUDR2604/Django-ResumeBuilder | 0c6066d96fd20c029e5d5b0a447eaa5e8fc80fb6 | [
"MIT"
] | null | null | null | resumes/migrations/0004_remove_contactdetails_address_2.py | USUDR2604/Django-ResumeBuilder | 0c6066d96fd20c029e5d5b0a447eaa5e8fc80fb6 | [
"MIT"
] | null | null | null | resumes/migrations/0004_remove_contactdetails_address_2.py | USUDR2604/Django-ResumeBuilder | 0c6066d96fd20c029e5d5b0a447eaa5e8fc80fb6 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-12 05:08
from django.db import migrations
| 18.833333 | 47 | 0.60472 | # Generated by Django 3.2.5 on 2021-07-12 05:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resumes', '0003_auto_20210712_1037'),
]
operations = [
migrations.RemoveField(
model_name='contactdetails',
name='Address_2',
),
]
| 0 | 233 | 23 |
605cf64194db6eb62f12a4193d0dc608c00f00c9 | 1,508 | py | Python | src/ecsim/scrapers/census.py | fillstaley/ecsim | f775c8a975dba7a372d0d0831bf8b54df7c27cb2 | [
"MIT"
] | null | null | null | src/ecsim/scrapers/census.py | fillstaley/ecsim | f775c8a975dba7a372d0d0831bf8b54df7c27cb2 | [
"MIT"
] | null | null | null | src/ecsim/scrapers/census.py | fillstaley/ecsim | f775c8a975dba7a372d0d0831bf8b54df7c27cb2 | [
"MIT"
] | null | null | null | from logging import getLogger
from pandas import read_html
# from ecsim._scrapers.base import state_names
logger = getLogger(__name__)
url = "https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_historical_population"
# if __name__ == "__main__":
# data = scrape_data()
# for foo, bar in zip(data.index, state_names):
# print(f"Checking that {foo} is the same as {bar}")
# assert foo == bar
| 27.925926 | 98 | 0.659814 | from logging import getLogger
from pandas import read_html
# from ecsim._scrapers.base import state_names
logger = getLogger(__name__)
url = "https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_historical_population"
def scrape_data():
    """Scrape the Wikipedia historical-population page and return the cleaned states frame.

    Only the 1960--2020 census table is used; the earlier tables are unpacked
    merely to document the page layout. Performs a network request via
    ``pandas.read_html``.
    """
    # NOTE: the original declared ``global url`` here, but the module-level
    # ``url`` is only read, never rebound, so the declaration was a no-op
    # and only suggested mutation that never happens.
    logger.debug(f"Getting US Census data from {url}")
    [
        table1,  # census data for 1790--1860, also admitted years
        table2,  # enslaved population for 1790--1860
        table3,  # census data for 1870--1950
        table4,  # census data for 1960--2020
        *_,      # any remaining tables on the page are ignored
    ] = read_html(url, match="Name", flavor="lxml")
    states, _ = clean_recent_data(table4)  # territories are discarded here
    return states
def clean_recent_data(table):
    """Split the 1960--2020 census table into (states, territories), both indexed by Name."""
    logger.debug("Separating states and territories")
    territory_rows = [2, 12, 37, 42, 48]
    territories = table.loc[territory_rows].copy()
    # Everything that is not a territory is a state; row 56 is the total row.
    states = table.drop(territory_rows).drop(56)
    logger.debug("Cleaning territories data")
    # Strip a 4-character trailing marker and thousands separators from row 37
    # (presumably a footnote suffix on the page -- confirm against the source).
    territories.loc[37, "Name"] = territories.loc[37, "Name"][:-4].replace(",", "")
    territories.loc[37, "1960"] = territories.loc[37, "1960"][:-4].replace(",", "")
    territories.set_index("Name", inplace=True)
    logger.debug("Cleaning states data")
    states.set_index("Name", inplace=True)
    return states, territories
# if __name__ == "__main__":
# data = scrape_data()
# for foo, bar in zip(data.index, state_names):
# print(f"Checking that {foo} is the same as {bar}")
# assert foo == bar
| 1,021 | 0 | 46 |
488b4cc09de4a2a0e1ff3f23b837efa088af88f0 | 2,227 | py | Python | minio/versioningconfig.py | neuneck/minio-py | a964d8c92a2533c3dcd01530308577e7864928de | [
"Apache-2.0"
] | 1 | 2021-01-06T21:13:01.000Z | 2021-01-06T21:13:01.000Z | minio/versioningconfig.py | neuneck/minio-py | a964d8c92a2533c3dcd01530308577e7864928de | [
"Apache-2.0"
] | null | null | null | minio/versioningconfig.py | neuneck/minio-py | a964d8c92a2533c3dcd01530308577e7864928de | [
"Apache-2.0"
] | 1 | 2019-04-02T16:13:36.000Z | 2019-04-02T16:13:36.000Z | # -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request/response of PutBucketVersioning and GetBucketVersioning APIs."""
from __future__ import absolute_import
from .commonconfig import DISABLED, ENABLED
from .xml import Element, SubElement, findtext
OFF = "Off"
SUSPENDED = "Suspended"
class VersioningConfig:
    """Versioning configuration."""

    @property
    def status(self):
        """Get status; ``Off`` when no status was stored."""
        return self._status or OFF

    @property
    def mfa_delete(self):
        """Get MFA delete."""
        return self._mfa_delete

    @classmethod
    def fromxml(cls, element):
        """Create new object with values from XML element."""
        return cls(findtext(element, "Status"), findtext(element, "MFADelete"))

    def toxml(self, element):
        """Convert to XML (the passed-in element is ignored, as before)."""
        root = Element("VersioningConfiguration")
        if self._status:
            SubElement(root, "Status", self._status)
        if self._mfa_delete:
            SubElement(root, "MFADelete", self._mfa_delete)
        return root
| 32.75 | 76 | 0.660979 | # -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request/response of PutBucketVersioning and GetBucketVersioning APIs."""
from __future__ import absolute_import
from .commonconfig import DISABLED, ENABLED
from .xml import Element, SubElement, findtext
OFF = "Off"
SUSPENDED = "Suspended"
class VersioningConfig:
    """Versioning configuration."""

    def __init__(self, status=None, mfa_delete=None):
        """Validate and store the versioning *status* and *mfa_delete* flag (both optional)."""
        if status is not None and status not in (ENABLED, SUSPENDED):
            raise ValueError(
                "status must be {0} or {1}".format(ENABLED, SUSPENDED),
            )
        if mfa_delete is not None and mfa_delete not in (ENABLED, DISABLED):
            raise ValueError(
                "MFA delete must be {0} or {1}".format(ENABLED, DISABLED),
            )
        self._status = status
        self._mfa_delete = mfa_delete

    @property
    def status(self):
        """Get status; ``Off`` when no status was stored."""
        return self._status or OFF

    @property
    def mfa_delete(self):
        """Get MFA delete."""
        return self._mfa_delete

    @classmethod
    def fromxml(cls, element):
        """Create new object with values from XML element."""
        return cls(findtext(element, "Status"), findtext(element, "MFADelete"))

    def toxml(self, element):
        """Convert to XML (the passed-in element is ignored, as before)."""
        root = Element("VersioningConfiguration")
        if self._status:
            SubElement(root, "Status", self._status)
        if self._mfa_delete:
            SubElement(root, "MFADelete", self._mfa_delete)
        return root
| 478 | 0 | 27 |
1d1ba8f14377a03c56514916211c087f53426980 | 10,296 | py | Python | applications/TrilinosApplication/tests/test_trilinos_levelset_convection.py | qaumann/Kratos | fd1702687997322d7a94642fb58e3453f7d4b002 | [
"BSD-4-Clause"
] | null | null | null | applications/TrilinosApplication/tests/test_trilinos_levelset_convection.py | qaumann/Kratos | fd1702687997322d7a94642fb58e3453f7d4b002 | [
"BSD-4-Clause"
] | null | null | null | applications/TrilinosApplication/tests/test_trilinos_levelset_convection.py | qaumann/Kratos | fd1702687997322d7a94642fb58e3453f7d4b002 | [
"BSD-4-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division
import os
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.MetisApplication as MetisApplication
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
import KratosMultiphysics.kratos_utilities as KratosUtils
from KratosMultiphysics.mpi import distributed_import_model_part_utility
from KratosMultiphysics.TrilinosApplication import trilinos_linear_solver_factory
from KratosMultiphysics import ParallelEnvironment
if __name__ == '__main__':
KratosUnittest.main() | 43.627119 | 145 | 0.634421 | from __future__ import print_function, absolute_import, division
import os
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.MetisApplication as MetisApplication
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
import KratosMultiphysics.kratos_utilities as KratosUtils
from KratosMultiphysics.mpi import distributed_import_model_part_utility
from KratosMultiphysics.TrilinosApplication import trilinos_linear_solver_factory
from KratosMultiphysics import ParallelEnvironment
def GetFilePath(fileName):
    """Return *fileName* as an absolute path located next to this test script."""
    this_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(this_dir, fileName)
def BaseDistance(x, y, z):
    """Initial distance field: the parabola 0.8*x - 0.16*x**2 for x <= 5, zero beyond."""
    return -0.16*x**2 + 0.8*x if x <= 5.0 else 0.0
def BaseJumpedDistance(x, y, z):
    """Initial discontinuous field: 1 on the band 5 <= x <= 15, 0 elsewhere."""
    return 1.0 if 5.0 <= x <= 15.0 else 0.0
def ConvectionVelocity(x, y, z):
    """Return the convection velocity at (x, y, z): a constant unit vector along +x."""
    vel = KratosMultiphysics.Vector(3, 0.0)  # 3-component Kratos vector, zero-filled
    vel[0] = 1.0
    return vel
class TestTrilinosLevelSetConvection(KratosUnittest.TestCase):
    """MPI tests for the Trilinos level-set convection process.

    Each test imports and partitions the shared test mesh, sets an initial
    DISTANCE field with a constant VELOCITY, convects it, and checks the
    resulting min/max DISTANCE values reduced across all ranks.
    """
    def setUp(self):
        # Model-part import settings; the mesh file path is resolved
        # relative to this test module's directory.
        self.parameters = """{
            "echo_level" : 0,
            "model_import_settings" : {
                "input_type" : "mdpa",
                "input_filename" : \"""" + GetFilePath("levelset_convection_process_mesh") + """\"
            }
        } """
    def tearDown(self):
        # Clean up the per-rank artifacts produced by the partitioned import.
        my_pid = self.model_part.GetCommunicator().MyPID()
        # Remove the .time file
        KratosUtils.DeleteFileIfExisting("levelset_convection_process_mesh.time")
        # Remove the Metis partitioning files
        KratosUtils.DeleteFileIfExisting("levelset_convection_process_mesh_" + str(my_pid) + ".time")
        KratosUtils.DeleteFileIfExisting("levelset_convection_process_mesh_" + str(my_pid) + ".mdpa")
        # While compiling in debug, the in-memory partitioner also writes the mdpa
        # in plain text and it needs to be cleaned.
        KratosUtils.DeleteFileIfExisting("debug_modelpart_" + str(my_pid) + ".mdpa")
    def test_trilinos_levelset_convection(self):
        """Convect a parabolic DISTANCE profile and check the reduced extrema."""
        current_model = KratosMultiphysics.Model()
        self.model_part = current_model.CreateModelPart("Main",2)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PARTITION_INDEX)
        # Import the model part, perform the partitioning and create communicators
        import_settings = KratosMultiphysics.Parameters(self.parameters)
        DistributedModelPartImporter = distributed_import_model_part_utility.DistributedImportModelPartUtility(self.model_part, import_settings)
        DistributedModelPartImporter.ImportModelPart()
        DistributedModelPartImporter.CreateCommunicators()
        # Recall to set the buffer size
        self.model_part.SetBufferSize(2)
        # Set the initial distance field and the convection velocity
        for node in self.model_part.Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, BaseDistance(node.X,node.Y,node.Z))
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, 0, ConvectionVelocity(node.X,node.Y,node.Z))
        # Fix the left side values
        for node in self.model_part.Nodes:
            if node.X < 0.001:
                node.Fix(KratosMultiphysics.DISTANCE)
        # Set the Trilinos linear solver and Epetra communicator
        trilinos_linear_solver = trilinos_linear_solver_factory.ConstructSolver(
            KratosMultiphysics.Parameters("""{"solver_type" : "amesos" }""")
        )
        epetra_comm = TrilinosApplication.CreateCommunicator()
        # Fake time advance
        self.model_part.CloneTimeStep(40.0)
        # Convect the distance field
        TrilinosApplication.TrilinosLevelSetConvectionProcess2D(
            epetra_comm,
            KratosMultiphysics.DISTANCE,
            self.model_part,
            trilinos_linear_solver).Execute()
        # Check the obtained values (reduced over all MPI ranks)
        max_distance = -1.0
        min_distance = +1.0
        for node in self.model_part.Nodes:
            d = node.GetSolutionStepValue(KratosMultiphysics.DISTANCE)
            max_distance = max(max_distance, d)
            min_distance = min(min_distance, d)
        comm = self.model_part.GetCommunicator().GetDataCommunicator()
        min_distance = comm.MinAll(min_distance)
        max_distance = comm.MaxAll(max_distance)
        self.assertAlmostEqual(max_distance, 0.7333041045431626)
        self.assertAlmostEqual(min_distance,-0.06371359024393104)
    def test_trilinos_levelset_convection_BFECC(self):
        """Convect a step-shaped DISTANCE field with BFECC error compensation."""
        current_model = KratosMultiphysics.Model()
        self.model_part = current_model.CreateModelPart("Main",2)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        self.model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PARTITION_INDEX)
        # Import the model part, perform the partitioning and create communicators
        import_settings = KratosMultiphysics.Parameters(self.parameters)
        DistributedModelPartImporter = distributed_import_model_part_utility.DistributedImportModelPartUtility(self.model_part, import_settings)
        DistributedModelPartImporter.ImportModelPart()
        DistributedModelPartImporter.CreateCommunicators()
        # Recall to set the buffer size
        self.model_part.SetBufferSize(2)
        self.model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, 2)
        # Set the initial distance field and the convection velocity
        for node in self.model_part.Nodes:
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, BaseJumpedDistance(node.X,node.Y,node.Z))
            node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, ConvectionVelocity(node.X,node.Y,node.Z))
        # Fix the left side values
        for node in self.model_part.Nodes:
            if node.X < 0.001:
                node.Fix(KratosMultiphysics.DISTANCE)
        # Set the Trilinos linear solver and Epetra communicator
        trilinos_linear_solver = trilinos_linear_solver_factory.ConstructSolver(
            KratosMultiphysics.Parameters("""{"solver_type" : "amesos" }""")
        )
        epetra_comm = TrilinosApplication.CreateCommunicator()
        comm = ParallelEnvironment.GetDefaultDataCommunicator()
        #self.model_part.GetCommunicator().GetDataCommunicator()
        # Fake time advance
        self.model_part.CloneTimeStep(30.0)
        #kratos_comm = KratosMultiphysics.DataCommunicator.GetDefault()
        # BFECC needs nodal neighbours and the nodal distance gradient first.
        KratosMultiphysics.FindGlobalNodalNeighboursProcess(
            comm, self.model_part).Execute()
        KratosMultiphysics.ComputeNonHistoricalNodalGradientProcess(
            self.model_part,
            KratosMultiphysics.DISTANCE,
            KratosMultiphysics.DISTANCE_GRADIENT,
            KratosMultiphysics.NODAL_AREA).Execute()
        levelset_convection_settings = KratosMultiphysics.Parameters("""{
            "levelset_variable_name" : "DISTANCE",
            "levelset_convection_variable_name" : "VELOCITY",
            "levelset_gradient_variable_name" : "DISTANCE_GRADIENT",
            "max_CFL" : 1.0,
            "max_substeps" : 0,
            "levelset_splitting" : false,
            "eulerian_error_compensation" : true,
            "cross_wind_stabilization_factor" : 0.7
        }""")
        TrilinosApplication.TrilinosLevelSetConvectionProcess2D(
            epetra_comm,
            self.model_part,
            trilinos_linear_solver,
            levelset_convection_settings).Execute()
        max_distance = -1.0
        min_distance = +1.0
        for node in self.model_part.Nodes:
            d = node.GetSolutionStepValue(KratosMultiphysics.DISTANCE)
            max_distance = max(max_distance, d)
            min_distance = min(min_distance, d)
        min_distance = comm.MinAll(min_distance)
        max_distance = comm.MaxAll(max_distance)
        # Debug visualization left for reference:
        # gid_output = GiDOutputProcess(model_part,
        #                            "levelset_test_2D",
        #                            KratosMultiphysics.Parameters("""
        #                                {
        #                                    "result_file_configuration" : {
        #                                        "gidpost_flags": {
        #                                            "GiDPostMode": "GiD_PostBinary",
        #                                            "WriteDeformedMeshFlag": "WriteUndeformed",
        #                                            "WriteConditionsFlag": "WriteConditions",
        #                                            "MultiFileFlag": "SingleFile"
        #                                        },
        #                                        "nodal_results" : ["DISTANCE","VELOCITY"]
        #                                    }
        #                                }
        #                                """)
        #                            )
        # gid_output.ExecuteInitialize()
        # gid_output.ExecuteBeforeSolutionLoop()
        # gid_output.ExecuteInitializeSolutionStep()
        # gid_output.PrintOutput()
        # gid_output.ExecuteFinalizeSolutionStep()
        # gid_output.ExecuteFinalize()
        self.assertAlmostEqual(max_distance, 1.0617777301844604)
        self.assertAlmostEqual(min_distance, -0.061745786561321375)
class TestTrilinosLevelSetConvectionInMemory(TestTrilinosLevelSetConvection):
    """Same tests as the base class, but with in-memory Metis partitioning.

    Only the import settings differ ("partition_in_memory" : true); every
    test method is inherited unchanged.
    """
    def setUp(self):
        self.parameters = """{
            "echo_level" : 0,
            "model_import_settings" : {
                "input_type" : "mdpa",
                "input_filename" : \"""" + GetFilePath("levelset_convection_process_mesh") + """\",
                "partition_in_memory" : true
            }
        } """
# Entry point: run the MPI level-set convection tests.
if __name__ == '__main__':
    KratosUnittest.main()
8724b25b1724a09fb92755e931aac4227407a53f | 331 | py | Python | tools/regression/xsl_reports/utils/zip.py | zyiacas/boost-doc-zh | 689e5a3a0a4dbead1a960f7b039e3decda54aa2c | [
"BSL-1.0"
] | 198 | 2015-01-13T05:47:18.000Z | 2022-03-09T04:46:46.000Z | tools/regression/xsl_reports/utils/zip.py | sdfict/boost-doc-zh | 689e5a3a0a4dbead1a960f7b039e3decda54aa2c | [
"BSL-1.0"
] | 9 | 2015-01-28T16:33:19.000Z | 2020-04-12T23:03:28.000Z | tools/regression/xsl_reports/utils/zip.py | sdfict/boost-doc-zh | 689e5a3a0a4dbead1a960f7b039e3decda54aa2c | [
"BSL-1.0"
] | 139 | 2015-01-15T20:09:31.000Z | 2022-01-31T15:21:16.000Z |
import zipfile
import os.path
| 25.461538 | 70 | 0.60423 |
import zipfile
import os.path
def unzip(archive_path, result_dir):
    """Extract every member of the zip archive at *archive_path* into *result_dir*.

    Members are written under result_dir using their stored names. Unlike the
    previous version, both the archive and every output file are managed with
    context managers, so no handles leak if an extraction step raises.
    """
    with zipfile.ZipFile(archive_path, 'r', zipfile.ZIP_DEFLATED) as archive:
        for info in archive.infolist():
            target_path = os.path.join(result_dir, info.filename)
            with open(target_path, 'wb') as result:
                result.write(archive.read(info.filename))
| 272 | 0 | 25 |
0149416f32756f6d9180e1150524f22901eedcfb | 85 | py | Python | docs/docs_settings.py | leukeleu/django-fiber-multilingual | 4574fffb953c442ff7981c16ea1d460784e38eab | [
"Apache-2.0"
] | 143 | 2015-01-06T01:15:22.000Z | 2017-07-08T04:10:08.000Z | docs/docs_settings.py | check4anjil/django-fiber | 48d1af8867e19b9e27332d2b98ca07a47927de15 | [
"Apache-2.0"
] | 44 | 2015-01-22T14:21:32.000Z | 2017-05-31T16:59:23.000Z | docs/docs_settings.py | check4anjil/django-fiber | 48d1af8867e19b9e27332d2b98ca07a47927de15 | [
"Apache-2.0"
] | 53 | 2015-01-21T21:48:49.000Z | 2017-06-12T07:33:13.000Z | # Mock settings file imported by sphinx when building docs
SECRET_KEY = 'not empty'
| 21.25 | 58 | 0.776471 | # Mock settings file imported by sphinx when building docs
SECRET_KEY = 'not empty'
| 0 | 0 | 0 |
0536bbd1db2cb05cededd1cb0edc40a6651c3fac | 3,442 | py | Python | model/decode_heads/encnet/encnet.py | UESTC-Liuxin/CVMI_Sementic_Segmentation | dc5bf6e940cf6961ef65abb6e7ec372f29d55249 | [
"Apache-2.0"
] | null | null | null | model/decode_heads/encnet/encnet.py | UESTC-Liuxin/CVMI_Sementic_Segmentation | dc5bf6e940cf6961ef65abb6e7ec372f29d55249 | [
"Apache-2.0"
] | null | null | null | model/decode_heads/encnet/encnet.py | UESTC-Liuxin/CVMI_Sementic_Segmentation | dc5bf6e940cf6961ef65abb6e7ec372f29d55249 | [
"Apache-2.0"
] | null | null | null | '''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:43:19
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
'''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:31:07
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
"""Context Encoding for Semantic Segmentation"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.utils.enc_module import EncModule
from model.builder import DECODE_HEAD
__all__ = ['EncNet']
@DECODE_HEAD.register_module("EncNet")
if __name__ == '__main__':
x1 = torch.randn(4,256,64,64)
x2 = torch.randn(4,512,16,16)
x3 = torch.randn(4,1024,16,16)
x4 = torch.randn(4,2048,16,16)
model = EncNet(2048,11)
out = model([x1,x2,x3,x4])
print(type(out))
# outputs = model(img)
| 35.122449 | 95 | 0.606334 | '''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:43:19
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
'''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:31:07
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
"""Context Encoding for Semantic Segmentation"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.utils.enc_module import EncModule
from model.builder import DECODE_HEAD
__all__ = ['EncNet']
@DECODE_HEAD.register_module("EncNet")
class EncNet(nn.Module):
    """Context-Encoding segmentation network head (EncNet).

    Wraps an _EncHead together with an externally supplied match_block
    (applied to the head's segmentation output) and a criterion that
    consumes the segmentation output, the SE output and the ground-truth
    mask, returning the segmentation and SE losses.
    """
    def __init__(self, in_channels, num_classes, criterion, match_block,lateral=True,**kwargs):
        super(EncNet, self).__init__()
        self.head = _EncHead(in_channels, num_classes, lateral=lateral, **kwargs)
        self.match_block = match_block
        self.criterion = criterion
        # 'exclusive' names the submodules owned by this head; presumably
        # consumed elsewhere (e.g. for parameter grouping) -- TODO confirm.
        self.__setattr__('exclusive', ['head'])
    def forward(self, inputs, data_batch):
        # inputs: sequence of backbone feature maps, unpacked into the head.
        base_out, se_out = self.head(*inputs)
        out = self.match_block(base_out)
        seg_loss, se_loss = self.criterion(out, se_out, data_batch["mask"])
        # NOTE(review): the key " seg_loss" carries a leading space -- looks
        # like a typo, but downstream consumers may rely on the exact key;
        # confirm before changing it.
        return {"seg_out":out, " seg_loss":seg_loss, "se_loss":se_loss}
class _EncHead(nn.Module):
    """EncNet decode head.

    Pipeline: a 3x3 bottleneck on the deepest backbone feature (conv5),
    optional lateral 1x1 projections of the second and third feature maps
    fused with the bottleneck (fusion), a context-encoding module
    (encmodule), and a final dropout + 1x1 classifier (conv6).

    forward() takes the backbone feature maps and returns a tuple whose
    first entry is the class logits; any remaining entries come from
    EncModule (presumably the SE-loss logits when se_loss=True -- confirm
    against EncModule's implementation).
    """
    def __init__(self, in_channels, num_classes, se_loss=True, lateral=True,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_EncHead, self).__init__()
        self.lateral = lateral
        # 3x3 bottleneck reducing the deepest feature map to 512 channels.
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels, 512, 3, padding=1, bias=False),
            norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        if lateral:
            # 1x1 projections of inputs[1] (512ch) and inputs[2] (1024ch).
            self.connect = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(512, 512, 1, bias=False),
                    norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),
                    nn.ReLU(True)),
                nn.Sequential(
                    nn.Conv2d(1024, 512, 1, bias=False),
                    norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),
                    nn.ReLU(True)),
            ])
            # Fuses the concatenated bottleneck + two laterals (3 * 512 ch).
            self.fusion = nn.Sequential(
                nn.Conv2d(3 * 512, 512, 3, padding=1, bias=False),
                norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),
                nn.ReLU(True)
            )
        self.encmodule = EncModule(512, num_classes, ncodes=32, se_loss=se_loss,
                                   norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
        # Final classifier: dropout followed by a 1x1 conv to num_classes.
        self.conv6 = nn.Sequential(
            nn.Dropout(0.1, False),
            nn.Conv2d(512, num_classes, 1)
        )
    def forward(self, *inputs):
        feat = self.conv5(inputs[-1])
        if self.lateral:
            c2 = self.connect[0](inputs[1])
            c3 = self.connect[1](inputs[2])
            feat = self.fusion(torch.cat([feat, c2, c3], 1))
        outs = list(self.encmodule(feat))
        outs[0] = self.conv6(outs[0])
        return tuple(outs)
# Quick smoke test with random backbone-shaped tensors.
# NOTE(review): EncNet's __init__ takes (in_channels, num_classes, criterion,
# match_block, ...) and forward takes (inputs, data_batch); the calls below
# pass only two / one arguments and would raise TypeError -- this block looks
# stale relative to the class definition; confirm before relying on it.
if __name__ == '__main__':
    x1 = torch.randn(4,256,64,64)
    x2 = torch.randn(4,512,16,16)
    x3 = torch.randn(4,1024,16,16)
    x4 = torch.randn(4,2048,16,16)
    model = EncNet(2048,11)
    out = model([x1,x2,x3,x4])
    print(type(out))
    # outputs = model(img)
| 2,330 | 8 | 151 |
5283b9315291930963962a4c5aded09fae094e29 | 2,671 | py | Python | universal/algos/tco.py | richmanbtc/universal-portfolios | cd9db76e8f039edafe256b9992e4e65bca96ba7d | [
"MIT"
] | 506 | 2015-01-14T22:34:19.000Z | 2022-03-29T18:36:55.000Z | universal/algos/tco.py | richmanbtc/universal-portfolios | cd9db76e8f039edafe256b9992e4e65bca96ba7d | [
"MIT"
] | 56 | 2015-07-10T15:34:51.000Z | 2022-03-23T22:18:50.000Z | universal/algos/tco.py | richmanbtc/universal-portfolios | cd9db76e8f039edafe256b9992e4e65bca96ba7d | [
"MIT"
] | 165 | 2015-02-07T05:09:38.000Z | 2022-03-29T18:36:57.000Z | import numpy as np
import numpy.typing as npt
from .. import tools
from ..algo import Algo
class TCO(Algo):
"""Transaction costs optimization. The TCO algorithm needs just a next return prediction
to work, see the paper for more details.
Paper : https://ink.library.smu.edu.sg/cgi/viewcontent.cgi?referer=&httpsredir=1&article=4761&context=sis_research
"""
PRICE_TYPE = "raw"
REPLACE_MISSING = True
def __init__(self, trx_fee_pct=0, eta=10, **kwargs):
"""
:param trx_fee_pct: transaction fee in percent
:param eta: smoothing parameter
"""
super().__init__(**kwargs)
self.trx_fee_pct = trx_fee_pct
self.eta = eta
def predict(self, p, history) -> npt.NDArray:
"""Predict returns on next day.
:param p: raw price
"""
raise NotImplementedError()
def update_tco(self, x: npt.NDArray, b: npt.NDArray, x_pred: npt.NDArray):
"""
:param x: ratio of change in price
"""
lambd = 10 * self.trx_fee_pct
# last price adjusted weights
updated_b = np.multiply(b, x) / np.dot(b, x)
# Calculate variables
vt = x_pred / np.dot(updated_b, x_pred)
v_t_ = np.mean(vt)
# Update portfolio
b_1 = self.eta * (vt - np.dot(v_t_, 1))
b_ = updated_b + np.sign(b_1) * np.maximum(
np.zeros(len(b_1)), np.abs(b_1) - lambd
)
# project it onto simplex
proj = tools.simplex_proj(y=b_)
return proj
if __name__ == "__main__":
tools.quickrun(TCO1())
| 26.979798 | 118 | 0.585548 | import numpy as np
import numpy.typing as npt
from .. import tools
from ..algo import Algo
class TCO(Algo):
    """Transaction costs optimization. The TCO algorithm needs just a next return prediction
    to work, see the paper for more details.
    Paper : https://ink.library.smu.edu.sg/cgi/viewcontent.cgi?referer=&httpsredir=1&article=4761&context=sis_research

    Subclasses must implement predict(); this base class provides the
    weight-update rule (update_tco) shared by all variants.
    """
    # Operate on raw prices (not price relatives) and fill missing values.
    PRICE_TYPE = "raw"
    REPLACE_MISSING = True
    def __init__(self, trx_fee_pct=0, eta=10, **kwargs):
        """
        :param trx_fee_pct: transaction fee in percent
        :param eta: smoothing parameter
        """
        super().__init__(**kwargs)
        self.trx_fee_pct = trx_fee_pct
        self.eta = eta
    def init_weights(self, columns):
        """Start from the uniform portfolio over all assets."""
        m = len(columns)
        return np.ones(m) / m
    def step(self, p, last_b, history):
        """Compute the next portfolio from current price, last weights and history."""
        # calculate return prediction
        x_pred = self.predict(p, history)
        x = p / history.iloc[-2]
        b = self.update_tco(x, last_b, x_pred)
        return b
    def predict(self, p, history) -> npt.NDArray:
        """Predict returns on next day.
        :param p: raw price
        :param history: price history up to and including p
        """
        raise NotImplementedError()
    def update_tco(self, x: npt.NDArray, b: npt.NDArray, x_pred: npt.NDArray):
        """Single TCO weight update, soft-thresholded by the transaction cost.
        :param x: ratio of change in price
        :param b: current portfolio weights
        :param x_pred: predicted next price relatives
        :return: new weights projected onto the probability simplex
        """
        lambd = 10 * self.trx_fee_pct
        # last price adjusted weights
        updated_b = np.multiply(b, x) / np.dot(b, x)
        # Calculate variables
        vt = x_pred / np.dot(updated_b, x_pred)
        v_t_ = np.mean(vt)
        # Update portfolio: move along (vt - mean), shrunk towards zero by lambd
        b_1 = self.eta * (vt - np.dot(v_t_, 1))
        b_ = updated_b + np.sign(b_1) * np.maximum(
            np.zeros(len(b_1)), np.abs(b_1) - lambd
        )
        # project it onto simplex
        proj = tools.simplex_proj(y=b_)
        return proj
return proj
class TCO1(TCO):
    """TCO with a one-step price-based prediction.

    type="reversal" predicts mean reversion towards the previous price,
    type="trend" predicts continuation of the latest move.
    """
    def __init__(self, type="reversal", **kwargs):
        self.type = type
        super().__init__(min_history=1, **kwargs)
    def predict(self, p, history):
        """Predict the next price relative from the previous price."""
        prev_price = history.iloc[-2]
        if self.type == "trend":
            return p / prev_price
        if self.type == "reversal":
            return prev_price / p
        raise NotImplementedError()
class TCO2(TCO):
    """TCO with an OLMAR-style moving-average price prediction.

    :param window: look-back window length in periods; must be >= 2 so that
        predict() has at least two prices to average over.
    """
    def __init__(self, window=5, **kwargs):
        # Input check.  The guard enforces window >= 2; the previous error
        # message claimed ">=3", which contradicted the actual condition --
        # the message is now consistent with the check.
        if window < 2:
            raise ValueError("window parameter must be >=2")
        super().__init__(min_history=window, **kwargs)
        self.window = window
    def predict(self, p, history):
        """Predict next price relatives as the window mean of past prices over p."""
        # OLMAR style prediction: mean over the last `window` prices,
        # divided elementwise by the current price.
        return (history.iloc[-self.window :] / p).mean()
# Quick manual check: run the TCO1 variant via the framework's quickrun helper.
if __name__ == "__main__":
    tools.quickrun(TCO1())
| 880 | -10 | 206 |
3bc7505fd36246309e9da9e7e9a9eb38727f649e | 1,690 | py | Python | floodsystem/plot.py | ak2380/Flood-Warning- | 3efe644a211607d64d9e2a82234e779f45e8d703 | [
"MIT"
] | null | null | null | floodsystem/plot.py | ak2380/Flood-Warning- | 3efe644a211607d64d9e2a82234e779f45e8d703 | [
"MIT"
] | null | null | null | floodsystem/plot.py | ak2380/Flood-Warning- | 3efe644a211607d64d9e2a82234e779f45e8d703 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from datetime import datetime, timedelta
from floodsystem.analysis import polyfit
| 30.727273 | 107 | 0.681657 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from datetime import datetime, timedelta
from floodsystem.analysis import polyfit
def plot_water_levels(station, dates, levels):
    """Plot the water-level history for *station*, with the typical range
    drawn as horizontal lines (red = high, gold = low).

    :param station: MonitoringStation-like object with .typical_range and .name
    :param dates: dates for the level samples
    :param levels: water levels corresponding to *dates*
    """
    typical_low, typical_high = station.typical_range[0], station.typical_range[1]
    plt.plot(dates, levels)
    plt.hlines(y=typical_high, xmin=dates[0], xmax=dates[-1], color='red')
    plt.hlines(y=typical_low, xmin=dates[0], xmax=dates[-1], color='gold')
    # Axis labels, rotated date ticks and a title naming the station
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title("Station Name:{}".format(station.name))
    # tight_layout stops the rotated date labels being clipped
    plt.tight_layout()
    plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot water-level history for *station* together with a degree-*p*
    least-squares polynomial fit and the station's typical range.

    :param station: MonitoringStation object (uses .typical_range and .name)
    :param dates: datetime objects for the level samples
    :param levels: water levels corresponding to *dates*
    :param p: degree of the fitted polynomial
    """
    # station is a MonitoringStation object
    # Plots the water level data with the best-fit polynomial
    poly, shift = polyfit(dates, levels, p)
    # Plot original data points
    plt.plot(dates, levels, '.')
    # Plot polynomial fit at 30 points along interval (note that polynomial is evaluated using the shift x)
    x = matplotlib.dates.date2num(dates)
    x1 = np.linspace(x[0], x[-1], 30)
    plt.plot(x1, poly(x1 - shift))
    # Plot typical range lows/highs
    plt.plot([min(dates),max(dates)], [station.typical_range[0], station.typical_range[0]])
    plt.plot([min(dates),max(dates)], [station.typical_range[1], station.typical_range[1]])
    plt.xlabel('Date/time since %s' % dates[0])
    plt.ylabel('Water Level (m)')
    plt.xticks(rotation = 45)
    plt.title("{}".format(station.name))
    # Display plot
    plt.tight_layout()
    plt.show()
a824ce5bc8d317e3044eda2945064609a32f467d | 11,348 | py | Python | solution.py | matanmula172/dragons-and-princesses | 553c6f602d344b169190849b2b9c5469d2d11a00 | [
"MIT"
] | null | null | null | solution.py | matanmula172/dragons-and-princesses | 553c6f602d344b169190849b2b9c5469d2d11a00 | [
"MIT"
] | null | null | null | solution.py | matanmula172/dragons-and-princesses | 553c6f602d344b169190849b2b9c5469d2d11a00 | [
"MIT"
] | null | null | null | import sys
import numpy as np
'''
This function parses the yaml input file, assuming the input is correct The parsing works in the following way:
given a correct file that defines len = n, the function returns two arrays of length n - cell_value_arr (beauty/num
coins in each cell) and cell_title_arr (princess or dragon)
'''
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
this functions returns the index of the previous princess
'''
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
output_array - this array contains the lower_bound and upper_bound for each princess
this functions returns the index of the previous princess with non empty lower bound (explanation will follow)
'''
'''
cell_value_arr - a version of cell_value_array
num_dragons_allowed - a number of dragons the knight is allowed to kill
this function returns the indices of the dragons with most of the coins (bound by num_dragons_allowed)
'''
'''
cell_title_arr - a version of cell_title_arr
this function counts the number of dragons in it, and returns the count
'''
'''
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
index_list - an index_list of dragon cells
prev_princess_index - the index of a previous princess
this function marks the following cells: all cells before prev_princess_index are marked if
they are not in index_list or not a dragon, and the rest of the cells are marked if they are not a dragon
the mark is the lowest integer number (i'm assuming it won't be given as an input)
'''
# This as an explanation for calculate_princess_lower_bound(), calculate_princess_upper_bound(): the output array
# will hold for each princess two lists of indices - lower_bound: minimal number of dragons to kill, that maximize
# coin sum and allow marrying that princess upper_bound: maximal number of dragons to kill, that maximal coin sum and
# allow marrying that princess (without marrying previous princesses)
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
i - current princess index
beauty_val - current princess beauty value
prev_princess_index - the index of a previous princess
this function returns the current princess lower_bound
'''
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
dragon_count_in_range - number of dragons in between current princess and previous princess
i - current princess index
prev_princess_index - the index of a previous princess
this function returns the current princess upper_bound
'''
'''
i - current index in output array
cell_title_arr - a version of cell_title_arr
cell_value_arr - a version of cell_value_arr
output_array - this array contains the lower_bound and upper_bound for each princess
this function uses the previous functions and the previous cells of output_array to calculate
lower_bound and upper_bound of output_array[i], and returns it
'''
'''
output_array - this array contains the lower_bound and upper_bound for each princess
value_array - cell_value_arr (input)
n - index of princess we want to print
this function prints the output according to the instruction
'''
'''
title_arr - cell_title_arr (input)
value_array - cell_value_arr (input)
this function initializes output_array, fills it and prints it
'''
'''
main parses the input and runs run()
'''
if __name__ == '__main__':
input_file = input("Enter file name: for example input_file.yaml\n After output is printed, press Enter\n")
parser_val = parse_input_file(input_file)
if parser_val is not None:
input_title_arr, input_value_arr = parser_val
if len(input_title_arr) != 0:
run(input_title_arr, input_value_arr)
else:
# No princess
print(-1)
input("")
| 38.467797 | 120 | 0.695012 | import sys
import numpy as np
'''
This function parses the yaml input file, assuming the input is correct The parsing works in the following way:
given a correct file that defines len = n, the function returns two arrays of length n - cell_value_arr (beauty/num
coins in each cell) and cell_title_arr (princess or dragon)
'''
def file_is_empty(file_name):
    """Return True if the file is empty, False if it has content.

    If the file does not exist, print a hint for the user and return None.
    """
    try:
        handle = open(file_name, "r")
    except FileNotFoundError:
        print("File not found - type in the correct file name and place the file in the correct folder")
        return None
    return handle.read() == ""
def parse_input_file(file_name):
    """Parse the game input file.

    Expected format: first line is the number of cells n; each of the next
    n-1 lines is "<d|p> <int value>".  Cell 0 is implicitly a princess.

    :return: (title_arr, value_arr) where title_arr[i] is 'd' or 'p' and
        value_arr[i] is the cell's coin count / beauty value; ([], []) for
        n == 0; None on any parse error (a message is printed).

    NOTE(review): file_is_empty() is called twice (two extra opens), the
    opened file handle is never closed, and the `while line` condition
    re-tests the last split() result rather than a raw line -- all appear
    benign for well-formed input but worth confirming.
    """
    if file_is_empty(file_name) is None:
        return
    elif file_is_empty(file_name):
        print("Empty input file")
        return
    try:
        file = open(file_name, "r")
    except Exception as e:
        print(e)
        return
    line = file.readline()
    try:
        array_len = int(line)
    except:
        print("Missing definition of cell number")
        return
    if array_len == 0:
        return [], []
    title_arr = ['' for i in range(array_len)]
    value_arr = np.zeros(array_len)
    # Cell 0 always hosts the knight's starting princess.
    title_arr[0] = 'p'
    i = 1
    while line and i < array_len:
        line = file.readline().split()
        if len(line) < 2:
            print("Missing values in input")
            return
        if line[0] == 'd':
            title_arr[i] = 'd'
        else:
            title_arr[i] = 'p'
        value_arr[i] = int(line[1])
        i += 1
    # Any trailing content means the declared cell count was wrong.
    if len(file.readline()) != 0:
        print("Cell numbers does not match input")
        return
    return title_arr, value_arr
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
this functions returns the index of the previous princess
'''
def get_previous_princess_index(index, title_arr):
    """Return the index of the nearest princess cell strictly before *index*.

    Cell 0 is returned when no princess is found in between (cell 0 is
    always a princess by construction).
    """
    for pos in range(index - 1, 0, -1):
        if title_arr[pos] == 'p':
            return pos
    return 0
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
output_array - this array contains the lower_bound and upper_bound for each princess
this functions returns the index of the previous princess with non empty lower bound (explanation will follow)
'''
def get_non_empty_prev_princess_index(current_index, cell_title_arr, output_array):
    """Walk backwards over princesses until one whose lower bound is
    non-empty is found (or cell 0 is reached), and return its index."""
    idx = get_previous_princess_index(current_index, cell_title_arr)
    while idx != 0 and len(output_array[idx][0]) == 0:
        idx = get_previous_princess_index(idx, cell_title_arr)
    return idx
'''
cell_value_arr - a version of cell_value_array
num_dragons_allowed - a number of dragons the knight is allowed to kill
this function returns the indices of the dragons with most of the coins (bound by num_dragons_allowed)
'''
def get_best_dragon_combination(cell_value_arr, num_dragons_allowed):
    """Return the indices of the highest-value cells, best first, limited to
    num_dragons_allowed entries.

    NOTE(review): when num_dragons_allowed == 0 the slice start becomes -0,
    i.e. [0:], so *all* indices are returned rather than none -- confirm
    callers never pass 0 or that they rely on this behaviour.
    """
    cutoff = int(-1 * num_dragons_allowed)
    ranked_ascending = cell_value_arr.argsort()
    return ranked_ascending[cutoff:][::-1]
'''
cell_title_arr - a version of cell_title_arr
this function counts the number of dragons in it, and returns the count
'''
def dragon_count(cell_title_arr):
    """Return how many cells in *cell_title_arr* are dragons ('d')."""
    return sum(1 for cell in cell_title_arr if cell == 'd')
'''
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
index_list - an index_list of dragon cells
prev_princess_index - the index of a previous princess
this function marks the following cells: all cells before prev_princess_index are marked if
they are not in index_list or not a dragon, and the rest of the cells are marked if they are not a dragon
the mark is the lowest integer number (i'm assuming it won't be given as an input)
'''
def mark_elements(cell_value_arr, cell_title_arr, index_list, prev_princess_index):
    """Mark unusable cells in-place with the sentinel -sys.maxsize and return the array.

    Cells before prev_princess_index stay usable only if they are dragons
    listed in index_list; from prev_princess_index onwards only princess
    cells are marked.
    """
    sentinel = -1 * sys.maxsize
    for pos in range(len(cell_value_arr)):
        is_princess = cell_title_arr[pos] == 'p'
        if pos < prev_princess_index:
            if is_princess or pos not in index_list:
                cell_value_arr[pos] = sentinel
        elif is_princess:
            cell_value_arr[pos] = sentinel
    return cell_value_arr
# This as an explanation for calculate_princess_lower_bound(), calculate_princess_upper_bound(): the output array
# will hold for each princess two lists of indices - lower_bound: minimal number of dragons to kill, that maximize
# coin sum and allow marrying that princess upper_bound: maximal number of dragons to kill, that maximal coin sum and
# allow marrying that princess (without marrying previous princesses)
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
i - current princess index
beauty_val - current princess beauty value
prev_princess_index - the index of a previous princess
this function returns the current princess lower_bound
'''
def calculate_princess_lower_bound(prev_lower_bound, cell_value_arr, cell_title_arr, i, beauty_val,
                                   prev_princess_index):
    """Compute the lower bound (indices of dragons to kill) for the princess at *i*.

    :param prev_lower_bound: lower bound of the previous princess
    :param cell_value_arr: working copy of the cell values (mutated via mark_elements)
    :param cell_title_arr: cell titles ('d'/'p')
    :param i: current princess index
    :param beauty_val: current princess beauty (number of dragons required)
    :param prev_princess_index: index of the previous princess
    :return: numpy array of dragon indices (as floats) forming the lower bound

    NOTE(review): the loop variable shadows the parameter *i*; harmless here
    because *i* is not used after the marking step, but fragile.
    """
    # remove index that gives minimal coin value from prev_lower_bound
    left_hand_lower_bound = prev_lower_bound[:len(prev_lower_bound) - 1]
    # mark all princesses and dragons not in left_hand_lower_bound in cell_value_arr
    dragons_in_range = mark_elements(cell_value_arr[:i], cell_title_arr[:i], left_hand_lower_bound, prev_princess_index)
    potential_lower_bound = get_best_dragon_combination(dragons_in_range, beauty_val)
    lower_bound = np.array([])
    # get unmarked indices from potential_lower_bound
    for i in range(len(potential_lower_bound)):
        if cell_value_arr[int(potential_lower_bound[i])] != sys.maxsize * -1:
            lower_bound = np.append(lower_bound, int(potential_lower_bound[i]))
    return lower_bound
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
dragon_count_in_range - number of dragons in between current princess and previous princess
i - current princess index
prev_princess_index - the index of a previous princess
this function returns the current princess upper_bound
'''
def calculate_princess_upper_bound(prev_lower_bound, cell_value_arr, cell_title_arr, dragon_count_in_range, i,
                                   prev_princess_index):
    """Compute the upper bound (maximal useful set of dragons to kill) for the princess at *i*.

    :param prev_lower_bound: lower bound of the previous princess
    :param cell_value_arr: working copy of the cell values (mutated via mark_elements)
    :param cell_title_arr: cell titles ('d'/'p')
    :param dragon_count_in_range: dragons between the previous and current princess
    :param i: current princess index
    :param prev_princess_index: index of the previous princess
    :return: numpy array of dragon indices forming the upper bound

    NOTE(review): mirrors calculate_princess_lower_bound, including the
    shadowing of *i* by the filter loop.
    """
    # remove index that gives minimal coin value from prev_lower_bound
    left_hand_lower_bound = prev_lower_bound[:len(prev_lower_bound) - 1]
    # mark all princesses and dragons not in left_hand_lower_bound in cell_value_arr
    dragons_in_range = mark_elements(cell_value_arr[:i], cell_title_arr[:i], left_hand_lower_bound, prev_princess_index)
    potential_upper_bound = get_best_dragon_combination(dragons_in_range,
                                                        len(left_hand_lower_bound) + dragon_count_in_range)
    upper_bound = np.array([])
    # get unmarked indices from potential_upper_bound
    for i in range(len(potential_upper_bound)):
        if cell_value_arr[int(potential_upper_bound[i])] != sys.maxsize * -1:
            upper_bound = np.append(upper_bound, potential_upper_bound[i])
    return upper_bound
'''
i - current index in output array
cell_title_arr - a version of cell_title_arr
cell_value_arr - a version of cell_value_arr
output_array - this array contains the lower_bound and upper_bound for each princess
this function uses the previous functions and the previous cells of output_array to calculate
lower_bound and upper_bound of output_array[i], and returns it
'''
def max_coins_per_index(i, cell_title_arr, cell_value_arr, output_array):
    """Compute the (lower_bound, upper_bound) pair for cell *i*.

    Dragon cells and infeasible princesses yield two empty lists; the full
    upper bound is only materialized for the last cell to conserve memory.

    NOTE(review): the i == 0 early return yields (upper, lower) while the
    final return yields (lower, upper) -- both are empty at i == 0 so the
    ordering mismatch is currently harmless, but it is easy to trip over.
    """
    upper_bound, lower_bound = [], []
    copy_value_arr = np.copy(cell_value_arr)
    # first index no dragons seen yet
    if i == 0:
        return upper_bound, lower_bound
    else:
        # if cell is a dragon, do nothing
        if cell_title_arr[i] == 'p':
            # get prev_princess_index that it's lower_bound is not empty
            prev_princess_index = get_non_empty_prev_princess_index(i, cell_title_arr, output_array)
            prev_lower_bound = output_array[prev_princess_index][0]
            dragons_in_range_title = cell_title_arr[prev_princess_index:i]
            # if there are not enough dragons between current princess and previous princess to marry current
            # princess, return empty bounds
            if len(prev_lower_bound) != 0 and \
                    len(prev_lower_bound) + dragon_count(dragons_in_range_title) - 1 < copy_value_arr[i]:
                return upper_bound, lower_bound
            elif len(prev_lower_bound) == 0 and dragon_count(dragons_in_range_title) < copy_value_arr[i]:
                return upper_bound, lower_bound
            # calculate lower bound
            lower_bound = calculate_princess_lower_bound(prev_lower_bound, copy_value_arr, cell_title_arr,
                                                         i, copy_value_arr[i], prev_princess_index)
            # calculate upper bound only for last princess, to conserve space
            if i == len(cell_value_arr) - 1:
                upper_bound = calculate_princess_upper_bound(prev_lower_bound, copy_value_arr, cell_title_arr,
                                                             dragon_count(dragons_in_range_title), i,
                                                             prev_princess_index)
            else:
                upper_bound = []
            # if lower bound is insufficient - return empty bound (this condition may be unnecessary)
            if len(lower_bound) < copy_value_arr[i]:
                return [], []
    return lower_bound, upper_bound
'''
output_array - this array contains the lower_bound and upper_bound for each princess
value_array - cell_value_arr (input)
n - index of princess we want to print
this function prints the output according to the instruction
'''
def print_result(output_array, value_array, n):
    """Print the answer for princess cell *n*.

    Prints -1 when the princess is unreachable (empty lower bound);
    otherwise prints the maximum coin total, the number of dragons killed,
    and the killed cells (1-based) in ascending order on one line.
    """
    lower_bound = output_array[n][0]
    upper_bound = output_array[n][1]
    if len(lower_bound) == 0:
        print(-1)
        return
    kill_indices = upper_bound.astype(int)
    total_coins = value_array[kill_indices].sum()
    print(int(total_coins))
    print(len(upper_bound))
    for cell in np.sort(upper_bound).astype(int) + 1:
        print(cell, end=" ")
def run(title_arr, value_arr):
    """Solve the whole board and print the answer for the last princess.

    title_arr -- cell_title_arr (input): the title of every cell
    value_arr -- cell_value_arr (input): the value of every cell
    """
    cell_count = len(title_arr)
    # Pre-allocate placeholders so max_coins_per_index() can index earlier
    # (and not-yet-computed) entries of the result list while it runs.
    bounds = [[] for _ in range(cell_count)]
    for index in range(cell_count):
        bounds[index] = max_coins_per_index(index, title_arr, value_arr, bounds)
    print_result(bounds, value_arr, len(bounds) - 1)
# Entry point: prompt for the input file, parse it, and run the solver.
if __name__ == '__main__':
    input_file = input("Enter file name: for example input_file.yaml\n After output is printed, press Enter\n")
    parser_val = parse_input_file(input_file)
    if parser_val is not None:
        input_title_arr, input_value_arr = parser_val
        if len(input_title_arr) == 0:
            # No princess in the input.
            print(-1)
        else:
            run(input_title_arr, input_value_arr)
    # Keep the console window open until the user presses Enter.
    input("")
| 7,071 | 0 | 276 |
c2de931d8aae197294b54a447961099c0d687325 | 2,115 | py | Python | bin/Utils/PostInstallRoutines.py | juergenhoetzel/craft | 9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd | [
"BSD-2-Clause"
] | 55 | 2016-11-20T17:08:19.000Z | 2022-03-11T22:19:43.000Z | bin/Utils/PostInstallRoutines.py | juergenhoetzel/craft | 9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd | [
"BSD-2-Clause"
] | 17 | 2017-09-20T07:52:17.000Z | 2021-12-03T10:03:00.000Z | bin/Utils/PostInstallRoutines.py | juergenhoetzel/craft | 9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd | [
"BSD-2-Clause"
] | 29 | 2016-12-10T15:00:11.000Z | 2021-12-02T12:54:05.000Z | # -*- coding: utf-8 -*-
# Copyright Hannah von Reth <vonreth@kde.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import utils
from CraftOS.osutils import OsUtils
from CraftCore import CraftCore | 45 | 93 | 0.722931 | # -*- coding: utf-8 -*-
# Copyright Hannah von Reth <vonreth@kde.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import utils
from CraftOS.osutils import OsUtils
from CraftCore import CraftCore
class PostInstallRoutines(object):
    """Post-install helper routines shared by Craft packages."""

    @staticmethod
    def updateSharedMimeInfo(package) -> bool:
        """Refresh the shared mime database after installing *package*.

        Returns True when the package ships no mime data (nothing to do)
        or when ``update-mime-database`` succeeded.
        """
        if OsUtils.isWin():
            relativeMimeDir = os.path.join("bin", "data", "mime")
        else:
            relativeMimeDir = os.path.join("share", "mime")
        # only check in imageDir, if installDir differs from imageDir it is irrelevant for us
        if not os.path.isdir(os.path.join(package.imageDir(), relativeMimeDir)):
            return True
        mimeDir = os.path.join(CraftCore.standardDirs.craftRoot(), relativeMimeDir)
        # Pass -V for verbose tool output when Craft itself is verbose.
        verboseFlags = ["-V"] if CraftCore.debug.verbose() > 0 else []
        return utils.system(["update-mime-database"] + verboseFlags + [mimeDir])
return utils.system(["update-mime-database"] + flags + [dataDir]) | 603 | 58 | 23 |
5e5d7d94b77b2f4cff2e387b15a916c86023be7e | 475 | py | Python | ora_tools/commands/info.py | henry4k/ora_tools | 82fdd959445cfcfd1d2cb6df2f6e5057566a4a79 | [
"Unlicense"
] | null | null | null | ora_tools/commands/info.py | henry4k/ora_tools | 82fdd959445cfcfd1d2cb6df2f6e5057566a4a79 | [
"Unlicense"
] | null | null | null | ora_tools/commands/info.py | henry4k/ora_tools | 82fdd959445cfcfd1d2cb6df2f6e5057566a4a79 | [
"Unlicense"
] | null | null | null | import argparse
import ora_tools as ora
| 29.6875 | 72 | 0.686316 | import argparse
import ora_tools as ora
def run(prog, description, args):
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('file')
args = parser.parse_args(args)
reader = ora.OraFileReader(args.file)
print(str.format('width: {}',reader.width))
print(str.format('height: {}',reader.height))
print('layers:')
for layer in reader.get_nested_layers():
print(str.format(' {}', layer.get_path()))
| 412 | 0 | 23 |
b871837f7be9abf54bd661a9cff043abe1183a8f | 178 | py | Python | src/escape_rooms/escape_rooms/organizations_app/apps.py | ivelinakaraivanova/Escape_rooms | de13925ebf1062d3012c5a8ef99511573bb7968c | [
"MIT"
] | null | null | null | src/escape_rooms/escape_rooms/organizations_app/apps.py | ivelinakaraivanova/Escape_rooms | de13925ebf1062d3012c5a8ef99511573bb7968c | [
"MIT"
] | null | null | null | src/escape_rooms/escape_rooms/organizations_app/apps.py | ivelinakaraivanova/Escape_rooms | de13925ebf1062d3012c5a8ef99511573bb7968c | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 25.428571 | 56 | 0.797753 | from django.apps import AppConfig
class OrganizationsAppConfig(AppConfig):
    """Django application configuration for the organizations app."""

    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'escape_rooms.organizations_app'
| 0 | 120 | 23 |
70f5555205b4865a9c68461702364c2ad515b5e7 | 391 | py | Python | Python/packages/databricks-test/tests/context_test.py | anandmrya/DataOps | 1a671c707e27b30030687a2a88e5fa94374ce780 | [
"MIT"
] | 42 | 2019-12-04T04:10:53.000Z | 2022-03-31T13:04:17.000Z | Python/packages/databricks-test/tests/context_test.py | anandmrya/DataOps | 1a671c707e27b30030687a2a88e5fa94374ce780 | [
"MIT"
] | 2 | 2020-02-25T11:24:34.000Z | 2020-03-05T06:12:59.000Z | Python/packages/databricks-test/tests/context_test.py | anandmrya/DataOps | 1a671c707e27b30030687a2a88e5fa94374ce780 | [
"MIT"
] | 18 | 2020-01-25T06:25:08.000Z | 2021-11-16T08:40:09.000Z | import databricks_test
from databricks_test import SessionAlreadyExistsException
| 32.583333 | 73 | 0.705882 | import databricks_test
from databricks_test import SessionAlreadyExistsException
def test_forbidden_concurrent_sessions():
    """Opening a second session while one is active must raise."""
    with databricks_test.session() as outer_session:  # noqa: F841
        second_session_rejected = False
        try:
            with databricks_test.session() as inner_session:  # noqa: F841
                pass
        except SessionAlreadyExistsException:
            second_session_rejected = True
        assert second_session_rejected, "should have failed"
| 286 | 0 | 23 |
14e775b9ad6127d6b0049366425561eeaccc2489 | 1,496 | py | Python | vivintpy/devices/switch.py | natekspencer/vivintpy | ea65b05871b3f13326ba370112357a6696793bf6 | [
"MIT"
] | 3 | 2022-02-10T14:08:59.000Z | 2022-03-30T18:55:25.000Z | vivintpy/devices/switch.py | natekspencer/pyvivint | ea65b05871b3f13326ba370112357a6696793bf6 | [
"MIT"
] | null | null | null | vivintpy/devices/switch.py | natekspencer/pyvivint | ea65b05871b3f13326ba370112357a6696793bf6 | [
"MIT"
] | 2 | 2021-10-31T01:43:26.000Z | 2021-11-21T13:33:55.000Z | """Module that implements the Switch class."""
from __future__ import annotations
from ..const import SwitchAttribute, ZWaveDeviceAttribute
from . import VivintDevice
class Switch(VivintDevice):
    """Represents a Vivint switch device."""

    @property
    def is_on(self) -> bool:
        """Return True if switch is on."""
        state: bool = self.data[SwitchAttribute.STATE]
        return state

    @property
    def level(self) -> int:
        """Return the level of the switch between 0..100."""
        current_level: int = self.data[SwitchAttribute.VALUE]
        return current_level

    @property
    def node_online(self) -> bool:
        """Return True if the node is online."""
        online: bool = self.data[ZWaveDeviceAttribute.ONLINE]
        return online

    async def set_state(self, on: bool | None = None, level: int | None = None) -> None:
        """Set switch's state (on/off and/or level) through the panel."""
        panel = self.alarm_panel
        await self.vivintskyapi.set_switch_state(
            panel.id, panel.partition_id, self.id, on, level
        )

    async def turn_on(self) -> None:
        """Turn on the switch."""
        await self.set_state(on=True)

    async def turn_off(self) -> None:
        """Turn off the switch."""
        await self.set_state(on=False)
class BinarySwitch(Switch):
    """Represents a Vivint binary switch device (plain on/off, no levels)."""
class MultilevelSwitch(Switch):
    """Represents a Vivint multilevel switch device (e.g. a dimmer)."""

    async def set_level(self, level: int) -> None:
        """Set the level of the switch between 0..100.

        level -- target level as a percentage (0..100).
        """
        await self.set_state(level=level)
| 29.333333 | 88 | 0.64639 | """Module that implements the Switch class."""
from __future__ import annotations
from ..const import SwitchAttribute, ZWaveDeviceAttribute
from . import VivintDevice
class Switch(VivintDevice):
    """Represents a Vivint switch device."""

    @property
    def is_on(self) -> bool:
        """Return True if switch is on."""
        state: bool = self.data[SwitchAttribute.STATE]
        return state

    @property
    def level(self) -> int:
        """Return the level of the switch between 0..100."""
        current_level: int = self.data[SwitchAttribute.VALUE]
        return current_level

    @property
    def node_online(self) -> bool:
        """Return True if the node is online."""
        online: bool = self.data[ZWaveDeviceAttribute.ONLINE]
        return online

    async def set_state(self, on: bool | None = None, level: int | None = None) -> None:
        """Set switch's state (on/off and/or level) through the panel."""
        panel = self.alarm_panel
        await self.vivintskyapi.set_switch_state(
            panel.id, panel.partition_id, self.id, on, level
        )

    async def turn_on(self) -> None:
        """Turn on the switch."""
        await self.set_state(on=True)

    async def turn_off(self) -> None:
        """Turn off the switch."""
        await self.set_state(on=False)
class BinarySwitch(Switch):
    """Represents a Vivint binary switch device (plain on/off, no levels)."""
class MultilevelSwitch(Switch):
    """Represents a Vivint multilevel switch device (e.g. a dimmer)."""

    async def set_level(self, level: int) -> None:
        """Set the level of the switch between 0..100.

        level -- target level as a percentage (0..100).
        """
        await self.set_state(level=level)
| 0 | 0 | 0 |
bd6b5ce29bd5833af3d7a202866f802b488a2c3a | 12,044 | py | Python | examples/test-alg.py | luoxiangyong/sprp | cfac1f3e86787bc1a3686b5112e991b7413bfa7b | [
"BSD-2-Clause"
] | null | null | null | examples/test-alg.py | luoxiangyong/sprp | cfac1f3e86787bc1a3686b5112e991b7413bfa7b | [
"BSD-2-Clause"
] | null | null | null | examples/test-alg.py | luoxiangyong/sprp | cfac1f3e86787bc1a3686b5112e991b7413bfa7b | [
"BSD-2-Clause"
] | null | null | null | from sprp.core.alg import *
from sprp.export.shapefile import *
# Demo: plan one flight line, then a multi-line strip, and export the strip
# plan to an ESRI shapefile.
if __name__ == "__main__":
    # Single line between two WGS84 (lon, lat) end points; the keyword
    # arguments describe the camera and the desired photogrammetric overlap.
    slc = SimpleLineCalculator(116.23589,39.90387,116.25291,39.90391,**{
        "cameraWidth": 4000,
        "cameraHeight":3000,
        "focusLength":35,
        "pixelSize":2,
        "gsd":0.05,
        "flightSpeed":80,
        "courseOverlap":0.8,
        "sidewiseOverlap":0.6
    })
    print(slc)

    # Stand-alone line computation: station points plus the forward azimuth.
    linePointsResult,forwardAngle = slc.caculate_line(116.23589,39.90387,116.25291,39.90391)
    #slc.setLine(116.23589,39.90387,116.25291,39.90391)
    result = slc.calculate()
    print(result)
    print(slc.points)

    print("###############################################################################")
    # Strip of parallel lines with the same camera setup; the extra 3,2
    # arguments presumably control the number of lines on each side of the
    # seed line -- confirm against SimpleStripCalculator before relying on it.
    ssc = SimpleStripCalculator(116.23589,39.90387,116.25291,39.90391,
        3,2,
        **{
        "cameraWidth": 4000,
        "cameraHeight":3000,
        "focusLength":35,
        "pixelSize":2,
        "gsd":0.05,
        "flightSpeed":80,
        "courseOverlap":0.8,
        "sidewiseOverlap":0.6,
    })

    result = ssc.calculate()
    print(result)
    print(ssc.points)
    print(len(ssc.points))

    # Export the strip plan (NOTE: hard-coded, machine-specific output path).
    sfe = ShapefileExportor('/Users/luoxiangyong/Devel/sprp/Data', 'test-project')
    sfe.save(ssc)
    ################################################################
###############################################################################
CAMERA_WIDTH = 2000    # sensor width in pixels
CAMERA_HEIGHT = 1000   # sensor height in pixels
CAMERA_GSD = 0.05      # ground sample distance in metres per pixel
OVERLAP_FWD = 0.8      # along-track (forward) overlap ratio
OVERLAP_CROSS = 0.6    # cross-track (sidewise) overlap ratio
# Distance between consecutive exposures along a line (used by caculateLine).
BASELINE = (1-OVERLAP_FWD) * CAMERA_HEIGHT * CAMERA_GSD
# Lateral distance between adjacent flight lines (used by the demo below).
CROSSLINE = (1-OVERLAP_CROSS) * CAMERA_WIDTH * CAMERA_GSD
"""
@brief 从点和指定的角度计算地面覆盖的矩形(footprint)
@param point 指定点
@param angle 航线方向
@param iwidth 图像长度
@param iheight 图像高度
@param gsd 地面分辨率
@return 返回地面覆盖的矩形的四脚点坐标
"""
# Demo: derive 10 parallel flight lines from a seed segment (each offset by
# CROSSLINE to the right of the course) and plan/export the whole area.
if __name__ == "__main__":
    # points,angle = caculateLine(116.23589,39.90387,116.25291,39.90391,50)
    # print("Angle:{}".format(angle))
    # writeLineToShapefile(points,'test-shapefile-01')

    # points,angle = caculateLine(116.23589,39.90287,116.25291,39.90291,50)
    # print("Angle:{}".format(angle))
    # writeLineToShapefile(points,'test-shapefile-02')
    start_long = 116.23589
    start_lat = 39.90387
    end_long = 116.25291
    end_lat = 39.90591

    geod = pyproj.Geod(ellps="WGS84")
    #long,lat,tmpAngle = geod.fwd(point[0],point[1],angleTR, distance/2)

    # Azimuth (and geodesic distance) between the two seed points.
    angle,backAngle,distanceTmp = geod.inv(start_long, start_lat,end_long,end_lat)

    pointsOfLine = []
    long = start_long
    lat = start_lat
    for index in range(10):
        # Step the line origin sideways, 90 degrees right of the course...
        long,lat,tmpAngle = geod.fwd(long,lat, angle-90,CROSSLINE)
        # ...then project the matching end point along the course.
        end_long,end_lat,tempAngle = geod.fwd(long,lat, angle,distanceTmp)
        pointsOfLine.append((long,lat,end_long,end_lat))

    caculateArea(pointsOfLine,BASELINE)

    # caculateArea([[116.23589,39.90387,116.25291,39.90391],
    #                [116.23589,39.90287,116.25291,39.90291]],
    #                 CAMERA_GSD)
# CAMERA_GSD) | 35.528024 | 100 | 0.614912 | from sprp.core.alg import *
from sprp.export.shapefile import *
# Demo: plan one flight line, then a multi-line strip, and export the strip
# plan to an ESRI shapefile.
if __name__ == "__main__":
    # Single line between two WGS84 (lon, lat) end points; the keyword
    # arguments describe the camera and the desired photogrammetric overlap.
    slc = SimpleLineCalculator(116.23589,39.90387,116.25291,39.90391,**{
        "cameraWidth": 4000,
        "cameraHeight":3000,
        "focusLength":35,
        "pixelSize":2,
        "gsd":0.05,
        "flightSpeed":80,
        "courseOverlap":0.8,
        "sidewiseOverlap":0.6
    })
    print(slc)

    # Stand-alone line computation: station points plus the forward azimuth.
    linePointsResult,forwardAngle = slc.caculate_line(116.23589,39.90387,116.25291,39.90391)
    #slc.setLine(116.23589,39.90387,116.25291,39.90391)
    result = slc.calculate()
    print(result)
    print(slc.points)

    print("###############################################################################")
    # Strip of parallel lines with the same camera setup; the extra 3,2
    # arguments presumably control the number of lines on each side of the
    # seed line -- confirm against SimpleStripCalculator before relying on it.
    ssc = SimpleStripCalculator(116.23589,39.90387,116.25291,39.90391,
        3,2,
        **{
        "cameraWidth": 4000,
        "cameraHeight":3000,
        "focusLength":35,
        "pixelSize":2,
        "gsd":0.05,
        "flightSpeed":80,
        "courseOverlap":0.8,
        "sidewiseOverlap":0.6,
    })

    result = ssc.calculate()
    print(result)
    print(ssc.points)
    print(len(ssc.points))

    # Export the strip plan (NOTE: hard-coded, machine-specific output path).
    sfe = ShapefileExportor('/Users/luoxiangyong/Devel/sprp/Data', 'test-project')
    sfe.save(ssc)
    ################################################################
###############################################################################
CAMERA_WIDTH = 2000    # sensor width in pixels
CAMERA_HEIGHT = 1000   # sensor height in pixels
CAMERA_GSD = 0.05      # ground sample distance in metres per pixel
OVERLAP_FWD = 0.8      # along-track (forward) overlap ratio
OVERLAP_CROSS = 0.6    # cross-track (sidewise) overlap ratio
# Distance between consecutive exposures along a line (used by caculateLine).
BASELINE = (1-OVERLAP_FWD) * CAMERA_HEIGHT * CAMERA_GSD
# Lateral distance between adjacent flight lines (used by the demo below).
CROSSLINE = (1-OVERLAP_CROSS) * CAMERA_WIDTH * CAMERA_GSD
def caculateLine(startx, starty, endx, endy, baseline):
    """Place camera stations every *baseline* metres along a geodesic line.

    The end point is pushed outwards so the total length becomes an exact
    multiple of *baseline* (original sic name: "caculate").

    Returns ``(stations, forwardAngle)`` where *stations* is a list of
    (lon, lat) tuples from the start to the adjusted end point, and
    *forwardAngle* is the course azimuth in degrees.
    """
    geod = pyproj.Geod(ellps="WGS84")
    forwardAngle, backwardAngle, distance = geod.inv(startx, starty, endx, endy)

    # Number of whole baselines that fit; extend to the next full multiple.
    stationCount = math.floor(distance / baseline)
    wishedDistance = baseline * (stationCount + 1)
    wished_endx, wished_endy, tempAngle = geod.fwd(startx, starty, forwardAngle, wishedDistance)

    # npts() yields only the intermediate points, endpoints excluded.
    intermediate = geod.npts(startx, starty, wished_endx, wished_endy, stationCount - 1)
    return [(startx, starty)] + list(intermediate) + [(wished_endx, wished_endy)], forwardAngle
def writeLinesToShapefile(areaStartEndPoints, filename):
    """Write one LINESTRING feature per (x1, y1, x2, y2) segment.

    areaStartEndPoints -- iterable of 4-element (start lon, start lat,
                          end lon, end lat) sequences
    filename           -- name of the directory (recreated from scratch)
                          and of the shapefile inside it
    """
    # Recreate the output directory from scratch.
    if os.path.exists(filename):
        shutil.rmtree(filename)
    os.mkdir(filename)

    driver = ogr.GetDriverByName('ESRI Shapefile')
    dataSource = driver.CreateDataSource(os.path.join(filename, "{}.shp".format(filename)))

    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')
    layer = dataSource.CreateLayer("layer", srs)

    # Attribute schema: numeric ID plus a display name.
    for fieldName, fieldType, fieldWidth in (("ID", ogr.OFTInteger, 4),
                                             ("NAME", ogr.OFTString, 20)):
        field = ogr.FieldDefn(fieldName, fieldType)
        field.SetWidth(fieldWidth)
        layer.CreateField(field)

    print("Total point: {}".format(len(areaStartEndPoints)))
    for featureId, segment in enumerate(areaStartEndPoints, start=1):
        wkt = "LINESTRING({} {},{} {})".format(segment[0], segment[1], segment[2], segment[3])
        feature = ogr.Feature(layer.GetLayerDefn())
        feature.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
        feature.SetField("ID", featureId)
        feature.SetField("NAME", "LINE-{}".format(featureId))
        layer.CreateFeature(feature)
def caculateArea(areaStartEndPoints, baseline):
    """Plan stations for every flight line of an area and export shapefiles.

    areaStartEndPoints -- list of (start lon, start lat, end lon, end lat)
                          segments, one per flight line
    baseline           -- station spacing in metres along each line

    BUG FIX: the ``baseline`` parameter was previously ignored and the
    module-level BASELINE constant was used instead; the in-repo caller
    passes BASELINE, so behaviour is unchanged for it, but the parameter
    now works as the signature advertises.
    """
    areaPoints = []
    for segment in areaStartEndPoints:
        points, angle = caculateLine(segment[0], segment[1],
                                     segment[2], segment[3],
                                     baseline)
        areaPoints.append(points)
    # Export the raw line segments and the full station/footprint plan.
    # NOTE: ``angle`` is the azimuth of the last line (NameError when the
    # input is empty -- same as before).
    writeLinesToShapefile(areaStartEndPoints, 'test-shapefile-lines')
    writeAreaToShapefile(areaPoints, "test-shapefile", angle)
def writeAreaToShapefile(areaPoints, filename, angle, cameraWidth=3000, cameraHeight=2000, gsd=0.05):
    """Export the flight plan to three ESRI shapefiles.

    Three sibling directories are (re)created, each holding one shapefile:

    * ``<filename>-points``         -- one POINT per exposure station with
      ``ID`` (1-based station number within its line), ``NAME`` and ``LINE``.
    * ``<filename>-area-polygon``   -- one POLYGON outlining the block,
      built from the corner stations of the first and last lines.
    * ``<filename>-points-polygon`` -- one POLYGON ground footprint per
      station, aligned with ``angle`` and sized from the camera geometry.

    areaPoints   -- list of flight lines, each a list of (lon, lat) stations
    filename     -- base name for the output directories/shapefiles
    angle        -- course azimuth in degrees used to orient the footprints
    cameraWidth  -- image width in pixels (NOTE(review): default 3000 differs
                    from the module constant CAMERA_WIDTH = 2000 -- confirm)
    cameraHeight -- image height in pixels
    gsd          -- ground sample distance in metres per pixel
    """
    ########################################################################
    # 1. Station points shapefile.
    filename_points = "{}-points".format(filename)
    if os.path.exists(filename_points):
        shutil.rmtree(filename_points)
    os.mkdir(filename_points)

    driver = ogr.GetDriverByName('ESRI Shapefile')
    path = os.path.join(filename_points, "{}.shp".format(filename_points))
    dataSource = driver.CreateDataSource(path)
    spatialReference = osr.SpatialReference()
    spatialReference.SetWellKnownGeogCS('WGS84')
    layer = dataSource.CreateLayer("layer", spatialReference)

    for fieldName, fieldType, fieldWidth in (("ID", ogr.OFTInteger, 4),
                                             ("NAME", ogr.OFTString, 20),
                                             ("LINE", ogr.OFTString, 20)):
        field = ogr.FieldDefn(fieldName, fieldType)
        field.SetWidth(fieldWidth)
        layer.CreateField(field)

    ########################################################################
    # One point feature per station; the station ID restarts at 1 per line.
    print("Total Line: {}".format(len(areaPoints)))
    for lineIndex, line in enumerate(areaPoints, start=1):
        for stationId, p in enumerate(line, start=1):
            wkt = "POINT({} {})".format(p[0], p[1])
            feature = ogr.Feature(layer.GetLayerDefn())
            feature.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
            feature.SetField("ID", stationId)
            feature.SetField("NAME", "{}".format(stationId))
            feature.SetField("LINE", "{}".format(lineIndex))
            layer.CreateFeature(feature)

    ########################################################################
    # 2. Block-outline polygon shapefile.
    filename_polygon = "{}-area-polygon".format(filename)
    if os.path.exists(filename_polygon):
        shutil.rmtree(filename_polygon)
    os.mkdir(filename_polygon)

    driver = ogr.GetDriverByName('ESRI Shapefile')
    path = os.path.join(filename_polygon, "{}.shp".format(filename_polygon))
    dataSourcePolyon = driver.CreateDataSource(path)
    spatialReference = osr.SpatialReference()
    spatialReference.SetWellKnownGeogCS('WGS84')
    layerPolygon = dataSourcePolyon.CreateLayer("layer", spatialReference)

    field = ogr.FieldDefn("ID", ogr.OFTInteger)
    field.SetWidth(4)
    layerPolygon.CreateField(field)

    # Ring through the four corner stations (first/last station of the
    # first/last line), closed back on the first corner.
    corners = [areaPoints[0][0], areaPoints[0][-1],
               areaPoints[-1][-1], areaPoints[-1][0],
               areaPoints[0][0]]
    wktPolygon = "POLYGON(({}))".format(
        ",".join("{} {}".format(corner[0], corner[1]) for corner in corners))

    featurePolygon = ogr.Feature(layerPolygon.GetLayerDefn())
    featurePolygon.SetGeometry(ogr.CreateGeometryFromWkt(wktPolygon))
    featurePolygon.SetField("ID", 0)
    layerPolygon.CreateFeature(featurePolygon)

    ########################################################################
    # 3. Per-station ground-footprint polygon shapefile.
    filename_polygon = "{}-points-polygon".format(filename)
    if os.path.exists(filename_polygon):
        shutil.rmtree(filename_polygon)
    os.mkdir(filename_polygon)

    driverPointPloygon = ogr.GetDriverByName('ESRI Shapefile')
    path = os.path.join(filename_polygon, "{}.shp".format(filename_polygon))
    dataSourcePointPloygon = driverPointPloygon.CreateDataSource(path)
    spatialReference = osr.SpatialReference()
    spatialReference.SetWellKnownGeogCS('WGS84')
    layerPointPloygon = dataSourcePointPloygon.CreateLayer("layer", spatialReference)

    field = ogr.FieldDefn("ID", ogr.OFTInteger)
    field.SetWidth(4)
    layerPointPloygon.CreateField(field)

    # One footprint polygon per station, numbered consecutively across lines.
    idPolygon = 0
    for line in areaPoints:
        for p in line:
            idPolygon += 1
            rect = calculateRectangleFormPointAndAngle(p, angle, cameraWidth, cameraHeight, gsd)
            wkt = "POLYGON(({} {},{} {},{} {},{} {},{} {}))".format(
                rect[0][0], rect[0][1],
                rect[1][0], rect[1][1],
                rect[2][0], rect[2][1],
                rect[3][0], rect[3][1],
                rect[0][0], rect[0][1],
            )
            # BUG FIX: build the feature from THIS layer's definition; the
            # original used the point layer's definition (``layer``), whose
            # NAME/LINE fields do not exist on this polygon layer.
            feature = ogr.Feature(layerPointPloygon.GetLayerDefn())
            feature.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
            feature.SetField("ID", idPolygon)
            layerPointPloygon.CreateFeature(feature)
"""
@brief 从点和指定的角度计算地面覆盖的矩形(footprint)
@param point 指定点
@param angle 航线方向
@param iwidth 图像长度
@param iheight 图像高度
@param gsd 地面分辨率
@return 返回地面覆盖的矩形的四脚点坐标
"""
def calculateRectangleFormPointAndAngle(point, angle, iwidth,iheight,gsd):
width = iwidth * gsd
height = iheight * gsd
imgAngle = math.atan(iwidth*1.0/iheight) * 180/math.pi
geod = pyproj.Geod(ellps="WGS84")
# 矩形的对角线长
distance = math.sqrt(math.pow(width,2) + math.pow(height,2))
#print("矩形的计算值:width={} height={} dj = {}".format(width,height,distance))
# 计算右上角点
angleTR = angle - imgAngle
longTR,latTR,tmpAngle = geod.fwd(point[0],point[1],angleTR, distance/2)
# 计算右下角点
angleBR = angle + imgAngle
longBR,latBR,tmpAngle = geod.fwd(point[0],point[1],angleBR, distance/2)
# 计算左下角点
angleBL = angleTR + 180
longBL,latBL,tmpAngle = geod.fwd(point[0],point[1],angleBL, distance/2)
# 计算左上角点
angleTL = angleBR + 180
longTL,latTL,tmpAngle = geod.fwd(point[0],point[1],angleTL, distance/2)
#print("当前角度:\n{} \nTR:{} \nBR:{}\nBL:{}\nBT:{}".format(angle, angleTR,angleBR,angleBL,angleTL))
result = []
result.append((longTR,latTR))
result.append((longBR,latBR))
result.append((longBL,latBL))
result.append((longTL,latTL))
# 多边形闭合
result.append((longTR,latTR))
return result
# Demo: derive 10 parallel flight lines from a seed segment (each offset by
# CROSSLINE to the right of the course) and plan/export the whole area.
if __name__ == "__main__":
    # points,angle = caculateLine(116.23589,39.90387,116.25291,39.90391,50)
    # print("Angle:{}".format(angle))
    # writeLineToShapefile(points,'test-shapefile-01')

    # points,angle = caculateLine(116.23589,39.90287,116.25291,39.90291,50)
    # print("Angle:{}".format(angle))
    # writeLineToShapefile(points,'test-shapefile-02')
    start_long = 116.23589
    start_lat = 39.90387
    end_long = 116.25291
    end_lat = 39.90591

    geod = pyproj.Geod(ellps="WGS84")
    #long,lat,tmpAngle = geod.fwd(point[0],point[1],angleTR, distance/2)

    # Azimuth (and geodesic distance) between the two seed points.
    angle,backAngle,distanceTmp = geod.inv(start_long, start_lat,end_long,end_lat)

    pointsOfLine = []
    long = start_long
    lat = start_lat
    for index in range(10):
        # Step the line origin sideways, 90 degrees right of the course...
        long,lat,tmpAngle = geod.fwd(long,lat, angle-90,CROSSLINE)
        # ...then project the matching end point along the course.
        end_long,end_lat,tempAngle = geod.fwd(long,lat, angle,distanceTmp)
        pointsOfLine.append((long,lat,end_long,end_lat))

    caculateArea(pointsOfLine,BASELINE)

    # caculateArea([[116.23589,39.90387,116.25291,39.90391],
    #                [116.23589,39.90287,116.25291,39.90291]],
    #                 CAMERA_GSD)
32899b5f613e38cb4ca971a6b6f01f29369c98af | 2,379 | py | Python | django_kwalitee/management/commands/test.py | lincolnloop/django-kwalitee | 6f5fb8a2e44fdf8508700a8935b54c1b22c3c493 | [
"BSD-3-Clause"
] | 2 | 2015-09-28T10:08:16.000Z | 2015-11-08T11:32:55.000Z | django_kwalitee/management/commands/test.py | lincolnloop/django-kwalitee | 6f5fb8a2e44fdf8508700a8935b54c1b22c3c493 | [
"BSD-3-Clause"
] | null | null | null | django_kwalitee/management/commands/test.py | lincolnloop/django-kwalitee | 6f5fb8a2e44fdf8508700a8935b54c1b22c3c493 | [
"BSD-3-Clause"
] | null | null | null | import sys
from optparse import make_option
from django.core import management
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import get_apps
from django_kwalitee.testrunners import get_runner | 39 | 83 | 0.642287 | import sys
from optparse import make_option
from django.core import management
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import get_apps
from django_kwalitee.testrunners import get_runner
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--coverage', action='store_true', dest='coverage',
default=False,
help='Show coverage details'),
make_option('--local', action='store_true', dest='local',
default=False,
help='Only test "local" apps (submodules of the project).')
)
help = """Custom test command which allows for
specifying different test runners."""
args = '[appname ...]'
requires_model_validation = False
def handle(self, *test_labels, **options):
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive', True)
# it's quite possible someone, lets say South, might have stolen
# the syncdb command from django. For testing purposes we should
# probably put it back. Migrations don't really make sense
# for tests. Actually the South test runner does this too.
management.get_commands()
management._commands['syncdb'] = 'django.core'
if options.get('coverage'):
test_runner_name = 'django_kwalitee.testrunners.codecoverage.run_tests'
else:
test_runner_name = settings.TEST_RUNNER
# hack to run subset of full test suite
# just use test_labels to load up non-excluded apps
if options.get('local') and not test_labels:
local_apps = []
for app in get_apps():
app_label = app.__name__.split('.')[-2]
if not app_label in settings.KWALITEE_LOCAL_EXCLUDES:
local_apps.append(app_label)
test_labels = tuple(local_apps)
test_runner = get_runner(test_runner_name)
failures = test_runner(test_labels, verbosity=verbosity,
interactive=interactive)
if failures:
sys.exit(failures) | 1,363 | 739 | 23 |
d7c042c4f93d725a2d87b7099782de3718b57898 | 1,847 | py | Python | 题源分类/LeetCode/LeetCode日刷/python/76.最小覆盖子串.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/76.最小覆盖子串.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/76.最小覆盖子串.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=76 lang=python3
#
# [76] 最小覆盖子串
#
# https://leetcode-cn.com/problems/minimum-window-substring/description/
#
# algorithms
# Hard (38.57%)
# Likes: 701
# Dislikes: 0
# Total Accepted: 72K
# Total Submissions: 186K
# Testcase Example: '"ADOBECODEBANC"\n"ABC"'
#
# 给你一个字符串 S、一个字符串 T 。请你设计一种算法,可以在 O(n) 的时间复杂度内,从字符串 S 里面找出:包含 T 所有字符的最小子串。
#
#
#
# 示例:
#
# 输入:S = "ADOBECODEBANC", T = "ABC"
# 输出:"BANC"
#
#
#
# 提示:
#
#
# 如果 S 中不存这样的子串,则返回空字符串 ""。
# 如果 S 中存在这样的子串,我们保证它是唯一的答案。
#
#
#
# @lc code=start
# @lc code=end
# def minWindow(self, s: str, t: str) -> str:
# l,r = 0,0
# res = ''
# min_len = float('inf')
# need = Counter(t)
# needcnt = len(t)
# while r < len(s):
# if need[s[r]] > 0:
# needcnt -= 1
# need[s[r]] -= 1
# r += 1
# while needcnt == 0:
# if r - l < min_len:
# min_len = r - l
# res = s[l:r]
# if need[s[l]] == 0:
# needcnt += 1
# need[s[l]] += 1
# l += 1
# return res
| 22.52439 | 74 | 0.406064 | #
# @lc app=leetcode.cn id=76 lang=python3
#
# [76] 最小覆盖子串
#
# https://leetcode-cn.com/problems/minimum-window-substring/description/
#
# algorithms
# Hard (38.57%)
# Likes: 701
# Dislikes: 0
# Total Accepted: 72K
# Total Submissions: 186K
# Testcase Example: '"ADOBECODEBANC"\n"ABC"'
#
# 给你一个字符串 S、一个字符串 T 。请你设计一种算法,可以在 O(n) 的时间复杂度内,从字符串 S 里面找出:包含 T 所有字符的最小子串。
#
#
#
# 示例:
#
# 输入:S = "ADOBECODEBANC", T = "ABC"
# 输出:"BANC"
#
#
#
# 提示:
#
#
# 如果 S 中不存这样的子串,则返回空字符串 ""。
# 如果 S 中存在这样的子串,我们保证它是唯一的答案。
#
#
#
# @lc code=start
class Solution:
from collections import Counter
def minWindow(self, s: str, t: str) -> str:
r,l = 0,0
needcnt = len(t)
need = Counter(t)
min_len = float('inf')
res = ''
while r < len(s):
if need[s[r]] > 0:
needcnt -= 1
need[s[r]] -= 1
r += 1
while needcnt == 0:
if r - l < min_len:
min_len = r - l
res = s[l:r]
if need[s[l]] == 0:
needcnt += 1
need[s[l]] += 1
l += 1
return res
# @lc code=end
# def minWindow(self, s: str, t: str) -> str:
# l,r = 0,0
# res = ''
# min_len = float('inf')
# need = Counter(t)
# needcnt = len(t)
# while r < len(s):
# if need[s[r]] > 0:
# needcnt -= 1
# need[s[r]] -= 1
# r += 1
# while needcnt == 0:
# if r - l < min_len:
# min_len = r - l
# res = s[l:r]
# if need[s[l]] == 0:
# needcnt += 1
# need[s[l]] += 1
# l += 1
# return res
| 564 | 58 | 22 |
e51dab5a6d0ee2d6fb561e01a4461596d41a9b00 | 913 | py | Python | amuustr-beseda-str-flood.py | Tripl0Color/Amuuterasuu-STR | e3ed7bab5ebf7570e9247e5a285c06f287a45bea | [
"Unlicense"
] | null | null | null | amuustr-beseda-str-flood.py | Tripl0Color/Amuuterasuu-STR | e3ed7bab5ebf7570e9247e5a285c06f287a45bea | [
"Unlicense"
] | null | null | null | amuustr-beseda-str-flood.py | Tripl0Color/Amuuterasuu-STR | e3ed7bab5ebf7570e9247e5a285c06f287a45bea | [
"Unlicense"
] | null | null | null | print ("""
Working.
@muuT3ra$$uu-kick-my-str-v.1
#FuckAllEverything.
by Tripl_color vk.com/Tripl_color""")
import vk_requests
import time
import random
token = "токен бота"
cid = str(input('Айди беседы = '))
photo = "photo472165736_457244077"
audio = "audio472165736_456239668"
msg = "fuck all. by Tripl_Color. @muuT3ra$$uu-kick-my-str-v.1 " ## можешь добавить свое сообщение
while True:
api = vk_requests.create_api(service_token=token)
print(api.messages.send(chat_id= cid, message= msg, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= photo, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= audio, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, message= random.randint(1, 2147483647), random_id= random.randint(1, 2147483647)))
print('Круг сообщений сделан')
time.sleep(5)
| 35.115385 | 121 | 0.75356 | print ("""
Working.
@muuT3ra$$uu-kick-my-str-v.1
#FuckAllEverything.
by Tripl_color vk.com/Tripl_color""")
import vk_requests
import time
import random
token = "токен бота"
cid = str(input('Айди беседы = '))
photo = "photo472165736_457244077"
audio = "audio472165736_456239668"
msg = "fuck all. by Tripl_Color. @muuT3ra$$uu-kick-my-str-v.1 " ## можешь добавить свое сообщение
while True:
api = vk_requests.create_api(service_token=token)
print(api.messages.send(chat_id= cid, message= msg, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= photo, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= audio, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, message= random.randint(1, 2147483647), random_id= random.randint(1, 2147483647)))
print('Круг сообщений сделан')
time.sleep(5)
| 0 | 0 | 0 |
97e77f096a9f70ae7478dc37934a2432277f2fea | 1,693 | py | Python | heads/round_head.py | virajmehta/procedural_objects | a5d2416ca5a444c2d20788c78f03a201e6993da2 | [
"MIT"
] | 2 | 2018-01-25T08:01:04.000Z | 2020-06-24T20:44:27.000Z | heads/round_head.py | virajmehta/procedural_objects | a5d2416ca5a444c2d20788c78f03a201e6993da2 | [
"MIT"
] | null | null | null | heads/round_head.py | virajmehta/procedural_objects | a5d2416ca5a444c2d20788c78f03a201e6993da2 | [
"MIT"
] | null | null | null | import random
from heads import Head
| 40.309524 | 200 | 0.534554 | import random
from heads import Head
class RoundHead(Head):
def __init__(self,
min_radius=1.5e-2,
max_radius=3e-2,
min_length=10e-2,
max_length=20e-2,
max_tilt=20,
z_offset=0,
constant_diameter_prob=0.2,
is_L=False,
is_X=False):
super(RoundHead, self).__init__(min_radius, max_radius, min_length,
max_length, max_tilt, z_offset,
is_L, is_X)
self.scad = 'length = {0};translate([{4}, 0, {5}]) {{rotate(a=[{6},{3},0]) {{translate([-length/2, 0., 0.]) {{ rotate(a=[0, 90, 0]) {{ cylinder(length, {1}, {2}, $fn=90); }} }} }} }};' # NOQA
self.constant_diameter_prob = constant_diameter_prob
def get_random_scad(self):
length = random.uniform(self.min_length, self.max_length)
tilt = 0
roll = 0
z_offset = 0
x_offset = 0
if random.random() > self.constant_diameter_prob:
tilt = random.uniform(-self.max_tilt, self.max_tilt)
tilt = random.uniform(-self.max_tilt, self.max_tilt)
if self.is_L:
x_offset = (length / 2) - 3e-2
if self.is_X:
z_offset = random.uniform(-15e-2, 0)
radius1 = random.uniform(self.min_radius, self.max_radius)
radius2 = radius1
if random.random() > self.constant_diameter_prob:
radius2 = random.uniform(self.min_radius, self.max_radius)
return self.scad.format(length, radius1, radius2, tilt, x_offset,
z_offset, roll)
| 1,577 | 1 | 76 |