content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# coding=UTF-8
"""
--------------------------------------------------------
Copyright (c) ****-2018 ESR, Inc. All rights reserved.
--------------------------------------------------------
Author: Mingdong Zhu
Date: 2019/03/07
Design Name: The user interface of the DDS software
Purpose: Design an UI and test function for DDS board
using Python 3.6.3
--------------------------------------------------------
"""
# _name_ = 'main_process'
import time
import numpy as np
import dds
def num_to_bytes(num, bytenum, high_head=True):
"""To get the bytes format of a given decimal number
(used for data_pro)
:param num: A given number
:type num: int
:param bytenum: The number of` bytes (or len()) of the return word
:type bytenum: int
:param high_head: True/False -- big/little-endian; eg:num_to_bytes(1, 2, True/False)-->b'\x00\x01' or b'\x01\x00'
:type high_head: bool
:returns: Bytes for num, len() = bytenum
:rtype: bytes
"""
if high_head:
return np.array([num], dtype='>u8').tobytes()[-bytenum:] # big-endian
else:
return np.array([num], dtype='<u8').tobytes()[:bytenum] # little-endian
def bytes_to_num(bytes_, signed_=True, big_=True):
"""To get the int format of a given bytes
(used for data_pro)
:param bytes_: A given bytes
:type bytes_: bytes
:param signed_: True for signed input
:type signed_: bool
:param big_: Same as the "high_head" in the function 'num_to_bytes'
:type big_: bool
:returns: Int for bytes
:rtype: int
"""
if not signed_:
if big_:
return int.from_bytes(bytes_, byteorder='big')
else:
return int.from_bytes(bytes_, byteorder='little')
else:
if big_:
return int.from_bytes(bytes_, byteorder='big', signed=True)
else:
return int.from_bytes(bytes_, byteorder='little', signed=True)
def bytes_to_hexstr(bytes_, space=True):
"""To get the string format of a given bytes
(used for print/debug)
:param bytes_: A given bytes
:type bytes_: bytes
:param space: True for insert a ' ' per byte
:type space: bool
:returns: String for bytes
:rtype: str
"""
# ss = s_str.encode('hex') # original solution in Python2
string = bytes_.hex() # original solution in Python2
if space:
string_with_space = [string[i:i + 2] for i in range(0, len(string), 2)]
return ' '.join(string_with_space)
else:
return string
class FPGA(dds.HardWare): # GenWave,
""" A class used for integration, in other word, the final application """
"""To clarify the user-defined scan-sign ******
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def __init__(self, dev_index=0, test_mode=False):
""" To launch the Instantiation of classes"""
# GenWave.__init__(self)
dds.HardWare.__init__(self, dev_index=dev_index, test_mode=test_mode)
def cw_play(self, ch_num, amp, freq, phase):
"""Single channel setting for DDS
(can be applied in spectrum test or non-sequence wave_play)
:param ch_num: The number ch to be set, [0,1,...,15] is available
:type ch_num: int
:param amp: Amplitude of DDS, range:[0,1]
:type amp: float
:param freq: Frequency of DDS, unit: MHz
:type freq: int or float
:param amp: Phase of DDS, unit: pi, range: [0,2)
:type amp: float
:returns: unit: MHz, Hz
:rtype: float, float
"""
hp_channel, reg_wr = self.ch2identify(ch_num)
ch_num_byte = num_to_bytes(2**ch_num, 2)
dds_data_list = self.dds_data_form(hp_channel, amp, freq, phase)
print(bytes_to_hexstr(dds_data_list[0]))
self.l_configure(ch_num_byte, reg_wr, dds_data_list[0])
"""
return specification:
1--the real digital freq (set)
2--the difference of freq (real - set)
"""
return dds_data_list[1], dds_data_list[2]
def ttl_set(self, ch_num, level):
"""To set the TTL manually
:param ch_num: channel number of TTL, [0,1] correspond to TTL9,10 and 0x5/6 0,1
:type ch_num: int
:param level: 0/1 for low and high
:type level: int
:returns:
:rtype:
"""
word_in_num = 5*16 + ch_num + 16*level
word_in_bytes = num_to_bytes(word_in_num % 256, 2)
print(bytes_to_hexstr(word_in_bytes))
self.write(word_in_bytes)
def ad5371_ini(self):
"""To initialize the AD5371 which is a 40-ch low-speed DAC
:param :
:type :
:returns:
:rtype:
"""
self.write(b'\x00\x34'+b'\x00'+b'\x02'+b'\x20\x00') # the b'\x02' can be b'\x03',b'\x04'
self.write(b'\x00\x34'+b'\x00'+b'\x03'+b'\x20\x00') # the OFS_g1 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x04'+b'\x20\x00') # the OFS_g2~4 is set to be +10V.
self.write(b'\x00\x34'+b'\x00'+b'\x80'+b'\x80\x00') # C
self.write(b'\x00\x34'+b'\x00'+b'\x40'+b'\xFF\xFC') # M
self.write(b'\x00\x34'+b'\x00'+b'\xC0'+b'\x80\x00') # X = +10
stamp_list = [0, 1, 3]
self.ad5371_wr_stamp_set(stamp_list) # To set the SPI rate
# self.ad5371_play_set(ch_num, [106, 59, 111])
print('AD5371 initial has been finished')
#################################################################
# integration-experiment function
# 以下都是支持多个通道的操作
#################################################################
def initial_dds(self):
"""To initialize and synchronize the 16 DDSs
:param :
:type :
:returns:
:rtype:
"""
ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
self.delay_para_set()
self.sync_on()
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.initial_AD9915(ch_num_list[index_1])
else:
self.initial_ad9910(ch_num_list[index_1])
self.mannual_sync_2g5()
self.mannual_sync_1g()
self.sync_off()
self.stamp_reset() # When there are some bugs, this one will be used
print('channel ', ch_num_list, ' initial has been finished')
def phase_clear_dds(self, ch_num_list):
"""To clear the phase of DDS in ch_num_list, after that the phase in accumulator will be 0
What's more, if a dds is play at a freq != 0, we need to stop it and clear the phase for "sequence play".
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:returns:
:rtype:
"""
for index_1 in range(len(ch_num_list)):
if ch_num_list[index_1] < 4:
self.phase_clear_2g5(ch_num_list[index_1])
else:
self.phase_clear_1g(ch_num_list[index_1])
# print 'phase of channel ',ch_num_list,' has been cleared'
def sequence_data_download(self, ch_num_list, raw_data_list_list, check_sign=False):
"""To download the sequence play data for multi channels
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..15]
:type ch_num_list: list
:param raw_data_list_list: List of raw_data_list(for one channel)
:*** format of raw_data_list: [ [scan_sign,[A,f(MHz),fai(pi)],[level,time]], ...]
:*** eg: [ [scan_sign0,[A0, f0, fai0],[level0, time0]], [scan_sign1,[A1, f1, fai1],[level1, time1]], ... ]
: scan_sign: int, [0,1, .. ,4,5,..8]--["no scan", "amp"_0, "freq"_0, "phase"_0, "time"_0]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
:type raw_data_list_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
if len(ch_num_list) != len(raw_data_list_list):
print('mismatch of ch_num and data_list')
exit()
else:
play_address_word = b''
for index_1 in range(len(ch_num_list)):
raw_data_list_temp = raw_data_list_list[index_1]
play_address_word_temp = self.single_data_download(ch_num_list[index_1], raw_data_list_temp,
check_sign, print_sign=True)
play_address_word += play_address_word_temp
print('\ndata-download of channel ', ch_num_list, ' has been finished')
self.play_sequence_set(ch_num_list, play_address_word, print_sign=True)
# return play_address_word
"""
var_type = [0, 1, 2, 3, 4], which is show the scan_para's variable type
[0, 1, 2, 3, 4] represents ["no scan", "amp", "freq", "phase", "time"]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num), which show the scan_para's type and group number
para_num = 0, 1...; The group number for the scan_para
"""
def play(self, var_type, scan_para_list, check_sign=False):
"""To download the scan data and trigger the play
What's more ,a PMT counter receive function will also be carried
:param var_type: Int represents the variable type
:type var_type: int
:param scan_para_list: List of scan data
:*** format: [[N_0, para0, para1], [N_1, para0, para1],..]
:type scan_para_list: list
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
print('')
scan_para_gen = self.scan_data_gen(var_type, scan_para_list)
print(bytes_to_hexstr(scan_para_gen[0]))
self.scan_data_download(scan_para_gen[0], print_sign=True)
if check_sign:
if not self.scan_data_check(scan_para_gen[0]):
self.write(b'\x00\x00')
print('Scan_data download check failed!')
exit()
print('Play ins is ', bytes_to_hexstr(b'\x00\x01' + scan_para_gen[0][0:4]))
self.write(b'\x00\x01' + scan_para_gen[0][0:4])
print("total_play ", scan_para_gen[1])
return self.counter_receive(scan_para_gen[1])
def counter_receive(self, cnt_number):#PMT
"""To receive PMT counter's result for each single play
:param cnt_number: Total number of single play in current play
:type cnt_number: int
:returns: A list of PMT counter's result
:rtype: list
"""
readout_bytes = b''
cnt_result_list = []
counter_end_sign = True
print('')
# t1 = time.time()
while counter_end_sign:
temp = self.read()
readout_bytes += temp
while readout_bytes != b'':
# print('Current time consumed is ', time.time()-t1)
# print(bytes_to_hexstr(readout_bytes))
# print('')
if readout_bytes[0:2] == b'\xFF\xFA': # start sign
readout_bytes = readout_bytes[2:]
cnt_addr_start = bytes_to_num(readout_bytes[0:2])
elif readout_bytes[0:2] == b'\xFF\xF5': # stop sign(The end sign of this infinite loop)
readout_bytes = readout_bytes[2:]
cnt_addr_stop = bytes_to_num(readout_bytes[0:2])
counter_end_sign = False # To break from the whole while-loop
break
else:
if readout_bytes[0:2] == b'\xFF\xF8':
cnt_result_list.append('overflow')
else:
cnt_result_list.append(bytes_to_num(readout_bytes[0:2]))
readout_bytes = readout_bytes[2:]
# print('the start and stop of cnt_addr are %d, %d' % (cnt_addr_start, cnt_addr_stop))
# print('The length of result is %d' % len(cnt_result_list))
if cnt_number == (cnt_addr_stop-cnt_addr_start) + 1:
print('The cnt_number match the input scan number')
else:
print('The cnt_number miss match')
# print('Counter number is ', cnt_number)
print('The counter results is ', cnt_result_list)
return cnt_result_list
def ad5371_play(self, ch_num_list, raw_wave_list, play_sign=True, check_sign=False):#PMT
"""To receive PMT counter's result for each single play
:param ch_num_list: List of ch_num(int), ch_num can be [0,1,..39]
:type ch_num_list: list
:param raw_wave_list: List of raw_wave_data, len(raw_wave_list[0]) = len(ch_num_list)
:*** format : [[ch0_pt0, ch1_pt0, ...], [ch0_pt1, ch1_pt1, ...], ...]
:type raw_wave_list: list
:param play_sign: True/False -- Enable/Disable the play
:type play_sign: bool
:param check_sign: If True, the check function will be carried out, which will consume more time.
:type check_sign: bool
:returns:
:rtype:
"""
addr_start, addr_stop = self.dac_ad5371_data_download(ch_num_list, raw_wave_list, check_sign)
if play_sign:
ch_num = len(ch_num_list)
self.ad5371_play_set(ch_num, [106, 59, 111]) # [106, 59, 111]
self.write(b'\x00\x31' + addr_start + addr_stop)
print(bytes_to_hexstr(b'\x00\x31' + addr_start + addr_stop))
time.sleep((bytes_to_num(addr_stop)-bytes_to_num(addr_start))*1e-6)
if __name__ == '__main__':
"""
var_type = [0, 1, 2, 3, 4]
scan_sign = [0, 1, 2, 3, 4] + 4*(para_num)
para_num = 0, 1...
"""
# # Part1
# """ DDS and TTL test modules """
# fpga = DDSTestClass(1)
# fpga.dll.flushInputBuffer() # To refresh the USB, just copy
# fpga.initial_device()
#
# var_type = 0
# play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5]
# # play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# fpga.test_fun_basic(play_ch_num_list, var_type, check_sign=True)
# Part2 4
""" AD5371 test modules """
ad5371 = DacTestClass(1)
ad5371.dll.flushInputBuffer()
ad5371.ad5371_ini()
ad5371.ch_test_new(10)
# # Part3
# """ AD5371 test modules """
# fpga = DDSTestClass(1)
# fpga.cw_play(ch_num=5, amp=1, freq=0, phase=0)
# ch_num=5
# hp_channel, reg_wr = fpga.ch2identify(ch_num)
# ch_num_byte = num_to_bytes(2**ch_num, 2)
# print(fpga.l_read(ch_num_byte, reg_wr, right_rd=b'\x00\x00\x00\x00\x00\x00\x00\x00'))
| [
2,
19617,
28,
48504,
12,
23,
201,
198,
37811,
201,
198,
43801,
201,
198,
15269,
357,
66,
8,
25998,
12,
7908,
412,
12562,
11,
3457,
13,
220,
1439,
2489,
10395,
13,
201,
198,
43801,
201,
198,
13838,
25,
26980,
67,
506,
33144,
201,
1... | 1.910498 | 8,592 |
from django.conf.urls import url
from .views import listings_listing_view, listings_api_view
urlpatterns = [
url(
r'^listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
listings_listing_view,
name='listings_listing_view',
),
url(
r'^api/v1/listings/(?P<listing_hostname>[a-z0-9-\.]+)/?$',
listings_api_view,
name='listings_api_view',
)
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
33571,
1330,
26890,
62,
4868,
278,
62,
1177,
11,
26890,
62,
15042,
62,
1177,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
198,
220,... | 1.985222 | 203 |
'''
Author: xyb
Date: 2020-08-10 18:35:32
LastEditTime: 2020-08-10 18:52:50
'''
from flask import Flask, make_response, request
app = Flask(__name__)
app.secret_key = 'dfslkfjdlfsdkjfnskj' #直接设置
#间接设置
# class DefaultConfig(object):
# SECRET_KEY = 'dfslkfjdlfsdkjfnskj'
# app.config.from_object(DefaultConfig)
@app.route('/set_session')
@app.route('/get_session')
if __name__ == "__main__":
app.run(host='', port=5000, debug=False)
| [
7061,
6,
198,
13838,
25,
2124,
88,
65,
198,
10430,
25,
12131,
12,
2919,
12,
940,
1248,
25,
2327,
25,
2624,
198,
5956,
18378,
7575,
25,
12131,
12,
2919,
12,
940,
1248,
25,
4309,
25,
1120,
198,
7061,
6,
198,
6738,
42903,
1330,
46947... | 2.138756 | 209 |
import tempfile
from pysubs2 import SSAFile, SSAStyle, Color, SSAEvent, make_time
from audio_pipeline import logging_config
from audio_pipeline.audio_processing.ffmpeg_processor import run_ffmpeg
logger = logging_config.get_logger(__name__)
def _adjust_for_clashing_subs(combined_subs, working_sub, exclude):
"""
Helper function for the append code. Looking for overlapping subtitles and make adjustments
"""
# If we haven't got a set of subs to check against early return
if not combined_subs or not exclude:
return working_sub, None
second_working_sub = None
for sub in combined_subs:
# Standard style exit
if exclude and sub.style not in exclude:
continue
if sub.start <= working_sub.start <= sub.end:
# Drop the start of the working sub
working_sub.start = sub.end
elif working_sub.start <= sub.start <= working_sub.end:
# Drop the end of the working sub
if sub.end < working_sub.end:
# We might need to split the sub
second_working_sub = working_sub.copy()
second_working_sub.start = sub.end
second_working_sub.end = working_sub.end
working_sub.end = sub.start
# Check that we now have a sub that has no duration
if working_sub.start >= working_sub.end:
working_sub = None
return working_sub, second_working_sub
def append_subs(combined_subs, new_subs, style=None, formatter=None, exclude=None):
"""
Append a set of subs to a current set avoiding a clash if needed. Also allows for styling and formatting
"""
if exclude is None:
exclude = []
new_combined_subs = SSAFile()
if combined_subs:
# First add the subs we are keeping
new_combined_subs.extend(combined_subs)
for sub in new_subs:
# Add a style
if style:
sub.style = style
# Perform the formatting
if formatter:
sub.text = formatter(sub.text)
# See if we want to cater for clashes
sub, second_sub = _adjust_for_clashing_subs(combined_subs, sub, exclude)
# Prepare results
if sub:
new_combined_subs.append(sub)
if second_sub:
new_combined_subs.append(second_sub)
new_combined_subs.sort()
return new_combined_subs
def flatten_subs(starting_subs, style=None):
"""
Take some subs and merge them together (adjacent subtitle which are the same)
"""
new_subs = SSAFile()
for sub in starting_subs:
# Standard style exit
if style and sub.style != style:
continue
if not new_subs:
new_subs.append(sub)
elif sub.text == new_subs[-1].text and sub.start <= new_subs[-1].end:
if sub.end > new_subs[-1].end:
new_subs[-1].end = sub.end
else:
new_subs.append(sub)
# Copy in all the subs we skipped due to styling
if style:
for sub in starting_subs:
if sub.style != style:
new_subs.append(sub)
new_subs.sort()
return new_subs
def merge_subs(starting_subs, tolerance_millis=1000, style=None):
"""
Take some subs and eliminate any blank spots where they are less than a tolerance (default of 1 second)
"""
merged_subs = SSAFile()
for sub in starting_subs:
if style and sub.style != style:
continue
if merged_subs and merged_subs[-1].end + tolerance_millis >= sub.start:
merged_subs[-1].end = sub.start
merged_subs.append(sub)
if style:
for sub in starting_subs:
if sub.style != style:
merged_subs.append(sub)
merged_subs.sort()
return merged_subs
def compress_subs(subs, max_chars=30, max_stretch_millis=3000, max_oldest_millis=10000, style=None):
"""
Mostly for the use of speech subtitles this will take individual words and create a running subtitle
"""
# Phase 1 based on character count so that we dont overflow the screen
# Phase 2 is to make sure that the oldest word on the screen has not been there for too long
# First remove gaps where they exist
merged_subs = merge_subs(subs, max_stretch_millis, style)
char_count = 0
oldest_start_time = 0
compressed_subs = SSAFile()
for sub in merged_subs:
if style and sub.style is not style:
continue
char_count += len(sub.text)
# Check the character count and reset if needed
if char_count > max_chars:
char_count = len(sub.text)
oldest_start_time = sub.start
# Check if subtitle has been on screen for too long then reset
elif sub.start - oldest_start_time > max_oldest_millis:
char_count = len(sub.text)
oldest_start_time = sub.start
# If there is a gap in time between subtitles then reset
elif len(compressed_subs) > 0 and sub.start != compressed_subs[-1].end:
char_count = len(sub.text)
oldest_start_time = sub.start
# Add this sub
elif len(compressed_subs) > 0:
sub.text = compressed_subs[-1].text + ' ' + sub.text
char_count += 1
compressed_subs.append(sub)
# Append all the other subs
if style:
for sub in merged_subs:
if sub.style is not style:
compressed_subs.append(sub)
compressed_subs.sort()
return compressed_subs
def remove_tiny_subs(subs, duration_millis=1000, left_millis=2000, right_millis=2000, style=None):
"""
Remove any subs that are out on their own or too short
"""
copy_subs = SSAFile()
new_subs = SSAFile()
for sub in subs:
if (style and sub.style is style) or not style:
copy_subs.append(sub)
for i, sub in enumerate(copy_subs):
# if it is longer it goes in
if sub.duration >= duration_millis:
new_subs.append(sub)
continue
# if its the first one then look right only
# if its the last one then look left only
# if its in the middle then look both ways
if left_millis is None and right_millis is None:
continue
if i == 0:
if copy_subs[i + 1].start - sub.end < right_millis:
new_subs.append(sub)
elif i == len(copy_subs) - 1:
if sub.start - copy_subs[i - 1].end < left_millis:
new_subs.append(sub)
elif copy_subs[i + 1].start - sub.end < right_millis or sub.start - copy_subs[i - 1].end < left_millis:
new_subs.append(sub)
if style:
for sub in subs:
if sub.style is not style:
new_subs.append(sub)
new_subs.sort()
return new_subs
def add_styles(subs, style_list=None):
"""
Add styles to the subtitle file based on the style strings in each individual subtitle
"""
if style_list is None:
style_list = []
for style in style_list:
new_style = SSAStyle()
# Number for position refers to the number on a keypad
if 'top_left' in style:
new_style.alignment = 7
elif 'top_right' in style:
new_style.alignment = 9
elif 'bottom_left' in style:
new_style.alignment = 1
elif 'bottom_right' in style:
new_style.alignment = 3
elif 'left' in style:
new_style.alignment = 4
elif 'right' in style:
new_style.alignment = 6
elif 'top' in style:
new_style.alignment = 8
elif 'bottom' in style:
new_style.alignment = 2
# Setting the RGB values for the text
if 'pred' in style:
new_style.primarycolor = Color(255, 0, 0, 0)
elif 'pblue' in style:
new_style.primarycolor = Color(0, 0, 255, 0)
elif 'pgreen' in style:
new_style.primarycolor = Color(0, 255, 0, 0)
elif 'pwhite' in style:
new_style.primarycolor = Color(255, 255, 255, 0)
# Setting the RGB values for the text's background
if 'bred' in style:
new_style.backcolor = Color(255, 0, 0, 0)
elif 'bblue' in style:
new_style.backcolor = Color(0, 0, 255, 0)
elif 'bgreen' in style:
new_style.backcolor = Color(0, 255, 0, 0)
elif 'bwhite' in style:
new_style.backcolor = Color(255, 255, 255, 0)
# Setting different font types
if 'bold' in style:
new_style.bold = True
if 'italic' in style:
new_style.italic = True
subs.styles[style] = new_style
return subs
def save_to_subtitles(results, formatter):
"""
Save to subtitle file
:param results: Dictionary containing info and start/end times
:param formatter: Apply text formating to the subtitle
:return: New subtitle file
"""
subs = SSAFile()
for result in results:
event = SSAEvent(start=make_time(s=result['start']),
end=make_time(s=result['end']), text=formatter(result))
if 'highlight' in result and result['highlight']:
event.style = 'red'
subs.append(event)
logger.info(f'Processed {len(results)} results to subtitle events')
return subs
def create_styles(subs):
"""
Gather text from subtitles and call the subtitle adder
"""
styles = set()
for sub in subs:
styles.add(sub.style)
add_styles(subs, styles)
def burn_subtitles_into_video(video_path, subtitle_path, output_path):
"""
Create new video with subtitles burned in
:param video_path: input video path
:param subtitle_path: subtitle input path
:param output_path: video output path
:return: File name that it has written to
"""
temp_file_name = tempfile.mktemp(dir=output_path, prefix='output_with_hard_subtitles_', suffix='.mp4')
# Handle srt files if needed
if subtitle_path.endswith('.srt.'):
subtitle_ass_file = subtitle_path.replace(".srt", ".ass")
run_ffmpeg(f'ffmpeg -y -i {subtitle_path} {subtitle_ass_file}')
else:
subtitle_ass_file = subtitle_path
run_ffmpeg(f'ffmpeg -i {video_path} -vf "ass={subtitle_ass_file}" {temp_file_name}')
logger.info(f'Burnt subtitles {subtitle_path} to {video_path} stored in {temp_file_name}')
return temp_file_name
| [
11748,
20218,
7753,
198,
6738,
279,
893,
23161,
17,
1330,
311,
4090,
8979,
11,
6723,
1921,
774,
293,
11,
5315,
11,
311,
4090,
9237,
11,
787,
62,
2435,
198,
6738,
6597,
62,
79,
541,
4470,
1330,
18931,
62,
11250,
198,
6738,
6597,
62,
... | 2.287371 | 4,569 |
# Generated by Django 2.2 on 2019-06-04 23:00
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
13130,
12,
3312,
12,
3023,
2242,
25,
405,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
#Gizem Özgün / 160401007
# -*- coding: utf-8 -*-
import sys
if __name__ == "__main__":
menu()
| [
2,
38,
528,
368,
43307,
89,
70,
9116,
77,
1220,
1467,
3023,
486,
25816,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
220,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 1.927273 | 55 |
# This script assumes taht the freesurfer csv for the BANC data has already been generated
import os
import pandas as pd
import numpy as np
import pdb
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from BayOptPy.helperfunctions import get_paths, get_data, drop_missing_features
def str_to_bool(s):
'''
As arg pass does not acess boolen, transfrom the string into
booleans
'''
if s == 'True':
return True
elif s == 'False':
return False
#-----------------------------------------------------------------------------
# Settings
#-----------------------------------------------------------------------------
debug = False
dataset = 'freesurf_combined'
resamplefactor = 1
save_path = os.path.join('/code/BayOptPy', 'freesurfer_preprocess')
raw = 'False'
analysis = 'uniform'
project_wd, project_data, project_sink = get_paths(debug, dataset)
demographics, imgs, dataframe = get_data(project_data, dataset,
debug, project_wd,
resamplefactor,
raw=str_to_bool(raw),
analysis=analysis)
# transform age into ints
demographics['age_int'] = demographics['age'].astype('int32', copy=False)
# Select 14 subjects for all ages that have 14 representatives.
age_range = np.arange(demographics['age'].min(), demographics['age'].max())
# remove entry where you don't have 14 subjects
max_n = 14
age_to_remove = [35, 36, 39, 42, 78, 79, 80, 81, 82, 83, 85, 89]
age_range = np.setdiff1d(age_range, age_to_remove)
# iterate over the dataframe and select 14 subjects for each age range
ids_to_use = []
for age in age_range:
ids_to_use.append(demographics.index[demographics['age_int'] ==
age].tolist()[:max_n])
# flatten ids_to_use
ids_to_use = [item for sublist in ids_to_use for item in sublist]
# Filter the demographics dataframe
demographics = demographics[demographics.index.isin(ids_to_use)]
# set subject's id as index
demographics = demographics.set_index('id')
# filter dataset using index of the subjects
dataframe = dataframe.loc[demographics.index]
# Print some diagnosis
print('Shape of the new demographics:')
print(demographics.shape)
print('Oldest %d and youngest %d subject' %(demographics['age_int'].max(),
demographics['age_int'].min()))
print('Number of age bins %d' %len(demographics['age_int'].unique()))
import pdb
pdb.set_trace()
print('Done')
| [
2,
770,
4226,
18533,
256,
993,
83,
262,
2030,
274,
333,
2232,
269,
21370,
329,
262,
347,
20940,
1366,
468,
1541,
587,
7560,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
279,
... | 2.584258 | 991 |
# pylint: disable=invalid-name,missing-docstring
# Generated by Django 2.2.1 on 2020-06-19 05:29
from django.db import migrations
from django.db import models
| [
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
11,
45688,
12,
15390,
8841,
201,
198,
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
16,
319,
12131,
12,
3312,
12,
1129,
8870,
25,
1959,
201,
198,
6738,
42625,
14208,
13,
9945,
... | 2.75 | 60 |
from .abstract_dash_mp4_representation import AbstractDashMP4Representation
| [
6738,
764,
397,
8709,
62,
42460,
62,
3149,
19,
62,
15603,
341,
1330,
27741,
43041,
7378,
19,
40171,
341,
628
] | 3.85 | 20 |
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from __init__ import db
class User(db.Model):
"""Data model for user accounts."""
__tablename__ = 'usuario'
id = db.Column(
db.Integer,
primary_key=True
)
email = db.Column(
db.String(80),
index=True,
unique=True,
nullable=False
)
isadmin = db.Column(
db.Boolean,
index=False,
unique=False,
nullable=False
)
password_hash = db.Column(
db.String(128),
index=False,
unique=False,
nullable=False)
@staticmethod
@staticmethod
@property
def password(self):
"""
Prevent pasword from being accessed
"""
raise AttributeError('password is not a readable attribute.')
@password.setter
def password(self, password):
"""
Set password to a hashed password
"""
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
"""
Check if hashed password matches actual password
"""
return check_password_hash(self.password_hash, password)
| [
6738,
42903,
62,
38235,
1330,
11787,
35608,
259,
201,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
17831,
201,
198,
6738,
42903,
62,
38235,
1330,
11787,
35608,
259,
201,
198,
... | 2.230284 | 634 |
# MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 152 71 6 75 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 105 85 20 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 352 334 18 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - m.x12 - m.x13 + 5*m.x24 + 10*m.x25 - 2*m.x34 - m.x35 + 80*m.x36 + 90*m.x37 + 285*m.x38
+ 390*m.x39 + 290*m.x40 + 405*m.x41 - 5*m.b96 - 4*m.b97 - 8*m.b98 - 7*m.b99 - 6*m.b100
- 9*m.b101 - 10*m.b102 - 9*m.b103 - 6*m.b104 - 10*m.b105, sense=maximize)
m.c2 = Constraint(expr= m.x12 - m.x14 - m.x16 == 0)
m.c3 = Constraint(expr= m.x13 - m.x15 - m.x17 == 0)
m.c4 = Constraint(expr= - m.x18 - m.x20 + m.x22 == 0)
m.c5 = Constraint(expr= - m.x19 - m.x21 + m.x23 == 0)
m.c6 = Constraint(expr= m.x22 - m.x24 - m.x26 == 0)
m.c7 = Constraint(expr= m.x23 - m.x25 - m.x27 == 0)
m.c8 = Constraint(expr= m.x26 - m.x28 - m.x30 - m.x32 == 0)
m.c9 = Constraint(expr= m.x27 - m.x29 - m.x31 - m.x33 == 0)
m.c10 = Constraint(expr=(m.x50/(1e-6 + m.b86) - log(1 + m.x42/(1e-6 + m.b86)))*(1e-6 + m.b86) <= 0)
m.c11 = Constraint(expr=(m.x51/(1e-6 + m.b87) - log(1 + m.x43/(1e-6 + m.b87)))*(1e-6 + m.b87) <= 0)
m.c12 = Constraint(expr= m.x44 == 0)
m.c13 = Constraint(expr= m.x45 == 0)
m.c14 = Constraint(expr= m.x52 == 0)
m.c15 = Constraint(expr= m.x53 == 0)
m.c16 = Constraint(expr= m.x14 - m.x42 - m.x44 == 0)
m.c17 = Constraint(expr= m.x15 - m.x43 - m.x45 == 0)
m.c18 = Constraint(expr= m.x18 - m.x50 - m.x52 == 0)
m.c19 = Constraint(expr= m.x19 - m.x51 - m.x53 == 0)
m.c20 = Constraint(expr= m.x42 - 40*m.b86 <= 0)
m.c21 = Constraint(expr= m.x43 - 40*m.b87 <= 0)
m.c22 = Constraint(expr= m.x44 + 40*m.b86 <= 40)
m.c23 = Constraint(expr= m.x45 + 40*m.b87 <= 40)
m.c24 = Constraint(expr= m.x50 - 3.71357206670431*m.b86 <= 0)
m.c25 = Constraint(expr= m.x51 - 3.71357206670431*m.b87 <= 0)
m.c26 = Constraint(expr= m.x52 + 3.71357206670431*m.b86 <= 3.71357206670431)
m.c27 = Constraint(expr= m.x53 + 3.71357206670431*m.b87 <= 3.71357206670431)
m.c28 = Constraint(expr=(m.x54/(1e-6 + m.b88) - 1.2*log(1 + m.x46/(1e-6 + m.b88)))*(1e-6 + m.b88) <= 0)
m.c29 = Constraint(expr=(m.x55/(1e-6 + m.b89) - 1.2*log(1 + m.x47/(1e-6 + m.b89)))*(1e-6 + m.b89) <= 0)
m.c30 = Constraint(expr= m.x48 == 0)
m.c31 = Constraint(expr= m.x49 == 0)
m.c32 = Constraint(expr= m.x56 == 0)
m.c33 = Constraint(expr= m.x57 == 0)
m.c34 = Constraint(expr= m.x16 - m.x46 - m.x48 == 0)
m.c35 = Constraint(expr= m.x17 - m.x47 - m.x49 == 0)
m.c36 = Constraint(expr= m.x20 - m.x54 - m.x56 == 0)
m.c37 = Constraint(expr= m.x21 - m.x55 - m.x57 == 0)
m.c38 = Constraint(expr= m.x46 - 40*m.b88 <= 0)
m.c39 = Constraint(expr= m.x47 - 40*m.b89 <= 0)
m.c40 = Constraint(expr= m.x48 + 40*m.b88 <= 40)
m.c41 = Constraint(expr= m.x49 + 40*m.b89 <= 40)
m.c42 = Constraint(expr= m.x54 - 4.45628648004517*m.b88 <= 0)
m.c43 = Constraint(expr= m.x55 - 4.45628648004517*m.b89 <= 0)
m.c44 = Constraint(expr= m.x56 + 4.45628648004517*m.b88 <= 4.45628648004517)
m.c45 = Constraint(expr= m.x57 + 4.45628648004517*m.b89 <= 4.45628648004517)
m.c46 = Constraint(expr= - 0.75*m.x58 + m.x74 == 0)
m.c47 = Constraint(expr= - 0.75*m.x59 + m.x75 == 0)
m.c48 = Constraint(expr= m.x60 == 0)
m.c49 = Constraint(expr= m.x61 == 0)
m.c50 = Constraint(expr= m.x76 == 0)
m.c51 = Constraint(expr= m.x77 == 0)
m.c52 = Constraint(expr= m.x28 - m.x58 - m.x60 == 0)
m.c53 = Constraint(expr= m.x29 - m.x59 - m.x61 == 0)
m.c54 = Constraint(expr= m.x36 - m.x74 - m.x76 == 0)
m.c55 = Constraint(expr= m.x37 - m.x75 - m.x77 == 0)
m.c56 = Constraint(expr= m.x58 - 4.45628648004517*m.b90 <= 0)
m.c57 = Constraint(expr= m.x59 - 4.45628648004517*m.b91 <= 0)
m.c58 = Constraint(expr= m.x60 + 4.45628648004517*m.b90 <= 4.45628648004517)
m.c59 = Constraint(expr= m.x61 + 4.45628648004517*m.b91 <= 4.45628648004517)
m.c60 = Constraint(expr= m.x74 - 3.34221486003388*m.b90 <= 0)
m.c61 = Constraint(expr= m.x75 - 3.34221486003388*m.b91 <= 0)
m.c62 = Constraint(expr= m.x76 + 3.34221486003388*m.b90 <= 3.34221486003388)
m.c63 = Constraint(expr= m.x77 + 3.34221486003388*m.b91 <= 3.34221486003388)
m.c64 = Constraint(expr=(m.x78/(1e-6 + m.b92) - 1.5*log(1 + m.x62/(1e-6 + m.b92)))*(1e-6 + m.b92) <= 0)
m.c65 = Constraint(expr=(m.x79/(1e-6 + m.b93) - 1.5*log(1 + m.x63/(1e-6 + m.b93)))*(1e-6 + m.b93) <= 0)
m.c66 = Constraint(expr= m.x64 == 0)
m.c67 = Constraint(expr= m.x65 == 0)
m.c68 = Constraint(expr= m.x80 == 0)
m.c69 = Constraint(expr= m.x81 == 0)
m.c70 = Constraint(expr= m.x30 - m.x62 - m.x64 == 0)
m.c71 = Constraint(expr= m.x31 - m.x63 - m.x65 == 0)
m.c72 = Constraint(expr= m.x38 - m.x78 - m.x80 == 0)
m.c73 = Constraint(expr= m.x39 - m.x79 - m.x81 == 0)
m.c74 = Constraint(expr= m.x62 - 4.45628648004517*m.b92 <= 0)
m.c75 = Constraint(expr= m.x63 - 4.45628648004517*m.b93 <= 0)
m.c76 = Constraint(expr= m.x64 + 4.45628648004517*m.b92 <= 4.45628648004517)
m.c77 = Constraint(expr= m.x65 + 4.45628648004517*m.b93 <= 4.45628648004517)
m.c78 = Constraint(expr= m.x78 - 2.54515263975353*m.b92 <= 0)
m.c79 = Constraint(expr= m.x79 - 2.54515263975353*m.b93 <= 0)
m.c80 = Constraint(expr= m.x80 + 2.54515263975353*m.b92 <= 2.54515263975353)
m.c81 = Constraint(expr= m.x81 + 2.54515263975353*m.b93 <= 2.54515263975353)
m.c82 = Constraint(expr= - m.x66 + m.x82 == 0)
m.c83 = Constraint(expr= - m.x67 + m.x83 == 0)
m.c84 = Constraint(expr= - 0.5*m.x70 + m.x82 == 0)
m.c85 = Constraint(expr= - 0.5*m.x71 + m.x83 == 0)
m.c86 = Constraint(expr= m.x68 == 0)
m.c87 = Constraint(expr= m.x69 == 0)
m.c88 = Constraint(expr= m.x72 == 0)
m.c89 = Constraint(expr= m.x73 == 0)
m.c90 = Constraint(expr= m.x84 == 0)
m.c91 = Constraint(expr= m.x85 == 0)
m.c92 = Constraint(expr= m.x32 - m.x66 - m.x68 == 0)
m.c93 = Constraint(expr= m.x33 - m.x67 - m.x69 == 0)
m.c94 = Constraint(expr= m.x34 - m.x70 - m.x72 == 0)
m.c95 = Constraint(expr= m.x35 - m.x71 - m.x73 == 0)
m.c96 = Constraint(expr= m.x40 - m.x82 - m.x84 == 0)
m.c97 = Constraint(expr= m.x41 - m.x83 - m.x85 == 0)
m.c98 = Constraint(expr= m.x66 - 4.45628648004517*m.b94 <= 0)
m.c99 = Constraint(expr= m.x67 - 4.45628648004517*m.b95 <= 0)
m.c100 = Constraint(expr= m.x68 + 4.45628648004517*m.b94 <= 4.45628648004517)
m.c101 = Constraint(expr= m.x69 + 4.45628648004517*m.b95 <= 4.45628648004517)
m.c102 = Constraint(expr= m.x70 - 30*m.b94 <= 0)
m.c103 = Constraint(expr= m.x71 - 30*m.b95 <= 0)
m.c104 = Constraint(expr= m.x72 + 30*m.b94 <= 30)
m.c105 = Constraint(expr= m.x73 + 30*m.b95 <= 30)
m.c106 = Constraint(expr= m.x82 - 15*m.b94 <= 0)
m.c107 = Constraint(expr= m.x83 - 15*m.b95 <= 0)
m.c108 = Constraint(expr= m.x84 + 15*m.b94 <= 15)
m.c109 = Constraint(expr= m.x85 + 15*m.b95 <= 15)
m.c110 = Constraint(expr= m.x2 + 5*m.b96 == 0)
m.c111 = Constraint(expr= m.x3 + 4*m.b97 == 0)
m.c112 = Constraint(expr= m.x4 + 8*m.b98 == 0)
m.c113 = Constraint(expr= m.x5 + 7*m.b99 == 0)
m.c114 = Constraint(expr= m.x6 + 6*m.b100 == 0)
m.c115 = Constraint(expr= m.x7 + 9*m.b101 == 0)
m.c116 = Constraint(expr= m.x8 + 10*m.b102 == 0)
m.c117 = Constraint(expr= m.x9 + 9*m.b103 == 0)
m.c118 = Constraint(expr= m.x10 + 6*m.b104 == 0)
m.c119 = Constraint(expr= m.x11 + 10*m.b105 == 0)
m.c120 = Constraint(expr= m.b86 - m.b87 <= 0)
m.c121 = Constraint(expr= m.b88 - m.b89 <= 0)
m.c122 = Constraint(expr= m.b90 - m.b91 <= 0)
m.c123 = Constraint(expr= m.b92 - m.b93 <= 0)
m.c124 = Constraint(expr= m.b94 - m.b95 <= 0)
m.c125 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c126 = Constraint(expr= m.b96 + m.b97 <= 1)
m.c127 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c128 = Constraint(expr= m.b98 + m.b99 <= 1)
m.c129 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c130 = Constraint(expr= m.b100 + m.b101 <= 1)
m.c131 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c132 = Constraint(expr= m.b102 + m.b103 <= 1)
m.c133 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c134 = Constraint(expr= m.b104 + m.b105 <= 1)
m.c135 = Constraint(expr= m.b86 - m.b96 <= 0)
m.c136 = Constraint(expr= - m.b86 + m.b87 - m.b97 <= 0)
m.c137 = Constraint(expr= m.b88 - m.b98 <= 0)
m.c138 = Constraint(expr= - m.b88 + m.b89 - m.b99 <= 0)
m.c139 = Constraint(expr= m.b90 - m.b100 <= 0)
m.c140 = Constraint(expr= - m.b90 + m.b91 - m.b101 <= 0)
m.c141 = Constraint(expr= m.b92 - m.b102 <= 0)
m.c142 = Constraint(expr= - m.b92 + m.b93 - m.b103 <= 0)
m.c143 = Constraint(expr= m.b94 - m.b104 <= 0)
m.c144 = Constraint(expr= - m.b94 + m.b95 - m.b105 <= 0)
m.c145 = Constraint(expr= m.b86 + m.b88 == 1)
m.c146 = Constraint(expr= m.b87 + m.b89 == 1)
m.c147 = Constraint(expr= m.b86 + m.b88 - m.b90 >= 0)
m.c148 = Constraint(expr= m.b87 + m.b89 - m.b91 >= 0)
m.c149 = Constraint(expr= m.b86 + m.b88 - m.b92 >= 0)
m.c150 = Constraint(expr= m.b87 + m.b89 - m.b93 >= 0)
m.c151 = Constraint(expr= m.b86 + m.b88 - m.b94 >= 0)
m.c152 = Constraint(expr= m.b87 + m.b89 - m.b95 >= 0)
| [
2,
220,
20625,
19930,
3194,
416,
402,
40834,
38240,
379,
8870,
14,
1314,
14,
1238,
3571,
25,
4349,
25,
1954,
198,
2,
220,
220,
198,
2,
220,
7889,
341,
9853,
198,
2,
220,
220,
220,
220,
220,
7472,
220,
220,
220,
220,
220,
220,
22... | 1.889175 | 8,148 |
import numpy as np
import pytest
from inverse_covariance import (
QuicGraphicalLassoEBIC,
AdaptiveGraphicalLasso,
QuicGraphicalLassoCV,
)
from inverse_covariance.profiling import ClusterGraph
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
34062,
62,
66,
709,
2743,
590,
1330,
357,
198,
220,
220,
220,
2264,
291,
37065,
605,
43,
28372,
30195,
2149,
11,
198,
220,
220,
220,
30019,
425,
37065,
605,
43,
... | 2.821918 | 73 |
DEVELOPERS_EMAILS = ['esoergel@dimagi.com', 'sreddy@dimagi.com'] | [
7206,
18697,
3185,
4877,
62,
27630,
45484,
796,
37250,
274,
78,
6422,
417,
31,
27740,
18013,
13,
785,
3256,
705,
82,
445,
9892,
31,
27740,
18013,
13,
785,
20520
] | 2.206897 | 29 |
import os
import pickle
import time
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
TARGET = 'target'
THRESHOLD = 0.7
df = pd.read_csv('data_test.csv', index_col=[1]) \
.drop('Unnamed: 0', axis=1)
with open('model.pkl', 'rb') as f:
model = pickle.load(f)
df[TARGET] = (model.predict_proba(df)[:, 1] > THRESHOLD).astype('int')
df.to_csv('answers_test.csv')
| [
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
640,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
11,
3602,
16354,
35608,
259,
628,
628,
628,
198,
51,
46095,
796,
705... | 2.354651 | 172 |
from _Funções_e_Valores.verify_authors import treat_exceptions
from _Funções_e_Valores.values import ND
import pandas as pd
# Proceedings and Journals separated
| [
6738,
4808,
24629,
16175,
127,
113,
274,
62,
68,
62,
7762,
2850,
13,
332,
1958,
62,
41617,
1330,
2190,
62,
1069,
11755,
198,
6738,
4808,
24629,
16175,
127,
113,
274,
62,
68,
62,
7762,
2850,
13,
27160,
1330,
25524,
198,
198,
11748,
1... | 2.948276 | 58 |
"""Split sorted modely_02."""
import pandas as pd
url = "https://onemocneni-aktualne.mzcr.cz/api/account/mifLSHU2re3GAmiotOkdYExeoQ/file/modely%252Fmodely_02_efektivita_testovani.csv"
df = pd.read_csv(url, delimiter=';')
df = df.sort_values(['datum_hlaseni', 'datum_prvniho_priznaku', 'orp', 'vek_kat', 'pohlavi'])
df[df['datum_hlaseni'] < '2021'].to_csv('modely_02_efektivita_testovani_sorted_2020_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] < '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_1_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2021') & (df['datum_hlaseni'] >= '2021-07')].to_csv('modely_02_efektivita_testovani_sorted_2021_2_v1.csv')
df.loc[(df['datum_hlaseni'] >= '2022')].to_csv('modely_02_efektivita_testovani_sorted_2022_v1.csv')
df[(df['datum_hlaseni'] >= '2023') | df['datum_hlaseni'].isnull()].to_csv('modely_02_efektivita_testovani_sorted_null_v1.csv') | [
37811,
41205,
23243,
4235,
306,
62,
2999,
526,
15931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6371,
796,
366,
5450,
1378,
261,
368,
420,
38572,
72,
12,
461,
83,
723,
710,
13,
76,
89,
6098,
13,
26691,
14,
15042,
14,
23... | 2.004357 | 459 |
from bokeh.models.widgets import Panel
class BacktraderPlottingTab:
'''
Abstract class for tabs
This class needs to be extended from when creating custom tabs.
It is required to overwrite the _is_useable and _get_panel method.
The _get_panel method needs to return a panel child and a title.
'''
def is_useable(self):
'''
Returns if the tab is useable within the current environment
'''
return self._is_useable()
def get_panel(self):
'''
Returns the panel to show as a tab
'''
child, title = self._get_panel()
self._panel = Panel(child=child, title=title)
return self._panel
| [
6738,
1489,
365,
71,
13,
27530,
13,
28029,
11407,
1330,
18810,
628,
198,
4871,
5157,
2213,
5067,
43328,
889,
33349,
25,
628,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
27741,
1398,
329,
22524,
198,
220,
220,
220,
770,
1398,
2476,
... | 2.601504 | 266 |
"""Functions that create samples."""
import chaospy as cp
import numpy as np
import respy as rp
import pandas as pd
from pathlib import Path
PROJECT_ROOT = Path(__file__).resolve().parents[1]
DATA_PATH = PROJECT_ROOT / "data"
CHAOSPY_SAMPLING_METHODS = {
"random",
"grid",
"chebyshev",
"korobov",
"sobol",
"halton",
"hammersley",
"latin_hypercube",
}
def create_sample(
n_samples=30,
seed=123,
M="None",
sampling_method="random",
MC_method="Brute force",
):
"""Simulate samples of qoi.
Parameters
----------
n_samples : int
Number of samples to draw.
seed : int
Seed for the random number generators.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
Returns
-------
input_x_respy: list
A list of input parameters that are ready to be passed into the
`respy` function.
input_x_mix_respy: list
A list of conditional input parameters that are ready to be passed
into the `respy` function.
"""
# load mean and cov
mean, cov = load_mean_and_cov()
# get unconditioal samples
sample_x, sample_x_prime = unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
)
# fix parameters of interest
x_3 = subset_params(sample_x)
x_prime_3 = subset_params(sample_x_prime)
x = fix_true_params(x_3, mean)
# get conditional samples
x_mix_3 = conditional_samples(x_3, x_prime_3, MC_method, M)
# fix parameters of interest
x_mix = fix_true_params_mix(x_mix_3, mean, MC_method)
input_x_respy = [(params_to_respy)(i) for i in x]
input_x_mix_respy = [(params_to_respy)(z) for x in x_mix for y in x for z in y]
return input_x_respy, input_x_mix_respy
def load_mean_and_cov():
"""Return mean and covariance for Keane and Wolpin (1994) model."""
# load model specifications
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
# mean and cov for sampling
mean = base_params["value"].to_numpy()[:27]
cov = pd.read_pickle(DATA_PATH / "covariance_kw_94_one.pkl").to_numpy()
return mean, cov
def unconditional_samples(
mean,
cov,
n_samples,
seed,
sampling_method,
):
"""Generate two independent groups of sample points.
Parameters
----------
mean : pd.DataFrame or np.ndarray
The mean, of shape (k, ).
cov : pd.DataFrame or np.ndarrary
The covariance, has to be of shape (k, k).
n_samples : int
Number of samples to draw.
seed : int, optional
Random number generator seed.
sampling_method : string
Specifies which sampling method should be employed. Possible arguments
are in {"random", "grid", "chebyshev", "korobov","sobol", "halton",
"hammersley", "latin_hypercube"}
Returns
-------
sample_x, sample_x_prime : np.ndarray
Two arrays of shape (n_draws, n_params) with i.i.d draws from a
given joint distribution.
"""
distribution = cp.MvNormal(loc=mean, scale=cov)
if sampling_method in CHAOSPY_SAMPLING_METHODS:
np.random.seed(seed)
sample_x = np.array(distribution.sample(size=n_samples, rule=sampling_method).T)
np.random.seed(seed + 1)
sample_x_prime = np.array(
distribution.sample(size=n_samples, rule=sampling_method).T
)
else:
raise ValueError(f"Argument 'method' is not in {CHAOSPY_SAMPLING_METHODS}.")
return sample_x, sample_x_prime
def subset_params(x):
"""Pick a subset of samples from the sampled parameters.
Parameters
----------
x : np.ndarray
Array of shape (n_draws, n_params).
Returns
-------
params_interests : np.ndarray
Array of shape (n_draws, 3) contains only 3 seleted parameters.
"""
n_draws = x.shape[0]
indices = [2, 14, 16]
params_interests = np.zeros((n_draws, 3))
for i in range(n_draws):
params_interests[i] = np.take(x[i], indices)
return params_interests
def conditional_samples(x_3, x_prime_3, MC_method, M):
"""Generate mixed sample sets of interest distributed accroding to a conditional PDF.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
x_prime : np.ndarray
Array with shape (n_draws, 3).
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
Returns
-------
x_mix : np.ndarray
Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3).
"""
n_draws, n_params = x_3.shape
if MC_method == "Brute force":
x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
for i in range(n_params):
for j in range(n_draws):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = x_prime_3[j, i]
if MC_method == "DLR":
conditional_bin = x_3[:M]
x_3_mix = np.zeros((M, n_params, n_draws, n_params))
# subdivide unconditional samples into M eaually bins,
# within each bin x_i being fixed.
for i in range(n_params):
for j in range(M):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = conditional_bin[j, i]
return x_3_mix
def fix_true_params(x_3, true_values):
"""Replace the 3 selected point estimates with the sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
n_draws = x_3.shape[0]
true_params_fix = np.tile(true_values, (n_draws, 1))
for i in range(n_draws):
np.put(true_params_fix[i], [2, 14, 16], x_3[i])
return true_params_fix
def fix_true_params_mix(x_3, true_values, MC_method):
"""Replace the 3 selected point estimates with the conditional sampled parameters.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
true_values : np.ndarray
The point estimated, of shape (k, ).
Returns
-------
true_params_fix : np.ndarray
Shape has the form (n_draws, n_params, n_draws, n_params).
"""
if MC_method == "Brute force":
n_draws, n_3_parmas = x_3.shape[:2]
true_params_fix = np.tile(true_values, (n_draws, n_3_parmas, n_draws, 1))
for i in range(n_draws):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
if MC_method == "DLR":
M, n_3_parmas, n_draws = x_3.shape[:3]
true_params_fix = np.tile(true_values, (M, n_3_parmas, n_draws, 1))
for i in range(M):
for j in range(n_3_parmas):
for k in range(n_draws):
np.put(true_params_fix[i, j, k], [2, 14, 16], x_3[i, j, k])
return true_params_fix
def params_to_respy(input_params, *args):
"""transfer sampled paramters to respy format."""
# baseline options and params for the indices.
base_params = pd.read_pickle(DATA_PATH / "params_kw_94_one_se.pkl")
params_idx = pd.Series(data=input_params, index=base_params.index[0:27])
assert len(params_idx) == 27, "Length of KW94 vector must be 27."
part_1 = params_idx
rp_params, _ = rp.get_example_model("kw_94_one", with_data=False)
part_2 = rp_params.iloc[27:31, 0]
parts = [part_1, part_2]
rp_params_series = pd.concat(parts)
input_params_respy = pd.DataFrame(rp_params_series, columns=["value"])
return input_params_respy
| [
37811,
24629,
2733,
326,
2251,
8405,
526,
15931,
198,
11748,
17792,
2117,
88,
355,
31396,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1217,
88,
355,
374,
79,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3108,
8019,
1330,
10644,
... | 2.281395 | 3,671 |
# -*- coding: utf-8 -*-
from http import HTTPStatus
from typing import Optional
from flask_httpauth import HTTPTokenAuth
from app.config import config
auth = HTTPTokenAuth(scheme="Bearer", header="Authorization")
@auth.verify_token
@auth.error_handler
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
2638,
1330,
14626,
19580,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
42903,
62,
4023,
18439,
1330,
14626,
30642,
30515,
198,
198,
6738,
598,
13,
11250,
1330,
456... | 3.2375 | 80 |
from re import M
import bottom
import asyncio
import platform
| [
6738,
302,
1330,
337,
198,
11748,
4220,
198,
11748,
30351,
952,
198,
11748,
3859,
628
] | 4.2 | 15 |
import logging
import os
import numpy as np
import pandas._libs.json as ujson
import pyarrow as pa
import pyarrow.parquet as pq
import scipy.sparse
from cirrocumulus.anndata_util import DataType
logger = logging.getLogger("cirro")
| [
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
13557,
8019,
82,
13,
17752,
355,
334,
17752,
198,
11748,
12972,
6018,
355,
14187,
198,
11748,
12972,
6018,
13,
1845,
21108,
355,
279,
80,
... | 2.914634 | 82 |
import json
import pytest
from rest_framework import status
from usaspending_api.common.helpers.unit_test_helper import add_to_mock_objects
from usaspending_api.search.tests.test_mock_data_search import all_filters
from django_mock_queries.query import MockModel
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
def test_spending_by_award_pop_zip_filter(client, mock_matviews_qs):
""" Test that filtering by pop zips works"""
mock_model_1 = MockModel(pop_zip5="00501", pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None,
type='B', pulled_from="AWARD")
mock_model_2 = MockModel(pop_zip5="00502", pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None,
type='B', pulled_from="AWARD")
mock_model_3 = MockModel(pop_zip5="00503", pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None,
type='B', pulled_from="AWARD")
add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
# test simple, single zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
# test that adding a zip that has no results doesn't remove the results from the first zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "10000"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}
# test that we get 2 results with 2 valid zips
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"place_of_performance_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "00502"}]
}
}))
possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00501'},
{'internal_id': 2, 'Place of Performance Zip5': '00502'})
assert len(resp.data['results']) == 2
assert resp.data['results'][0] in possible_results
assert resp.data['results'][1] in possible_results
# Just to make sure it isn't returning the same thing twice somehow
assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_recipient_zip_filter(client, mock_matviews_qs):
""" Test that filtering by recipient zips works"""
mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD")
mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD")
mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD")
add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
# test simple, single zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
# test that adding a zip that has no results doesn't remove the results from the first zip
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "10000"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
# test that we get 2 results with 2 valid zips
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "00502"}]
}
}))
possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00001'},
{'internal_id': 2, 'Place of Performance Zip5': '00002'})
assert len(resp.data['results']) == 2
assert resp.data['results'][0] in possible_results
assert resp.data['results'][1] in possible_results
# Just to make sure it isn't returning the same thing twice somehow
assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_both_zip_filter(client, mock_matviews_qs):
""" Test that filtering by both kinds of zips works"""
mock_model_1 = MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA', pop_zip5='00001',
pop_country_code='USA', award_id=1, piid=None, fain='abc', uri=None, type='B',
pulled_from="AWARD")
mock_model_2 = MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA', pop_zip5='00002',
pop_country_code='USA', award_id=2, piid=None, fain='abd', uri=None, type='B',
pulled_from="AWARD")
mock_model_3 = MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA', pop_zip5='00003',
pop_country_code='USA', award_id=3, piid=None, fain='abe', uri=None, type='B',
pulled_from="AWARD")
add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_2, mock_model_3])
# test simple, single pair of zips that both match
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"}],
"place_of_performance_locations": [{"country": "USA", "zip": "00001"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
# test simple, single pair of zips that don't match
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"}],
"place_of_performance_locations": [{"country": "USA", "zip": "00002"}]
}
}))
assert len(resp.data['results']) == 0
# test 2 pairs (only one pair can be made from this)
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"fields": ["Place of Performance Zip5"],
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_locations": [{"country": "USA", "zip": "00501"},
{"country": "USA", "zip": "00502"}],
"place_of_performance_locations": [{"country": "USA", "zip": "00001"},
{"country": "USA", "zip": "00003"}]
}
}))
assert len(resp.data['results']) == 1
assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
@pytest.mark.django_db
def test_spending_by_award_foreign_filter(client, mock_matviews_qs):
""" Verify that foreign country filter is returning the correct results """
mock_model_0 = MockModel(award_id=0, piid=None, fain='aaa', uri=None, type='B', pulled_from="AWARD",
recipient_location_country_name="UNITED STATES", recipient_location_country_code="USA")
mock_model_1 = MockModel(award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD",
recipient_location_country_name="", recipient_location_country_code="USA")
mock_model_2 = MockModel(award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD",
recipient_location_country_name="UNITED STATES", recipient_location_country_code="")
mock_model_3 = MockModel(award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD",
recipient_location_country_name="Gibraltar", recipient_location_country_code="GIB")
add_to_mock_objects(mock_matviews_qs, [mock_model_0, mock_model_1, mock_model_2, mock_model_3])
# add_to_mock_objects(mock_matviews_qs, [mock_model_1, mock_model_3])
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
# "recipient_locations": [{"country": "USA"}]
"recipient_scope": "domestic"
},
"fields": ["Award ID"]
}))
# Three results are returned when searching for "USA"-based recipients
# e.g. "USA"; "UNITED STATES"; "USA" and "UNITED STATES";
assert len(resp.data['results']) == 3
resp = client.post(
'/api/v2/search/spending_by_award/',
content_type='application/json',
data=json.dumps({
"filters": {
"award_type_codes": ["A", "B", "C", "D"],
"recipient_scope": "foreign"
},
"fields": ["Award ID"],
}))
# One result is returned when searching for "Foreign" recipients
assert len(resp.data['results']) == 1
| [
11748,
33918,
198,
11748,
12972,
9288,
198,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
514,
5126,
1571,
62,
15042,
13,
11321,
13,
16794,
364,
13,
20850,
62,
9288,
62,
2978,
525,
1330,
751,
62,
1462,
62,
76,
735,
62,
48205,
1... | 2.12543 | 5,517 |
from abei.interfaces import (
IProcedure,
IProcedureClass,
IProcedureFactory,
IProcedureData,
IProcedureLink,
)
from .procedure_joint_basic import (
joint_validate,
joint_run,
)
# native_function = staticmethod(lambda x, y: x)
# composite procedure class ------------------------------
procedure_class_composite = ProcedureClassComposite()
# bool procedure classes ----------------------------------
procedure_class_not = ProcedureClassBasic(
signature='not',
docstring='logic not',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: not x,
)
procedure_class_and = ProcedureClassBasic(
signature='and',
docstring='logic and',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x and y,
)
procedure_class_or = ProcedureClassBasic(
signature='or',
docstring='logic or',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x or y,
)
# calculation procedure classes ---------------------------
procedure_class_negate = ProcedureClassBasic(
signature='neg',
docstring='negate operator',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: not x,
)
procedure_class_add = ProcedureClassBasic(
signature='add',
docstring='add operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x + y,
)
procedure_class_subtract = ProcedureClassBasic(
signature='sub',
docstring='subtract operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x - y,
)
procedure_class_multiply = ProcedureClassBasic(
signature='mul',
docstring='multiply operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x * y,
)
procedure_class_divide = ProcedureClassBasic(
signature='div',
docstring='divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x / y,
)
procedure_class_modulo = ProcedureClassBasic(
signature='mod',
docstring='modulo operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x % y,
)
procedure_class_mod_divide = ProcedureClassBasic(
signature='modDiv',
docstring='modulo divide operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x // y,
)
procedure_class_square = ProcedureClassBasic(
signature='sq',
docstring='square operator',
procedure_type=ProcedureUnaryOperator,
native_function=lambda x: x * x,
)
procedure_class_power = ProcedureClassBasic(
signature='pow',
docstring='power operator',
procedure_type=ProcedureBinaryOperator,
native_function=lambda x, y: x ** y,
)
# comparision procedure classes ---------------------------
procedure_class_equal = ProcedureClassBasic(
signature='eq',
docstring='equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x == y,
)
procedure_class_not_equal = ProcedureClassBasic(
signature='ne',
docstring='not equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x != y,
)
procedure_class_less_than = ProcedureClassBasic(
signature='lt',
docstring='less than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x < y,
)
procedure_class_less_than_or_equal = ProcedureClassBasic(
signature='lte',
docstring='less than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x <= y,
)
procedure_class_greater_than = ProcedureClassBasic(
signature='gt',
docstring='greater than',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x > y,
)
procedure_class_greater_than_or_equal = ProcedureClassBasic(
signature='gte',
docstring='greater than or equal',
procedure_type=ProcedureComparator,
native_function=lambda x, y: x >= y,
)
# probe class --------------------------------------------
procedure_class_probe = ProcedureClassBasic(
signature='probe',
docstring='probe',
procedure_type=ProcedureProbe,
)
# data class cast -----------------------------------------
procedure_class_cast_2_bool = ProcedureClassBasic(
signature='castToBool',
docstring='cast to bool',
procedure_type=ProcedureCast,
native_function=lambda x: bool(x),
)
procedure_class_cast_2_int = ProcedureClassBasic(
signature='castToInt',
docstring='cast to int',
procedure_type=ProcedureCast,
native_function=lambda x: int(x),
)
procedure_class_cast_2_float = ProcedureClassBasic(
signature='castToFloat',
docstring='cast to float',
procedure_type=ProcedureCast,
native_function=lambda x: float(x),
)
# data flow control ---------------------------------------
procedure_class_diverge = ProcedureClassBasic(
signature='diverge2',
docstring='diverge 1 branch to 2',
procedure_type=ProcedureDiverge2,
)
procedure_class_converge = ProcedureClassBasic(
signature='converge2',
docstring='converge 2 branches to 1',
procedure_type=ProcedureConverge2,
)
# implement procedure class factory -----------------------
class ProcedureFactory(IProcedureFactory):
"""
basic procedure class factory
"""
| [
6738,
450,
20295,
13,
3849,
32186,
1330,
357,
198,
220,
220,
220,
314,
2964,
771,
495,
11,
198,
220,
220,
220,
314,
2964,
771,
495,
9487,
11,
198,
220,
220,
220,
314,
2964,
771,
495,
22810,
11,
198,
220,
220,
220,
314,
2964,
771,
... | 2.863337 | 1,822 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from json import loads
from shutil import which
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.engines.pil import Engine
from thumbor.importer import Importer
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.storages.no_storage import Storage as NoStorage
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15683,
273,
19560,
2139,
198,
2,
3740,
1378,
12567,
13,
785,
14,
400,
2178,
273,
14,
400,
2178,
273,
14,
15466,
19... | 3.257463 | 268 |
"""empty message
Revision ID: 69858d32aaff
Revises: 160db434d139
Create Date: 2016-07-20 16:08:00.219265
"""
# revision identifiers, used by Alembic.
revision = '69858d32aaff'
down_revision = '160db434d139'
from alembic import op
import sqlalchemy as sa
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
39861,
3365,
67,
2624,
64,
2001,
198,
18009,
2696,
25,
13454,
9945,
47101,
67,
20219,
198,
16447,
7536,
25,
1584,
12,
2998,
12,
1238,
1467,
25,
2919,
25,
405,
13,
28896,
22980,
198,... | 2.653061 | 98 |
from .abstract_dict_writer import AbstractDictWriter
from typing import Union, Sequence
| [
6738,
764,
397,
8709,
62,
11600,
62,
16002,
1330,
27741,
35,
713,
34379,
198,
6738,
19720,
1330,
4479,
11,
45835,
628
] | 4.238095 | 21 |
import os
import time
import logging
from ....utils.loaders import load_pkl
from ....utils.exceptions import TimeLimitExceeded
from ......core import args
from ......scheduler.reporter import LocalStatusReporter
logger = logging.getLogger(__name__)
@args()
def model_trial(args, reporter: LocalStatusReporter):
""" Training script for hyperparameter evaluation of an arbitrary model that subclasses AbstractModel.
Notes:
- Model object itself must be passed as kwarg: model
- All model hyperparameters must be stored in model.params dict that may contain special keys such as:
'seed_value' to ensure reproducibility
'num_threads', 'num_gpus' to set specific resources in model.fit()
- model.save() must have return_filename, file_prefix, directory options
"""
try:
model, args, util_args = prepare_inputs(args=args)
X_train, y_train = load_pkl.load(util_args.directory + util_args.dataset_train_filename)
X_val, y_val = load_pkl.load(util_args.directory + util_args.dataset_val_filename)
fit_model_args = dict(X_train=X_train, Y_train=y_train, X_test=X_val, Y_test=y_val)
predict_proba_args = dict(X=X_val)
model = fit_and_save_model(model=model, params=args, fit_args=fit_model_args, predict_proba_args=predict_proba_args, y_test=y_val,
time_start=util_args.time_start, time_limit=util_args.get('time_limit', None), reporter=None)
except Exception as e:
if not isinstance(e, TimeLimitExceeded):
logger.exception(e, exc_info=True)
reporter.terminate()
else:
reporter(epoch=1, validation_performance=model.val_score)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
18931,
198,
198,
6738,
19424,
26791,
13,
2220,
364,
1330,
3440,
62,
79,
41582,
198,
6738,
19424,
26791,
13,
1069,
11755,
1330,
3862,
39184,
3109,
2707,
276,
198,
6738,
47082,
7295,
1330,
26498,
... | 2.507891 | 697 |
from typing import Optional
from aioftx.session import FTXClientSession
from .schemas import (
FundingPayment,
GetFundingPaymentsRequest,
GetFundingPaymentsResponse,
)
async def get_funding_payments(
session: FTXClientSession,
*,
future: Optional[str] = None,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
) -> list[FundingPayment]:
"""
Get the funding payments from the FTX API
"""
request = GetFundingPaymentsRequest(
future=future,
start_time=start_time,
end_time=end_time,
)
async with session.get(request.url) as resp:
data = await resp.json()
return GetFundingPaymentsResponse(**data).data()
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
257,
952,
701,
87,
13,
29891,
1330,
19446,
55,
11792,
36044,
198,
198,
6738,
764,
1416,
4411,
292,
1330,
357,
198,
220,
220,
220,
35249,
19197,
434,
11,
198,
220,
220,
220,
3497,
24553,
278,
... | 2.564286 | 280 |
import os
import getpass
from pathlib import Path
from argparse import ArgumentParser
from pyonepassword import OP, OPServerItem
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
651,
6603,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
12972,
505,
28712,
1330,
13349,
11,
40490,
18497,
7449,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
... | 3.372549 | 51 |
import uuid
import msgpack
import redis
| [
11748,
334,
27112,
198,
11748,
31456,
8002,
198,
11748,
2266,
271,
198
] | 3.333333 | 12 |
#Dependencies
from array import array
from operator import mod
from statistics import mode
from unicodedata import name
import praw
import os
from datetime import datetime
import time
from prawcore.exceptions import NotFound
import json
from dotenv import load_dotenv
import scraper as scrape
load_dotenv("./.env")
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
PASSWORD = os.getenv("PASS")
USER_AGENT = os.getenv("USER_AGENT")
USERNAME = os.getenv("USERNAME")
abs_path = os.path.abspath(__file__)
dir_name = os.path.dirname(abs_path)
os.chdir(dir_name)
if __name__ == '__main__':
reddit = praw.Reddit( #instance of praw reddit for API access
client_id = CLIENT_ID,
client_secret = CLIENT_SECRET,
password = PASSWORD,
user_agent = USER_AGENT,
username = USERNAME,
)
reddit.read_only = True;
print()
user_name = GetUsernameInput(reddit)
print()
with open("scraper_output.json", mode='w') as outfile:
json.dump([], outfile, indent=2)
user_as_redditor = reddit.redditor(user_name)
user_info = UserInfo()
user_comments_list = list(user_as_redditor.comments.new(limit=99)).copy() #Limited to 100 historical submissions by Reddit API
user_submissions_list = list(user_as_redditor.submissions.new(limit=99)).copy() #Limited to 100 historical submissions by Reddit API
if user_info.IsSuspended(): #todo issuspended status needs to be updated accurately prior
print("User is shadowbanned - only contains name and is_suspended attributes")
else:
user_info.SetBasicInfo()
user_info.PrintBasicInfo()
user_info.ConvertBasicInfoToTxt()
u1 = TopFiveVotedSubmissionsData()
u1.FindFiveMostVotedSubmissions(user_submissions_list)
u1.PrintFiveMostVotedSubmissions()
u1.ConvertFiveMostVotedSubmissionsToTxt()
u2 = TopFiveVotedCommentsData()
u2.FindFiveMostVotedComments(user_comments_list)
u2.PrintFiveMostVotedComments()
u2.ConvertFiveMostVotedCommentsToTxt()
u3 = VoteDistribution()
u3.FindVoteDistribution(user_comments_list, user_submissions_list)
u3.PrintVoteDistribution()
u3.ConvertVoteDistributionToTxt()
u4 = MostActiveSubs()
u4.FindMostActive(user_comments_list, user_submissions_list)
u4.PrintActiveSubs()
u4.ConvertActiveSubsToTxt()
#test json reader
'''print("")
temp = GetUserFromJson("scraper_output.json")
temp["UserInfo"].PrintBasicInfo()
temp["FiveMostVotedSubmissions"].PrintFiveMostVotedSubmissions()
temp["FiveMostVotedComments"].PrintFiveMostVotedComments()
temp["VoteDistribution"].PrintVoteDistribution()
temp["MostActiveSubreddits"].PrintActiveSubs()'''
print("") | [
2,
35,
2690,
3976,
198,
6738,
7177,
1330,
7177,
198,
6738,
10088,
1330,
953,
198,
6738,
7869,
1330,
4235,
198,
6738,
28000,
9043,
1045,
1330,
1438,
198,
11748,
279,
1831,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
... | 2.457555 | 1,178 |
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired
from .models import AccountType
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
9683,
15878,
11,
39900,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
6060,
37374,
198,
6738,
764,
27530,
1330,
10781,
6030,
19... | 4.04878 | 41 |
import logging
from datetime import datetime
from functools import wraps
LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.5 | 32 |
#!/usr/bin/python
import sys
import struct
import numpy as np
matrix = []
with open('dataset/glove.840B.300d.txt', 'r') as inf:
with open('dataset/glove.840B.300d.dat', 'wb') as ouf:
counter = 0
for line in inf:
row = [float(x) for x in line.split()[1:]]
assert len(row) == 300
ouf.write(struct.pack('i', len(row)))
ouf.write(struct.pack('%sf' % len(row), *row))
counter += 1
matrix.append(np.array(row, dtype=np.float32))
if counter % 10000 == 0:
sys.stdout.write('%d points processed...\n' % counter)
np.save('dataset/glove.840B.300d', np.array(matrix))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
2878,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6759,
8609,
796,
17635,
198,
4480,
1280,
10786,
19608,
292,
316,
14,
4743,
659,
13,
40675,
33,
13,
6200,
67,... | 2.017857 | 336 |
# -*- coding: utf-8 -*-
from builtins import object
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
str
except NameError:
str = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czechia", "CZ", "CZE", "203", u"Czechia"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom of Great Britain and Northern Ireland",
"GB", "GBR", "826",
u"United Kingdom of Great Britain and Northern Ireland"),
Country(u"United States of America", "US", "USA", "840",
u"United States of America"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
# Internal country indexes
_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)
# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
NOT_FOUND = object()
countries = _CountryLookup()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
3170,
1040,
1330,
2134,
198,
11748,
302,
198,
6738,
3146,
1330,
15995,
1373,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
834,
439,
834,
796,
14631,
912... | 2.201094 | 8,046 |
"""user_timezone
Revision ID: 2cd20ff3d23a
Revises: b4b8d57b54e5
Create Date: 2016-11-08 11:32:00.903232
"""
# revision identifiers, used by Alembic.
revision = '2cd20ff3d23a'
down_revision = 'b4b8d57b54e5'
from alembic import op
import sqlalchemy as sa
| [
37811,
7220,
62,
2435,
11340,
198,
198,
18009,
1166,
4522,
25,
362,
10210,
1238,
487,
18,
67,
1954,
64,
198,
18009,
2696,
25,
275,
19,
65,
23,
67,
3553,
65,
4051,
68,
20,
198,
16447,
7536,
25,
1584,
12,
1157,
12,
2919,
1367,
25,
... | 2.26087 | 115 |
import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='lavacord.py',
version='1.0.4a1',
description='Its a lavalink nodes manger to make a music bots for discord with python.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/CraazzzyyFoxx/lavacord.py',
author='CraazzzyyFoxx',
author_email='38073783+CraazzzyyFoxx@users.noreply.github.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.9',
"Programming Language :: Python :: 3.10",
'Programming Language :: Python :: 3 :: Only',
],
keywords='lavalink, discord, discord-lavalink, lavacord.py',
packages=["lavacord", "lavacord.types"],
install_requires=["aiohttp", "hikari", "yarl", "tekore", "pydantic"],
project_urls={
'Bug Reports': 'https://github.com/CraazzzyyFoxx/lavacord.py/issues',
'Source': 'https://github.com/CraazzzyyFoxx/lavacord.py/',
},
)
| [
11748,
3108,
8019,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
1456,
796,
3108,
8019,
13,
15235,
7,
834,
7753,
834,
737,
8000,
13,
411,
6442,
3419,
198,
6511,
62,
11213,
796,
357,
1456,
1220,
705,
15675,
11682,
13,
9132,
... | 2.56513 | 499 |
"""
Chirp CWT with Ricker
=======================
In this example, we analyze a chirp signal with a Ricker (a.k.a. Mexican Hat wavelet)
"""
# Configure JAX to work with 64-bit floating point precision.
from jax.config import config
config.update("jax_enable_x64", True)
# %%
# Let's import necessary libraries
import jax
import numpy as np
import jax.numpy as jnp
# CR.Sparse libraries
import cr.sparse as crs
import cr.sparse.wt as wt
# Utilty functions to construct sinusoids
import cr.sparse.dsp.signals as signals
# Plotting
import matplotlib.pyplot as plt
# %%
# Test signal generation
# ------------------------------
# Sampling frequency in Hz
fs = 100
# Signal duration in seconds
T = 10
# Initial instantaneous frequency for the chirp
f0 = 1
# Final instantaneous frequency for the chirp
f1 = 4
# Construct the chirp signal
t, x = signals.chirp(fs, T, f0, f1, initial_phase=0)
# Plot the chirp signal
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t, x)
ax.grid('on')
# %%
# Power spectrum
# ------------------------------
# Compute the power spectrum
f, sxx = crs.power_spectrum(x, dt=1/fs)
# Plot the power spectrum
fig, ax = plt.subplots(1, figsize=(12,4))
ax.plot(f, sxx)
ax.grid('on')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power')
# %%
# As expected, the power spectrum is able to identify the
# frequencies in the zone 1Hz to 4Hz in the chirp.
# However, the spectrum is unable to localize the
# changes in frequency over time.
# %%
# Ricker/Mexican Hat Wavelet
# ------------------------------
wavelet = wt.build_wavelet('mexh')
# generate the wavelet function for the range of time [-8, 8]
psi, t_psi = wavelet.wavefun()
# plot the wavelet
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(t_psi, psi)
ax.grid('on')
# %%
# Wavelet Analysis
# ------------------------------
# select a set of scales for wavelet analysis
# voices per octave
nu = 8
scales = wt.scales_from_voices_per_octave(nu, jnp.arange(32))
scales = jax.device_get(scales)
# Compute the wavelet analysis
output = wt.cwt(x, scales, wavelet)
# Identify the frequencies for the analysis
frequencies = wt.scale2frequency(wavelet, scales) * fs
# Plot the analysis
cmap = plt.cm.seismic
fig, ax = plt.subplots(1, figsize=(10,10))
title = 'Wavelet Transform (Power Spectrum) of signal'
ylabel = 'Frequency (Hz)'
xlabel = 'Time'
power = (abs(output)) ** 2
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8]
contourlevels = np.log2(levels)
im = ax.contourf(t, jnp.log2(frequencies), jnp.log2(power), contourlevels, extend='both',cmap=cmap)
ax.set_title(title, fontsize=20)
ax.set_ylabel(ylabel, fontsize=18)
ax.set_xlabel(xlabel, fontsize=18)
yticks = 2**np.arange(np.ceil(np.log2(frequencies.min())), np.ceil(np.log2(frequencies.max())))
ax.set_yticks(np.log2(yticks))
ax.set_yticklabels(yticks)
ylim = ax.get_ylim()
| [
37811,
198,
1925,
343,
79,
24006,
51,
351,
8759,
263,
220,
198,
4770,
1421,
18604,
198,
198,
818,
428,
1672,
11,
356,
16602,
257,
442,
343,
79,
6737,
351,
257,
8759,
263,
357,
64,
13,
74,
13,
64,
13,
10816,
10983,
6769,
1616,
8,
... | 2.717036 | 1,039 |
#
# ISC License
#
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
from discord.ext import commands
class Maps(commands.Cog):
"""
Map commands
"""
@commands.command()
async def kalos(self, ctx):
"""Kalos map"""
await self.simple_embed(ctx, "kalos.png", "Kalos Region Map")
@commands.command()
async def r1(self, ctx):
"""Route 1"""
await self.simple_embed(ctx, "r1.png", "Route 1")
@commands.command()
async def r2(self, ctx):
"""Route 2"""
await self.simple_embed(ctx, "r2.png", "Route 2")
@commands.command()
async def r3(self, ctx):
"""Route 3"""
await self.simple_embed(ctx, "r3.png", "Route 3")
@commands.command()
async def r4(self, ctx):
"""Route 4"""
await self.simple_embed(ctx, "r4.png", "Route 4")
@commands.command()
async def r5(self, ctx):
"""Route 5"""
await self.simple_embed(ctx, "r5.png", "Route 5")
@commands.command()
async def r6(self, ctx):
"""Route 6"""
await self.simple_embed(ctx, "r6.png", "Route 6")
@commands.command()
async def r7(self, ctx):
"""Route 7"""
await self.simple_embed(ctx, "r7.png", "Route 7")
@commands.command()
async def r8(self, ctx):
"""Route 8"""
await self.simple_embed(ctx, "r8.png", "Route 8")
@commands.command()
async def r9(self, ctx):
"""Route 9"""
await self.simple_embed(ctx, "r9.png", "Route 9")
@commands.command()
async def r10(self, ctx):
"""Route 10"""
await self.simple_embed(ctx, "r10.png", "Route 10")
@commands.command()
async def r11(self, ctx):
"""Route 11"""
await self.simple_embed(ctx, "r11.png", "Route 11")
@commands.command()
async def r12(self, ctx):
"""Route 12"""
await self.simple_embed(ctx, "r12.png", "Route 12")
@commands.command()
async def r13(self, ctx):
"""Route 13"""
await self.simple_embed(ctx, "r13.png", "Route 13")
@commands.command()
async def r14(self, ctx):
"""Route 14"""
await self.simple_embed(ctx, "r14.png", "Route 14")
@commands.command()
async def r15(self, ctx):
"""Route 15"""
await self.simple_embed(ctx, "r15.png", "Route 15")
@commands.command()
async def r16(self, ctx):
"""Route 16"""
await self.simple_embed(ctx, "r16.png", "Route 16")
@commands.command()
async def r17(self, ctx):
"""Route 17"""
await self.simple_embed(ctx, "r17.png", "Route 17")
@commands.command()
async def r18(self, ctx):
"""Route 18"""
await self.simple_embed(ctx, "r18.png", "Route 18")
@commands.command()
async def r19(self, ctx):
"""Route 19"""
await self.simple_embed(ctx, "r19.png", "Route 19")
@commands.command()
async def r20(self, ctx):
"""Route 20"""
await self.simple_embed(ctx, "r20.png", "Route 20")
@commands.command()
async def r21(self, ctx):
"""Route 21"""
await self.simple_embed(ctx, "r21.png", "Route 21")
@commands.command()
async def r22(self, ctx):
"""Route 22"""
await self.simple_embed(ctx, "r22.png", "Route 22")
@commands.command()
async def vaniville(self, ctx):
"""Vaniville Town"""
await self.simple_embed(ctx, "vaniville.png", "Vaniville Town")
@commands.command()
async def aquacorde(self, ctx):
"""Aquacorde Town"""
await self.simple_embed(ctx, "aquacorde.png", "Aquacorde Town")
@commands.command()
async def santalune(self, ctx):
"""Santalune City"""
await self.simple_embed(ctx, "santalune.png", "Santalune City")
@commands.command()
async def lumiosesouth(self, ctx):
"""Lumiose City South"""
await self.simple_embed(ctx, "lumiosesouth.png", "Lumiose City - South Boulevard")
@commands.command()
async def lumiosenorth(self, ctx):
"""Lumiose City North"""
await self.simple_embed(ctx, "lumiosenorth.png", "Lumiose City - North Boulevard")
@commands.command()
async def camphrier(self, ctx):
"""Camphrier Town"""
await self.simple_embed(ctx, "camphrier.png", "Camphrier Town")
@commands.command()
async def cyllage(self, ctx):
"""Cyllage City"""
await self.simple_embed(ctx, "cyllage.png", "Cyllage City")
@commands.command()
async def ambrette(self, ctx):
"""Ambrette Town"""
await self.simple_embed(ctx, "ambrette.png", "Ambrette Town")
async def geosenge(self, ctx):
"""Geosenge Town"""
await self.simple_embed(ctx, "geosenge.png", "Geosenge Town")
@commands.command()
async def shalour(self, ctx):
"""Shalour City"""
await self.simple_embed(ctx, "shalour.png", "Shalour City")
@commands.command()
async def coumarine(self, ctx):
"""Coumarine City"""
await self.simple_embed(ctx, "coumarine.png", "Coumarine City")
@commands.command()
async def laverre(self, ctx):
"""Laverre City"""
await self.simple_embed(ctx, "laverre.png", "Laverre City")
@commands.command()
async def dendemille(self, ctx):
"""Dendemille Town"""
await self.simple_embed(ctx, "dendemille.png", "Dendemille Town")
@commands.command()
async def anistar(self, ctx):
"""Anistar City"""
await self.simple_embed(ctx, "anistar.png", "Anistar City")
@commands.command()
async def couriway(self, ctx):
"""Couriway Town"""
await self.simple_embed(ctx, "couriway.png", "Couriway Town")
@commands.command()
async def kiloude(self, ctx):
"""Kiloude City"""
await self.simple_embed(ctx, "kiloude.png", "Kiloude City")
| [
2,
198,
2,
3180,
34,
13789,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
12,
25579,
1204,
71,
10735,
71,
504,
349,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,
2,
... | 2.339576 | 2,830 |
from django.contrib import admin
from .models import Post, TagDict
# Register your models here.
admin.site.register(Post)
admin.site.register(TagDict)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
2947,
11,
17467,
35,
713,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
6307,
8,
198,
28482,
13,
15654,
13,
30238,... | 3.163265 | 49 |
from src.day4 import Board, Game, load_data
from unittest.mock import patch, mock_open
EXAMPLE_IN = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
"""
| [
6738,
12351,
13,
820,
19,
1330,
5926,
11,
3776,
11,
3440,
62,
7890,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
15290,
62,
9654,
198,
198,
6369,
2390,
16437,
62,
1268,
796,
37227,
22,
11,
19,
11,
24,
11,
20,
11,
1157,... | 1.980769 | 208 |
#!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from __future__ import print_function
from traceback import format_exc
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
from web import application, ctx, OK, header, InternalError
from rucio.api.authentication import validate_auth_token
from rucio.api.credential import get_signed_url
from rucio.common.exception import RucioException
from rucio.common.utils import generate_http_error
from rucio.web.rest.common import RucioController, check_accept_header_wrapper
URLS = (
'/signurl?$', 'SignURL',
)
class SignURL(RucioController):
"""
Request a signed URL.
"""
def OPTIONS(self):
"""
HTTP Success:
200 OK
Allow cross-site scripting. Explicit for Authorisation.
"""
header('Access-Control-Allow-Origin', ctx.env.get('HTTP_ORIGIN'))
header('Access-Control-Allow-Headers', ctx.env.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS'))
header('Access-Control-Allow-Methods', '*')
header('Access-Control-Allow-Credentials', 'true')
header('Access-Control-Expose-Headers', 'X-Rucio-Auth-Token')
raise OK
@check_accept_header_wrapper(['application/octet-stream'])
def GET(self):
"""
HTTP Success:
200 OK
HTTP Error:
400 Bad Request
401 Unauthorized
406 Not Acceptable
500 Internal Server Error
:param Rucio-VO: VO name as a string (Multi-VO only).
:param Rucio-Account: Account identifier as a string.
:param Rucio-AppID: Application identifier as a string.
:returns: Signed URL.
"""
vo = ctx.env.get('HTTP_X_RUCIO_VO')
account = ctx.env.get('HTTP_X_RUCIO_ACCOUNT')
appid = ctx.env.get('HTTP_X_RUCIO_APPID')
if appid is None:
appid = 'unknown'
ip = ctx.env.get('HTTP_X_FORWARDED_FOR')
if ip is None:
ip = ctx.ip
try:
validate_auth_token(ctx.env.get('HTTP_X_RUCIO_AUTH_TOKEN'))
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0][0])
except Exception as e:
print(format_exc())
raise InternalError(e)
svc, operation, url = None, None, None
try:
params = parse_qs(ctx.query[1:])
lifetime = params.get('lifetime', [600])[0]
service = params.get('svc', ['gcs'])[0]
operation = params.get('op', ['read'])[0]
url = params.get('url', [None])[0]
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
if service not in ['gcs', 's3', 'swift']:
raise generate_http_error(400, 'ValueError', 'Parameter "svc" must be either empty(=gcs), gcs, s3 or swift')
if url is None:
raise generate_http_error(400, 'ValueError', 'Parameter "url" not found')
if operation not in ['read', 'write', 'delete']:
raise generate_http_error(400, 'ValueError', 'Parameter "op" must be either empty(=read), read, write, or delete.')
try:
result = get_signed_url(account, appid, ip, service=service, operation=operation, url=url, lifetime=lifetime, vo=vo)
except RucioException as e:
raise generate_http_error(500, e.__class__.__name__, e.args[0])
except Exception as e:
print(format_exc())
raise InternalError(e)
if not result:
raise generate_http_error(401, 'CannotAuthenticate', 'Cannot generate signed URL for account %(account)s' % locals())
return result
"""----------------------
Web service startup
----------------------"""
APP = application(URLS, globals())
application = APP.wsgifunc()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
2321,
12,
7908,
327,
28778,
329,
262,
4414,
286,
262,
41051,
1921,
12438,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 2.453919 | 1,888 |
__author__ = 'dkarchmer'
import datetime
import json
import logging
import pprint
import boto3
from django.conf import settings
from .common import AWS_REGION
# Get an instance of a logger
logger = logging.getLogger(__name__)
FIREHOSE_STREAM_NAME = getattr(settings, 'FIREHOSE_STREAM_NAME')
firehose_client = boto3.client('firehose', region_name=AWS_REGION)
| [
834,
9800,
834,
796,
705,
34388,
998,
647,
6,
198,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
279,
4798,
198,
198,
11748,
275,
2069,
18,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
673... | 2.816794 | 131 |
import sys
from PySide6 import QtGui
| [
11748,
25064,
198,
6738,
9485,
24819,
21,
1330,
33734,
8205,
72,
198
] | 3.083333 | 12 |
file_object.writelines(list_of_text_strings)
open('abinfile', 'wb').writelines(list_of_data_strings)
| [
7753,
62,
15252,
13,
8933,
20655,
7,
4868,
62,
1659,
62,
5239,
62,
37336,
8,
198,
9654,
10786,
6014,
7753,
3256,
705,
39346,
27691,
8933,
20655,
7,
4868,
62,
1659,
62,
7890,
62,
37336,
8,
198
] | 2.805556 | 36 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import six.moves.urllib.parse # pylint: disable=import-error
from core import benchmark_finders
from core import benchmark_utils
from telemetry.story import story_filter
_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')
_ALL_BENCHMARKS_BY_NAMES = dict(
(b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())
OFFICIAL_BENCHMARKS = frozenset(
b for b in benchmark_finders.GetOfficialBenchmarks()
if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
GTEST_STORY_NAME = '_gtest_'
# Global |benchmarks| is convenient way to keep BenchmarkConfig objects
# unique, which allows us to use set subtraction below.
benchmarks = {b.Name(): {True: BenchmarkConfig(b, abridged=True),
False: BenchmarkConfig(b, abridged=False)}
for b in ALL_SCHEDULEABLE_BENCHMARKS}
OFFICIAL_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig(b.Name()) for b in OFFICIAL_BENCHMARKS])
# power.mobile requires special hardware.
# only run blink_perf.sanitizer-api on linux-perf.
OFFICIAL_BENCHMARK_CONFIGS = OFFICIAL_BENCHMARK_CONFIGS.Remove([
'power.mobile',
'blink_perf.sanitizer-api',
])
# TODO(crbug.com/965158): Remove OFFICIAL_BENCHMARK_NAMES once sharding
# scripts are no longer using it.
OFFICIAL_BENCHMARK_NAMES = frozenset(
b.name for b in OFFICIAL_BENCHMARK_CONFIGS.Frozenset())
# TODO(crbug.com/1030840): Stop using these 'OFFICIAL_EXCEPT' suites and instead
# define each benchmarking config separately as is already done for many of the
# suites below.
_OFFICIAL_EXCEPT_DISPLAY_LOCKING = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking'])
_OFFICIAL_EXCEPT_JETSTREAM2 = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove(
['jetstream2'])
_OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove(
['blink_perf.display_locking', 'jetstream2'])
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP = PerfSuite([
_GetBenchmarkConfig('system_health.common_desktop')
])
_LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
]).Add([
'blink_perf.sanitizer-api',
])
_LINUX_EXECUTABLE_CONFIGS = frozenset([
# TODO(crbug.com/811766): Add views_perftests.
_base_perftests(200),
_load_library_perf_tests(),
_performance_browser_tests(165),
_tracing_perftests(5),
])
_MAC_HIGH_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_HIGH_END_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_MAC_LOW_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'jetstream2',
'v8.runtime_stats.top_25',
])
_MAC_LOW_END_EXECUTABLE_CONFIGS = frozenset([
_load_library_perf_tests(),
_performance_browser_tests(210),
])
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(300),
_dawn_perf_tests(330),
_performance_browser_tests(190),
_views_perftests(),
])
_WIN_10_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_WIN_10_EXECUTABLE_CONFIGS = frozenset([
_base_perftests(200),
_components_perftests(125),
_dawn_perf_tests(600),
_views_perftests(),
])
_WIN_10_LOW_END_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
])
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_desktop'),
_GetBenchmarkConfig('rendering.desktop', abridged=True),
])
_WIN_10_AMD_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('jetstream2'),
_GetBenchmarkConfig('kraken'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('system_health.common_desktop'),
])
_WIN_7_BENCHMARK_CONFIGS = PerfSuite([
'loading.desktop',
]).Abridge([
'loading.desktop',
])
_WIN_7_GPU_BENCHMARK_CONFIGS = PerfSuite(['rendering.desktop']).Abridge(
['rendering.desktop'])
_ANDROID_GO_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.webview_startup'),
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('speedometer'),
_GetBenchmarkConfig('speedometer2')])
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS = _ANDROID_GO_BENCHMARK_CONFIGS
# Note that Nexus 5 bot capacity is very low, so we must severely limit
# the benchmarks that we run on it and abridge large benchmarks in order
# to run them on it. See crbug.com/1030840 for details.
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS = PerfSuite([
'loading.mobile',
'startup.mobile',
'system_health.common_mobile',
'system_health.webview_startup',
]).Abridge(['loading.mobile', 'startup.mobile', 'system_health.common_mobile'])
_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(100),
_gpu_perftests(45),
_tracing_perftests(55),
])
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL2_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING).Remove(['system_health.weblayer_startup'])
_ANDROID_PIXEL4_EXECUTABLE_CONFIGS = frozenset([
_components_perftests(60),
])
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'jetstream2',
'system_health.weblayer_startup',
'v8.browsing_mobile-future',
])
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile', True),
_GetBenchmarkConfig('system_health.memory_mobile', True),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('system_health.weblayer_startup')
])
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.mobile'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('system_health.scroll_jank_mobile')])
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('startup.mobile')])
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('v8.browsing_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('startup.mobile'),
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('rendering.mobile'),
_GetBenchmarkConfig('octane'),
_GetBenchmarkConfig('jetstream'),
_GetBenchmarkConfig('system_health.scroll_jank_mobile')
])
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('rendering.desktop')])
_LACROS_EVE_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
'blink_perf.display_locking',
'v8.runtime_stats.top_25',
])
_LINUX_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('power.desktop'),
_GetBenchmarkConfig('rendering.desktop'),
_GetBenchmarkConfig('system_health.common_desktop')
])
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_desktop'),
_GetBenchmarkConfig('media.mobile')
])
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('speedometer2'),
_GetBenchmarkConfig('blink_perf.shadow_dom'),
])
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.common_mobile'),
_GetBenchmarkConfig('system_health.memory_mobile'),
])
# Linux
LINUX = PerfPlatform(
'linux-perf',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_BENCHMARK_CONFIGS,
26,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
LINUX_REL = PerfPlatform(
'linux-perf-rel',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_CHROME_HEALTH_BENCHMARK_CONFIGS_DESKTOP,
2,
'linux',
executables=_LINUX_EXECUTABLE_CONFIGS)
# Mac
MAC_HIGH_END = PerfPlatform(
'mac-10_13_laptop_high_end-perf',
'MacBook Pro, Core i7 2.8 GHz, 16GB RAM, 256GB SSD, Radeon 55',
_MAC_HIGH_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_HIGH_END_EXECUTABLE_CONFIGS)
MAC_LOW_END = PerfPlatform(
'mac-10_12_laptop_low_end-perf',
'MacBook Air, Core i5 1.8 GHz, 8GB RAM, 128GB SSD, HD Graphics',
_MAC_LOW_END_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_LOW_END_EXECUTABLE_CONFIGS)
MAC_M1_MINI_2020 = PerfPlatform(
'mac-m1_mini_2020-perf',
'Mac M1 Mini 2020',
_MAC_M1_MINI_2020_BENCHMARK_CONFIGS,
26,
'mac',
executables=_MAC_M1_MINI_2020_EXECUTABLE_CONFIGS)
# Win
WIN_10_LOW_END = PerfPlatform(
'win-10_laptop_low_end-perf',
'Low end windows 10 HP laptops. HD Graphics 5500, x86-64-i3-5005U, '
'SSD, 4GB RAM.',
_WIN_10_LOW_END_BENCHMARK_CONFIGS,
# TODO(crbug.com/998161): Increase the number of shards once you
# have enough test data to make a shard map and when more devices
# are added to the data center.
46,
'win')
WIN_10 = PerfPlatform(
'win-10-perf',
'Windows Intel HD 630 towers, Core i7-7700 3.6 GHz, 16GB RAM,'
' Intel Kaby Lake HD Graphics 630', _WIN_10_BENCHMARK_CONFIGS,
26, 'win', executables=_WIN_10_EXECUTABLE_CONFIGS)
WIN_10_AMD = PerfPlatform('win-10_amd-perf', 'Windows AMD chipset',
_WIN_10_AMD_BENCHMARK_CONFIGS, 2, 'win')
WIN_7 = PerfPlatform('Win 7 Perf', 'N/A', _WIN_7_BENCHMARK_CONFIGS, 2, 'win')
WIN_7_GPU = PerfPlatform('Win 7 Nvidia GPU Perf', 'N/A',
_WIN_7_GPU_BENCHMARK_CONFIGS, 3, 'win')
# Android
ANDROID_GO = PerfPlatform(
'android-go-perf', 'Android O (gobo)', _ANDROID_GO_BENCHMARK_CONFIGS,
19, 'android')
ANDROID_GO_WEBVIEW = PerfPlatform('android-go_webview-perf',
'Android OPM1.171019.021 (gobo)',
_ANDROID_GO_WEBVIEW_BENCHMARK_CONFIGS, 13,
'android')
ANDROID_NEXUS_5 = PerfPlatform('Android Nexus5 Perf',
'Android KOT49H',
_ANDROID_NEXUS_5_BENCHMARK_CONFIGS,
10,
'android',
executables=_ANDROID_NEXUS_5_EXECUTABLE_CONFIGS)
ANDROID_NEXUS_5X_WEBVIEW = PerfPlatform(
'Android Nexus5X WebView Perf', 'Android AOSP MOB30K',
_ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS, 16, 'android')
ANDROID_PIXEL2 = PerfPlatform('android-pixel2-perf',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL2_EXECUTABLE_CONFIGS)
ANDROID_PIXEL2_WEBVIEW = PerfPlatform(
'android-pixel2_webview-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL2_WEBLAYER = PerfPlatform(
'android-pixel2_weblayer-perf', 'Android OPM1.171019.021',
_ANDROID_PIXEL2_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4 = PerfPlatform('android-pixel4-perf',
'Android R',
_ANDROID_PIXEL4_BENCHMARK_CONFIGS,
28,
'android',
executables=_ANDROID_PIXEL4_EXECUTABLE_CONFIGS)
ANDROID_PIXEL4_WEBVIEW = PerfPlatform(
'android-pixel4_webview-perf', 'Android R',
_ANDROID_PIXEL4_WEBVIEW_BENCHMARK_CONFIGS, 21, 'android')
ANDROID_PIXEL4_WEBLAYER = PerfPlatform(
'android-pixel4_weblayer-perf', 'Android R',
_ANDROID_PIXEL4_WEBLAYER_BENCHMARK_CONFIGS, 4, 'android')
ANDROID_PIXEL4A_POWER = PerfPlatform('android-pixel4a_power-perf',
'Android QD4A.200102.001.A1',
_ANDROID_PIXEL4A_POWER_BENCHMARK_CONFIGS,
1, 'android')
# Cros/Lacros
LACROS_EVE_PERF = PerfPlatform('lacros-eve-perf', '',
_LACROS_EVE_BENCHMARK_CONFIGS, 10, 'chromeos')
# FYI bots
WIN_10_LOW_END_HP_CANDIDATE = PerfPlatform(
'win-10_laptop_low_end-perf_HP-Candidate', 'HP 15-BS121NR Laptop Candidate',
_WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS,
1, 'win', is_fyi=True)
ANDROID_NEXUS5X_PERF_FYI = PerfPlatform('android-nexus5x-perf-fyi',
'Android MMB29Q',
_ANDROID_NEXUS5X_FYI_BENCHMARK_CONFIGS,
2,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_AAB_FYI = PerfPlatform(
'android-pixel2-perf-aab-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_AAB_FYI_BENCHMARK_CONFIGS,
1,
'android',
is_fyi=True)
ANDROID_PIXEL2_PERF_FYI = PerfPlatform('android-pixel2-perf-fyi',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_FYI_BENCHMARK_CONFIGS,
4,
'android',
is_fyi=True)
CHROMEOS_KEVIN_PERF_FYI = PerfPlatform('chromeos-kevin-perf-fyi',
'',
_CHROMEOS_KEVIN_FYI_BENCHMARK_CONFIGS,
4,
'chromeos',
is_fyi=True)
LINUX_PERF_FYI = PerfPlatform('linux-perf-fyi',
'',
_LINUX_PERF_FYI_BENCHMARK_CONFIGS,
1,
'linux',
is_fyi=True)
FUCHSIA_PERF_FYI = PerfPlatform('fuchsia-perf-fyi',
'',
_FUCHSIA_PERF_FYI_BENCHMARK_CONFIGS,
7,
'fuchsia',
is_fyi=True)
# Calibration bots
LINUX_PERF_CALIBRATION = PerfPlatform(
'linux-perf-calibration',
'Ubuntu-18.04, 8 core, NVIDIA Quadro P400',
_LINUX_PERF_CALIBRATION_BENCHMARK_CONFIGS,
28,
'linux',
is_calibration=True)
ANDROID_PIXEL2_PERF_CALIBRATION = PerfPlatform(
'android-pixel2-perf-calibration',
'Android OPM1.171019.021',
_ANDROID_PIXEL2_PERF_CALIBRATION_BENCHMARK_CONFIGS,
42,
'android',
is_calibration=True)
ALL_PLATFORMS = {
p for p in locals().values() if isinstance(p, PerfPlatform)
}
PLATFORMS_BY_NAME = {p.name: p for p in ALL_PLATFORMS}
FYI_PLATFORMS = {
p for p in ALL_PLATFORMS if p.is_fyi
}
CALIBRATION_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_calibration}
OFFICIAL_PLATFORMS = {p for p in ALL_PLATFORMS if p.is_official}
ALL_PLATFORM_NAMES = {
p.name for p in ALL_PLATFORMS
}
OFFICIAL_PLATFORM_NAMES = {
p.name for p in OFFICIAL_PLATFORMS
}
| [
2,
15069,
2864,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
11748,
286... | 1.984234 | 8,436 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that removes operations with tiny effects."""
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
"""An optimization pass that removes operations with tiny effects."""
| [
2,
15069,
2864,
383,
21239,
80,
34152,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 3.762774 | 274 |
from datetime import datetime
from typing import Optional
import attr
@attr.dataclass
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
708,
81,
628,
198,
31,
35226,
13,
19608,
330,
31172,
198
] | 3.56 | 25 |
import itertools
import json
import re
from collections import OrderedDict, defaultdict
from os.path import abspath, dirname, join
import click
def ascii_encode(non_compatible_string):
    """Strip non-ASCII characters so text renders on any terminal.

    Falsy input (None or the empty string) yields the empty string.
    """
    if not non_compatible_string:
        return ""
    return non_compatible_string.encode("ascii", errors="ignore").decode("ascii")
def regex_manifest(protocol, input):
    """Validate special manifest input types (currently only "choice").

    A "choice" input must declare an `options` list shaped like
    [{"value": ..., "label": ...}, ...]; otherwise an error is echoed via
    click and RuntimeError is raised.

    NOTE(review): the parameter `input` shadows the builtin of the same
    name; renaming would break keyword callers, so it is left as-is.

    :param protocol: protocol entry from the manifest ("name" is used in
        error messages)
    :param input: one input definition from the protocol
    :raises RuntimeError: if a "choice" input is missing or malformed options
    """
    if "type" in input and input["type"] == "choice":
        if "options" in input:
            # Crude shape check: the stringified options must contain a
            # bracketed [...] list somewhere.
            pattern = r"\[(.*?)\]"
            match = re.search(pattern, str(input["options"]))
            if not match:
                click.echo(
                    'Error in %s: input type "choice" options must '
                    'be in the form of: \n[\n {\n "value": '
                    '<choice value>, \n "label": <choice label>\n '
                    "},\n ...\n]" % protocol["name"]
                )
                raise RuntimeError
        else:
            click.echo(
                f"Must have options for 'choice' input type. Error in: {protocol['name']}"
            )
            raise RuntimeError
def makedirs(name, mode=None, exist_ok=False):
    """Forward ports `exist_ok` flag for Py2 makedirs. Retains mode defaults.

    :param name: directory path to create (intermediate dirs included)
    :param mode: permission bits; defaults to 0o777 like os.makedirs
    :param exist_ok: when True, an existing leaf directory is not an error
    """
    import os
    if mode is None:
        mode = 0o777
    os.makedirs(name, mode=mode, exist_ok=exist_ok)
class PreviewParameters:
"""
A PreviewParameters object modifies web browser quick launch parameters and
modifies them for application protocol testing and debugging.
Attributes
------
api : object
the Connection object to provide session for using api endpoints
quick_launch_params: dict
web browser generated inputs for quick launch
selected_samples: defaultdict
all aliquots selected through the web quick launch manifest
modified_params: dict
the modified quick launch launch parameters, converts quick launch
aliquot objects into strings for debugging
refs: dict
all unique refs seen in the quick launch parameters
preview: dict
the combination of refs and modified_params for scientific
application debugging
protocol_obj: dict
the protocol object from the manifest
"""
    def __init__(self, api, quick_launch_params, protocol_obj):
        """
        Initialize TestParameter by providing a web generated params dict.
        Parameters
        ----------
        api: object
            Connection object used to fetch containers while building refs
        quick_launch_params: dict
            web browser generated inputs for quick launch
        protocol_obj: dict
            the protocol object from the manifest
        """
        self.api = api
        self.protocol_obj = protocol_obj
        # Containers fetched from the API, keyed by container id, so the
        # same container is never fetched twice.
        self.container_cache = {}
        # container id -> list of selected well indices (None = whole container).
        self.selected_samples = {}
        self.csv_templates = {}
        self.quick_launch_params = quick_launch_params
        # Building the preview eagerly populates the cache/selection state above.
        self.preview = self.build_preview()
    def build_preview(self):
        """Builds preview parameters.

        Order matters: `modify_preview_parameters` fills `modified_params`
        and the selection/cache state that `generate_refs` then reads.

        Returns
        -------
        defaultdict
            {"preview": {"parameters": ..., "refs": ...}}
        """
        self.modify_preview_parameters()
        self.refs = self.generate_refs()
        preview = defaultdict(lambda: defaultdict(dict))
        preview["preview"]["parameters"].update(self.modified_params)
        preview["preview"].update(self.refs)
        return preview
    def adjust_csv_table_input_type(self):
        """
        Traverses the protocol object from the manifest to find any csv-table
        input types. If it finds one it creates the headers and modifies the
        modified_params that eventually will be the preview parameters for
        autoprotocol testing.

        NOTE(review): `traverse_protocol_obj` is not defined in the code
        visible here -- presumably implemented further down this class;
        confirm it exists before relying on this method.
        """
        self.traverse_protocol_obj(self.protocol_obj["inputs"])
    def modify_preview_parameters(self):
        """
        This method will traverse the quick launch 'raw_inputs' and modify
        container ids and aliquot dicts into a preview parameter container
        string for autoprotocol generation debugging.

        Side effects: sets `self.modified_params`, then lets
        `adjust_csv_table_input_type` patch any csv-table inputs in place.
        """
        self.modified_params = self.traverse_quick_launch(
            obj=self.quick_launch_params, callback=self.create_preview_string
        )
        self.adjust_csv_table_input_type()
    def generate_refs(self):
        """
        This method takes the aggregated containers and aliquots to produce
        the refs aliquot values

        Reads `selected_samples` / `container_cache` (populated while
        traversing the quick-launch params) and builds the "refs" mapping:
        container name -> label/type/store/cover/properties/aliquots.
        """
        ref_dict = defaultdict(lambda: defaultdict(dict))
        ref_dict["refs"] = {}
        for cid, index_arr in self.selected_samples.items():
            container = self.container_cache.get(cid)
            cont_name = PreviewParameters.format_container_name(container)
            ref_dict["refs"][cont_name] = {
                "label": cont_name,
                "type": container.get("container_type").get("id"),
                "store": container.get("storage_condition"),
                "cover": container.get("cover", None),
                "properties": container.get("properties"),
                "aliquots": {},
            }
            # A None entry means the whole container was selected without a
            # specific well (see add_to_selected); only per-well selections
            # go through get_selected_aliquots.
            if None not in index_arr:
                ref_dict["refs"][cont_name]["aliquots"] = self.get_selected_aliquots(
                    container, index_arr
                )
            elif container.get("aliquots", None):
                # Whole-container selection: copy every aliquot the record has.
                for ali in container.get("aliquots"):
                    ref_dict["refs"][cont_name]["aliquots"][ali["well_idx"]] = {
                        "name": ali["name"],
                        "volume": ali["volume_ul"] + ":microliter",
                        "properties": ali["properties"],
                    }
        return ref_dict
def traverse_quick_launch(self, obj, callback=None):
"""
Will traverse quick launch object and send value to a callback
action method.
"""
if isinstance(obj, dict):
# If object has 'containerId' and 'wellIndex', then it is an aliquot
if "containerId" and "wellIndex" in obj.keys():
return self.create_string_from_aliquot(value=obj)
else:
value = {
k: self.traverse_quick_launch(v, callback) for k, v in obj.items()
}
elif isinstance(obj, list):
return [self.traverse_quick_launch(elem, callback) for elem in obj]
else:
value = obj
if callback is None:
return value
else:
return callback(value)
def add_to_cache(self, container_id):
"""Adds requested container to cache for later use"""
if container_id in self.container_cache:
container = self.container_cache[container_id]
else:
container = self.api.get_container(container_id)
self.container_cache[container_id] = container
return container
    def create_string_from_aliquot(self, value):
        """Creates preview aliquot representation.

        Collapses an aliquot dict ({"containerId": ..., "wellIndex": ...})
        into the "container_name/well_idx" string used by preview params,
        and records the selection for later refs generation.
        """
        well_idx = value.get("wellIndex")
        container_id = value.get("containerId")
        container = self.add_to_cache(container_id)
        cont_name = PreviewParameters.format_container_name(container)
        self.add_to_selected(container_id, well_idx)
        return "{}/{}".format(cont_name, well_idx)
def create_preview_string(self, value):
"""Creates preview parameters string representation"""
if isinstance(value, str):
if value[:2] == "ct":
container_id = value
container = self.add_to_cache(container_id)
cont_name = PreviewParameters.format_container_name(container)
self.add_to_selected(container_id)
return cont_name
else:
return value
else:
return value
def add_to_selected(self, container_id, well_idx=None):
"""Saves which containers were selected."""
if container_id in self.selected_samples:
self.selected_samples[container_id].append(well_idx)
else:
self.selected_samples[container_id] = [well_idx]
    def get_selected_aliquots(self, container, index_arr):
        """Grabs the properties from the selected aliquots.

        :param container: container record holding an "aliquots" list
        :param index_arr: selected well indices for that container
        :return: dict well_idx -> {"name", "volume", "properties"}
        """
        ref_aliquots = dict()
        container_aliquots = {
            ali.get("well_idx"): ali for ali in container.get("aliquots")
        }
        for i in index_arr:
            # Falls back to the container record itself when the requested
            # well has no aliquot entry, and to a volume of 10 when
            # volume_ul is absent. TODO(review): confirm both fallbacks are
            # intended rather than masking missing data.
            ali = container_aliquots.get(i, container)
            ref_aliquots[i] = {
                "name": ali.get("name"),
                "volume": "{}:microliter".format(ali.get("volume_ul", 10)),
                "properties": ali.get("properties"),
            }
        return ref_aliquots
@classmethod
| [
11748,
340,
861,
10141,
198,
11748,
33918,
198,
11748,
302,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
11,
4277,
11600,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
11,
4654,
198,
198,
11748,
3904,
628,
628,
... | 2.304744 | 3,731 |
# MongoDB bootstrap: credentials come from a .env file, never hard-coded.
import os

from dotenv import load_dotenv, find_dotenv
from pymongo import MongoClient

load_dotenv(find_dotenv())
# Connection string read from the environment variable "mongo_url".
mongo_url = os.getenv("mongo_url")
myclient = MongoClient(mongo_url)
mydb_master = myclient["SCDF"]
# Collection handle used by the rest of the module.
col = mydb_master["investigacoes"]
11748,
28686,
198,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
11,
1064,
62,
26518,
24330,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
198,
2220,
62,
26518,
24330,
7,
19796,
62,
26518,
24330,
28955,
198,
76,
25162,
... | 2.766667 | 90 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 CS GROUP - France.
#
# This file is part of EOTile.
# See https://github.com/CS-SI/eotile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EO tile
:author: mgerma
:organization: CS GROUP - France
:copyright: 2021 CS GROUP - France. All rights reserved.
:license: see LICENSE file.
"""
import argparse
import logging
import sys
from pathlib import Path
from geopy.geocoders import Nominatim
from eotile import eotile_module
from eotile.eotiles.eotiles import write_tiles_bb
def build_parser():
    """Creates a parser suitable for parsing a command line invoking this program.

    :return: An parser.
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Input selection
    add("input",
        help="Choose amongst : a file, a tile_id, a location, a wkt, a bbox")
    add("-epsg", help="Specify the epsg of the input")
    add("-no_l8", action="store_true", help="output L8 tiles")
    add("-no_s2", action="store_true", help="Disable S2 tiles")
    add("-dem", action="store_true", help='Use DEM 1" tiles as well')
    add("-srtm5x5", action="store_true",
        help="Use specific srtm 5x5 tiles as well")

    # Output arguments
    add("-to_file", help="Write tiles to a file")
    add("-to_wkt", action="store_true",
        help="Output the geometry of matching tiles with wkt format on standard output")
    add("-to_bbox", action="store_true",
        help="Output the bounding box of matching tiles on standard output")
    add("-to_tile_id", action="store_true",
        help="Output the id(s) of matching tiles on standard output")
    add("-to_location", action="store_true",
        help="Output the location of the centroid of matching tiles "
        "on standard output")
    add("-s2_overlap", action="store_true",
        help="Do you want to have overlaps on S2 tiles ?")

    # Diagnostics and tuning
    add("-v", "--verbose", action="count", help="Increase output verbosity")
    add("-logger_file",
        help="Redirect information from standard output to a file")
    add("-location_type",
        help="If needed, specify the location type that is requested (city, county, state, country)")
    add("-threshold",
        help="For large polygons at high resolution, you might want to simplify them using a threshold"
        "(0 to 1)")
    add("-min_overlap",
        help="Minimum percentage of overlap to consider a tile (0 to 1)")
    return parser
def build_output(source, tile_list, user_logger, message, args):
    """
    Sub-function of the main
    Formats an output depending on a specified message & arguments over a dataframe pandas of tiles.
    :param source: Type of the source (DEM, S2, L8)
    :type source: str
    :param user_logger: LOGGER to log the message to
    :type user_logger: logging.LOGGER
    :param tile_list: pandas dataframe of the tiles to format
    :type tile_list: pandas DataFrame
    :param message: The message to format
    :type message: str
    :param args: fields to look in
    :type args: list
    """

    def _select_columns(base_columns):
        # "bounds" is derived from the geometry column; every other field
        # is read from the column of the same name.
        columns = list(base_columns)
        for elt in args:
            columns.append("geometry" if elt == "bounds" else elt)
        return columns

    def _format_fields(row):
        # Resolve each requested field of one tile row to its printable form.
        arguments = []
        for arg in args:
            if arg == "geometry":
                arguments.append(row["geometry"].wkt)
            elif arg == "bounds":
                arguments.append(row["geometry"].bounds)
            else:
                arguments.append(str(row[arg]))
        return arguments

    # The two branches previously duplicated the column-selection and
    # field-formatting loops; they now share the helpers above.
    if source != "DEM":
        for _, row in tile_list[_select_columns([])].iterrows():
            user_logger.info(message.format(source, *_format_fields(row)))
    else:
        # DEM tiles additionally report which elevation datasets exist,
        # replacing the source name in the formatted message.
        dem_flags = ["EXIST_SRTM", "EXIST_COP30", "EXIST_COP90"]
        for _, row in tile_list[_select_columns(dem_flags)].iterrows():
            availability = []
            if row["EXIST_SRTM"]:
                availability.append("SRTM")
            if row["EXIST_COP30"]:
                availability.append("Copernicus 30")
            if row["EXIST_COP90"]:
                availability.append("Copernicus 90")
            user_logger.info(
                message.format(", ".join(availability), *_format_fields(row)))
def main(arguments=None):
    """
    Command line interface to perform
    :param list arguments: list of arguments

    Parses the CLI, asks eotile_module for the matching tiles per source
    (S2, L8, DEM, SRTM 5x5) and emits them in exactly one of the output
    formats selected by the mutually-exclusive -to_* flags (file, wkt,
    bbox, tile id, location, or the default id+geometry dump), followed
    by a per-source summary count.
    """
    arg_parser = build_parser()
    args = arg_parser.parse_args(args=arguments)
    # One tile DataFrame per source, in the same order as tile_sources below.
    [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5] = eotile_module.main(
        args.input,
        args.logger_file,
        args.no_l8,
        args.no_s2,
        args.dem,
        args.srtm5x5,
        args.location_type,
        args.min_overlap,
        args.epsg,
        args.threshold,
        args.verbose,
        args.s2_overlap,
    )
    tile_sources = ["S2", "L8", "DEM", "SRTM 5x5"]
    user_logger = logging.getLogger("user_logger")
    # Outputting the result
    tile_lists = [tile_list_s2, tile_list_l8, tile_list_dem, tile_list_srtm5x5]
    if args.to_file is not None:
        output_path = Path(args.to_file)
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                if output_path.suffix == ".gpkg":
                    # Using layers method to combine sources if geopackage
                    write_tiles_bb(tile_list, output_path, source=source)
                else:
                    # Else, we split into several files
                    write_tiles_bb(
                        tile_list,
                        output_path.with_name(
                            output_path.stem + "_" + source + output_path.suffix
                        ),
                    )
    elif args.to_wkt:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile: {}", ["geometry"]
                )
    elif args.to_bbox:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source, tile_list, user_logger, "[{}] Tile Bounds: {}", ["bounds"]
                )
    elif args.to_tile_id:
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(source, tile_list, user_logger, "[{}] Tile id: {}", ["id"])
    elif args.to_location:
        # Reverse-geocode each tile centroid; coords come back (lon, lat)
        # and are reversed to the (lat, lon) order Nominatim expects.
        geolocator = Nominatim(user_agent="EOTile")
        for tile_list in tile_lists:
            if len(tile_list) > 0:
                for elt in tile_list["geometry"]:
                    centroid = list(list(elt.centroid.coords)[0])
                    centroid.reverse()
                    location = geolocator.reverse(centroid, language="en")
                    if location is not None:
                        user_logger.info(str(location))
    else:
        # Default output: id plus wkt geometry for every tile.
        for i, tile_list in enumerate(tile_lists):
            source = tile_sources[i]
            if len(tile_list) > 0:
                build_output(
                    source,
                    tile_list,
                    user_logger,
                    "[{} tile]\n {}\n {}",
                    ["id", "geometry"],
                )
    # counts
    user_logger.info("--- Summary ---")
    for i, tile_list in enumerate(tile_lists):
        source = tile_sources[i]
        if len(tile_list) > 0:
            user_logger.info("- %s %s Tiles", len(tile_list), source)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
9429,
44441,
532,
4881,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
412,
2394,
576,
13,
198,
2,
4091,
3740,
1378,
12567,
13,... | 2.144525 | 4,283 |
from rest_framework.viewsets import ViewSet
from backend.models import AwsEnvironmentModel, TenantModel
from backend.serializers.aws_environment_model_serializer import (AwsEnvironmentModelGetDetailSerializer,
AwsEnvironmentModelCreateSerializer,
AwsEnvironmentModelUpdateSerializer)
from backend.usecases.control_aws_environment import ControlAwsEnvironment
from rest_framework.response import Response
from rest_framework import status
from backend.logger import NarukoLogging
from django.db import transaction
from rest_framework.decorators import action
| [
6738,
1334,
62,
30604,
13,
1177,
28709,
1330,
3582,
7248,
201,
198,
6738,
30203,
13,
27530,
1330,
5851,
82,
31441,
17633,
11,
9368,
415,
17633,
201,
198,
6738,
30203,
13,
46911,
11341,
13,
8356,
62,
38986,
62,
19849,
62,
46911,
7509,
... | 2.547101 | 276 |
# Packaging metadata for the openabis-fingerjetfx plugin.
# NOTE(review): distutils is deprecated (removed in Python 3.12) -- consider
# migrating to setuptools.
from distutils.core import setup

setup(
    name='openabis-fingerjetfx',
    version='0.0.1',
    packages=['openabis_fingerjetfx'],
    url='https://github.com/newlogic42/openabis-fingerjetfx',
    license='Apache License',
    author='newlogic42',
    author_email='',
    description='OpenAbis\' plugin implementation of FingerJetFXOSE/FingerJetFXOSE.',
    install_requires=[
        'pillow==6.2.1'
    ],
    # Ship every non-Python file found in the package directories.
    package_data={
        '': ['*'],
    }
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9654,
8102,
12,
35461,
31173,
21373,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
10392,
28,
17816... | 2.397906 | 191 |
import memory
import instructions
# Status-register flag bit masks for the emulated CPU. The M/X size bits and
# the SNES/emulation-flag notes suggest a 65816 core -- TODO confirm.
cpu_flags = {
    "N": 0x80, # negative
    "V": 0x40, # overflow
    "M": 0x20, # accumulator size (set => 8bits)
    "X": 0x10, # index size (set => 8bits)
    "D": 0x08, # decimal flag (does nothing on SNES, I think)
    "I": 0x04, # IRQ disabled when set
    "Z": 0x02, # zero
    "C": 0x01 # carry (can be copied to the emulation flag)
}

if __name__ == "__main__":
    # NOTE(review): main() is not defined in this file's visible code --
    # running the module directly raises NameError. Confirm where main is
    # expected to come from.
    main()
| [
11748,
4088,
198,
11748,
7729,
198,
198,
36166,
62,
33152,
796,
1391,
198,
220,
220,
220,
366,
45,
1298,
657,
87,
1795,
11,
1303,
4633,
198,
220,
220,
220,
366,
53,
1298,
657,
87,
1821,
11,
1303,
30343,
198,
220,
220,
220,
366,
44... | 2.380682 | 176 |
import cv2
import json
import statistics
import matplotlib.pyplot as plt
import numpy as np
import libs.method.QcImage as QcImage
import libs.method.SICCalibrationRegression_MB3 as SICCalibrationRegression_MB3
from libs.model.TrainingSet import TrainingSet
# Paths to the labelled color-chart dataset and the rectangle scale used to
# de-normalise annotation sizes.
JSON_PATH = 'Dataset/data_color_chart/tags.json'
IMAGE_PATH = 'Dataset/data_color_chart/'
RECT_SCALE = 1000

if __name__ == "__main__":
    jsonPath = JSON_PATH
    imagePath = IMAGE_PATH
    # vis: show each training image with its labels; channel: which of
    # blue/green/red regressions to run below.
    vis = False
    channel = 'green'
    # train
    with open(jsonPath) as json_data:
        objs = json.load(json_data)
    # One row per training image, one column per sampled patch (background
    # first, then each reference patch), split by BGR channel.
    images_b = None
    images_g = None
    images_r = None
    for obj in objs:
        colors_b = []
        colors_g = []
        colors_r = []
        trainingSet = TrainingSet(obj)
        cv_image = cv2.imread(
            imagePath + trainingSet.imagePath, cv2.IMREAD_COLOR)
        if cv_image is None:
            print('Training image: ' + trainingSet.imagePath + ' cannot be found.')
            continue
        dis_image = cv_image.copy()
        height, width, channels = cv_image.shape
        # Average colour of the annotated background patch goes first.
        background_anno = trainingSet.background
        background_area = QcImage.crop_image_by_position_and_rect(
            cv_image, background_anno.position, background_anno.rect)
        background_bgr = QcImage.get_average_rgb(background_area)
        colors_b.append(background_bgr[0])
        colors_g.append(background_bgr[1])
        colors_r.append(background_bgr[2])
        # Then the average colour of every annotated reference patch.
        for anno in trainingSet.references:
            colour_area = QcImage.crop_image_by_position_and_rect(
                cv_image, anno.position, anno.rect)
            sample_bgr = QcImage.get_average_rgb(colour_area)
            colors_b.append(sample_bgr[0])
            colors_g.append(sample_bgr[1])
            colors_r.append(sample_bgr[2])
            # draw training label
            if vis:
                # Annotation positions are fractions of the image size;
                # rect sizes are scaled by RECT_SCALE.
                pos_x = int(width * anno.position.x)
                pos_y = int(height * anno.position.y)
                dim_x = int(width * anno.rect.x / RECT_SCALE) + pos_x
                dim_y = int(height * anno.rect.y / RECT_SCALE) + pos_y
                cv2.rectangle(dis_image,
                              (pos_x, pos_y),
                              (dim_x, dim_y),
                              (0, 255, 0), 1)
        images_b = np.array([colors_b]) if images_b is None else np.append(
            images_b, [colors_b], axis=0)
        images_g = np.array([colors_g]) if images_g is None else np.append(
            images_g, [colors_g], axis=0)
        images_r = np.array([colors_r]) if images_r is None else np.append(
            images_r, [colors_r], axis=0)
        # display training image and label
        if vis:
            dis_image = cv2.cvtColor(dis_image, cv2.COLOR_BGR2RGB)
            plt.imshow(dis_image)
            plt.title(trainingSet.imagePath)
            plt.show()
    # Fit the per-channel calibration regression for whichever channels are
    # named in `channel` (substring match, so e.g. 'green' runs only green).
    if 'blue' in channel:
        # blue channel
        print('blue============')
        M_b, B_b, err_b = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_b)
        print('a, b and error for blue channel: %s,%s, %s' %
              (M_b, B_b, err_b))
    if 'green' in channel:
        # green channel
        print('green============')
        M_g, B_g, err_g = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_g)
        print('a, b and error for green channel: %s,%s, %s' %
              (M_g, B_g, err_g))
    if 'red' in channel:
        # red channel
        print('red============')
        M_r, B_r, err_r = SICCalibrationRegression_MB3.sic_calibration_regression(
            images_r)
        print('a, b and error for red channel: %s,%s, %s' %
              (M_r, B_r, err_r))
    input("Press Enter to exit...")
| [
11748,
269,
85,
17,
198,
11748,
33918,
198,
11748,
7869,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
9195,
82,
13,
24396,
13,
48,
66,
5159,
355,
1195,
66,
5159,
198,
... | 2.003181 | 1,886 |
from setuptools import setup, find_packages
# Packaging metadata for MELKE (entity/relation extraction from BIO text).
setup(name = 'MELKE',
      version = '1',
      description = 'Extract entities and relations from BIO-text',
      long_description = 'You can read brief description of MELKE here: \nhttps://github.com/im-na02/melke/',
      url = 'https://github.com/im-na02/melke/',
      license = 'MIT',
      packages = ['melke'],
      keywords = ['bio', 'text', 'NER', 'entity', 'relation'],
      py_modules = ['EntityRelation'],
      python_requires = '>=3',
      include_package_data = True,
      # Ship every non-Python file under the melke package.
      package_data = {'melke':['*']},
      zip_safe = False
      )
| [
201,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
201,
198,
201,
198,
40406,
7,
3672,
796,
705,
44,
3698,
7336,
3256,
201,
198,
220,
220,
220,
220,
220,
2196,
796,
705,
16,
3256,
201,
198,
220,
220,
220,
220,
220... | 2.343396 | 265 |
"""Tests for motion_frontend integration."""
| [
37811,
51,
3558,
329,
6268,
62,
8534,
437,
11812,
526,
15931,
198
] | 3.75 | 12 |
# Exercise: write a program with a leiaInt() function that works like
# Python's input(), but validates that only a numeric value is accepted.
# Ex: n = leiaInt('Digite um n: ')

# NOTE(review): leiaInt is not defined or imported in this file's visible
# code -- as written this raises NameError. Confirm where it comes from.
n = leiaInt('Número: ')
print(n)
2,
327,
5034,
23781,
1430,
64,
8358,
3478,
3099,
257,
1257,
16175,
28749,
443,
544,
5317,
22784,
8358,
410,
1872,
25439,
295,
283,
198,
2,
390,
1296,
64,
5026,
417,
71,
12427,
564,
246,
64,
1257,
16175,
28749,
5128,
3419,
466,
11361,
... | 2.342593 | 108 |
# Import our database and initialize it
from db import DB
import send_email
import re
import hashlib
import uuid
import traceback
from datetime import datetime
from base64 import standard_b64encode
# DANGER(review): importing this module wipes and re-seeds the database --
# clear_db() runs unconditionally at import time. Confirm this is intended
# outside of a test/demo environment.
sql = DB()
sql.clear_db()
sql.init_db()
sql.populate()

# NOTE(review): the stubs below describe helpers that are not present in
# this file's visible code.
# Checker function to check all form variables
# Checker function to check that all form variables are alphabetic
# Checker function to check that all form variables are alphanum
# Checker function to check that all form variables are alphanum
# Get user information by supplying their UUID
| [
2,
17267,
674,
6831,
290,
41216,
340,
198,
6738,
20613,
1330,
20137,
198,
11748,
3758,
62,
12888,
198,
11748,
302,
198,
11748,
12234,
8019,
198,
11748,
334,
27112,
198,
11748,
12854,
1891,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6... | 3.761905 | 147 |
from fanstatic import Library, Resource
from fanstatic.core import render_js
library = Library('json2', 'resources')
def earlier_than_ie8(url):
    """Renderer that emits the script tag only for IE < 8.

    Native JSON support was introduced in IE8, so the tag is wrapped in a
    conditional comment that only pre-IE8 browsers honour.
    """
    script_tag = render_js(url)
    return '<!--[if lt IE 8]>%s<![endif]-->' % script_tag
json2 = Resource(library, 'json2.js', renderer=earlier_than_ie8)
| [
6738,
4336,
12708,
1330,
10074,
11,
20857,
198,
6738,
4336,
12708,
13,
7295,
1330,
8543,
62,
8457,
198,
198,
32016,
796,
10074,
10786,
17752,
17,
3256,
705,
37540,
11537,
198,
198,
4299,
2961,
62,
14813,
62,
494,
23,
7,
6371,
2599,
19... | 2.843478 | 115 |
from .base import BaseCompiler, ExecCompiler
| [
6738,
764,
8692,
1330,
7308,
7293,
5329,
11,
8393,
7293,
5329,
198
] | 3.75 | 12 |
'''Swiss pairing simulation'''
import matplotlib
# Select the non-interactive SVG backend before pyplot is imported.
matplotlib.use('SVG')
import matplotlib.pyplot as plt
import numpy as np
from tournament import SwissTournament, PairingError

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file's visible code --
    # running the module directly raises NameError. Confirm where main is
    # expected to come from.
    main()
| [
7061,
6,
10462,
747,
27356,
18640,
7061,
6,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
50,
43490,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,... | 3.028169 | 71 |
import matplotlib.pyplot as plt
import numpy as np

# Columns: time, x, y -- one row per sample of the simulated trajectory.
t, x, y = np.loadtxt("out/traiettoria.dat", skiprows=1, unpack=True)


def _plot_series(xs, ys, title, xlabel, ylabel, color, outfile):
    """Draw one 5x5-inch line+marker plot and save it to *outfile*."""
    plt.figure(figsize=(5, 5))
    plt.plot(xs, ys, "-o", color=color, markersize=3)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(outfile)


# The three views were previously three copy-pasted blocks; the rendering
# code is factored into _plot_series (behaviour unchanged).
_plot_series(x, y, "Traiettoria", "x(t)", "y(t)", "tab:blue", "out/traiettoria")
_plot_series(t, x, "x", "t", "x(t)", "tab:green", "out/x")
_plot_series(t, y, "y", "t", "y(t)", "tab:red", "out/y")
plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
83,
11,
2124,
11,
331,
796,
45941,
13,
2220,
14116,
7203,
448,
14,
9535,
1155,
83,
7661,
13,
19608,
1600,
14267,
8516,
28,
16,
11... | 2.031847 | 314 |
"""Simple wrapper to upgrade the files by github URL"""
import json
import logging
import os
import re
import shutil
import subprocess
import urllib
from hashlib import md5
from typing import Tuple, List
import requests
import tensorflow as tf
# TODO: install file properly with `pip install -e .`
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from storage import FileStorage
from flask import (
Flask, redirect, request, render_template, send_from_directory)
app = Flask(__name__)
class NotebookDownloadException(Exception):
    """Raised when the requested file cannot be fetched from github."""
class ConvertionException(Exception):
    """NBdime conversion exception.

    Carries a short *message* plus optional *details* (HTML fragment);
    `catch_all` reads `error.message` and `error.details` when rendering
    the error page, so both are exposed as attributes here (the original
    bare subclass never set them, making that access an AttributeError).
    """

    def __init__(self, message, details=""):
        # Keep args populated so str()/repr() and args-based handlers work.
        super().__init__(message, details)
        self.message = message
        self.details = details
def download_file(requested_url: str) -> str:
    """Download one file from a github repository.

    The github "blob" URL is rewritten to its "raw" form and fetched;
    anything but HTTP 200 raises NotebookDownloadException.
    """
    raw_url = f"https://github.com/{requested_url.replace('blob', 'raw')}"
    resp = requests.get(raw_url)
    logging.info(f"Requested URL: {requested_url}")
    if resp.status_code == 200:
        return resp.text
    logging.info(f"Can not download {raw_url}")
    raise NotebookDownloadException("Can not download the file. Please, check the URL")
# TODO: Run conversion in temp folder,
# so we do not have issues with concurrent conversion
def convert_file(in_file: str, out_file: str) -> List[str]:
    """Upgrade file with tf_upgrade_v2.

    Runs the ``tf_upgrade_v2`` CLI tool on *in_file*, writing the upgraded
    source to *out_file*.

    :param in_file: path of the file to upgrade
    :param out_file: path the upgraded file is written to
    :return: the tool's combined stdout/stderr, one string per line
    :raises ConvertionException: if the tool exits with a non-zero status
    """
    # Pass argv as a list with shell=False (the default): the previous
    # shell=True f-string command let shell metacharacters in the file
    # names (which derive from user-supplied URLs) reach a shell.
    process = subprocess.Popen(
        ["tf_upgrade_v2", "--infile", in_file, "--outfile", out_file],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    result_bytes = process.stdout.readlines()
    process.wait()
    result = [line.decode('utf-8') for line in result_bytes]
    if process.returncode:
        details = "<br>".join(result)
        raise ConvertionException("Can not convert the file", details)
    return result
def save_ipynb_from_py(folder: str, py_filename: str) -> str:
    """Wrap a .py file into a notebook built from template.ipynb.

    Each source line is JSON-escaped and substituted for the {{TEMPLATE}}
    placeholder of the template; the resulting .ipynb is written next to
    the .py file. Returns the notebook's file name (relative, like
    *py_filename*).
    """
    src_path = f"{folder}/{py_filename}"
    with open(src_path) as src:
        escaped = [ln.replace("\n", "\\n").replace('"', '\\"')
                   for ln in src.readlines()]
    cell_source = '",\n"'.join(escaped)
    with open('template.ipynb') as tpl:
        notebook_body = ''.join(tpl.readlines())
    with open(src_path.replace('.py', '.ipynb'), "w") as out:
        out.write(notebook_body.replace('{{TEMPLATE}}', cell_source))
    return py_filename.replace('.py', '.ipynb')
def process_file(file_url: str) -> Tuple[str, Tuple[str, ...]]:
    """Process file with download, cache and upgrade.

    The working folder is keyed by an md5 of the URL, so a second request
    for the same URL skips download/conversion entirely (cache hit).
    Returns (folder_path, (original_name, converted_name)); for .py inputs
    the returned names are the generated .ipynb wrappers.
    """
    _, file_ext = os.path.splitext(file_url)
    # md5 is used only as a cache key here, not for security.
    folder_hash = md5(file_url.encode('utf-8')).hexdigest()
    path = f"/notebooks/{folder_hash}"
    original = f"original{file_ext}"
    converted = f"converted{file_ext}"
    # TODO: delete the folder completely if `force`
    if not os.path.exists(path):
        file_content = download_file(file_url)
        os.mkdir(path)
        with open(f"{path}/{original}", "w") as original_file:
            original_file.write(file_content)
        try:
            output = convert_file(f"{path}/{original}", f"{path}/{converted}")
        except ConvertionException as error:
            # Drop the half-built cache folder so a retry starts clean.
            shutil.rmtree(path)
            raise error
        with open(f"{path}/output", "w") as summary_output:
            summary_output.write('\n'.join(output))
        # report.txt is written to the cwd by tf_upgrade_v2.
        shutil.copy('report.txt', f"{path}/report")
        # persist `report.txt` to GCS
        storage = FileStorage()
        storage.save_file('report.txt', folder_hash)
        # found a python file, need to encode separately
        if original.endswith('.py'):
            result_filenames = []
            for py_file in [original, converted]:
                result_filenames.append(save_ipynb_from_py(path, py_file))
            assert len(result_filenames) == 2
            return path, tuple(result_filenames[:2])
    # Cache-hit path: the .ipynb wrappers already exist on disk.
    if original.endswith('.py'):
        return path, (original.replace('.py', '.ipynb'),
                      converted.replace('.py', '.ipynb'))
    return path, (original, converted)
def inject_nbdime(content: str, folder_hash: str) -> str:
    """Inject report strings before `nbdime`' diff.

    Splits the proxied nbdime HTML at its "Notebook Diff" heading and
    re-renders it through nbdime_inject.html with the cached upgrade
    report inserted between the two halves.

    :raises FileNotFoundError: if the cached report file was removed
        (handled by the caller as an invalidated cache).
    """
    replace_token = "<h3>Notebook Diff</h3>"
    position = content.find(replace_token)
    # nothing to inject here, just return the content
    if position == -1:
        return content
    path = f"/notebooks/{folder_hash}"
    with open(f"{path}/report") as summary_output:
        # Skip blank lines so the template renders a compact report.
        report_lines = [line for line in summary_output.readlines()
                        if line.strip() != '']
    return render_template("nbdime_inject.html",
                           before=content[:position],
                           report_lines=report_lines,
                           after=content[position:],
                           folder=folder_hash,
                           file='converted.ipynb',
                           tf_version=tf.version.VERSION)
@app.route("/")
def hello():
    """Index page with intro info (shows the running TensorFlow version)."""
    return render_template('index.html',
                           tf_version=tf.version.VERSION)
@app.route('/download/<path:folder>/<path:filename>')
def download(folder, filename):
    """Allow to download files.

    Serves a cached original/converted file from /notebooks/<folder>/.
    NOTE(review): safety against path traversal relies entirely on
    send_from_directory's checks -- confirm the Flask version in use
    performs them.
    """
    # TODO: move all /notebooks to a single config
    uploads = os.path.join('/notebooks/', folder)
    return send_from_directory(directory=uploads, filename=filename)
@app.route("/d/<path:path>", methods=['GET'])
def proxy(path):
    """Proxy request to index of `nbdime`.

    GET requests under /d/ are forwarded to the nbdime server named by the
    NBDIME_URL environment variable; pages that reference cached notebooks
    get the upgrade report injected before being returned.
    """
    nbdime_url = os.environ.get('NBDIME_URL')
    # Re-encode the query string for the upstream request.
    params = '&'.join([f"{k}={v}" for k, v in request.values.items()])
    url = f"{nbdime_url}{path}?{params}"
    logging.info(f"URL: {url}")
    try:
        response = urllib.request.urlopen(url)
        content = response.read()
        if b'notebooks' in content:
            # Recover the cache folder hash from the /notebooks/<hash>/ URL.
            folder_hash = re.findall(r"/notebooks\/([^\/]+)/", url)[0]
            try:
                content = inject_nbdime(content.decode('utf-8'), folder_hash)
                return content
            except FileNotFoundError:
                # The cached report vanished between conversion and render.
                return ("The cache was invalidated meanwhile. "
                        "Please start by submitting the URL again.")
        else:
            return content
    except urllib.error.URLError:
        logging.error(f"Can not proxy nbdime for GET: {url}")
        message = "Something went wrong, can not proxy nbdime"
        return render_template('error.html', message=message), 502
@app.route("/d/<path:path>", methods=['POST'])
def proxy_api(path):
    """Proxy request to `nbdime` API.

    Forwards the JSON POST body to the nbdime server (NBDIME_URL env var),
    reconstructing the payload from the referrer's query string when the
    body's content type was lost in transit.
    """
    nbdime_url = os.environ.get('NBDIME_URL')
    url = f"{nbdime_url}{path}"
    try:
        payload = json.dumps(request.json).encode()
        headers = {'content-type': 'application/json'}
        # dirty hack: seems like sometimes nbdime looses `content type`
        # from `application/json` to `text/plain;charset=UTF-8`
        if not request.json:
            logging.warning(f"WARNING: somehow lost json payload {request.json}")
            # Rebuild {base, remote} from the referrer's query parameters.
            base = re.findall(r"base=([^\&]+)", request.referrer)[0]
            remote = re.findall(r"remote=([^\&]+)", request.referrer)[0]
            payload = json.dumps({'base': base, 'remote': remote})
            payload = payload.replace('%2F', '/').encode('utf-8')
        req = urllib.request.Request(url,
                                     data=payload,
                                     headers=headers)
        resp = urllib.request.urlopen(req)
        return resp.read()
    except urllib.error.URLError:
        logging.error(f"Can not proxy nbdime for POST: {url}")
        message = "Something went wrong, can not proxy nbdime"
        return render_template('error.html', message=message), 502
# TODO force refresh
@app.route('/<path:path>')
def catch_all(path):
    """Endpoint for all URLs from Github.

    *path* is the github blob path ("user/repo/blob/branch/file.py"); the
    file is downloaded, upgraded, and the client is redirected to the
    nbdime diff of original vs converted.
    """
    if not (path.endswith('.py') or path.endswith('.ipynb')):
        message = "Currently we only support `.py` and `.ipynb` files."
        return render_template('error.html', message=message), 501
    try:
        folder, files = process_file(path)
        url = f"/d/diff?base={folder}/{files[0]}&remote={folder}/{files[1]}"
        return redirect(url, code=302)
    except NotebookDownloadException as error:
        message = error.args[0]
        return render_template('error.html', message=message), 400
    except ConvertionException as error:
        # Read the payload via args: ConvertionException may be a bare
        # Exception subclass with no .message/.details attributes, and the
        # previous attribute access raised AttributeError here, masking
        # the real conversion error.
        message = error.args[0] if error.args else "Can not convert the file"
        details = error.args[1] if len(error.args) > 1 else ""
        logging.error(f"Can not convert for path {path}: {details}")
        return render_template('error.html',
                               message=message,
                               details=details), 400
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| [
37811,
26437,
29908,
284,
8515,
262,
3696,
416,
33084,
10289,
37811,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
11748,
2956,
297,
571,
198,
198,
6738,... | 2.326007 | 3,822 |
def resolve():
'''
code here
'''
X = int(input())
if X >= 30:
print('Yes')
else:
print('No')
if __name__ == "__main__":
resolve()
| [
4299,
10568,
33529,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
2438,
994,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
1395,
796,
493,
7,
15414,
28955,
628,
220,
220,
220,
611,
1395,
18189,
1542,
25,
198,
220,
220,
2... | 1.988764 | 89 |
from twistedbot.plugins.base import PluginChatBase
from twistedbot.behavior_tree import FollowPlayer
plugin = Follow
| [
198,
6738,
19074,
13645,
13,
37390,
13,
8692,
1330,
42636,
30820,
14881,
198,
6738,
19074,
13645,
13,
46571,
62,
21048,
1330,
7281,
14140,
628,
198,
198,
33803,
796,
7281,
198
] | 4.033333 | 30 |
import pytest
from transiter.db import models
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
1007,
2676,
13,
9945,
1330,
4981,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
... | 2.644444 | 45 |
from collections import defaultdict
import copy
import networkx as nx
from zincbase import context
class Edge:
"""Class representing an edge in the KB.
"""
@property
def nodes(self):
"""Return the nodes that this edge is connected to as tuple of (subject, object)
"""
return [context.kb.node(self._sub), context.kb.node(self._ob)]
@property
def attrs(self):
"""Returns attributes of the edge stored in the KB
"""
attributes = None
for _, edge in self._edge.items():
if edge['pred'] == self._pred:
attributes = copy.deepcopy(edge)
if attributes is None:
return False
try:
del attributes['pred']
del attributes['_watches']
except:
pass
return attributes
def watch(self, attribute, fn):
"""Execute user-defined function when the value of attribute changes.
Function takes two args: `edge` which has access to all
its own attributes, and the second
arg is the previous value of the attribute that changed.
As cycles are possible in the graph, changes to an edge attribute, that
change the attributes of the nodes it's connected to, etc,
may eventually propagate back to change the original edge's attribute again,
ad infinitum until the stack explodes. To prevent this, in one "update cycle", more
than `kb._MAX_RECURSION` updates will be rejected.
:returns int: id of the watch
:Example:
>>> from zincbase import KB
>>> kb = KB()
>>> kb.store('edge(a,b)')
0
>>> edge = kb.edge('a', 'edge', 'b')
>>> edge.resistance = 3
>>> print(edge.resistance)
3
>>> edge.watch('resistance', lambda x, prev_val: print('resistance changed to ' + str(x.resistance)))
('resistance', 0)
>>> edge.resistance += 1
resistance changed to 4
"""
self._watches[attribute].append(fn)
return (attribute, len(self._watches) - 1)
def remove_watch(self, attribute_or_watch_id):
"""Stop watching `attribute_or_watch_id`.
If it is a string, delete all watches for that attribute.
If it is a tuple of (attribute, watch_id): delete that specific watch.
"""
if isinstance(attribute_or_watch_id, tuple):
self._watches[attribute_or_watch_id[0]].pop(attribute_or_watch_id[1])
else:
self._watches[attribute_or_watch_id] = [] | [
6738,
17268,
1330,
4277,
11600,
198,
11748,
4866,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
198,
6738,
31861,
8692,
1330,
4732,
198,
198,
4871,
13113,
25,
198,
220,
220,
220,
37227,
9487,
10200,
281,
5743,
287,
262,
14204,
13,
198... | 2.480269 | 1,039 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
import pickle
from torch.utils import data
from tqdm import tqdm
from scipy import stats as s
from os.path import join
# load Semantic KITTI class info
with open("semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
# things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person', 'bicyclist', 'motorcyclist']
# stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
# things_ids = []
# for i in sorted(list(semkittiyaml['labels'].keys())):
# if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in things:
# things_ids.append(i)
# print(things_ids)
# transformation between Cartesian coordinates and polar coordinates
things_ids = set([10, 11, 13, 15, 16, 18, 20, 30, 31, 32, 252, 253, 254, 255, 256, 257, 258, 259])
# @nb.jit #TODO: why jit would lead to offsets all zero?
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])',nopython=True,cache=True,parallel = False)
if __name__ == '__main__':
dataset = SemKITTI('./sequences', 'train')
dataset.count_box_size()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
13900,
42,
22470,
40,
4818,
282,
1170,
263,
198,
37811,
198,
11748,
28686,
198,
11748,
299,
32152,
355,... | 2.542105 | 570 |
#!/usr/bin/python
# -*- coding: ascii -*-
"""
Serialized usage test.
:date: 2021
:author: Christian Wiche
:contact: cwichel@gmail.com
:license: The MIT License (MIT)
"""
import unittest
from examples.stream_setup import SimplePacket
from embutils.utils import CRC
# -->> Definitions <<------------------
# -->> Test API <<---------------------
class TestSerialized(unittest.TestCase):
"""
Basic reference tests using the SimplePacket example.
"""
def test_01_serialize(self):
"""
Check if the serialization is being done correctly.
"""
# By hand
raw = bytearray([0x01, 0x02, 0x02, 0xDD, 0x07])
raw.extend(CRC().compute(data=raw).to_bytes(length=2, byteorder='little', signed=False))
# Frame implementation
item = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
# Compare
assert raw == item.serialize()
def test_02_deserialize(self):
"""
Check if the deserialization is being done correctly.
"""
# By hand
raw = bytearray([0x01, 0x02, 0x02, 0xDD, 0x07])
raw.extend(CRC().compute(data=raw).to_bytes(length=2, byteorder='little', signed=False))
# Frame creation
item = SimplePacket.deserialize(data=raw)
# Compare
assert item is not None
assert raw == item.serialize()
def test_03_comparison(self):
"""
Check if the comparison is being done correctly.
"""
# Create frames
item_1 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
item_2 = SimplePacket(source=0x01, destination=0x02, payload=bytearray([0xDD, 0x07]))
item_3 = SimplePacket(source=0x02, destination=0x01, payload=bytearray([0xDD, 0x08]))
# Compare
assert item_1 is not item_2
assert item_1 == item_2
assert item_1.serialize() == item_2.serialize()
assert item_1 != item_3
assert item_1.serialize() != item_3.serialize()
# -->> Test Execution <<---------------
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
355,
979,
72,
532,
9,
12,
198,
37811,
198,
32634,
1143,
8748,
1332,
13,
198,
198,
25,
4475,
25,
220,
220,
220,
220,
220,
33448,
198,
25,
9800,
25,
220,
220,
... | 2.35082 | 915 |
from __future__ import absolute_import, print_function
import logging
import re
import six
import itertools
from django.db import models, IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from time import time
from sentry.app import locks
from sentry.db.models import (
ArrayField,
BoundedPositiveIntegerField,
FlexibleForeignKey,
JSONField,
Model,
sane_repr,
)
from sentry.constants import BAD_RELEASE_CHARS, COMMIT_RANGE_DELIMITER
from sentry.models import CommitFileChange
from sentry.signals import issue_resolved, release_commits_updated
from sentry.utils import metrics
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.retries import TimedRetryPolicy
logger = logging.getLogger(__name__)
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
_dotted_path_prefix_re = re.compile(r"^([a-zA-Z][a-zA-Z0-9-]+)(\.[a-zA-Z][a-zA-Z0-9-]+)+-")
DB_VERSION_LENGTH = 250
class Release(Model):
"""
A release is generally created when a new version is pushed into a
production state.
"""
__core__ = False
organization = FlexibleForeignKey("sentry.Organization")
projects = models.ManyToManyField(
"sentry.Project", related_name="releases", through=ReleaseProject
)
# DEPRECATED
project_id = BoundedPositiveIntegerField(null=True)
version = models.CharField(max_length=DB_VERSION_LENGTH)
# ref might be the branch name being released
ref = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
# DEPRECATED - not available in UI or editable from API
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = JSONField(default={})
new_groups = BoundedPositiveIntegerField(default=0)
# generally the release manager, or the person initiating the process
owner = FlexibleForeignKey("sentry.User", null=True, blank=True, on_delete=models.SET_NULL)
# materialized stats
commit_count = BoundedPositiveIntegerField(null=True, default=0)
last_commit_id = BoundedPositiveIntegerField(null=True)
authors = ArrayField(null=True)
total_deploys = BoundedPositiveIntegerField(null=True, default=0)
last_deploy_id = BoundedPositiveIntegerField(null=True)
__repr__ = sane_repr("organization_id", "version")
@staticmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
def add_project(self, project):
"""
Add a project to this release.
Returns True if the project was added and did not already exist.
"""
from sentry.models import Project
try:
with transaction.atomic():
ReleaseProject.objects.create(project=project, release=self)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(flags=F("flags").bitor(Project.flags.has_releases))
except IntegrityError:
return False
else:
return True
def handle_commit_ranges(self, refs):
"""
Takes commit refs of the form:
[
{
'previousCommit': None,
'commit': 'previous_commit..commit',
}
]
Note: Overwrites 'previousCommit' and 'commit'
"""
for ref in refs:
if COMMIT_RANGE_DELIMITER in ref["commit"]:
ref["previousCommit"], ref["commit"] = ref["commit"].split(COMMIT_RANGE_DELIMITER)
def set_commits(self, commit_list):
"""
Bind a list of commits to this release.
This will clear any existing commit log and replace it with the given
commits.
"""
# Sort commit list in reverse order
commit_list.sort(key=lambda commit: commit.get("timestamp"), reverse=True)
# TODO(dcramer): this function could use some cleanup/refactoring as its a bit unwieldly
from sentry.models import (
Commit,
CommitAuthor,
Group,
GroupLink,
GroupResolution,
GroupStatus,
ReleaseCommit,
ReleaseHeadCommit,
Repository,
PullRequest,
)
from sentry.plugins.providers.repository import RepositoryProvider
from sentry.tasks.integrations import kick_off_status_syncs
# todo(meredith): implement for IntegrationRepositoryProvider
commit_list = [
c
for c in commit_list
if not RepositoryProvider.should_ignore_commit(c.get("message", ""))
]
lock_key = type(self).get_lock_key(self.organization_id, self.id)
lock = locks.get(lock_key, duration=10)
with TimedRetryPolicy(10)(lock.acquire):
start = time()
with transaction.atomic():
# TODO(dcramer): would be good to optimize the logic to avoid these
# deletes but not overly important
initial_commit_ids = set(
ReleaseCommit.objects.filter(release=self).values_list("commit_id", flat=True)
)
ReleaseCommit.objects.filter(release=self).delete()
authors = {}
repos = {}
commit_author_by_commit = {}
head_commit_by_repo = {}
latest_commit = None
for idx, data in enumerate(commit_list):
repo_name = data.get("repository") or u"organization-{}".format(
self.organization_id
)
if repo_name not in repos:
repos[repo_name] = repo = Repository.objects.get_or_create(
organization_id=self.organization_id, name=repo_name
)[0]
else:
repo = repos[repo_name]
author_email = data.get("author_email")
if author_email is None and data.get("author_name"):
author_email = (
re.sub(r"[^a-zA-Z0-9\-_\.]*", "", data["author_name"]).lower()
+ "@localhost"
)
if not author_email:
author = None
elif author_email not in authors:
author_data = {"name": data.get("author_name")}
author, created = CommitAuthor.objects.create_or_update(
organization_id=self.organization_id,
email=author_email,
values=author_data,
)
if not created:
author = CommitAuthor.objects.get(
organization_id=self.organization_id, email=author_email
)
authors[author_email] = author
else:
author = authors[author_email]
commit_data = {}
defaults = {}
# Update/set message and author if they are provided.
if author is not None:
commit_data["author"] = author
if "message" in data:
commit_data["message"] = data["message"]
if "timestamp" in data:
commit_data["date_added"] = data["timestamp"]
else:
defaults["date_added"] = timezone.now()
commit, created = Commit.objects.create_or_update(
organization_id=self.organization_id,
repository_id=repo.id,
key=data["id"],
defaults=defaults,
values=commit_data,
)
if not created:
commit = Commit.objects.get(
organization_id=self.organization_id,
repository_id=repo.id,
key=data["id"],
)
if author is None:
author = commit.author
commit_author_by_commit[commit.id] = author
patch_set = data.get("patch_set", [])
for patched_file in patch_set:
try:
with transaction.atomic():
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit=commit,
filename=patched_file["path"],
type=patched_file["type"],
)
except IntegrityError:
pass
try:
with transaction.atomic():
ReleaseCommit.objects.create(
organization_id=self.organization_id,
release=self,
commit=commit,
order=idx,
)
except IntegrityError:
pass
if latest_commit is None:
latest_commit = commit
head_commit_by_repo.setdefault(repo.id, commit.id)
self.update(
commit_count=len(commit_list),
authors=[
six.text_type(a_id)
for a_id in ReleaseCommit.objects.filter(
release=self, commit__author_id__isnull=False
)
.values_list("commit__author_id", flat=True)
.distinct()
],
last_commit_id=latest_commit.id if latest_commit else None,
)
metrics.timing("release.set_commits.duration", time() - start)
# fill any missing ReleaseHeadCommit entries
for repo_id, commit_id in six.iteritems(head_commit_by_repo):
try:
with transaction.atomic():
ReleaseHeadCommit.objects.create(
organization_id=self.organization_id,
release_id=self.id,
repository_id=repo_id,
commit_id=commit_id,
)
except IntegrityError:
pass
release_commits = list(
ReleaseCommit.objects.filter(release=self)
.select_related("commit")
.values("commit_id", "commit__key")
)
final_commit_ids = set(rc["commit_id"] for rc in release_commits)
removed_commit_ids = initial_commit_ids - final_commit_ids
added_commit_ids = final_commit_ids - initial_commit_ids
if removed_commit_ids or added_commit_ids:
release_commits_updated.send_robust(
release=self,
removed_commit_ids=removed_commit_ids,
added_commit_ids=added_commit_ids,
sender=self.__class__,
)
commit_resolutions = list(
GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.commit,
linked_id__in=[rc["commit_id"] for rc in release_commits],
).values_list("group_id", "linked_id")
)
commit_group_authors = [
(cr[0], commit_author_by_commit.get(cr[1])) for cr in commit_resolutions # group_id
]
pr_ids_by_merge_commit = list(
PullRequest.objects.filter(
merge_commit_sha__in=[rc["commit__key"] for rc in release_commits],
organization_id=self.organization_id,
).values_list("id", flat=True)
)
pull_request_resolutions = list(
GroupLink.objects.filter(
relationship=GroupLink.Relationship.resolves,
linked_type=GroupLink.LinkedType.pull_request,
linked_id__in=pr_ids_by_merge_commit,
).values_list("group_id", "linked_id")
)
pr_authors = list(
PullRequest.objects.filter(
id__in=[prr[1] for prr in pull_request_resolutions]
).select_related("author")
)
pr_authors_dict = {pra.id: pra.author for pra in pr_authors}
pull_request_group_authors = [
(prr[0], pr_authors_dict.get(prr[1])) for prr in pull_request_resolutions
]
user_by_author = {None: None}
commits_and_prs = list(itertools.chain(commit_group_authors, pull_request_group_authors))
group_project_lookup = dict(
Group.objects.filter(id__in=[group_id for group_id, _ in commits_and_prs]).values_list(
"id", "project_id"
)
)
for group_id, author in commits_and_prs:
if author not in user_by_author:
try:
user_by_author[author] = author.find_users()[0]
except IndexError:
user_by_author[author] = None
actor = user_by_author[author]
with transaction.atomic():
GroupResolution.objects.create_or_update(
group_id=group_id,
values={
"release": self,
"type": GroupResolution.Type.in_release,
"status": GroupResolution.Status.resolved,
"actor_id": actor.id if actor else None,
},
)
group = Group.objects.get(id=group_id)
group.update(status=GroupStatus.RESOLVED)
metrics.incr("group.resolved", instance="in_commit", skip_internal=True)
issue_resolved.send_robust(
organization_id=self.organization_id,
user=actor,
group=group,
project=group.project,
resolution_type="with_commit",
sender=type(self),
)
kick_off_status_syncs.apply_async(
kwargs={"project_id": group_project_lookup[group_id], "group_id": group_id}
)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
2237,
198,
11748,
340,
861,
10141,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
39348,
12331,
11,
8611,
... | 1.914754 | 7,754 |
print('abc'.startswith('a'))
print('abc'.startswith('b'))
print('abc'.startswith('c'))
print('abc'.startswith('bc'))
print('abc'.startswith('abc'))
print('aBc'.casefold().startswith(('b', 'c')))
print('aBc'.casefold().startswith(('x', 'y')))
print('aBc'.casefold().startswith('A'.casefold()))
print('aBc'.casefold().startswith('b'.casefold()))
print('aBc'.casefold().startswith('C'.casefold()))
print('aBc'.casefold().startswith('bC'.casefold()))
print('aBc'.casefold().startswith('AbC'.casefold()))
print()
print('abc'.endswith('a'))
print('abc'.endswith('b'))
print('abc'.endswith('c'))
print('abc'.endswith('bc'))
print('abc'.endswith('abc'))
print('aBc'.casefold().endswith(('b', 'c')))
print('aBc'.casefold().endswith(('x', 'y')))
print('aBc'.casefold().endswith('A'.casefold()))
print('aBc'.casefold().endswith('b'.casefold()))
print('aBc'.casefold().endswith('C'.casefold()))
print('aBc'.casefold().endswith('bC'.casefold()))
print('aBc'.casefold().endswith('AbC'.casefold()))
| [
4798,
10786,
39305,
4458,
9688,
2032,
342,
10786,
64,
6,
4008,
198,
4798,
10786,
39305,
4458,
9688,
2032,
342,
10786,
65,
6,
4008,
198,
4798,
10786,
39305,
4458,
9688,
2032,
342,
10786,
66,
6,
4008,
198,
4798,
10786,
39305,
4458,
9688,
... | 2.378897 | 417 |
import matplotlib.pyplot as plt
import numpy as np
plt.title('Un primo plot con Python')
x, y = np.loadtxt('ex1.dat', unpack=True)
plt.plot(x ,y, 'o-.b', label='Temperature Convertite')
plt.xlim((-10,130)) # intervallo lungo asse x
plt.ylim((10,250)) # intervallo lungo asse y
plt.xlabel('Temperature Celsius')
plt.ylabel('Temperature Fahrenheit')
plt.savefig('temp.png')
plt.legend()
plt.show() | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
11748,
299,
32152,
355,
45941,
198,
489,
83,
13,
7839,
10786,
3118,
2684,
78,
7110,
369,
11361,
11537,
198,
87,
11,
331,
796,
45941,
13,
2220,
14116,
10786,
1069,
16,
... | 2.518987 | 158 |
import os
from flask import url_for
from dimensigon.domain.entities import Software, Server
from dimensigon.utils.helpers import md5
from dimensigon.web import db
from tests.base import TestResourceBase
| [
11748,
28686,
198,
198,
6738,
42903,
1330,
19016,
62,
1640,
198,
198,
6738,
5391,
641,
37107,
13,
27830,
13,
298,
871,
1330,
10442,
11,
9652,
198,
6738,
5391,
641,
37107,
13,
26791,
13,
16794,
364,
1330,
45243,
20,
198,
6738,
5391,
64... | 3.614035 | 57 |
from django.conf.urls import url, patterns
from . import views
urlpatterns = patterns('clustering.views',
url(r'^accueil$', 'home'),
url(r'^screen/(\d+)$', views.view_screen),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
7572,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
220,
7572,
10786,
565,
436,
1586,
13,
33571,
3256,
220,
198,
197,
6371,
7,
81,
6,
61,
4134,
518,
346,
... | 2.577465 | 71 |
import os
import os.path
from setuptools import setup, Extension
import versioneer
# Default description in markdown
LONG_DESCRIPTION = open('README.md').read()
PKG_NAME = 'tessdb-cmdline'
AUTHOR = 'Rafael Gonzalez'
AUTHOR_EMAIL = 'astrorafael@yahoo.es'
DESCRIPTION = 'tessdb command line tool to manage tessdb database',
LICENSE = 'MIT'
KEYWORDS = 'Astronomy Python RaspberryPi LightPollution'
URL = 'http://github.com/stars4all/tessdb-comdline/'
PACKAGES = ["tess"]
DEPENDENCIES = [
'tabulate',
'matplotlib'
]
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: SQL',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Development Status :: 4 - Beta',
]
SCRIPTS = [
'files/usr/local/bin/tess'
]
if os.name == "posix":
setup(name = PKG_NAME,
version = versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
description = DESCRIPTION,
long_description_content_type = "text/markdown",
long_description = LONG_DESCRIPTION,
license = LICENSE,
keywords = KEYWORDS,
url = URL,
classifiers = CLASSIFIERS,
packages = PACKAGES,
install_requires = DEPENDENCIES,
scripts = SCRIPTS
)
else:
print("Not supported OS")
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
27995,
198,
11748,
2196,
28153,
198,
198,
2,
15161,
6764,
287,
1317,
2902,
198,
43,
18494,
62,
30910,
40165,
796,
1280,
10786,
15675,
11682,
1... | 2.283396 | 801 |
#-*-coding:utf8;-*-
import re
from random import choice
class sub(object):
""" a simple text to number evaluating class """
def text_to_number(self,text):
'''convert a number written as text to its real number equivalence'''
text = text.lower()
text = re.sub(r"ten", "10", text)
text = re.sub(r"eleven", "11", text)
text = re.sub(r"twelve", "12", text)
text = re.sub(r"thirteen", "13", text)
text = re.sub(r"fourteen", "14", text)
text = re.sub(r"fifteen", "15", text)
text = re.sub(r"sixteen", "16", text)
text = re.sub(r"seventeen", "17", text)
text = re.sub(r"eighteen", "18", text)
text = re.sub(r"nineteen", "19", text)
text = re.sub(r"twenty one", "21", text)
text = re.sub(r"twenty two", "22", text)
text = re.sub(r"twenty three", "23", text)
text = re.sub(r"twenty four", "24", text)
text = re.sub(r"twenty five", "25", text)
text = re.sub(r"twenty six", "26", text)
text = re.sub(r"twenty seven", "27", text)
text = re.sub(r"twenty eight", "28", text)
text = re.sub(r"twenty nine", "29", text)
text = re.sub(r"twenty", "20", text)
text = re.sub(r"thirty one", "31", text)
text = re.sub(r"thirty two", "32", text)
text = re.sub(r"thirty three", "33", text)
text = re.sub(r"thirty four", "34", text)
text = re.sub(r"thirty five", "35", text)
text = re.sub(r"thirty six", "36", text)
text = re.sub(r"thirty seven", "37", text)
text = re.sub(r"thirty eight", "38", text)
text = re.sub(r"thirty nine", "39", text)
text = re.sub(r"thirty", "30", text)
text = re.sub(r"forty one", "41", text)
text = re.sub(r"forty two", "42", text)
text = re.sub(r"forty three", "43", text)
text = re.sub(r"forty four", "44", text)
text = re.sub(r"forty five", "45", text)
text = re.sub(r"forty six", "46", text)
text = re.sub(r"forty seven", "47", text)
text = re.sub(r"forty eight", "48", text)
text = re.sub(r"forty nine", "49", text)
text = re.sub(r"forty", "40", text)
text = re.sub(r"fifty one", "51", text)
text = re.sub(r"fifty two", "52", text)
text = re.sub(r"fifty three", "53", text)
text = re.sub(r"fifty four", "54", text)
text = re.sub(r"fifty five", "55", text)
text = re.sub(r"fifty six", "56", text)
text = re.sub(r"fifty seven", "57", text)
text = re.sub(r"fifty eight", "58", text)
text = re.sub(r"fifty nine", "59", text)
text = re.sub(r"fifty", "50", text)
text = re.sub(r"sixty one", "61", text)
text = re.sub(r"sixty two", "62", text)
text = re.sub(r"sixty three", "63", text)
text = re.sub(r"sixty four", "64", text)
text = re.sub(r"sixty five", "65", text)
text = re.sub(r"sixty six", "66", text)
text = re.sub(r"sixty seven", "67", text)
text = re.sub(r"sixty eight", "68", text)
text = re.sub(r"sixty nine", "69", text)
text = re.sub(r"sixty", "60", text)
text = re.sub(r"seventy one", "71", text)
text = re.sub(r"seventy two", "72", text)
text = re.sub(r"seventy three", "73", text)
text = re.sub(r"seventy four", "74", text)
text = re.sub(r"seventy five", "75", text)
text = re.sub(r"seventy six", "76", text)
text = re.sub(r"seventy seven", "77", text)
text = re.sub(r"seventy eight", "78", text)
text = re.sub(r"seventy nine", "79", text)
text = re.sub(r"seventy", "70", text)
text = re.sub(r"eighty one", "81", text)
text = re.sub(r"eighty two", "82", text)
text = re.sub(r"eighty three", "83", text)
text = re.sub(r"eighty four", "84", text)
text = re.sub(r"eighty five", "85", text)
text = re.sub(r"eighty six", "86", text)
text = re.sub(r"eighty seven", "87", text)
text = re.sub(r"eighty eight", "88", text)
text = re.sub(r"eighty nine", "89", text)
text = re.sub(r"eighty", "80", text)
text = re.sub(r"ninety one", "91", text)
text = re.sub(r"ninety two", "92", text)
text = re.sub(r"ninety three", "93", text)
text = re.sub(r"ninety four", "94", text)
text = re.sub(r"ninety five", "95", text)
text = re.sub(r"ninety six", "96", text)
text = re.sub(r"ninety seven", "97", text)
text = re.sub(r"ninety eight", "98", text)
text = re.sub(r"ninety nine", "99", text)
text = re.sub(r"ninety", "90", text)
text = re.sub(r"one", "01", text)
text = re.sub(r"two", "02", text)
text = re.sub(r"three", "03", text)
text = re.sub(r"four", "04", text)
text = re.sub(r"five", "05", text)
text = re.sub(r"six", "06", text)
text = re.sub(r"seven", "07", text)
text = re.sub(r"eight", "08", text)
text = re.sub(r"nine", "09", text)
text = re.sub(r"hundred", "00", text)
text = re.sub(r"thousand", "000", text)
text = re.sub(r"million", "000000", text)
text = re.sub(r"billion", "000000000", text)
return text
| [
2,
12,
9,
12,
66,
7656,
25,
40477,
23,
26,
12,
9,
12,
198,
11748,
302,
198,
6738,
4738,
1330,
3572,
628,
198,
4871,
850,
7,
15252,
2599,
198,
220,
220,
220,
37227,
257,
2829,
2420,
284,
1271,
22232,
1398,
37227,
198,
220,
220,
2... | 1.981689 | 2,676 |
"""
This example shows how to create a Messaging Interactions transcripts CSV flat file from the lp_api_wrapper library.
"""
from lp_api_wrapper import MessagingInteractions, UserLogin
from datetime import datetime, timedelta
import pandas as pd
# For User Login
auth = UserLogin(account_id='1234', username='YOURUSERNAME', password='YOURPASSWORD')
# Create MI Connections
mi_conn = MessagingInteractions(auth=auth)
# Creates Epoch Time from 1 day ago. (If your volume is low, or none. Consider increasing days)
start_from = int((datetime.now() - timedelta(days=1)).timestamp() * 1000)
# Creates Epoch Time right now.
start_to = int(datetime.now().timestamp() * 1000)
# Conversations from date range created above
body = {'start': {'from': start_from, 'to': start_to}}
# Get data!
conversations = mi_conn.conversations(body=body)
# Convert into Pandas DataFrame
df = pd.DataFrame(conversations.message_record)
# File path with file name.
file_path = './transcripts.csv'
# Export into CSV with no index column
df.to_csv(path_or_buf=file_path, index=False)
# Now you have a Transcripts Flat File!
| [
37811,
198,
1212,
1672,
2523,
703,
284,
2251,
257,
10626,
3039,
4225,
4658,
29351,
44189,
6228,
2393,
422,
262,
300,
79,
62,
15042,
62,
48553,
5888,
13,
198,
37811,
198,
198,
6738,
300,
79,
62,
15042,
62,
48553,
1330,
10626,
3039,
949... | 3.18732 | 347 |
from selenium import webdriver
from time import sleep
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
usr=input('Enter Email Address :')
pwd=input('Enter Password:')
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://zoom.us/signin')
print ("Opened Zoom")
sleep(1)
username_box = driver.find_element_by_css_selector("#email")
username_box.send_keys(usr)
print ("Email Id entered")
sleep(1)
password_box = driver.find_element_by_css_selector('#password')
password_box.send_keys(pwd)
print ("Password entered")
sleep(1)
login_box = driver.find_element_by_css_selector("#login-form > div:nth-child(4) > div > div.signin > button")
login_box.click()
print ("Done")
input('Press anything to quit')
driver.quit()
print("Finished") | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
220,
198,
6738,
640,
1330,
3993,
220,
198,
6738,
3992,
26230,
62,
37153,
13,
46659,
1330,
13282,
32103,
13511,
220,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
18... | 2.840532 | 301 |
# Generated by Django 3.2.5 on 2021-07-12 05:08
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2998,
12,
1065,
8870,
25,
2919,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from logging import getLogger
from pandas import read_html
# from ecsim._scrapers.base import state_names
logger = getLogger(__name__)
url = "https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_historical_population"
# if __name__ == "__main__":
# data = scrape_data()
# for foo, bar in zip(data.index, state_names):
# print(f"Checking that {foo} is the same as {bar}")
# assert foo == bar
| [
6738,
18931,
1330,
651,
11187,
1362,
198,
198,
6738,
19798,
292,
1330,
1100,
62,
6494,
198,
198,
2,
422,
304,
6359,
320,
13557,
1416,
2416,
364,
13,
8692,
1330,
1181,
62,
14933,
628,
198,
6404,
1362,
796,
651,
11187,
1362,
7,
834,
3... | 2.549133 | 173 |
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request/response of PutBucketVersioning and GetBucketVersioning APIs."""
from __future__ import absolute_import
from .commonconfig import DISABLED, ENABLED
from .xml import Element, SubElement, findtext
OFF = "Off"
SUSPENDED = "Suspended"
class VersioningConfig:
"""Versioning configuration."""
@property
def status(self):
"""Get status."""
return self._status or OFF
@property
def mfa_delete(self):
"""Get MFA delete."""
return self._mfa_delete
@classmethod
def fromxml(cls, element):
"""Create new object with values from XML element."""
status = findtext(element, "Status")
mfa_delete = findtext(element, "MFADelete")
return cls(status, mfa_delete)
def toxml(self, element):
"""Convert to XML."""
element = Element("VersioningConfiguration")
if self._status:
SubElement(element, "Status", self._status)
if self._mfa_delete:
SubElement(element, "MFADelete", self._mfa_delete)
return element
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1855,
9399,
11361,
10074,
329,
6186,
311,
18,
3082,
16873,
10130,
20514,
11,
357,
34,
8,
198,
2,
12131,
1855,
9399,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,... | 2.865225 | 601 |
from __future__ import print_function, absolute_import, division
import os
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.MetisApplication as MetisApplication
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
import KratosMultiphysics.kratos_utilities as KratosUtils
from KratosMultiphysics.mpi import distributed_import_model_part_utility
from KratosMultiphysics.TrilinosApplication import trilinos_linear_solver_factory
from KratosMultiphysics import ParallelEnvironment
if __name__ == '__main__':
KratosUnittest.main() | [
171,
119,
123,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
201,
198,
201,
198,
11748,
28686,
201,
198,
201,
198,
11748,
509,
10366,
418,
15205,
13323,
23154,
201,
198,
11748,
509,
10366,
418,
15205,
1... | 3.028846 | 208 |
import zipfile
import os.path
| [
201,
198,
11748,
19974,
7753,
201,
198,
11748,
28686,
13,
6978,
201,
198
] | 2.615385 | 13 |
# Mock settings file imported by sphinx when building docs
SECRET_KEY = 'not empty'
| [
2,
44123,
6460,
2393,
17392,
416,
599,
20079,
87,
618,
2615,
34165,
198,
198,
23683,
26087,
62,
20373,
796,
705,
1662,
6565,
6,
198
] | 3.541667 | 24 |
'''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:43:19
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
'''
Author: Liu Xin
Date: 2021-11-29 11:08:53
LastEditors: Liu Xin
LastEditTime: 2021-11-30 19:31:07
Description: file content
FilePath: /CVMI_Sementic_Segmentation/model/decode_heads/encnet/encnet.py
'''
"""Context Encoding for Semantic Segmentation"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.utils.enc_module import EncModule
from model.builder import DECODE_HEAD
__all__ = ['EncNet']
@DECODE_HEAD.register_module("EncNet")
if __name__ == '__main__':
x1 = torch.randn(4,256,64,64)
x2 = torch.randn(4,512,16,16)
x3 = torch.randn(4,1024,16,16)
x4 = torch.randn(4,2048,16,16)
model = EncNet(2048,11)
out = model([x1,x2,x3,x4])
print(type(out))
# outputs = model(img)
| [
7061,
6,
198,
13838,
25,
18258,
25426,
198,
10430,
25,
33448,
12,
1157,
12,
1959,
1367,
25,
2919,
25,
4310,
198,
5956,
18378,
669,
25,
18258,
25426,
198,
5956,
18378,
7575,
25,
33448,
12,
1157,
12,
1270,
678,
25,
3559,
25,
1129,
198... | 2.418782 | 394 |
import numpy as np
import numpy.typing as npt
from .. import tools
from ..algo import Algo
class TCO(Algo):
"""Transaction costs optimization. The TCO algorithm needs just a next return prediction
to work, see the paper for more details.
Paper : https://ink.library.smu.edu.sg/cgi/viewcontent.cgi?referer=&httpsredir=1&article=4761&context=sis_research
"""
PRICE_TYPE = "raw"
REPLACE_MISSING = True
def __init__(self, trx_fee_pct=0, eta=10, **kwargs):
"""
:param trx_fee_pct: transaction fee in percent
:param eta: smoothing parameter
"""
super().__init__(**kwargs)
self.trx_fee_pct = trx_fee_pct
self.eta = eta
def predict(self, p, history) -> npt.NDArray:
"""Predict returns on next day.
:param p: raw price
"""
raise NotImplementedError()
def update_tco(self, x: npt.NDArray, b: npt.NDArray, x_pred: npt.NDArray):
"""
:param x: ratio of change in price
"""
lambd = 10 * self.trx_fee_pct
# last price adjusted weights
updated_b = np.multiply(b, x) / np.dot(b, x)
# Calculate variables
vt = x_pred / np.dot(updated_b, x_pred)
v_t_ = np.mean(vt)
# Update portfolio
b_1 = self.eta * (vt - np.dot(v_t_, 1))
b_ = updated_b + np.sign(b_1) * np.maximum(
np.zeros(len(b_1)), np.abs(b_1) - lambd
)
# project it onto simplex
proj = tools.simplex_proj(y=b_)
return proj
if __name__ == "__main__":
tools.quickrun(TCO1())
| [
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
774,
13886,
355,
299,
457,
198,
198,
6738,
11485,
1330,
4899,
198,
6738,
11485,
282,
2188,
1330,
978,
2188,
628,
198,
4871,
309,
8220,
7,
2348,
2188,
2599,
198,
220,
220,
220,... | 2.132353 | 748 |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from datetime import datetime, timedelta
from floodsystem.analysis import polyfit
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
6947,
10057,
13,
20930,
1330,
7514,
11147,
... | 3.707317 | 41 |
import sys
import numpy as np
'''
This function parses the yaml input file, assuming the input is correct The parsing works in the following way:
given a correct file that defines len = n, the function returns two arrays of length n - cell_value_arr (beauty/num
coins in each cell) and cell_title_arr (princess or dragon)
'''
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
this functions returns the index of the previous princess
'''
'''
index - an index of a princess in the input arrays
title_arr - cell_title_arr (input)
output_array - this array contains the lower_bound and upper_bound for each princess
this functions returns the index of the previous princess with non empty lower bound (explanation will follow)
'''
'''
cell_value_arr - a version of cell_value_array
num_dragons_allowed - a number of dragons the knight is allowed to kill
this function returns the indices of the dragons with most of the coins (bound by num_dragons_allowed)
'''
'''
cell_title_arr - a version of cell_title_arr
this function counts the number of dragons in it, and returns the count
'''
'''
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
index_list - an index_list of dragon cells
prev_princess_index - the index of a previous princess
this function marks the following cells: all cells before prev_princess_index are marked if
they are not in index_list or not a dragon, and the rest of the cells are marked if they are not a dragon
the mark is the lowest integer number (i'm assuming it won't be given as an input)
'''
# This as an explanation for calculate_princess_lower_bound(), calculate_princess_upper_bound(): the output array
# will hold for each princess two lists of indices - lower_bound: minimal number of dragons to kill, that maximize
# coin sum and allow marrying that princess upper_bound: maximal number of dragons to kill, that maximal coin sum and
# allow marrying that princess (without marrying previous princesses)
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
i - current princess index
beauty_val - current princess beauty value
prev_princess_index - the index of a previous princess
this function returns the current princess lower_bound
'''
'''
prev_lower_bound - lower bound of the previous princess
cell_value_arr - a version of cell_value_arr
cell_title_arr - a version of cell_title_arr
dragon_count_in_range - number of dragons in between current princess and previous princess
i - current princess index
prev_princess_index - the index of a previous princess
this function returns the current princess upper_bound
'''
'''
i - current index in output array
cell_title_arr - a version of cell_title_arr
cell_value_arr - a version of cell_value_arr
output_array - this array contains the lower_bound and upper_bound for each princess
this function uses the previous functions and the previous cells of output_array to calculate
lower_bound and upper_bound of output_array[i], and returns it
'''
'''
output_array - this array contains the lower_bound and upper_bound for each princess
value_array - cell_value_arr (input)
n - index of princess we want to print
this function prints the output according to the instruction
'''
'''
title_arr - cell_title_arr (input)
value_array - cell_value_arr (input)
this function initializes output_array, fills it and prints it
'''
'''
main parses the input and runs run()
'''
if __name__ == '__main__':
input_file = input("Enter file name: for example input_file.yaml\n After output is printed, press Enter\n")
parser_val = parse_input_file(input_file)
if parser_val is not None:
input_title_arr, input_value_arr = parser_val
if len(input_title_arr) != 0:
run(input_title_arr, input_value_arr)
else:
# No princess
print(-1)
input("")
| [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
7061,
6,
198,
1212,
2163,
13544,
274,
262,
331,
43695,
5128,
2393,
11,
13148,
262,
5128,
318,
3376,
383,
32096,
2499,
287,
262,
1708,
835,
25,
220,
198,
35569,
257,
3376,
239... | 3.470078 | 1,153 |
# -*- coding: utf-8 -*-
# Copyright Hannah von Reth <vonreth@kde.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import utils
from CraftOS.osutils import OsUtils
from CraftCore import CraftCore | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
26013,
18042,
371,
2788,
1279,
26982,
40978,
31,
74,
2934,
13,
2398,
29,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
3... | 3.415274 | 419 |
import argparse
import ora_tools as ora
| [
11748,
1822,
29572,
198,
11748,
393,
64,
62,
31391,
355,
393,
64,
198
] | 3.076923 | 13 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import databricks_test
from databricks_test import SessionAlreadyExistsException
| [
11748,
4818,
397,
23706,
62,
9288,
198,
6738,
4818,
397,
23706,
62,
9288,
1330,
23575,
37447,
3109,
1023,
16922,
628
] | 4.1 | 20 |
"""Module that implements the Switch class."""
from __future__ import annotations
from ..const import SwitchAttribute, ZWaveDeviceAttribute
from . import VivintDevice
class Switch(VivintDevice):
"""Represents a Vivint switch device."""
@property
def is_on(self) -> bool:
"""Return True if switch is on."""
return self.data[SwitchAttribute.STATE]
@property
def level(self) -> int:
"""Return the level of the switch betwen 0..100."""
return self.data[SwitchAttribute.VALUE]
@property
def node_online(self) -> bool:
"""Return True if the node is online."""
return self.data[ZWaveDeviceAttribute.ONLINE]
async def set_state(self, on: bool | None = None, level: int | None = None) -> None:
"""Set switch's state."""
await self.vivintskyapi.set_switch_state(
self.alarm_panel.id, self.alarm_panel.partition_id, self.id, on, level
)
async def turn_on(self) -> None:
"""Turn on the switch."""
await self.set_state(on=True)
async def turn_off(self) -> None:
"""Turn off the switch."""
await self.set_state(on=False)
class BinarySwitch(Switch):
"""Represents a Vivint binary switch device."""
class MultilevelSwitch(Switch):
"""Represents a Vivint multilevel switch device."""
async def set_level(self, level: int) -> None:
"""Set the level of the switch between 0..100."""
await self.set_state(level=level)
| [
37811,
26796,
326,
23986,
262,
14645,
1398,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
11485,
9979,
1330,
14645,
33682,
11,
1168,
39709,
24728,
33682,
198,
6738,
764,
1330,
25313,
600,
24728,
628,
198,
4871,
14... | 2.681004 | 558 |
from sprp.core.alg import *
from sprp.export.shapefile import *
if __name__ == "__main__":
slc = SimpleLineCalculator(116.23589,39.90387,116.25291,39.90391,**{
"cameraWidth": 4000,
"cameraHeight":3000,
"focusLength":35,
"pixelSize":2,
"gsd":0.05,
"flightSpeed":80,
"courseOverlap":0.8,
"sidewiseOverlap":0.6
})
print(slc)
linePointsResult,forwardAngle = slc.caculate_line(116.23589,39.90387,116.25291,39.90391)
#slc.setLine(116.23589,39.90387,116.25291,39.90391)
result = slc.calculate()
print(result)
print(slc.points)
print("###############################################################################")
ssc = SimpleStripCalculator(116.23589,39.90387,116.25291,39.90391,
3,2,
**{
"cameraWidth": 4000,
"cameraHeight":3000,
"focusLength":35,
"pixelSize":2,
"gsd":0.05,
"flightSpeed":80,
"courseOverlap":0.8,
"sidewiseOverlap":0.6,
})
result = ssc.calculate()
print(result)
print(ssc.points)
print(len(ssc.points))
sfe = ShapefileExportor('/Users/luoxiangyong/Devel/sprp/Data', 'test-project')
sfe.save(ssc)
################################################################
###############################################################################
CAMERA_WIDTH = 2000
CAMERA_HEIGHT = 1000
CAMERA_GSD = 0.05
OVERLAP_FWD = 0.8
OVERLAP_CROSS = 0.6
BASELINE = (1-OVERLAP_FWD) * CAMERA_HEIGHT * CAMERA_GSD
CROSSLINE = (1-OVERLAP_CROSS) * CAMERA_WIDTH * CAMERA_GSD
"""
@brief 从点和指定的角度计算地面覆盖的矩形(footprint)
@param point 指定点
@param angle 航线方向
@param iwidth 图像长度
@param iheight 图像高度
@param gsd 地面分辨率
@return 返回地面覆盖的矩形的四脚点坐标
"""
if __name__ == "__main__":
# points,angle = caculateLine(116.23589,39.90387,116.25291,39.90391,50)
# print("Angle:{}".format(angle))
# writeLineToShapefile(points,'test-shapefile-01')
# points,angle = caculateLine(116.23589,39.90287,116.25291,39.90291,50)
# print("Angle:{}".format(angle))
# writeLineToShapefile(points,'test-shapefile-02')
start_long = 116.23589
start_lat = 39.90387
end_long = 116.25291
end_lat = 39.90591
geod = pyproj.Geod(ellps="WGS84")
#long,lat,tmpAngle = geod.fwd(point[0],point[1],angleTR, distance/2)
# 计算两点的角度
angle,backAngle,distanceTmp = geod.inv(start_long, start_lat,end_long,end_lat)
pointsOfLine = []
long = start_long
lat = start_lat
for index in range(10):
long,lat,tmpAngle = geod.fwd(long,lat, angle-90,CROSSLINE)
end_long,end_lat,tempAngle = geod.fwd(long,lat, angle,distanceTmp)
pointsOfLine.append((long,lat,end_long,end_lat))
caculateArea(pointsOfLine,BASELINE)
# caculateArea([[116.23589,39.90387,116.25291,39.90391],
# [116.23589,39.90287,116.25291,39.90291]],
# CAMERA_GSD) | [
6738,
7500,
79,
13,
7295,
13,
14016,
1330,
1635,
198,
6738,
7500,
79,
13,
39344,
13,
43358,
7753,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1017,
66,
796,
17427,
13949,
9771,
... | 2.012371 | 1,455 |
import sys
from optparse import make_option
from django.core import management
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import get_apps
from django_kwalitee.testrunners import get_runner | [
11748,
25064,
198,
6738,
2172,
29572,
1330,
787,
62,
18076,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
4542,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
1... | 3.681159 | 69 |
#
# @lc app=leetcode.cn id=76 lang=python3
#
# [76] 最小覆盖子串
#
# https://leetcode-cn.com/problems/minimum-window-substring/description/
#
# algorithms
# Hard (38.57%)
# Likes: 701
# Dislikes: 0
# Total Accepted: 72K
# Total Submissions: 186K
# Testcase Example: '"ADOBECODEBANC"\n"ABC"'
#
# 给你一个字符串 S、一个字符串 T 。请你设计一种算法,可以在 O(n) 的时间复杂度内,从字符串 S 里面找出:包含 T 所有字符的最小子串。
#
#
#
# 示例:
#
# 输入:S = "ADOBECODEBANC", T = "ABC"
# 输出:"BANC"
#
#
#
# 提示:
#
#
# 如果 S 中不存这样的子串,则返回空字符串 ""。
# 如果 S 中存在这样的子串,我们保证它是唯一的答案。
#
#
#
# @lc code=start
# @lc code=end
# def minWindow(self, s: str, t: str) -> str:
# l,r = 0,0
# res = ''
# min_len = float('inf')
# need = Counter(t)
# needcnt = len(t)
# while r < len(s):
# if need[s[r]] > 0:
# needcnt -= 1
# need[s[r]] -= 1
# r += 1
# while needcnt == 0:
# if r - l < min_len:
# min_len = r - l
# res = s[l:r]
# if need[s[l]] == 0:
# needcnt += 1
# need[s[l]] += 1
# l += 1
# return res
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
13,
31522,
4686,
28,
4304,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
4304,
60,
42164,
222,
22887,
237,
17358,
228,
33566,
244,
36310,
10310,
110,
198,
2,
198,
2,
3740,
1378,
293,
... | 1.300541 | 925 |
print ("""
Working.
@muuT3ra$$uu-kick-my-str-v.1
#FuckAllEverything.
by Tripl_color vk.com/Tripl_color""")
import vk_requests
import time
import random
token = "токен бота"
cid = str(input('Айди беседы = '))
photo = "photo472165736_457244077"
audio = "audio472165736_456239668"
msg = "fuck all. by Tripl_Color. @muuT3ra$$uu-kick-my-str-v.1 " ## можешь добавить свое сообщение
while True:
api = vk_requests.create_api(service_token=token)
print(api.messages.send(chat_id= cid, message= msg, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= photo, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, attachment= audio, random_id= random.randint(1, 2147483647)))
print(api.messages.send(chat_id= cid, message= random.randint(1, 2147483647), random_id= random.randint(1, 2147483647)))
print('Круг сообщений сделан')
time.sleep(5)
| [
4798,
5855,
15931,
198,
28516,
13,
198,
31,
76,
12303,
51,
18,
430,
13702,
12303,
12,
24585,
12,
1820,
12,
2536,
12,
85,
13,
16,
198,
2,
34094,
3237,
19693,
13,
220,
198,
1525,
7563,
489,
62,
8043,
410,
74,
13,
785,
14,
14824,
4... | 2.158392 | 423 |
import random
from heads import Head
| [
11748,
4738,
198,
6738,
6665,
1330,
7123,
628,
198
] | 4.333333 | 9 |