blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
780002a82af4cd9867b9c666fdee43af9b33a52a
|
2a12bedd69dbc4632b321dd40037df36b5e7bc76
|
/hospital/migrations/0009_auto_20201122_0139.py
|
7751eb005dc78242e9d1db918efeddc5efc96363
|
[] |
no_license
|
mbharti321/covidms
|
1a70392a2ad02b5cd08592566b082ca76efdabda
|
3e1e2ec4fc997b88bcea184ffde69a74e1f32692
|
refs/heads/main
| 2023-01-21T01:04:42.181821
| 2020-11-22T01:45:16
| 2020-11-22T01:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# Generated by Django 3.1.3 on 2020-11-21 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Patient.recovery_date and
    # redefines the Patient.severity choice set.

    dependencies = [
        ('hospital', '0008_patient_severity'),
    ]

    operations = [
        # New optional date recording when a patient recovered;
        # blank/null so existing rows need no value.
        migrations.AddField(
            model_name='patient',
            name='recovery_date',
            field=models.DateField(blank=True, null=True),
        ),
        # Severity becomes a five-level integer choice field, defaulting to 1.
        # NOTE(review): 'Potenitally worsening' is misspelled, but the label is
        # part of the recorded migration state — correct it in a follow-up
        # migration rather than editing this generated file.
        migrations.AlterField(
            model_name='patient',
            name='severity',
            field=models.IntegerField(choices=[(0, 'Mild'), (1, 'Potenitally worsening'), (2, 'Moderate severity'), (3, 'High severity'), (4, 'Requires urgent care')], default=1),
        ),
    ]
|
[
"ericmiranda7@gmail.com"
] |
ericmiranda7@gmail.com
|
803c19c6fe0f39db11b5ed2b0443c09df5ef0e85
|
48ca34f5666e8ce7e7777acf599a302317ad907d
|
/2-vervolg datatypes/taak02 -dicts/antwoorden.py
|
2411aa77fbe56f3c915c39f641b704cd5688004a
|
[] |
no_license
|
DaantjeDaantje/Python-Basic
|
8645121327d1ae8e891ec193add4642d5f86b813
|
f09c12fac98767e927607b4a0bfbef08ef2a14b3
|
refs/heads/master
| 2023-02-25T11:04:46.228327
| 2021-02-02T12:40:04
| 2021-02-02T12:40:04
| 293,811,687
| 0
| 0
| null | 2020-09-08T13:01:04
| 2020-09-08T13:01:04
| null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
#%%
# Exercise: build and mutate a dict of Dutch provinces -> capitals.
Provinciehoofdsteden = {
    'Noord_Holland': 'Amsterdam',
    'Zuid_Holland': 'Den_Haag',
    'Utrecht': 'Utrecht',
}
type(Provinciehoofdsteden)  # no-op expression: inspects the type without printing
print(Provinciehoofdsteden)

# Add three more provinces; Gelderland gets a placeholder capital for now.
Provinciehoofdsteden['Zeeland'] = 'Middelburg'
Provinciehoofdsteden['Limburg'] = 'Maastricht'
Provinciehoofdsteden['Gelderland'] = 'ert'
print(Provinciehoofdsteden)

# Remove one entry, show the result, then fix the placeholder capital.
del Provinciehoofdsteden["Zuid_Holland"]
print(Provinciehoofdsteden)
Provinciehoofdsteden["Gelderland"] = "Arnhem"

# Print every province => capital pair.
for provincie, hoofdstad in Provinciehoofdsteden.items():
    print(provincie, "=>", hoofdstad)
# %%
|
[
"danielleroelofsma@gmail.com"
] |
danielleroelofsma@gmail.com
|
1693ec93727b0777d4566244afdac149272f172e
|
471c56fc617994cb22dd1463d8ff413fdcf5aaa9
|
/24.py
|
d2ea7e2d356ecb0e57ef91624dfbdc000305db3f
|
[] |
no_license
|
blanchg/aoc202
|
e112597a1b4538096fa84c24293641465aa4115d
|
e14c6d16879b83178a585df0babd9be172cf2556
|
refs/heads/main
| 2023-02-05T11:57:56.953209
| 2020-12-27T13:09:39
| 2020-12-27T13:09:39
| 319,165,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,096
|
py
|
from hex import Hex, Layout, Point, hex_add, hex_direction, hex_distance, hex_neighbor, hex_scale, layout_pointy, polygon_corners
import time
from collections import deque, defaultdict
ms = time.time() * 1000
class HexValue:
    """Wraps a cube-coordinate Hex and (de)serialises it to a string key."""

    def __init__(self, q, r=None, s=None) -> None:
        # Accepts a "q,r,s" key string, a ready-made Hex tuple, or three ints.
        if isinstance(q, str):
            q, r, s = (int(part) for part in q.split(','))
            self.hex = Hex(q, r, s)
        elif isinstance(q, tuple):
            self.hex = q
        else:
            self.hex = Hex(q, r, s)

    def __repr__(self) -> str:
        return self.key()

    def key(self) -> str:
        # Width-2 padding keeps keys visually aligned; int() tolerates the
        # leading spaces when the key is parsed back in __init__.
        h = self.hex
        return f'{h.q:2},{h.r:2},{h.s:2}'

    def add(self, hexdir):
        # Translate this position in place by one direction step.
        self.hex = hex_add(self.hex, hexdir)
def apply_instructions():
    """Walk every path in the global `instructions` list from the origin and
    flip the tile each path ends on.

    Returns a defaultdict mapping position-key -> bool (True = black tile).
    """
    # Direction index (for hex_direction) of each one/two-letter step.
    step_to_direction = {'e': 2, 'w': 5, 'ne': 3, 'nw': 4, 'se': 1, 'sw': 0}
    # Buffer for a pending 'n'/'s' prefix; deliberately initialised once,
    # outside the path loop, matching the original behaviour.
    pending = ''
    layout = defaultdict(bool)
    for path in instructions:
        pos = HexValue(0, 0, 0)
        for letter in path:
            # 'n' and 's' are always prefixes of a two-letter step:
            # remember them and wait for the following 'e'/'w'.
            if letter == 'n' or letter == 's':
                pending = letter
                continue
            pending += letter
            if pending not in step_to_direction:
                raise Exception('Unknown instruction', pending)
            pos.add(hex_direction(step_to_direction[pending]))
            pending = ''
        key = str(pos)
        # Flip the destination tile; unknown keys default to False (white).
        layout[key] = not layout[key]
    return layout
def run1():
    """Count black tiles after the initial flipping pass (part 1 answer)."""
    layout = apply_instructions()
    black_count = 0
    for key, is_black in layout.items():
        print(key, is_black)  # original debug output, preserved
        if is_black:
            black_count += 1
    return black_count
def hex_ring(center, radius):
    """Return the hexes forming the ring at `radius` around `center`.

    NOTE: returns an empty list for radius == 0 (the inner loop never runs).
    """
    ring = []
    # Start at the corner reached by walking `radius` steps in direction 4,
    # then trace each of the six sides of the ring.
    cube = hex_add(center, hex_scale(hex_direction(4), radius))
    for side in range(6):
        for _ in range(radius):
            ring.append(cube)
            cube = hex_neighbor(cube, side)
    return ring
def hex_spiral(center, radius):
    """Return `center` plus every ring out to `radius`, inclusive."""
    spiral = [center]
    for ring_radius in range(1, radius + 1):
        spiral += hex_ring(center, ring_radius)
    return spiral
def run2():
    """Part 2: simulate 100 days of Conway-style tile flipping on the hex grid.

    Each day a black tile with 0 or >2 black neighbours flips white, and a
    white tile with exactly 2 black neighbours flips black.  The grid is
    expanded (pre-filled with white tiles) out to the current max distance so
    border tiles always have neighbours present.  Returns the final black-tile
    count.  Commented-out code is leftover tkinter visualisation.
    """
    result = 0
    layout = apply_instructions()
    max_distance = 1
    center = Hex(0, 0, 0)
    print('Before', len(layout.items()))
    hex_layout = Layout(layout_pointy, Point(10, 10), Point(480,480))
    # c = tk.Canvas(app.root, width=960, heigh=960, bg='lightgrey', highlightthickness=0)
    # c.place(x=0, y=0)
    # hexen = {}
    # Measure how far out the initial black tiles reach from the origin.
    for k,v in layout.items():
        hex1 = HexValue(k)
        distance = hex_distance(center, hex1.hex)
        if distance > max_distance:
            max_distance = distance + 1
    print('max dist', max_distance)
    # Pre-populate every tile within range as white so neighbour lookups
    # below never grow the dict mid-iteration.
    for h in hex_spiral(center, max_distance):
        k = str(HexValue(h))
        if k not in layout:
            layout[k] = False
            # c.create_polygon(polygon_corners(hex_layout,h), fill='',outline='grey')
        # hexen[k] = c.create_polygon(polygon_corners(hex_layout,hex1.hex), fill='white' if not layout[k] else 'black',outline='grey')
    print(bool())  # leftover debug print (always False)
    print('After', len(layout.items()))
    # poly = polygon_corners(hex_layout,center)
    # print(poly)
    # c.create_polygon(poly, fill='black',outline='grey')
    # c.place(x=0, y=0)
    for day in range(1, 101):
        # time.sleep(0.1)
        layout2 = defaultdict(bool)
        # Snapshot the items: layout is a defaultdict and reading missing
        # neighbour keys below inserts them, which would break iteration.
        items = layout.copy().items()
        for k1,v1 in items:
            hex1 = HexValue(k1)
            neighbours = []
            for move in range(0,6):
                hex2 = HexValue(hex_neighbor(hex1.hex, move))
                k2 = str(hex2)
                if layout[k2]:
                    neighbours.append(1)
                # else:
                #     neighbours.append(1 if layout[k2] else 0)
            black_neighbours = sum(neighbours)
            layout2[k1] = v1
            if v1: # True is black
                if black_neighbours == 0 or black_neighbours > 2:
                    layout2[k1] = False
            else:
                if black_neighbours == 2:
                    layout2[k1] = True
            # Track the growing frontier so the next expansion covers it.
            dist = hex_distance(center, hex1.hex)
            if dist >= max_distance:
                max_distance = dist + 1
                print('New max', max_distance)
        # Expand the new generation with white tiles out to the frontier.
        for h in hex_spiral(center, max_distance):
            k = str(HexValue(h))
            if k not in layout2:
                layout2[k] = False
        layout = layout2
        result = 0
        # c.delete('all')
        # for h in hex_spiral(center, max_distance):
        #     k = str(HexValue(h))
        #     if k not in layout:
        #         layout[k] = False
        #     c.create_polygon(polygon_corners(hex_layout,h), fill='',outline='grey')
        # Count black tiles after this day's flips.
        for k,v in layout.items():
            # print(k,v)
            if v:
                result += 1
                # hex1 = HexValue(k)
                #c.create_polygon(polygon_corners(hex_layout,hex1.hex), fill='white' if not v else 'black',outline='grey')
            # c.itemconfig(hexen[k], fill='white' if not v else 'black')
        print(f'Day {day}: {result}')
    return result
# import tkinter as tk
# import threading
# class App(threading.Thread):
# def __init__(self):
# threading.Thread.__init__(self)
# self.root = None
# self.start()
# def callback(self):
# self.root.quit()
# def run(self):
# self.root = tk.Tk()
# self.root.protocol("WM_DELETE_WINDOW", self.callback)
# self.root.geometry("960x960")
# # label = tk.Label(self.root, text="Day 20")
# # label.pack()
# self.root.mainloop()
# app = App()
instructions = [
'wwwnwwwwwwwwenwwwwwwww',
'swwwswswswswswswwswswswneswswswwswse',
'nenenesenewnenenesenesenenenenenewnewnene',
'seseswseseswseswwswseswseseseseneswseswse',
'nwnwsenwnwnwnwnenwnenwnwnwsenenwsenwnwnwne',
'swswswwswnewswswsw',
'eneeswnenenweesenenenwneneneeenene',
'wnwwnwwnwsewnwwnwnwwnwwwnwwnwnw',
'nwneswsenewenwnwswneswnenenwenwnenenwnwnw',
'senewnwwwwwwnwwwnwnwnwnwnwwwew',
'eewneneneeeeneeese',
'eneewseeneswnweswnwswsw',
'swwswswseswnwneseswseenwwsesesenewne',
'swswswswswswseswswseswswsenwsesesenwsee',
'esesenwnwnwseseneeswseswnwwewesesee',
'neenenwneswneneneeneneeweneneneswne',
'eeswswwwsesesenwswswsenwneswsenwswnenenw',
'swswswsweswswswseswnwswswswswswswswnesw',
'sewswswswwnwswnwswwwwwwswnwsesewsw',
'newnwseeneneeewseneswnenenenwenesee',
'eseneneneneseeeneweenweswswenwee',
'newswsesewswsewnwwnewneswwswnwsenww',
'eneneneeeewneswsewseeeenwnwnwnee',
'nesenwnweeswswnesesweseseseneswsesesw',
'swswneseswswsewswwswswneswswswseswseswsw',
'eneneeeseeeeeeweeeeeee',
'nenewnwneeneswseneneeswnenwswenenewne',
'swneseeneeeneneeeneeeeeweneeene',
'nwsenwwnwnwnwnwweswwnwsenwsenwnw',
'neeenenewneenese',
'ewsenwseseseeseseneseeeeseswewsese',
'nwnwsenenwnwnwwnwnwnwwsenwnwnwwsenwnw',
'neneseswnwnenwnwnwnenwnwnenwnenenenenenw',
'seseseseswswsenwwswswswswseseee',
'nenenwnwnwswnenenenenwnenwnenenenenene',
'nenesenenewnenwnenenwnwnwnwswnenenenwnenese',
'wswswswswswsweseswswswswswneswswswnwse',
'wwwnwnwnwwwsewnewnwwswsenenwww',
'eswswswsenenwsewseseeswseswnwnwnwswswsw',
'swswnwswewwneswwwwwwweswesww',
'swsweswswswswswswswswswswewneswswsewsw',
'wwsenwnwenwswnwnwnesesenenwnwnwsenenwnw',
'weeneneneenenweeneeseneseeenee',
'swwewwswwswsweswnewseneswwwswww',
'sewnwswnwwneswnwwneesewnewwnwewnw',
'wwwwwswwswwsewswwwswswwnenwww',
'neewneneenewswneeenwneeeneneswne',
'nwnenewnwsenwnesenwnenenwenwnenwnwnenwnw',
'swswswswswwwswsenwswswswwswswswneseswsw',
'wsewwwnwwwwwwwewnewwwnww',
'neeeseeneseseenwseseseeswnewseseseew',
'nwsenwneenwwnwwnwswnwnwwswwnwswneswew',
'wnwnwnewswnwnenwnwsewewwnwnwesenw',
'swwswwswnwswswswswweseswwneswswswsw',
'eeeeewnweseeeeeneeseseeeeenw',
'swneeeweeeseesew',
'wwwwwswswnewswswnenwwseneswwwse',
'nwseseseseseseseseseseeswneesesesesese',
'nwewwwnweneswswewswenwwweeseene',
'nwneneswnwneenenwswenwneswswenwnwnenenene',
'swwwswsenewseswnewnewwwswseswwswsw',
'nenenwnwnenwnwnenwnwneneeswnwnwnwnenene',
'nwnwwnwnwnwnenwnwnwnwnwnwnwnwwnwnwnwnwse',
'esenwnwwswnwneeswswseeenenwswneneene',
'eseseeseseeseseesesenwseseeeesee',
'nwenwwwnwenenwnwenwnwnwswnenwnwnwnwnw',
'eseenwseseseseswenwseseseseseeesesese',
'eeeeeeeeeeeeeswnwneeneene',
'wnwwwnwnwwnenwwwnwwwewswwwwnw',
'senwwneseseeseseseeseswwseenewsesesw',
'senwneseseseewseeweneseeseeeese',
'eeeeesweseswneswnweswnwnwenwneeww',
'nenenwswneeneeeeneeswneeeneeenenwe',
'nenwnenwnwnenwsenwnwnwwnenwnwnwnwnwnenw',
'eswnwenwsesesenwswenwnwswwnesenwswswswsw',
'seseseseseseseeseseseseseseweeeese',
'eswneenenwswewneeneneeneeeneee',
'sweswswswswwnwswsweswswswwswsww',
'swwwswswswwswwwnewwswswsewwsww',
'swsesesesesesesesesesesesesesesene',
'neneneneneneneeneneenewneneenenenene',
'neseswwswwswwwsewwwswswsenewwnew',
'swseneneseseswswswswseseswsesesesesesese',
'eswwswswwnwswenwsweswswswswnwwseswswsw',
'nenenenwnenenenenenwneneneneneneneenesw',
'nwnwewswnwneswseeewnwswseswenenwenw',
'wwwwnwnwwwnwwnwnwwweswnwwnwnww',
'nwnwnwnwnwnwnwnwnewnwswneswnwnenwsenw',
'wseseseseseseseseseesenese',
'senwswneswswswswswseswswswswseswsewswswse',
'eesesweneweeenwneeneeeesweeee',
'seeneseeeseeeenweeweeeewee',
'nenwnenweswneswnwnwne',
'neeneneeneneenewnenenesweeneenwene',
'seseseseseseseseseneseseesewsesesesesese',
'eswswsesenwnenwwnwswnwsenwnenwwnwnenee',
'nwnwnwwnwnwnwenwsewwenenwnwnwwswnw',
'wwnwnwnwwnwneswnw',
'nenwneneswnwnwneneneenenwneneenwnenew',
'swsewneswneswneswnweneese',
'eneeewneswnweneeseeeneneneenenene',
'sesewsewsweeneneeneneesenwewswsesw',
'seeenweseenweeseneeenweewesw',
'eenwwneeeeneeneeweneswnweeswne',
'neswwnewnwswswswweswwswwswsesw',
'sesenesewseseweseseseweneseseseese',
'eeenwneswsenwseswseseeeenwswenwew',
'nwnwneenenenwsweewnwnenwnenenewsewse',
'swswswwswswswswneswswswewswswswnwwww',
'swwnenenesenwesewnwswesesewnwesese',
'eseseseswewesesenweeseswnw',
'wswswswwwwwwswwswswswwwwwnew',
'nwseeseeeseseswseseseseseseesee',
'wsewseeeseeeeeeeneeneeseeeee',
'nenenewneneneneneneneneneneneneneenene',
'enenenwsenweenwswnwswswswswwswenwenwnw',
'nenwneneneswnwnenenenwnenw',
'eeewseseseeneeseeeseeenwswsee',
'enesweneswweeneenweeneneewsenewe',
'seswswswswseswswnwswseswswnwswswswswswsw',
'nwnwnenenenenenwswnenenenw',
'wwswwswwwwwwwwewwwswnwwww',
'eeeneseeweneneneeeneeneeenenee',
'nenewnenewneeneweneeneewnenenenene',
'seswenwseswswswswsewswswswswseneseswswse',
'nwnwnwwnwwwwnwnwnwnwnwnenwnwnwsewwnw',
'eseseseseswwseseeseesewseswseswswswnw',
'newwnenenenenwenenewswneswsenenesenese',
'nwnenesenenenwnesenwnenwnwnwnewnwnenwnenw',
'swesweswswswswswswwswswswswswswswswswswnw',
'nwnwswnenwnenenwnenwnenwnenw',
'nenenenenenenenenenenwneneeswnenenenene',
'nwewnwnwnwnwnesewswnenwnw',
'swswseseseseseswseeseswesesesesenwsenw',
'wwsewnenewnwwswwwwneswsenwwnww',
'neeesenenenenewneneeeneeeeneneene',
'wwwwwswsewnwww',
'seseeseseewneseseseesesesesesesesesese',
'swneweneeswneenwneswnenwneeenenene',
'eneeseeeeenenweeseneeenenwee',
'swnwwnwwsenwnesewswwnwnwnwenwnwnenwnw',
'swseseswseseseswseswsweseswsesesesenwsesw',
'wswnewswswswswwwwseww',
'seseseseseseseeenwseseseswseseseseesese',
'eeneseesenenewwenwsenenenenwenesewse',
'nwewwswswsweeswswwswwseswswwswsww',
'seswswswswswwwswswnewswswwewswswswsw',
'swseenenwnwnenewwsenenenewswneneese',
'wwwswenwwwswnwwseswsewnwseswwswne',
'senwseeeeseseseseseeseeeseswnwsesese',
'wnenewnenenenesenenenenenewsesenwnenene',
'neeeeeeeneseeeeeeenweeee',
'swswneswswswswswswneswswswswswwse',
'enwseswwswnwneswswswneeswswswswsesenwswsw',
'swnwsesewneseseseseseseswwsenesesw',
'seeeeeeenweeeneeeeene',
'eseswswseeswseswswwwse',
'newnenwswsenenweneneseenwsweeeewe',
'nwswseswswseesewseseseseswsenesesesese',
'enwneswseeswnenesewsenwswsenwseswswsw',
'swwsweneeseneenweseeeeeeewee',
'swwsewwwwwnwwwewwwwwwwnw',
'swneswswswswswwwswwnewswswwsewww',
'nenewneseenesewnwneswnenenenwnenenesene',
'neneenewsewseneenenenenenesenenwnenee',
'eseswneseeneneneweneeneneenewene',
'seeseseeseseseswseesenwnweseeeenw',
'enenwwnwswseewsewnwsewseesenesesese',
'nwwsenwwnwnwwwnwwwwwnwsewwnww',
'neeneeenewswneeseenenenenenweeswne',
'wsewneseswnwsenwseseneweseewseene',
'swswswswswswswswwswneswswswswswswswswsw',
'seeeenwseseseeeeseeseeeeseee',
'nwesenenwwnewwnwneenwweswnwsenenw',
'wnenwnenwsweneneseneneeswnenwnwnenesew',
'eeeseeeeeeseeeseeeeenwnwee',
'esenwwswwseweseseswseseseswseneeswnw',
'wnenenenwenwesewneneseseewnewnenese',
'nwnenwnenwnwnwnwnwwenenwnwnenwnwneswnw',
'eneneswnenenenwneneeeenenwneneswene',
'sewwwswswswwsewnwwswswneww',
'wwwswwswwwswwswwwwnewswseswsww',
'swswseseseseswswsewswesewseswseesesesw',
'senwnwwnwnwwwnwnwnwnwnwnwwnwnwswenww',
'wwewnwewsewwnwwnweseswswenesenw',
'neneneeeneeneneneneswneneeneeeswe',
'eeseneewesweeeseeeeeeeenwee',
'esenewnewnwneswneneneneeneneeeeee',
'wwnwwwwwwwsesewnwwwwwwswwsw',
'nenwseneesweneneneneswenenenwwsewnee',
'seswseswswnwnwswswsw',
'swneneenesewnwnwneneneswesewwnenenesw',
'neeneeeeweeneneneneneneneneenene',
'nenweneeeenwneneneseneeeneseneew',
'wswwneswswswsewswswswsenwswnesw',
'nwnwnenwnwnwnwswsenwwnwenwnwnesenwnwnwnw',
'senwnwwnwwswwnwwwnwnwnwwnwwnwwe',
'nenenwnwnwneneneenenenenwseswnwnenwneswnw',
'seeeseeeeseeeeeeeenwsewee',
'seseeseseseeeeeseneeseseewsesee',
'sesesewseseseseswswsweseswsewseswnesenwse',
'nwswnenwnwnwnwnwnwnwnwnwnwnwnwnwnwnw',
'nwnwnwnwnwnenwnwneenwsww',
'nenwneseneswnwneswnenwnenenwnwnenwnenwne',
'eneneneneneneweneneseneneeneneenenee',
'nwnwnwnwswnwnwesenenwneneswnwnwnwenwnenww',
'neneneneneneneneneneneneswnenenenenenesw',
'enwnewwswsesenwsenwsesweeeswnwnwse',
'neswwswswesenenwenenenenwswseeenwe',
'nwwnwnwnwsewnwnwwwwnenwnwnwsenwenw',
'swnwenwenweswswseswswwswwneswneswwwse',
'eeeeneeneneeeneeneeewnee',
'eeswseswseswswwwswneswwsweswswswne',
'swnwswswswswswwenwswswswswenewnwsewsw',
'nwwswnwnwwwneswnwwnwnwnwnewwnwwnww',
'nwswneeneewwewewneseseneseseswse',
'weneneeseneswwswswneeeeenwese',
'seneeseseeeseseewseeseeswsenwsee',
'swswswseswneseswsenwseseseseswsesesesesewse',
'nenenwwswwnwnwenweenwnwswsweswswsenw',
'nwnwnwnwnwnwnwnwnenwnwnwnwnenwnwnwsenwswsw',
'nwwwwnenwnwnwwnwnwwswnwwewnwnwsese',
'nwswseeseswsesesesenwsesesesesesese',
'seswseswswswnwsweswswnewswswnwswseswswsw',
'neseswwsewsewswwsweeswseeswne',
'swwwwnwnwenwwnwwwwwwnwwwwww',
'swwnenweneneewneeseneenenenenwseesene',
'nenenenenenwneneneswneneneneenenenenene',
'wnwnenwesenwnwnwwnwnwnwwnwwnwnwnwnww',
'wsesesesesesesesesesesesene',
'neneswnenwwnwnenwneenwnwnwnwnwnwnenwnw',
'wswsenwswsenenwnwseseseseesesesesenw',
'wwnwnwnwnwwnwneesesenwnwnwnwnwnwnwnw',
'nwenwneneneneswnenwnwnwnwnenwnwnenwnenw',
'eeneneesewneeeesenenenenewsenwene',
'sesesewneswseseseseswseseswse',
'sesenwsesesewseseeseeseseneseswesesese',
'nenwseswseseneseswwsesweseswnwsesesesesene',
'eseeseseeseesesesesesenweeewese',
'nwnenwwnwnwwnwsewnwwnwwwwnwwnww',
'swesenweeseseesewsesesesese',
'eeeneeeeeeneeenesewnenwswee',
'nenenweneeneneeewneseeneneneeenee',
'newswswwwwwwwsewwewswneswww',
'swswneswseswsweswnwwseeseswswswsewswswsw',
'ewwswswswneswwswswnwenwnweewesenw',
'wsenwnwsesewwnwnwwnewnwenenwnwnwwnw',
'nenwnwneneenenenenwnwnwnewsenenene',
'enenweswenwnenwswseeweeswseeeene',
'seeneseeeeeeseeweweeneenwee',
'enwnenweswnwnwswneswnenweseswnw',
'enwsenwswswnenewwseseeswwswnenenene',
'wnwsewenewnwwwwnwenwnwwwenwsenw',
'wswnwswswswswsenenwneneewesewswsewwnw',
'seseeewseeseseseeesesesenwseseseee',
'neeewenenwesenweeswnewwnwswswse',
'neenwnenwenwnwswswnwenewswneneswenwnw',
'nenenenesenenenenenewenee',
'eswseswwwwwnenwswwwsweewnwww',
'nenenenenwwenenenenenenwseswnesenewnwe',
'weeneneseneneneneneneneneneneenenene',
'sewseseswneseseswseswseesewnwseswseswnese',
'swswswswswswseswswseswneswswswwswsesesw',
'eeenwneneeeeeeeeswneneswnenee',
'sesenwsewneswseseweseseseeswswsewsene',
'seseseseseseswseseseeswsewsesenesenwe',
'wwswwswwwsenesww',
'wwnwnwwsenwwnwnenwnwnwnwnwseeswwe',
'swswwwswwwwwwswwwwseneeswwww',
'neswsweswnwswwswswwswswseswswswswswsw',
'sesesesesesewseseseseseesesesee',
'sesenwsweseenweenweswwseeneeesenwe',
'eeeeneeeesweeeeeeeeesweenw',
'esesesesesenwneweneeeseesesewsese',
'seseseneeeeeeweeseeseeesesese',
'wwnewwseswwswwwwswew',
'neswswswseswswseswswseswswwswswswswswswsw',
'seseseseswnesweswseneswsesesesewswsesese',
'seseseseseseseseneseseseseseswsesesewsesw',
'wnwneswswswseswswnwswwwwswswswsweww',
'neeeweeeeseeeeeneeeeneenee',
'nwswwwswnewnwswsenwwwwswwseeewe',
'swwwwwswewwwwnwwwnwwwewwnw',
'nwsenwnwnwnwnwwwnwnwnewnwnwnwnwsewnwnwne',
'wseeeewenesesenwweneeneeesesesww',
'swwwswswwswswneswwsewswwswwwswsw',
'nweeenenenwseewswnewsewenewsewe',
'swswseswswswswswswwswswswswseswswswneswsw',
'eeneeeneeeeeeseeneneweneeee',
'nenwsesenwnwnwnenwnwnwnwnwnwnewnenwnwnwnwnw',
'nwsweswswseseseseseswswswseseseseswswsesw',
'sesesenwenwewseeeseeseseneeeswnw',
'nwnwnenwewnwnwwwnwnwse',
'nwwnenwnwnenenenenwnenwnwnwneneseenewne',
'eseseesesesesenwnesweweneswneswwnw',
'nwnwwwwwenwnwnwsenwwww',
'nwnweneneenwsenenwnwwneneswnwnwwsenwnw',
'swseswsenwswnweseswseswsewseseeesw',
'neneneswnwnwseswwsenwnwwnwnwnenwnene',
'seeesewseneseseswsesesesenwseesesesee',
'senenweweseeneneeneswseeweseneww',
'eeeeeeweeeseeseeeneeeesee',
'eneseeeeeewneeeneeeeenene',
'wwewswsewwwwwnewwwsw',
'neeeeeeseeeeeeeeesweeee',
'swnwnwsewwwwwnwnwnwnenwenwnwneswnw',
'wseswswseseseswnenweswswsweseswseswse',
'wewnwswwswwwswswnewwwnwnewsewsw',
'nwnenwnenenwnwnenesenwneneseswwenenene',
'swenwneswsewenwwnwneswnwenwnwwswnwnwnw',
'senenenenewswneneneweswnenenenenenenwnene',
'swseseseswsesenwnesweseseeswnwsesenwsesw',
'swseseswneswswneswswswswswswnwneswseswse',
'nwwnwnwnwwnwwweswwnwnwwwnesenwenw',
'seeneswswswneenwesenwsenwwweeeswne',
'seseesesenwseeesesesweseseeeesese',
'senwnwswweseswnwneewwneenwnenenwsenw',
'swswwwwnwswwwseswsesewnwenwwwnese',
'neesenwnwnwnenenwnenewnenenenesenenwne',
'seswswswswsesesesesenwswseneswswswswseswsw',
'nenwwnwswneneenwnenenenwseeeswsenwnesw',
'eneneneeneneswnenenenenewsenenenenenene',
'nwswwwwsewnwwnwnenwnwwwewwsw',
'neneneneweeswseneneneenenenenenenenewne',
'ewwwwwwnenwnwwnwnwwesewnwnww',
'nenenenwnenenwneneneswnwnwnwnwnwneseneswne',
'wnwnwwwwsewnwwnwwwwnwsenwwwswe',
'neneeenwneneeneneeneneswnesweneew',
'wwnwnwwnwewnwwswewwwwnwwwww',
'eseeeeseeeenweenwweswseeee',
'nwnwnwwwnwnwnwswnwnwnwnwnewnwnww',
'eeenenenenenenesenenewneenenenenene',
'wswseswswewseneseewswnesenwswswnenw',
'nwwswnwnwnwnwwnwewwwnwnwnenwnwnwwswnw',
'nwsewneswwswswewswswswnwwnwseseswsw',
'seneneeeeeneeswneneswnwneenenenwne',
'sesenwsenwswseseseswseseswseswseseseene',
'eswswnenewweeseswwswneswwnenwseswne',
'wsweneesenewneswsenwnwsweneneneswnenw',
'nwnwnwnwnwenwnesewsenwswnwnwnwnwwnwnw',
'senwsewseseeseseswseswswswnwsenwseseesw',
'wswnwwswewwswswswwswswswswswswswsw',
'wnwneeswwnwnwnwwse',
'swswwswwwswswswwwswswwswswswneww',
'nwnwwnwwswnwwswenwwnewnwwwwnwnwe',
'seseseesesesesewseeeeseseesenesese',
'sesesesenwseeseseseseseseesese',
'swwwwnesenewnesw',
'senwseswwweseswswswseseseswseseswnesw',
'eeeenewewseeneeeeeseeseseese',
'eweeeneeeeeseneee',
'senwseseseswsesenwnesewwseseswsenwese',
'swsewswwwwnwwwwwwwwwwswwew',
'seeeeeseseesewesesesewseesesese',
'nenwnwnenwnwnwnwnwnwnwenwswnenwnw',
'swswswswswneswswswswsesweswswswswnwsww',
'seswsesesesesenwsesese',
'nenenenenesewneneneneswnenenenenenenenwne',
'swseeeeseeeeeeseeweeseesene',
'swsenwsewsenwsewenwseseswwnweseneese',
'nweneesweseesewseseswewsesese',
'neeseneneneneeseenwnenenenweenenee',
'wneeseeneeneeneeeenwneeneeenee',
'swnwnwnwnwnwnwnwswnwwwwwnwnwenewnw',
'esenwsesenesweeeseswsesesesesesesesese',
'seeeneneseeneweneeswnwneneeeewe',
'nesewswnwwweswswwwwswewswwww',
'wwwewenwnwnwnenwwnwnwnwnwswnwwwnwsw',
'nwnenwnwneseswwewswseesewwnwwnwsw',
'eeeneswesenenesweewneenwnenenwe',
'neswswswnwseseswseseseswewswswwneswsw',
'nenenenwnenenwnesenewnenenenenenenenene',
'wwnenwnewwnwnwswwwnwnwswnwnenwsewnw',
'nwneswswswwswswswswswwsewsw',
'seeswsenweesewneeenweneeswseeesw',
'neseswswseswsesesenwseeswsenesesesesww',
'weneeeeeeeeesweee',
'nwwwwwswwwwwwwnwwnwwwnwnew',
'nenewneneneneeewnwseseneneenewseswe',
'neswswwenenenwneswenwneswnwwswseswwswe',
'nwwnwnwnwnenwnwwwnwnwnwnwnwnenwnwsenwse',
'nwswenwwnwenwnwnwnwnwnwnwnwnwnwnwnwnw',
'nwwsewnwwwwsewnewwwwww',
'nenewneneneseneneeneneswnenwnenenwnwne',
'nwwsewnwnwnwnwsenwnwwwwwwnenenwnwwnw',
'seseswswswnwseswswswswswnweseseseswese',
'nwwnwsenwnwwnwwnwnwwnww',
'nenenwnenenwnwneenenwnenwnwnwwnenwnene',
'newewwewnwwsewweeeneswwwww',
'swnenwnwnwwseswsweswneswswesw',
'wwwwnwwnwnwnwwwwwwnwnwenwww',
'seswswwwnewnewwwnewnewwswwsww',
'eseswneweswneeeewnenw',
'eeneenewwweewenwswesenenwseww',
'wwwwwnwwsewwwswewwswswwswew',
'wesenwsesewewnwne',
'ewnwnwnewseswwseneneenenenenenwneenene',
'newwsewwwwwwwwnewneseswww',
'eeeeneneeneneswneneneneenenenee',
'enwwnwwwwnwewewwwwwnwwwww',
'newewnwswswwsenwesenenwewswswesw',
'nenwnwnenwnwnenwewnwwnwnenenwnwnwnwenw',
'neswnwnwnenenenwwswnenesesenenwnewnene',
'wwwnwwnwwwnwwnwnenwnwse',
'eneneneenwseneeweeesweeeewnee',
'swsesesenesenwesesesesesesewsesesesesesew',
'nweseeeeneswnweenweeswneeneee',
'nwnwnwnwnwnwnwseswnewnwnw',
'wnwneseenenwnenenenwnwnenenenenenwnenw',
'seseeeeeseweeseseseseseeseeenw',
'seseneswseseseseseseseseseseseseseseswse',
'eneenenenenenewneneewnenenenene',
'swswenwswswswswswsewswwwswswswwwswe',
'eseseseesenweseseeeswsenweseseee',
'wnwnwnwwnwwwnwnwnwnwnwnwnwwwsenwnw',
'wneeswneneneneeneneneneneneneneneene',
'seneeseswnwenwwnewsweseweseseseee',
'swwwwswewwswwswwwswwwwsww',
'wenwnwswnwwwwwnesenwnwnwsenwnwnenww',
'swswswswnewwenwnwswnewsesweswsesew',
'seswsesesesesesesesesesesesesesesenwnwsesee',
'swseswswswswswswneswswsw',
'swswenewnwnewnenwnwswnenwnwneseeseswesw',
'wwswwnwnwnwenwnwnwwwewwwenesw',
'nwwneeswnenenwneneenw',
'swseswwswswswswswwswswswswswswswswswnw',
'nenwnenwneseneneneneeneneeneswnenenee',
'seseswseseeswswswswswwswswswswswswseswnw',
'seseseeseneseeseseeesesesewseseesese',
'swwwwwwnwswswwwewwsewwwww',
'swwwwswswswswwewswswswsweswswnww',
'swswswseeswswsesewseswseseseswswsesesw',
'wnewsewswwwsewwnenwwsewwwwww',
'swseseswneswswsewswswswswswswswswswseswsw',
'seseseseswneswswneswswswswsesesesesewsesew',
'nwnwneswnenenwneeswneneenenenenenenenene',
'nenenenenwnwwnenenesenenesenwse',
'wneeneeneneneseneneeneneneneswneenene',
'newneneneesewneeneeenenesenenenenene',
'wseseseseseseseseseseseesenwseseseswnwe',
'nwwwswwnwnwnewwwnwwwwww',
'swseseseseseseswnwseseseseseeseseseseseswnw',
'nwnenwnenwenwswenwswnwnwnenwnenenwnenwnw',
'seeswsewwseseswseswseseswesesesesesesw',
'eeeeeeeeeewseenweseneee',
'seswseswswswnweswswswswswnwswswswswswswsw',
'wneeeeneeneneneneeneeeneeneee',
'newneneseeeeeneeeeweeeeeesew',
'wseswseneseeseeeseneseseesesenwse',
'swnweeeseneseenewwswesenwesweenw',
'newnwnenwnwnenwnwnenenenwnwnenenenwnwsene',
'nwswswswnenwsenwsewwneswseweseseww',
'wswwneewwwswww',
'sesesesesewneeseseswwwenesesesese',
'eeseeseeseneesewseseseeeeeesese',
'seeneeweesenesewwseeseeseswne',
'wswsenwsenenewnenewneeswsesesesesew',
'nenwwseneneswnenenenwneneneenwenwnene',
'seseenwseeseseseesenwseseeeeseeee',
'nenenesenweeeneeeneneneneneneneee',
'seseseewseseseseswswseseseseseeseseswnw',
'enwnwswswwswwwsenwneeesenweewswsesw',
'seseseseseneeseseseseseweeeweese',
'swwwswewwswnwswswswswswwswswwswswsw',
'swsesesesenwswsewesesenwseeswsenwsesese',
'neneneneswneeneeeenwneneswnenwnenene',
'senwewwswnweseneneswneeswnweswwe',
'enenwnenenwnewnwnwnenenw',
'swneswswsewwswswnewneeswnesweswswsw',
'seseseswenwnwseeswswneseenwneseeswsw',
'nenwwwwsenwwww',
'wwswswewwwnwnwwsewnweswewnwnww',
'nwnesenwneneneneenenewnenewseneneesene',
'eseeneneneneeeneneewneneeeenee',
'nesesewswseneesesesweseseeeenesewse',
'newswwsenwswswwswwwswewswwwwswne',
'wwswenwswswwswwswswwwewwswswsww',
'wswsweseseesenesenwseeseneseesenwsee',
'wseenweesesesenenwseeseseseeseesesw',
'wnwwsewwwewswwswswwswwwnewsw',
'enesewneenewswnenenesenenenenenenenw',
'wswswswswwnewswnwsweswsw',
'swswseswswswswswswswswswswswswswneswswse',
'wwswswnwswswseswswnewswseswnesw',
'seswsenwswseswnwseswswswswsenwseeeswwse',
'senenenenwnwewnenenwnenwneneneweseswne',
]
# Entry point: solve part 1, then part 2, then report total wall-clock time
# (ms was captured at import time near the top of the file).
part1 = run1()
print(f'Part1: {part1}')
part2 = run2()
print(f'Part2: {part2}')
print(f'Took {time.time()*1000 - ms}ms')
|
[
"glen.blanchard@adgile.media"
] |
glen.blanchard@adgile.media
|
a8a8399bfa809d696c5a030a9946a0d1162f8291
|
ee05d8196d40e2e59885f639150cc318be189986
|
/website.py
|
1de4d31c0e24aed095685cd85fbbfc2aa86fe08b
|
[] |
no_license
|
isakson/Endangered-Species-API
|
d83dd5b74cf5d76c65d0750f6e844861f6ed13ee
|
68ae3f97a4b82d9a56cf0cdfe7eda23c847d8a9c
|
refs/heads/master
| 2020-03-24T22:24:17.246659
| 2018-08-01T00:24:39
| 2018-08-01T00:24:39
| 143,083,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
#!/usr/bin/env python3
'''
website.py
'''
import sys
import flask
app = flask.Flask(__name__, static_folder='static', template_folder='templates')
@app.route('/')
def get_main_page():
    """Render the index page (the only human-facing route), passing the
    module-level api_port (set in __main__) through to the template."""
    # `global` is unnecessary for a read-only access of a module global.
    return flask.render_template('index.html', api_port=api_port)
if __name__ == '__main__':
    # Expect exactly three arguments: host, port and the API's port.
    if len(sys.argv) != 4:
        print('Usage: {0} host port api-port'.format(sys.argv[0]), file=sys.stderr)
        # Fix: exit non-zero on a usage error so shells/scripts can detect it
        # (the original bare exit() returned status 0, signalling success).
        sys.exit(1)
    host = sys.argv[1]
    port = sys.argv[2]
    api_port = sys.argv[3]  # read by get_main_page() via module scope
    app.run(host=host, port=port)
|
[
"saksoni@carleton.edu"
] |
saksoni@carleton.edu
|
03c09f5406d6286376fef2f47a651851ed002f74
|
7d780e632edbd73b9df391394fc1b365851108af
|
/app/worker/base_worker/trainer.py
|
e1144e6c49f789c469161dfbb68c5808fca8fe71
|
[] |
no_license
|
rompear/attention_rl
|
1bcaf998a69e81e0947bc3b3e7f00e808d5647d0
|
d45c04380a25587870c550742a9694de351c7a8f
|
refs/heads/master
| 2020-07-31T11:47:11.451104
| 2019-11-19T13:07:07
| 2019-11-19T13:07:07
| 210,594,294
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,119
|
py
|
import torch
import gym
import app.util.wrapers_atari.atari_wrappers as Atari_Wrappers
import numpy as np
from app.config.config_factory import ConfigFactory
from app.util.logger.logger import Logger
from typing import Dict, Any, Tuple, List, Union
from app.util.wrapers_atari.atari_wrappers import LazyFrames
class Trainer:
def __init__(self, config: ConfigFactory, model: Dict[str, Any], logger: Logger) -> None:
self.model = model
self.config = config
self.phases = config.phase
self.pred_dict = {} # used for inference and test
self.iterations = 0 # used for train and val.
self.logger = logger
self.create_env()
def create_env(self) -> None:
self.env = Atari_Wrappers.make_atari(self.config.env_name)
print(self.env)
if self.config.env_name != "CartPole-v0":
self.env = Atari_Wrappers.wrap_deepmind(self.env, episode_life=False, frame_stack=True, scale= (not self.config.ram), clip_rewards=True)
def set_phase_configuration(self, phase: str) -> None:
if phase == 'train':
self.model['policy'].train()
self.model['target'].eval()
else:
self.model['policy'].eval()
self.model['target'].eval()
def if_debug(self) -> bool:
if self.config.debug:
self.logger.comet.close()
print("Configuration are in DEBUG mode.")
return self.config.debug
def inference(self) -> Dict[str, Any]:
with torch.no_grad():
pred_dict = {}
for phase in ['inference']:
self.set_phase_configuration(phase)
pred_dict[phase] = []
self.get_inference(phase)
return self.pred_dict['inference']
def test(self) -> Dict[str, Any]:
self.model.eval()
with torch.no_grad():
self.pred_dict = {}
for phase in ['test']:
self.set_phase_configuration(phase)
self.pred_dict[phase] = []
self.get_test(phase)
return self.pred_dict['test']
def train_and_val(self) -> None:
self.iterations = 0
for epoch in range(self.config.hyperparameters.epoch):
self.epoch = epoch
print('Epoch {}/{}'.format(epoch, self.config.hyperparameters.epoch - 1))
print('-' * 10)
for phase in ['train']:
self.set_phase_configuration(phase)
self.get_train(phase)
def perform_train_step(self, loss: torch.optim, retain_graph: bool = False) -> None:
loss.backward(retain_graph=retain_graph)
self.model['optimizer'].step()
def items_to_device(self, items: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
return tuple([item.to(self.config.device) for item in items])
def transition_to_tensor(self, state: torch.Tensor,
next_state: Union[np.ndarray, LazyFrames],
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
pretrain: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
reward = torch.tensor([reward])
next_state = self.state_to_tensor(next_state)
done = torch.tensor([done])
done = done.to(dtype=torch.bool)
return (state, action, next_state, reward, done, pretrain)
def state_to_tensor(self, state: Union[np.ndarray, LazyFrames]) -> torch.Tensor:
if self.config.env_name == "CartPole-v0":
state = torch.from_numpy(np.array(state))
else:
state = torch.from_numpy(np.array(state._frames))
state = state.squeeze(-1)
state = state.to(dtype=torch.float)
state = state.unsqueeze(0)
state = state.unsqueeze(0)
return state
def get_test(self, phase: str) -> None:
pass
def get_inference(self, phase: str) -> None:
pass
    def get_train(self, phase: str) -> None:
        """Hook called from train_and_val() for each phase; a no-op in this
        base class.

        NOTE(review): presumably overridden by subclasses with the actual
        training step — confirm.
        """
        pass
|
[
"romeogoosens94@gmail.com"
] |
romeogoosens94@gmail.com
|
f3bf4efd08ac73f061fd67e8806df8cc2163f5d6
|
0c9bc1db20e8f88a366839c32c0662a681d80379
|
/src/skymusic/renderers/instrument_renderers/instrument_renderer.py
|
98edca3561c549dbe0cd793bd09e395b9ae3094d
|
[
"MIT"
] |
permissive
|
TrendingTechnology/sky-python-music-sheet-maker
|
d1074399b2d92059ea64c6c84871655cfc9cc270
|
d749c7163c01686350cf77a9740c814f763140cb
|
refs/heads/master
| 2023-05-19T17:40:39.751401
| 2021-06-13T01:25:30
| 2021-06-13T01:25:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
from skymusic import Lang
from skymusic.instruments import Voice
class InstrumentRenderer():
    """Base renderer that dispatches an instrument to the voice or harp path."""

    def __init__(self, locale=None):
        if locale is not None:
            self.locale = locale
        else:
            # No locale supplied: fall back to a guess and flag it loudly.
            self.locale = Lang.guess_locale()
            print(f"**ERROR: Song self.maker has no locale. Reverting to: {self.locale}")

    def render(self, *args, **kwargs):
        """Render *instrument* (first positional arg or keyword) with the
        renderer matching its type."""
        try:
            instrument = args[0]
        except IndexError:
            instrument = kwargs['instrument']
        if isinstance(instrument, Voice):
            return self.render_voice(*args, **kwargs)
        return self.render_harp(*args, **kwargs)
|
[
"jmmelko@gmail.com"
] |
jmmelko@gmail.com
|
3af61ccc05ac107e2d8dc13ce31678bf1379ca5c
|
894f35e683119469b85768bcd7fe95658fd3d900
|
/1-Python101/variables/mutable_and_unmmutable.py
|
cc87976987ce4a5cc199c7605bc90c3556831af9
|
[] |
no_license
|
sltm-14/Python_Course
|
8af91b75563e3b0129d225e51b18f39567547491
|
58f8e78902e78abc13367274bb224de56fb5cb0c
|
refs/heads/master
| 2020-12-21T21:23:58.254837
| 2020-11-19T18:03:09
| 2020-11-19T18:03:09
| 236,566,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
""" -------------------------------------------------------------------------------------------------------------------------------------
UNMMUTABLE
In python integers are unmmutable, that means that the value will be allocated in a different space of memory everythime it changes,
and X will reference this new memory location.
Because we don't have a reference to the original memory, at some point python garbage collector will automatically release this memory.
------------------------------------------------------------------------------------------------------------------------------------- """
x = 1
print(id(x)) # It will show the location where X is stored || Output: 2060314544
x = 3
print(id(x)) # It will show the location where X is stored || Output: 2060314576
""" -------------------------------------------------------------------------------------------------------------------------------------
MUTABLE
On the other hand, list are mutable, thats why even if we add or delete its objects, the location does not change.
------------------------------------------------------------------------------------------------------------------------------------- """
x = [1, 2, 3]
print(id(x)) # It will show the location where X is stored || Output: 57933608
x.append(4)
print(id(x)) # It will show the location where X is stored || Output: 57933608
|
[
"ie693242@iteso.mx"
] |
ie693242@iteso.mx
|
728b4497309cb53507d8324b36ea8bd2d0693130
|
aaa07613c41fed96fb6d7fe5dc292975e17fb107
|
/isovar/genetic_code.py
|
c703b7f00116baad44d36508c1c4167141a87eb6
|
[
"Apache-2.0"
] |
permissive
|
openvax/isovar
|
2fa89f88525e72d94b974d5a20f038e3bdc15bf4
|
e43e2574dc783a5dfc65b055f977bd0f11df015b
|
refs/heads/master
| 2023-08-18T20:14:39.338144
| 2023-08-01T17:56:23
| 2023-08-01T17:56:23
| 51,102,454
| 17
| 10
|
Apache-2.0
| 2020-08-19T18:48:54
| 2016-02-04T20:14:48
|
Python
|
UTF-8
|
Python
| false
| false
| 8,454
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
"""
GeneticCode objects contain the rules for translating cDNA into a protein
sequence: the set of valid start and stop codons, as well as which
amino acid each DNA triplet is translated into.
"""
class GeneticCode(object):
    """
    Represents distinct translation tables to go from cDNA triplets to amino
    acids.
    """
    def __init__(self, name, start_codons, stop_codons, codon_table):
        self.name = name
        self.start_codons = set(start_codons)
        self.stop_codons = set(stop_codons)
        self.codon_table = dict(codon_table)
        self._check_codons()

    def _check_codons(self):
        """
        If codon table is missing stop codons, then add them.
        """
        # Every declared stop codon must map to '*' (add the mapping if absent).
        for codon in self.stop_codons:
            existing = self.codon_table.get(codon)
            if existing is None:
                self.codon_table[codon] = "*"
            elif existing != "*":
                raise ValueError(
                    ("Codon '%s' not found in stop_codons, but codon table "
                     "indicates that it should be") % (codon,))

        # Every declared start codon must be translatable.
        for codon in self.start_codons:
            if codon not in self.codon_table:
                raise ValueError(
                    "Start codon '%s' missing from codon table" % (codon,))

        # Only declared stop codons may translate to '*'.
        for codon, amino_acid in self.codon_table.items():
            if amino_acid == "*" and codon not in self.stop_codons:
                raise ValueError(
                    "Non-stop codon '%s' can't translate to '*'" % (codon,))

        # A complete table covers all 4^3 = 64 triplets.
        if len(self.codon_table) != 64:
            raise ValueError(
                "Expected 64 codons but found %d in codon table" % (
                    len(self.codon_table)))

    def translate(self, cdna_sequence, first_codon_is_start=False):
        """
        Given a cDNA sequence which is aligned to a reading frame, returns
        the translated protein sequence and a boolean flag indicating whether
        the translated sequence ended on a stop codon (or just ran out of codons).

        Parameters
        ----------
        cdna_sequence : str
            cDNA sequence which is expected to start and end on complete codons.

        first_codon_is_start : bool
            Is the first codon of the sequence a start codon?
        """
        # Accept sequence-like objects (e.g. Biopython Seq) as well as str.
        cdna_sequence = str(cdna_sequence)

        # Trim to a multiple-of-3 length: 1-2 dangling nucleotides at the end
        # cannot be translated (the ribosome falls off there).
        usable_length = 3 * (len(cdna_sequence) // 3)

        table = self.codon_table
        if first_codon_is_start and cdna_sequence[:3] in self.start_codons:
            # Start codons always initiate with methionine.
            residues = ['M']
            offset = 3
        else:
            residues = []
            offset = 0

        ends_with_stop_codon = False
        for pos in range(offset, usable_length, 3):
            residue = table[cdna_sequence[pos:pos + 3]]
            if residue == "*":
                ends_with_stop_codon = True
                break
            residues.append(residue)
        return "".join(residues), ends_with_stop_codon

    def copy(
            self,
            name,
            start_codons=None,
            stop_codons=None,
            codon_table=None,
            codon_table_changes=None):
        """
        Make copy of this GeneticCode object with optional replacement
        values for all fields.
        """
        new_start = (start_codons if start_codons is not None
                     else self.start_codons.copy())
        new_stop = (stop_codons if stop_codons is not None
                    else self.stop_codons.copy())
        new_table = (codon_table if codon_table is not None
                     else self.codon_table.copy())
        if codon_table_changes is not None:
            new_table.update(codon_table_changes)
        return GeneticCode(
            name=name,
            start_codons=new_start,
            stop_codons=new_stop,
            codon_table=new_table)
# The canonical nuclear genetic code: full 64-entry codon table plus the
# standard start and stop codons.
standard_genetic_code = GeneticCode(
    name="standard",
    start_codons={'ATG', 'CTG', 'TTG'},
    stop_codons={'TAA', 'TAG', 'TGA'},
    codon_table={
        'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',
        'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
        'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*',
        'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
        'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
        'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
        'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
        'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
        'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
        'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
        'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
        'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
        'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'
    }
)

# Non-canonical start sites based on figure 2 of
# "Global mapping of translation initiation sites in mammalian
# cells at single-nucleotide resolution"
standard_genetic_code_with_extra_start_codons = standard_genetic_code.copy(
    name="standard-with-extra-start-codons",
    start_codons=standard_genetic_code.start_codons.union({
        'GTG',
        'AGG',
        'ACG',
        'AAG',
        'ATC',
        'ATA',
        'ATT'}))

# Variant of the standard code with the stop/start/codon-table changes that
# apply in vertebrate mitochondria.
vertebrate_mitochondrial_genetic_code = standard_genetic_code.copy(
    name="verterbrate-mitochondrial",
    # "For thirty years AGA and AGG were considered terminators instead
    # of coding for arginine. However, Temperley (2010) has recently shown
    # that human mitochondria use only UAA and UAG stop codons."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    stop_codons={'TAA', 'TAG'},
    # "AUU codes for isoleucine during elongation but can code for
    # methionine for initiation (ND2) See Fearnley & Walker (1987) and
    # Peabody (1989)."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    start_codons=['ATT', 'ATC', 'ATA', 'ATG', 'GTG'],
    # "UGA codes for tryptophan instead of termination and AUA codes for
    # methionine instead of isoleucine."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    codon_table_changes={'TGA': 'W', 'ATA': 'M'},
)
def translate_cdna(
        cdna_sequence,
        first_codon_is_start=False,
        mitochondrial=False):
    """
    Given a cDNA sequence which is aligned to a reading frame, returns
    the translated protein sequence and a boolean flag indicating whether
    the translated sequence ended on a stop codon (or just ran out of codons).

    Parameters
    ----------
    cdna_sequence : str
        cDNA sequence which is expected to start and end on complete codons.

    first_codon_is_start : bool
        If True and the first codon is a start codon, it is translated as
        methionine ('M') regardless of its usual amino acid.

    mitochondrial : bool
        Use the mitochondrial codon table instead of standard
        codon to amino acid table.
    """
    # once we drop some of the prefix nucleotides, we should be in a reading frame
    # which allows us to translate this protein
    if mitochondrial:
        genetic_code = vertebrate_mitochondrial_genetic_code
    else:
        # Non-mitochondrial translation also accepts the extended set of
        # non-canonical start codons.
        genetic_code = standard_genetic_code_with_extra_start_codons
    return genetic_code.translate(
        cdna_sequence=cdna_sequence,
        first_codon_is_start=first_codon_is_start)
|
[
"alex.rubinsteyn@gmail.com"
] |
alex.rubinsteyn@gmail.com
|
99f94f0fc3ee9a38ec3c34db968e6e99a9ea7e86
|
f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2
|
/2016/AST2/Bili/letnji/ponovo/konacno1.py
|
7482940ccb3f15b8866efb9be0b7760bf88d483d
|
[] |
no_license
|
ispastlibrary/Titan
|
a4a7e4bb56544d28b884a336db488488e81402e0
|
f60e5c6dc43876415b36ad76ab0322a1f709b14d
|
refs/heads/master
| 2021-01-17T19:23:32.839966
| 2016-06-03T13:47:44
| 2016-06-03T13:47:44
| 60,350,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,475
|
py
|
from scipy.special import wofz
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import interpolate
e=1.60217657e-19 #elementarno naelektrisanje [C]
eV=1.60217657e-19 #eV u J
AU=149597871000 #Astronomska jedinica [m]
Na=6.02*1e+23 #Avogadrov broj
M=23*1e-3 #molarna masa Na [kg/mol]
me=9.1e-31 #masa elektrona[kg]
Rk=400000 #poluprecnik kome[m] ?????????????????????????????
k=1.38*10e-23 #Bolcmanova konst [J\K]
dE=3.37*1e-19 #razlika energetskih stanja 3p i 3s, povezana sa talasnom duzinom [J]
R=Na*k #Univerzalna gasna konstanta [J/(molK)]
L0=589e-9 #centralna talasna duzina D2 linije [m]
h=6.63*1e-34 #Plankova konstanta [Js]
c=299792458 #brzina svetlosti [m/s]
A=6.14e+7 #Ajnstajnov koef za verovatnocu spontane emisije[s^-1] 3p-3s
g0=1 # statisticka tezina 3s orbitale (s)
g1=3 # statisticka tezina 3p orbitale (px,py,pz)
V0=c/L0 #centralna frekvencija [Hz]
#Tef=5777 #efektivna temperatura Sunca [K]
d=0 #rastojanje prave (posmatranog pravca) od centra jezgra
#niz koef za NaI
#niz koef za NaII
#koeficijenti aproksimativne polinomne funkcije za izracunavanje particione f-je
a=[-2.60507178e+3,1.71419244e+3,-4.50632658e+2,5.91751503e+1,-3.88164070e+0,1.01752936e-1]
b=[-3.68868193e-6,2.28941495e-6,-5.66767833e-7,6.99552282e-8,-4.30495956e-9,1.05668164e-10]
Ro=1000 #gustina jezgra [kg/m^3], Andrija
Rn=2.5e4 #poluprecnik jezgra [m]
S=1/2 #funkcija izvora
AMU=1.66*10e-27 #jedinica atomske mase u kg
sigma=5.670373e-8 #Stefan - Bolcmanova konstanta[Wm^-2K^-4]
mna=22.9877*AMU #atomska masa Na u kg
mnaoh=39.998*AMU #masa NaOH u kg
mh20=18.015*AMU #masa H2O u kg
def voigt(x,y): #Fojtova funkcija, ali samo realni deo Faddeeva f-je
z = x + 1j*y
w = wofz(z).real
return w
def part_funk(a,T): #izracunavanje part f-je
K=0
for i in range(6):
K += a[i]*pow(np.log(T),i)
Zp = np.exp(K)
return Zp
def rastojanje(T): #heliocentricno rastojanje [AU]
#Lt=-582*(T-273)+2.62e6 #latentna toplota sublimacije [J/kg], T u c, a ne u K
Lt=2.62e6
Sc=1361 #solarna const na 1AU [W/m^2]
Pr=1.2*(10**12)*np.exp(-6000/T) #pritisak zasicene pare na povrsini komete
Zm=Pr*np.sqrt(mh20/(2*np.pi*k*T)) #sublimacioni fluks H2O [kgs^-1m^-2] (masena stopa produktivnosti)
rasto=np.sqrt(Sc/(sigma*T**4+Lt*Zm))
return rasto
"""
def Q_H20(T):
mh20=18.015*AMU
Pr=1.2*(10e+12)*np.exp((-6000)/T)
prod=Pr*np.sqrt(1/(2*np.pi*k*T*mh20))
prod=prod*4*np.pi*Rn*Rn
return prod"""
def Q_Na(T): #ukupna stopa produktivnosti Na [s^-1]
Pr=1.2*(10e+12)*np.exp((-6000)/T) #pritisak zasicene pare
produ=Pr*np.sqrt(1/(2*np.pi*k*T*mna)) #stopa produktivnosti Na [m^-2s^-1]
produ=produ*4*np.pi*Rn*Rn #ukupna stopa
return produ
# 207.6 K -> 1 AU, ~1e+32 ukupna stopa produktivnosti Na
def brz_izb(T,masa): #brzina outflow (izlivanja)=termalna brzina [m/s]
#brz=20*pow(rastojanje(T),-0.41)*(10**3)
brz = np.sqrt((2*k*T)/masa)
return brz
tau_p0=10**3 #vreme dok se roditelj ne unisti [s]
tau_d0=1.67*(10**5) #vreme dok se cerka ne unisti [s]
def lp(T): #skalirana duzina roditelja [m]
roditelj = brz_izb(T,mnaoh)*tau_p0*(rastojanje(T))**2
return roditelj
def ld(T): #skalirana duzina cerke [m]
cerka = brz_izb(T,mna)*tau_d0*(rastojanje(T))**2
return cerka
#plt.gca().set_yscale('log')
def kon_Na_koma(T,r): #koncentracija Na u komi, Haserov model
Dr=r-Rn # redukovano trenutno rastojanje Rkome-Rjezgra
konc=(Q_Na(T)*ld(T)*(np.exp(-Dr/lp(T))-np.exp(-Dr/ld(T))))/(4*np.pi*brz_izb(T,mna)*r*r*(lp(T)-ld(T)))
#print(ld(T),lp(T))
return konc
def MaxBol(T,r): #koncentracija Na u osnovnom 3s stanju (n0) na rastojanju r od kome
NaI = kon_Na_koma(T,r) #ukupan broj Na iz Haserovog modela
Zp = part_funk(a,T) #particiona f-ja
Er=5.1390767*eV #energija 3s za Na [J]
#dE=h*c/L0
#Er=dE
#Er=0
n0 =(NaI*g0*np.exp(-Er/(k*T)))/Zp #konc Na u osnovnom stanju
return n0
def Bol(T,r):
NaI = kon_Na_koma(T,r)
dE = h*c/L0
odnos = g1*(np.exp(-dE/(k*T)))/g0
n0 = NaI/(1+odnos)
return n0
def ajnstajnB(A): # za A(koef emisije) (3p-3s) vraca B (Ajnstajnov koef apsorpcije za verovatnocu prelaza (3s-3p)[m^3s^-2J^-1])
B = (c**2*A*g1)/(8*np.pi*h*V0**3*g0)
return B
def Dopl_sirina(T): #Doplerova sirina u funkciji of temperature [Hz]
Dopler = np.sqrt(2*R*T/M)*(V0/c)
return Dopler
def koef_aps(V,T,r,av): #koeficijent apsorpcije
B = ajnstajnB(A)
konc_aps = n0 = Bol(T,r) #MAXBOLC statistika
Dopler = Dopl_sirina(T)
apsor = ((B*n0*h*V)/(4*np.pi*Dopler))*Fojtov_profil(V,av)
return apsor
br_ljuspica = 2500
dr = Rk/br_ljuspica #koma je podeljena na 50000 ljuspica
def opt_dub(d,V,T,av): #opticka dubina za nehomogenu komu
r1 = Rk
r2 = r1-dr
suma_opt = 0
broj = br_ljuspica - 1 - math.floor(d/dr)
"""for i in range(broj):
r2 = r1 - dr
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1 = r2"""
while (r1>(Rn+d)) and (r2>(Rn+d)):
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1=r2
r2=r1-dr
ds = np.sqrt(r1*r1 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
suma_opt *= 2
return suma_opt
"""def poc_intez(V,T): #pocetni intezitet preko plankove fje
plank=(2*h*V*V*V)/(c*c*(np.exp((h*V)/(k*T))-1))
return plank"""
N_tacaka = 150
V1 = c/(589.02e-9)
V2 = c/(588.98e-9)
dV = (V2 - V1)/N_tacaka
def izlazni(V,T,av): #izlazni intenzitet normiran tako da je I0=1, a funkcija izvora S=1/2
tau = opt_dub(d,V,T,av) #opticka dubina
#2*Rk*koef_aps(V,T,Rk)
q = S*(1-np.exp(-tau))
return q
def Fojtov_profil(V,av):
F=voigt((V-V0)/Dopl_sirina(T),av)/(np.pi*Dopl_sirina(T))
return F
def E_W(x,y): #ekvivalentna sirina, metodom cubic spline
EkW=0
tck=interpolate.splrep(x,y)
i=0
while(i<(N_tacaka-1)):
EkW+=interpolate.splint(x[i],x[i+1],tck)
i+=1
return EkW
"""T=207
av=A/Dopl_sirina(T)
x=np.linspace(V1,V2,N_tacaka)
y0=izlazni(x,av,0)
#y1=izlazni(x,av,1)
#plt.suptitle('Uporedjivanje dveju metoda za nalazenja broja Na u osnovnom 3s stanju u odnosu na ukupan broj Na')
plt.plot(x,y0,lw=5,label='Maksvel - Bolcmanova statistika')
#plt.plot(x,y1,label='Bolcmanova raspodela')
plt.legend(loc='best')
plt.show()"""
d=0
x=np.linspace(V1,V2,N_tacaka)
T=300
print(d)
av=A/Dopl_sirina(T)
y0=izlazni(x,T,av)
dd=Rk/6
d=d+dd
print(d)
y1=izlazni(x,T,av)
d=d+dd
print(d)
y2=izlazni(x,T,av)
d=d+dd
print(d)
y3=izlazni(x,T,av)
d=d+dd
print(d)
y4=izlazni(x,T,av)
plt.suptitle('Profil linije Na za različite preseke na T=220K')
plt.xlabel('Frekvencija[Hz]')
plt.ylabel('Relativni intenzitet')
plt.plot(x,y0,label='d=0 m')
plt.plot(x,y1,label='d=66666 m')
plt.plot(x,y2,label='d=133333 m')
plt.plot(x,y3,label='d=200000 m')
plt.plot(x,y4,label='d=266666 m')
#plt.plot(x,y5,label='0.06 AU, 260K')
plt.legend(loc='best')
plt.show()
"""xt=np.linspace(100,210,100) yt=Q_Na(xt) plt.plot(xt,yt)
plt.xlabel('Temperatura[K]') plt.ylabel('Stopa produkcije Na[s^-1]')
plt.title('Grafik zavisnosti stope produkcije od temperature')"""
#plt.yscale('log')
|
[
"ispast.library@gmail.com"
] |
ispast.library@gmail.com
|
6680de51002ea138c0edcb346732ab17cab16b1a
|
efc7433020b404f19bd46372000032797d361a0c
|
/apps/account/enums.py
|
9164b76980a275512b305acbeea9dbd65b5ada36
|
[] |
no_license
|
dimitrisamp/joinflyline
|
2819571c867434ec7b7e859a21cd278bb2bde501
|
dff0ab38244e6e2876443995fe99ad9959fc3db4
|
refs/heads/develop
| 2022-12-21T13:11:19.364128
| 2020-02-14T13:37:30
| 2020-02-14T13:37:30
| 242,969,405
| 0
| 0
| null | 2022-12-12T03:15:37
| 2020-02-25T10:12:46
|
Vue
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
from django_enumfield import enum
class CompanionInviteStatus(enum.Enum):
    # Status of a companion invite; NOTE(review): values suggest the
    # lifecycle created -> email_sent -> active — confirm against usage.
    created = 0
    email_sent = 1
    active = 2
class DWGKind(enum.Enum):
    # Kind classification; -1 is the sentinel for an unknown kind.
    unknown = -1
    domestic = 0
    international = 1
    private = 2
|
[
"bladeofdima@gmail.com"
] |
bladeofdima@gmail.com
|
2245cd251ad202e770f21c9460b99be910f1aa89
|
ba34c60da52d847a7a6b6030db5afe1fb3e4c432
|
/python/tornado/com.py
|
d43302ac834c933b301f4e8ad9ecb705172bb17a
|
[] |
no_license
|
yurnerola/Script
|
533eb3bca45b69498e6e6a5e5c07ea37fcef5df9
|
6d887fddf44098df72f7d581d82f13375d2720b9
|
refs/heads/master
| 2016-09-05T17:08:35.239295
| 2014-05-27T02:46:43
| 2014-05-27T02:46:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
import re;
def usage():
    """Return the command-line usage text (HTML-flavoured: <br/> line breaks)."""
    return '''
    Usage:<br/>
    logger [options]<br/>
    Options:<br/>
    -h Print this usage.<br/>
    -i <hallid><br/>
    Example:<br/>
    logger -i 447283
    '''
if __name__=="__main__":
m=re.match(r'(\w+) (\w+)(?P<sign>.*)','hello world!')
print "m.string:",m.string
print "m.re:",m.re
print "m.pos:",m.pos
print "m.endpos:",m.endpos
print "m.lastindex:",m.lastindex
print "m.lastgroup:",m.lastgroup
print "m.group(1):",m.group(1)
print "m.group(1,2):",m.group(1,2)
print "m.groups():",m.groups()
print "m.groupdict():",m.groupdict()
p=re.compile(r'\d+')
print p.split('one1two2three3four4')
print p.findall('one1two2three3four4')
# for m in p.finditer('one1two2three3four4'):
# print m.group()
p=re.compile(r'(\w+) (\w+)')
s='i say,hello world!'
# print p.sub(r'\2 \1',s)
def func(m):
return m.group(1).title() + ' ' + m.group(2).title()
print p.sub(func, s)
|
[
"yurnerola@gmail.com"
] |
yurnerola@gmail.com
|
43d061b4c4472d4324336ec30c70f872f8e872b6
|
ce4d8e005d68a48d091f28351729d8f5e902de2e
|
/lib/PageNotFoundException.py
|
ed919f82b36320ab17ecee78dfaddc24f617ee6e
|
[] |
no_license
|
alexgf/document-similarity
|
cc1c0e80a1383dda897b5e1b75e9e366754dd8c7
|
c4674d6af792e9b2e4efcad500dcacebbbad47de
|
refs/heads/main
| 2023-08-25T02:31:37.260524
| 2021-09-16T15:55:21
| 2021-09-16T15:55:21
| 406,755,812
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
class PageNotFoundException(Exception):
    """ Exception raised when URL is not found

    Attributes:
        url - url not possible to be loaded
        message - human-readable description of the failure
    """

    def __init__(self, url):
        self.url = url
        # BUG FIX: the message previously read "Enable to load"; the intended
        # wording is "Unable to load".
        self.message = f'Unable to load {self.url}'
        # Pass the message to Exception so str(e) and e.args are populated.
        super().__init__(self.message)
|
[
"agferreira.rj@gmail.com"
] |
agferreira.rj@gmail.com
|
79721e05ee944e87e9420fe7e05baa78184b0db0
|
16b03199949be9d18e4487419667180cb2c232ca
|
/testLocation.py
|
9d751858a43bbc599bba07dfdfb30837961b0162
|
[] |
no_license
|
hellokathy/MarketSim
|
30b115967b66cf43fb57fc52b28235abd04d7d42
|
e2dfbc3f8912badc7987e806887eb9f3c338d28a
|
refs/heads/master
| 2021-01-23T02:59:36.273443
| 2014-09-05T19:13:03
| 2014-09-05T19:13:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
from location import *
from exchange import Exchange
from agent import Agent
import unittest
class TestLocation(unittest.TestCase):
    """Tests that agents constructed with a location join its population and
    share its exchange."""

    def setUp(self):
        # Two independent exchanges, each wrapped in its own location,
        # with one agent bound to each location at construction time.
        exchange1 = Exchange()
        exchange2 = Exchange()
        self.location1 = Location(exchange1)
        self.location2 = Location(exchange2)
        self.agent1 = Agent(location=self.location1)
        self.agent2 = Agent(location=self.location2)

    def test_setUp(self):
        # Each agent shares its location's exchange ...
        self.assertTrue(self.agent1.exchange == self.location1.exchange)
        self.assertTrue(self.agent2.exchange == self.location2.exchange)
        # ... is registered in the location's population ...
        self.assertTrue(self.agent1 in self.location1.population)
        self.assertTrue(self.agent2 in self.location2.population)
        # ... and reports that location back via getLocation().
        self.assertTrue(self.agent1.getLocation() == self.location1)
        self.assertTrue(self.agent2.getLocation() == self.location2)
|
[
"delbalso@gmail.com"
] |
delbalso@gmail.com
|
3058f9b477b8a173f082bf87d9701e0d5998c875
|
ce0ed63440993396190ddd6878880ab3652bc657
|
/comments/urls.py
|
c50b75c7a0d73d6f2e736b0e9fc39490142a7024
|
[] |
no_license
|
daduoduo123/project1
|
762b208c27cde71c44c8c95a0422c9de08f4f794
|
b6d25183b7d703d9bd4cc6a051b22ed6e1f48411
|
refs/heads/master
| 2021-06-20T21:21:24.063288
| 2019-06-30T03:48:06
| 2019-06-30T03:48:06
| 194,406,916
| 0
| 0
| null | 2021-06-10T21:39:30
| 2019-06-29T13:08:32
|
CSS
|
UTF-8
|
Python
| false
| false
| 195
|
py
|
from django.conf.urls import url
from .import views
app_name='comments'
urlpatterns=[
url(r'^comments/post/(?P<post_pk>[0-9]+)/$', views.post_comment, name='post_comment'), # 文章评论
]
|
[
"785216423@qq.com"
] |
785216423@qq.com
|
82517b45e33e99c6728eea5ef933042d18891240
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/plugin/core/instructionsearch/ui/SearchDirectionWidget.pyi
|
68398a7321fe0e264877ec23688efac4696bbacd
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,511
|
pyi
|
from typing import List
import ghidra.app.plugin.core.instructionsearch.ui
import java.awt
import java.awt.dnd
import java.awt.event
import java.awt.im
import java.awt.image
import java.beans
import java.io
import java.lang
import java.util
import javax.accessibility
import javax.swing
import javax.swing.border
import javax.swing.event
import javax.swing.plaf
class SearchDirectionWidget(ghidra.app.plugin.core.instructionsearch.ui.ControlPanelWidget):
class Direction(java.lang.Enum):
BACKWARD: ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction = BACKWARD
FORWARD: ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction = FORWARD
@overload
def compareTo(self, __a0: java.lang.Enum) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getDeclaringClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def name(self) -> unicode: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def ordinal(self) -> int: ...
def toString(self) -> unicode: ...
@overload
@staticmethod
def valueOf(__a0: unicode) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
@overload
@staticmethod
def valueOf(__a0: java.lang.Class, __a1: unicode) -> java.lang.Enum: ...
@staticmethod
def values() -> List[ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction]: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self, __a0: unicode, __a1: ghidra.app.plugin.core.instructionsearch.ui.InstructionSearchDialog): ...
def action(self, __a0: java.awt.Event, __a1: object) -> bool: ...
@overload
def add(self, __a0: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.PopupMenu) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: int) -> java.awt.Component: ...
@overload
def add(self, __a0: unicode, __a1: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object, __a2: int) -> None: ...
def addAncestorListener(self, __a0: javax.swing.event.AncestorListener) -> None: ...
def addComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def addContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def addFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def addHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def addHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def addInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def addKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def addMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def addMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def addMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def addNotify(self) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def addVetoableChangeListener(self, __a0: java.beans.VetoableChangeListener) -> None: ...
def applyComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
def areFocusTraversalKeysSet(self, __a0: int) -> bool: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> int: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> int: ...
def computeVisibleRect(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def contains(self, __a0: java.awt.Point) -> bool: ...
@overload
def contains(self, __a0: int, __a1: int) -> bool: ...
def countComponents(self) -> int: ...
@overload
def createImage(self, __a0: java.awt.image.ImageProducer) -> java.awt.Image: ...
@overload
def createImage(self, __a0: int, __a1: int) -> java.awt.Image: ...
def createToolTip(self) -> javax.swing.JToolTip: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int) -> java.awt.image.VolatileImage: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int, __a2: java.awt.ImageCapabilities) -> java.awt.image.VolatileImage: ...
def deliverEvent(self, __a0: java.awt.Event) -> None: ...
def disable(self) -> None: ...
def dispatchEvent(self, __a0: java.awt.AWTEvent) -> None: ...
def doLayout(self) -> None: ...
@overload
def enable(self) -> None: ...
@overload
def enable(self, __a0: bool) -> None: ...
def enableInputMethods(self, __a0: bool) -> None: ...
def equals(self, __a0: object) -> bool: ...
@overload
def findComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def findComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: long, __a2: long) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: bool, __a2: bool) -> None: ...
def getAccessibleContext(self) -> javax.accessibility.AccessibleContext: ...
def getActionForKeyStroke(self, __a0: javax.swing.KeyStroke) -> java.awt.event.ActionListener: ...
def getActionMap(self) -> javax.swing.ActionMap: ...
def getAlignmentX(self) -> float: ...
def getAlignmentY(self) -> float: ...
def getAncestorListeners(self) -> List[javax.swing.event.AncestorListener]: ...
def getAutoscrolls(self) -> bool: ...
def getBackground(self) -> java.awt.Color: ...
def getBaseline(self, __a0: int, __a1: int) -> int: ...
def getBaselineResizeBehavior(self) -> java.awt.Component.BaselineResizeBehavior: ...
def getBorder(self) -> javax.swing.border.Border: ...
@overload
def getBounds(self) -> java.awt.Rectangle: ...
@overload
def getBounds(self, __a0: java.awt.Rectangle) -> java.awt.Rectangle: ...
def getClass(self) -> java.lang.Class: ...
def getClientProperty(self, __a0: object) -> object: ...
def getColorModel(self) -> java.awt.image.ColorModel: ...
def getComponent(self, __a0: int) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
def getComponentCount(self) -> int: ...
def getComponentListeners(self) -> List[java.awt.event.ComponentListener]: ...
def getComponentOrientation(self) -> java.awt.ComponentOrientation: ...
def getComponentPopupMenu(self) -> javax.swing.JPopupMenu: ...
def getComponentZOrder(self, __a0: java.awt.Component) -> int: ...
def getComponents(self) -> List[java.awt.Component]: ...
def getConditionForKeyStroke(self, __a0: javax.swing.KeyStroke) -> int: ...
def getContainerListeners(self) -> List[java.awt.event.ContainerListener]: ...
def getCursor(self) -> java.awt.Cursor: ...
def getDebugGraphicsOptions(self) -> int: ...
@staticmethod
def getDefaultLocale() -> java.util.Locale: ...
def getDropTarget(self) -> java.awt.dnd.DropTarget: ...
def getFocusCycleRootAncestor(self) -> java.awt.Container: ...
def getFocusListeners(self) -> List[java.awt.event.FocusListener]: ...
def getFocusTraversalKeys(self, __a0: int) -> java.util.Set: ...
def getFocusTraversalKeysEnabled(self) -> bool: ...
def getFocusTraversalPolicy(self) -> java.awt.FocusTraversalPolicy: ...
def getFont(self) -> java.awt.Font: ...
def getFontMetrics(self, __a0: java.awt.Font) -> java.awt.FontMetrics: ...
def getForeground(self) -> java.awt.Color: ...
def getGraphics(self) -> java.awt.Graphics: ...
def getGraphicsConfiguration(self) -> java.awt.GraphicsConfiguration: ...
def getHeight(self) -> int: ...
def getHierarchyBoundsListeners(self) -> List[java.awt.event.HierarchyBoundsListener]: ...
def getHierarchyListeners(self) -> List[java.awt.event.HierarchyListener]: ...
def getIgnoreRepaint(self) -> bool: ...
def getInheritsPopupMenu(self) -> bool: ...
def getInputContext(self) -> java.awt.im.InputContext: ...
@overload
def getInputMap(self) -> javax.swing.InputMap: ...
@overload
def getInputMap(self, __a0: int) -> javax.swing.InputMap: ...
def getInputMethodListeners(self) -> List[java.awt.event.InputMethodListener]: ...
def getInputMethodRequests(self) -> java.awt.im.InputMethodRequests: ...
def getInputVerifier(self) -> javax.swing.InputVerifier: ...
@overload
def getInsets(self) -> java.awt.Insets: ...
@overload
def getInsets(self, __a0: java.awt.Insets) -> java.awt.Insets: ...
def getKeyListeners(self) -> List[java.awt.event.KeyListener]: ...
def getLayout(self) -> java.awt.LayoutManager: ...
def getListeners(self, __a0: java.lang.Class) -> List[java.util.EventListener]: ...
def getLocale(self) -> java.util.Locale: ...
@overload
def getLocation(self) -> java.awt.Point: ...
@overload
def getLocation(self, __a0: java.awt.Point) -> java.awt.Point: ...
def getLocationOnScreen(self) -> java.awt.Point: ...
def getMaximumSize(self) -> java.awt.Dimension: ...
def getMinimumSize(self) -> java.awt.Dimension: ...
def getMouseListeners(self) -> List[java.awt.event.MouseListener]: ...
def getMouseMotionListeners(self) -> List[java.awt.event.MouseMotionListener]: ...
@overload
def getMousePosition(self) -> java.awt.Point: ...
@overload
def getMousePosition(self, __a0: bool) -> java.awt.Point: ...
def getMouseWheelListeners(self) -> List[java.awt.event.MouseWheelListener]: ...
def getName(self) -> unicode: ...
def getNextFocusableComponent(self) -> java.awt.Component: ...
def getParent(self) -> java.awt.Container: ...
def getPopupLocation(self, __a0: java.awt.event.MouseEvent) -> java.awt.Point: ...
def getPreferredSize(self) -> java.awt.Dimension: ...
@overload
def getPropertyChangeListeners(self) -> List[java.beans.PropertyChangeListener]: ...
@overload
def getPropertyChangeListeners(self, __a0: unicode) -> List[java.beans.PropertyChangeListener]: ...
def getRegisteredKeyStrokes(self) -> List[javax.swing.KeyStroke]: ...
def getRootPane(self) -> javax.swing.JRootPane: ...
def getSearchDirection(self) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
@overload
def getSize(self) -> java.awt.Dimension: ...
@overload
def getSize(self, __a0: java.awt.Dimension) -> java.awt.Dimension: ...
def getToolTipLocation(self, __a0: java.awt.event.MouseEvent) -> java.awt.Point: ...
@overload
def getToolTipText(self) -> unicode: ...
@overload
def getToolTipText(self, __a0: java.awt.event.MouseEvent) -> unicode: ...
def getToolkit(self) -> java.awt.Toolkit: ...
def getTopLevelAncestor(self) -> java.awt.Container: ...
def getTransferHandler(self) -> javax.swing.TransferHandler: ...
def getTreeLock(self) -> object: ...
def getUI(self) -> javax.swing.plaf.ComponentUI: ...
def getUIClassID(self) -> unicode: ...
def getVerifyInputWhenFocusTarget(self) -> bool: ...
def getVetoableChangeListeners(self) -> List[java.beans.VetoableChangeListener]: ...
def getVisibleRect(self) -> java.awt.Rectangle: ...
def getWidth(self) -> int: ...
def getX(self) -> int: ...
def getY(self) -> int: ...
def gotFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def grabFocus(self) -> None: ...
def handleEvent(self, __a0: java.awt.Event) -> bool: ...
def hasFocus(self) -> bool: ...
def hashCode(self) -> int: ...
def hide(self) -> None: ...
def imageUpdate(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: int, __a4: int, __a5: int) -> bool: ...
def inside(self, __a0: int, __a1: int) -> bool: ...
def invalidate(self) -> None: ...
def isAncestorOf(self, __a0: java.awt.Component) -> bool: ...
def isBackgroundSet(self) -> bool: ...
def isCursorSet(self) -> bool: ...
def isDisplayable(self) -> bool: ...
def isDoubleBuffered(self) -> bool: ...
def isEnabled(self) -> bool: ...
@overload
def isFocusCycleRoot(self) -> bool: ...
@overload
def isFocusCycleRoot(self, __a0: java.awt.Container) -> bool: ...
def isFocusOwner(self) -> bool: ...
def isFocusTraversable(self) -> bool: ...
def isFocusTraversalPolicyProvider(self) -> bool: ...
def isFocusTraversalPolicySet(self) -> bool: ...
def isFocusable(self) -> bool: ...
def isFontSet(self) -> bool: ...
def isForegroundSet(self) -> bool: ...
def isLightweight(self) -> bool: ...
@staticmethod
def isLightweightComponent(__a0: java.awt.Component) -> bool: ...
def isManagingFocus(self) -> bool: ...
def isMaximumSizeSet(self) -> bool: ...
def isMinimumSizeSet(self) -> bool: ...
def isOpaque(self) -> bool: ...
def isOptimizedDrawingEnabled(self) -> bool: ...
def isPaintingForPrint(self) -> bool: ...
def isPaintingTile(self) -> bool: ...
def isPreferredSizeSet(self) -> bool: ...
def isRequestFocusEnabled(self) -> bool: ...
def isShowing(self) -> bool: ...
def isValid(self) -> bool: ...
def isValidateRoot(self) -> bool: ...
def isVisible(self) -> bool: ...
def keyDown(self, __a0: java.awt.Event, __a1: int) -> bool: ...
def keyUp(self, __a0: java.awt.Event, __a1: int) -> bool: ...
@overload
def list(self) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream, __a1: int) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter, __a1: int) -> None: ...
def locate(self, __a0: int, __a1: int) -> java.awt.Component: ...
def location(self) -> java.awt.Point: ...
def lostFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def mouseDown(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseDrag(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseEnter(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseExit(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseMove(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseUp(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def move(self, __a0: int, __a1: int) -> None: ...
def nextFocus(self) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def paint(self, __a0: java.awt.Graphics) -> None: ...
def paintAll(self, __a0: java.awt.Graphics) -> None: ...
def paintComponents(self, __a0: java.awt.Graphics) -> None: ...
@overload
def paintImmediately(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def paintImmediately(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
def postEvent(self, __a0: java.awt.Event) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> bool: ...
def print(self, __a0: java.awt.Graphics) -> None: ...
def printAll(self, __a0: java.awt.Graphics) -> None: ...
def printComponents(self, __a0: java.awt.Graphics) -> None: ...
def putClientProperty(self, __a0: object, __a1: object) -> None: ...
@overload
def registerKeyboardAction(self, __a0: java.awt.event.ActionListener, __a1: javax.swing.KeyStroke, __a2: int) -> None: ...
@overload
def registerKeyboardAction(self, __a0: java.awt.event.ActionListener, __a1: unicode, __a2: javax.swing.KeyStroke, __a3: int) -> None: ...
@overload
def remove(self, __a0: int) -> None: ...
@overload
def remove(self, __a0: java.awt.Component) -> None: ...
@overload
def remove(self, __a0: java.awt.MenuComponent) -> None: ...
def removeAll(self) -> None: ...
def removeAncestorListener(self, __a0: javax.swing.event.AncestorListener) -> None: ...
def removeComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def removeContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def removeFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def removeHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def removeHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def removeInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def removeKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def removeMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def removeMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def removeMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def removeNotify(self) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def removeVetoableChangeListener(self, __a0: java.beans.VetoableChangeListener) -> None: ...
@overload
def repaint(self) -> None: ...
@overload
def repaint(self, __a0: long) -> None: ...
@overload
def repaint(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def repaint(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def repaint(self, __a0: long, __a1: int, __a2: int, __a3: int, __a4: int) -> None: ...
def requestDefaultFocus(self) -> bool: ...
@overload
def requestFocus(self) -> None: ...
@overload
def requestFocus(self, __a0: bool) -> bool: ...
@overload
def requestFocus(self, __a0: java.awt.event.FocusEvent.Cause) -> None: ...
@overload
def requestFocusInWindow(self) -> bool: ...
@overload
def requestFocusInWindow(self, __a0: java.awt.event.FocusEvent.Cause) -> bool: ...
def resetKeyboardActions(self) -> None: ...
def reshape(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def resize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def resize(self, __a0: int, __a1: int) -> None: ...
def revalidate(self) -> None: ...
def scrollRectToVisible(self, __a0: java.awt.Rectangle) -> None: ...
def setActionMap(self, __a0: javax.swing.ActionMap) -> None: ...
def setAlignmentX(self, __a0: float) -> None: ...
def setAlignmentY(self, __a0: float) -> None: ...
def setAutoscrolls(self, __a0: bool) -> None: ...
def setBackground(self, __a0: java.awt.Color) -> None: ...
def setBorder(self, __a0: javax.swing.border.Border) -> None: ...
@overload
def setBounds(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def setBounds(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
def setComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
def setComponentPopupMenu(self, __a0: javax.swing.JPopupMenu) -> None: ...
def setComponentZOrder(self, __a0: java.awt.Component, __a1: int) -> None: ...
def setCursor(self, __a0: java.awt.Cursor) -> None: ...
def setDebugGraphicsOptions(self, __a0: int) -> None: ...
@staticmethod
def setDefaultLocale(__a0: java.util.Locale) -> None: ...
def setDoubleBuffered(self, __a0: bool) -> None: ...
def setDropTarget(self, __a0: java.awt.dnd.DropTarget) -> None: ...
def setEnabled(self, __a0: bool) -> None: ...
def setFocusCycleRoot(self, __a0: bool) -> None: ...
def setFocusTraversalKeys(self, __a0: int, __a1: java.util.Set) -> None: ...
def setFocusTraversalKeysEnabled(self, __a0: bool) -> None: ...
def setFocusTraversalPolicy(self, __a0: java.awt.FocusTraversalPolicy) -> None: ...
def setFocusTraversalPolicyProvider(self, __a0: bool) -> None: ...
def setFocusable(self, __a0: bool) -> None: ...
def setFont(self, __a0: java.awt.Font) -> None: ...
def setForeground(self, __a0: java.awt.Color) -> None: ...
def setIgnoreRepaint(self, __a0: bool) -> None: ...
def setInheritsPopupMenu(self, __a0: bool) -> None: ...
def setInputMap(self, __a0: int, __a1: javax.swing.InputMap) -> None: ...
def setInputVerifier(self, __a0: javax.swing.InputVerifier) -> None: ...
def setLayout(self, __a0: java.awt.LayoutManager) -> None: ...
def setLocale(self, __a0: java.util.Locale) -> None: ...
@overload
def setLocation(self, __a0: java.awt.Point) -> None: ...
@overload
def setLocation(self, __a0: int, __a1: int) -> None: ...
def setMaximumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMinimumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMixingCutoutShape(self, __a0: java.awt.Shape) -> None: ...
def setName(self, __a0: unicode) -> None: ...
def setNextFocusableComponent(self, __a0: java.awt.Component) -> None: ...
def setOpaque(self, __a0: bool) -> None: ...
def setPreferredSize(self, __a0: java.awt.Dimension) -> None: ...
def setRequestFocusEnabled(self, __a0: bool) -> None: ...
@overload
def setSize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def setSize(self, __a0: int, __a1: int) -> None: ...
def setToolTipText(self, __a0: unicode) -> None: ...
def setTransferHandler(self, __a0: javax.swing.TransferHandler) -> None: ...
def setUI(self, __a0: javax.swing.plaf.PanelUI) -> None: ...
def setVerifyInputWhenFocusTarget(self, __a0: bool) -> None: ...
def setVisible(self, __a0: bool) -> None: ...
@overload
def show(self) -> None: ...
@overload
def show(self, __a0: bool) -> None: ...
def toString(self) -> unicode: ...
def transferFocus(self) -> None: ...
def transferFocusBackward(self) -> None: ...
def transferFocusDownCycle(self) -> None: ...
def transferFocusUpCycle(self) -> None: ...
def unregisterKeyboardAction(self, __a0: javax.swing.KeyStroke) -> None: ...
def update(self, __a0: java.awt.Graphics) -> None: ...
def updateUI(self) -> None: ...
def validate(self) -> None: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def searchDirection(self) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
6b091982c00c4c87893ece4161b1b801fddc4ed4
|
d5acc40e766857c2399013c5228fc4f02b2f5210
|
/python/41 - pandigital prime.py
|
b012c97c2db6ce1baba569f8be217f62d4f573a7
|
[] |
no_license
|
celvro/euler
|
a79dec92620fedfaaa5789a29d964395488fc1d8
|
f76d6f00efd9db26b9caad72e94110d5ef972b37
|
refs/heads/master
| 2016-09-10T03:21:41.417156
| 2015-04-30T21:27:38
| 2015-04-30T21:27:38
| 20,080,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# Project Euler 41: find the largest pandigital prime.
# NOTE: Python 2 syntax (`print` statement) — runs only under Python 2.
from util import prime
import itertools

x = []
# NOTE(review): only 7-digit pandigitals are tried — presumably because any
# 8- or 9-digit pandigital has a digit sum divisible by 3 and so cannot be
# prime; confirm against the problem statement.
for i in itertools.permutations('1234567'):
    # Reversing the joined permutation still yields a pandigital candidate.
    num = (''.join(i))[::-1]
    if prime(int(num)):
        x.append(num)
# All collected strings have the same length, so lexicographic max() equals
# numeric max here.
print max(x)
|
[
"dwlfk2@mst.edu"
] |
dwlfk2@mst.edu
|
90e8d38ca104a9333d1b862e16bbc7ebd1820480
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02747/s534963942.py
|
e59afea127c18cba4bb7f22ef8a7070d00c3b7dd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
# Print 'Yes' when the input line consists solely of repeated "hi",
# i.e. non-overlapping occurrences of "hi" account for every character.
line = input()
verdict = 'Yes' if line.count('hi') * 2 == len(line) else 'No'
print(verdict)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
553c7caf000c233ba51244d4df4ff97ba86e93c4
|
5201bb9ea835f8cce502690d25b67868000140ff
|
/Desafios/Aula 020/ex098.py
|
f4765fdfddd25bde19cddc292869ee4887217eb6
|
[] |
no_license
|
vitorsemidio-dev/curso-python-guanabara
|
4eccb05f17fc4cf84361eb499ab0113bf25901f7
|
fcd656abb0faf7a36ff70e2544c4cbbd12db3a8a
|
refs/heads/master
| 2021-01-05T12:32:02.619140
| 2020-02-17T05:10:58
| 2020-02-17T05:10:58
| 241,024,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
from time import sleep
from random import randint
pausa = 0.5
def imprime(inicio, fim, passo):
    """Print the integers from `inicio` to `fim` (inclusive) stepping by
    `passo`, pausing `pausa` seconds between numbers."""
    passo = passo or 1              # a zero/None step defaults to 1
    if inicio > fim:                # counting down: force a negative step
        passo, ultimo = -abs(passo), -1
    else:
        ultimo = 1
    for valor in range(inicio, fim + ultimo, passo):
        print(valor, end=" ")
        sleep(pausa)
    print("")
def contador(inicio, fim, passo):
    """Demo driver: runs two fixed count sequences, then one from user input.

    NOTE(review): the `inicio`, `fim` and `passo` parameters are never used —
    they are shadowed by the `input()` calls below, so the arguments passed
    at the call site have no effect on the output.
    """
    print("Imprimindo de 1 a 10 com passo 1")
    imprime(1, 10, 1)
    print("Imprimindo de 10 até zero, de dois em dois")
    imprime(10, 0, -2)
    print("Isto aqui nao eh Yu-Gi-Oh! Mas eh sua vez")
    # Prompts are left-padded to 10 columns for aligned output.
    inicio = int(input(f"{'Inicio: ':<10}"))
    fim = int(input(f"{'Fim: ':<10}"))
    passo = int(input(f"{'Passo: ':<10}"))
    imprime(inicio, fim, passo)


contador(1, 23, 3)
|
[
"vitorsemidio.96@gmail.com"
] |
vitorsemidio.96@gmail.com
|
bcb8bf4c3d80cd5e54a047f3b9db444bb171f47e
|
753532ab35fc3d83b750b1b79603cc1613408523
|
/liqian/UCSD/research/research/spiders/UCSDSpider.py
|
da674e1f7a544f8d15dfbacd832fc5443d9d41d1
|
[
"Unlicense"
] |
permissive
|
doge-search/webdoge
|
4e9435f2ba744201adca1bfe2288994e1f284f00
|
443e758b5c1f962d5c2fe792cdbed01e1208b1cb
|
refs/heads/master
| 2021-01-15T08:16:34.357284
| 2016-06-15T17:22:43
| 2016-06-15T17:22:43
| 54,782,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
import scrapy
from scrapy.http import FormRequest
import re
from research.items import ResearchItem
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
postname = "abc"
class CaltechSpider(scrapy.Spider):
    """Scrape UCSD Jacobs School faculty listings, one item per institute.

    NOTE(review): the class is named CaltechSpider but the spider name and
    URLs target UCSD — looks like a copy-paste from a sibling spider.
    NOTE: Python 2 code (`print` statements).
    """
    name = "ucsd"
    allowed_domains = ["jacobsschool.ucsd.edu"]
    start_urls = ["http://jacobsschool.ucsd.edu/faculty/faculty_bios/findprofile.sfe?department=cse"]

    def parse(self, response):
        # Submit the directory-search form once per institute option.
        for opt in response.xpath('//select[@name="institute"]/option/@value'):
            tmpname = opt.extract()
            # BUG(review): this binds a *local* `postname`; without a
            # `global postname` statement the module-level value ("abc")
            # is never updated, so parse_page always reads "abc".
            postname = tmpname
            yield FormRequest.from_response(response, formname='dirSearch', formdata={'institute': tmpname}, callback=self.parse_page)

    def parse_page(self, response):
        # Reads the module-level `postname` (see bug note in parse()).
        print postname
        # item = ResearchItem()
        # item['proflist'] = []
        # tmpname = response.xpath('//td[@colspan="3"][@height="30"]/h1/text()').extract()
        # item['groupname'] = tmpname[0]
        # print str(item['groupname'])
        # sel = response.xpath('//td[@style="border-left:1px solid #333;"]')
        # print sel
        # sel2 = sel.xpath('ul[1]')
        # for sel3 in sel2.xpath('li/a'):
        # tmpname = sel3.xpath('text()').extract()
        # print str(tmpname)
        # item['proflist'].append(tmpname)
        item = ResearchItem()
        item['groupname'] = postname
        item['proflist'] = []
        # Collect every professor name linked from the results page.
        for sel in response.xpath('//p/a/text()'):
            tmpname = sel.extract()
            print tmpname
            item['proflist'].append(tmpname)
        yield item
|
[
"liqian.cs@gmail.com"
] |
liqian.cs@gmail.com
|
22985f2d819e9b6edb91e3975998fac5bee4bad6
|
fad526f1c2ecda4d57343f597cef327f26f6ab35
|
/setup.py
|
1e5f442e70eb4c64c157e274919fad05f0475483
|
[
"Apache-2.0"
] |
permissive
|
lofoyet/SchoolDigger-Scraper
|
53651aad7a11d531dfc06c9a2b08fe0222a170a2
|
98b329bad99534ef0f234ffb11ec1e19fdd875b2
|
refs/heads/master
| 2022-12-09T16:32:11.752836
| 2019-01-21T17:14:25
| 2019-01-21T17:14:25
| 166,749,962
| 1
| 1
|
Apache-2.0
| 2022-12-08T01:33:38
| 2019-01-21T04:48:23
|
Python
|
UTF-8
|
Python
| false
| false
| 606
|
py
|
"""Install and setup."""
from distutils.core import setup
setup(
name="SchoolDigger-Scraper",
version="1.0.0",
python_requires=">=3.6",
packages=[ # this will put these dirs into site-packges/
"schooldiggerscraper",
],
install_requires=[
"beautifulsoup4==4.7.1",
"click==7.0",
"requests==2.21.0",
],
entry_points={
"console_scripts": [
"schooldiggerscraper=schooldiggerscraper.scrape:main",
],
},
description="Scrape school digger",
author="Liu,Tianpei",
author_email="tianpei.liu0@gmail.com",
)
|
[
"tonylove5251@gmail.com"
] |
tonylove5251@gmail.com
|
01c90cf3b10ff564d8bd1c1485a176a7793c5f09
|
b3cb4015ca65774c670165c49a8e6069114cce8d
|
/preprocessing_old.py
|
2fe1bfc305d3f7dba22aa7b0a38f490e2b92a716
|
[] |
no_license
|
pynlpteam/PySupport
|
b1477038b9fea2873efa627d2d40bcd0f57dc79e
|
cca3ad21758ab4d43a865b94a0949eb5ffa2efa4
|
refs/heads/master
| 2021-05-10T14:23:04.914865
| 2018-01-27T08:50:31
| 2018-01-27T08:50:31
| 118,516,520
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,610
|
py
|
# general
import os
import re
import inspect
import itertools
import dill as pickle
import string
from collections import Counter
# NLP tools
import enchant
import nltk
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from pymystem3 import Mystem
import pymorphy2
# from Preprocessing.Dicts import word_lists, it_ru_dict
# Init project path
# inspect.getfile(inspect.currentframe()) # script filename (usually with path)
proj_path = '/'.join(inspect.getfile(inspect.currentframe()).split('/')[:-2])
# proj_path = '/'.join(os.path.dirname(os.getcwd()).split('/')[:-1])
print('project path (preprocessing): ', proj_path)
# Initialising Mystem
mystem = Mystem()
# Initialising dictionaties
en_dict = enchant.DictWithPWL("en_US", proj_path + '/Preprocessing/Dicts/IT_EN_dict.txt')
ru_aot_dict = enchant.Dict("ru_RU")
# nltk.download()
# print(enchant.list_languages())
class Preprocessing():
    """Text-cleaning pipeline: normalization, signature stripping,
    tokenization, stop-word removal, lemmatization and vocab filtering.

    NOTE(review): `word_lists` is referenced below but its import is
    commented out at the top of the file, so constructing this class raises
    NameError as-is — restore the `Preprocessing.Dicts` import first.
    """

    def __init__(self):
        # Pre-compiled e-mail signature patterns used by cut_by_signature().
        self.re_signatures = [re.compile(each) for each in word_lists.signatures]
        self.mystem = Mystem()
        self.morph = pymorphy2.MorphAnalyzer()
        self.en_dict = enchant.DictWithPWL("en_US", proj_path + '/Preprocessing/Dicts/IT_EN_dict.txt')
        self.ru_aot_dict = enchant.Dict("ru_RU")
        self.stop_words = stopwords.words('english')
        self.stop_words.extend(word_lists.yandex_seo_stopwords)
        # NOTE(review): hard-coded absolute path — breaks outside this host.
        self.dataset = self.load_dataset('/home/aviadmin/semantic_git/models/21/dataset')
        self.vocab = self.dataset._vocab

    def load_dataset(self, path):
        # SECURITY NOTE: unpickling is unsafe on untrusted files; `path`
        # must point at data this project produced itself.
        with open(path, 'rb') as fp:
            return pickle.load(fp)

    def normalize(self, input_string):
        # Lower-case, trim, and flatten all whitespace control chars to spaces.
        return input_string.lower().strip().replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    def cut_by_signature(self, input_string):
        """ Find index of earliest signature index and cut it. """
        beginnings = []
        for each in self.re_signatures:
            try:
                beginnings.append(each.search(input_string).span()[0])
            except AttributeError:
                # search() returned None — this signature is absent.
                pass
        if beginnings:
            return input_string[:min(beginnings)]
        else:
            return input_string

    def tokenize(self, input_string):
        return nltk.word_tokenize(input_string)

    def remove_stopwords(self, tokenized_text):
        # Keep only tokens outside the combined English+SEO stop-word list.
        return [t for t in tokenized_text if t not in self.stop_words]

    def get_pymorphy_lemma(self, token):
        # First (most probable) pymorphy2 parse wins.
        return self.morph.parse(token)[0].normal_form

    def scan_by_vocab(self, text):
        # Drop tokens unknown to the loaded dataset vocabulary.
        return [t for t in text if t in self.vocab]
|
[
"tonko22x@gmail.com"
] |
tonko22x@gmail.com
|
b7524f91f759d248026093ecf9ca391fb5b8aa99
|
d0fbfd9448033e76239141e6ce05b6a92d662f42
|
/others/cannibals.py
|
70c0a75d1585f0ddf60340ead0d72420e7b84b8b
|
[
"MIT"
] |
permissive
|
Mifour/Algorithms
|
7d01d8a83de927ba41d7a7c03f3ad71616da282a
|
77cfafc49bc0130da0f6041b169a15053f81af87
|
refs/heads/master
| 2020-09-29T21:33:40.235011
| 2020-02-25T14:34:22
| 2020-02-25T14:34:22
| 227,127,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
"""
This is just a fun play with classes and recursion
'We are what we eat'
Then only cannibals that eat humans are human?
Only if the human himself ate another cannibal
that ate another cannibal that ate another...
This creates such a class and tries to get a Human object
that is human according to this criterion.
"""
class Human:
    """A (possibly cannibal) human who may be eating another Human."""

    def __init__(self, name=None, food=None):
        self.name = name    # display name
        self.food = food    # what this human eats; may itself be a Human

    def is_human(self, food=None):
        """Walk the food chain, reporting each meal; True once the chain
        reaches the human being tracked (`food`)."""
        target = food or self.food
        print(f"{self.name} is eating {target}")
        if self == food:
            return True
        if isinstance(self.food, Human):
            return self.food.is_human(food=target)
        return False
# Demo: a food cycle alice -> bob -> charlie -> bob.
alice = Human(name='alice')
bob = Human(name='bob')
charlie = Human(name='charlie')
alice.food = bob
bob.food = charlie
charlie.food = bob
print(alice.is_human()) # True
|
[
"dufour.thomas@hotmail.fr"
] |
dufour.thomas@hotmail.fr
|
ace782b57ad01f2d7acf5a6f3a1fa578a001f116
|
801f4a6a58515c0d658dc3a5ebff50fa048819f4
|
/service-flask/manage.py
|
a0b0ef7af6327197089d8a7139d9db4e53c1148b
|
[] |
no_license
|
omdv/crypto-git-tracker
|
5456eb627d671087fa874faa10782d14a1a678b8
|
ee7a7109e3a598de26cc192a3bcfaadc55701dbe
|
refs/heads/master
| 2021-04-06T07:33:21.466435
| 2018-04-12T01:01:57
| 2018-04-12T01:01:57
| 124,619,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,013
|
py
|
import unittest
import coverage
from flask_script import Manager
from project import create_app, db
from project.api.models import RepoControlRecord
from project.tests.custom_test_runner import TimeLoggingTest
# Coverage measurement over project/* (branch coverage on, tests excluded);
# started before create_app() so application setup code is measured too.
COV = coverage.coverage(
    branch=True,
    include='project/*',
    omit=[
        'project/tests/*'
    ]
)
COV.start()

app = create_app()
manager = Manager(app)
def add_one_repo(repo):
    """Register one repository described by a 'ticker:apihandle:url' string.

    Adds a RepoControlRecord unless a record with the same URL already
    exists; commits on success and prints the outcome either way.
    """
    # Split on the first two colons only, so URLs that themselves contain
    # ':' (e.g. 'https://...') survive intact in the third field.
    ticker, apihandle, url = repo.split(':', 2)
    # BUG FIX: original used `is not 0` — an identity comparison with an int
    # literal (SyntaxWarning on Python 3.8+); compare the count with `!=`.
    if_exists = len(db.session.query(
        RepoControlRecord.id).filter_by(url=url).all()) != 0
    if not if_exists:
        db.session.add(RepoControlRecord(
            ticker=ticker, apihandle=apihandle, url=url))
        print("Repo {} was added".format(url))
        db.session.commit()
    else:
        print("Repo {} already exists".format(url))
@manager.command
def cov():
    """Runs the unit tests with coverage."""
    suite = unittest.TestLoader().discover('project/tests')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    if not outcome.wasSuccessful():
        return 1
    # Tests passed: finalize measurement and emit console + HTML reports.
    COV.stop()
    COV.save()
    print('Coverage Summary:')
    COV.report()
    COV.html_report()
    COV.erase()
    return 0
@manager.command
def test():
    """Runs the unit tests without test coverage."""
    suite = unittest.TestLoader().discover('project/tests', pattern='test*.py')
    outcome = unittest.TextTestRunner(resultclass=TimeLoggingTest).run(suite)
    # Shell convention: 0 on success, 1 on any failure.
    return 0 if outcome.wasSuccessful() else 1
@manager.command
def recreate_db():
    """Recreates a database."""
    # Destructive: drops every table, rebuilds the schema from the current
    # models, then commits.
    db.drop_all()
    db.create_all()
    db.session.commit()
@manager.option('-r', '--repo', help='ticker:apihandle:repo_url')
def add_repo_url(repo):
    # Thin CLI wrapper around add_one_repo for a single repository string.
    add_one_repo(repo)
@manager.option('-f', '--filename', help='filename.csv')
def add_repos(filename):
    """Register every repository listed (one per line) in `filename`."""
    with open(filename, 'r') as infile:
        for line in infile:
            add_one_repo(line.strip('\n'))
# Script entry point: dispatch Flask-Script CLI commands.
if __name__ == '__main__':
    manager.run()
|
[
"omdv@fastmail.com"
] |
omdv@fastmail.com
|
7b73c090cdc64e94afc687def5a36b63a87a369b
|
03f36f36f2ae165d08d3c75a9873ef1d13f6ffa6
|
/Ćwiczenia_5/Zadanie_4.py
|
c41227848273ef3e62fdde02a2dce4297ba58dd4
|
[] |
no_license
|
KrzysiekMiskowicz/WDI
|
c1e7bb9ff8991afbdad6da779c32f48b0519c6be
|
33a791ede8b30f3be5ea40bddd6aa17c618dc257
|
refs/heads/master
| 2023-04-05T15:23:39.445843
| 2021-04-10T14:20:43
| 2021-04-10T14:20:43
| 304,461,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
py
|
import random
class ulamek:
    """A simple fraction (numerator `l` over denominator `m`) with basic
    arithmetic; results are returned reduced to lowest terms."""

    def __init__(self, l=1, m=1):
        self.l = l  # numerator
        self.m = m  # denominator

    def wczytanie(self):
        """Read numerator and denominator from standard input."""
        self.l = int(input("Podaj licznik -> "))
        self.m = int(input("Podaj mianownik -> "))

    def wypisywanie(self):
        """Print the fraction as "( l / m )"."""
        print(f"( {self.l} / {self.m} )")

    def skracanie(self):
        """Reduce the fraction in place by the GCD of its parts."""
        wspolny = NWD(self.l, self.m)
        self.l //= wspolny
        self.m //= wspolny

    def dodawanie(self, skladnik):
        """Return the reduced sum self + skladnik."""
        nww = NWW(self.m, skladnik.m)
        # Scaling expression kept verbatim to preserve the original
        # float-then-int behaviour exactly.
        wynik = ulamek(int(self.l*(nww/self.m)+skladnik.l*(nww/skladnik.m)), nww)
        wynik.skracanie()
        return wynik

    def odejmowanie(self, odjemnik):
        """Return the reduced difference self - odjemnik."""
        nww = NWW(self.m, odjemnik.m)
        wynik = ulamek(int(self.l*(nww/self.m)-odjemnik.l*(nww/odjemnik.m)), nww)
        wynik.skracanie()
        return wynik

    def mnozenie(self, czynnik):
        """Return the reduced product self * czynnik."""
        wynik = ulamek(self.l * czynnik.l, self.m * czynnik.m)
        wynik.skracanie()
        return wynik

    def dzielenie(self, dzielnik):
        """Return the reduced quotient self / dzielnik."""
        wynik = ulamek(self.l * dzielnik.m, self.m * dzielnik.l)
        wynik.skracanie()
        return wynik

    def potegowanie(self, wykladnik):
        """Return self raised to a non-negative integer power."""
        wynik = ulamek()
        for _ in range(wykladnik):
            wynik = wynik.mnozenie(self)
        return wynik

    def porownanie(self, liczba):
        """True when both fractions are equal; note both operands are
        reduced in place as a side effect."""
        self.skracanie()
        liczba.skracanie()
        return self.l == liczba.l and self.m == liczba.m
def NWW(n1, n2):
    """Return the least common multiple of n1 and n2 (trial division)."""
    wspolne = 1
    # Strip the shared power of two first.
    while n1 % 2 == 0 and n2 % 2 == 0:
        wspolne *= 2
        n1 //= 2
        n2 //= 2
    # Then strip every shared odd factor.
    kandydat = 3
    while kandydat <= n1 and kandydat <= n2:
        if n1 % kandydat == 0 and n2 % kandydat == 0:
            n1 //= kandydat
            n2 //= kandydat
            wspolne *= kandydat
        else:
            kandydat += 2
    # wspolne == gcd of the inputs; the reduced residues complete the LCM.
    return wspolne * n1 * n2
def NWD(n1, n2):
    """Return the greatest common divisor of n1 and n2 (trial division)."""
    wynik = 1
    # Accumulate the shared power of two.
    while n1 % 2 == 0 and n2 % 2 == 0:
        n1 //= 2
        n2 //= 2
        wynik *= 2
    # Accumulate every shared odd factor.
    kandydat = 3
    while kandydat <= n1 and kandydat <= n2:
        if n1 % kandydat == 0 and n2 % kandydat == 0:
            n1 //= kandydat
            n2 //= kandydat
            wynik *= kandydat
        else:
            kandydat += 2
    return wynik
# Read ten fractions from the user; the commented-out lines generated
# random test data instead during development.
t = [ulamek() for _ in range(10)]
for i in range(10):
    t[i].wczytanie()
    #t[i] = ulamek(random.randrange(1, 10), random.randrange(1, 10))
    #print("(", t[i].l, "/", t[i].m , ")", end=" ")
print()
def podciagi(t):
    """Compare the longest arithmetic run vs. the longest geometric run
    in the fraction list `t`.

    Prints both lengths; returns 1 when the arithmetic run is longer,
    0 on a tie, -1 when the geometric run wins.
    """
    naj_a = biez_a = 2      # any two terms trivially form both kinds of run
    naj_g = biez_g = 2
    for i in range(len(t) - 2):
        # Three consecutive terms are arithmetic when successive differences
        # match, geometric when successive ratios match.
        if t[i].odejmowanie(t[i+1]).porownanie(t[i+1].odejmowanie(t[i+2])):
            biez_a += 1
        else:
            naj_a = max(naj_a, biez_a)
            biez_a = 2
        if t[i].dzielenie(t[i+1]).porownanie(t[i+1].dzielenie(t[i+2])):
            biez_g += 1
        else:
            naj_g = max(naj_g, biez_g)
            biez_g = 2
    # Account for a run that reaches the end of the list.
    naj_a = max(naj_a, biez_a)
    naj_g = max(naj_g, biez_g)
    print("max a =", naj_a)
    print("max g =", naj_g)
    if naj_a > naj_g:
        return 1
    if naj_a == naj_g:
        return 0
    return -1


print(podciagi(t))
|
[
"miskowicz@student.agh.edu.pl"
] |
miskowicz@student.agh.edu.pl
|
281ad853afa50f156cc560eb9efde70e9130b00e
|
40e09fc848fac3bc523802e353c4e8bef9e3cf5e
|
/pyvore/pyvore/managers/sessions.py
|
9f5d60dbae366a036b85d684f0aee266d2320f5c
|
[] |
no_license
|
sontek/pycon2012
|
8ff24ce51770e0fb6a40ec9a510e958b9b9f309b
|
79d417d185030c0af247506b49903744088abe65
|
refs/heads/master
| 2016-09-05T19:56:18.702274
| 2012-03-17T05:53:46
| 2012-03-17T05:53:46
| 3,627,137
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
from pyvore.managers import BaseManager
from pyvore.models.sessions import Session
from pyvore.models.sessions import Chat
class SessionManager(BaseManager):
    """Query helper for conference sessions and their chat logs."""

    def get_sessions(self):
        """Return every Session row."""
        query = self.session.query(Session)
        return query.all()

    def get_chatlog(self, pk):
        """Return a query over Chat rows belonging to session `pk`."""
        query = self.session.query(Chat)
        return query.filter(Chat.session_pk == pk)
|
[
"sontek@gmail.com"
] |
sontek@gmail.com
|
c54e9e33e7d6fda10fa9d5b3a9099374742748b8
|
6b0d165bad59b1af4a20b469d8a222370b9b5354
|
/beer_warehouse/Scripts/django-admin.py
|
498f0fa4364d5904d8fbd4e22e8e5fdbf8915409
|
[] |
no_license
|
juansanluq/DjangoTest
|
2a2df82f8ea42e18f869bf66084e4998739e57a6
|
b8998caa382cef4d22c2a601207bd483b43ab9d4
|
refs/heads/master
| 2022-11-01T08:07:23.301570
| 2019-01-22T14:48:03
| 2019-01-22T14:48:03
| 164,463,799
| 0
| 1
| null | 2022-10-28T10:32:28
| 2019-01-07T16:59:30
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
#!C:\DjangoTest\beer_warehouse\Scripts\python.exe
# Virtualenv-generated django-admin shim: delegates to Django's CLI parser.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"juansanluq@gmail.com"
] |
juansanluq@gmail.com
|
3a74b3ab2a1cc28993c4ea0595eb0e7747c7cdf1
|
fede8e979d4297a8bfca8ef2f7ed84de68dfb373
|
/14_exceptions.py
|
b7da4d089a471723ba4db92e1b8750144f0dc571
|
[] |
no_license
|
atrukhanov/routine_python
|
8d17d5998d270348fecd766b77e7cd21cc6d0f76
|
da1eec5d9e7344449c8eb26b3a2c445834f9513e
|
refs/heads/master
| 2022-12-17T09:44:38.642554
| 2020-09-18T09:54:39
| 2020-09-18T09:54:39
| 294,264,454
| 0
| 0
| null | 2020-09-10T00:51:19
| 2020-09-10T00:51:18
| null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
import xml
import lxml
import requests
|
[
"iampopovich@gmail.com"
] |
iampopovich@gmail.com
|
3ddb4e47db5a0d30bc1e38c5d8c3e051f3906458
|
9543bcb3abe1adab6b2e9bfedcd3651e36173fa5
|
/jobs/models.py
|
b90048df0fe010220c6554b239981f4294167bae
|
[] |
no_license
|
nothing2aKING/my_portfolio
|
b5394b15f196276373fa469066d0514e7cde5dab
|
51c23bb1d146a47464923397489bb47541f07226
|
refs/heads/master
| 2020-09-17T07:12:41.107023
| 2019-11-25T20:14:13
| 2019-11-25T20:14:13
| 223,784,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
from django.db import models
# Create your models here.
#DB table
class Job(models.Model):
    """Portfolio entry: a screenshot, a short summary and a GitHub link."""
    # Uploaded screenshot, stored under MEDIA_ROOT/images/.
    image = models.ImageField(upload_to='images/')
    # One-line description of the project.
    summary = models.CharField(max_length=200)
    # Repository link; URLField's default max_length is 200, narrowed to 100
    # here, with an empty string as the default value.
    github = models.URLField(max_length=100, default='')
#python manage.py migrate
#run python manage.py check; this checks for any problems in your project without making migrations or touching the database.
#The migrate command looks at the INSTALLED_APPS setting and creates any necessary database tables
#according to the database settings in your mysite/settings.py file and the database migrations shipped with the app '''
#Steps to make changes/updates to the DataBase
'''
1) make changes in this file (models.py)
- add app to mysite/settings.py (INSTALLED_APPS settings) before doing step 2
2) run 'python manage.py makemigrations <name_of_app>' to create migrations for those changes
3) run 'python manage.py migrate' to apply those changes to the db
'''
|
[
"amking15@gmail.com"
] |
amking15@gmail.com
|
217b18fed616cde55be1ebb5f4f04e0e3fd399a9
|
3f171049906579115ae02e046374a62805893935
|
/mesh/geometry.py
|
a123f5846b9da7bf5fa6cf9e506e21b3440c0c61
|
[] |
no_license
|
piyushagru/smart-match
|
9fac3e170988d2c36209bdcf361756d02640d1ea
|
96e2c6f4ad4b0c124bc375b411ac4326c401d66d
|
refs/heads/main
| 2023-04-16T05:24:54.789025
| 2023-04-01T11:52:42
| 2023-04-01T11:52:42
| 309,167,267
| 1
| 0
| null | 2023-04-01T08:43:09
| 2020-11-01T19:03:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,452
|
py
|
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
def sparse_to_tensor(x):
    """Convert a scipy sparse matrix into a canonically ordered tf.SparseTensor."""
    coo = x.tocoo()
    idx = np.mat([coo.row, coo.col]).transpose()
    sparse = tf.SparseTensor(idx, coo.data, coo.shape)
    return tf.sparse.reorder(sparse)
def sparse_dense_matmul_batch(a, b):
    # Batched sparse @ dense product: `a` is a 3-D SparseTensor whose first
    # index is the batch dimension, `b` a dense batch; returns the per-batch
    # matrix product via map_fn over batch elements.
    num_b = tf.shape(b)[0]
    shape = a.dense_shape
    # Regroup the flat COO indices/values so axis 0 is the batch element.
    # NOTE(review): assumes every batch element has the same number of
    # non-zeros, otherwise these reshapes misalign — confirm with callers.
    indices = tf.reshape(a.indices, (num_b, -1, 3))
    values = tf.reshape(a.values, (num_b, -1))
    # def matmul((i, bb)):
    def matmul(ibb):  # python 3 change: tuple parameters are no longer allowed
        (i, bb) = ibb
        # Rebuild the i-th 2-D sparse slice by dropping the batch coordinate.
        sp = tf.SparseTensor(indices[i, :, 1:], values[i], shape[1:])
        return i, tf.sparse_tensor_dense_matmul(sp, bb)
    _, p = tf.map_fn(matmul, (tf.range(num_b), b))
    return p
def sparse_dense_matmul_batch_tile(a, b):
    """Multiply one 2-D sparse matrix `a` with every dense slice of batch `b`."""
    def multiply_slice(dense_slice):
        return tf.sparse_tensor_dense_matmul(a, dense_slice)
    return tf.map_fn(multiply_slice, b)
def project_pool(v, img_feat):
    # Project 3-D vertices `v` (B x V x 3) into the image plane and pool image
    # features at the projected locations with bilinear interpolation.
    shape = tf.shape(img_feat)
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    dim = shape[-1]
    num_v = tf.shape(v)[1]
    # Perspective divide by the last coordinate (rebinds the local tensor).
    v /= tf.expand_dims(v[:, :, -1], -1)
    # Map normalized device coords [-1, 1] to pixel coords; y is flipped.
    x = (v[:, :, 0] + 1) / 2. * tf.cast(width - 1, tf.float32)
    y = (1 - (v[:, :, 1] + 1) / 2.) * tf.cast(height - 1, tf.float32)
    # x = tf.Print(x, [tf.stack([x, y], 2)[0]], summarize=20)
    # Four neighbouring integer pixel corners for bilinear interpolation.
    x1 = tf.floor(x)
    x2 = tf.ceil(x)
    y1 = tf.floor(y)
    y2 = tf.ceil(y)
    b = tf.tile(tf.expand_dims(tf.range(0, batch_size), 1), (1, num_v))
    # NOTE(review): indices are gathered as [batch, x, y]; img_feat is indexed
    # [batch, height, width], so x here plays the row role — confirm intended.
    Q11 = tf.gather_nd(img_feat, tf.stack(
        [b, tf.cast(x1, tf.int32), tf.cast(y1, tf.int32)], 2))
    Q12 = tf.gather_nd(img_feat, tf.stack(
        [b, tf.cast(x1, tf.int32), tf.cast(y2, tf.int32)], 2))
    Q21 = tf.gather_nd(img_feat, tf.stack(
        [b, tf.cast(x2, tf.int32), tf.cast(y1, tf.int32)], 2))
    Q22 = tf.gather_nd(img_feat, tf.stack(
        [b, tf.cast(x2, tf.int32), tf.cast(y2, tf.int32)], 2))
    # Standard bilinear weights: each corner weighted by the opposite area.
    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y2, y))
    Q11 = tf.multiply(tf.tile(tf.expand_dims(weights, 2), [1, 1, dim]), Q11)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y2, y))
    Q21 = tf.multiply(tf.tile(tf.expand_dims(weights, 2), [1, 1, dim]), Q21)
    weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y, y1))
    Q12 = tf.multiply(tf.tile(tf.expand_dims(weights, 2), [1, 1, dim]), Q12)
    weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y, y1))
    Q22 = tf.multiply(tf.tile(tf.expand_dims(weights, 2), [1, 1, dim]), Q22)
    outputs = tf.add_n([Q11, Q21, Q12, Q22])
    return outputs
def edge_lengths(v, e_idx):
    """Per-batch squared lengths of mesh edges.

    v: B x N x 3 vertex batch; e_idx: E x 2 vertex-index pairs.
    Returns a B x E tensor of squared edge lengths (no sqrt is taken).
    """
    batch_size = tf.shape(v)[0]
    edge_count = tf.shape(e_idx)[0]
    batch_ids = tf.tile(tf.expand_dims(tf.range(batch_size), 1), (1, edge_count))

    def endpoint(col):
        # Gather the coordinates of one edge endpoint for every batch element.
        ids = tf.tile(tf.expand_dims(e_idx[:, col], 0), (batch_size, 1))
        return tf.gather_nd(v, tf.stack((batch_ids, ids), axis=2))

    diff = endpoint(0) - endpoint(1)
    return tf.reduce_sum(tf.pow(diff, 2), 2)
def batch_laplacian(v, f, return_sparse=True):
    # Batched cotangent Laplacian of a triangle mesh as a 3-D SparseTensor.
    # v: B x N x 3
    # f: M x 3
    # NOTE(review): `return_sparse` is accepted but never used — always sparse.
    num_b = tf.shape(v)[0]
    num_v = tf.shape(v)[1]
    num_f = tf.shape(f)[0]
    # The three corner indices of every face.
    v_a = f[:, 0]
    v_b = f[:, 1]
    v_c = f[:, 2]
    a = tf.gather(v, v_a, axis=1)
    b = tf.gather(v, v_b, axis=1)
    c = tf.gather(v, v_c, axis=1)
    ab = a - b
    bc = b - c
    ca = c - a
    # Cotangent of each corner angle: dot product over cross-product norm.
    cot_a = -1 * tf.reduce_sum(ab * ca, axis=2) / \
        tf.sqrt(tf.reduce_sum(tf.cross(ab, ca) ** 2, axis=-1))
    cot_b = -1 * tf.reduce_sum(bc * ab, axis=2) / \
        tf.sqrt(tf.reduce_sum(tf.cross(bc, ab) ** 2, axis=-1))
    cot_c = -1 * tf.reduce_sum(ca * bc, axis=2) / \
        tf.sqrt(tf.reduce_sum(tf.cross(ca, bc) ** 2, axis=-1))
    # COO row/column indices: each edge gets the cotangent of the opposite
    # angle, entered symmetrically (both (i, j) and (j, i)).
    I = tf.tile(tf.expand_dims(
        tf.concat((v_a, v_c, v_a, v_b, v_b, v_c), axis=0), 0), (num_b, 1))
    J = tf.tile(tf.expand_dims(
        tf.concat((v_c, v_a, v_b, v_a, v_c, v_b), axis=0), 0), (num_b, 1))
    W = 0.5 * tf.concat((cot_b, cot_b, cot_c, cot_c, cot_a, cot_a), axis=1)
    batch_dim = tf.tile(tf.expand_dims(tf.range(num_b), 1), (1, num_f * 6))
    indices = tf.reshape(
        tf.stack((batch_dim, J, I), axis=2), (num_b, 6, -1, 3))
    W = tf.reshape(W, (num_b, 6, -1))
    l_indices = [tf.cast(tf.reshape(indices[:, i], (-1, 3)), tf.int64)
                 for i in range(6)]
    shape = tf.cast(tf.stack((num_b, num_v, num_v)), tf.int64)
    # Build six sparse tensors (one per index group) and accumulate them;
    # sparse_add merges duplicate coordinates by summation.
    sp_L_raw = [tf.sparse_reorder(tf.SparseTensor(
        l_indices[i], tf.reshape(W[:, i], (-1,)), shape)) for i in range(6)]
    L = sp_L_raw[0]
    for i in range(1, 6):
        L = tf.sparse_add(L, sp_L_raw[i])
    # Diagonal = negative row sums, so every row of the Laplacian sums to 0.
    dia_values = tf.sparse_reduce_sum(L, axis=-1) * -1
    I = tf.tile(tf.expand_dims(tf.range(num_v), 0), (num_b, 1))
    batch_dim = tf.tile(tf.expand_dims(tf.range(num_b), 1), (1, num_v))
    indices = tf.reshape(tf.stack((batch_dim, I, I), axis=2), (-1, 3))
    dia = tf.sparse_reorder(tf.SparseTensor(
        tf.cast(indices, tf.int64), tf.reshape(dia_values, (-1,)), shape))
    return tf.sparse_add(L, dia)
def compute_laplacian_diff(v0, v1, f):
    """Difference of Laplacian coordinates of two vertex sets sharing faces `f`."""
    lap0 = sparse_dense_matmul_batch(batch_laplacian(v0, f), v0)
    lap1 = sparse_dense_matmul_batch(batch_laplacian(v1, f), v1)
    return lap0 - lap1
def cpu_laplacian(v, f):
    """Cotangent Laplacian of a triangle mesh as a scipy CSR matrix.

    v: N x 3 vertex array; f: M x 3 integer face array.
    The result is symmetric and every row sums to zero.
    """
    n = len(v)
    ia, ib, ic = f[:, 0], f[:, 1], f[:, 2]
    ab = v[ia] - v[ib]
    bc = v[ib] - v[ic]
    ca = v[ic] - v[ia]

    def corner_cot(u, w):
        # Cotangent of the corner angle between edges u and w: dot / |cross|.
        return -1 * (u * w).sum(axis=1) / \
            np.sqrt(np.sum(np.cross(u, w) ** 2, axis=-1))

    cot_a = corner_cot(ab, ca)
    cot_b = corner_cot(bc, ab)
    cot_c = corner_cot(ca, bc)
    # Each edge receives half the cotangent of the opposite angle, symmetrically.
    rows = np.concatenate((ia, ic, ia, ib, ib, ic))
    cols = np.concatenate((ic, ia, ib, ia, ic, ib))
    vals = 0.5 * np.concatenate((cot_b, cot_b, cot_c, cot_c, cot_a, cot_a))
    L = sp.csr_matrix((vals, (rows, cols)), shape=(n, n))
    # Subtract row sums on the diagonal so rows sum to zero.
    return L - sp.spdiags(L * np.ones(n), 0, n, n)
if __name__ == "__main__":
    # Self-test: compare the TF batched Laplacian against the scipy reference
    # on an SMPL mesh, and check the result is identical across batch copies.
    # from psbody.mesh import Mesh
    # m0 = Mesh(filename='assets/sphube.obj')
    # m1 = Mesh(filename='assets/sphube.obj')
    # m1.v *= np.array([0.5, 1., 2.])
    from utils.smpl_paths import SmplPaths
    smp = SmplPaths()
    m0 = smp.get_mesh(smp.get_smpl())
    L0 = cpu_laplacian(m0.v.astype(np.float32), m0.f)
    lap0 = L0.dot(m0.v.astype(np.float32))
    # Tile the single mesh into a batch of 5 identical copies.
    tf_v0 = tf.expand_dims(m0.v.astype(np.float32), 0)
    tf_v = tf.tile(tf_v0, (5, 1, 1))
    tf_L = batch_laplacian(tf_v, m0.f.astype(np.int32))
    tf_L0 = batch_laplacian(tf_v0, m0.f.astype(np.int32))
    tf_lap = sparse_dense_matmul_batch(tf_L, tf_v)
    # tf_diff = tf.reduce_max(tf.abs(f_L[0] - tf_L[-1]))
    with tf.Session():
        tf_L_e = tf.sparse_tensor_to_dense(tf_L).eval()
        tf_lap_e = tf_lap.eval()
        # Max deviation of first/last batch element from the CPU reference.
        print(np.max(np.abs(tf_L_e[0] - L0.toarray())))
        print(np.max(np.abs(tf_L_e[-1] - L0.toarray())))
        print(tf_L_e.shape)
        # print np.max(np.abs(tf_L0_e[0] - L0.toarray()))
        print(np.max(np.abs(tf_lap_e[0] - lap0)))
        print(np.max(np.abs(tf_lap_e[-1] - lap0)))
        print(np.max(np.abs(tf_lap_e[0] - tf_lap_e[-1])))
    # from opendr.topology import get_vertices_per_edge
    # e_idx = get_vertices_per_edge(m.v, m.f)
    #
    # tf_v = tf.tile(tf.expand_dims(m.v.astype(np.float32), 0), (2, 1, 1))
    #
    # el = edge_lengths(tf_v, e_idx)
    #
    # with tf.Session():
    #     print(el.eval())
    #     print(el.eval().shape)
|
[
"khushgrover16@gmail.com"
] |
khushgrover16@gmail.com
|
647d7e5364848034896ddc8bb3b7b7f3c0eb4671
|
eb73d4fa1a6f62f6406ca230699397925856b013
|
/src/transformers/models/poolformer/image_processing_poolformer.py
|
99ac07c510a516b45a0a7f0ad67fdda67ce89499
|
[
"Apache-2.0"
] |
permissive
|
MerHS/transformers
|
f3ece97926e8883fb43f321937c5b5af7731fe70
|
d447c460b16626c656e4d7a9425f648fe69517b3
|
refs/heads/main
| 2023-02-20T13:38:31.756752
| 2022-11-03T16:56:22
| 2022-11-03T16:56:22
| 561,655,525
| 0
| 0
|
Apache-2.0
| 2022-11-04T07:21:06
| 2022-11-04T07:21:04
| null |
UTF-8
|
Python
| false
| false
| 18,311
|
py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for PoolFormer."""
import math
from typing import Dict, List, Optional, Union
import numpy as np
from transformers import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
r"""
Constructs a PoolFormer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is
unset:
- size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
- size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
aspect ratio.
If crop_pct is set:
- size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
int(floor(w/crop_pct)))`
- size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
whilst maintaining the aspect ratio.
- size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
whilst maintaining the aspect ratio.
crop_pct (`float`, *optional*, defaults to `0.9`):
Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess`
method.
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can
be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs
    ) -> None:
        """Store the preprocessing defaults; see the class docstring for parameter semantics."""
        super().__init__(**kwargs)
        # Resize defaults to shortest-edge 256 with aspect ratio preserved.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        # Center crop defaults to a square 256 x 256 output.
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the ImageNet standard normalization statistics.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs
    ) -> np.ndarray:
        """
        Resize an image.

        If crop_pct is unset:
            - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
            - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
              aspect ratio.

        if crop_pct is set:
            - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
              int(floor(w/crop_pct)))`
            - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
              whilst maintaining the aspect ratio.
            - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
              whilst maintaining the aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            crop_pct (`float`, *optional*):
                Percentage of the image that will be cropped from the center. If set, the image is resized
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            # "Resize-then-crop" recipe: scale up by 1/crop_pct so that a later
            # center crop of `size` keeps crop_pct of the resized image.
            if "shortest_edge" in size:
                scale_size = int(math.floor(size["shortest_edge"] / crop_pct))
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(math.floor(size["height"] / crop_pct))
                else:
                    scale_size = (
                        int(math.floor(size["height"] / crop_pct)),
                        int(math.floor(size["width"] / crop_pct)),
                    )
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            # An int scale_size resizes the shortest edge; a tuple is exact.
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def center_crop(
self,
image: np.ndarray,
size: Dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs
) -> np.ndarray:
"""
Center crop an image to (size["height"], size["width"]). If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`Dict[str, int]`):
Size of the output image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size)
return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(
self,
image: np.ndarray,
scale: Union[int, float],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs
):
"""
Rescale an image by a scale factor. image = image * scale.
Args:
image (`np.ndarray`):
Image to rescale.
scale (`int` or `float`):
Scale to apply to the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
self,
image: np.ndarray,
mean: Union[float, List[float]],
std: Union[float, List[float]],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs
) -> np.ndarray:
"""
Normalize an image. image = (image - image_mean) / image_std.
Args:
image (`np.ndarray`):
Image to normalize.
image_mean (`float` or `List[float]`):
Image mean.
image_std (`float` or `List[float]`):
Image standard deviation.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess(
self,
images: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
crop_pct: int = None,
resample: PILImageResampling = None,
do_center_crop: bool = None,
crop_size: Dict[str, int] = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
**kwargs,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after applying resize.
crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after applying center crop.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
crop_pct = crop_pct if crop_pct is not None else self.crop_pct
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size)
if not is_batched(images):
images = [images]
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
if do_center_crop:
images = [self.center_crop(image=image, size=crop_size) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
images = [to_channel_dimension_format(image, data_format) for image in images]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
|
[
"noreply@github.com"
] |
noreply@github.com
|
71b0e1044d36d090db6eb4f4f05206c2821b9c58
|
50886163b2e74287654574527ffb9eb48d5e100e
|
/Tech Documents/设计模式(python版)/工厂模式/abstract_factory.py
|
603863303dd7487eb2d7127558ac90953a85d9dc
|
[] |
no_license
|
13241308289/Documents
|
56222b45a38875bb7e53e302dfc0c23fd156e790
|
cd7cf3f2d51e1bfcea79bb58839bab582e48c8a6
|
refs/heads/master
| 2020-05-24T01:18:56.690083
| 2019-05-18T03:33:07
| 2019-05-18T03:33:07
| 187,030,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
# -*- encoding: utf-8 -*-
# ##################抽象工厂#####################
class Frog:
    """Hero of the frog game; meeting an obstacle lets the obstacle act."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def interact_with(self, obstacle):
        message = '{} the Frog encounters {} and {}!'.format(
            self, obstacle, obstacle.action())
        print(message)
class Bug:
    """Obstacle of the frog game: it gets eaten."""

    def action(self):
        return 'eats it'

    def __str__(self):
        return 'a bug'
class FrogWorld:
    # Concrete factory for the under-18 game: builds a Frog hero and Bug obstacle.
    def __init__(self, name):
        # Prints the world banner (via __str__) as a side effect.
        print(self)
        self.player_name = name
    def __str__(self):
        return '\n\n\t------ Frog World -------'
    def make_character(self):
        # Factory method: the game's hero.
        return Frog(self.player_name)
    def make_obstacle(self):
        # Factory method: the hero's obstacle.
        return Bug()
class Wizard:
    """Hero of the wizard game; battles whatever obstacle it meets."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def interact_with(self, obstacle):
        message = '{} the Wizard battles against {} and {}!'.format(
            self, obstacle, obstacle.action())
        print(message)
class Ork:
    # Obstacle of the wizard game: it kills what it meets.
    def __str__(self):
        return 'an evil ork'
    def action(self):
        return 'kills it'
class WizardWorld:
    # Concrete factory for the 18+ game: builds a Wizard hero and Ork obstacle.
    def __init__(self, name):
        # Prints the world banner (via __str__) as a side effect.
        print(self)
        self.player_name = name
    def __str__(self):
        return '\n\n\t------ Wizard World -------'
    def make_character(self):
        # Factory method: the game's hero.
        return Wizard(self.player_name)
    def make_obstacle(self):
        # Factory method: the hero's obstacle.
        return Ork()
class GameEnvironment:
    # Client of the abstract factory: works with any world exposing
    # make_character() / make_obstacle().
    def __init__(self, factory):
        self.hero = factory.make_character()
        self.obstacle = factory.make_obstacle()
    def play(self):
        # Run one interaction round between hero and obstacle.
        self.hero.interact_with(self.obstacle)
def validate_age(name):
    """Prompt `name` for an age; return (ok, age) where ok is True iff the
    input parsed as an int (then age is that int, otherwise the raw string)."""
    age = input('Welcome {}. How old are you? '.format(name))
    try:
        age = int(age)
    except ValueError:
        print("Age {} is invalid, please try again...".format(age))
        return (False, age)
    return (True, age)
def main():
    """Game entry point: ask for name and age, pick the age-appropriate world."""
    # FIX: `raw_input` is Python 2 only and raises NameError here; the rest of
    # this file is Python 3 (print() calls, input() in validate_age).
    name = input("Hello. What's your name? ")
    valid_input = False
    # Re-prompt until validate_age returns a parsed integer age.
    while not valid_input:
        valid_input, age = validate_age(name)
    # Minors play FrogWorld, adults WizardWorld.
    game = FrogWorld if age < 18 else WizardWorld
    environment = GameEnvironment(game(name))
    environment.play()


if __name__ == '__main__':
    main()
|
[
"2519960931@qq.com"
] |
2519960931@qq.com
|
28f27209663b924166d6625334c52add25361c31
|
a6509a854dfb0d7ce32a61edd4c380c7f066947e
|
/ui/appPopUpSecurity.py
|
27174f20727fe2ab7f610df84540ba6be9cec3f6
|
[
"MIT"
] |
permissive
|
Penmast/Chameleon
|
70d81bd4dd1e4fa3c77fbdc4edf7f0c5493e5ba8
|
91f79a4640dede099f6e2a4757053f04cdc186f0
|
refs/heads/master
| 2021-05-03T04:58:44.931277
| 2018-02-08T21:21:55
| 2018-02-08T21:21:55
| 120,628,478
| 21
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,647
|
py
|
# -*- coding: utf-8 -*-
# self implementation generated from reading ui file 'appPopUpSecurity.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
### The form for the duration of a security action
class appPopUpSecurity(QDialog):
    def __init__(self):
        """Build the fixed-size dialog: title, subtitle, duration form, OK button."""
        super(appPopUpSecurity, self).__init__()
        ### Set up the form window
        self.setObjectName("appPopUpSecurity")
        self.setFixedSize(329, 290)
        self.setStyleSheet("QDialog{background-color:white;}QLabel#label,QLabel#label_2{color:rgba(41, 107, 116, 1);}")
        ### Title
        self.labelTitle = QLabel(self)
        self.labelTitle.setWordWrap(True)
        self.labelTitle.setGeometry(QRect(0, 0, 329, 71))
        font = QFont()
        font.setPointSize(18)
        font.setWeight(0)
        font.setLetterSpacing(QFont.AbsoluteSpacing,2)
        self.labelTitle.setFont(font)
        self.labelTitle.setTextFormat(Qt.AutoText)
        self.labelTitle.setAlignment(Qt.AlignCenter)
        self.labelTitle.setObjectName("label")
        ### Line under title
        self.line = QFrame(self)
        self.line.setGeometry(QRect(60, 70, 221, 16))
        self.line.setLineWidth(1)
        self.line.setFrameShape(QFrame.HLine)
        self.line.setFrameShadow(QFrame.Sunken)
        self.line.setObjectName("line")
        ### "Security" subtitle
        self.label_2 = QLabel(self)
        self.label_2.setGeometry(QRect(20, 120, 281, 20))
        font = QFont()
        font.setPointSize(11)
        font.setLetterSpacing(QFont.AbsoluteSpacing,2)
        font.setWeight(0)
        self.label_2.setFont(font)
        self.label_2.setAlignment(Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        ### Form layout
        self.formLayoutWidget = QWidget(self)
        self.formLayoutWidget.setGeometry(QRect(20, 150, 281, 71))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout_2 = QFormLayout(self.formLayoutWidget)
        self.formLayout_2.setContentsMargins(0, 0, 0, 0)
        self.formLayout_2.setObjectName("formLayout_2")
        ##### Duration menu
        ### Drop down menu label
        self.label_6 = QLabel(self.formLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.formLayout_2.setWidget(0, QFormLayout.LabelRole, self.label_6)
        ### Drop down menu; item texts are filled in by retranslateUi
        self.durationBox = QComboBox(self.formLayoutWidget)
        self.durationBox.setObjectName("durationBox")
        self.durationBox.addItem("")
        self.durationBox.addItem("")
        self.durationBox.addItem("")
        self.formLayout_2.setWidget(0, QFormLayout.FieldRole, self.durationBox)
        self.durationBox.currentIndexChanged.connect(self.activateTimeInput)
        ##### Time menu
        ### Time label
        self.label_7 = QLabel(self.formLayoutWidget)
        self.label_7.setObjectName("label_7")
        self.formLayout_2.setWidget(1, QFormLayout.LabelRole, self.label_7)
        ### Time input; disabled until the "set period of time" option is chosen
        self.inputTime = QLineEdit(self.formLayoutWidget)
        self.inputTime.setObjectName("inputTime")
        self.formLayout_2.setWidget(1, QFormLayout.FieldRole, self.inputTime)
        self.inputTime.setEnabled(False)
        ### Submit button
        self.pushButton = QPushButton(self)
        self.pushButton.setGeometry(QRect(210, 240, 91, 31))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.clicked.connect(self.exitDialog)
        # True when the user closed the window instead of pressing OK.
        self.cancel = False
        self.retranslateUi(self)
        QMetaObject.connectSlotsByName(self)
    @pyqtSlot()
    def exitDialog(self):
        # OK button handler: accept the dialog so exec_() can build its result.
        self.accept()
### Enable or disable the time input if it is needed or not
@pyqtSlot()
def activateTimeInput(self):
if (self.durationBox.currentIndex() == 1):
self.inputTime.setEnabled(True)
else:
self.inputTime.setEnabled(False)
self.inputTime.setText("")
    @pyqtSlot(str)
    def setTitle(self, value):
        # Show the concerned application's name in the dialog header.
        self.labelTitle.setText(value)
    def retranslateUi(self, Dialog):
        # Assign all user-visible (translatable) strings of the dialog.
        _translate = QCoreApplication.translate
        self.setWindowTitle(_translate("Dialog", "Security options"))
        self.labelTitle.setText(_translate("Dialog", "Appli Name"))
        self.label_2.setText(_translate("Dialog", "Security"))
        self.pushButton.setText(_translate("Dialog", "OK"))
        self.label_6.setText(_translate("Dialog", "Duration"))
        self.durationBox.setItemText(0, _translate("Dialog", "Until the application is closed"))
        self.durationBox.setItemText(1, _translate("Dialog", "For a set period of time"))
        self.durationBox.setItemText(2, _translate("Dialog", "Forever"))
        self.label_7.setText(_translate("Dialog", "Hours"))
    def closeEvent(self, event):
        # Closing the window (title-bar "X") counts as cancelling the form.
        self.cancel = True
        self.reject()
### Is triggered when the form is the submit.
### Returns the following dictionary:
#### 'duration' : the type of duration; 0: until the application is closed, 1: for a set period of time, 2: forever
#### 'time' : the entered set period of time
### Returns None if the form is cancelled
def exec_(self):
super(appPopUpSecurity, self).exec_()
if(self.cancel is True):
return None
if( self.durationBox.currentIndex() == 1 ):
return { 'duration' : self.durationBox.currentIndex(), 'time' : int(self.inputTime.text()) }
else:
return { 'duration' : self.durationBox.currentIndex(), 'time' : 0 }
|
[
"deray@edu.ece.fr"
] |
deray@edu.ece.fr
|
840f4bbfbecf36b0fca0ba5c41b0fcca167ee681
|
4d8c7a0b760abb796ea3ce6160ec9cd88faa62ea
|
/src/coordsim/reader/reader.py
|
f029d534275ff7d5032b5f304f7444b2089a6203
|
[] |
no_license
|
DatLQ95/simulation_network
|
cd1d3efe1720e9c45cc832641f746f924245d532
|
06a439579ebf5bb47aef79f5c47a8f756381fd17
|
refs/heads/master
| 2023-07-19T07:42:25.358624
| 2021-09-04T15:41:47
| 2021-09-04T15:41:47
| 403,074,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,377
|
py
|
import networkx as nx
from geopy.distance import distance as dist
import numpy as np
import logging
import yaml
import math
from collections import defaultdict
import importlib
import csv
log = logging.getLogger(__name__)
# Disclaimer: Some snippets of the following file were imported/modified from B-JointSP on GitHub.
# Original code can be found on https://github.com/CN-UPB/B-JointSP
"""
Network parsing module.
- Reads and parses network files into NetworkX.
- Reads and parses network yaml files and gets placement and SFC and SFs.
"""
def get_trace(trace_file):
    """
    Parse the trace file that the simulator will use to generate traffic.

    Returns a list with one dict per CSV row, keyed by the header columns.
    """
    with open(trace_file) as trace_stream:
        return [dict(row) for row in csv.DictReader(trace_stream)]
def get_config(config_file):
    """
    Parse simulator config params from the given yaml file into a Python dict.
    """
    # TODO: specify defaults as fall back if param is not set in config
    with open(config_file) as config_stream:
        return yaml.load(config_stream, Loader=yaml.FullLoader)
def get_sfc(sfc_file):
    """
    Get the list of SFCs from the yaml data.

    Returns a defaultdict mapping each SFC name to its SF chain.
    """
    with open(sfc_file) as yaml_stream:
        sfc_data = yaml.load(yaml_stream, Loader=yaml.FullLoader)
    sfc_list = defaultdict(None)
    sfc_list.update(sfc_data['sfc_list'])
    return sfc_list
def get_sfc_requirement(sfc_requirement_file):
    """
    Get the per-SFC requirements from the yaml data.

    Returns a defaultdict mapping each SFC name to its requirement entry.
    """
    with open(sfc_requirement_file) as yaml_stream:
        requirement_data = yaml.load(yaml_stream, Loader=yaml.FullLoader)
    requirement_list = defaultdict(None)
    requirement_list.update(requirement_data['sfc_list'])
    return requirement_list
def load_resource_function(name, path):
    """Load the ``resource_function`` callable from the file ``<path>/<name>.py``.

    :param name: module/file name (without the ``.py`` suffix)
    :param path: directory containing the file
    :return: the module's ``resource_function`` callable
    :raises Exception: if the file cannot be loaded/executed, or if it does
        not define a ``resource_function``.
    """
    # NOTE: requires `import importlib.util` at module level; relying on
    # `import importlib` to expose the `util` submodule is undocumented and
    # breaks on newer interpreters.
    try:
        spec = importlib.util.spec_from_file_location(name, path + '/' + name + '.py')
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    except Exception as err:
        # Chain the original error so the root cause is not lost.
        raise Exception(f'Cannot load file "{name}.py" from specified location "{path}".') from err
    try:
        return getattr(module, 'resource_function')
    except AttributeError:
        # Narrowed from `except Exception`: only a missing attribute means
        # the module lacks a resource_function.
        raise Exception(f'There is no "resource_function" defined in file "{name}.py."') from None
def get_sf(sf_file, resource_functions_path=''):
    """
    Get the list of SFs and their properties from the yaml data.

    Fills in default processing/startup delays for missing keys and attaches
    a resource function (custom if loadable, identity otherwise).
    """
    with open(sf_file) as yaml_stream:
        sf_data = yaml.load(yaml_stream, Loader=yaml.FullLoader)
    # Fallback values used when an SF omits these properties.
    default_processing_delay_mean = 1.0
    default_processing_delay_stdev = 1.0
    default_startup_delay = 0.0

    def default_resource_function(x):
        return x

    sf_list = defaultdict(None)
    for sf_name, sf_details in sf_data['sf_list'].items():
        sf_list[sf_name] = sf_details
        entry = sf_list[sf_name]
        # setdefault leaves existing values untouched and fills missing ones.
        entry.setdefault("processing_delay_mean", default_processing_delay_mean)
        entry.setdefault("processing_delay_stdev", default_processing_delay_stdev)
        entry.setdefault("startup_delay", default_startup_delay)
        if 'resource_function_id' in entry:
            try:
                entry['resource_function'] = load_resource_function(
                    entry['resource_function_id'], resource_functions_path)
            except Exception as ex:
                # Fall back to the identity function when loading fails.
                entry['resource_function_id'] = 'default'
                entry['resource_function'] = default_resource_function
                log.warning(f'{str(ex)} SF {sf_name} will use default resource function instead.')
        else:
            entry["resource_function_id"] = 'default'
            entry["resource_function"] = default_resource_function
            log.debug(f'No resource function specified for SF {sf_name}. Default resource function will be used.')
    return sf_list
def weight(edge_cap, edge_delay):
    """
    edge weight = 1 / (cap + 1/delay) => prefer high cap, use smaller delay as additional influence/tie breaker

    If cap is None, fall back to the edge delay alone as the weight.
    A zero capacity yields an infinite weight; a zero delay yields 0.
    """
    assert edge_delay is not None
    if edge_cap is None:
        return edge_delay
    if edge_cap == 0:
        return math.inf
    if edge_delay == 0:
        return 0
    return 1 / (edge_cap + 1 / edge_delay)
def network_diameter(nx_network):
    """Return the network diameter, ie, delay of longest shortest path"""
    # Lazily compute the all-pairs shortest paths on first use.
    if 'shortest_paths' not in nx_network.graph:
        shortest_paths(nx_network)
    return max(delay for _, delay in nx_network.graph['shortest_paths'].values())
def shortest_paths(networkx_network):
    """
    finds the all pairs shortest paths using Johnson Algo
    sets a dictionary, keyed by source and target, of all pairs shortest paths with path_delays in the network as an
    attr.
    key: (src, dest) , value: ([nodes_on_the_shortest_path], path_delay)
    path delays are the sum of individual edge_delays of the edges in the shortest path from source to destination
    """
    # in-built implementation of Johnson Algo:
    # dict with key: source, value: dict with key: dest and value: shortest path as list of nodes
    all_pair_shortest_paths = dict(nx.johnson(networkx_network, weight='weight'))
    shortest_paths_with_delays = {}
    for source, targets in all_pair_shortest_paths.items():
        for destination, node_path in targets.items():
            # Sum the delays of consecutive edges on the path; the sum is
            # empty (0) when source == destination (single-node path).
            path_delay = sum(
                networkx_network[u][v]['delay']
                for u, v in zip(node_path, node_path[1:])
            )
            shortest_paths_with_delays[(source, destination)] = (node_path, path_delay)
    networkx_network.graph['shortest_paths'] = shortest_paths_with_delays
def read_network(file, node_cap=None, link_cap=None):
    """
    Read the GraphML file and return the parsed network plus node lists.

    :param file: path to a .graphml file
    :param node_cap: default node capacity when "NodeCap" is missing
    :param link_cap: default link capacity when "LinkFwdCap" is missing
    :return: (networkx_network, ing_nodes, eg_nodes)
    :raises ValueError: if the file is not GraphML or a node ends up
        without any capacity value.
    """
    SPEED_OF_LIGHT = 299792458  # meter per second
    PROPAGATION_FACTOR = 0.77  # https://en.wikipedia.org/wiki/Propagation_delay
    if not file.endswith(".graphml"):
        raise ValueError("{} is not a GraphML file".format(file))
    graphml_network = nx.read_graphml(file, node_type=int)
    networkx_network = nx.Graph()
    # Setting the nodes of the NetworkX Graph
    for n in graphml_network.nodes(data=True):
        # Node ids are prefixed with "pop" (e.g. GraphML node 3 -> "pop3").
        node_id = "pop{}".format(n[0])
        cap = n[1].get("NodeCap", None)
        if cap is None:
            cap = node_cap
            log.warning("NodeCap not set in the GraphML file, now using default NodeCap for node: {}".format(n))
        node_type = n[1].get("NodeType", "Normal")
        node_name = n[1].get("label", None)
        # No per-node cap and no command-line default either -> fatal.
        if cap is None:
            raise ValueError("No NodeCap. set for node{} in file {} (as cmd argument or in graphml)".format(n, file))
        # Adding a Node in the NetworkX Graph
        # {"id": node_id, "name": node_name, "type": node_type, "cap": cpu})
        # Type of node. For now it is either "Normal" or "Ingress"
        # Init 'remaining_resources' to the node capacity
        networkx_network.add_node(node_id, name=node_name, type=node_type, cap=cap, available_sf={},
                                  remaining_cap=cap)
    # set links
    # calculate link delay based on geo positions of nodes;
    for e in graphml_network.edges(data=True):
        # Check whether LinkDelay value is set, otherwise default to None
        source = "pop{}".format(e[0])
        target = "pop{}".format(e[1])
        link_delay = e[2].get("LinkDelay", None)
        # As edges are undirectional, only LinkFwdCap determines the available data rate
        link_fwd_cap = e[2].get("LinkFwdCap", link_cap)
        if e[2].get("LinkFwdCap") is None:
            log.warning(f"Link {(e[0], e[1])} has no capacity defined. Using the default capacity {link_cap} instead.")
        # Setting a default delay of 3 incase no delay specified in GraphML file
        # and we are unable to set it based on Geo location
        delay = 3
        if link_delay is None:
            n1 = graphml_network.nodes(data=True)[e[0]]
            n2 = graphml_network.nodes(data=True)[e[1]]
            n1_lat, n1_long = n1.get("Latitude", None), n1.get("Longitude", None)
            n2_lat, n2_long = n2.get("Latitude", None), n2.get("Longitude", None)
            if n1_lat is None or n1_long is None or n2_lat is None or n2_long is None:
                log.warning("Link Delay not set in the GraphML file and unable to calc based on Geo Location,"
                            "Now using default delay for edge: ({},{})".format(source, target))
            else:
                distance = dist((n1_lat, n1_long), (n2_lat, n2_long)).meters  # in meters
                # round delay to int using np.around for consistency with emulator
                delay = int(np.around((distance / SPEED_OF_LIGHT * 1000) * PROPAGATION_FACTOR))  # in milliseconds
        else:
            delay = link_delay
        # Adding the undirected edges for each link defined in the network.
        # delay = edge delay , cap = edge capacity
        networkx_network.add_edge(source, target, delay=delay, cap=link_fwd_cap, remaining_cap=link_fwd_cap)
    # setting the weight property for each edge in the NetworkX Graph
    # weight attribute is used to find the shortest paths
    for edge in networkx_network.edges.values():
        edge['weight'] = weight(edge['cap'], edge['delay'])
    # Setting the all-pairs shortest path in the NetworkX network as a graph attribute
    shortest_paths(networkx_network)
    # Filter ingress and egress (if any) nodes
    # NOTE(review): ing_nodes collects full (id, attrs) tuples while eg_nodes
    # collects only the ids — presumably intentional for the callers; confirm.
    ing_nodes = []
    eg_nodes = []
    for node in networkx_network.nodes.items():
        if node[1]["type"] == "Ingress":
            ing_nodes.append(node)
        if node[1]["type"] == "Egress":
            eg_nodes.append(node[0])
    return networkx_network, ing_nodes, eg_nodes
def reset_cap(network):
    """Reset every node's and edge's remaining capacity (and SF placements) to the initial values."""
    for _, node_attrs in network.nodes(data=True):
        node_attrs['remaining_cap'] = node_attrs['cap']
        node_attrs['available_sf'] = {}
    for _, _, edge_attrs in network.edges(data=True):
        edge_attrs['remaining_cap'] = edge_attrs['cap']
|
[
"luongquocdat.hust@gmail.com"
] |
luongquocdat.hust@gmail.com
|
0ec055a25cc8a0344ce78bd9d4773178113d80f6
|
77ec9edf40b34b48477a627d149b6c2054b98a93
|
/abc_179_d.py
|
7b6ac7b449986dfe9275cb4f4fd8e0cb8b57219c
|
[] |
no_license
|
junkhp/atcorder
|
fa4eeb204e3a4ac713001ab89c205039703abc88
|
028ddf7a39534d5907232c4576a03af79feb6073
|
refs/heads/main
| 2023-04-11T02:15:10.088883
| 2021-04-22T07:06:06
| 2021-04-22T07:06:06
| 313,284,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
def main():
    """Solve AtCoder ABC 179 D: count the ways to move from cell 1 to cell n.

    Reads n and k from stdin, then k inclusive ranges [a, b] whose union
    is the set of allowed step sizes, and prints the number of distinct
    move sequences from cell 1 to cell n modulo 998244353.
    """
    MOD = 998244353
    n, k = map(int, input().split())
    move_set = set()
    for _ in range(k):
        a, b = map(int, input().split())
        # Collect every step length in the inclusive range [a, b].
        for step in range(a, b + 1):
            move_set.add(step)
    sorted_steps = sorted(move_set)
    # dp[i] = number of ways to reach cell i (1-indexed; dp[0] unused).
    dp = [0] * (n + 1)
    dp[1] = 1
    for i in range(2, n + 1):
        for num in sorted_steps:
            # Steps are sorted, so once num >= i no later step can apply.
            if num + 1 > i:
                break
            dp[i] += dp[i - num]
        # Reduce modulo MOD at each cell so the dp values stay small
        # (the original reduced only once at the end, letting the integers
        # grow unboundedly large and slowing every addition).
        dp[i] %= MOD
    print(dp[-1] % MOD)
if __name__ == "__main__":
main()
|
[
"oshiba@m.cs.osakafu-u.ac.jp"
] |
oshiba@m.cs.osakafu-u.ac.jp
|
8064b003bf8f32b09274eb5f0e59a4fb83ace17d
|
eeddca8c1041c3abbf39698d0da79cf63a210fff
|
/hello.py
|
f258045f4f064636cb0f4193dd5b0401f8162b49
|
[] |
no_license
|
aftana/st-web51
|
f4917eaa72720ad79e901bab0be0e30ebc9df549
|
270b34d71e7995ce3d611012f12f60e98f2d54c8
|
refs/heads/master
| 2020-12-02T21:00:25.734887
| 2017-07-04T18:53:56
| 2017-07-04T18:53:56
| 96,243,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
def app(environ, start_response):
    """WSGI application that echoes each query parameter on its own line.

    Strips the leading "/?" from RAW_URI, splits the remainder on '&',
    and returns one CRLF-terminated line per parameter as text/plain.
    """
    raw_uri = str(environ.get('RAW_URI'))[2:]
    body = ''.join(param + '\r\n' for param in raw_uri.split('&'))
    start_response("200 OK", [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body))),
    ])
    return iter([body])
|
[
"art-mint@mint.com"
] |
art-mint@mint.com
|
5f4e1fee590a9fadc29ea5ac318730f216eca1c0
|
9752c5fad298864a3328170aa6017bc4b6db9d18
|
/ref/importing_data/sanity_check.py
|
5ceaebcee328057ea8c6de3aa12631e0d9cd5759
|
[
"MIT"
] |
permissive
|
dli-invest/investing-experiments
|
69dfa3e4adfbb8d1074732eb4aa82be3b660470f
|
ed5ec66b6151e6eb0b560bca3485af77985fecfd
|
refs/heads/master
| 2023-04-07T23:37:49.166285
| 2021-04-19T16:47:22
| 2021-04-19T16:47:22
| 295,036,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
import os
from zipline.utils.run_algo import load_extensions
# Load the zipline extension file (presumably ~/.zipline/extension.py — the
# zipline default; confirm) so the custom 'eu_stocks' bundle registered
# there becomes available before bundles.load() is called.
load_extensions(
    default=True,
    extensions=[],
    strict=True,
    environ=os.environ,
)
from zipline.data import bundles
# Load the ingested 'eu_stocks' bundle and print every asset it contains —
# a quick sanity check that the data ingestion worked.
bundle = bundles.load('eu_stocks')
data = bundle.asset_finder.retrieve_all(bundle.asset_finder.sids)
print(data)
|
[
"noreply@github.com"
] |
noreply@github.com
|
09e4f93dfe0a7dbf721add15e86b819a1a93c6b9
|
d475a6cf49c0b2d40895ff6d48ca9b0298643a87
|
/pyleecan/Classes/SlotW24.py
|
984f0065ae9048217d93f3adee011ed7f89dd645
|
[
"Apache-2.0"
] |
permissive
|
lyhehehe/pyleecan
|
6c4a52b17a083fe29fdc8dcd989a3d20feb844d9
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
refs/heads/master
| 2021-07-05T17:42:02.813128
| 2020-09-03T14:27:03
| 2020-09-03T14:27:03
| 176,678,325
| 2
| 0
| null | 2019-03-20T07:28:06
| 2019-03-20T07:28:06
| null |
UTF-8
|
Python
| false
| false
| 9,973
|
py
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Slot/SlotW24.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Slot/SlotW24
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from .SlotWind import SlotWind
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Slot.SlotW24._comp_point_coordinate import _comp_point_coordinate
except ImportError as error:
_comp_point_coordinate = error
try:
from ..Methods.Slot.SlotW24.build_geometry import build_geometry
except ImportError as error:
build_geometry = error
try:
from ..Methods.Slot.SlotW24.build_geometry_wind import build_geometry_wind
except ImportError as error:
build_geometry_wind = error
try:
from ..Methods.Slot.SlotW24.check import check
except ImportError as error:
check = error
try:
from ..Methods.Slot.SlotW24.comp_alphas import comp_alphas
except ImportError as error:
comp_alphas = error
try:
from ..Methods.Slot.SlotW24.comp_angle_opening import comp_angle_opening
except ImportError as error:
comp_angle_opening = error
try:
from ..Methods.Slot.SlotW24.comp_height import comp_height
except ImportError as error:
comp_height = error
try:
from ..Methods.Slot.SlotW24.comp_height_wind import comp_height_wind
except ImportError as error:
comp_height_wind = error
try:
from ..Methods.Slot.SlotW24.comp_surface import comp_surface
except ImportError as error:
comp_surface = error
try:
from ..Methods.Slot.SlotW24.comp_surface_wind import comp_surface_wind
except ImportError as error:
comp_surface_wind = error
from ._check import InitUnKnowClassError
class SlotW24(SlotWind):
    """Auto-generated slot class with tooth width W3 and slot height H2.

    Generated from Generator/ClassesRef/Slot/SlotW24.csv — do not edit by
    hand. The computation methods live in pyleecan/Methods/Slot/SlotW24 and
    are attached below; when an import failed, the attribute is replaced by
    a property that raises the original ImportError on first use, so unused
    optional dependencies do not break the class.
    """

    VERSION = 1
    IS_SYMMETRICAL = 1
    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.Slot.SlotW24._comp_point_coordinate
    if isinstance(_comp_point_coordinate, ImportError):
        _comp_point_coordinate = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method _comp_point_coordinate: "
                    + str(_comp_point_coordinate)
                )
            )
        )
    else:
        _comp_point_coordinate = _comp_point_coordinate
    # cf Methods.Slot.SlotW24.build_geometry
    if isinstance(build_geometry, ImportError):
        build_geometry = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method build_geometry: " + str(build_geometry)
                )
            )
        )
    else:
        build_geometry = build_geometry
    # cf Methods.Slot.SlotW24.build_geometry_wind
    if isinstance(build_geometry_wind, ImportError):
        build_geometry_wind = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method build_geometry_wind: "
                    + str(build_geometry_wind)
                )
            )
        )
    else:
        build_geometry_wind = build_geometry_wind
    # cf Methods.Slot.SlotW24.check
    if isinstance(check, ImportError):
        check = property(
            fget=lambda x: raise_(
                ImportError("Can't use SlotW24 method check: " + str(check))
            )
        )
    else:
        check = check
    # cf Methods.Slot.SlotW24.comp_alphas
    if isinstance(comp_alphas, ImportError):
        comp_alphas = property(
            fget=lambda x: raise_(
                ImportError("Can't use SlotW24 method comp_alphas: " + str(comp_alphas))
            )
        )
    else:
        comp_alphas = comp_alphas
    # cf Methods.Slot.SlotW24.comp_angle_opening
    if isinstance(comp_angle_opening, ImportError):
        comp_angle_opening = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method comp_angle_opening: "
                    + str(comp_angle_opening)
                )
            )
        )
    else:
        comp_angle_opening = comp_angle_opening
    # cf Methods.Slot.SlotW24.comp_height
    if isinstance(comp_height, ImportError):
        comp_height = property(
            fget=lambda x: raise_(
                ImportError("Can't use SlotW24 method comp_height: " + str(comp_height))
            )
        )
    else:
        comp_height = comp_height
    # cf Methods.Slot.SlotW24.comp_height_wind
    if isinstance(comp_height_wind, ImportError):
        comp_height_wind = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method comp_height_wind: "
                    + str(comp_height_wind)
                )
            )
        )
    else:
        comp_height_wind = comp_height_wind
    # cf Methods.Slot.SlotW24.comp_surface
    if isinstance(comp_surface, ImportError):
        comp_surface = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method comp_surface: " + str(comp_surface)
                )
            )
        )
    else:
        comp_surface = comp_surface
    # cf Methods.Slot.SlotW24.comp_surface_wind
    if isinstance(comp_surface_wind, ImportError):
        comp_surface_wind = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use SlotW24 method comp_surface_wind: "
                    + str(comp_surface_wind)
                )
            )
        )
    else:
        comp_surface_wind = comp_surface_wind
    # save method is available in all object
    save = save
    # generic copy method
    def copy(self):
        """Return a copy of the class
        """
        return type(self)(init_dict=self.as_dict())
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(self, W3=0.003, H2=0.003, Zs=36, init_dict=None, init_str=None):
        """Constructor of the class. Can be use in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with every properties as keys
        - __init__ (init_str = s) s must be a string
            s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None:  # Initialisation by str
            from ..Functions.load import load
            assert type(init_str) is str
            # load the object from a file
            obj = load(init_str)
            assert type(obj) is type(self)
            W3 = obj.W3
            H2 = obj.H2
            Zs = obj.Zs
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "W3" in list(init_dict.keys()):
                W3 = init_dict["W3"]
            if "H2" in list(init_dict.keys()):
                H2 = init_dict["H2"]
            if "Zs" in list(init_dict.keys()):
                Zs = init_dict["Zs"]
        # Initialisation by argument
        self.W3 = W3
        self.H2 = H2
        # Call SlotWind init
        super(SlotW24, self).__init__(Zs=Zs)
        # The class is frozen (in SlotWind init), for now it's impossible to
        # add new properties
    def __str__(self):
        """Convert this object in a readable string (for print)"""
        SlotW24_str = ""
        # Get the properties inherited from SlotWind
        SlotW24_str += super(SlotW24, self).__str__()
        SlotW24_str += "W3 = " + str(self.W3) + linesep
        SlotW24_str += "H2 = " + str(self.H2) + linesep
        return SlotW24_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        # Check the properties inherited from SlotWind
        if not super(SlotW24, self).__eq__(other):
            return False
        if other.W3 != self.W3:
            return False
        if other.H2 != self.H2:
            return False
        return True
    def as_dict(self):
        """Convert this object in a json serializable dict (can be use in __init__)
        """
        # Get the properties inherited from SlotWind
        SlotW24_dict = super(SlotW24, self).as_dict()
        SlotW24_dict["W3"] = self.W3
        SlotW24_dict["H2"] = self.H2
        # The class name is added to the dict for deserialisation purpose
        # Overwrite the mother class name
        SlotW24_dict["__class__"] = "SlotW24"
        return SlotW24_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.W3 = None
        self.H2 = None
        # Set to None the properties inherited from SlotWind
        super(SlotW24, self)._set_None()
    def _get_W3(self):
        """getter of W3"""
        return self._W3
    def _set_W3(self, value):
        """setter of W3"""
        check_var("W3", value, "float", Vmin=0)
        self._W3 = value
    W3 = property(
        fget=_get_W3,
        fset=_set_W3,
        doc=u"""Teeth width
    :Type: float
    :min: 0
    """,
    )
    def _get_H2(self):
        """getter of H2"""
        return self._H2
    def _set_H2(self, value):
        """setter of H2"""
        check_var("H2", value, "float", Vmin=0)
        self._H2 = value
    H2 = property(
        fget=_get_H2,
        fset=_set_H2,
        doc=u"""Slot height
    :Type: float
    :min: 0
    """,
    )
|
[
"sebgue@gmx.net"
] |
sebgue@gmx.net
|
8df10d67c0578696c321b29a4292c832ffce104d
|
3955734e46e6b6ecc2713d5edb02507d15a329af
|
/sifter/grammar/comparator.py
|
554c112bbed88675dd045a7fd37d392ba37f68d7
|
[
"BSD-2-Clause"
] |
permissive
|
python-sifter/sifter
|
987de046c1fa29cf4fdfbc6ff37cb142fa6a001b
|
cb2656ac47125e9e06c9bdda56193da41cc340a8
|
refs/heads/master
| 2022-12-20T21:52:48.935455
| 2020-09-07T05:27:01
| 2020-09-07T05:27:01
| 1,018,454
| 0
| 0
|
BSD-2-Clause
| 2020-10-16T17:19:53
| 2010-10-23T21:43:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
import re
import sifter.comparator
__all__ = ('Comparator',)
# The official definition of comparators is in RFC 4790
class Comparator(object):
    """Base class for Sieve comparators (RFC 4790).

    Subclasses define COMPARATOR_ID and call register() so sieve scripts
    can reference the comparator by that identifier.
    """

    @classmethod
    def register(cls):
        """Register this comparator class under its COMPARATOR_ID."""
        try:
            sifter.comparator.register(cls.COMPARATOR_ID, cls)
        except AttributeError:
            # this method should only be called on sub-classes that define an
            # identifier
            raise NotImplementedError

    @classmethod
    def sort_key(cls, s):
        """Normalization hook: subclasses override to normalize the matched string (identity by default)."""
        return s

    # draft-ietf-sieve-regex-01: according to section 5, the :regex match type
    # is available to all comparators. furthermore, string normalization (aka
    # sort_key() above) is only applied to the string to be matched against,
    # not to the regular expression string.
    @classmethod
    def cmp_regex(cls, s, pattern, state):
        """Return the regex match of `pattern` against the normalized `s`, or None."""
        # section 4: must be used as an extension named 'regex'
        state.check_required_extension('regex', ':regex')
        # TODO: cache compiled pattern for more efficient execution across
        # multiple strings and messages
        # TODO: make sure the specified pattern is allowed by the standard
        # (which allows only extended regular expressions from IEEE Standard
        # 1003.2, 1992): 1) disallow python-specific features, along with word
        # boundaries and backreferences, 2) double-check that python supports
        # all ERE features.
        compiled_re = re.compile(pattern)
        return compiled_re.search(cls.sort_key(s))
|
[
"gary@realify.com"
] |
gary@realify.com
|
44e898de8b26e5a201cf475e7ab019e44ead146d
|
67379c2ae929266f303edc783c8c62edb521174b
|
/exception/TransactionException.py
|
255a542bbd984278db4669c881c1ac6ca58f723b
|
[] |
no_license
|
bbb11808/seata-python
|
d20be83093d6d084ad36d9292a8ee18ad3bfc8c6
|
c53b605be423c781d38e599e5bade8df8c81c2d9
|
refs/heads/master
| 2023-02-11T01:22:18.488881
| 2021-01-05T10:10:08
| 2021-01-05T10:10:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
class TransactionException(Exception):
    """Exception raised for Seata transaction errors.

    Attributes:
        code: transaction error code
        message: human-readable description (may be None)
        cause: underlying exception that triggered this one (may be None)
    """

    def __init__(self, code, message=None, cause=None):
        # Forward the message to Exception so str(e) and e.args work as
        # expected (the original never called the base initializer, leaving
        # str(e) empty).
        if message is None:
            super(TransactionException, self).__init__()
        else:
            super(TransactionException, self).__init__(message)
        self.code = code
        self.message = message
        self.cause = cause
|
[
"jsbxyyx@163.com"
] |
jsbxyyx@163.com
|
d65dd43c9764aa5c4b8e093dd79520e1a748eb71
|
7b0f9a984dca4ad3fa536cf6ecd8f6654db02420
|
/tencentcloud/live/v20180801/models.py
|
15eaac07bf2987d54808c969fbf9decd6cab2b38
|
[
"Apache-2.0"
] |
permissive
|
SpencerHoGD/tencentcloud-sdk-python
|
8cb6756722ec571f140a2dd8d2ade897f8bbd0c5
|
c90e7719a253ea7928d4a510987df3ea6f3c23ac
|
refs/heads/master
| 2020-06-27T13:24:31.736521
| 2019-07-26T10:31:52
| 2019-07-26T10:31:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212,493
|
py
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AddDelayLiveStreamRequest(AbstractModel):
    """AddDelayLiveStream request structure.
    """

    def __init__(self):
        """
        :param AppName: Push path; must match the AppName in the push and playback URLs. Defaults to `live`.
        :type AppName: str
        :param DomainName: Push domain name.
        :type DomainName: str
        :param StreamName: Stream name.
        :type StreamName: str
        :param DelayTime: Delay time in seconds; upper limit: 600 seconds.
        :type DelayTime: int
        :param ExpireTime: Expiration time of the delay setting, in UTC format, e.g. 2018-11-29T19:00:00Z.
        Note: expires after 7 days by default, and takes effect for at most 7 days.
        :type ExpireTime: str
        """
        self.AppName = None
        self.DomainName = None
        self.StreamName = None
        self.DelayTime = None
        self.ExpireTime = None

    def _deserialize(self, params):
        # Copy each known field from the raw parameter dict (missing keys -> None).
        self.AppName = params.get("AppName")
        self.DomainName = params.get("DomainName")
        self.StreamName = params.get("StreamName")
        self.DelayTime = params.get("DelayTime")
        self.ExpireTime = params.get("ExpireTime")
class AddDelayLiveStreamResponse(AbstractModel):
    """AddDelayLiveStream response structure.
    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class AddLiveDomainRequest(AbstractModel):
    """AddLiveDomain request structure.
    """

    def __init__(self):
        """
        :param DomainName: Domain name.
        :type DomainName: str
        :param DomainType: Domain type:
        0: push (ingest) domain,
        1: playback domain.
        :type DomainType: int
        :param PlayType: Playback (pull) domain type:
        1: Mainland China,
        2: global,
        3: outside Mainland China.
        :type PlayType: int
        :param IsDelayLive: Defaults to 0: normal live streaming,
        1: slow (delayed) live streaming.
        :type IsDelayLive: int
        """
        self.DomainName = None
        self.DomainType = None
        self.PlayType = None
        self.IsDelayLive = None

    def _deserialize(self, params):
        self.DomainName = params.get("DomainName")
        self.DomainType = params.get("DomainType")
        self.PlayType = params.get("PlayType")
        self.IsDelayLive = params.get("IsDelayLive")
class AddLiveDomainResponse(AbstractModel):
    """AddLiveDomain response structure.
    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class AddLiveWatermarkRequest(AbstractModel):
    """AddLiveWatermark request structure.
    """

    def __init__(self):
        """
        :param PictureUrl: Watermark image URL.
        :type PictureUrl: str
        :param WatermarkName: Watermark name.
        :type WatermarkName: str
        :param XPosition: Display position, X-axis offset.
        :type XPosition: int
        :param YPosition: Display position, Y-axis offset.
        :type YPosition: int
        :param Width: Watermark width, as a percentage of the original video width. It is recommended to set
        only one of width/height so the other scales proportionally without distortion.
        :type Width: int
        :param Height: Watermark height, as a percentage of the original video width (per upstream text —
        presumably height was intended). Same recommendation as for Width.
        :type Height: int
        """
        self.PictureUrl = None
        self.WatermarkName = None
        self.XPosition = None
        self.YPosition = None
        self.Width = None
        self.Height = None

    def _deserialize(self, params):
        self.PictureUrl = params.get("PictureUrl")
        self.WatermarkName = params.get("WatermarkName")
        self.XPosition = params.get("XPosition")
        self.YPosition = params.get("YPosition")
        self.Width = params.get("Width")
        self.Height = params.get("Height")
class AddLiveWatermarkResponse(AbstractModel):
    """AddLiveWatermark response structure.
    """

    def __init__(self):
        """
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
        :type RequestId: str
        """
        self.WatermarkId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.WatermarkId = params.get("WatermarkId")
        self.RequestId = params.get("RequestId")
class BillDataInfo(AbstractModel):
    """Bandwidth and traffic information.
    """

    def __init__(self):
        """
        :param Time: Time point, in the format yyyy-mm-dd HH:MM:SS.
        :type Time: str
        :param Bandwidth: Bandwidth, in Mbps.
        :type Bandwidth: float
        :param Flux: Traffic, in MB.
        :type Flux: float
        """
        self.Time = None
        self.Bandwidth = None
        self.Flux = None

    def _deserialize(self, params):
        self.Time = params.get("Time")
        self.Bandwidth = params.get("Bandwidth")
        self.Flux = params.get("Flux")
class BindLiveDomainCertRequest(AbstractModel):
    """BindLiveDomainCert request structure.
    """

    def __init__(self):
        """
        :param CertId: Certificate ID.
        :type CertId: int
        :param DomainName: Playback domain name.
        :type DomainName: str
        :param Status: Status. 0: disabled, 1: enabled.
        :type Status: int
        """
        self.CertId = None
        self.DomainName = None
        self.Status = None

    def _deserialize(self, params):
        self.CertId = params.get("CertId")
        self.DomainName = params.get("DomainName")
        self.Status = params.get("Status")
class BindLiveDomainCertResponse(AbstractModel):
    """BindLiveDomainCert response structure.
    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CallBackRuleInfo(AbstractModel):
    """Callback rule information.
    """

    def __init__(self):
        """
        :param CreateTime: Rule creation time.
        :type CreateTime: str
        :param UpdateTime: Rule update time.
        :type UpdateTime: str
        :param TemplateId: Template ID.
        :type TemplateId: int
        :param DomainName: Push domain name.
        :type DomainName: str
        :param AppName: Push path.
        :type AppName: str
        """
        self.CreateTime = None
        self.UpdateTime = None
        self.TemplateId = None
        self.DomainName = None
        self.AppName = None

    def _deserialize(self, params):
        self.CreateTime = params.get("CreateTime")
        self.UpdateTime = params.get("UpdateTime")
        self.TemplateId = params.get("TemplateId")
        self.DomainName = params.get("DomainName")
        self.AppName = params.get("AppName")
class CallBackTemplateInfo(AbstractModel):
    """Callback template information.
    """

    def __init__(self):
        """
        :param TemplateId: Template ID.
        :type TemplateId: int
        :param TemplateName: Template name.
        :type TemplateName: str
        :param Description: Description.
        :type Description: str
        :param StreamBeginNotifyUrl: Stream start callback URL.
        :type StreamBeginNotifyUrl: str
        :param StreamEndNotifyUrl: Stream end (interruption) callback URL.
        :type StreamEndNotifyUrl: str
        :param StreamMixNotifyUrl: Stream mixing callback URL.
        :type StreamMixNotifyUrl: str
        :param RecordNotifyUrl: Recording callback URL.
        :type RecordNotifyUrl: str
        :param SnapshotNotifyUrl: Screenshot callback URL.
        :type SnapshotNotifyUrl: str
        :param PornCensorshipNotifyUrl: Porn detection callback URL.
        :type PornCensorshipNotifyUrl: str
        :param CallbackKey: Callback authentication key.
        :type CallbackKey: str
        """
        self.TemplateId = None
        self.TemplateName = None
        self.Description = None
        self.StreamBeginNotifyUrl = None
        self.StreamEndNotifyUrl = None
        self.StreamMixNotifyUrl = None
        self.RecordNotifyUrl = None
        self.SnapshotNotifyUrl = None
        self.PornCensorshipNotifyUrl = None
        self.CallbackKey = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
        self.TemplateName = params.get("TemplateName")
        self.Description = params.get("Description")
        self.StreamBeginNotifyUrl = params.get("StreamBeginNotifyUrl")
        self.StreamEndNotifyUrl = params.get("StreamEndNotifyUrl")
        self.StreamMixNotifyUrl = params.get("StreamMixNotifyUrl")
        self.RecordNotifyUrl = params.get("RecordNotifyUrl")
        self.SnapshotNotifyUrl = params.get("SnapshotNotifyUrl")
        self.PornCensorshipNotifyUrl = params.get("PornCensorshipNotifyUrl")
        self.CallbackKey = params.get("CallbackKey")
class CdnPlayStatData(AbstractModel):
    """Downstream playback statistics.
    """

    def __init__(self):
        """
        :param Time: Time point, in the format yyyy-mm-dd HH:MM:SS.
        :type Time: str
        :param Bandwidth: Bandwidth (in Mbps).
        :type Bandwidth: float
        :param Flux: Traffic (in MB).
        :type Flux: float
        :param Request: Number of new requests.
        :type Request: int
        :param Online: Number of concurrent connections.
        :type Online: int
        """
        self.Time = None
        self.Bandwidth = None
        self.Flux = None
        self.Request = None
        self.Online = None

    def _deserialize(self, params):
        self.Time = params.get("Time")
        self.Bandwidth = params.get("Bandwidth")
        self.Flux = params.get("Flux")
        self.Request = params.get("Request")
        self.Online = params.get("Online")
class CertInfo(AbstractModel):
    """Certificate information.
    """

    def __init__(self):
        """
        :param CertId: Certificate ID.
        :type CertId: int
        :param CertName: Certificate name.
        :type CertName: str
        :param Description: Description.
        :type Description: str
        :param CreateTime: Creation time, in UTC format.
        :type CreateTime: str
        :param HttpsCrt: Certificate content.
        :type HttpsCrt: str
        :param CertType: Certificate type.
        0: Tencent Cloud-hosted certificate,
        1: user-uploaded certificate.
        :type CertType: int
        :param CertExpireTime: Certificate expiration time, in UTC format.
        :type CertExpireTime: str
        :param DomainList: List of domain names that use this certificate.
        :type DomainList: list of str
        """
        self.CertId = None
        self.CertName = None
        self.Description = None
        self.CreateTime = None
        self.HttpsCrt = None
        self.CertType = None
        self.CertExpireTime = None
        self.DomainList = None

    def _deserialize(self, params):
        self.CertId = params.get("CertId")
        self.CertName = params.get("CertName")
        self.Description = params.get("Description")
        self.CreateTime = params.get("CreateTime")
        self.HttpsCrt = params.get("HttpsCrt")
        self.CertType = params.get("CertType")
        self.CertExpireTime = params.get("CertExpireTime")
        self.DomainList = params.get("DomainList")
class ClientIpPlaySumInfo(AbstractModel):
    """Playback summary for a single client IP.

    Attributes:
        ClientIp (str): client IP, dotted-decimal notation.
        Province (str): province the client is in.
        TotalFlux (float): total traffic.
        TotalRequest (int): total number of requests.
        TotalFailedRequest (int): total number of failed requests.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.ClientIp = None
        self.Province = None
        self.TotalFlux = None
        self.TotalRequest = None
        self.TotalFailedRequest = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("ClientIp", "Province", "TotalFlux", "TotalRequest",
                    "TotalFailedRequest"):
            setattr(self, key, params.get(key))
class CreateLiveCallbackRuleRequest(AbstractModel):
    """CreateLiveCallbackRule request payload.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        TemplateId (int): template ID.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.TemplateId = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "TemplateId"):
            setattr(self, key, params.get(key))
class CreateLiveCallbackRuleResponse(AbstractModel):
    """CreateLiveCallbackRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CreateLiveCallbackTemplateRequest(AbstractModel):
    """CreateLiveCallbackTemplate request payload.

    Attributes:
        TemplateName (str): template name; non-empty string.
        Description (str): description.
        StreamBeginNotifyUrl (str): stream-start callback URL (see the
            event notification protocol documentation).
        StreamEndNotifyUrl (str): stream-end callback URL.
        RecordNotifyUrl (str): recording callback URL.
        SnapshotNotifyUrl (str): screenshot callback URL.
        PornCensorshipNotifyUrl (str): porn-detection callback URL.
        CallbackKey (str): callback key, shared by all callback URLs; see
            the callback format documentation for authentication details.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.TemplateName = None
        self.Description = None
        self.StreamBeginNotifyUrl = None
        self.StreamEndNotifyUrl = None
        self.RecordNotifyUrl = None
        self.SnapshotNotifyUrl = None
        self.PornCensorshipNotifyUrl = None
        self.CallbackKey = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("TemplateName", "Description", "StreamBeginNotifyUrl",
                    "StreamEndNotifyUrl", "RecordNotifyUrl",
                    "SnapshotNotifyUrl", "PornCensorshipNotifyUrl",
                    "CallbackKey"):
            setattr(self, key, params.get(key))
class CreateLiveCallbackTemplateResponse(AbstractModel):
    """CreateLiveCallbackTemplate response structure.

    Attributes:
        TemplateId (int): template ID.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("TemplateId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveCertRequest(AbstractModel):
    """CreateLiveCert request payload.

    Attributes:
        CertType (int): certificate type; 0 = user-added certificate,
            1 = Tencent Cloud hosted certificate.
        HttpsCrt (str): certificate content, i.e. the public key.
        HttpsKey (str): private key.
        CertName (str): certificate name.
        Description (str): description.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.CertType = None
        self.HttpsCrt = None
        self.HttpsKey = None
        self.CertName = None
        self.Description = None

    def _deserialize(self, params):
        for key in ("CertType", "HttpsCrt", "HttpsKey", "CertName",
                    "Description"):
            setattr(self, key, params.get(key))
class CreateLiveCertResponse(AbstractModel):
    """CreateLiveCert response structure.

    Attributes:
        CertId (int): certificate ID.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.CertId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("CertId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveRecordRequest(AbstractModel):
    """CreateLiveRecord request payload.

    Attributes:
        StreamName (str): stream name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        DomainName (str): push domain; required for multi-domain pushing.
        StartTime (str): recording start time, China Standard Time,
            URL-encoded per RFC 3986 (e.g. ``2017-01-01+10%3a10%3a01``).
            Required in scheduled mode; ignored in real-time mode.
        EndTime (str): recording end time, same encoding. Required in
            scheduled mode; optional in real-time mode, where the
            effective end time is capped at now + 30 minutes.
        RecordType (str): "video" (default, A/V) or "audio" (audio only);
            case-insensitive, valid in both modes.
        FileFormat (str): output format: "flv" (default), "hls", "mp4",
            "aac" or "mp3"; case-insensitive, valid in both modes.
        Highlight (int): real-time recording flag; 0 = scheduled mode
            (default), 1 = real-time video recording mode.
        MixStream (int): A+B=C mixed-stream C recording flag; 0 = off
            (default), 1 = on. Valid in both modes.
        StreamParam (str): URL-encoded stream parameters, e.g.
            ``record_interval=3600&storage_time=2592000``
            (record_interval in 1800-7200 s; storage_time in seconds).
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.StreamName = None
        self.AppName = None
        self.DomainName = None
        self.StartTime = None
        self.EndTime = None
        self.RecordType = None
        self.FileFormat = None
        self.Highlight = None
        self.MixStream = None
        self.StreamParam = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("StreamName", "AppName", "DomainName", "StartTime",
                    "EndTime", "RecordType", "FileFormat", "Highlight",
                    "MixStream", "StreamParam"):
            setattr(self, key, params.get(key))
class CreateLiveRecordResponse(AbstractModel):
    """CreateLiveRecord response structure.

    Attributes:
        TaskId (int): task ID, globally unique identifier of the
            recording task.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("TaskId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveRecordRuleRequest(AbstractModel):
    """CreateLiveRecordRule request payload.

    Attributes:
        DomainName (str): push domain name.
        TemplateId (int): template ID.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name; if set to a non-empty string, the
            rule applies only to that stream.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.TemplateId = None
        self.AppName = None
        self.StreamName = None

    def _deserialize(self, params):
        for key in ("DomainName", "TemplateId", "AppName", "StreamName"):
            setattr(self, key, params.get(key))
class CreateLiveRecordRuleResponse(AbstractModel):
    """CreateLiveRecordRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CreateLiveRecordTemplateRequest(AbstractModel):
    """CreateLiveRecordTemplate request payload.

    Attributes:
        TemplateName (str): template name; non-empty string.
        Description (str): description.
        FlvParam (RecordParam): FLV recording parameters, set when FLV
            recording is enabled.
        HlsParam (RecordParam): HLS recording parameters.
        Mp4Param (RecordParam): MP4 recording parameters.
        AacParam (RecordParam): AAC recording parameters.
        IsDelayLive (int): 0 = normal live, 1 = slow live.
        HlsSpecialParam (HlsSpecialParam): HLS-specific recording
            parameters.
        Mp3Param (RecordParam): MP3 recording parameters.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.TemplateName = None
        self.Description = None
        self.FlvParam = None
        self.HlsParam = None
        self.Mp4Param = None
        self.AacParam = None
        self.IsDelayLive = None
        self.HlsSpecialParam = None
        self.Mp3Param = None

    def _deserialize(self, params):
        # Scalar fields are copied directly.
        self.TemplateName = params.get("TemplateName")
        self.Description = params.get("Description")
        self.IsDelayLive = params.get("IsDelayLive")
        # Nested sub-models are only instantiated when their key is present;
        # otherwise the attribute keeps its None default.
        for attr, model in (("FlvParam", RecordParam),
                            ("HlsParam", RecordParam),
                            ("Mp4Param", RecordParam),
                            ("AacParam", RecordParam),
                            ("HlsSpecialParam", HlsSpecialParam),
                            ("Mp3Param", RecordParam)):
            raw = params.get(attr)
            if raw is not None:
                sub = model()
                sub._deserialize(raw)
                setattr(self, attr, sub)
class CreateLiveRecordTemplateResponse(AbstractModel):
    """CreateLiveRecordTemplate response structure.

    Attributes:
        TemplateId (int): template ID.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("TemplateId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveSnapshotRuleRequest(AbstractModel):
    """CreateLiveSnapshotRule request payload.

    Attributes:
        DomainName (str): push domain name.
        TemplateId (int): template ID.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name; if set to a non-empty string, the
            rule applies only to that stream.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.TemplateId = None
        self.AppName = None
        self.StreamName = None

    def _deserialize(self, params):
        for key in ("DomainName", "TemplateId", "AppName", "StreamName"):
            setattr(self, key, params.get(key))
class CreateLiveSnapshotRuleResponse(AbstractModel):
    """CreateLiveSnapshotRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CreateLiveSnapshotTemplateRequest(AbstractModel):
    """CreateLiveSnapshotTemplate request payload.

    Attributes:
        TemplateName (str): template name; non-empty string.
        CosAppId (int): COS AppId.
        CosBucket (str): COS bucket name.
        CosRegion (str): COS region.
        Description (str): description.
        SnapshotInterval (int): snapshot interval in seconds, default 10;
            range 5-600.
        Width (int): snapshot width; default 0 (original width).
        Height (int): snapshot height; default 0 (original height).
        PornFlag (int): porn detection; 0 = off (default), 1 = on.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.TemplateName = None
        self.CosAppId = None
        self.CosBucket = None
        self.CosRegion = None
        self.Description = None
        self.SnapshotInterval = None
        self.Width = None
        self.Height = None
        self.PornFlag = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("TemplateName", "CosAppId", "CosBucket", "CosRegion",
                    "Description", "SnapshotInterval", "Width", "Height",
                    "PornFlag"):
            setattr(self, key, params.get(key))
class CreateLiveSnapshotTemplateResponse(AbstractModel):
    """CreateLiveSnapshotTemplate response structure.

    Attributes:
        TemplateId (int): template ID.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("TemplateId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveTranscodeRuleRequest(AbstractModel):
    """CreateLiveTranscodeRule request payload.

    Attributes:
        DomainName (str): playback domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
        TemplateId (int): ID of an existing template.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None
        self.TemplateId = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName", "TemplateId"):
            setattr(self, key, params.get(key))
class CreateLiveTranscodeRuleResponse(AbstractModel):
    """CreateLiveTranscodeRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CreateLiveTranscodeTemplateRequest(AbstractModel):
    """CreateLiveTranscodeTemplate request payload.

    Attributes:
        TemplateName (str): template name, e.g. "900" for 900p; letters
            and digits only.
        VideoBitrate (int): video bitrate; range 100-8000.
        Vcodec (str): video codec, h264/h265, default h264.
            Note: not yet effective, pending future support.
        Acodec (str): audio codec, aac; defaults to the source format.
            Note: not yet effective, pending future support.
        AudioBitrate (int): audio bitrate; default 0, range 0-500.
        Description (str): template description.
        Width (int): width, default 0.
        NeedVideo (int): keep video; 0 = no, 1 = yes (default 1).
        NeedAudio (int): keep audio; 0 = no, 1 = yes (default 1).
        Height (int): height, default 0.
        Fps (int): frame rate, default 0.
        Gop (int): keyframe interval in seconds; defaults to the source
            interval.
        Rotate (int): rotate; 0 = no (default), 1 = yes.
        Profile (str): encoding profile, baseline/main/high; default
            baseline.
        BitrateToOrig (int): cap bitrate at the source bitrate;
            0 = no (default), 1 = yes.
        HeightToOrig (int): cap height at the source height;
            0 = no (default), 1 = yes.
        FpsToOrig (int): cap frame rate at the source frame rate;
            0 = no (default), 1 = yes.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.TemplateName = None
        self.VideoBitrate = None
        self.Vcodec = None
        self.Acodec = None
        self.AudioBitrate = None
        self.Description = None
        self.Width = None
        self.NeedVideo = None
        self.NeedAudio = None
        self.Height = None
        self.Fps = None
        self.Gop = None
        self.Rotate = None
        self.Profile = None
        self.BitrateToOrig = None
        self.HeightToOrig = None
        self.FpsToOrig = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("TemplateName", "VideoBitrate", "Vcodec", "Acodec",
                    "AudioBitrate", "Description", "Width", "NeedVideo",
                    "NeedAudio", "Height", "Fps", "Gop", "Rotate",
                    "Profile", "BitrateToOrig", "HeightToOrig",
                    "FpsToOrig"):
            setattr(self, key, params.get(key))
class CreateLiveTranscodeTemplateResponse(AbstractModel):
    """CreateLiveTranscodeTemplate response structure.

    Attributes:
        TemplateId (int): template ID.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("TemplateId", "RequestId"):
            setattr(self, key, params.get(key))
class CreateLiveWatermarkRuleRequest(AbstractModel):
    """CreateLiveWatermarkRule request payload.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
        TemplateId (int): watermark ID, i.e. the WatermarkId returned by
            the AddLiveWatermark API.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None
        self.TemplateId = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName", "TemplateId"):
            setattr(self, key, params.get(key))
class CreateLiveWatermarkRuleResponse(AbstractModel):
    """CreateLiveWatermarkRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class CreatePullStreamConfigRequest(AbstractModel):
    """CreatePullStreamConfig request payload.

    Attributes:
        FromUrl (str): source URL; live streams and VOD files are
            currently supported.
        ToUrl (str): destination URL; currently restricted to Tencent
            domains.
        AreaId (int): region ID; 1 = Shenzhen, 2 = Shanghai, 3 = Tianjin,
            4 = Hong Kong.
        IspId (int): ISP ID; 1 = Telecom, 2 = Mobile, 3 = Unicom,
            4 = other. When AreaId is 4, IspId must be 4 (other).
        StartTime (str): start time, UTC format, e.g.
            ``2019-01-08T10:00:00Z``.
        EndTime (str): end time, UTC format. Constraints: must be later
            than the start time; both must be later than now; the span
            must be less than seven days.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.FromUrl = None
        self.ToUrl = None
        self.AreaId = None
        self.IspId = None
        self.StartTime = None
        self.EndTime = None

    def _deserialize(self, params):
        for key in ("FromUrl", "ToUrl", "AreaId", "IspId", "StartTime",
                    "EndTime"):
            setattr(self, key, params.get(key))
class CreatePullStreamConfigResponse(AbstractModel):
    """CreatePullStreamConfig response structure.

    Attributes:
        ConfigId (str): ID of the newly created configuration.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.ConfigId = None
        self.RequestId = None

    def _deserialize(self, params):
        for key in ("ConfigId", "RequestId"):
            setattr(self, key, params.get(key))
class DayStreamPlayInfo(AbstractModel):
    """Stream playback information.

    Attributes:
        Time (str): data time point, format ``yyyy-mm-dd HH:MM:SS``.
        Bandwidth (float): bandwidth, in Mbps.
        Flux (float): traffic, in MB.
        Request (int): number of requests.
        Online (int): number of online viewers.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.Time = None
        self.Bandwidth = None
        self.Flux = None
        self.Request = None
        self.Online = None

    def _deserialize(self, params):
        for key in ("Time", "Bandwidth", "Flux", "Request", "Online"):
            setattr(self, key, params.get(key))
class DelayInfo(AbstractModel):
    """Delayed-playback information.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
        DelayInterval (int): delay, in seconds.
        CreateTime (str): creation time, UTC (note: UTC is 8 hours behind
            Beijing time, e.g. 2019-06-18T12:00:00Z is 2019-06-18 20:00:00
            Beijing time).
        ExpireTime (str): expiry time, UTC (same offset note as above).
        Status (int): current status; -1 = expired, 1 = active.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None
        self.DelayInterval = None
        self.CreateTime = None
        self.ExpireTime = None
        self.Status = None

    def _deserialize(self, params):
        # Straight copy of scalar fields; keys absent from params stay None.
        for key in ("DomainName", "AppName", "StreamName", "DelayInterval",
                    "CreateTime", "ExpireTime", "Status"):
            setattr(self, key, params.get(key))
class DeleteLiveCallbackRuleRequest(AbstractModel):
    """DeleteLiveCallbackRule request payload.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName"):
            setattr(self, key, params.get(key))
class DeleteLiveCallbackRuleResponse(AbstractModel):
    """DeleteLiveCallbackRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveCallbackTemplateRequest(AbstractModel):
    """DeleteLiveCallbackTemplate request payload.

    Attributes:
        TemplateId (int): template ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
class DeleteLiveCallbackTemplateResponse(AbstractModel):
    """DeleteLiveCallbackTemplate response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveCertRequest(AbstractModel):
    """DeleteLiveCert request payload.

    Attributes:
        CertId (int): certificate ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.CertId = None

    def _deserialize(self, params):
        self.CertId = params.get("CertId")
class DeleteLiveCertResponse(AbstractModel):
    """DeleteLiveCert response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveDomainRequest(AbstractModel):
    """DeleteLiveDomain request payload.

    Attributes:
        DomainName (str): domain name to delete.
        DomainType (int): domain type; 0 = push, 1 = playback.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.DomainType = None

    def _deserialize(self, params):
        for key in ("DomainName", "DomainType"):
            setattr(self, key, params.get(key))
class DeleteLiveDomainResponse(AbstractModel):
    """DeleteLiveDomain response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveRecordRequest(AbstractModel):
    """DeleteLiveRecord request payload.

    Attributes:
        StreamName (str): stream name.
        TaskId (int): task ID, globally unique identifier of the
            recording task.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.StreamName = None
        self.TaskId = None

    def _deserialize(self, params):
        for key in ("StreamName", "TaskId"):
            setattr(self, key, params.get(key))
class DeleteLiveRecordResponse(AbstractModel):
    """DeleteLiveRecord response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveRecordRuleRequest(AbstractModel):
    """DeleteLiveRecordRule request payload.

    Domain + AppName + StreamName uniquely identify a single rule, and
    deletion requires an exact match: if AppName is empty, an empty
    string must still be passed to match exactly.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName"):
            setattr(self, key, params.get(key))
class DeleteLiveRecordRuleResponse(AbstractModel):
    """DeleteLiveRecordRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveRecordTemplateRequest(AbstractModel):
    """DeleteLiveRecordTemplate request payload.

    Attributes:
        TemplateId (int): template ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
class DeleteLiveRecordTemplateResponse(AbstractModel):
    """DeleteLiveRecordTemplate response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveSnapshotRuleRequest(AbstractModel):
    """DeleteLiveSnapshotRule request payload.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName"):
            setattr(self, key, params.get(key))
class DeleteLiveSnapshotRuleResponse(AbstractModel):
    """DeleteLiveSnapshotRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveSnapshotTemplateRequest(AbstractModel):
    """DeleteLiveSnapshotTemplate request payload.

    Attributes:
        TemplateId (int): template ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
class DeleteLiveSnapshotTemplateResponse(AbstractModel):
    """DeleteLiveSnapshotTemplate response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveTranscodeRuleRequest(AbstractModel):
    """DeleteLiveTranscodeRule request payload.

    Domain + AppName + StreamName + TemplateId uniquely identify a single
    transcoding rule, and deletion requires an exact match: if AppName is
    empty, an empty string must still be passed to match exactly.

    Attributes:
        DomainName (str): push domain name (domain-level transcoding).
        AppName (str): push path; must match the AppName in the push and
            playback URLs, defaults to "live".
        StreamName (str): stream name.
        TemplateId (int): template ID.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None
        self.TemplateId = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName", "TemplateId"):
            setattr(self, key, params.get(key))
class DeleteLiveTranscodeRuleResponse(AbstractModel):
    """DeleteLiveTranscodeRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveTranscodeTemplateRequest(AbstractModel):
    """DeleteLiveTranscodeTemplate request payload.

    Attributes:
        TemplateId (int): template ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
class DeleteLiveTranscodeTemplateResponse(AbstractModel):
    """DeleteLiveTranscodeTemplate response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveWatermarkRequest(AbstractModel):
    """DeleteLiveWatermark request payload.

    Attributes:
        WatermarkId (int): watermark ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.WatermarkId = None

    def _deserialize(self, params):
        self.WatermarkId = params.get("WatermarkId")
class DeleteLiveWatermarkResponse(AbstractModel):
    """DeleteLiveWatermark response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteLiveWatermarkRuleRequest(AbstractModel):
    """DeleteLiveWatermarkRule request payload.

    Attributes:
        DomainName (str): push domain name.
        AppName (str): push path.
        StreamName (str): stream name.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.DomainName = None
        self.AppName = None
        self.StreamName = None

    def _deserialize(self, params):
        for key in ("DomainName", "AppName", "StreamName"):
            setattr(self, key, params.get(key))
class DeleteLiveWatermarkRuleResponse(AbstractModel):
    """DeleteLiveWatermarkRule response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeletePullStreamConfigRequest(AbstractModel):
    """DeletePullStreamConfig request payload.

    Attributes:
        ConfigId (str): configuration ID.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.ConfigId = None

    def _deserialize(self, params):
        self.ConfigId = params.get("ConfigId")
class DeletePullStreamConfigResponse(AbstractModel):
    """DeletePullStreamConfig response structure.

    Attributes:
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DescribeBillBandwidthAndFluxListRequest(AbstractModel):
    """DescribeBillBandwidthAndFluxList request payload.

    Attributes:
        StartTime (str): start time, format ``yyyy-mm-dd HH:MM:SS``.
        EndTime (str): end time, same format; the span between start and
            end must not exceed 31 days.
        PlayDomains (list of str): live playback domains; omit for
            aggregate data.
        MainlandOrOversea (str): domestic or overseas; omit for both.
        Granularity (int): data granularity: 5 = 5-minute (default, span
            up to 1 day), 60 = hourly (span up to one month),
            1440 = daily (span up to one month).
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.StartTime = None
        self.EndTime = None
        self.PlayDomains = None
        self.MainlandOrOversea = None
        self.Granularity = None

    def _deserialize(self, params):
        for key in ("StartTime", "EndTime", "PlayDomains",
                    "MainlandOrOversea", "Granularity"):
            setattr(self, key, params.get(key))
class DescribeBillBandwidthAndFluxListResponse(AbstractModel):
    """DescribeBillBandwidthAndFluxList response structure.

    Attributes:
        PeakBandwidthTime (str): time of peak bandwidth, format
            ``yyyy-mm-dd HH:MM:SS``.
        PeakBandwidth (float): peak bandwidth, in Mbps.
        P95PeakBandwidthTime (str): time of 95th-percentile peak
            bandwidth, same format.
        P95PeakBandwidth (float): 95th-percentile peak bandwidth, in Mbps.
        SumFlux (float): total traffic, in MB.
        DataInfoList (list of BillDataInfo): detailed data points.
        RequestId (str): unique request ID, returned with every request;
            supply it when reporting a problem.
    """

    def __init__(self):
        # All attributes stay None until _deserialize() populates them.
        self.PeakBandwidthTime = None
        self.PeakBandwidth = None
        self.P95PeakBandwidthTime = None
        self.P95PeakBandwidth = None
        self.SumFlux = None
        self.DataInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        # Scalar fields are copied directly.
        for key in ("PeakBandwidthTime", "PeakBandwidth",
                    "P95PeakBandwidthTime", "P95PeakBandwidth",
                    "SumFlux", "RequestId"):
            setattr(self, key, params.get(key))
        # DataInfoList is rebuilt as BillDataInfo models only when present;
        # otherwise the attribute keeps its None default.
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                info = BillDataInfo()
                info._deserialize(entry)
                self.DataInfoList.append(info)
class DescribeGroupProIspPlayInfoListRequest(AbstractModel):
    """DescribeGroupProIspPlayInfoList request parameters.

    :param StartTime: Start time, yyyy-mm-dd HH:MM:SS.
    :type StartTime: str
    :param EndTime: End time, yyyy-mm-dd HH:MM:SS; span must be in (0, 3h],
        and only the last month of data can be queried.
    :type EndTime: str
    :param PlayDomains: Playback domains; omitted means overall data.
    :type PlayDomains: list of str
    :param ProvinceNames: Province list; omitted means per-province data.
    :type ProvinceNames: list of str
    :param IspNames: ISP list; omitted means per-ISP data.
    :type IspNames: list of str
    :param MainlandOrOversea: Region filter: empty = all regions,
        "Mainland" = domestic data, "Oversea" = overseas data.
    :type MainlandOrOversea: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.StartTime = None
        self.EndTime = None
        self.PlayDomains = None
        self.ProvinceNames = None
        self.IspNames = None
        self.MainlandOrOversea = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("StartTime", "EndTime", "PlayDomains",
                     "ProvinceNames", "IspNames", "MainlandOrOversea"):
            setattr(self, name, params.get(name))
class DescribeGroupProIspPlayInfoListResponse(AbstractModel):
    """DescribeGroupProIspPlayInfoList response parameters.

    :param DataInfoList: Data points.
    :type DataInfoList: list of GroupProIspDataInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DataInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_items = params.get("DataInfoList")
        if raw_items is not None:
            self.DataInfoList = []
            for entry in raw_items:
                info = GroupProIspDataInfo()
                info._deserialize(entry)
                self.DataInfoList.append(info)
        self.RequestId = params.get("RequestId")
class DescribeHttpStatusInfoListRequest(AbstractModel):
    """DescribeHttpStatusInfoList request parameters.

    :param StartTime: Start time, Beijing time, yyyy-mm-dd HH:MM:SS;
        must not be older than 3 months.
    :type StartTime: str
    :param EndTime: End time, Beijing time, yyyy-mm-dd HH:MM:SS;
        only the most recent day of data can be queried.
    :type EndTime: str
    :param PlayDomains: Playback domain list.
    :type PlayDomains: list of str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.StartTime = None
        self.EndTime = None
        self.PlayDomains = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("StartTime", "EndTime", "PlayDomains"):
            setattr(self, name, params.get(name))
class DescribeHttpStatusInfoListResponse(AbstractModel):
    """DescribeHttpStatusInfoList response parameters.

    :param DataInfoList: Playback HTTP status-code data points.
    :type DataInfoList: list of HttpStatusData
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DataInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_items = params.get("DataInfoList")
        if raw_items is not None:
            self.DataInfoList = []
            for entry in raw_items:
                status = HttpStatusData()
                status._deserialize(entry)
                self.DataInfoList.append(status)
        self.RequestId = params.get("RequestId")
class DescribeLiveCallbackRulesRequest(AbstractModel):
    """DescribeLiveCallbackRules request parameters.

    This request takes no parameters.
    """
class DescribeLiveCallbackRulesResponse(AbstractModel):
    """DescribeLiveCallbackRules response parameters.

    :param Rules: Callback rule list.
    :type Rules: list of CallBackRuleInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Rules = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = CallBackRuleInfo()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.RequestId = params.get("RequestId")
class DescribeLiveCallbackTemplateRequest(AbstractModel):
    """DescribeLiveCallbackTemplate request parameters.

    :param TemplateId: Template ID.
    :type TemplateId: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.TemplateId = params.get("TemplateId")
class DescribeLiveCallbackTemplateResponse(AbstractModel):
    """DescribeLiveCallbackTemplate response parameters.

    :param Template: Callback template information.
    :type Template: :class:`tencentcloud.live.v20180801.models.CallBackTemplateInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Template = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_template = params.get("Template")
        if raw_template is not None:
            self.Template = CallBackTemplateInfo()
            self.Template._deserialize(raw_template)
        self.RequestId = params.get("RequestId")
class DescribeLiveCallbackTemplatesRequest(AbstractModel):
    """DescribeLiveCallbackTemplates request parameters.

    This request takes no parameters.
    """
class DescribeLiveCallbackTemplatesResponse(AbstractModel):
    """DescribeLiveCallbackTemplates response parameters.

    :param Templates: Template information list.
    :type Templates: list of CallBackTemplateInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Templates = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_templates = params.get("Templates")
        if raw_templates is not None:
            self.Templates = []
            for entry in raw_templates:
                template = CallBackTemplateInfo()
                template._deserialize(entry)
                self.Templates.append(template)
        self.RequestId = params.get("RequestId")
class DescribeLiveCertRequest(AbstractModel):
    """DescribeLiveCert request parameters.

    :param CertId: Certificate ID.
    :type CertId: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.CertId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.CertId = params.get("CertId")
class DescribeLiveCertResponse(AbstractModel):
    """DescribeLiveCert response parameters.

    :param CertInfo: Certificate information.
    :type CertInfo: :class:`tencentcloud.live.v20180801.models.CertInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.CertInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_cert = params.get("CertInfo")
        if raw_cert is not None:
            self.CertInfo = CertInfo()
            self.CertInfo._deserialize(raw_cert)
        self.RequestId = params.get("RequestId")
class DescribeLiveCertsRequest(AbstractModel):
    """DescribeLiveCerts request parameters.

    This request takes no parameters.
    """
class DescribeLiveCertsResponse(AbstractModel):
    """DescribeLiveCerts response parameters.

    :param CertInfoSet: Certificate information list.
    :type CertInfoSet: list of CertInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.CertInfoSet = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_certs = params.get("CertInfoSet")
        if raw_certs is not None:
            self.CertInfoSet = []
            for entry in raw_certs:
                cert = CertInfo()
                cert._deserialize(entry)
                self.CertInfoSet.append(cert)
        self.RequestId = params.get("RequestId")
class DescribeLiveDelayInfoListRequest(AbstractModel):
    """DescribeLiveDelayInfoList request parameters.

    This request takes no parameters.
    """
class DescribeLiveDelayInfoListResponse(AbstractModel):
    """DescribeLiveDelayInfoList response parameters.

    :param DelayInfoList: Delayed-playback information list.
    :type DelayInfoList: list of DelayInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DelayInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_items = params.get("DelayInfoList")
        if raw_items is not None:
            self.DelayInfoList = []
            for entry in raw_items:
                info = DelayInfo()
                info._deserialize(entry)
                self.DelayInfoList.append(info)
        self.RequestId = params.get("RequestId")
class DescribeLiveDomainCertRequest(AbstractModel):
    """DescribeLiveDomainCert request parameters.

    :param DomainName: Playback domain name.
    :type DomainName: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.DomainName = params.get("DomainName")
class DescribeLiveDomainCertResponse(AbstractModel):
    """DescribeLiveDomainCert response parameters.

    :param DomainCertInfo: Certificate information.
    :type DomainCertInfo: :class:`tencentcloud.live.v20180801.models.DomainCertInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainCertInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_cert = params.get("DomainCertInfo")
        if raw_cert is not None:
            self.DomainCertInfo = DomainCertInfo()
            self.DomainCertInfo._deserialize(raw_cert)
        self.RequestId = params.get("RequestId")
class DescribeLiveDomainPlayInfoListRequest(AbstractModel):
    """DescribeLiveDomainPlayInfoList request parameters.

    :param PlayDomains: Playback domain list.
    :type PlayDomains: list of str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.PlayDomains = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.PlayDomains = params.get("PlayDomains")
class DescribeLiveDomainPlayInfoListResponse(AbstractModel):
    """DescribeLiveDomainPlayInfoList response parameters.

    :param Time: Data time, yyyy-mm-dd HH:MM:SS.
    :type Time: str
    :param TotalBandwidth: Real-time total bandwidth.
    :type TotalBandwidth: float
    :param TotalFlux: Real-time total traffic.
    :type TotalFlux: float
    :param TotalRequest: Total request count.
    :type TotalRequest: int
    :param TotalOnline: Real-time total connection count.
    :type TotalOnline: int
    :param DomainInfoList: Per-domain data.
    :type DomainInfoList: list of DomainInfoList
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.Time = None
        self.TotalBandwidth = None
        self.TotalFlux = None
        self.TotalRequest = None
        self.TotalOnline = None
        self.DomainInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("Time", "TotalBandwidth", "TotalFlux",
                     "TotalRequest", "TotalOnline"):
            setattr(self, name, params.get(name))
        raw_items = params.get("DomainInfoList")
        if raw_items is not None:
            self.DomainInfoList = []
            for entry in raw_items:
                info = DomainInfoList()
                info._deserialize(entry)
                self.DomainInfoList.append(info)
        self.RequestId = params.get("RequestId")
class DescribeLiveDomainRequest(AbstractModel):
    """DescribeLiveDomain request parameters.

    :param DomainName: Domain name.
    :type DomainName: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.DomainName = params.get("DomainName")
class DescribeLiveDomainResponse(AbstractModel):
    """DescribeLiveDomain response parameters.

    :param DomainInfo: Domain information.
    :type DomainInfo: :class:`tencentcloud.live.v20180801.models.DomainInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_info = params.get("DomainInfo")
        if raw_info is not None:
            self.DomainInfo = DomainInfo()
            self.DomainInfo._deserialize(raw_info)
        self.RequestId = params.get("RequestId")
class DescribeLiveDomainsRequest(AbstractModel):
    """DescribeLiveDomains request parameters.

    :param DomainStatus: Domain status filter: 0 = disabled, 1 = enabled.
    :type DomainStatus: int
    :param DomainType: Domain type filter: 0 = push, 1 = playback.
    :type DomainType: int
    :param PageSize: Page size, 10-100; defaults to 10.
    :type PageSize: int
    :param PageNum: Page number, 1-100000; defaults to 1.
    :type PageNum: int
    :param IsDelayLive: 0 = standard live, 1 = LCB (delayed live); defaults to 0.
    :type IsDelayLive: int
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DomainStatus = None
        self.DomainType = None
        self.PageSize = None
        self.PageNum = None
        self.IsDelayLive = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("DomainStatus", "DomainType", "PageSize",
                     "PageNum", "IsDelayLive"):
            setattr(self, name, params.get(name))
class DescribeLiveDomainsResponse(AbstractModel):
    """DescribeLiveDomains response parameters.

    :param AllCount: Total record count.
    :type AllCount: int
    :param DomainList: Detailed domain information list.
    :type DomainList: list of DomainInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.AllCount = None
        self.DomainList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.AllCount = params.get("AllCount")
        raw_domains = params.get("DomainList")
        if raw_domains is not None:
            self.DomainList = []
            for entry in raw_domains:
                domain = DomainInfo()
                domain._deserialize(entry)
                self.DomainList.append(domain)
        self.RequestId = params.get("RequestId")
class DescribeLiveForbidStreamListRequest(AbstractModel):
    """DescribeLiveForbidStreamList request parameters.

    :param PageNum: Page number to fetch; defaults to 1.
    :type PageNum: int
    :param PageSize: Page size, any integer 1-100; defaults to 10.
    :type PageSize: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.PageNum = None
        self.PageSize = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("PageNum", "PageSize"):
            setattr(self, name, params.get(name))
class DescribeLiveForbidStreamListResponse(AbstractModel):
    """DescribeLiveForbidStreamList response parameters.

    :param TotalNum: Total number of matching records.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Records shown per page.
    :type PageSize: int
    :param ForbidStreamList: List of forbidden streams.
    :type ForbidStreamList: list of ForbidStreamInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.TotalNum = None
        self.TotalPage = None
        self.PageNum = None
        self.PageSize = None
        self.ForbidStreamList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("TotalNum", "TotalPage", "PageNum", "PageSize"):
            setattr(self, name, params.get(name))
        raw_streams = params.get("ForbidStreamList")
        if raw_streams is not None:
            self.ForbidStreamList = []
            for entry in raw_streams:
                stream = ForbidStreamInfo()
                stream._deserialize(entry)
                self.ForbidStreamList.append(stream)
        self.RequestId = params.get("RequestId")
class DescribeLivePlayAuthKeyRequest(AbstractModel):
    """DescribeLivePlayAuthKey request parameters.

    :param DomainName: Domain name.
    :type DomainName: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.DomainName = params.get("DomainName")
class DescribeLivePlayAuthKeyResponse(AbstractModel):
    """DescribeLivePlayAuthKey response parameters.

    :param PlayAuthKeyInfo: Playback authentication key information.
    :type PlayAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PlayAuthKeyInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.PlayAuthKeyInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_key = params.get("PlayAuthKeyInfo")
        if raw_key is not None:
            self.PlayAuthKeyInfo = PlayAuthKeyInfo()
            self.PlayAuthKeyInfo._deserialize(raw_key)
        self.RequestId = params.get("RequestId")
class DescribeLivePushAuthKeyRequest(AbstractModel):
    """DescribeLivePushAuthKey request parameters.

    :param DomainName: Push domain name.
    :type DomainName: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.DomainName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.DomainName = params.get("DomainName")
class DescribeLivePushAuthKeyResponse(AbstractModel):
    """DescribeLivePushAuthKey response parameters.

    :param PushAuthKeyInfo: Push authentication key information.
    :type PushAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PushAuthKeyInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.PushAuthKeyInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_key = params.get("PushAuthKeyInfo")
        if raw_key is not None:
            self.PushAuthKeyInfo = PushAuthKeyInfo()
            self.PushAuthKeyInfo._deserialize(raw_key)
        self.RequestId = params.get("RequestId")
class DescribeLiveRecordRulesRequest(AbstractModel):
    """DescribeLiveRecordRules request parameters.

    This request takes no parameters.
    """
class DescribeLiveRecordRulesResponse(AbstractModel):
    """DescribeLiveRecordRules response parameters.

    :param Rules: Rule list.
    :type Rules: list of RuleInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Rules = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = RuleInfo()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplateRequest(AbstractModel):
    """DescribeLiveRecordTemplate request parameters.

    :param TemplateId: Template ID.
    :type TemplateId: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.TemplateId = params.get("TemplateId")
class DescribeLiveRecordTemplateResponse(AbstractModel):
    """DescribeLiveRecordTemplate response parameters.

    :param Template: Recording template information.
    :type Template: :class:`tencentcloud.live.v20180801.models.RecordTemplateInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Template = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_template = params.get("Template")
        if raw_template is not None:
            self.Template = RecordTemplateInfo()
            self.Template._deserialize(raw_template)
        self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplatesRequest(AbstractModel):
    """DescribeLiveRecordTemplates request parameters.

    :param IsDelayLive: Whether the template belongs to LCB (delayed live).
    :type IsDelayLive: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.IsDelayLive = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.IsDelayLive = params.get("IsDelayLive")
class DescribeLiveRecordTemplatesResponse(AbstractModel):
    """DescribeLiveRecordTemplates response parameters.

    :param Templates: Recording template information list.
    :type Templates: list of RecordTemplateInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Templates = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_templates = params.get("Templates")
        if raw_templates is not None:
            self.Templates = []
            for entry in raw_templates:
                template = RecordTemplateInfo()
                template._deserialize(entry)
                self.Templates.append(template)
        self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotRulesRequest(AbstractModel):
    """DescribeLiveSnapshotRules request parameters.

    This request takes no parameters.
    """
class DescribeLiveSnapshotRulesResponse(AbstractModel):
    """DescribeLiveSnapshotRules response parameters.

    :param Rules: Rule list.
    :type Rules: list of RuleInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Rules = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = RuleInfo()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplateRequest(AbstractModel):
    """DescribeLiveSnapshotTemplate request parameters.

    :param TemplateId: Template ID.
    :type TemplateId: int
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.TemplateId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        self.TemplateId = params.get("TemplateId")
class DescribeLiveSnapshotTemplateResponse(AbstractModel):
    """DescribeLiveSnapshotTemplate response parameters.

    :param Template: Screenshot template information.
    :type Template: :class:`tencentcloud.live.v20180801.models.SnapshotTemplateInfo`
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Template = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_template = params.get("Template")
        if raw_template is not None:
            self.Template = SnapshotTemplateInfo()
            self.Template._deserialize(raw_template)
        self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplatesRequest(AbstractModel):
    """DescribeLiveSnapshotTemplates request parameters.

    This request takes no parameters.
    """
class DescribeLiveSnapshotTemplatesResponse(AbstractModel):
    """DescribeLiveSnapshotTemplates response parameters.

    :param Templates: Screenshot template list.
    :type Templates: list of SnapshotTemplateInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.Templates = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_templates = params.get("Templates")
        if raw_templates is not None:
            self.Templates = []
            for entry in raw_templates:
                template = SnapshotTemplateInfo()
                template._deserialize(entry)
                self.Templates.append(template)
        self.RequestId = params.get("RequestId")
class DescribeLiveStreamEventListRequest(AbstractModel):
    """DescribeLiveStreamEventList request parameters.

    :param StartTime: Start time, UTC format (e.g. 2018-12-29T19:00:00Z);
        history up to 60 days back can be queried.
    :type StartTime: str
    :param EndTime: End time, UTC format (e.g. 2018-12-29T20:00:00Z); must not
        exceed the current time nor be more than 30 days after StartTime.
    :type EndTime: str
    :param AppName: Push path, matching the AppName in the push and playback
        URLs; defaults to "live".
    :type AppName: str
    :param DomainName: Push domain name.
    :type DomainName: str
    :param StreamName: Stream name; wildcards (*) are not supported and
        matching is fuzzy by default (use IsStrict for exact matching).
    :type StreamName: str
    :param PageNum: Page number to fetch; defaults to 1. Only the first
        10000 records can be queried.
    :type PageNum: int
    :param PageSize: Page size, any integer 1-100; defaults to 10. Only the
        first 10000 records can be queried.
    :type PageSize: int
    :param IsFilter: Filtering flag: 0 = no filtering (default), 1 = keep
        only streams that started successfully.
    :type IsFilter: int
    :param IsStrict: Exact-match flag: 0 = fuzzy (default), 1 = exact.
        Only effective together with StreamName.
    :type IsStrict: int
    :param IsAsc: Sort order by end time: 0 = descending (default),
        1 = ascending.
    :type IsAsc: int
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.StartTime = None
        self.EndTime = None
        self.AppName = None
        self.DomainName = None
        self.StreamName = None
        self.PageNum = None
        self.PageSize = None
        self.IsFilter = None
        self.IsStrict = None
        self.IsAsc = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("StartTime", "EndTime", "AppName", "DomainName",
                     "StreamName", "PageNum", "PageSize", "IsFilter",
                     "IsStrict", "IsAsc"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamEventListResponse(AbstractModel):
    """DescribeLiveStreamEventList response parameters.

    :param EventList: Push/interrupt event list.
    :type EventList: list of StreamEventInfo
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Page size.
    :type PageSize: int
    :param TotalNum: Total number of matching records.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.EventList = None
        self.PageNum = None
        self.PageSize = None
        self.TotalNum = None
        self.TotalPage = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_events = params.get("EventList")
        if raw_events is not None:
            self.EventList = []
            for entry in raw_events:
                event = StreamEventInfo()
                event._deserialize(entry)
                self.EventList.append(event)
        for name in ("PageNum", "PageSize", "TotalNum",
                     "TotalPage", "RequestId"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamOnlineInfoRequest(AbstractModel):
    """DescribeLiveStreamOnlineInfo request parameters.

    :param PageNum: Page number to fetch; defaults to 1.
    :type PageNum: int
    :param PageSize: Page size, any integer 1-100; defaults to 10.
    :type PageSize: int
    :param Status: 0 = push not started, 1 = pushing.
    :type Status: int
    :param StreamName: Stream name.
    :type StreamName: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.PageNum = None
        self.PageSize = None
        self.Status = None
        self.StreamName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("PageNum", "PageSize", "Status", "StreamName"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamOnlineInfoResponse(AbstractModel):
    """DescribeLiveStreamOnlineInfo response parameters.

    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Page size.
    :type PageSize: int
    :param TotalNum: Total number of matching records.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param StreamInfoList: Stream information list.
    :type StreamInfoList: list of StreamInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.PageNum = None
        self.PageSize = None
        self.TotalNum = None
        self.TotalPage = None
        self.StreamInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("PageNum", "PageSize", "TotalNum", "TotalPage"):
            setattr(self, name, params.get(name))
        raw_streams = params.get("StreamInfoList")
        if raw_streams is not None:
            self.StreamInfoList = []
            for entry in raw_streams:
                stream = StreamInfo()
                stream._deserialize(entry)
                self.StreamInfoList.append(stream)
        self.RequestId = params.get("RequestId")
class DescribeLiveStreamOnlineListRequest(AbstractModel):
    """DescribeLiveStreamOnlineList request parameters.

    :param DomainName: Push domain name.
    :type DomainName: str
    :param AppName: Push path, matching the AppName in the push and playback
        URLs; defaults to "live".
    :type AppName: str
    :param PageNum: Page number to fetch; defaults to 1.
    :type PageNum: int
    :param PageSize: Page size, any integer 10-100; defaults to 10.
    :type PageSize: int
    :param StreamName: Stream name, for exact matching.
    :type StreamName: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DomainName = None
        self.AppName = None
        self.PageNum = None
        self.PageSize = None
        self.StreamName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("DomainName", "AppName", "PageNum",
                     "PageSize", "StreamName"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamOnlineListResponse(AbstractModel):
    """DescribeLiveStreamOnlineList response parameters.

    :param TotalNum: Total number of matching records.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Records shown per page.
    :type PageSize: int
    :param OnlineInfo: Information on streams currently being pushed.
    :type OnlineInfo: list of StreamOnlineInfo
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.TotalNum = None
        self.TotalPage = None
        self.PageNum = None
        self.PageSize = None
        self.OnlineInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("TotalNum", "TotalPage", "PageNum", "PageSize"):
            setattr(self, name, params.get(name))
        raw_online = params.get("OnlineInfo")
        if raw_online is not None:
            self.OnlineInfo = []
            for entry in raw_online:
                info = StreamOnlineInfo()
                info._deserialize(entry)
                self.OnlineInfo.append(info)
        self.RequestId = params.get("RequestId")
class DescribeLiveStreamPublishedListRequest(AbstractModel):
    """DescribeLiveStreamPublishedList request parameters.

    :param DomainName: Your push domain name.
    :type DomainName: str
    :param EndTime: End time, UTC format (e.g. 2016-06-30T19:00:00Z); must not
        exceed the current time nor be more than 30 days after StartTime.
    :type EndTime: str
    :param StartTime: Start time, UTC format (e.g. 2016-06-29T19:00:00Z);
        up to 60 days of history can be queried.
    :type StartTime: str
    :param AppName: Push path, matching the AppName in the push and playback
        URLs; defaults to "live". Fuzzy matching is not supported.
    :type AppName: str
    :param PageNum: Page number to fetch; defaults to 1.
    :type PageNum: int
    :param PageSize: Page size, any integer 1-100; defaults to 10.
    :type PageSize: int
    :param StreamName: Stream name; supports fuzzy matching.
    :type StreamName: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DomainName = None
        self.EndTime = None
        self.StartTime = None
        self.AppName = None
        self.PageNum = None
        self.PageSize = None
        self.StreamName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("DomainName", "EndTime", "StartTime", "AppName",
                     "PageNum", "PageSize", "StreamName"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamPublishedListResponse(AbstractModel):
    """DescribeLiveStreamPublishedList response parameters.

    :param PublishInfo: Push history records.
    :type PublishInfo: list of StreamName
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Page size.
    :type PageSize: int
    :param TotalNum: Total number of matching records.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.PublishInfo = None
        self.PageNum = None
        self.PageSize = None
        self.TotalNum = None
        self.TotalPage = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_publish = params.get("PublishInfo")
        if raw_publish is not None:
            self.PublishInfo = []
            for entry in raw_publish:
                record = StreamName()
                record._deserialize(entry)
                self.PublishInfo.append(record)
        for name in ("PageNum", "PageSize", "TotalNum",
                     "TotalPage", "RequestId"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamPushInfoListRequest(AbstractModel):
    """DescribeLiveStreamPushInfoList request parameters.

    :param PushDomain: Push domain name.
    :type PushDomain: str
    :param AppName: Push path, matching the AppName in the push and playback
        URLs; defaults to "live".
    :type AppName: str
    :param PageNum: Page number, range [1, 10000]; defaults to 1.
    :type PageNum: int
    :param PageSize: Page size, range [1, 1000]; defaults to 200.
    :type PageSize: int
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.PushDomain = None
        self.AppName = None
        self.PageNum = None
        self.PageSize = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("PushDomain", "AppName", "PageNum", "PageSize"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamPushInfoListResponse(AbstractModel):
    """DescribeLiveStreamPushInfoList response parameters.

    :param DataInfoList: Statistics for live streams.
    :type DataInfoList: list of PushDataInfo
    :param TotalNum: Total number of online streams.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Number of online streams per page.
    :type PageSize: int
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DataInfoList = None
        self.TotalNum = None
        self.TotalPage = None
        self.PageNum = None
        self.PageSize = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_items = params.get("DataInfoList")
        if raw_items is not None:
            self.DataInfoList = []
            for entry in raw_items:
                info = PushDataInfo()
                info._deserialize(entry)
                self.DataInfoList.append(info)
        for name in ("TotalNum", "TotalPage", "PageNum",
                     "PageSize", "RequestId"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamStateRequest(AbstractModel):
    """DescribeLiveStreamState request parameters.

    :param AppName: Push path, matching the AppName in the push and playback
        URLs; defaults to "live".
    :type AppName: str
    :param DomainName: Your push domain name.
    :type DomainName: str
    :param StreamName: Stream name.
    :type StreamName: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.AppName = None
        self.DomainName = None
        self.StreamName = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("AppName", "DomainName", "StreamName"):
            setattr(self, name, params.get(name))
class DescribeLiveStreamStateResponse(AbstractModel):
    """DescribeLiveStreamState response parameters.

    :param StreamState: Stream state: "active", "inactive",
        or "forbid" (banned).
    :type StreamState: str
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # Filled in by _deserialize().
        self.StreamState = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("StreamState", "RequestId"):
            setattr(self, name, params.get(name))
class DescribeLiveTranscodeDetailInfoRequest(AbstractModel):
    """DescribeLiveTranscodeDetailInfo request parameters.

    :param DayTime: Query day, Beijing time, formatted as yyyymmdd;
        only a single day within the last 30 days can be queried.
    :type DayTime: str
    :param PushDomain: Push domain name.
    :type PushDomain: str
    :param StreamName: Stream name.
    :type StreamName: str
    :param PageNum: Page number; defaults to 1, at most 100 pages.
    :type PageNum: int
    :param PageSize: Page size; defaults to 20, range [10, 1000].
    :type PageSize: int
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DayTime = None
        self.PushDomain = None
        self.StreamName = None
        self.PageNum = None
        self.PageSize = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        for name in ("DayTime", "PushDomain", "StreamName",
                     "PageNum", "PageSize"):
            setattr(self, name, params.get(name))
class DescribeLiveTranscodeDetailInfoResponse(AbstractModel):
    """DescribeLiveTranscodeDetailInfo response parameters.

    :param DataInfoList: Statistics data points.
    :type DataInfoList: list of TranscodeDetailInfo
    :param PageNum: Current page number.
    :type PageNum: int
    :param PageSize: Page size.
    :type PageSize: int
    :param TotalNum: Total record count.
    :type TotalNum: int
    :param TotalPage: Total page count.
    :type TotalPage: int
    :param RequestId: Unique request ID, returned with every request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start empty; _deserialize() fills them in.
        self.DataInfoList = None
        self.PageNum = None
        self.PageSize = None
        self.TotalNum = None
        self.TotalPage = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate every field from the raw *params* dict."""
        raw_items = params.get("DataInfoList")
        if raw_items is not None:
            self.DataInfoList = []
            for entry in raw_items:
                info = TranscodeDetailInfo()
                info._deserialize(entry)
                self.DataInfoList.append(info)
        for name in ("PageNum", "PageSize", "TotalNum",
                     "TotalPage", "RequestId"):
            setattr(self, name, params.get(name))
class DescribeLiveTranscodeRulesRequest(AbstractModel):
    """DescribeLiveTranscodeRules request structure (takes no parameters).
    """
class DescribeLiveTranscodeRulesResponse(AbstractModel):
    """DescribeLiveTranscodeRules response structure."""

    def __init__(self):
        """
        :param Rules: List of transcoding rules.
        :type Rules: list of RuleInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.Rules = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = RuleInfo()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.RequestId = params.get("RequestId")
class DescribeLiveTranscodeTemplateRequest(AbstractModel):
    """DescribeLiveTranscodeTemplate request structure."""

    def __init__(self):
        """
        :param TemplateId: Template ID.
        :type TemplateId: int
        """
        self.TemplateId = None

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
class DescribeLiveTranscodeTemplateResponse(AbstractModel):
    """DescribeLiveTranscodeTemplate response structure."""

    def __init__(self):
        """
        :param Template: Template information.
        :type Template: :class:`tencentcloud.live.v20180801.models.TemplateInfo`
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.Template = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_template = params.get("Template")
        if raw_template is not None:
            self.Template = TemplateInfo()
            self.Template._deserialize(raw_template)
        self.RequestId = params.get("RequestId")
class DescribeLiveTranscodeTemplatesRequest(AbstractModel):
    """DescribeLiveTranscodeTemplates request structure (takes no parameters).
    """
class DescribeLiveTranscodeTemplatesResponse(AbstractModel):
    """DescribeLiveTranscodeTemplates response structure."""

    def __init__(self):
        """
        :param Templates: List of transcoding templates.
        :type Templates: list of TemplateInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.Templates = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_templates = params.get("Templates")
        if raw_templates is not None:
            self.Templates = []
            for entry in raw_templates:
                template = TemplateInfo()
                template._deserialize(entry)
                self.Templates.append(template)
        self.RequestId = params.get("RequestId")
class DescribeLiveWatermarkRequest(AbstractModel):
    """DescribeLiveWatermark request structure."""

    def __init__(self):
        """
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        """
        self.WatermarkId = None

    def _deserialize(self, params):
        self.WatermarkId = params.get("WatermarkId")
class DescribeLiveWatermarkResponse(AbstractModel):
    """DescribeLiveWatermark response structure."""

    def __init__(self):
        """
        :param Watermark: Watermark information.
        :type Watermark: :class:`tencentcloud.live.v20180801.models.WatermarkInfo`
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.Watermark = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_watermark = params.get("Watermark")
        if raw_watermark is not None:
            self.Watermark = WatermarkInfo()
            self.Watermark._deserialize(raw_watermark)
        self.RequestId = params.get("RequestId")
class DescribeLiveWatermarkRulesRequest(AbstractModel):
    """DescribeLiveWatermarkRules request structure (takes no parameters).
    """
class DescribeLiveWatermarkRulesResponse(AbstractModel):
    """DescribeLiveWatermarkRules response structure."""

    def __init__(self):
        """
        :param Rules: List of watermark rules.
        :type Rules: list of RuleInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.Rules = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_rules = params.get("Rules")
        if raw_rules is not None:
            self.Rules = []
            for entry in raw_rules:
                rule = RuleInfo()
                rule._deserialize(entry)
                self.Rules.append(rule)
        self.RequestId = params.get("RequestId")
class DescribeLiveWatermarksRequest(AbstractModel):
    """DescribeLiveWatermarks request structure (takes no parameters).
    """
class DescribeLiveWatermarksResponse(AbstractModel):
    """DescribeLiveWatermarks response structure."""

    def __init__(self):
        """
        :param TotalNum: Total number of watermarks.
        :type TotalNum: int
        :param WatermarkList: Watermark information list.
        :type WatermarkList: list of WatermarkInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("TotalNum", "WatermarkList", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        self.TotalNum = params.get("TotalNum")
        raw_list = params.get("WatermarkList")
        if raw_list is not None:
            self.WatermarkList = []
            for entry in raw_list:
                watermark = WatermarkInfo()
                watermark._deserialize(entry)
                self.WatermarkList.append(watermark)
        self.RequestId = params.get("RequestId")
class DescribeLogDownloadListRequest(AbstractModel):
    """DescribeLogDownloadList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "PlayDomains")

    def __init__(self):
        """
        :param StartTime: Start time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
            Note: EndTime - StartTime must be <= 7 days.
        :type EndTime: str
        :param PlayDomains: List of domain names.
        :type PlayDomains: list of str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeLogDownloadListResponse(AbstractModel):
    """DescribeLogDownloadList response structure."""

    def __init__(self):
        """
        :param LogInfoList: List of log information.
        :type LogInfoList: list of LogInfo
        :param TotalNum: Total number of entries.
        :type TotalNum: int
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("LogInfoList", "TotalNum", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_list = params.get("LogInfoList")
        if raw_list is not None:
            self.LogInfoList = []
            for entry in raw_list:
                log_item = LogInfo()
                log_item._deserialize(entry)
                self.LogInfoList.append(log_item)
        self.TotalNum = params.get("TotalNum")
        self.RequestId = params.get("RequestId")
class DescribePlayErrorCodeDetailInfoListRequest(AbstractModel):
    """DescribePlayErrorCodeDetailInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "Granularity", "StatType",
                    "PlayDomains")

    def __init__(self):
        """
        :param StartTime: Start time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
            Note: EndTime and StartTime only support querying the last day of data.
        :type EndTime: str
        :param Granularity: Query granularity: 1 = 1-minute granularity.
        :type Granularity: int
        :param StatType: Status code class to query; valid values include
            "4xx" and "5xx"; mixed forms such as "4xx,5xx" are supported.
        :type StatType: str
        :param PlayDomains: List of playback domain names.
        :type PlayDomains: list of str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribePlayErrorCodeDetailInfoListResponse(AbstractModel):
    """DescribePlayErrorCodeDetailInfoList response structure."""

    def __init__(self):
        """
        :param HttpCodeList: Statistics list.
        :type HttpCodeList: list of HttpCodeInfo
        :param StatType: Statistics type.
        :type StatType: str
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("HttpCodeList", "StatType", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_list = params.get("HttpCodeList")
        if raw_list is not None:
            self.HttpCodeList = []
            for entry in raw_list:
                code_info = HttpCodeInfo()
                code_info._deserialize(entry)
                self.HttpCodeList.append(code_info)
        self.StatType = params.get("StatType")
        self.RequestId = params.get("RequestId")
class DescribePlayErrorCodeSumInfoListRequest(AbstractModel):
    """DescribePlayErrorCodeSumInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "PlayDomains", "PageNum",
                    "PageSize")

    def __init__(self):
        """
        :param StartTime: Start time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
            Note: EndTime and StartTime only support querying the last day of data.
        :type EndTime: str
        :param PlayDomains: List of playback domains; omit for aggregate data.
        :type PlayDomains: list of str
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribePlayErrorCodeSumInfoListResponse(AbstractModel):
    """DescribePlayErrorCodeSumInfoList response structure."""

    def __init__(self):
        """
        :param ProIspInfoList: 4xx/5xx status-code data broken down by
            province and ISP.
        :type ProIspInfoList: list of ProIspPlayCodeDataInfo
        :param TotalCodeAll: Total count across all status codes.
        :type TotalCodeAll: int
        :param TotalCode4xx: Total count of 4xx status codes.
        :type TotalCode4xx: int
        :param TotalCode5xx: Total count of 5xx status codes.
        :type TotalCode5xx: int
        :param TotalCodeList: Per-status-code totals; currently supports
            400, 403, 404, 500, 502, 503, 504.
        :type TotalCodeList: list of PlayCodeTotalInfo
        :param PageNum: Page number.
        :type PageNum: int
        :param PageSize: Entries per page.
        :type PageSize: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param TotalNum: Total number of records.
        :type TotalNum: int
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("ProIspInfoList", "TotalCodeAll", "TotalCode4xx",
                     "TotalCode5xx", "TotalCodeList", "PageNum", "PageSize",
                     "TotalPage", "TotalNum", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_pro_isp = params.get("ProIspInfoList")
        if raw_pro_isp is not None:
            self.ProIspInfoList = []
            for entry in raw_pro_isp:
                record = ProIspPlayCodeDataInfo()
                record._deserialize(entry)
                self.ProIspInfoList.append(record)
        for name in ("TotalCodeAll", "TotalCode4xx", "TotalCode5xx"):
            setattr(self, name, params.get(name))
        raw_totals = params.get("TotalCodeList")
        if raw_totals is not None:
            self.TotalCodeList = []
            for entry in raw_totals:
                total = PlayCodeTotalInfo()
                total._deserialize(entry)
                self.TotalCodeList.append(total)
        for name in ("PageNum", "PageSize", "TotalPage", "TotalNum",
                     "RequestId"):
            setattr(self, name, params.get(name))
class DescribeProIspPlaySumInfoListRequest(AbstractModel):
    """DescribeProIspPlaySumInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "StatType", "PlayDomains",
                    "PageNum", "PageSize")

    def __init__(self):
        """
        :param StartTime: Start time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time, Beijing time, format: yyyy-mm-dd HH:MM:SS.
            Note: EndTime and StartTime only support querying the last day of data.
        :type EndTime: str
        :param StatType: Statistics type; valid values include "Province"
            and "Isp".
        :type StatType: str
        :param PlayDomains: Omit for aggregate data.
        :type PlayDomains: list of str
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeProIspPlaySumInfoListResponse(AbstractModel):
    """DescribeProIspPlaySumInfoList response structure."""

    def __init__(self):
        """
        :param TotalFlux: Total traffic.
        :type TotalFlux: float
        :param TotalRequest: Total number of requests.
        :type TotalRequest: int
        :param StatType: Statistics type.
        :type StatType: str
        :param PageSize: Entries per page.
        :type PageSize: int
        :param PageNum: Page number.
        :type PageNum: int
        :param TotalNum: Total number of records.
        :type TotalNum: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param DataInfoList: Aggregate data list by province or ISP.
        :type DataInfoList: list of ProIspPlaySumInfo
        :param AvgFluxPerSecond: Average bandwidth.
        :type AvgFluxPerSecond: float
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("TotalFlux", "TotalRequest", "StatType", "PageSize",
                     "PageNum", "TotalNum", "TotalPage", "DataInfoList",
                     "AvgFluxPerSecond", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in ("TotalFlux", "TotalRequest", "StatType", "PageSize",
                     "PageNum", "TotalNum", "TotalPage"):
            setattr(self, name, params.get(name))
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = ProIspPlaySumInfo()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        self.AvgFluxPerSecond = params.get("AvgFluxPerSecond")
        self.RequestId = params.get("RequestId")
class DescribeProvinceIspPlayInfoListRequest(AbstractModel):
    """DescribeProvinceIspPlayInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "Granularity", "StatType",
                    "PlayDomains", "ProvinceNames", "IspNames")

    def __init__(self):
        """
        :param StartTime: Start time point, Beijing time,
            e.g. 2019-02-21 10:00:00.
        :type StartTime: str
        :param EndTime: End time point, Beijing time,
            e.g. 2019-02-21 12:00:00.
            Note: EndTime and StartTime only support querying the last day of data.
        :type EndTime: str
        :param Granularity: Supported granularity:
            1 = 1-minute granularity (the span must not exceed 1 day).
        :type Granularity: int
        :param StatType: Statistics metric type:
            "Bandwidth", "FluxPerSecond" (average traffic), "Flux",
            "Request", or "Online" (concurrent connections).
        :type StatType: str
        :param PlayDomains: List of playback domain names.
        :type PlayDomains: list of str
        :param ProvinceNames: Optional; English names of provinces (regions)
            to query, e.g. Beijing.
        :type ProvinceNames: list of str
        :param IspNames: Optional; English names of ISPs to query, e.g.
            China Mobile. If empty, data for all ISPs is queried.
        :type IspNames: list of str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeProvinceIspPlayInfoListResponse(AbstractModel):
    """DescribeProvinceIspPlayInfoList response structure."""

    def __init__(self):
        """
        :param DataInfoList: List of playback information.
        :type DataInfoList: list of PlayStatInfo
        :param StatType: Statistics type, same as the request parameter.
        :type StatType: str
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("DataInfoList", "StatType", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                stat = PlayStatInfo()
                stat._deserialize(entry)
                self.DataInfoList.append(stat)
        self.StatType = params.get("StatType")
        self.RequestId = params.get("RequestId")
class DescribePullStreamConfigsRequest(AbstractModel):
    """DescribePullStreamConfigs request structure."""

    def __init__(self):
        """
        :param ConfigId: Configuration ID.
        :type ConfigId: str
        """
        self.ConfigId = None

    def _deserialize(self, params):
        self.ConfigId = params.get("ConfigId")
class DescribePullStreamConfigsResponse(AbstractModel):
    """DescribePullStreamConfigs response structure."""

    def __init__(self):
        """
        :param PullStreamConfigs: Stream-pulling configurations.
        :type PullStreamConfigs: list of PullStreamConfig
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.PullStreamConfigs = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_configs = params.get("PullStreamConfigs")
        if raw_configs is not None:
            self.PullStreamConfigs = []
            for entry in raw_configs:
                config = PullStreamConfig()
                config._deserialize(entry)
                self.PullStreamConfigs.append(config)
        self.RequestId = params.get("RequestId")
class DescribeStreamDayPlayInfoListRequest(AbstractModel):
    """DescribeStreamDayPlayInfoList request structure."""

    _FIELD_NAMES = ("DayTime", "PlayDomain", "PageNum", "PageSize")

    def __init__(self):
        """
        :param DayTime: Date, format: YYYY-mm-dd.
        :type DayTime: str
        :param PlayDomain: Playback domain name.
        :type PlayDomain: str
        :param PageNum: Page number, range [1,10], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [100,1000], default 1000.
        :type PageSize: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeStreamDayPlayInfoListResponse(AbstractModel):
    """DescribeStreamDayPlayInfoList response structure."""

    def __init__(self):
        """
        :param DataInfoList: List of playback data.
        :type DataInfoList: list of PlayDataInfoByStream
        :param TotalNum: Total count.
        :type TotalNum: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param PageNum: Page number of the current data.
        :type PageNum: int
        :param PageSize: Entries per page.
        :type PageSize: int
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("DataInfoList", "TotalNum", "TotalPage", "PageNum",
                     "PageSize", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = PlayDataInfoByStream()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        for name in ("TotalNum", "TotalPage", "PageNum", "PageSize",
                     "RequestId"):
            setattr(self, name, params.get(name))
class DescribeStreamPlayInfoListRequest(AbstractModel):
    """DescribeStreamPlayInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "PlayDomain", "StreamName",
                    "AppName")

    def __init__(self):
        """
        :param StartTime: Start time, Beijing time, format yyyy-mm-dd HH:MM:SS;
            the gap between now and the start time must not exceed 30 days.
        :type StartTime: str
        :param EndTime: End time, Beijing time, format yyyy-mm-dd HH:MM:SS;
            the end time and start time must fall on the same day.
        :type EndTime: str
        :param PlayDomain: Playback domain name; if omitted, online stream
            data of all playback domains is queried.
        :type PlayDomain: str
        :param StreamName: Stream name, exact match; if omitted, aggregate
            playback data is queried.
        :type StreamName: str
        :param AppName: Push path, same as the AppName in the push and
            playback URLs, default "live"; exact match only. If omitted,
            aggregate playback data is queried.
            Note: querying by AppName requires a support ticket.
        :type AppName: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeStreamPlayInfoListResponse(AbstractModel):
    """DescribeStreamPlayInfoList response structure."""

    def __init__(self):
        """
        :param DataInfoList: Statistics list.
        :type DataInfoList: list of DayStreamPlayInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.DataInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = DayStreamPlayInfo()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        self.RequestId = params.get("RequestId")
class DescribeStreamPushInfoListRequest(AbstractModel):
    """DescribeStreamPushInfoList request structure."""

    _FIELD_NAMES = ("StreamName", "StartTime", "EndTime", "PushDomain",
                    "AppName")

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param StartTime: Start time point, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time point, format: yyyy-mm-dd HH:MM:SS; the
            maximum span is 6 hours, and only the last 6 days can be queried.
        :type EndTime: str
        :param PushDomain: Push domain name.
        :type PushDomain: str
        :param AppName: Push path, same as the AppName in the push and
            playback URLs, default "live".
        :type AppName: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeStreamPushInfoListResponse(AbstractModel):
    """DescribeStreamPushInfoList response structure."""

    def __init__(self):
        """
        :param DataInfoList: Returned data list.
        :type DataInfoList: list of PushQualityData
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.DataInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = PushQualityData()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        self.RequestId = params.get("RequestId")
class DescribeTopClientIpSumInfoListRequest(AbstractModel):
    """DescribeTopClientIpSumInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "PlayDomains", "PageNum",
                    "PageSize", "OrderParam")

    def __init__(self):
        """
        :param StartTime: Start time point, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time point, format: yyyy-mm-dd HH:MM:SS; the
            span must be within (0,4 hours], and only the last day of data
            can be queried.
        :type EndTime: str
        :param PlayDomains: Playback domains; omit for aggregate data.
        :type PlayDomains: list of str
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        :param OrderParam: Sort metric; valid values include "TotalRequest",
            "FailedRequest", "TotalFlux".
        :type OrderParam: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeTopClientIpSumInfoListResponse(AbstractModel):
    """DescribeTopClientIpSumInfoList response structure."""

    def __init__(self):
        """
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        :param OrderParam: Sort metric; valid values include "TotalRequest",
            "FailedRequest", "TotalFlux".
        :type OrderParam: str
        :param TotalNum: Total number of records.
        :type TotalNum: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param DataInfoList: Data content.
        :type DataInfoList: list of ClientIpPlaySumInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("PageNum", "PageSize", "OrderParam", "TotalNum",
                     "TotalPage", "DataInfoList", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in ("PageNum", "PageSize", "OrderParam", "TotalNum",
                     "TotalPage"):
            setattr(self, name, params.get(name))
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = ClientIpPlaySumInfo()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        self.RequestId = params.get("RequestId")
class DescribeVisitTopSumInfoListRequest(AbstractModel):
    """DescribeVisitTopSumInfoList request structure."""

    _FIELD_NAMES = ("StartTime", "EndTime", "TopIndex", "PlayDomains",
                    "PageNum", "PageSize", "OrderParam")

    def __init__(self):
        """
        :param StartTime: Start time point, format: yyyy-mm-dd HH:MM:SS.
        :type StartTime: str
        :param EndTime: End time point, format: yyyy-mm-dd HH:MM:SS; the
            span must be within (0,4 hours], and only the last day of data
            can be queried.
        :type EndTime: str
        :param TopIndex: Peak metric; valid values include "Domain",
            "StreamId".
        :type TopIndex: str
        :param PlayDomains: Playback domains; omit for aggregate data.
        :type PlayDomains: list of str
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        :param OrderParam: Sort metric; valid values include
            "AvgFluxPerSecond", "TotalRequest" (default), "TotalFlux".
        :type OrderParam: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DescribeVisitTopSumInfoListResponse(AbstractModel):
    """DescribeVisitTopSumInfoList response structure."""

    def __init__(self):
        """
        :param PageNum: Page number, range [1,1000], default 1.
        :type PageNum: int
        :param PageSize: Entries per page, range [1,1000], default 20.
        :type PageSize: int
        :param TopIndex: Peak metric; valid values include "Domain",
            "StreamId".
        :type TopIndex: str
        :param OrderParam: Sort metric; valid values include
            "AvgFluxPerSecond", "TotalRequest" (default), "TotalFlux".
        :type OrderParam: str
        :param TotalNum: Total number of records.
        :type TotalNum: int
        :param TotalPage: Total number of pages.
        :type TotalPage: int
        :param DataInfoList: Data content.
        :type DataInfoList: list of PlaySumStatInfo
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        for name in ("PageNum", "PageSize", "TopIndex", "OrderParam",
                     "TotalNum", "TotalPage", "DataInfoList", "RequestId"):
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in ("PageNum", "PageSize", "TopIndex", "OrderParam",
                     "TotalNum", "TotalPage"):
            setattr(self, name, params.get(name))
        raw_list = params.get("DataInfoList")
        if raw_list is not None:
            self.DataInfoList = []
            for entry in raw_list:
                record = PlaySumStatInfo()
                record._deserialize(entry)
                self.DataInfoList.append(record)
        self.RequestId = params.get("RequestId")
class DomainCertInfo(AbstractModel):
    """Domain certificate information."""

    _FIELD_NAMES = ("CertId", "CertName", "Description", "CreateTime",
                    "HttpsCrt", "CertType", "CertExpireTime", "DomainName",
                    "Status")

    def __init__(self):
        """
        :param CertId: Certificate ID.
        :type CertId: int
        :param CertName: Certificate name.
        :type CertName: str
        :param Description: Description.
        :type Description: str
        :param CreateTime: Creation time, UTC format.
        :type CreateTime: str
        :param HttpsCrt: Certificate content.
        :type HttpsCrt: str
        :param CertType: Certificate type.
            0: Tencent Cloud-hosted certificate.
            1: user-added certificate.
        :type CertType: int
        :param CertExpireTime: Certificate expiration time, UTC format.
        :type CertExpireTime: str
        :param DomainName: Domain name that uses this certificate.
        :type DomainName: str
        :param Status: Certificate status.
        :type Status: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DomainDetailInfo(AbstractModel):
    """Per-domain statistics."""

    _FIELD_NAMES = ("MainlandOrOversea", "Bandwidth", "Flux", "Online",
                    "Request")

    def __init__(self):
        """
        :param MainlandOrOversea: Valid values: "Mainland" for data inside
            mainland China, "Oversea" for data outside it.
        :type MainlandOrOversea: str
        :param Bandwidth: Bandwidth in Mbps.
        :type Bandwidth: float
        :param Flux: Traffic in MB.
        :type Flux: float
        :param Online: Number of viewers.
        :type Online: int
        :param Request: Number of requests.
        :type Request: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DomainInfo(AbstractModel):
    """Live domain information."""

    _FIELD_NAMES = ("Name", "Type", "Status", "CreateTime", "BCName",
                    "TargetDomain", "PlayType", "IsDelayLive")

    def __init__(self):
        """
        :param Name: Live domain name.
        :type Name: str
        :param Type: Domain type. 0: push, 1: playback.
        :type Type: int
        :param Status: Domain status. 0: disabled, 1: enabled.
        :type Status: int
        :param CreateTime: Time the domain was added.
        :type CreateTime: str
        :param BCName: Whether a CNAME to a fixed-rule domain exists.
            0: no, 1: yes.
        :type BCName: int
        :param TargetDomain: Domain the CNAME points to.
        :type TargetDomain: str
        :param PlayType: Playback region; only meaningful when Type=1.
            1: mainland China, 2: global, 3: outside mainland China.
        :type PlayType: int
        :param IsDelayLive: 0: regular live streaming, 1: LCB (low-speed live).
        :type IsDelayLive: int
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DomainInfoList(AbstractModel):
    """Information list for multiple domains."""

    def __init__(self):
        """
        :param Domain: Domain name.
        :type Domain: str
        :param DetailInfoList: Detailed information.
        :type DetailInfoList: list of DomainDetailInfo
        """
        self.Domain = None
        self.DetailInfoList = None

    def _deserialize(self, params):
        self.Domain = params.get("Domain")
        raw_details = params.get("DetailInfoList")
        if raw_details is not None:
            self.DetailInfoList = []
            for entry in raw_details:
                detail = DomainDetailInfo()
                detail._deserialize(entry)
                self.DetailInfoList.append(detail)
class DropLiveStreamRequest(AbstractModel):
    """DropLiveStream request structure."""

    _FIELD_NAMES = ("StreamName", "DomainName", "AppName")

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param DomainName: Your acceleration domain name.
        :type DomainName: str
        :param AppName: Push path, same as the AppName in the push and
            playback URLs, default "live".
        :type AppName: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class DropLiveStreamResponse(AbstractModel):
    """DropLiveStream response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class EnableLiveDomainRequest(AbstractModel):
    """EnableLiveDomain request structure."""

    def __init__(self):
        """
        :param DomainName: Live domain name to enable.
        :type DomainName: str
        """
        self.DomainName = None

    def _deserialize(self, params):
        self.DomainName = params.get("DomainName")
class EnableLiveDomainResponse(AbstractModel):
    """EnableLiveDomain response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ForbidLiveDomainRequest(AbstractModel):
    """ForbidLiveDomain request structure."""

    def __init__(self):
        """
        :param DomainName: Live domain name to disable.
        :type DomainName: str
        """
        self.DomainName = None

    def _deserialize(self, params):
        self.DomainName = params.get("DomainName")
class ForbidLiveDomainResponse(AbstractModel):
    """ForbidLiveDomain response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ForbidLiveStreamRequest(AbstractModel):
    """ForbidLiveStream request structure."""

    _FIELD_NAMES = ("AppName", "DomainName", "StreamName", "ResumeTime")

    def __init__(self):
        """
        :param AppName: Push path, same as the AppName in the push and
            playback URLs, default "live".
        :type AppName: str
        :param DomainName: Your acceleration domain name.
        :type DomainName: str
        :param StreamName: Stream name.
        :type StreamName: str
        :param ResumeTime: Time to resume the stream, UTC format,
            e.g. 2018-11-29T19:00:00Z.
            Note: the default (and maximum) ban period is 90 days.
        :type ResumeTime: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class ForbidLiveStreamResponse(AbstractModel):
    """ForbidLiveStream response structure."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request;
            required when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ForbidStreamInfo(AbstractModel):
    """List entry for a stream banned from pushing."""

    _FIELD_NAMES = ("StreamName", "CreateTime", "ExpireTime")

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param CreateTime: Creation time.
        :type CreateTime: str
        :param ExpireTime: Time at which the push ban expires.
        :type ExpireTime: str
        """
        for name in self._FIELD_NAMES:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELD_NAMES:
            setattr(self, name, params.get(name))
class GroupProIspDataInfo(AbstractModel):
    """Bandwidth, traffic, requests, and concurrency of one province/ISP
    over a period of time."""

    def __init__(self):
        """
        :param ProvinceName: Province.
        :type ProvinceName: str
        :param IspName: ISP.
        :type IspName: str
        :param DetailInfoList: Detailed data at minute granularity.
        :type DetailInfoList: list of CdnPlayStatData
        """
        for name in ("ProvinceName", "IspName", "DetailInfoList"):
            setattr(self, name, None)

    def _deserialize(self, params):
        self.ProvinceName = params.get("ProvinceName")
        self.IspName = params.get("IspName")
        raw_details = params.get("DetailInfoList")
        if raw_details is not None:
            self.DetailInfoList = []
            for entry in raw_details:
                stat = CdnPlayStatData()
                stat._deserialize(entry)
                self.DetailInfoList.append(stat)
class HlsSpecialParam(AbstractModel):
    """HLS-specific recording parameters."""

    def __init__(self):
        """
        :param FlowContinueDuration: HLS stream-continuation timeout.
        :type FlowContinueDuration: int
        """
        self.FlowContinueDuration = None

    def _deserialize(self, params):
        self.FlowContinueDuration = params.get("FlowContinueDuration")
class HttpCodeInfo(AbstractModel):
    """HTTP status code and statistics."""

    def __init__(self):
        """
        :param HttpCode: HTTP status code class,
            e.g. "2xx", "3xx", "4xx", "5xx".
        :type HttpCode: str
        :param ValueList: Statistics; time points with no data are padded
            with 0.
        :type ValueList: list of HttpCodeValue
        """
        self.HttpCode = None
        self.ValueList = None

    def _deserialize(self, params):
        self.HttpCode = params.get("HttpCode")
        raw_values = params.get("ValueList")
        if raw_values is not None:
            self.ValueList = []
            for entry in raw_values:
                value = HttpCodeValue()
                value._deserialize(entry)
                self.ValueList.append(value)
class HttpCodeValue(AbstractModel):
    """Single data point for an HTTP return code.

    Attributes:
        Time (str): Timestamp, format yyyy-mm-dd HH:MM:SS.
        Numbers (int): Occurrence count.
        Percentage (float): Share of the total.
    """

    def __init__(self):
        for key in ("Time", "Numbers", "Percentage"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("Time", "Numbers", "Percentage"):
            setattr(self, key, params.get(key))
class HttpStatusData(AbstractModel):
    """Playback error-code data for one time point.

    Attributes:
        Time (str): Data time point, format yyyy-mm-dd HH:MM:SS.
        HttpStatusInfoList (list of HttpStatusInfo): Per-status-code details.
    """

    def __init__(self):
        self.Time = None
        self.HttpStatusInfoList = None

    def _deserialize(self, params):
        self.Time = params.get("Time")
        raw_infos = params.get("HttpStatusInfoList")
        if raw_infos is not None:
            self.HttpStatusInfoList = []
            for raw in raw_infos:
                info = HttpStatusInfo()
                info._deserialize(raw)
                self.HttpStatusInfoList.append(info)
class HttpStatusInfo(AbstractModel):
    """Count for one playback HTTP status code.

    Attributes:
        HttpStatus (str): Playback HTTP status code.
        Num (int): Occurrence count.
    """

    def __init__(self):
        for key in ("HttpStatus", "Num"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("HttpStatus", "Num"):
            setattr(self, key, params.get(key))
class LogInfo(AbstractModel):
    """Log URL descriptor.

    Attributes:
        LogName (str): Log file name.
        LogUrl (str): Log download URL.
        LogTime (str): Log generation time.
    """

    def __init__(self):
        for key in ("LogName", "LogUrl", "LogTime"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("LogName", "LogUrl", "LogTime"):
            setattr(self, key, params.get(key))
class ModifyLiveCallbackTemplateRequest(AbstractModel):
    """Request body of the ModifyLiveCallbackTemplate API.

    Attributes:
        TemplateId (int): Template ID.
        TemplateName (str): Template name.
        Description (str): Description.
        StreamBeginNotifyUrl (str): Stream-start callback URL.
        StreamEndNotifyUrl (str): Stream-end callback URL.
        RecordNotifyUrl (str): Recording callback URL.
        SnapshotNotifyUrl (str): Screenshot callback URL.
        PornCensorshipNotifyUrl (str): Porn-detection callback URL.
        CallbackKey (str): Callback key shared by the callback URLs; see the
            callback-format documentation for authentication details.
    """

    # Field names shared by __init__ and _deserialize.
    _FIELD_NAMES = (
        "TemplateId",
        "TemplateName",
        "Description",
        "StreamBeginNotifyUrl",
        "StreamEndNotifyUrl",
        "RecordNotifyUrl",
        "SnapshotNotifyUrl",
        "PornCensorshipNotifyUrl",
        "CallbackKey",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLiveCallbackTemplateResponse(AbstractModel):
    """Response body of the ModifyLiveCallbackTemplate API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLiveCertRequest(AbstractModel):
    """Request body of the ModifyLiveCert API.

    Attributes:
        CertId (str): Certificate ID.
        CertType (int): Certificate type: 0 = user-provided, 1 = Tencent
            Cloud-hosted.
        CertName (str): Certificate name.
        HttpsCrt (str): Certificate content (public key).
        HttpsKey (str): Private key.
        Description (str): Description.
    """

    _FIELD_NAMES = (
        "CertId",
        "CertType",
        "CertName",
        "HttpsCrt",
        "HttpsKey",
        "Description",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLiveCertResponse(AbstractModel):
    """Response body of the ModifyLiveCert API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLiveDomainCertRequest(AbstractModel):
    """Request body of the ModifyLiveDomainCert API.

    Attributes:
        DomainName (str): Playback domain name.
        CertId (int): Certificate ID.
        Status (int): Status: 0 = disabled, 1 = enabled.
    """

    def __init__(self):
        for key in ("DomainName", "CertId", "Status"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("DomainName", "CertId", "Status"):
            setattr(self, key, params.get(key))
class ModifyLiveDomainCertResponse(AbstractModel):
    """Response body of the ModifyLiveDomainCert API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLivePlayAuthKeyRequest(AbstractModel):
    """Request body of the ModifyLivePlayAuthKey API.

    Attributes:
        DomainName (str): Domain name.
        Enable (int): Whether enabled: 0 = off, 1 = on.
        AuthKey (str): Authentication key.
        AuthDelta (int): Validity period, in seconds.
        AuthBackKey (str): Backup authentication key.
    """

    _FIELD_NAMES = ("DomainName", "Enable", "AuthKey", "AuthDelta", "AuthBackKey")

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLivePlayAuthKeyResponse(AbstractModel):
    """Response body of the ModifyLivePlayAuthKey API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLivePlayDomainRequest(AbstractModel):
    """Request body of the ModifyLivePlayDomain API.

    Attributes:
        DomainName (str): Playback domain name.
        PlayType (int): Pull-domain type: 1 = domestic (Mainland China),
            2 = global, 3 = outside Mainland China.
    """

    def __init__(self):
        for key in ("DomainName", "PlayType"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("DomainName", "PlayType"):
            setattr(self, key, params.get(key))
class ModifyLivePlayDomainResponse(AbstractModel):
    """Response body of the ModifyLivePlayDomain API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLivePushAuthKeyRequest(AbstractModel):
    """Request body of the ModifyLivePushAuthKey API.

    Attributes:
        DomainName (str): Push domain name.
        Enable (int): Whether enabled: 0 = off, 1 = on.
        MasterAuthKey (str): Primary authentication key.
        BackupAuthKey (str): Backup authentication key.
        AuthDelta (int): Validity period, in seconds.
    """

    _FIELD_NAMES = (
        "DomainName",
        "Enable",
        "MasterAuthKey",
        "BackupAuthKey",
        "AuthDelta",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLivePushAuthKeyResponse(AbstractModel):
    """Response body of the ModifyLivePushAuthKey API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLiveRecordTemplateRequest(AbstractModel):
    """Request body of the ModifyLiveRecordTemplate API.

    Attributes:
        TemplateId (int): Template ID.
        TemplateName (str): Template name.
        Description (str): Description.
        FlvParam (RecordParam): FLV recording parameters; set when FLV
            recording is enabled.
        HlsParam (RecordParam): HLS recording parameters; set when HLS
            recording is enabled.
        Mp4Param (RecordParam): MP4 recording parameters; set when MP4
            recording is enabled.
        AacParam (RecordParam): AAC recording parameters; set when AAC
            recording is enabled.
        HlsSpecialParam (HlsSpecialParam): HLS-specific recording parameters.
        Mp3Param (RecordParam): MP3 recording parameters; set when MP3
            recording is enabled.
    """

    def __init__(self):
        for key in (
            "TemplateId", "TemplateName", "Description",
            "FlvParam", "HlsParam", "Mp4Param", "AacParam",
            "HlsSpecialParam", "Mp3Param",
        ):
            setattr(self, key, None)

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
        self.TemplateName = params.get("TemplateName")
        self.Description = params.get("Description")
        # Each format block is a nested model; hydrate only those present.
        nested_models = (
            ("FlvParam", RecordParam),
            ("HlsParam", RecordParam),
            ("Mp4Param", RecordParam),
            ("AacParam", RecordParam),
            ("HlsSpecialParam", HlsSpecialParam),
            ("Mp3Param", RecordParam),
        )
        for key, model_cls in nested_models:
            raw = params.get(key)
            if raw is not None:
                instance = model_cls()
                instance._deserialize(raw)
                setattr(self, key, instance)
class ModifyLiveRecordTemplateResponse(AbstractModel):
    """Response body of the ModifyLiveRecordTemplate API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLiveSnapshotTemplateRequest(AbstractModel):
    """Request body of the ModifyLiveSnapshotTemplate API.

    Attributes:
        TemplateId (int): Template ID.
        TemplateName (str): Template name.
        Description (str): Description.
        SnapshotInterval (int): Screenshot interval.
        Width (int): Screenshot width.
        Height (int): Screenshot height.
        PornFlag (int): Porn detection: 0 = disabled, 1 = enabled.
        CosAppId (int): COS AppId.
        CosBucket (str): COS bucket name.
        CosRegion (str): COS region.
    """

    _FIELD_NAMES = (
        "TemplateId",
        "TemplateName",
        "Description",
        "SnapshotInterval",
        "Width",
        "Height",
        "PornFlag",
        "CosAppId",
        "CosBucket",
        "CosRegion",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLiveSnapshotTemplateResponse(AbstractModel):
    """Response body of the ModifyLiveSnapshotTemplate API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyLiveTranscodeTemplateRequest(AbstractModel):
    """Request body of the ModifyLiveTranscodeTemplate API.

    Attributes:
        TemplateId (int): Template ID.
        Vcodec (str): Video codec: h264/h265.
        Acodec (str): Audio codec: aac/mp3.
        AudioBitrate (int): Audio bitrate; default 0. Range 0-500.
        Description (str): Template description.
        VideoBitrate (int): Video bitrate. Range 100-8000.
        Width (int): Width. Range 0-3000.
        NeedVideo (int): Keep video: 0 = no, 1 = yes. Default 1.
        NeedAudio (int): Keep audio: 0 = no, 1 = yes. Default 1.
        Height (int): Height. Range 0-3000.
        Fps (int): Frame rate. Range 0-200.
        Gop (int): Keyframe interval, in seconds. Range 0-50.
        Rotate (int): Rotation angle: 0, 90, 180, or 270.
        Profile (str): Encoding profile: baseline/main/high.
        BitrateToOrig (int): Cap bitrate at the original: 0 = no, 1 = yes.
            Default 0.
        HeightToOrig (int): Cap height at the original: 0 = no, 1 = yes.
            Default 0.
        FpsToOrig (int): Cap frame rate at the original: 0 = no, 1 = yes.
            Default 0.
    """

    _FIELD_NAMES = (
        "TemplateId",
        "Vcodec",
        "Acodec",
        "AudioBitrate",
        "Description",
        "VideoBitrate",
        "Width",
        "NeedVideo",
        "NeedAudio",
        "Height",
        "Fps",
        "Gop",
        "Rotate",
        "Profile",
        "BitrateToOrig",
        "HeightToOrig",
        "FpsToOrig",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyLiveTranscodeTemplateResponse(AbstractModel):
    """Response body of the ModifyLiveTranscodeTemplate API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyPullStreamConfigRequest(AbstractModel):
    """Request body of the ModifyPullStreamConfig API.

    Attributes:
        ConfigId (str): Configuration ID.
        FromUrl (str): Source URL.
        ToUrl (str): Destination URL.
        AreaId (int): Region ID: 1 = Shenzhen, 2 = Shanghai, 3 = Tianjin,
            4 = Hong Kong. If changed, IspId must be passed as well.
        IspId (int): ISP ID: 1 = China Telecom, 2 = China Mobile,
            3 = China Unicom, 4 = other. When AreaId is 4, IspId must be
            "other". If changed, AreaId must be passed as well.
        StartTime (str): Start time, in UTC format,
            e.g. 2019-01-08T10:00:00Z.
        EndTime (str): End time, in UTC format, e.g. 2019-01-08T10:00:00Z.
            Constraints: (1) end time must be later than start time;
            (2) both must be later than the current time;
            (3) the span must be under seven days.
    """

    _FIELD_NAMES = (
        "ConfigId",
        "FromUrl",
        "ToUrl",
        "AreaId",
        "IspId",
        "StartTime",
        "EndTime",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ModifyPullStreamConfigResponse(AbstractModel):
    """Response body of the ModifyPullStreamConfig API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyPullStreamStatusRequest(AbstractModel):
    """Request body of the ModifyPullStreamStatus API.

    Attributes:
        ConfigIds (list of str): Configuration IDs.
        Status (str): Target status: 0 = invalid, 2 = running, 4 = paused.
    """

    def __init__(self):
        for key in ("ConfigIds", "Status"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("ConfigIds", "Status"):
            setattr(self, key, params.get(key))
class ModifyPullStreamStatusResponse(AbstractModel):
    """Response body of the ModifyPullStreamStatus API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class PlayAuthKeyInfo(AbstractModel):
    """Playback authentication key information.

    Attributes:
        DomainName (str): Domain name.
        Enable (int): Whether enabled: 0 = off, 1 = on.
        AuthKey (str): Authentication key.
        AuthDelta (int): Validity period, in seconds.
        AuthBackKey (str): Backup authentication key.
    """

    _FIELD_NAMES = ("DomainName", "Enable", "AuthKey", "AuthDelta", "AuthBackKey")

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class PlayCodeTotalInfo(AbstractModel):
    """Total count per HTTP status code; currently supports
    400, 403, 404, 500, 502, 503, 504.

    Attributes:
        Code (str): HTTP code; one of 400, 403, 404, 500, 502, 503, 504.
        Num (int): Total occurrence count.
    """

    def __init__(self):
        for key in ("Code", "Num"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("Code", "Num"):
            setattr(self, key, params.get(key))
class PlayDataInfoByStream(AbstractModel):
    """Per-stream playback information.

    Attributes:
        StreamName (str): Stream name.
        TotalFlux (float): Total traffic, in MB.
    """

    def __init__(self):
        for key in ("StreamName", "TotalFlux"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("StreamName", "TotalFlux"):
            setattr(self, key, params.get(key))
class PlayStatInfo(AbstractModel):
    """Playback statistics queried by province and ISP.

    Attributes:
        Time (str): Data time point.
        Value (float): Bandwidth / traffic / request count / concurrent
            connections / download speed; 0 when there is no data.
            NOTE: this field may be null, meaning no valid value was obtained.
    """

    def __init__(self):
        for key in ("Time", "Value"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("Time", "Value"):
            setattr(self, key, params.get(key))
class PlaySumStatInfo(AbstractModel):
    """Aggregate playback statistics.

    Attributes:
        Name (str): Domain name or stream ID.
        AvgFluxPerSecond (float): Average download speed in MB/s, computed as
            the mean of each minute's download speed.
        TotalFlux (float): Total traffic, in MB.
        TotalRequest (int): Total request count.
    """

    _FIELD_NAMES = ("Name", "AvgFluxPerSecond", "TotalFlux", "TotalRequest")

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ProIspPlayCodeDataInfo(AbstractModel):
    """Playback error-code counts per province and ISP.

    Attributes:
        ProvinceName (str): Province name.
        IspName (str): ISP (carrier) name.
        Code4xx (int): Number of 4xx error codes.
        Code5xx (int): Number of 5xx error codes.
    """

    _FIELD_NAMES = ("ProvinceName", "IspName", "Code4xx", "Code5xx")

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class ProIspPlaySumInfo(AbstractModel):
    """Aggregate playback information for a province or ISP.

    Attributes:
        Name (str): Province or ISP name.
        TotalFlux (float): Total traffic, in MB.
        TotalRequest (int): Total request count.
        AvgFluxPerSecond (float): Average download traffic, in MB/s.
    """

    _FIELD_NAMES = ("Name", "TotalFlux", "TotalRequest", "AvgFluxPerSecond")

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class PublishTime(AbstractModel):
    """Push (publish) time record.

    Attributes:
        PublishTime (str): Push time, in UTC format,
            e.g. 2018-06-29T19:00:00Z.
    """

    def __init__(self):
        self.PublishTime = None

    def _deserialize(self, params):
        self.PublishTime = params.get("PublishTime")
class PullStreamConfig(AbstractModel):
    """Stream-pulling configuration.

    Attributes:
        ConfigId (str): Pull configuration ID.
        FromUrl (str): Source URL.
        ToUrl (str): Destination URL.
        AreaName (str): Region name.
        IspName (str): ISP (carrier) name.
        StartTime (str): Start time, in UTC format,
            e.g. 2019-01-08T10:00:00Z.
        EndTime (str): End time, in UTC format, e.g. 2019-01-08T10:00:00Z.
        Status (str): 0 = invalid, 1 = initial, 2 = running,
            3 = pull failed, 4 = paused.
    """

    _FIELD_NAMES = (
        "ConfigId",
        "FromUrl",
        "ToUrl",
        "AreaName",
        "IspName",
        "StartTime",
        "EndTime",
        "Status",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class PushAuthKeyInfo(AbstractModel):
    """Push authentication key information.

    Attributes:
        DomainName (str): Domain name.
        Enable (int): Whether enabled: 0 = off, 1 = on.
        MasterAuthKey (str): Primary authentication key.
        BackupAuthKey (str): Backup authentication key.
        AuthDelta (int): Validity period, in seconds.
    """

    _FIELD_NAMES = (
        "DomainName",
        "Enable",
        "MasterAuthKey",
        "BackupAuthKey",
        "AuthDelta",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class PushDataInfo(AbstractModel):
    """Push-stream data information.

    Attributes:
        StreamName (str): Stream name.
        AppName (str): Push path (application name).
        ClientIp (str): Push client IP.
        ServerIp (str): Receiving server IP.
        VideoFps (int): Video frame rate of the push, in Hz.
        VideoSpeed (int): Video bitrate of the push, in bps.
        AudioFps (int): Audio frame rate of the push, in Hz.
        AudioSpeed (int): Audio bitrate of the push, in bps.
        PushDomain (str): Push domain name.
        BeginPushTime (str): Push start time.
        Acodec (str): Audio codec, e.g. "AAC".
        Vcodec (str): Video codec, e.g. "H264".
        Resolution (str): Resolution.
    """

    _FIELD_NAMES = (
        "StreamName",
        "AppName",
        "ClientIp",
        "ServerIp",
        "VideoFps",
        "VideoSpeed",
        "AudioFps",
        "AudioSpeed",
        "PushDomain",
        "BeginPushTime",
        "Acodec",
        "Vcodec",
        "Resolution",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class PushQualityData(AbstractModel):
    """Detailed push-quality data for one stream.

    Attributes:
        Time (str): Data time, format %Y-%m-%d %H:%M:%S.%ms,
            millisecond precision.
        PushDomain (str): Push domain name.
        AppName (str): Push path (application name).
        ClientIp (str): Push client IP.
        BeginPushTime (str): Push start time, format %Y-%m-%d %H:%M:%S.%ms,
            millisecond precision.
        Resolution (str): Resolution information.
        VCodec (str): Video codec.
        ACodec (str): Audio codec.
        Sequence (str): Push sequence number, uniquely identifying one push.
        VideoFps (int): Video frame rate.
        VideoRate (int): Video bitrate, in bps.
        AudioFps (int): Audio frame rate.
        AudioRate (int): Audio bitrate, in bps.
        LocalTs (int): Local elapsed time, in ms. The larger the gap between
            A/V elapsed time and local elapsed time, the worse the push
            quality and the more severe the upstream stutter.
        VideoTs (int): Video elapsed time, in ms.
        AudioTs (int): Audio elapsed time, in ms.
    """

    _FIELD_NAMES = (
        "Time",
        "PushDomain",
        "AppName",
        "ClientIp",
        "BeginPushTime",
        "Resolution",
        "VCodec",
        "ACodec",
        "Sequence",
        "VideoFps",
        "VideoRate",
        "AudioFps",
        "AudioRate",
        "LocalTs",
        "VideoTs",
        "AudioTs",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class RecordParam(AbstractModel):
    """Recording template parameters.

    Attributes:
        RecordInterval (int): Recording interval in seconds; default 1800,
            range 300-7200. Ignored for HLS, where one file spans from push
            start to stream end.
        StorageTime (int): Recording storage duration in seconds;
            range 0-93312000, where 0 means permanent storage.
        Enable (int): Whether recording for this format is enabled:
            0 = no, 1 = yes. Default 0.
    """

    def __init__(self):
        for key in ("RecordInterval", "StorageTime", "Enable"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("RecordInterval", "StorageTime", "Enable"):
            setattr(self, key, params.get(key))
class RecordTemplateInfo(AbstractModel):
    """Recording template information.

    Attributes:
        TemplateId (int): Template ID.
        TemplateName (str): Template name.
        Description (str): Description.
        FlvParam (RecordParam): FLV recording parameters.
        HlsParam (RecordParam): HLS recording parameters.
        Mp4Param (RecordParam): MP4 recording parameters.
        AacParam (RecordParam): AAC recording parameters.
        IsDelayLive (int): 0 = standard live, 1 = slow live (LCB).
        HlsSpecialParam (HlsSpecialParam): HLS-specific recording parameters.
        Mp3Param (RecordParam): MP3 recording parameters.
    """

    def __init__(self):
        for key in (
            "TemplateId", "TemplateName", "Description",
            "FlvParam", "HlsParam", "Mp4Param", "AacParam",
            "IsDelayLive", "HlsSpecialParam", "Mp3Param",
        ):
            setattr(self, key, None)

    def _deserialize(self, params):
        self.TemplateId = params.get("TemplateId")
        self.TemplateName = params.get("TemplateName")
        self.Description = params.get("Description")
        # Per-format parameter blocks are nested models; hydrate only those
        # present in the response.
        for key, model_cls in (
            ("FlvParam", RecordParam),
            ("HlsParam", RecordParam),
            ("Mp4Param", RecordParam),
            ("AacParam", RecordParam),
        ):
            raw = params.get(key)
            if raw is not None:
                instance = model_cls()
                instance._deserialize(raw)
                setattr(self, key, instance)
        self.IsDelayLive = params.get("IsDelayLive")
        for key, model_cls in (
            ("HlsSpecialParam", HlsSpecialParam),
            ("Mp3Param", RecordParam),
        ):
            raw = params.get(key)
            if raw is not None:
                instance = model_cls()
                instance._deserialize(raw)
                setattr(self, key, instance)
class ResumeDelayLiveStreamRequest(AbstractModel):
    """Request body of the ResumeDelayLiveStream API.

    Attributes:
        AppName (str): Push path; matches the AppName in the push and
            playback URLs. Defaults to "live".
        DomainName (str): Push domain name.
        StreamName (str): Stream name.
    """

    def __init__(self):
        for key in ("AppName", "DomainName", "StreamName"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("AppName", "DomainName", "StreamName"):
            setattr(self, key, params.get(key))
class ResumeDelayLiveStreamResponse(AbstractModel):
    """Response body of the ResumeDelayLiveStream API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ResumeLiveStreamRequest(AbstractModel):
    """Request body of the ResumeLiveStream API.

    Attributes:
        AppName (str): Push path; matches the AppName in the push and
            playback URLs. Defaults to "live".
        DomainName (str): Your acceleration domain name.
        StreamName (str): Stream name.
    """

    def __init__(self):
        for key in ("AppName", "DomainName", "StreamName"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("AppName", "DomainName", "StreamName"):
            setattr(self, key, params.get(key))
class ResumeLiveStreamResponse(AbstractModel):
    """Response body of the ResumeLiveStream API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            supply it when reporting a problem with this request.
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class RuleInfo(AbstractModel):
    """Rule information.

    Attributes:
        CreateTime (str): Rule creation time.
        UpdateTime (str): Rule update time.
        TemplateId (int): Template ID.
        DomainName (str): Push domain name.
        AppName (str): Push path (application name).
        StreamName (str): Stream name.
    """

    _FIELD_NAMES = (
        "CreateTime",
        "UpdateTime",
        "TemplateId",
        "DomainName",
        "AppName",
        "StreamName",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class SnapshotTemplateInfo(AbstractModel):
    """Screenshot template information.

    Attributes:
        TemplateId (int): Template ID.
        TemplateName (str): Template name.
        SnapshotInterval (int): Screenshot interval, range 5-300.
        Width (int): Screenshot width, range 0-3000; 0 keeps the original
            width and aspect ratio.
        Height (int): Screenshot height, range 0-2000; 0 keeps the original
            height and aspect ratio.
        PornFlag (int): Porn detection: 0 = disabled, 1 = enabled.
        CosAppId (int): COS AppId.
        CosBucket (str): COS bucket name.
        CosRegion (str): COS region.
        Description (str): Template description.
    """

    _FIELD_NAMES = (
        "TemplateId",
        "TemplateName",
        "SnapshotInterval",
        "Width",
        "Height",
        "PornFlag",
        "CosAppId",
        "CosBucket",
        "CosRegion",
        "Description",
    )

    def __init__(self):
        for key in self._FIELD_NAMES:
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in self._FIELD_NAMES:
            setattr(self, key, params.get(key))
class StopLiveRecordRequest(AbstractModel):
    """Request body of the StopLiveRecord API.

    Attributes:
        StreamName (str): Stream name.
        TaskId (int): Task ID, globally unique identifier of the recording
            task.
    """

    def __init__(self):
        for key in ("StreamName", "TaskId"):
            setattr(self, key, None)

    def _deserialize(self, params):
        for key in ("StreamName", "TaskId"):
            setattr(self, key, params.get(key))
class StopLiveRecordResponse(AbstractModel):
    """StopLiveRecord response parameters."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every response; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single field copied straight from the response payload.
        self.RequestId = params.get("RequestId")
class StreamEventInfo(AbstractModel):
    """Push/stop stream event information."""

    _FIELDS = (
        "AppName", "DomainName", "StreamName", "StreamStartTime",
        "StreamEndTime", "StopReason", "Duration", "ClientIp", "Resolution",
    )

    def __init__(self):
        """
        :param AppName: Application name.
        :type AppName: str
        :param DomainName: Push domain name.
        :type DomainName: str
        :param StreamName: Stream name.
        :type StreamName: str
        :param StreamStartTime: Push start time, UTC format, e.g. 2019-01-07T12:00:00Z.
        :type StreamStartTime: str
        :param StreamEndTime: Push end time, UTC format, e.g. 2019-01-07T15:00:00Z.
        :type StreamEndTime: str
        :param StopReason: Reason the push stopped.
        :type StopReason: str
        :param Duration: Push duration in seconds.
        :type Duration: int
        :param ClientIp: Host (pusher) IP.
        :type ClientIp: str
        :param Resolution: Resolution.
        :type Resolution: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class StreamInfo(AbstractModel):
    """Push stream information."""

    _FIELDS = (
        "AppName", "CreateMode", "CreateTime", "Status",
        "StreamId", "StreamName", "WaterMarkId",
    )

    def __init__(self):
        """
        :param AppName: Application name the live stream belongs to.
        :type AppName: str
        :param CreateMode: Creation mode.
        :type CreateMode: str
        :param CreateTime: Creation time, e.g. 2018-07-13 14:48:23.
        :type CreateTime: str
        :param Status: Stream status.
        :type Status: int
        :param StreamId: Stream ID.
        :type StreamId: str
        :param StreamName: Stream name.
        :type StreamName: str
        :param WaterMarkId: Watermark ID.
        :type WaterMarkId: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class StreamName(AbstractModel):
    """Stream name list entry."""

    _FIELDS = (
        "StreamName", "AppName", "DomainName", "StreamStartTime",
        "StreamEndTime", "StopReason", "Duration", "ClientIp", "Resolution",
    )

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param AppName: Application name.
        :type AppName: str
        :param DomainName: Push domain name.
        :type DomainName: str
        :param StreamStartTime: Push start time, UTC format, e.g. 2019-01-07T12:00:00Z.
        :type StreamStartTime: str
        :param StreamEndTime: Push end time, UTC format, e.g. 2019-01-07T15:00:00Z.
        :type StreamEndTime: str
        :param StopReason: Reason the push stopped.
        :type StopReason: str
        :param Duration: Push duration in seconds.
        :type Duration: int
        :param ClientIp: Host (pusher) IP.
        :type ClientIp: str
        :param Resolution: Resolution.
        :type Resolution: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class StreamOnlineInfo(AbstractModel):
    """Information about streams currently being pushed."""

    # Scalar fields copied verbatim; PublishTimeList is handled separately
    # because its items are nested PublishTime objects.
    _FIELDS = ("StreamName", "AppName", "DomainName")

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param PublishTimeList: List of push times.
        :type PublishTimeList: list of PublishTime
        :param AppName: Application name.
        :type AppName: str
        :param DomainName: Push domain name.
        :type DomainName: str
        """
        self.PublishTimeList = None
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        raw_times = params.get("PublishTimeList")
        if raw_times is not None:
            # Rehydrate each entry into a PublishTime model object.
            self.PublishTimeList = []
            for entry in raw_times:
                record = PublishTime()
                record._deserialize(entry)
                self.PublishTimeList.append(record)
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class TemplateInfo(AbstractModel):
    """Transcoding template information."""

    _FIELDS = (
        "Vcodec", "VideoBitrate", "Acodec", "AudioBitrate", "Width", "Height",
        "Fps", "Gop", "Rotate", "Profile", "BitrateToOrig", "HeightToOrig",
        "FpsToOrig", "NeedVideo", "NeedAudio", "TemplateId", "TemplateName",
        "Description",
    )

    def __init__(self):
        """
        :param Vcodec: Video codec: h264/h265.
        :type Vcodec: str
        :param VideoBitrate: Video bitrate, 100-8000 kbps.
        :type VideoBitrate: int
        :param Acodec: Audio codec: aac/mp3.
        :type Acodec: str
        :param AudioBitrate: Audio bitrate, 0-500.
        :type AudioBitrate: int
        :param Width: Width, 0-3000.
        :type Width: int
        :param Height: Height, 0-3000.
        :type Height: int
        :param Fps: Frame rate, 0-200.
        :type Fps: int
        :param Gop: Keyframe interval in seconds, 1-50.
        :type Gop: int
        :param Rotate: Rotation angle: 0, 90, 180, 270.
        :type Rotate: int
        :param Profile: Encoding profile: baseline/main/high.
        :type Profile: str
        :param BitrateToOrig: Whether to cap at the original bitrate. 0: no, 1: yes.
        :type BitrateToOrig: int
        :param HeightToOrig: Whether to cap at the original height. 0: no, 1: yes.
        :type HeightToOrig: int
        :param FpsToOrig: Whether to cap at the original frame rate. 0: no, 1: yes.
        :type FpsToOrig: int
        :param NeedVideo: Whether to keep video. 0: no, 1: yes.
        :type NeedVideo: int
        :param NeedAudio: Whether to keep audio. 0: no, 1: yes.
        :type NeedAudio: int
        :param TemplateId: Template ID.
        :type TemplateId: int
        :param TemplateName: Template name.
        :type TemplateName: str
        :param Description: Template description.
        :type Description: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class TranscodeDetailInfo(AbstractModel):
    """Transcoding detail information."""

    _FIELDS = (
        "StreamName", "StartTime", "EndTime", "Duration",
        "ModuleCodec", "Bitrate", "Type", "PushDomain",
    )

    def __init__(self):
        """
        :param StreamName: Stream name.
        :type StreamName: str
        :param StartTime: Start time (Beijing time), format: yyyy-mm-dd HH:MM.
        :type StartTime: str
        :param EndTime: End time (Beijing time), format: yyyy-mm-dd HH:MM.
        :type EndTime: str
        :param Duration: Transcoding duration in minutes.
            Note: pushes may be interrupted and restarted, so this is the
            accumulated real transcoding time, not EndTime - StartTime.
        :type Duration: int
        :param ModuleCodec: Codec with module prefix, e.g.
            liveprocessor_H264 (live transcoding H264),
            liveprocessor_H265 (live transcoding H265),
            topspeed_H264 / topspeed_H265 (top-speed codec).
        :type ModuleCodec: str
        :param Bitrate: Bitrate.
        :type Bitrate: int
        :param Type: Type: Transcode, MixStream, or WaterMark.
        :type Type: str
        :param PushDomain: Push domain name.
        :type PushDomain: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UnBindLiveDomainCertRequest(AbstractModel):
    """UnBindLiveDomainCert request parameters."""

    def __init__(self):
        """
        :param DomainName: Playback domain name.
        :type DomainName: str
        """
        self.DomainName = None

    def _deserialize(self, params):
        # Single field copied straight from the request payload.
        self.DomainName = params.get("DomainName")
class UnBindLiveDomainCertResponse(AbstractModel):
    """UnBindLiveDomainCert response parameters."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every response; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class UpdateLiveWatermarkRequest(AbstractModel):
    """UpdateLiveWatermark request parameters."""

    _FIELDS = (
        "WatermarkId", "PictureUrl", "XPosition", "YPosition",
        "WatermarkName", "Width", "Height",
    )

    def __init__(self):
        """
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        :param PictureUrl: Watermark image URL.
        :type PictureUrl: str
        :param XPosition: Display position, X-axis offset.
        :type XPosition: int
        :param YPosition: Display position, Y-axis offset.
        :type YPosition: int
        :param WatermarkName: Watermark name.
        :type WatermarkName: str
        :param Width: Watermark width as a percentage of the original video width.
            Setting only one of width/height is recommended; the other scales
            automatically to avoid distortion.
        :type Width: int
        :param Height: Watermark height as a percentage of the original video width.
            Setting only one of width/height is recommended; the other scales
            automatically to avoid distortion.
        :type Height: int
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UpdateLiveWatermarkResponse(AbstractModel):
    """UpdateLiveWatermark response parameters."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every response; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class WatermarkInfo(AbstractModel):
    """Watermark information."""

    _FIELDS = (
        "WatermarkId", "PictureUrl", "XPosition", "YPosition",
        "WatermarkName", "Status", "CreateTime", "Width", "Height",
    )

    def __init__(self):
        """
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        :param PictureUrl: Watermark image URL.
        :type PictureUrl: str
        :param XPosition: Display position, X-axis offset.
        :type XPosition: int
        :param YPosition: Display position, Y-axis offset.
        :type YPosition: int
        :param WatermarkName: Watermark name.
        :type WatermarkName: str
        :param Status: Current status. 0: not in use, 1: in use.
        :type Status: int
        :param CreateTime: Time the watermark was added.
        :type CreateTime: str
        :param Width: Watermark width.
        :type Width: int
        :param Height: Watermark height.
        :type Height: int
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
|
[
"tencentcloudapi@tencent.com"
] |
tencentcloudapi@tencent.com
|
359bcec8a0f6c63dc3d1930f122275371211dbc2
|
ed41c5ea889a0f3490480842700f6cd6f192bbe2
|
/pdfWeights/limit_ratio/limit_ratio.py
|
92175af634b571bfb12257b384323f47249a263e
|
[] |
no_license
|
yeshaq/RA1
|
210a4f10fb821ec2f7677c06a2d78424b2593dee
|
f624dcf96e75ad15bffd497e4e404566ce4d3e9d
|
refs/heads/master
| 2020-03-29T12:25:43.640322
| 2014-07-19T23:04:29
| 2014-07-19T23:04:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
# Plot ratios of cross-section upper limits between PDF weight sets for SUSY
# simplified models (T2bb / T1bbbb), using PyROOT.
# NOTE(review): this is a Python 2 script (`print title` statement syntax).
import ROOT as r
import os, math, re

# One shared canvas reused for every plot; cosmetics set once up front.
canvas = r.TCanvas()
canvas.SetRightMargin(0.16)
#canvas.SetLeftMargin(.13)
canvas.SetTickx()
canvas.SetTicky()
r.gStyle.SetNumberContours(40)

# What to plot; the [0:1] slice keeps only the first model ("T2bb") while
# [0:6] keeps all PDF sets.  "ct61" is the reference set (numerator below).
models = ["T2bb","T1bbbb"][0:1]
pdfSets = ["ct61","ct10","ct66","ms08","nn21"][0:6]
histos = ["ExpectedUpperLimit", "UpperLimit"]
dirs = ["v3_normalized"]
suffix = ["","_normalized"][0]

# Style presets for the two limit curves.
# NOTE(review): these dicts are not referenced below — the line styles are
# re-applied inline on the TGraph instead.
UpperLimitDict = {"name":"UpperLimit",
                  "label":"#sigma^{NLO+NLL} #pm1 #sigma theory",
                  "lineStyle":1, "lineWidth":3, "color":r.kBlack}
ExpectedUpperlimitDict = {"name":"ExpectedUpperLimit",
                          "label":"Expected Limit #pm1 #sigma exp.",
                          "lineStyle":7, "lineWidth":3, "color":r.kViolet}

for model in models :
    for histo in histos :
        for pdfSet in pdfSets :
            epsFileName = "output_fullScale/%s_%s%s_%s_ratio.eps"%(model,pdfSet,suffix,histo)
            # Numerator is always the reference PDF set (pdfSets[0] == "ct61").
            numFile = r.TFile("%s/%s_%s.root"%(dirs[0],model,pdfSets[0]),"READ")
            numHist = numFile.Get(histo)
            denFile = r.TFile("%s/%s_%s%s.root"%(dirs[0],model,pdfSet,suffix),"READ")
            # The "ct61" files have no suffix variant — re-open without it.
            if pdfSet == "ct61" : denFile = r.TFile("%s/%s_%s.root"%(dirs[0],model,pdfSet),"READ")
            denHist = denFile.Get(histo)
            result = numHist.Clone()
            result.Divide(denHist)
            # Split CamelCase histogram name into words for the plot title.
            title = re.findall('[A-Z][^A-Z]*', '%s'%histo.replace("_2D_shifted",""))
            title = " ".join(title)
            print title
            result.SetTitle("Ratio of %s %s on #sigma;m_{sbottom} (GeV);m_{LSP} (GeV);ratio of %s on #sigma"%(model,title,histo.replace("_2D_shifted","")))
            result.SetTitleSize(.048,"X")
            result.SetTitleSize(.048,"Y")
            result.SetTitleSize(.048,"Z")
            result.SetMarkerStyle(20)
            result.SetStats(False)
            # Per-model axis ranges and z-scale for the ratio plot.
            if "T1bbbb" in model :
                result.SetMaximum(1.3)
                result.SetMinimum(0.9)
                result.GetXaxis().SetLabelSize(.05)
                result.GetYaxis().SetLabelSize(.05)
                result.GetZaxis().SetLabelSize(.05)
                result.GetYaxis().SetTitleOffset(.985)
                result.GetZaxis().SetTitleOffset(.985)
                result.GetXaxis().SetRangeUser(287.5,1400)
                result.GetYaxis().SetRangeUser(0.,1225)
            if "T2bb" in model :
                result.SetMaximum(1.3)
                result.SetMinimum(0.9)
                result.GetXaxis().SetLabelSize(.05)
                result.GetYaxis().SetLabelSize(.05)
                result.GetZaxis().SetLabelSize(.05)
                result.GetYaxis().SetTitleOffset(.985)
                result.GetZaxis().SetTitleOffset(.985)
                #result.GetXaxis().SetRangeUser(287.5,900)
                result.GetXaxis().SetRangeUser(287.5,1200)
                #result.GetYaxis().SetRangeUser(0,725)
                result.GetYaxis().SetRangeUser(0,1025)
            result.Draw("colz")
            ##limit curve TGraph#
            # Overlay the published exclusion contour on top of the ratio map.
            limitCurveFile = r.TFile("%s/%s_hcp.root"%(dirs[0],model),"READ")
            limitCurveGraph = limitCurveFile.Get(histo+"_graph")
            limitCurveGraph.SetLineStyle(1)
            limitCurveGraph.SetLineColor(r.kBlack)
            limitCurveGraph.SetLineWidth(3)
            if "Expected" in histo :
                limitCurveGraph.SetLineStyle(7)
                limitCurveGraph.SetLineColor(r.kViolet)
                limitCurveGraph.SetLineWidth(3)
            limitCurveGraph.Draw("same")
            canvas.Print(epsFileName)
            # result.Write()
            # Convert to pdf and drop the intermediate eps.
            os.system("epstopdf "+ epsFileName)
            os.remove(epsFileName)

# The block below (absolute-value plots rather than ratios) is kept as a
# commented-out reference, as in the original.
#for model in models :
#    for histo in histos :
#        for pdfSet in pdfSets :
#            epsFileName = "output_fullScale/%s_%s%s_%s.eps"%(model,pdfSet,suffix,histo)
#            origFile = r.TFile("%s/%s_%s%s.root"%(dirs[0],model,pdfSet,suffix),"READ")
#            if pdfSet == "cteq61l" : origFile = r.TFile("%s/%s_%s.root"%(dirs[0],model,pdfSet),"READ")
#            origHist = origFile.Get(histo)
#
#            result = origHist.Clone()
#            title = re.findall('[A-Z][^A-Z]*', '%s'%histo.replace("_2D_shifted",""))
#            title = " ".join(title)
#            print title
#            result.SetTitle("%s %s on #sigma;m_{sbottom} (GeV);m_{LSP} (GeV);%s on #sigma (pb)"%(model,title,histo.replace("_2D_shifted","")))
#            result.SetTitleSize(.048,"X")
#            result.SetTitleSize(.048,"Y")
#            result.SetTitleSize(.048,"Z")
#            result.GetYaxis().SetTitleOffset(.985)
#            result.GetZaxis().SetTitleOffset(.985)
#
#            result.SetMarkerStyle(20)
#            result.SetStats(False)
#
#            if "T1bbbb" in model :
#                result.SetMaximum(10)
#                result.SetMinimum(.001)
#                canvas.SetLogz()
#                result.GetXaxis().SetLabelSize(.05)
#                result.GetYaxis().SetLabelSize(.05)
#                result.GetZaxis().SetLabelSize(.05)
#
#            if "T2bb" in model :
#                result.GetXaxis().SetRangeUser(0,1200)
#                result.GetYaxis().SetRangeUser(0.,1200)
#                result.SetMaximum(10)
#                result.SetMinimum(.001)
#                canvas.SetLogz()
#                result.GetXaxis().SetLabelSize(.05)
#                result.GetYaxis().SetLabelSize(.05)
#                result.GetZaxis().SetLabelSize(.05)
#            result.Draw("colz")
#
#            canvas.Print(epsFileName)
##            result.Write()
#
#            os.system("epstopdf "+ epsFileName)
#            os.remove(epsFileName)
|
[
"yeshaq@yossof-UX31E.(none)"
] |
yeshaq@yossof-UX31E.(none)
|
78f4cff367c9710e80472df1cc9098fe72783fbe
|
7082709e5734a8d6034efac6865208baf53f5ae7
|
/chuckNorris/lib/python3.7/re.py
|
f504328aa7b7e02f376f72231abb05dccc3d7adf
|
[
"BSD-3-Clause"
] |
permissive
|
KevinFerin/chuck-norris-facts-api
|
0c092dbc9f0a371a51cda36324fff69cdf3346c7
|
3520a4e23c8fbfda782ad31c9970361d6abb36df
|
refs/heads/development
| 2020-09-10T14:36:04.480028
| 2019-11-14T14:57:59
| 2019-11-14T14:57:59
| 221,721,752
| 0
| 0
| null | 2019-11-21T13:00:21
| 2019-11-14T14:52:27
|
Python
|
UTF-8
|
Python
| false
| false
| 41
|
py
|
/home/kevin/anaconda3/lib/python3.7/re.py
|
[
"kevin.ferin.3@gmail.com"
] |
kevin.ferin.3@gmail.com
|
6238087954696c8b3ef8a9e8a1d63d31f9315dbc
|
d0346b154f34fb18f311ab0265da8b5a81f1d984
|
/Server/train_model.py
|
b0ff715af655e6da3eb5dd7e2c860754e2fc1669
|
[] |
no_license
|
NikolasBurzynski/HackBU2020
|
229cdb6e663b2228ef0a34790bd4fea59db5f578
|
ff33b188434303df5a831d481757e56e72112e5c
|
refs/heads/master
| 2022-12-26T05:02:05.362891
| 2020-02-11T04:52:47
| 2020-02-11T04:52:47
| 301,014,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# USAGE
# python train_model.py --embeddings output/embeddings.pickle
#	--recognizer output/recognizer.pickle --le output/le.pickle

# Train a face-recognition SVM on precomputed 128-d face embeddings and
# serialize both the model and the label encoder to disk.

# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
import argparse
import pickle

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--embeddings", required=True,
	help="path to serialized db of facial embeddings")
ap.add_argument("-r", "--recognizer", required=True,
	help="path to output model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
	help="path to output label encoder")
args = vars(ap.parse_args())

# load the face embeddings.  Using a context manager (and pickle.load on the
# file object) instead of pickle.loads(open(...).read()), which leaked the
# file handle.
print("[INFO] loading face embeddings...")
with open(args["embeddings"], "rb") as f:
	data = pickle.load(f)

# encode the string labels as integers
print("[INFO] encoding labels...")
le = LabelEncoder()
labels = le.fit_transform(data["names"])

# train the model used to accept the 128-d embeddings of the face and
# then produce the actual face recognition
print("[INFO] training model...")
recognizer = SVC(C=1.0, kernel="linear", probability=True)
recognizer.fit(data["embeddings"], labels)

# write the actual face recognition model to disk (with-blocks guarantee the
# handles are closed even if pickling fails)
with open(args["recognizer"], "wb") as f:
	f.write(pickle.dumps(recognizer))

# write the label encoder to disk
with open(args["le"], "wb") as f:
	f.write(pickle.dumps(le))
|
[
"jujubuscus@gmail.com"
] |
jujubuscus@gmail.com
|
5c24262b5869bb78d600241b7e07ecfb429f2b32
|
bd0b80bc66033c6a19a849e884cdb09988c1f3d2
|
/JejuCCAI.py
|
20a64f8a316e2fb3a860bb8caef3702076692f3a
|
[] |
no_license
|
schiaLab/JejuCC_AI
|
de062cd650f36130f29e6fcef444e8a2b87609b5
|
73cd043606754e1802d425707a58d730c5e05539
|
refs/heads/master
| 2022-12-07T16:43:10.642513
| 2020-09-04T09:21:19
| 2020-09-04T09:21:19
| 291,448,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,637
|
py
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm
import statistics
import sys
import math
print("Initiating Program.")

# Wraps a csv dataset in a pandas DataFrame and provides small helpers for
# indexing, row extraction, column slicing and missing-value handling.
# Console messages are bilingual English/Korean, as in the rest of the script.
class dataGenerator:
    # data: the working DataFrame; original: the frame as initially loaded.
    data = None
    original = None

    def __init__(self, fileName):
        """Load csv file `fileName` into self.data (and keep it in self.original)."""
        try:
            print("Instance Generate Command Detected. Instance Generating...\n 인스턴스 생성 명령이 감지되었습니다. 인스턴스 생성 중...")
            print("\n\nData received. Data reading. Might take time...\n데이터 입수. 데이터 읽는 중입니다. 데이터 크기에 따라 시간이 다소 소요될 수 있습니다...")
            self.data = pd.read_csv(fileName)
            self.original = self.data
        except:
            print("\n\nInitiated Failed. Recommendation: Check whether you forget to enter 'csv' file name as parameter. \n 인스턴스 생성 실패. 제안: 'csv' 파일 이름을 제대"
                  "파라미터로 넣었는지 확인하십시오.")
        else:
            print(self.data)
            print(
                "\n\nData reading Complete. Data type in pandas DataFrame object. Displaying data...\n 데이터 읽기가 완료되었습니다. 데이터 타입은 판다스 데이터프레임 객체입니다. 데이터 개요를 표시하였습니다.")
            print("\n\nInstance Successfully Generated. \n 인스턴스 생성 성공. 생성 프로세스 종료.")

    def dataIndexing(self, columnName):
        """Use column `columnName` of self.data as its index (in place)."""
        self.data.index = self.data.loc[:, columnName]
        print(self.data)
        print("Indexing Complete. \n %s열에 대해 인덱싱이 완료되었습니다." % columnName)

    def dataIndexInitiate(self):
        """Reset self.data's index to 0..n-1 (requires at least two columns)."""
        self.data.index = range(len(self.data.iloc[:, 1]))
        print(self.data)
        print("Index initiate Complete. \n 객체의 data 애트리뷰트의 인덱싱이 초기되었습니다.")

    def dataExtract(self, dataColumnName, extractingElementName):
        """Return rows where column `dataColumnName` equals `extractingElementName`."""
        try:
            return self.data[:][(self.data[dataColumnName] == extractingElementName)]
        except:
            print("Error.Cannot Extract data. Recommendation: Check if element is in the existing column of your data. \n"
                  "오류. 데이터 추출에 실패했습니다. 제안: 입력하신 칼럼이 존재하는지, 혹은 해당 칼럼 안에 입력하신 데이터가 존재하는지 확인해 주세요.")

    def columnBreaker(self, columnName, index=None, type="numpy"):
        """Return a single column, as a DataFrame ("pandas") or a 1-D numpy array ("numpy").

        BUGFIX: the status messages used to sit after `return` (unreachable);
        they are now printed before returning.  `index is None` is used
        instead of `index == None`, which is ambiguous for pandas indexes.
        """
        if index is None and type == "pandas":
            print("column slicing complete. Data type in pandas DataFrame. \n 칼럼 분리에 성공했습니다. 자료형은 판다스 데이터프레임 객체입니다.")
            return pd.DataFrame(self.data[:][columnName])
        elif index is None and type == "numpy":
            print("column slicing complete. Data type in Numpy array. \n 칼럼 분리에 성공했습니다. 자료형은 넘파이 객체입니다.")
            return pd.DataFrame(self.data[:][columnName]).to_numpy().T[0]
        elif index is not None and type == "pandas":
            index2 = index.tolist()
            print("column slicing complete. Data type in pandas DataFrame with custom index. \n 칼럼 분리에 성공했습니다. 자료형은 인덱스가 붙은 판다스 데이터프레임 객체입니다.")
            return pd.DataFrame(self.data[:][columnName], index=index2)

    def noneCare(self, type):
        """Missing-value handling.

        type="delete": drop rows containing NaN.  BUGFIX: the dropna() result
        was previously discarded (no-op); it is now assigned back.
        type="check": print every row containing None / "None".  BUGFIX:
        fixed `colums` typo, `range[...]` call syntax, broken row collection
        (DataFrame.append misuse) and the unformatted "%d" progress message.
        """
        if type is None:
            print("Error. Please type 'type' parameter. \n 오류. 어떤 종류의 데이터 정화 작업인지 'type' 파라미터를 통해 알려주십시오.")
        elif type == "delete":
            self.data = self.data.dropna()
        elif type == "check":
            matches = []
            print("Searching initiated. \n None 객체를 찾습니다. \n 참고로, 단순히 None의 존재만 파악하실 것이면 판다스 객체는 해당 메소드를 지원합니다.")
            for n in range(len(self.data.index)):
                for item in self.data.iloc[n][:]:
                    if item is None or item == "None":
                        matches.append(self.data.iloc[n])
                if n % 50 == 0 and n != 0:
                    print("%d rows searched. \n %d번째 행을 분석중입니다." % (n, n))
            print("Searching Complete. Displaying results...\n 모든 행과 열을 확인했습니다. 결과 출력 중...")
            print(pd.DataFrame(matches, columns=self.data.columns.tolist()))
def model(loc, std, max, input, parameter):
    """Piecewise-linear peak function.

    Rises toward (`loc`, `max`) with slope std/parameter and falls after
    `loc` with slope -1/(std*parameter); negative results clamp to 0.
    (`max` and `input` shadow builtins — kept for caller compatibility.)
    """
    delta = input - loc
    if input >= loc:
        value = (-(1 / (std * parameter)) * delta) + max
    else:
        value = (std * (1 / (parameter)) * delta) + max
    # Clamp: the model never goes below zero.
    return 0 if value < 0 else value
def standard(oneDimensionNumpyArray):
    """Normalize a 1-D numpy array so its entries sum to 1."""
    total = oneDimensionNumpyArray.sum()
    return oneDimensionNumpyArray / total
def dataIndexing(dataset, columnName):
    """Use column `columnName` as the index of `dataset` (mutates and returns it)."""
    new_index = dataset.loc[:, columnName]
    dataset.index = new_index
    print(dataset)
    print("Indexing Complete. \n %s열에 대해 인덱싱이 완료되었습니다." % columnName)
    return dataset
def dataIndexInitiate(dataset):
    """Reset `dataset`'s index to 0..n-1 (mutates and returns it).

    NOTE(review): row count is read from the second column (iloc[:, 1]),
    so the frame must have at least two columns.
    """
    row_count = len(dataset.iloc[:, 1])
    dataset.index = range(row_count)
    print(dataset)
    print("Index initiate Complete. \n 객체의 data 애트리뷰트의 인덱싱이 초기되었습니다.")
    return dataset
def groupbyToDataframe(series):
    """Flatten a groupby result (Series keyed by its index) back into a DataFrame."""
    frame = series.reset_index()
    return frame
def dataLabeling(dataframe, listOfLocations):
    """Keep only the rows for the regions in `listOfLocations`.

    Indexes `dataframe` by CARD_SIDO_NM, slices the requested regions, then
    resets the slice's index to 0..n-1 and returns it.
    """
    dataIndexing(dataframe, "CARD_SIDO_NM")
    selected = dataframe.loc[listOfLocations, :]
    selected = dataIndexInitiate(selected)
    return selected
def dataCleaning(dataframe):
    """Compute the per-group year-over-year delta of the frame's last column.

    Months are assumed to be renumbered already: rows indexed 1-4 are
    Jan-Apr 2019 (pre-corona) and 13-16 are Jan-Apr 2020 (post-corona).
    Returns a DataFrame of (post - pre) differences, indexed by REG_YYMM.
    """
    dataframe1 = dataIndexing(dataframe, "REG_YYMM")
    # Slice the pre/post corona periods of the month-indexed frame.
    precorona = dataframe1.loc[[1, 2, 3, 4], :]
    print(precorona)
    postcorona = dataframe1.loc[[13, 14, 15, 16], :]
    print(postcorona)
    dataIndexInitiate(precorona)
    dataIndexInitiate(postcorona)
    # Renumber post-corona months 13-16 back to 1-4 so both periods align.
    for n in [1, 2, 3, 4]:
        postcorona.loc[:, "REG_YYMM"][(postcorona.loc[:, "REG_YYMM"] == 12 + n)] = n
    print(postcorona)
    # The last column holds the metric; key it by all other columns
    # (MultiIndex) so subtraction matches identical (month, region,
    # industry) tuples.
    preco = precorona.iloc[:, -1]
    postco = postcorona.iloc[:, -1]
    preco.index = pd.MultiIndex.from_frame(precorona.iloc[:, :-1])
    postco.index = pd.MultiIndex.from_frame(postcorona.iloc[:, :-1])
    print("Post Corona")
    print(postco)
    print("Pre Corona")
    print(preco)
    # Difference per aligned key; keys present in only one period drop out.
    data2 = (postco - preco).dropna()
    print("Delta data")
    print(data2)
    cleandata = data2.to_frame().reset_index()
    print(cleandata)
    return dataIndexing(cleandata, "REG_YYMM")
def moneyPerUseOptimize(dataframe):
    """Fit the per-(region, industry) "alpha" model for money-per-use.

    Builds the corona delta of mean Money_per_use per group, expands each
    group's four monthly deltas into a pseudo-sample and fits a normal
    distribution to it.  Returns a DataFrame with columns
    [CARD_SIDO_NM, STD_CLSS_NM, alphaLoc, alphaStand, alphaMin].
    """
    MPU = dataframe.groupby(["REG_YYMM", "CARD_SIDO_NM", "STD_CLSS_NM"])["Money_per_use"].mean()
    MPU = groupbyToDataframe(MPU)
    print(MPU)
    data2 = dataCleaning(MPU)
    alpha = list()
    for location in data2.loc[:, "CARD_SIDO_NM"].unique():
        for industry in data2.loc[:, "STD_CLSS_NM"].unique():
            checker = data2.loc[(data2["CARD_SIDO_NM"]==location) & (data2["STD_CLSS_NM"] == industry)]
            print("Checker")
            print(checker)
            alphamin = checker.loc[:, "Money_per_use"].min()
            # Fallback peak location (month 3) used when a full fit is impossible.
            alphaminLoc = 3
            newChecker = list()
            if len(checker.iloc[:, 0]) == 4:
                # All four months present: repeat each month index `value`
                # times (value = negated delta, truncated to int) to build a
                # sample norm.fit can consume.  Negative values are mirrored
                # across the month axis via int(6/num - 1).
                for num in checker.loc[:, "REG_YYMM"]:
                    for value in -checker.loc[:, "Money_per_use"][checker["REG_YYMM"] == num]:
                        if value <= 0:
                            for n in range(int(-value)):
                                newChecker.append(int((6/num)-1))
                                print("append", int((6/num)-1), -value)
                        else:
                            for n in range(int(value)):
                                newChecker.append(num)
                                print("append", num, value)
                mu, std = norm.fit(newChecker)
                alpha.append([location, industry, mu, std, alphamin])
            elif len(checker.iloc[:, 0]) > 0:
                # Partial history: default location, unit spread.
                alpha.append([location, industry, alphaminLoc, 1, alphamin])
            else:
                # No data at all for this (region, industry) pair.
                alpha.append([location, industry, alphaminLoc, 1, 0])
    alpha = pd.DataFrame(alpha, columns= ["CARD_SIDO_NM", "STD_CLSS_NM","alphaLoc", "alphaStand", "alphaMin"])
    print(alpha)
    return alpha
def usePerCumstomerOptimize(dataframe):
    """Fit the per-(region, industry) use-count model.

    Same pseudo-sample trick as moneyPerUseOptimize, applied to the corona
    delta of summed use_per_cumstomer.  Regions are split into three tiers;
    groups without a full four-month history borrow their tier's averaged
    mean/std, filled in via placeholder strings at the end.  Returns columns
    [CARD_SIDO_NM, STD_CLSS_NM, modelMean, modelStand].
    """
    CPU = dataframe.groupby(["REG_YYMM", "CARD_SIDO_NM", "STD_CLSS_NM"])["use_per_cumstomer"].sum()
    CPU = groupbyToDataframe(CPU)
    print(CPU)
    data2 = dataCleaning(CPU)
    # Per-tier accumulators used to compute the tier fallback parameters.
    muList0 = list()
    stdList0 = list()
    muList1 = list()
    stdList1 = list()
    muList2 = list()
    stdList2 = list()
    customModel = list()
    for location in data2.loc[:, "CARD_SIDO_NM"].unique():
        for industry in data2.loc[:, "STD_CLSS_NM"].unique():
            checker = data2.loc[(data2["CARD_SIDO_NM"] == location) & (data2["STD_CLSS_NM"] == industry)]
            # Clamp positive deltas to -1 so only decreases drive the fit.
            checker.loc[:, "use_per_cumstomer"][(checker["use_per_cumstomer"] > 0)] = -1
            print("Checker")
            print(checker)
            newChecker = list()
            if len(checker.iloc[:, 1]) == 4:
                # Expand monthly deltas into a pseudo-sample (see
                # moneyPerUseOptimize for the mirroring trick) and fit.
                for num in checker.loc[:, "REG_YYMM"]:
                    for value in -checker.loc[:, "use_per_cumstomer"][checker["REG_YYMM"] == num]:
                        if value <= 0:
                            for n in range(int(-value)):
                                newChecker.append(int((6/num)-1))
                                print("append", int((6/num)-1), -value)
                        else:
                            for n in range(int(value)):
                                newChecker.append(num)
                                print("append", num, value)
                mu, std = norm.fit(newChecker)
                # for use in checker.
                # Tier 0/1/2 split of regions; fitted groups keep their own
                # mu but still take the tier-averaged std placeholder.
                if location in ["대구", "인천", "대전", "울산", "세종", "충북", "전북"]:
                    muList0.append(mu)
                    stdList0.append(std)
                    customModel.append([location, industry, mu, "generalStand0"])
                    print([location, industry, mu, std])
                elif location in ["충남", "전남", "경북", "경남"]:
                    muList1.append(mu)
                    stdList1.append(std)
                    customModel.append([location, industry, mu, "generalStand1"])
                    print([location, industry, mu, std])
                else:
                    muList2.append(mu)
                    stdList2.append(std)
                    customModel.append([location, industry, mu, "generalStand2"])
                    print([location, industry, mu, std])
            else:
                # Incomplete history: both parameters come from the tier.
                if location in ["대구", "인천", "대전", "울산", "세종", "충북", "전북"]:
                    customModel.append([location, industry, "generalMean0", "generalStand0"])
                elif location in ["충남", "전남", "경북", "경남"]:
                    customModel.append([location, industry, "generalMean1", "generalStand1"])
                else:
                    customModel.append([location, industry, "generalMean2", "generalStand2"])
    # Tier averages (raises StatisticsError if a tier collected no fits).
    modelMean0 = statistics.mean(muList0)
    modelStan0 = statistics.mean(stdList0)
    modelMean1 = statistics.mean(muList1)
    modelStan1 = statistics.mean(stdList1)
    modelMean2 = statistics.mean(muList2)
    modelStan2 = statistics.mean(stdList2)
    # Materialize and substitute tier placeholders with the tier averages.
    model = pd.DataFrame(np.array(customModel), columns= ["CARD_SIDO_NM", "STD_CLSS_NM","modelMean", "modelStand"])
    model.loc[:,"modelMean"][(model["modelMean"]=="generalMean0")] = modelMean0
    model.loc[:, "modelStand"][(model["modelStand"] == "generalStand0")] = modelStan0
    model.loc[:,"modelMean"][(model["modelMean"]=="generalMean1")] = modelMean1
    model.loc[:, "modelStand"][(model["modelStand"] == "generalStand1")] = modelStan1
    model.loc[:,"modelMean"][(model["modelMean"]=="generalMean2")] = modelMean2
    model.loc[:, "modelStand"][(model["modelStand"] == "generalStand2")] = modelStan2
    return model
def AIparameter(LabeledData):
    """Fit both per-(region, industry) models and merge them into one table.

    Combines the money-per-use "alpha" parameters with the use-count model
    parameters; the result has modelMean/modelStand plus
    alphaLoc/alphaStand/alphaMin columns.
    """
    alpha_table = moneyPerUseOptimize(LabeledData)
    # alpha_table columns: CARD_SIDO_NM, STD_CLSS_NM, alphaLoc, alphaStand, alphaMin
    merged = usePerCumstomerOptimize(LabeledData)
    print(alpha_table)
    print(merged)
    print("alpha index")
    # Positional merge: both tables iterate (region, industry) in the same order.
    merged["alphaLoc"] = alpha_table.iloc[:, -3]
    merged["alphaStand"] = alpha_table.iloc[:, -2]
    merged["alphaMin"] = alpha_table.iloc[:, -1]
    print(merged)
    return merged
def AI(originalData, parameters, forcastingDate, parameter1, parameter2 , parameter3):
    """Forecast post-corona AMT for month `forcastingDate` (1-12).

    For each row of `parameters`, scales the 2019 same-month total by a
    factor built from two peak-normalized normal pdfs (timing model x alpha
    model), signed by alphaMin.  Appends REG_YYMM / AMT columns to
    `parameters` and returns it.
    NOTE(review): `parameter3` is unused, and precorona/preuse are totals
    over the whole month, not per (region, industry) — confirm intent.
    """
    precoronas = originalData.groupby(["CARD_SIDO_NM", "REG_YYMM", "STD_CLSS_NM"])["AMT", "CNT"].sum()
    print("precoronas")
    print(precoronas)
    precoronas = precoronas.reset_index()
    newColumn = {"REG_YYMM": list(), "AMT":list()}
    for n in range(len(parameters.iloc[:, 1])):
        parameter = parameters.iloc[n, :]
        # Baseline: the same calendar month in 2019.
        precoronas1 = precoronas.iloc[:, :][(precoronas["REG_YYMM"]== 201900 + forcastingDate)]
        precorona =precoronas1.loc[:, "AMT"].sum()
        preuse = precoronas1.loc[:, "CNT"].sum()
        prealpha = precorona / preuse
        #already defined.
        print("precorona")
        print(precorona)
        print("Parameter")
        print(parameter)
        print("forcastingDate", forcastingDate)
        print("parameter.loc[modelmean]", parameter.loc["modelMean"])
        print("parameter.loc[0, modelStand]", parameter.loc["modelStand"])
        # Model parameters come back as strings from the parameter table.
        Loc = float(parameter.loc["modelMean"])
        Scale = float(parameter.loc["modelStand"])
        alphaLoc = float(parameter.loc["alphaLoc"])
        alphaStand = float(parameter.loc["alphaStand"])
        alphaMin = float(parameter.loc["alphaMin"])
        if preuse == 0 or alphaMin == 0:
            # No baseline usage or no alpha signal: nothing to forecast.
            postcorona = 0
        else:
            # pdf values at the forecast month, each normalized by its own
            # peak so the combined factor lies in (0, 1].  parameter1 /
            # parameter2 widen or narrow the two bells.
            alphaModel = norm.pdf(forcastingDate, loc=alphaLoc, scale= alphaStand * parameter2)
            modelPre = norm.pdf(forcastingDate, loc=Loc , scale= Scale * parameter1)
            alphaModelStand = norm.pdf(alphaLoc, loc=alphaLoc, scale=alphaStand * parameter2 )
            ModelStan = norm.pdf(Loc, loc=Loc, scale=Scale * parameter1)
            print("Loc:", Loc)
            print("Scale:", Scale)
            print("AlphaLoc:", alphaLoc)
            print("AlphaStand:", alphaStand)
            print("preuse:", preuse)
            print("alphaModel:", alphaModel)
            print("alphaModelStand:", alphaModelStand)
            print("ModelPre:", modelPre)
            print("ModelStan:", ModelStan)
            # sign(alphaMin) decides whether corona raised or lowered AMT.
            postcorona = precorona * (1+ (alphaMin/abs(alphaMin))* ((modelPre *alphaModel) / (alphaModelStand * ModelStan)))
        print("precorona")
        print(precorona)
        print("\n postcorona")
        print(postcorona, "\n\n", "=" * 30)
        # Abort the whole run on a non-positive forecast (bad parameters).
        if postcorona <= 0:
            sys.exit()
        newColumn["REG_YYMM"].append(202000 + forcastingDate)
        newColumn["AMT"].append(postcorona)
    parameters = parameters.assign(REG_YYMM= newColumn["REG_YYMM"], AMT=newColumn["AMT"])
    return parameters
'''
sub = prePredictionData
for location in sub.loc[:, "CARD_SIDO_NM"].unique():
precorona1 = originalData.iloc[:, :][
(originalData["CARD_SIDO_NM"] == location) & (
originalData["REG_YYMM"] == forcastingDate + 201900)]
preuse1 = originalData.iloc[:,:][
(originalData["CARD_SIDO_NM"] == location) & (
originalData["REG_YYMM"] == forcastingDate + 201900)]
for industry in sub.loc[:, "STD_CLSS_NM"].unique():
precorona = precorona1["AMT"][(originalData["STD_CLSS_NM"]==industry)]
preuse = preuse1["CNT"][(originalData["STD_CLSS_NM"]==industry)]
precorona = precorona.sum()
preuse = preuse.sum()
print("precorona")
print(precorona)
print("Parameter")
print(parameters)
if precorona != 0:
parameter = parameters[(parameters["CARD_SIDO_NM"] == location) & (parameters["STD_CLSS_NM"] == industry)]
if parameter.empty or parameter.loc[:, "alphaMin"].mean() == 0:
sub.loc[:, "AMT"][(sub["CARD_SIDO_NM"] == location) & (sub["STD_CLSS_NM"] == industry)] = precorona
else:
print(parameter)
parameter.reset_index()
print("forcastingDate", forcastingDate)
print("parameter.loc[modelmean]", parameter.loc[:, "modelMean"])
print("parameter.loc[0, modelStand]", parameter.loc[:, "modelStand"])
Loc = float(parameter.loc[:, "modelMean"].mean())
Scale = float(parameter.loc[:, "modelStand"].mean())*parameter1
alphaLoc = float(parameter.loc[:, "alphaLoc"].mean())
alphaStand = float(parameter.loc[:, "alphaStand"].mean())
alphaMin = float(parameter.loc[:, "alphaMin"].mean())
alphaModel = model(alphaLoc, alphaStand, alphaMin, forcastingDate, parameter2)
modelPre = model(Loc, Scale, preuse, forcastingDate, parameter1)
#customer = originalData["CSTMR_CNT"][(originalData["CARD_SIDO_NM"]==location) & (originalData["STD_CLSS_NM"]==industry) & (originalData["REG_YYMM"]==forcastingDate+201900)]
#customer = customer.sum()
print("Loc:", Loc)
print("Scale:", Scale)
print("Alpha:", alphaModel)
print("ModelPre:", modelPre)
print("AlphaStand:", alphaStand)
print("preuse:", preuse)
postcorona = precorona + (modelPre * alphaModel)
print("precorona")
print(precorona)
print("\n postcorona")
print(postcorona, "\n\n", "="*30)
if pd.isna(postcorona) :
print("AI Error. Wrong Parameter.")
sys.exit()
elif postcorona < 0 :
print("AI Error. Wrong Parameter.")
sys.exit()
else:
sub.loc[:, "AMT"][(sub["REG_YYMM"] == 202000 + forcastingDate) & (sub["CARD_SIDO_NM"]==location) & (sub["STD_CLSS_NM"]==industry)] = postcorona
else:
sub.loc[:, "AMT"][(sub["REG_YYMM"] == 202000 + forcastingDate) & (sub["CARD_SIDO_NM"] == location) & (sub["STD_CLSS_NM"] == industry)] = precorona
return sub
'''
#초기 데이터 정화작업
ccdata = dataGenerator("data.csv")
print(ccdata.data)
newdata = pd.read_csv("202004.csv")
ccdata.data = pd.concat([ccdata.data, newdata])
print(ccdata.data)
ccdata.data.iloc[1::5000].to_csv("/Users/gimhyeonjun/PycharmProjects/JejuCC_AI/id.csv")
print(ccdata.dataIndexing("REG_YYMM"))
print(ccdata.dataIndexInitiate())
customer = ccdata.columnBreaker("CSTMR_CNT")
use = ccdata.columnBreaker("CNT")
usedMoney = ccdata.columnBreaker("AMT")
moneyPerCus = usedMoney / customer
moneyPerPurchase = usedMoney/use
cusPerUse = use
cusPerUseStan = cusPerUse / cusPerUse.sum()
cusPerUseSum = cusPerUse.sum()
variableArray = np.array([moneyPerPurchase, moneyPerCus, cusPerUse])
print("Variable Ready. \n 변수들이 준비되었습니다.")
#불필요한 데이터 삭제
ccdataNoMove = ccdata.data.iloc[ : , 0:6]
variable = pd.DataFrame(variableArray.T, columns=["Money_per_use", "Money_per_customer", "use_per_cumstomer"])
print(variable)
ccdataNoMove = ccdataNoMove.join(variable)
print(ccdataNoMove)
ccdata.data = ccdataNoMove
print(ccdata.data)
#날짜 넘버링 데이터 클리닝
for n in range(1, 13):
ccdata.data.loc[:, "REG_YYMM"][(ccdata.data.loc[:, "REG_YYMM"] == 201900 + n)] = n
for n in range(0, 4):
ccdata.data.loc[:, "REG_YYMM"][(ccdata.data.loc[:, "REG_YYMM"] == 202001 + n)] = n + 13
print("Year and Month data cleansing Complete. Now in indexed int data. 년월 데이터 정화 완료. 이제는 달을 간격으로 인덱싱된 정수 자료형입니다.")
print(ccdata.data)
#관광 수준에 따른 데이터 레이블링 분류
dataLabel0 = ccdata.data
#dataLabel1 = dataLabeling(ccdata.data, ["충남", "전남", "경북", "경남"])
#dataLabel2 = dataLabeling(ccdata.data, ["제주", "강원", "경기", "서울"])
model0 = AIparameter(dataLabel0)
#model1 = AIparameter(dataLabel1)
#model2 = AIparameter(dataLabel2)
#마지막 파라미터를 높이면전체적으로 확산.
sub1=AI(ccdata.original, model0, 4, 3, 3, 3)
sub=AI(ccdata.original, model0, 7, 3, 3, 3)
sub = sub.append(sub1)
sub = sub.sort_values(by=["REG_YYMM", "CARD_SIDO_NM", "STD_CLSS_NM"])
id = list()
for n in range(len(sub.iloc[:, 1])):
id.append(n)
sub = sub.assign(id=id)
sub.loc[:, ["id","REG_YYMM","CARD_SIDO_NM","STD_CLSS_NM","AMT"]].to_csv("/Users/gimhyeonjun/PycharmProjects/JejuCC_AI/FinalSubmission3.csv", index=False)
'''
dataLabel3 = dataLabeling(ccdata.data, ["강원"])
model3 = AIparameter(dataLabel3)
AI(ccdata.original, model3, 4, cusPerUseSum)
'''
|
[
"hyeonjunacademic@gmail.com"
] |
hyeonjunacademic@gmail.com
|
aa9b63a239b9f54f9432e9d9b26b9cc92fb1fa36
|
52691f6d8a16b2c4ac7ae100c901a327e778f87c
|
/deniallist.py
|
ac67f79a1178fd3cf1a04af574684ddcb03225f6
|
[] |
no_license
|
C4st3ll4n/hotel_flask
|
7e4b0780ee24bae5099fe9a9fa200792a9926a41
|
3840a092b85e51f9356408f41c46fcc5fee621e5
|
refs/heads/master
| 2023-01-22T07:13:19.743084
| 2020-11-28T20:02:57
| 2020-11-28T20:02:57
| 312,084,163
| 0
| 0
| null | 2020-11-17T22:04:53
| 2020-11-11T20:38:42
|
Python
|
UTF-8
|
Python
| false
| false
| 21
|
py
|
# Module-level registry of denied items (empty at import time).
# NOTE(review): presumably populated/queried by other modules at runtime
# (e.g. revoked tokens) -- confirm against callers before relying on this.
DENIALLIST = set()
|
[
"henrique.souza@elostecnologia.com.br"
] |
henrique.souza@elostecnologia.com.br
|
b734ae887f040770988e1aeddbeed4bf976f5f8f
|
3858624710870b088e44abbc92a13be5ac449636
|
/myntradb/build/lib/myntradb/user/migrations/0001_initial.py
|
faa8c53e04bb2c514589e30b973dfeca93b24acc
|
[] |
no_license
|
nehashewale/myntradb
|
733a6088717a2bab9c2a7fcc5cb601d9d7d3ae44
|
230b5bbdf91deaec6c88ad4f217db41370ba60f2
|
refs/heads/master
| 2023-06-24T10:30:26.804560
| 2021-07-29T07:09:17
| 2021-07-29T07:09:17
| 387,231,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
# Generated by Django 2.1 on 2021-07-20 16:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Address table and the
    # User table. User carries two FKs to Address (primary and shipping),
    # disambiguated for reverse lookups via related_name.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('landmark', models.CharField(max_length=100)),
                ('village', models.CharField(max_length=20)),
                ('taluka', models.CharField(max_length=20)),
                ('district', models.CharField(max_length=20)),
                ('state', models.CharField(max_length=20)),
                # NOTE(review): default=True on IntegerField coerces to 1 --
                # mirrors the generated model; confirm intent in models.py.
                ('pincode', models.IntegerField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20)),
                ('middle_name', models.CharField(max_length=20)),
                ('last_name', models.CharField(max_length=20)),
                # NOTE(review): IntegerField/DateField with default=True look
                # unintended (True -> 1; DateField default should be a date).
                ('phone_number', models.IntegerField(default=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=2)),
                ('date_of_birth', models.DateField(default=True)),
                ('primary_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='primary_address', to='user.Address')),
                ('shipping_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='user.Address')),
            ],
        ),
    ]
|
[
"nehashewale3010@gmail.com"
] |
nehashewale3010@gmail.com
|
24fdfca44f0e6b00c7b37ca6c0d310609d5f2966
|
c6a0a1d40733a5de1831f66449c32a874fe58c06
|
/headteacher/migrations/0001_initial.py
|
f7ffaeea5ed52eb13ae8dc6b1621eeff36ecc8e3
|
[] |
no_license
|
ZhibekSolp/School
|
6acc26184a2f7e6b7bcd2fdf55736f196514bf57
|
aedec7d9ddf6e3d55f047229c7d8fadbae6fc57c
|
refs/heads/master
| 2023-07-19T00:37:51.001181
| 2021-09-11T16:21:54
| 2021-09-11T16:21:54
| 405,304,849
| 0
| 0
| null | 2021-09-11T14:25:29
| 2021-09-11T06:42:18
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Generated by Django 3.2.7 on 2021-09-11 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the HeadTeacher table with a
    # full name, an optional photo and an ordering number. The Cyrillic
    # verbose_name strings are user-facing admin labels -- keep verbatim.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='HeadTeacher',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fullname', models.CharField(max_length=255, verbose_name='ФИО')),
                ('photo', models.ImageField(blank=True, upload_to='', verbose_name='Фото завуча')),
                ('number', models.IntegerField(default=0, verbose_name='Номер')),
            ],
            options={
                'verbose_name_plural': 'Завучи',
            },
        ),
    ]
|
[
"ZhibekSolp.solpieva.zhibek@gmail.com"
] |
ZhibekSolp.solpieva.zhibek@gmail.com
|
3cc3b9b73a51bfc77678166cea769356ea41b111
|
26c4cf72a18464c6b38b0e499806e846eb130d83
|
/run_experiments.py
|
b7b6a2b17776d0684ec40e9a573fb18a7eea2d80
|
[] |
no_license
|
arnupretorius/noisyNNGPs_2019
|
694d6aec1ec538c66aa491f29d4871d8c4b4f0bd
|
8c765f7b942653cdc147323974a5c2ae4d839518
|
refs/heads/master
| 2020-04-28T03:28:26.530870
| 2019-08-12T08:49:52
| 2019-08-12T08:49:52
| 174,938,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,074
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Run experiments with NNGP Kernel.
Usage:
python run_experiments.py \
--num_train=100 \
--num_eval=1000 \
--hparams='nonlinearity=relu,depth=10,weight_var=1.79,bias_var=0.83' \
--n_gauss=501 --n_var=501 --n_corr=500 --max_gauss=10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os.path
import time
import numpy as np
import tensorflow as tf
import gpr
import load_dataset
import nngp
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('hparams', '',
'Comma separated list of name=value hyperparameter pairs to'
'override the default setting.')
flags.DEFINE_string('experiment_dir', 'results',
'Directory to put the experiment results.')
flags.DEFINE_string('grid_path', './grid_data',
'Directory to put or find the training data.')
flags.DEFINE_integer('num_train', 1000, 'Number of training data.')
flags.DEFINE_integer('num_eval', 1000,
'Number of evaluation data. Use 10_000 for full eval')
flags.DEFINE_integer('seed', 1234, 'Random number seed for data shuffling')
flags.DEFINE_boolean('save_kernel', False, 'Save Kernel do disk')
flags.DEFINE_string('dataset', 'mnist',
'Which dataset to use ["mnist"]')
flags.DEFINE_boolean('use_fixed_point_norm', False,
'Normalize input variance to fixed point variance')
flags.DEFINE_integer('n_gauss', 501,
'Number of gaussian integration grid. Choose odd integer.')
flags.DEFINE_integer('n_var', 501,
'Number of variance grid points.')
flags.DEFINE_integer('n_corr', 500,
'Number of correlation grid points.')
flags.DEFINE_integer('max_var', 100,
'Max value for variance grid.')
flags.DEFINE_integer('max_gauss', 10,
'Range for gaussian integration.')
def set_default_hparams():
  """Default kernel hyperparameters; fields are overridden via --hparams."""
  return tf.contrib.training.HParams(
      nonlinearity='tanh', weight_var=1.3, bias_var=0.2, mu_2=1.0, depth=2)
def do_eval(sess, model, x_data, y_data, save_pred=False):
  """Run evaluation.

  Computes GP predictions for x_data and derives accuracy (argmax match),
  per-example MSE, mean predictive variance and the kernel norm.
  Returns (accuracy, mse, var, kernel_norm, stability_eps).
  """
  # NOTE(review): model.predict appears to return the literal string "NaN"
  # as a failure sentinel -- confirm in gpr.GaussianProcessRegression.
  gp_prediction, var_pred, kernel_norm, stability_eps = model.predict(x_data, sess)
  if gp_prediction == "NaN":
    # Numerical failure: report degenerate metrics instead of crashing.
    accuracy = 0
    mse = float('Inf')
    var = float('NaN')
    kernel_norm = float('NaN')
    return accuracy, mse, var, kernel_norm, stability_eps
  pred_1 = np.argmax(gp_prediction, axis=1)
  accuracy = np.sum(pred_1 == np.argmax(y_data, axis=1)) / float(len(y_data))
  mse = np.mean(np.mean((gp_prediction - y_data)**2, axis=1))
  pred_norm = np.mean(np.linalg.norm(gp_prediction, axis=1))
  # Mean predictive variance over examples (first variance column).
  var = np.mean(var_pred[:,0])
  tf.logging.info('Accuracy: %.4f'%accuracy)
  tf.logging.info('MSE: %.8f'%mse)
  tf.logging.info('Var: %.8f'%var)
  print("---- Variance -----")
  print(np.mean(var_pred[:,0]))
  print(var_pred.shape)
  print("-------------------")
  print("----- Kernel norm -----")
  print(kernel_norm)
  print("-------------------")
  if save_pred:
    # Optionally persist raw predictions for offline analysis.
    with tf.gfile.Open(
        os.path.join(FLAGS.experiment_dir, 'gp_prediction_stats.npy'),
        'w') as f:
      np.save(f, gp_prediction)
  return accuracy, mse, var, kernel_norm, stability_eps
def run_nngp_eval(hparams, run_dir):
  """Runs experiments.

  Loads the dataset selected by FLAGS.dataset, builds an NNGP kernel and a
  GP regression model from `hparams`, evaluates on (a subset of) the train
  and test sets, appends a results row to a CSV under `run_dir`, and saves
  the per-layer variance array. Returns a dict of float metrics.
  """
  tf.gfile.MakeDirs(run_dir)
  # Write hparams to experiment directory.
  with tf.gfile.GFile(run_dir + '/hparams', mode='w') as f:
    f.write(hparams.to_proto().SerializeToString())
  tf.logging.info('Starting job.')
  tf.logging.info('Hyperparameters')
  tf.logging.info('---------------------')
  tf.logging.info(hparams)
  tf.logging.info('---------------------')
  tf.logging.info('Loading data')
  # Get the sets of images and labels for training, validation, and
  # # test on dataset.
  if FLAGS.dataset == 'mnist':
    (train_image, train_label, #valid_image, valid_label,
     test_image, test_label) = load_dataset.load_mnist(
         num_train=FLAGS.num_train,
         mean_subtraction=True,
         random_rotated_labels=False)
  elif FLAGS.dataset == 'cifar10':
    (train_image, train_label, #valid_image, valid_label,
     test_image, test_label) = load_dataset.load_cifar10(
         num_train=FLAGS.num_train,
         mean_subtraction=True)
  else:
    raise NotImplementedError
  tf.logging.info('Building Model')
  if hparams.nonlinearity == 'tanh':
    nonlin_fn = tf.tanh
  elif hparams.nonlinearity == 'relu':
    nonlin_fn = tf.nn.relu
  else:
    raise NotImplementedError
  with tf.Session() as sess:
    # Construct NNGP kernel
    nngp_kernel = nngp.NNGPKernel(
        depth=hparams.depth,
        weight_var=hparams.weight_var,
        bias_var=hparams.bias_var,
        mu_2=hparams.mu_2,
        nonlin_fn=nonlin_fn,
        grid_path=FLAGS.grid_path,
        n_gauss=FLAGS.n_gauss,
        n_var=FLAGS.n_var,
        n_corr=FLAGS.n_corr,
        max_gauss=FLAGS.max_gauss,
        max_var=FLAGS.max_var,
        use_fixed_point_norm=FLAGS.use_fixed_point_norm)
    # Construct Gaussian Process Regression model
    model = gpr.GaussianProcessRegression(
        train_image, train_label, kern=nngp_kernel)
    start_time = time.time()
    tf.logging.info('Training')
    # For large number of training points, we do not evaluate on full set to
    # save on training evaluation time.
    if FLAGS.num_train <= 5000:
      acc_train, mse_train, var_train, norm_train, final_eps = do_eval(
          sess, model, train_image[:FLAGS.num_eval],
          train_label[:FLAGS.num_eval])
      tf.logging.info('Evaluation of training set (%d examples) took '
                      '%.3f secs'%(
                          min(FLAGS.num_train, FLAGS.num_eval),
                          time.time() - start_time))
    else:
      acc_train, mse_train, var_train, norm_train, final_eps = do_eval(
          sess, model, train_image[:1000], train_label[:1000])
      tf.logging.info('Evaluation of training set (%d examples) took '
                      '%.3f secs'%(1000, time.time() - start_time))
    # start_time = time.time()
    # tf.logging.info('Validation')
    # acc_valid, mse_valid, var_valid, norm_valid, _ = do_eval(
    #     sess, model, valid_image[:FLAGS.num_eval],
    #     valid_label[:FLAGS.num_eval])
    # tf.logging.info('Evaluation of valid set (%d examples) took %.3f secs'%(
    #     FLAGS.num_eval, time.time() - start_time))
    start_time = time.time()
    tf.logging.info('Test')
    acc_test, mse_test, var_test, norm_test, _ = do_eval(
        sess,
        model,
        test_image[:FLAGS.num_eval],
        test_label[:FLAGS.num_eval],
        save_pred=False)
    tf.logging.info('Evaluation of test set (%d examples) took %.3f secs'%(
        FLAGS.num_eval, time.time() - start_time))
    metrics = {
        'train_acc': float(acc_train),
        'train_mse': float(mse_train),
        'train_norm': float(norm_train),
        # 'valid_acc': float(acc_valid),
        # 'valid_mse': float(mse_valid),
        # 'valid_norm': float(norm_valid),
        'test_acc': float(acc_test),
        'test_mse': float(mse_test),
        'test_norm': float(norm_test),
        'stability_eps': float(final_eps),
    }
    record_results = [
        # FLAGS.num_train, hparams.nonlinearity, hparams.weight_var,
        # hparams.bias_var, hparams.mu_2, hparams.depth, acc_train, acc_valid, acc_test,
        # mse_train, mse_valid, mse_test, final_eps
        FLAGS.num_train, hparams.nonlinearity, hparams.weight_var,
        hparams.bias_var, hparams.mu_2, hparams.depth, acc_train, acc_test, var_test, norm_train
    ]
    if nngp_kernel.use_fixed_point_norm:
      metrics['var_fixed_point'] = float(nngp_kernel.var_fixed_point_np[0])
      record_results.append(nngp_kernel.var_fixed_point_np[0])
    # Store data
    result_file = os.path.join(run_dir, 'results' + str(hparams.depth) + '.csv')
    with tf.gfile.Open(result_file, 'a') as f:
      filewriter = csv.writer(f)
      filewriter.writerow(record_results)
  # Save the per-layer variance map (qaa) for this hparam configuration.
  with tf.Session() as sess:
    varss = np.array([x[0] for x in sess.run(nngp_kernel.layer_qaa_dict).values()])
    save_string = str(hparams.depth) + "_" + str(hparams.weight_var) + '_' + str(hparams.mu_2)
    np.save('results/vars/'+save_string, varss)
  return metrics
def main(argv):
  """Entry point for tf.app.run: apply --hparams overrides, then run."""
  del argv  # unused, but required by tf.app.run's callback signature
  hp = set_default_hparams().parse(FLAGS.hparams)
  run_nngp_eval(hp, FLAGS.experiment_dir)
if __name__ == '__main__':
tf.app.run(main)
|
[
"arnupretorius@gmail.com"
] |
arnupretorius@gmail.com
|
ef9a7c367bd1087b092f78ee9feb34f8fb220822
|
0e667a493715932d3dd45f6a59bd31c391c05b6a
|
/bin/pygmentize
|
9e0974abc30dcc7b01ffff006b0e612e8a1e5f35
|
[] |
no_license
|
Anubhav722/QR-Code-Scanner
|
84908069d6dc4082e94ce01c62085ce1ac380a62
|
455d28d5654bed3c9d3161897f7cead21d4c7f8e
|
refs/heads/master
| 2021-04-30T16:13:46.769315
| 2017-01-26T17:28:45
| 2017-01-26T17:28:45
| 79,985,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/home/paras/Desktop/QR-Code-Scanner/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the Pygments command-line tool.
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so the
    # tool reports a clean program name in its usage/help output.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"anubhavs286@gmail.com"
] |
anubhavs286@gmail.com
|
|
9a35602e246a3b5ec780162d71ffce8af4f28b65
|
bbe32e34c0ff194411df58e73606eb723c2fe53d
|
/Expense_Input/userinput/views.py
|
60c0c54e9a9e8c406754b27b83f31de66685436b
|
[] |
no_license
|
Azirly/Expenses-Input
|
3acf3f0a539f8b88cb506df394af5904bec54334
|
8f75aa69083c6f5f951b535241315bc42613abdf
|
refs/heads/master
| 2022-11-10T19:48:12.526929
| 2018-04-19T21:01:42
| 2018-04-19T21:01:42
| 130,256,716
| 0
| 1
| null | 2022-10-22T09:17:14
| 2018-04-19T18:30:51
|
Python
|
UTF-8
|
Python
| false
| false
| 450
|
py
|
from django.http import HttpResponse
def index(request):
    """Plain-text landing page for the user index."""
    greeting = "Hello, world. You're at the userindex."
    return HttpResponse(greeting)
def detail(request, value):
    """Plain-text detail page for question `value`."""
    message = "You're looking at question %s." % value
    return HttpResponse(message)
def results(request, value):
    """Plain-text results page for question `value`."""
    return HttpResponse("You're looking at the results of question %s." % value)
def vote(request, value):
    """Plain-text voting page for question `value`."""
    text = "You're voting on question %s." % value
    return HttpResponse(text)
|
[
"justonl@uci.edu"
] |
justonl@uci.edu
|
2e98cf7b13c0ef50f478875dc45266c668e6ed04
|
1e07ebba0d691a53ed9859c4514fa0fa26096948
|
/fund5/settings.py
|
12baf04fdb20e9fddd3607b137e9d5cc75cc914e
|
[] |
no_license
|
frankbriones/fundacion
|
bf52a9be94348306b55506677c173428cc626fc1
|
9742d244526374aa4bbcb6c338b33a698c751a1d
|
refs/heads/master
| 2022-12-22T12:29:39.106710
| 2019-11-08T00:25:04
| 2019-11-08T00:25:04
| 191,661,945
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,337
|
py
|
"""
Django settings for fund5 project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import django_heroku
# # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- it should be
# rotated and loaded from the environment (e.g. os.environ) instead.
SECRET_KEY = '-v=q6ry5d8=bcpczvblq68xc(n*hg0#_9b4p65(tc162f#0nb*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
#ALLOWED_HOSTS = ['*']
ALLOWED_HOSTS = ['.heroku.com', '127.0.0.1']
#https://pypi.org/project/django-admin-interface/
# Application definition
INSTALLED_APPS = [
'admin_interface',
'colorfield',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'comentario',
#generar documentacion de django
'django.contrib.admindocs',
#'social.apps.django_app.default',
#'django.contrib.gis',
'rest_framework',
'crispy_forms',
'persona',
'donacion',
'talleres',
'programa',
'mathfilters',
'bootstrap4',
'website',
'restapi',
'rest_framework.authtoken',
]
#ACCOUNT_ACTIVATION_DAYS = 7
CRISPY_TEMPLATE_PACK = 'bootstrap4'
BOOTSTRAP4 = {
'include_jquery': True,
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#middleware para el cambio de idioma a espanol mas el
#cambio en la internationalizacion
'django.middleware.locale.LocaleMiddleware',
'fund5.middleware.ProfileCompletoMiddleware',
]
ROOT_URLCONF = 'fund5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fund5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
# NOTE(review): live Postgres credentials are committed here; move them to
# environment variables. dj_database_url.config() further below already
# overrides this dict when DATABASE_URL is set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'dctm7eknu2bs2k',
        'USER': 'uhgwdtevcmrdrg',
        'PASSWORD': '3cfce4a95f05acce0479c89013844f7302d1e07384b13b6e3d3101a633e083af',
        'HOST': 'ec2-54-197-238-238.compute-1.amazonaws.com',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# en produccion dirige al path de los estaticos
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOGIN_URL = '/modulo-usuario/login/'
LOGIN_REDIRECT_URL = '/modulo-usuario/perfil/'
LOGOUT_REDIRECT_URL = '/'
#envio de correo para recuperrar contrasena
# Outgoing mail (password-reset emails).
# NOTE(review): real Gmail credentials are hard-coded below -- load them from
# environment variables and use an app-specific password instead.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com' # SMTP mail provider
EMAIL_PORT = 587 # port 25 can also be used
EMAIL_HOST_USER = 'javier1992frank@gmail.com' # e-mail account id
EMAIL_HOST_PASSWORD = 'franklin1992' # password
EMAIL_USE_TLS = True
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'correos_enviados')
#para el mapa que se encuentra en visitanos de la website (API KEY)
#console.developers.google
#EASY_MAPS_GOOGLE_KEY = 'AIzaSyChphb-2I2kXUXmRB_0AIv272gZjbYUpfY'
#EASY_MAPS_CENTER = (-2.188168, -79.895279)
GOOGLE_MAPS_API_KEY = 'AIzaSyChphb-2I2kXUXmRB_0AIv272gZjbYUpfY'
#
AUTH_USER_MODEL = 'persona.Profile'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
#SESSION_COOKIE_AGE = 100 # para prueba solo ponemos 10 segundos para colocar
#minutos solo colocamos la cantidad en segundos
SESSION_SAVE_EVERY_REQUEST = True
#GDAL_LIBRARY_PATH = 'C:/OSGeo4W64/bin/gdal202.dll/'
# REST_FRAMEWORK = {
# # Use Django's standard `django.contrib.auth` permissions,
# # or allow read-only access for unauthenticated users.
# 'DEFAULT_PERMISSION_CLASSES': [
# #'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# 'rest_framework.permissions.IsAuthenticated'
# ]
# }
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
django_heroku.settings(locals())
|
[
"frankbriones90@gmail.com"
] |
frankbriones90@gmail.com
|
4c5fbb4c60927c29d095f78824b181e96bb176ad
|
05289b644f72da70d6c1fa14f8ac44ee46eff42c
|
/day_translator_ina_eng_vice_versa.py
|
4f2c55fab8422de84778587195e4f60703f54722
|
[] |
no_license
|
ridhoaryo/for_loops_project
|
bdc28d94ada504cf5c4d0783a83a48dfec0b4e70
|
a5c18d392c124830fc4dfe14682c050e8f98add1
|
refs/heads/master
| 2020-09-07T14:32:34.077062
| 2019-11-10T15:43:31
| 2019-11-10T15:43:31
| 220,811,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Indonesian -> English day names (keys: Indonesian, values: English).
days = {
    'senin': 'monday', 'selasa': 'tuesday', 'rabu': 'wednesday', 'kamis': 'thursday', 'jumat': 'friday', 'sabtu': 'saturday',
    'ahad': 'sunday'
}
day = input('Input hari: ').lower()
# Translate in whichever direction matches: an English name maps back to
# Indonesian, an Indonesian name maps to English. Unknown input is silent.
english_to_indo = {eng: ind for ind, eng in days.items()}
if day in english_to_indo:
    ind = english_to_indo[day]
    print(f'Bahasa Indonesia dari {day.upper()} adalah {ind.upper()}')
elif day in days:
    eng = days[day]
    print(f'Bahasa Inggris dari {day.upper()} adalah {eng.upper()}')
|
[
"noreply@github.com"
] |
noreply@github.com
|
5d9258a3469e2eae18d22eee1f4a8ea2a215497b
|
4e3b279d4c014b904373a402c78a26e8ce281d4c
|
/Candidato.py
|
834719b7a6ddbd1a41dc0f5f3b7b6456a3c08409
|
[] |
no_license
|
renanzinho/trabalhogio
|
01bad9da6afa471f1e0e75546ccf9acb7902282f
|
a98cee5600ef7b30969fe958fb3832fd4a8d8a4a
|
refs/heads/master
| 2020-03-08T18:12:35.016613
| 2018-04-11T02:13:56
| 2018-04-11T02:13:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,702
|
py
|
import Bem
class Candidato:
    """Electoral candidate record (TSE-style fields) plus declared assets.

    Every constructor argument is exposed as a plain read/write attribute.
    The original property-per-field boilerplate performed no validation, so
    plain attributes preserve the exact public get/set interface with far
    less code. `bens` holds asset objects exposing `valorBem` (number) and
    `descricaoTipoBem` (str) -- e.g. instances from the Bem module.
    """

    def __init__(self,
                 anoEleicao,
                 uf,
                 codigoCargo,
                 descricaoCargo,
                 nomeCandidato,
                 idCandidato,
                 numeroUrna,
                 cpf,
                 nomeNaUrna,
                 numeroPartido,
                 nomePartido,
                 siglaPartido,
                 codigoOcupacaoCandidato,
                 descricaoOcupacao,
                 dataNascimento,
                 sexoCandidato,
                 grauInstrucao,
                 estadoCivil,
                 ufNascimento,
                 nomeMunicipioNascimento,
                 situacaoPosPleito,
                 situacaoCandidatura,
                 bens=None):
        self.anoEleicao = anoEleicao
        self.uf = uf
        self.codigoCargo = codigoCargo
        self.descricaoCargo = descricaoCargo
        self.nomeCandidato = nomeCandidato
        self.idCandidato = idCandidato
        self.numeroUrna = numeroUrna
        self.cpf = cpf
        self.nomeNaUrna = nomeNaUrna
        self.numeroPartido = numeroPartido
        self.nomePartido = nomePartido
        self.siglaPartido = siglaPartido
        self.codigoOcupacaoCandidato = codigoOcupacaoCandidato
        self.descricaoOcupacao = descricaoOcupacao
        self.dataNascimento = dataNascimento
        self.sexoCandidato = sexoCandidato
        self.grauInstrucao = grauInstrucao
        self.estadoCivil = estadoCivil
        self.ufNascimento = ufNascimento
        self.nomeMunicipioNascimento = nomeMunicipioNascimento
        self.situacaoPosPleito = situacaoPosPleito
        self.situacaoCandidatura = situacaoCandidatura
        # Robustness fix: default to an empty list so incluirBem() and
        # __str__() work on a candidate created without declared assets
        # (previously `bens` stayed None and both methods crashed).
        self.bens = [] if bens is None else bens

    def __str__(self):
        # Aggregate declared assets: grand total and subtotal per asset type.
        valorTotal = 0
        valorPorTipo = {}
        for bem in self.bens:
            valorTotal += bem.valorBem
            valorPorTipo[bem.descricaoTipoBem] = (
                valorPorTipo.get(bem.descricaoTipoBem, 0) + bem.valorBem)
        dictToString = '\n'.join('\t\t- %s: %.2f R$' % (tipo, valor)
                                 for tipo, valor in valorPorTipo.items())
        # Bug fix: the original template had 9 placeholders for 8 arguments
        # (two '%s (%s)' lines), so str() always raised TypeError. The second
        # line now shows the candidacy state and the third the birth
        # city/state pair.
        return '''\
%s -- %i -- %s
%s
%s (%s)
Resumo dos bens:
* Total declarado: %.2f R$
* Total por tipo de bem:
%s
''' % (self.nomeNaUrna, self.numeroUrna, self.descricaoCargo,
       self.uf, self.nomeMunicipioNascimento, self.ufNascimento,
       valorTotal, dictToString)

    def __repr__(self):
        return str(self)

    def incluirBem(self, novoBem):
        """Append one declared asset to this candidate."""
        self.bens.append(novoBem)

    def __eq__(self, other):
        # Candidate identity is CPF + full name (original contract preserved).
        return self.cpf == other.cpf and self.nomeCandidato == other.nomeCandidato
|
[
"rfl3@cin.ufpe.br"
] |
rfl3@cin.ufpe.br
|
863b3f65db53956a88d15a3bb3febfdfc115acf3
|
61ad6d40c68c178dceeb4505a33864d20a6696dc
|
/rx/venv/bin/wheel
|
ad6c597162dea86ac41a6956686972ec87b70d14
|
[] |
no_license
|
cobain/ipython
|
e63329462c22d2751e8958164bd019821c12981b
|
1d4f6f559a729adb9e916518398ae44037bd4995
|
refs/heads/master
| 2020-05-21T18:12:34.060671
| 2018-04-27T04:32:13
| 2018-04-27T04:32:13
| 62,492,904
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/Users/htzheng/Documents/python/ipython/rx/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"zhtsuc@gmail.com"
] |
zhtsuc@gmail.com
|
|
53f2c0fd2544fc96d1a60fedeedab9df30233b87
|
2b50fcffa9803fe2090629c3dad5dd946f70a4af
|
/db/run_sql.py
|
1659cb88a1ccbae52dc8c2d71fde2285b02ec9e8
|
[] |
no_license
|
Skivlin1991/task_manager
|
607b35d7ada1de00c3285b76b6487173bf012aab
|
980a40762062ecd4f31634186ef6e5d44e200cb4
|
refs/heads/main
| 2023-05-08T02:26:05.963700
| 2021-05-27T11:59:49
| 2021-05-27T11:59:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
import psycopg2
import psycopg2.extras as ext
def run_sql(sql, values = None):
    # Execute one SQL statement against the local 'task_manager' database and
    # return the fetched rows as DictRows (index- or column-name access).
    # `values` supplies parameters (tuple/list) for %s placeholders in `sql`.
    # On any database error the error is printed and [] is returned.
    conn = None
    results = []
    try:
        conn = psycopg2.connect("dbname='task_manager'")
        cur = conn.cursor(cursor_factory=ext.DictCursor)
        cur.execute(sql, values)
        conn.commit()
        # NOTE(review): fetchall() raises for statements that produce no
        # result set (INSERT/UPDATE without RETURNING); the except below
        # swallows that and [] is returned -- presumably intentional, confirm.
        results = cur.fetchall()
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Always release the connection, even after an error.
        if conn is not None:
            conn.close()
    return results
|
[
"garrymhall@gmail.com"
] |
garrymhall@gmail.com
|
85398c9153e63b9b53d9985b044400b6227c505f
|
6806bd3e24d2ec3382cce6964e817e279052f121
|
/sentry/plugins/sentry_sites/models.py
|
4e294b463ec6ef4e47deb100f73b3e68c629019e
|
[
"BSD-2-Clause"
] |
permissive
|
magicaltrevor/sentry
|
af70427a6930f555715362e8899e4269f844e57f
|
8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94
|
refs/heads/master
| 2021-01-18T11:53:55.770327
| 2012-07-29T22:00:35
| 2012-07-29T22:00:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
"""
sentry.plugins.sentry_sites.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class SitesPlugin(TagPlugin):
    """
    Adds additional support for showing information about sites including:
    * A panel which shows all sites a message was seen on.
    * A sidebar module which shows the sites most actively seen on.
    """
    # TagPlugin wiring: each event is tagged under `tag` with the values
    # returned by get_tag_values() below.
    slug = 'sites'
    title = _('Sites')
    version = sentry.VERSION
    author = "Sentry Team"
    author_url = "https://github.com/dcramer/sentry"
    tag = 'site'
    tag_label = _('Site')
    def get_tag_values(self, event):
        # A single-element list with the event's site, or no tags at all
        # when the event carries no site.
        if not event.site:
            return []
        return [event.site]
register(SitesPlugin)
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
1e382ff1d7dc2a25325cc82e0775d8691c3da5dc
|
cc1e583a527588ee4a3a95d64603a16cf62f84a9
|
/project2/q2.py
|
222cbbc034efb4d3ac53ee9e07b2b496b4b343e6
|
[] |
no_license
|
LordNecromancer/computer_vision_course_projects
|
ff12a677edf556e68639cb5916c1e8f15d4543c8
|
fe15e6617be3cf05f565581fd3d4bea20ec81ced
|
refs/heads/master
| 2023-06-26T22:55:40.986518
| 2021-07-19T01:23:58
| 2021-07-19T01:23:58
| 387,146,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
import cv2
import numpy as np
def getObjectGrid(w, h, dimension):
    """Build the planar calibration model: w*h points spaced *dimension*
    units apart, z fixed at 0.

    Returns a float32 array of shape (w*h, 3), ordered row by row with the
    column index varying fastest (same ordering as the original loop).
    """
    cols, rows = np.meshgrid(np.arange(w), np.arange(h))
    pts = np.zeros((w * h, 3), dtype=np.float32)
    pts[:, 0] = cols.ravel() * dimension
    pts[:, 1] = rows.ravel() * dimension
    return pts
def getParameters(images,greys):
    # Calibrate the camera from all views where the 9x6 chessboard is found,
    # and return the intrinsic matrix of the second, principal-point-fixed run.
    worldObjPoints=[]
    imagePoints=[]
    objPoints=getObjectGrid(9,6,22)  # 9x6 inner corners, square size 22 -- units as captured, TODO confirm (mm?)
    #print(objPoints)
    for i in range(len(images)):
        foundPattern,corners=cv2.findChessboardCorners(greys[i],patternSize=(9,6))
        # print(corners)
        if foundPattern==True:
            #refinedCorners=cv2.cornerSubPix(greys[i],corners,(5,5),(-1,-1),(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,25,0.002))
            # The same planar model is reused for every successful view.
            worldObjPoints.append(objPoints)
            imagePoints.append(corners)
            # cv2.drawChessboardCorners(images[i],(9,6),corners,foundPattern)
            # cv2.imwrite(str(i)+'.jpg',images[i])
            # cv2.drawChessboardCorners(images[i], (9, 6), refinedCorners, foundPattern)
            # cv2.imwrite(str(i) + 'eee.jpg', images[i])
    # First pass: unconstrained calibration; its matrix is only printed for comparison.
    patternFound,cameraMatrix,distortion,rotation,translation=cv2.calibrateCamera(worldObjPoints,imagePoints,(greys[0].shape[1],greys[0].shape[0]),None,None)
    print(cameraMatrix)
    # Second pass: recalibrate with the principal point fixed; this is the returned result.
    patternFound,cameraMatrix,distortion,rotation,translation=cv2.calibrateCamera(worldObjPoints,imagePoints,(greys[0].shape[1],greys[0].shape[0]),None,None,flags=cv2.CALIB_FIX_PRINCIPAL_POINT)
    return cameraMatrix
n1=0.07  # NOTE(review): unused below -- looks like a leftover constant
images=[]
greys=[]
# Load im01.jpg .. im20.jpg plus grayscale copies for corner detection.
for i in range(1,21):
    if(i<10):
        image=cv2.imread('im0'+str(i)+'.jpg')
    else:
        image=cv2.imread('im'+str(i)+'.jpg')
    grey=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    images.append(image)
    greys.append(grey)
# Calibrate on four overlapping subsets to compare how stable the intrinsics are.
camMatrix1=getParameters(images[0:10],greys[0:10])
camMatrix2=getParameters(images[5:15],greys[5:15])
camMatrix3=getParameters(images[10:20],greys[10:20])
camMatrix4=getParameters(images[0:20],greys[0:20])
print("part 2")
print(camMatrix1)
print(camMatrix2)
print(camMatrix3)
print(camMatrix4)
|
[
"mmdp313@gmail.com"
] |
mmdp313@gmail.com
|
1bf5009a1190a9c57239a41e7bf9e3f4a691325d
|
0784c4f48ee2e25e95259f35de36f69e7bf3184f
|
/wypok_auth.py
|
f0743e14879723b5d7a393209f065f80695f3c17
|
[] |
no_license
|
a000b/ZombieBot
|
277b0d6d807a0d0332c16428ff3c9f4ac8b1f234
|
c74028bf5bf9bb64488a5efa717a1f6e8e44e765
|
refs/heads/master
| 2020-07-31T13:37:58.524740
| 2020-06-07T09:33:20
| 2020-06-07T09:33:20
| 210,620,100
| 3
| 1
| null | 2019-10-20T06:33:47
| 2019-09-24T14:14:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,004
|
py
|
import pickle
import requests
import hashlib
import logging
target_path = ""  # directory prefix for the log file (empty = current dir)
# Pipe-delimited records (time|level|file|function|message), INFO and above.
logging.basicConfig(filename=target_path + 'logs.log', level=logging.INFO,
                    format='%(asctime)s|%(levelname)s|%(filename)s|%(funcName)s|%(message)s')
def check_usrkey_isvalid(kwargs):
    """Probe the API with the stored userkey and map the outcome to a code.

    Returns 0 = key valid, 1 = stale key (refresh should be attempted),
    2 = API error, 3 = unrecognized response.
    """
    outcome = get_pm_conversation(kwargs)
    if outcome == 'ok':
        return 0
    if outcome == 11:
        logging.info('Proba aktualizacji tokenu')
        return 1
    if outcome == 'err':
        logging.error('Status err przerywam')
        return 2
    logging.error('Dunno przerywam')
    return 3
def sign_data(data):
    """Return the 'apisign' request header: MD5 hex digest of *data*."""
    digest = hashlib.md5(data.encode()).hexdigest()
    return {'apisign': digest}
def get_token(kwargs):
    """Log in to the Wykop API and return a fresh userkey, or 'err'.

    kwargs must carry 'appkey', 'secret', 'login', 'password' and 'acckey'.
    Fix: *content* is now pre-initialized -- previously, when requests.post
    itself raised, the except block referenced an unbound `content` and
    died with a NameError instead of logging the real error.
    """
    url = f"https://a2.wykop.pl/Login/Index/appkey/{kwargs['appkey']}/"
    tajny = f"{kwargs['secret']}{url}{kwargs['login']},{kwargs['password']},{kwargs['acckey']}"
    data = {'login': kwargs['login'], 'password': kwargs['password'], 'accountkey': kwargs['acckey']}
    content = None  # keep defined for the error log below
    try:
        r = requests.post(url, data=data, headers=sign_data(tajny))
        content = r.json()
        userkey = content['data']['userkey']
    except Exception as e:
        userkey = 'err'
        logging.error(f'{e},{content}')
    return userkey
def get_pm_conversation(kwargs):
    # Probe the PM conversations endpoint purely to validate the userkey.
    # Returns 'ok' on HTTP 200, the numeric Wykop error code on HTTP 401
    # (11 is treated as "stale key" by check_usrkey_isvalid), and 'err'
    # for anything else (including network failures).
    url = f"https://a2.wykop.pl/Pm/ConversationsList/{kwargs['login']}/appkey/{kwargs['appkey']}/userkey/{kwargs['usrkey']}/"
    tajny = f"{kwargs['secret']}{url}"
    try:
        r = requests.get(url, headers=sign_data(tajny))
        if r.status_code == 200:
            content = r.json()
            status = 'ok'
        elif r.status_code == 401:
            # 401 normally carries a JSON error body; guard against a
            # non-JSON or differently-shaped response.
            try:
                content = r.json()
                status = content['error']['code']
                logging.warning(f"Token nieaktualny {content['error']['message_en']}")
            except Exception as e:
                logging.error(f"Dunno {e}")
                status = 'err'
        else:
            content = r.json()
            logging.warning(f"Dunno {content}")
            status = 'err'
    except Exception as e:
        status = 'err'
        logging.error(f'{e}')
    return status
def load_file(fname):
    """Unpickle and return the auth data stored in *fname*.

    Returns "" when the file cannot be opened (missing/unreadable), as
    before.  Unpickling errors still propagate, but the file handle is
    now always closed -- the original leaked it when pickle.load raised.
    """
    try:
        f = open(fname, "rb")
    except Exception as e:
        logging.error(f"{e}")
        return ""
    with f:
        # logging.info(f'Otwarto plik {fname}')
        return pickle.load(f)
def save_file(fname, new_parms):
    """Pickle *new_parms* to *fname*; failures are logged, never raised."""
    try:
        with open(fname, "wb") as handle:
            pickle.dump(new_parms, handle)
        logging.info(f'Zapisano plik {fname}')
    except Exception as e:
        logging.error(f"{e}")
def update_usr_key(fname, kwargs):
    """Fetch a fresh userkey, store it in kwargs and persist to *fname*.

    Returns True on success, False when the token could not be obtained.
    """
    fresh = get_token(kwargs)
    if fresh == 'err':
        return False
    kwargs['usrkey'] = fresh
    save_file(fname, kwargs)
    logging.info(f'Wypok token updated')
    return True
|
[
"46565897+a000b@users.noreply.github.com"
] |
46565897+a000b@users.noreply.github.com
|
ceb3047ae6a58c65e8f35948042fcd27b2054788
|
c4b6f8e3b8b22300525d6a8d232aef7c5d3d9b97
|
/codpy/detector.py
|
3890325d2b580d71103e3bd280b885092dd44c93
|
[
"MIT"
] |
permissive
|
GNiklas/codpy
|
b8f8707d422537021621697a7553cee22453c544
|
ad955f1567f4d0c93731a08eda49ad5123ed1204
|
refs/heads/master
| 2023-07-04T02:16:33.453472
| 2021-08-14T10:50:45
| 2021-08-14T10:50:45
| 393,007,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,905
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 5 13:32:09 2021
@author: niklas
"""
import os
import sys
import numpy as np
import cv2
import codpy.file_handling as fh
from codpy.selector import Selector
class Detector(Selector):
    """
    Class of basic object detector. Inherits from Selector class.
    Has to be inherited and specified for use.

    Colored contours are recognized by comparing each contour's mean HSV
    hue against a reference window [meanRefH - factor*stdRefH,
    meanRefH + factor*stdRefH].
    """
    def __init__(self,
                 meanRefH = 150,
                 stdRefH = 10,
                 factor = 1.,
                 boxSize = 10,
                 lineWidth = 2):
        """
        Constructor.
        Parameters
        ----------
        meanRefH : float, optional
            mean of reference H color value. The default is 150.
        stdRefH : float, optional
            standard deviation of reference H color value. The default is 10.
        factor : float, optional
            color limit factor in colored contour detection.
        boxSize : int, optional
            side length of bounding boxes. The default is 10.
        lineWidth : int, optional
            line width of bounding boxes. The default is 2.
        Returns
        -------
        None.
        """
        # call inherited constructor (box drawing settings live in Selector)
        Selector.__init__(self, boxSize, lineWidth)
        # additional variables for color detection
        self.meanRefH = meanRefH
        self.stdRefH = stdRefH
        self.factor = factor
    def escape(self):
        """
        overload escape method to include saving detection parameters.

        Persists results and parameters, then terminates the process.
        Returns
        -------
        None.
        """
        # close remaining windows
        cv2.destroyAllWindows()
        # save results and used parameters to files
        # NOTE(review): self.outDir and self.results are presumably set up
        # by the inherited Selector -- confirm against selector.py.
        fh.saveResults(self.outDir, self.results)
        self.saveParameters(self.outDir)
        # exit process
        sys.exit("Manually exited script.")
    def selectColObjCen(self,
                        imgIn,
                        contours,
                        centers):
        """
        Select centers of colored objects. Mean and standard deviation of reference
        H color value will be used to distinguish from non-colored contours.
        Parameters
        ----------
        imgIn : numpy array
            input image (BGR, as loaded by OpenCV).
        contours : list
            object contours.
        centers : list
            object centers (parallel to *contours*).
        Returns
        -------
        uncObjCen : list
            centers of uncolored objects.
        colObjCen : list
            centers of colored objects.
        """
        # convert input image to grayscale (only used for the mask shape)
        imgGray = cv2.cvtColor(imgIn,
                               cv2.COLOR_BGR2GRAY)
        # convert input image to HSV scale
        imgHSV = cv2.cvtColor(imgIn,
                              cv2.COLOR_BGR2HSV)
        # lists of centers of uncolored and colored objects
        uncObjCen = centers.copy()
        colObjCen = []
        # iterate over all object centers
        for i in range(len(centers)):
            # initialize and fill grayscale contour mask
            mask = np.zeros(imgGray.shape, np.uint8)
            cv2.drawContours(mask, contours[i], 0, 255, -1)
            # get mean HSV colors and mean H value for each object
            meanColor = cv2.mean(imgHSV, mask = mask)
            meanH = meanColor[0]
            # select colored objects according to reference mean H value
            # NOTE(review): the hue window does not wrap around the 0/180
            # boundary -- fine for the default meanRefH=150, but confirm
            # before using reddish reference hues near the wrap point.
            if ((meanH >= (self.meanRefH - self.factor * self.stdRefH)) and
                (meanH <= (self.meanRefH + self.factor * self.stdRefH))):
                colObjCen.append(centers[i])
        # delete colored objects from uncolored ones
        for x in colObjCen:
            uncObjCen.remove(x)
        return uncObjCen, colObjCen
    def saveParameters(self, outDir):
        """
        save used detection parameters
        Parameters
        ----------
        outDir : string
            path to output directory.
        Returns
        -------
        None.
        """
        header = 'used detection parameters\n'
        # check, if output dir exists
        if not os.path.isdir(outDir):
            os.mkdir(outDir)
        # go to output dir
        os.chdir(outDir)
        # write parameters to file (one "name: value" line per parameter)
        parFile = open('para.dat', 'w')
        parFile.write(header)
        line = 'meanRefH: ' + str(self.meanRefH) + '\n'
        parFile.write(line)
        line = 'stdRefH: ' + str(self.stdRefH) + '\n'
        parFile.write(line)
        line = 'factor: ' + str(self.factor) + '\n'
        parFile.write(line)
        parFile.close()
        # go back to working dir
        # NOTE(review): chdir('..') assumes outDir is a single-level
        # relative path -- a nested or absolute outDir would not return
        # to the original working directory.
        os.chdir('..')
|
[
"gnthrn@gmail.com"
] |
gnthrn@gmail.com
|
e01c3b9e135e6767e80e69e93678a8c30583d54b
|
a797793842f433251d2ab0bafb0ebe800b89a076
|
/z7.3.py
|
04495ae9dfa0056a6b01a8be77740c3a7360d223
|
[] |
no_license
|
irhadSaric/Instrukcije
|
b2f576bceb7e75f5fa65bfef99c9cde53d597b32
|
9ac8979b824babdeef3712ab9d23c764536d57b0
|
refs/heads/master
| 2020-09-28T09:00:08.389651
| 2020-02-01T20:33:59
| 2020-02-01T20:33:59
| 226,740,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
# Read five integers from stdin and print the middle (third smallest) one.
values = []
for _ in range(5):
    values.append(int(input()))
values.sort()
print(values[2])
|
[
"irhad.saric@hotmail.com"
] |
irhad.saric@hotmail.com
|
59b86e124976b088f7ebf9dc8fc1e8a4efe667d5
|
e1308eb60ec76b7548936f5e66aeb9975bfc2710
|
/hello20.py
|
612bb8efed8871fb917836c99f3f04162d46e56c
|
[] |
no_license
|
DasomJung24/pythonpractice1
|
ac2f5b81748202d0b141de0ce48e3741984d1049
|
f166bba1a9ae1d2d77fc93f256feb0e0e92f3b6a
|
refs/heads/master
| 2022-11-15T21:17:42.311364
| 2020-07-16T22:08:58
| 2020-07-16T22:08:58
| 280,268,595
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
import random

# Prompt for an inclusive range [a, b] and report a random pick from it.
print('첫 번째 숫자를 입력하세요.')
a = int(input())
print('두 번째 숫자를 입력하세요.')
b = int(input())
c = random.randint(a, b)
print(str(a) + '부터 ' + str(b) + '까지에서 무작위로 선택된 숫자는 ' + str(c) + '입니다.')
|
[
"noreply@github.com"
] |
noreply@github.com
|
0897d325bbb50a683aa7d27391d7d351351b1a3d
|
aa5d710a177fa4e8554cd99911472148004169ba
|
/sample-menu/test.py
|
a15912b7215a569f6a11a5a918a79478f20d5513
|
[] |
no_license
|
haxmanster/scripts
|
4965adb120b0bd92a31ae08a17952d312718d378
|
11ac0b9eb64bad0c744384f5a11b46ec3ce0fc1b
|
refs/heads/master
| 2020-03-19T09:24:11.893321
| 2019-02-20T14:06:42
| 2019-02-20T14:06:42
| 136,285,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
import os
import sys
def menu():
    # Interactive installer menu: prints the banner, reads a choice, runs the
    # matching apt command; unrecognized input clears the screen and
    # re-displays the menu via recursion.  Option 11 exits.
    print(" Welcome in initiation script ")
    print("""
    **************************************************************
    *============================================================*
    *| Master control unit xD |*
    *============================================================*
    **************************************************************
    * [1] Install python modules (flask,pip,etc) *
    * [2] Install gitlab-ce *
    * [3] Install gcc modules *
    * [4] Install gcc modules *
    * [5] Install gcc modules *
    * [6] Install gcc modules *
    * [7] Install gcc modules *
    * [8] Install gcc modules *
    * [9] Install gcc modules *
    * [10] Install gcc modules *
    * [11] Exit this menu *
    **************************************************************
    """)
    choice = input("choose an option ")
    # NOTE(review): the commands below do not match the menu labels --
    # option 1 installs gcc-8 and option 2 installs mc; options 3-10 are
    # unimplemented.  Confirm the intended commands.
    if choice == "1":
        os.system("apt install gcc-8")
    elif choice == "2":
        os.system("apt install mc")
    elif choice == "11":
        print("bye")
    else:
        os.system("clear")
        menu()
menu()
|
[
"grzegorz.wolyniec@tieto.com"
] |
grzegorz.wolyniec@tieto.com
|
7571d6a9631ad006e432dc159f89bde447fbbd5a
|
f9833fa4e11060e0bc824fb26499a49b26d04584
|
/pagerank/pagerank.py
|
814f6888343ab0412655df42b8071b0f757384e4
|
[] |
no_license
|
konstantingl/CS50AI
|
dc3de70c5a8bfa812537bfd83783311adeffeaf9
|
181ab793b2d3c10800b55d76fb8e8c02b08eb722
|
refs/heads/master
| 2022-11-05T09:30:11.686979
| 2020-06-20T12:11:46
| 2020-06-20T12:11:46
| 263,000,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,484
|
py
|
import os
import random
import re
import sys
import numpy as np
DAMPING = 0.85
SAMPLES = 100
def main():
    # CLI entry point: crawl the corpus directory given on the command line,
    # then print PageRank estimates from both the sampling-based and the
    # iterative method for comparison.
    if len(sys.argv) != 2:
        sys.exit("Usage: python pagerank.py corpus")
    corpus = crawl(sys.argv[1])
    ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
    print(f"PageRank Results from Sampling (n = {SAMPLES})")
    for page in sorted(ranks):
        print(f" {page}: {ranks[page]:.4f}")
    ranks = iterate_pagerank(corpus, DAMPING)
    print(f"PageRank Results from Iteration")
    for page in sorted(ranks):
        print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
    """
    Parse a directory of HTML pages and check for links to other pages.

    Return a dictionary where each key is a page, and values are
    a set of all other pages in the corpus that are linked to by the page.
    """
    pages = dict()
    # Extract all links from HTML files
    for filename in os.listdir(directory):
        if not filename.endswith(".html"):
            continue
        with open(os.path.join(directory, filename)) as f:
            contents = f.read()
            links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
            # Exclude links from a page to itself.  (This line was corrupted
            # in the source -- `set(links) - (unknown)` -- and is restored to
            # the intended subtraction of the page's own filename.)
            pages[filename] = set(links) - {filename}
    # Only include links to other pages in the corpus
    for filename in pages:
        pages[filename] = set(
            link for link in pages[filename]
            if link in pages
        )
    return pages
def transition_model(corpus, page, damping_factor):
    """
    Return a probability distribution over which page to visit next,
    given a current page.

    With probability `damping_factor` one of `page`'s outgoing links is
    followed; with probability `1 - damping_factor` any corpus page is
    chosen.  A page with no outgoing links yields a uniform distribution
    over the entire corpus.
    """
    total = len(corpus)
    links = corpus.get(page)
    if len(links) == 0:
        # Dead end: every page in the corpus is equally likely.
        return {candidate: 1 / total for candidate in corpus}
    base = (1 - damping_factor) / total
    follow = damping_factor / len(links)
    return {
        candidate: (base + follow) if candidate in links else base
        for candidate in corpus
    }
def sample_pagerank(corpus, damping_factor, n):
    """
    Return PageRank values for each page by sampling `n` pages
    according to transition model, starting with a page at random.

    Return a dictionary where keys are page names, and values are
    their estimated PageRank value (a value between 0 and 1). All
    PageRank values sum to 1.

    Fix: the original counted occurrences with `object in samples[i]`,
    which is a *substring* test on page names -- e.g. "1.html" was also
    counted for every sample of "11.html".  Counting now uses exact
    equality.  (The builtin-shadowing name `object` is gone as well.)
    """
    samples = [random.choice(list(corpus.keys()))]
    for _ in range(n - 1):
        model = transition_model(corpus, samples[-1], damping_factor)
        # Draw the next page according to the transition distribution.
        pick = np.random.choice(list(model.keys()), 1, p=list(model.values()))
        samples.append(pick[0])
    return {
        page: sum(1 for s in samples if s == page) / len(samples)
        for page in corpus
    }
def iterate_pagerank(corpus, damping_factor):
    """
    Return PageRank values for each page by iteratively updating
    PageRank values until convergence (every per-page change < 0.001).

    Return a dictionary where keys are page names, and values are
    their estimated PageRank value (a value between 0 and 1).

    Fix: the original returned the *previous* iteration's ranks upon
    convergence; the freshly computed (converged) values are returned
    instead.  As in the original, pages without outgoing links simply
    contribute no rank (their mass leaks), so values may sum to < 1
    for corpora with dangling pages.
    """
    total = len(corpus)
    ranks = {page: 1 / total for page in corpus}
    while True:
        new_ranks = {}
        for page in corpus:
            # Rank flowing into `page` from every page that links to it,
            # computed synchronously from the previous iteration's ranks.
            share = sum(
                ranks[source] / len(links)
                for source, links in corpus.items()
                if page in links
            )
            new_ranks[page] = (1 - damping_factor) / total + damping_factor * share
        if all(abs(new_ranks[p] - ranks[p]) < 0.001 for p in corpus):
            return new_ranks
        ranks = new_ranks
if __name__ == "__main__":
main()
|
[
"konstantingl156@gmail.com"
] |
konstantingl156@gmail.com
|
456c590d5d9b7436ae5408d60532e45bf5df7d77
|
b761c9c8775d5a08b3b9be6d8300131a4f6a249f
|
/spring1819_assignment1/assignment1/cs231n/classifiers/neural_net.py
|
ea28f209bbe508af15877b227b81933dd0badea5
|
[] |
no_license
|
DizzyYunxuan/CS231n
|
2599c68ccfcae1ba7dc244440eb30abc9f9340df
|
3684b83639c49a1060437901da242d8cccadef34
|
refs/heads/master
| 2020-09-13T16:40:30.364281
| 2019-11-20T03:49:50
| 2019-11-20T03:49:50
| 222,844,025
| 1
| 0
| null | 2019-11-20T03:45:07
| 2019-11-20T03:36:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,832
|
py
|
from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
import matplotlib.pyplot as plt
# from past.builtins import xrange
class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network. The net has an input dimension of
    N, a hidden layer dimension of H, and performs classification over C classes.
    We train the network with a softmax loss function and L2 regularization on the
    weight matrices. The network uses a ReLU nonlinearity after the first fully
    connected layer.
    In other words, the network has the following architecture:
    input - fully connected layer - ReLU - fully connected layer - softmax
    The outputs of the second fully-connected layer are the scores for each class.

    NOTE: this is a CS231n assignment scaffold -- the sections between the
    START/END OF YOUR CODE markers are intentionally unimplemented, so
    loss() and predict() return None until the student fills them in.
    """
    def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        """
        Initialize the model. Weights are initialized to small random values and
        biases are initialized to zero. Weights and biases are stored in the
        variable self.params, which is a dictionary with the following keys:
        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)
        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        """
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def loss(self, X, y=None, reg=0.0):
        """
        Compute the loss and gradients for a two layer fully connected neural
        network.
        Inputs:
        - X: Input data of shape (N, D). Each X[i] is a training sample.
        - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
          an integer in the range 0 <= y[i] < C. This parameter is optional; if it
          is not passed then we only return scores, and if it is passed then we
          instead return the loss and gradients.
        - reg: Regularization strength.
        Returns:
        If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
        the score for class c on input X[i].
        If y is not None, instead return a tuple of:
        - loss: Loss (data loss and regularization loss) for this batch of training
          samples.
        - grads: Dictionary mapping parameter names to gradients of those parameters
          with respect to the loss function; has the same keys as self.params.
        """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape
        # Compute the forward pass
        scores = None
        #############################################################################
        # TODO: Perform the forward pass, computing the class scores for the input. #
        # Store the result in the scores variable, which should be an array of      #
        # shape (N, C).                                                             #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        pass
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # If the targets are not given then jump out, we're done
        if y is None:
            return scores
        # Compute the loss
        loss = None
        #############################################################################
        # TODO: Finish the forward pass, and compute the loss. This should include  #
        # both the data loss and L2 regularization for W1 and W2. Store the result  #
        # in the variable loss, which should be a scalar. Use the Softmax           #
        # classifier loss.                                                          #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        pass
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Backward pass: compute gradients
        grads = {}
        #############################################################################
        # TODO: Compute the backward pass, computing the derivatives of the weights #
        # and biases. Store the results in the grads dictionary. For example,       #
        # grads['W1'] should store the gradient on W1, and be a matrix of same size #
        #############################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        pass
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        return loss, grads
    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=5e-6, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.
        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array f shape (N,) giving training labels; y[i] = c means that
          X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train / batch_size, 1)
        # Use SGD to optimize the parameters in self.model
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for it in range(num_iters):
            X_batch = None
            y_batch = None
            #########################################################################
            # TODO: Create a random minibatch of training data and labels, storing  #
            # them in X_batch and y_batch respectively.                             #
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
            pass
            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
            # Compute loss and gradients using the current minibatch
            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
            loss_history.append(loss)
            #########################################################################
            # TODO: Use the gradients in the grads dictionary to update the         #
            # parameters of the network (stored in the dictionary self.params)      #
            # using stochastic gradient descent. You'll need to use the gradients   #
            # stored in the grads dictionary defined above.                         #
            #########################################################################
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
            pass
            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
            # Every epoch, check train and val accuracy and decay learning rate.
            if it % iterations_per_epoch == 0:
                # Check accuracy
                train_acc = (self.predict(X_batch) == y_batch).mean()
                val_acc = (self.predict(X_val) == y_val).mean()
                train_acc_history.append(train_acc)
                val_acc_history.append(val_acc)
                # Decay learning rate
                learning_rate *= learning_rate_decay
        return {
          'loss_history': loss_history,
          'train_acc_history': train_acc_history,
          'val_acc_history': val_acc_history,
        }
    def predict(self, X):
        """
        Use the trained weights of this two-layer network to predict labels for
        data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.
        Inputs:
        - X: A numpy array of shape (N, D) giving N D-dimensional data points to
          classify.
        Returns:
        - y_pred: A numpy array of shape (N,) giving predicted labels for each of
          the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
          to have class c, where 0 <= c < C.
        """
        y_pred = None
        ###########################################################################
        # TODO: Implement this function; it should be VERY simple!                #
        ###########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        pass
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        return y_pred
|
[
"516488199@qq.com"
] |
516488199@qq.com
|
2eed65682858de5a475169c5a87692ea1fe981ec
|
b66ff47bf4b24682663933a9169008616d5cc840
|
/conceitos/12_break.py
|
800727b5b948b927b8e6306cd467fbdedeb573cb
|
[] |
no_license
|
joseney/python_structural_engineers
|
3bf68dfd29bcfabc748b7d756e9cf27b5bb93c91
|
7c45c411f5db66a5f4543c186e27940c419a880d
|
refs/heads/master
| 2023-09-03T17:45:10.154392
| 2021-09-27T18:45:48
| 2021-09-27T18:45:48
| 407,128,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
for i in range (5):
if i ==3:
break
print(i)
|
[
"joseney_moro@yahoo.com.br"
] |
joseney_moro@yahoo.com.br
|
dcbb7d2c6c118a3060c64cfee2dae6fd5aa40e9d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2453486_1/Python/dvolgyes/solution.py
|
3336b278088a91573eff663adc3c60e306e631a1
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
#!/usr/bin/python
#
# Google codejam solution
# David Volgyes
#
#
import sys, math, os
#import mpmath as mp # see https://code.google.com/p/mpmath/
import numpy as np # see http://www.numpy.org/
#import sympy as sp # see https://code.google.com/p/sympy/
import networkx as nx # see http://networkx.github.com/
import re
import random
T=int(sys.stdin.readline())  # number of test cases
fieldX=np.zeros( (4,4), dtype=np.uint8 )  # occupancy mask for X ('T' counts for both players)
fieldO=np.zeros( (4,4), dtype=np.uint8 )  # occupancy mask for O
def solve(x):
    # A player has won when any row, any column, or either diagonal of the
    # 4x4 occupancy mask (0/1 entries) is completely filled.
    if x.all(axis=1).any() or x.all(axis=0).any():
        return True
    if x.diagonal().all():
        return True
    if np.fliplr(x).diagonal().all():
        return True
    return False
# Python 2 script body: for each test case, read a 4x4 board and classify
# the game state (X won / O won / not completed / draw).
for i in range(0,T):
    fieldX.fill(0)
    fieldO.fill(0)
    counter=0
    empty=False
    while counter<4:
        sline=sys.stdin.readline().strip()
        if len(sline)<4:continue  # skip blank separator lines between cases
        for j in range(0,4):
            if sline[j]=='X' or sline[j]=='T':
                fieldX[counter,j]=1  # 'T' (the joker cell) counts for both players
            if sline[j]=='O' or sline[j]=='T':
                fieldO[counter,j]=1
                continue  # O/T cell cannot also be '.', so skipping is harmless
            if sline[j]=='.':
                empty=True
        counter+=1
    if solve(fieldX):
        print "Case #%i: X won" % (i+1,)
        continue
    if solve(fieldO):
        print "Case #%i: O won" % (i+1,)
        continue
    if empty:
        print "Case #%i: Game has not completed" % (i+1,)
        continue
    print "Case #%i: Draw" % (i+1,)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
36551e18a8c2beca2f9d1d8520f858a6384900e1
|
a9d6114af0f5d622d74f62f402bc252e7f7742c4
|
/card_img.py
|
fe5d79c95716bda85ca2ed209f6e00702bccc64c
|
[
"Apache-2.0"
] |
permissive
|
ajayjain/OpenCV-Projects
|
705a1d5efce9556aee67e12c53d6cfe130a06d89
|
4ef8e21794665e69bfb169b72e1fe5cfc22ac737
|
refs/heads/master
| 2021-01-11T04:15:25.491492
| 2014-06-28T18:53:59
| 2014-06-28T18:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
from __future__ import print_function
import numpy as np
import cv2
import random
FILENAME = "./img/cards.png"  # input image containing the playing cards
WINDOW_NAME = "Cards"  # title of the OpenCV display window
NUMCARDS = 4  # NOTE(review): unused below -- apparently the expected card count
def centroid(moment):
    """Return the (x, y) centroid of an image-moment dict, using floor
    division (m10/m00, m01/m00)."""
    area = moment['m00']
    return (moment['m10'] // area, moment['m01'] // area)
def draw_centroid(c, im):
    """Paint a 6x6 marker (BGR value [1, 1, 255], i.e. red-ish) centered at
    c = (x, y) into image *im*, clipping at the image borders.

    Fix: the loops used Python-2-only `xrange`, although the file imports
    print_function and uses print() calls; `range` behaves identically
    here and works on both Python 2 and 3.
    """
    max_rows = im.shape[0]
    max_cols = im.shape[1]
    for dx in range(-3, 3):
        for dy in range(-3, 3):
            x = c[0] + dx
            y = c[1] + dy
            # Bounds check before writing (note: x/y > 0 excludes row/col 0,
            # preserved from the original).
            if (x > 0 and y > 0 and y < max_rows and x < max_cols):
                im[y, x] = np.array([1, 1, 255])
# Script body: threshold the card image, outline every contour in a random
# color, and mark each contour's centroid.
# cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.namedWindow(WINDOW_NAME)
# im = cv2.imread(FILENAME, cv2.IMREAD_GRAYSCALE)
im = cv2.imread(FILENAME)
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(imgray, (1, 1), 1000)
flag, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)
# NOTE(review): 2-value unpacking matches OpenCV 2.x/4.x; OpenCV 3.x returns
# three values here -- confirm the targeted OpenCV version.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=cv2.contourArea, reverse=True) # cv2.contourArea is a lambda that
# calculates the area of a contour based a list element
# reverse=True: descending order (biggest first)
# contours = contours[:8] # 4 biggest cosntours (4 cards)
print("Contours: ", len(contours))
colorim = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
for contour in contours:
    cv2.drawContours(
        colorim,
        [contour],
        0, # which contour to draw (-1 is all)
        # (255, 0, 0),
        (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)), # color (BGR)
        2 # thickness
    )
moments = map(cv2.moments, contours)
centroids = map(centroid, moments)
# NOTE(review): indexing a map object only works on Python 2, where map()
# returns a list; on Python 3 these would need list(...) wrappers.
print(centroids[0])
print([colorim[centroid[0], centroid[1]] for centroid in centroids])
for centroid in centroids:
    draw_centroid(centroid, colorim)
cv2.imshow(WINDOW_NAME, colorim)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"ajayjain318@gmail.com"
] |
ajayjain318@gmail.com
|
68a41b87ce93babc8cc9ff31ee191ed3942d9e11
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fv/afabricextconnp.py
|
6c8a4c7ee71ed4b11370d170b02722427f256c7d
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,912
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AFabricExtConnP(Mo):
    """Abstract Intrasite/Intersite fabric external connectivity profile.

    Generator-emitted metadata for the APIC managed-object class
    ``fvAFabricExtConnP`` (abstract; concrete subclasses are
    ``fv.FabricExtConnP`` and ``fv.FabricExtConnPDef``).  Per the file
    header this module is machine-generated and should not be hand-edited.
    """
    meta = ClassMeta("cobra.model.fv.AFabricExtConnP")
    # ---- class-level metadata ------------------------------------------
    meta.isAbstract = True
    meta.moClassName = "fvAFabricExtConnP"
    meta.moClassName = "fvAFabricExtConnP"  # NOTE(review): duplicate assignment emitted by the generator; harmless
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstract Intrasite/Intersite Profile"
    meta.writeAccessMask = 0x0
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    # ---- containment / inheritance relationships -----------------------
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnP")
    meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnPDef")
    meta.rnPrefixes = [
    ]
    # ---- property metadata ---------------------------------------------
    # childAction: implicit bookkeeping of pending operations on children.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    # descr: free-form, configurable description (max 128 chars).
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    # dn: distinguished name; assigned at creation, immutable afterwards.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # id: implicit fabric identifier.
    prop = PropMeta("str", "id", "id", 21395, PropCategory.REGULAR)
    prop.label = "Fabric ID"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("id", prop)
    # name: configurable object name (max 64 chars).
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    # nameAlias: configurable display alias (max 63 chars).
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    # ownerKey / ownerTag: opaque, configurable owner bookkeeping strings.
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    # rn: relative name; assigned at creation, immutable afterwards.
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # rt: implicit global EVPN route target.
    prop = PropMeta("str", "rt", "rt", 21396, PropCategory.REGULAR)
    prop.label = "Global EVPN Route Target"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("rt", prop)
    # status: implicit lifecycle marker (created/modified/deleted).
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Create the MO under *parentMoOrDn*; this class has no naming properties."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
cc3039604583f0e10d6a227e330a3757e870c8ba
|
a7a30fae1e0fd4e199a365738f3f89d1fcab68eb
|
/git guide.py
|
c8db64d80198da9838adbe3b1a65c0627f1832b2
|
[] |
no_license
|
Chigzzer/Libraryguides
|
7d6cf4ac435b0161827b0476e7c0140446ae3b17
|
f0ba5a6a2f2d1beac2f1f2413ae4efa33aca4e97
|
refs/heads/master
| 2022-11-03T06:09:35.395599
| 2022-10-21T22:43:40
| 2022-10-21T22:43:40
| 144,333,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
git init # initializes a new Git repository in the folder where you opened bash
touch xxxx.zz # creates a file named xxxx.zz (touch is a shell command, not a git subcommand)
git add filename # adds the file to be committed
git add *.extension # adds all files with that extension to be committed
git add . # adds all files in folder to be committed.
git commit -m 'Commit Message' # commits the files to the branch with the message in between ""
git push # pushes the commited branch to github online
git status # List the files you've changed and those you still need to add or commit:
git remote add origin <server> # If you haven't connected your local repository to a remote server, add the server to be able to push to it
git branch # list all the branches
git checkout -b <branchname> # create a new branch and change to it
git checkout <branchname> # changes to new branch
git branch -d <branchname> # delete featured branch
git pull # Fetch and merge changes on the remote server to your working directory:
git merge <branchname> # To merge a different branch into your active branch:
touch .gitignore # creates a file which you edit, adding names of files for git to ignore when adding/committing (touch is a shell command)
git remote add origin xxxx # registers the remote repository at xxxx under the name origin
git push origin branch name # pushes branch onto origin location
|
[
"chiraag.chandarana@gmail.com"
] |
chiraag.chandarana@gmail.com
|
c6a647a7f1b4bd9c459dfe271e8c98e684869d58
|
9dacc94f2f6819536286c1e6e17196d1edaa980e
|
/ocr.py
|
c2613bb627ca3a87d53755a155ef1653e25af25c
|
[] |
no_license
|
aaminu/OCR
|
505f0807958ce428dc4ed483420abefda21a635c
|
06211f74f74f394c5304361005588aca18ebfae6
|
refs/heads/master
| 2023-02-19T00:43:12.360620
| 2021-01-14T12:37:15
| 2021-01-14T12:37:15
| 265,620,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
"""
performs OCR and returns Text from File
"""
# import libraries
import os
import sys
import requests
from pathlib import Path
from auth import auth_env
def ocr(file_name=None, file_url=None):
    """Send an image to the Azure Computer Vision OCR endpoint.

    Exactly one of the arguments should be given:
      file_name: name of an image file located in the parent of the
                 current working directory.
      file_url:  publicly reachable URL of an image.

    Returns the decoded JSON response from the API.
    Raises requests.HTTPError on a non-2xx API response; exits the
    process if neither argument is supplied.
    """
    endpoint, subscription_key = auth_env()
    # OCR link and parameters
    ocr_link = endpoint + "vision/v2.0/ocr"
    parameters = {"language": 'unk', "detectOrientation": True}
    if file_name:
        # Resolve the file relative to the parent of the working directory.
        file_path = Path.cwd().parent / file_name
        # Context manager guarantees the handle is closed even on error
        # (the original `open(...).read()` leaked the file handle).
        with open(file_path, 'rb') as image_file:
            file_data = image_file.read()
        request_header = {"Content-Type": 'application/octet-stream', "Ocp-Apim-Subscription-Key": subscription_key}
        # Post to API
        response = requests.post(ocr_link, params=parameters, headers=request_header, data=file_data)
        response.raise_for_status()
        return response.json()
    elif file_url:
        data = {'url': file_url}
        request_header = {"Ocp-Apim-Subscription-Key": subscription_key}
        # Post to API
        response = requests.post(ocr_link, params=parameters, headers=request_header, json=data)
        response.raise_for_status()
        return response.json()
    else:
        sys.exit('Please provide either file-name or file-url')
def ocr_text_retriever(result):
    """Extract the recognised text from a Computer Vision OCR JSON response.

    Only the first region of the response is read (matching the original
    behaviour).  Words within a line are joined by spaces and each line is
    terminated by a '\\n' token; all tokens are joined with single spaces.

    Fixes over the original: the input dict is no longer mutated (the old
    version popped entries off result['regions'][0]['lines']), and the
    recursion -- which could hit the recursion limit on long documents --
    is replaced by iteration.
    """
    lines = result['regions'][0]['lines']  # access the first region's lines
    tokens = []
    for line in lines:
        tokens.extend(word.get('text') for word in line['words'])
        tokens.append('\n')  # line terminator token, as in the original
    return ' '.join(tokens)
if __name__ == '__main__':
    # Interactive entry point: ask whether to OCR a local file or an online
    # image, run the request, then print the recognised text.
    valid_choices = ['d', 'o']
    query = ''
    while query.lower() not in valid_choices:
        query = input('\nHello, do wish to scan from your desktop or an online image.\nFor Desktop, please enter '
                      'D and press Enter.\nFor online image, please enter O and press Enter'
                      '\nInput your preference here: ')
        if query.lower() not in valid_choices:
            print('\nInput error, Please re-enter your option correctly\n')
    if query.lower() == 'd':
        print('\nPlease copy file to your parent directory, e.g. Desktop, Documents...\n')
        image = input('\nPlease enter file name with extension(e.g "fireflies.jpg"): ')
        results = ocr(file_name=image)
    else:
        url = input('\nPlease enter file-url: ')
        results = ocr(file_url=url)
    print('\nThe content of your document is:\n\n', ocr_text_retriever(results))
|
[
"24601677+aaminu@users.noreply.github.com"
] |
24601677+aaminu@users.noreply.github.com
|
686705a2f26164659e8b02756037a88a40e2e1de
|
d76fda42dd6178537618091fc2ac8b82d7d5b7d8
|
/trainer_preid.py
|
8c864397cec0126ee3adcbf3f7172a5e4f010b6e
|
[] |
no_license
|
oneysmall/CVTC
|
35bf788f7a765111fce3678cbc4c3f5d0b0d471d
|
5fbfdc3ab139db67bc95732c26213f567cf74377
|
refs/heads/master
| 2020-06-25T01:47:11.742722
| 2019-07-23T06:57:13
| 2019-07-23T06:57:13
| 199,160,110
| 6
| 3
| null | 2019-07-27T12:08:37
| 2019-07-27T12:08:37
| null |
UTF-8
|
Python
| false
| false
| 5,041
|
py
|
import argparse
import os
import tensorflow as tf
from tensorflow.contrib.learn import RunConfig
from datasets.DatasetFactory import DatasetFactory
from helper.model_helper import get_model_function, get_input_function
from nets import nets_factory
# GPU selection / memory configuration for the TensorFlow session.
# NOTE(review): this is a plain Python variable, not an environment variable;
# to actually select a GPU it would need to be set in os.environ before TF
# initialises -- confirm intent.
CUDA_VISIBLE_DEVICES=0
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7  # cap TF at 70% of GPU memory
session = tf.Session(config=config)
slim = tf.contrib.slim
def start_training(data_directory, dataset_name, output_directory, network_name, batch_size, learning_rate, batch_threads, num_epochs, initial_checkpoint, checkpoint_exclude_scopes,ignore_missing_variables, trainable_scopes, not_trainable_scopes, fixed_learning_rate, learning_rate_decay_rate, do_evaluation, learning_rate_decay_steps):
    """Build a tf.estimator.Estimator for *network_name* and train it for
    *num_epochs* epochs over the 'train' split of *dataset_name*."""
    dataset_factory = DatasetFactory(dataset_name=dataset_name, data_directory=data_directory)

    # Decay steps default to one epoch's worth of batches when no explicit
    # step count was supplied on the command line.
    if learning_rate_decay_steps is None:
        decay_samples = dataset_factory.get_dataset('train').get_number_of_samples()
    else:
        decay_samples = learning_rate_decay_steps
    model_params = {
        'learning_rate': learning_rate,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'learning_rate_decay_steps': decay_samples // batch_size,
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)

    # Instantiate Estimator
    model_fn = get_model_function(output_directory, network_name,
                                  dataset_factory.get_dataset('train').num_classes(),
                                  initial_checkpoint, checkpoint_exclude_scopes,
                                  ignore_missing_variables, trainable_scopes,
                                  not_trainable_scopes, dataset_name=dataset_name)
    estimator = tf.estimator.Estimator(model_fn=model_fn, params=model_params,
                                       model_dir=output_directory, config=run_config)

    image_size = nets_factory.get_input_size(network_name)
    dataset = dataset_factory.get_dataset('train')
    #evaluation_summary_writer = get_evaluation_summary_writer(do_evaluation, output_directory)
    for epoch in range(num_epochs):
        run_training(dataset=dataset, batch_size=batch_size, batch_threads=batch_threads,
                     epoch=epoch, estimator=estimator, num_epochs=num_epochs, image_size=image_size)
    print('Finished training')
def run_training(dataset, batch_size, batch_threads, epoch, estimator, num_epochs, image_size):
    """Run one epoch of estimator training over *dataset* (shuffle enabled)."""
    print('\n\nRunning training of epoch %d of %d:\n' % (epoch + 1, num_epochs))
    input_fn = get_input_function(dataset, batch_size, batch_threads, True, image_size)
    estimator.train(input_fn=input_fn)
    print("-----------------------------------------")
    print('\nFinished Training epoch %d' % (epoch + 1))
def main():
    """Parse the command line and kick off training."""
    arg_parser = argparse.ArgumentParser()
    # Paths and dataset selection.
    arg_parser.add_argument('--output', help='Directory to write the output', dest='output_directory')
    arg_parser.add_argument('--data', help='Specify the folder with the images to be trained and evaluated', dest='data_directory')
    arg_parser.add_argument('--dataset-name', help='The name of the dataset')
    # Core training hyper-parameters.
    arg_parser.add_argument('--batch-size', help='The batch size', type=int, default=16)
    arg_parser.add_argument('--learning-rate', help='The learning rate', type=float, default=0.0001)
    arg_parser.add_argument('--batch-threads', help='The number of threads to be used for batching', type=int, default=8)
    arg_parser.add_argument('--num-epochs', help='The number of epochs to be trained', type=int, default=50)
    # Checkpoint / fine-tuning control.
    arg_parser.add_argument('--initial-checkpoint', help='The initial model to be loaded')
    arg_parser.add_argument('--checkpoint-exclude-scopes', help='Scopes to be excluded when loading initial checkpoint')
    arg_parser.add_argument('--trainable-scopes', help='Scopes which will be trained')
    arg_parser.add_argument('--not-trainable-scopes', help='Scopes which will not be trained')
    arg_parser.add_argument('--network-name', help='Name of the network')
    arg_parser.add_argument('--ignore-missing-variables', help='If missing variables should be ignored', action='store_true')
    # Learning-rate schedule.
    arg_parser.add_argument('--fixed-learning-rate', help='If set, no exponential learning rate decay is used', action='store_true')
    arg_parser.add_argument('--learning-rate-decay-rate', help='The base of the learning rate decay factor', type=float, default=0.96)
    arg_parser.add_argument('--no-evaluation', help='Do evaluation after every epoch', action='store_true')
    arg_parser.add_argument('--learning-rate-decay-steps', help='Steps after which the learning rate is decayed', type=int, default=None)
    cli_args = arg_parser.parse_args()

    print('Running with command line arguments:')
    print(cli_args)
    print('\n\n')
    # tf.logging.set_verbosity(tf.logging.INFO)
    if not os.path.exists(cli_args.output_directory):
        os.makedirs(cli_args.output_directory)
    start_training(cli_args.data_directory, cli_args.dataset_name, cli_args.output_directory, cli_args.network_name, cli_args.batch_size, cli_args.learning_rate, cli_args.batch_threads, cli_args.num_epochs,
                   cli_args.initial_checkpoint, cli_args.checkpoint_exclude_scopes, cli_args.ignore_missing_variables, cli_args.trainable_scopes, cli_args.not_trainable_scopes, cli_args.fixed_learning_rate,
                   cli_args.learning_rate_decay_rate, not cli_args.no_evaluation, cli_args.learning_rate_decay_steps)
    print('Exiting ...')


if __name__ == '__main__':
    main()
|
[
"xmlin1995@163.com"
] |
xmlin1995@163.com
|
767fbacecdf58994beab196a4d0a717ed5894287
|
20b00bc62644aa242c723c1a0bd37aa098b4926e
|
/e/map_vacc_eligible.py
|
17d436889cf7cccd5e1078805ad42365cd773dab
|
[] |
no_license
|
devdatalab/covid
|
0d7e82f0ff40746e2cb3bcf87fc803d1db60bb5b
|
a86c2d00d81eee6d26c343e05ae9ba1087fad47f
|
refs/heads/master
| 2023-04-16T14:20:55.060708
| 2022-10-26T15:42:23
| 2022-10-26T15:42:23
| 253,523,397
| 47
| 21
| null | 2020-06-19T16:00:29
| 2020-04-06T14:33:14
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,862
|
py
|
import os
import time
from collections import Counter
from pathlib import Path
from shutil import copyfile

import geopandas as gpd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rasterio
from rasterio.plot import show
from shapely.geometry import Point, LineString, box, Polygon
from IPython.core.display import display, HTML
# get the tmp and iec environment vars
TMP = Path(os.environ.get('TMP'))
IEC = Path(os.environ.get('IEC'))

# set some master parameters to make the font look good
mpl.rcParams['mathtext.fontset'] = 'custom'
mpl.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
mpl.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
mpl.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})

# this turns on latex, which makes font and number look really nice, but also
# forces latex syntax which can cause problems (can be set to False)
mpl.rc('text', usetex=True)

# this sets the dots per inch- it's the resolution the figure will render at.
# make this larger for more precise rendering, though larger sizes will take longer
mpl.rcParams['figure.dpi'] = 100

# location of the PC11 shapefiles used by every map below
_SHAPE_DIR = f"{os.environ['IEC1']}/gis/pc11"


def _load_merged(stata_file, how):
    """Read district-level data from TMP/*stata_file* and merge it onto the
    PC11 district shapefile on (state id, district id).

    how: pandas merge strategy -- "left" keeps all districts (all-India maps),
         "right" keeps only the districts present in the data (state maps).
    """
    vacc = pd.read_stata(os.path.join(TMP, stata_file))
    # rename variables to match the shapefile's key columns
    vacc = vacc.rename(columns={'pc11_state_id': 'pc11_s_id', 'pc11_district_id': 'pc11_d_id'})
    geodist = gpd.GeoDataFrame(gpd.read_file(f"{_SHAPE_DIR}/pc11-district.shp"))
    return geodist.merge(vacc, left_on=['pc11_s_id', 'pc11_d_id'],
                         right_on=['pc11_s_id', 'pc11_d_id'], how=how)


def _plot_map(gdf, column, cmap, title, cbar_label, out_name, states=None):
    """Draw a choropleth of *column* from *gdf* and save it to
    ~/public_html/png/<out_name>.

    states: optional state-outline GeoDataFrame overlaid on top (used for the
            all-India maps only).  The colorbar is always normalized 0-100,
            matching the original script.
    """
    fu, axu = plt.subplots(figsize=[10, 10])
    gdf.plot(ax=axu, column=column, cmap=cmap,
             missing_kwds=dict(color="whitesmoke", linewidth=1.3), alpha=2.4)
    if states is not None:
        states.plot(ax=axu, color="none", linewidth=0.2, alpha=0.9)
    # axis settings
    axu.set_aspect("equal")
    axu.grid(True)
    axu.yaxis.grid(color='gray', linewidth=0.25, linestyle="--")
    axu.xaxis.grid(color='gray', linewidth=0.25, linestyle="--")
    axu.grid(zorder=0)
    axu.set_title(title)
    # add custom colorbar
    # l:left, b:bottom, w:width, h:height; in normalized unit (0-1)
    cax = fu.add_axes([0.94, 0.2, 0.025, 0.6])
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=100))
    sm._A = []
    cbar = fu.colorbar(sm, cax=cax)
    cbar.ax.set_ylabel(cbar_label, labelpad=20, fontsize=14, rotation=270)
    plt.savefig(os.path.expanduser(f"~/public_html/png/{out_name}"), bbox_inches="tight", dpi=150)


# state outlines, overlaid on the all-India maps
geostate = gpd.read_file(f"{_SHAPE_DIR}/pc11-state.shp")

# all-India: vaccinations per 45+ population
geodist = _load_merged("vacc_eligible.dta", "left")
_plot_map(geodist, "vacc_eligible", "viridis_r",
          "Total vaccinations divided by 45$+$ population",
          "Total vaccinations/45 $+$ population", "vacc.png", states=geostate)

# all-India: raw vaccination totals
geodist = _load_merged("vacc_all.dta", "left")
_plot_map(geodist, "tot_vacc", "Reds", "Total vaccinations",
          "Total vaccinations (normalized 0-100)", "vacc_all.png", states=geostate)

# all-India: vaccinations per health-care centre.  Deliberately reuses the
# vacc_all merge above, which carries the vacc_hc column (as the original did).
_plot_map(geodist, "vacc_hc", "PRGn",
          "Total vaccinations divided by number of health-care centres",
          "Total vaccinations$/$No. health-care centres", "vacc_hc.png", states=geostate)

# single-state maps: merge how="right" keeps only that state's districts and
# no national state outline is drawn.  (The original labelled the Karnataka
# section "Tamil Nadu" in a comment -- the data file and title are Karnataka.)
for stata_file, title, out_name in [
        ("vacc_mah.dta", "Total vaccinations by district, Maharashtra", "vacc_mah.png"),
        ("vacc_del.dta", "Total vaccinations by district, Delhi", "vacc_del.png"),
        ("vacc_ka.dta", "Total vaccinations by district, Karnataka", "vacc_ka.png"),
]:
    geodist = _load_merged(stata_file, "right")
    _plot_map(geodist, "tot_vacc", "plasma_r", title,
              "Total vaccinations (Normalized 0--100)", out_name)

plt.close("all")
|
[
"ab738@cornell.edu"
] |
ab738@cornell.edu
|
d3692e6852c8e1739ff1b63fce5cd41faa9aa4db
|
554e363152619134fc3cbbbe6116e8acfb3682d4
|
/src/utils/process_and_split.py
|
3f7b6272877aef7563753a59fc2a38e5f7364e77
|
[] |
no_license
|
kinjaljain/QA_SDP
|
c191b30f7996843c927997e30c86231181b789de
|
35456a4688290825e7427aa50583c4492b27c298
|
refs/heads/master
| 2022-11-30T07:18:41.563449
| 2020-04-25T00:16:06
| 2020-04-25T00:16:06
| 241,164,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,835
|
py
|
import re
import nltk
nltk.download('stopwords')
from dataloaders.load_and_parse import load_all
from similarity_metrics.jaccard import get_similarity_score as j
# from similarity_metrics.dice import get_similarity_score as dc
# from similarity_metrics.word2vec import get_similarity_score as w
from tqdm import tqdm
DATA_ROOT = '../../data'

# Shared state: the main loop below rebinds top_n before each citation and
# find_next() reads/mutates it.  (The original had a `global top_n` statement
# here, which is a no-op at module scope; an explicit initialization makes the
# name defined from import time.)
top_n = {}
def split_citing_sentence(sentence_ids, sentences):
    """Break the cited sentences into simpler clause-level fragments.

    Sentences longer than 10 characters are split on commas, semicolons and
    runs of two or more whitespace characters; fragments shorter than 5
    characters are dropped, and sentences of 10 characters or fewer
    contribute nothing at all.  Intuition: splitting on clause boundaries
    yields simpler, more answerable portions.
    """
    clause_splitter = re.compile(r',|;|\s\s+')
    fragments = []
    for sent_id in sentence_ids:
        sentence = sentences[sent_id]
        if len(sentence) <= 10:
            continue
        fragments += [part for part in clause_splitter.split(sentence) if len(part) >= 5]
    return fragments
def find_next(similarity_scores, current_ids):
    """Advance one step of a k-way merge over per-fragment score lists.

    similarity_scores: dict mapping citing-fragment index -> list of
        (ref_sentence_id, score) pairs, each list sorted by descending score.
    current_ids: dict mapping fragment index -> position of the last
        consumed entry in its list (-1 means nothing consumed yet).

    Picks the globally next-best unconsumed candidate across all fragments
    and records it in the module-global ``top_n`` dict
    (ref_sentence_id -> best score seen so far).  Mutates both
    ``current_ids`` and ``top_n``; returns None.
    """
    # First-call fast path: any fragment that has consumed nothing yet takes
    # its own best candidate immediately.
    for key, id in current_ids.items():
        if id == -1:
            current_ids[key] += 1
            sentence_id = similarity_scores[key][current_ids[key]][0]
            if sentence_id in top_n:
                # Already a candidate: keep the higher of the two scores.
                if top_n[sentence_id] < similarity_scores[key][current_ids[key]][1]:
                    top_n[sentence_id] = similarity_scores[key][current_ids[key]][1]
                return
            top_n[sentence_id] = similarity_scores[key][current_ids[key]][1]
            return
    # Otherwise choose the fragment whose next unconsumed entry has the
    # highest score.
    # NOTE(review): best_i starts at 0, assuming key 0 exists and still has
    # an unconsumed entry; if every list is exhausted, current_ids[best_i]+1
    # indexes past the end -- confirm callers guarantee this cannot happen.
    best_i = 0
    for i in similarity_scores.keys():
        if current_ids[i]+1 >= len(similarity_scores[i]):
            continue
        if similarity_scores[i][current_ids[i]+1][1] > similarity_scores[best_i][current_ids[best_i]+1][1]:
            best_i = i
    current_ids[best_i] += 1
    sentence_id = similarity_scores[best_i][current_ids[best_i]][0]
    if sentence_id in top_n:
        # Already a candidate: keep the higher of the two scores.
        if top_n[sentence_id] < similarity_scores[best_i][current_ids[best_i]][1]:
            top_n[sentence_id] = similarity_scores[best_i][current_ids[best_i]][1]
        return
    top_n[sentence_id] = similarity_scores[best_i][current_ids[best_i]][1]
    return
# ---------------------------------------------------------------------------
# Evaluation script: for every (reference, citing) pair, rank the reference
# article's sentences by Jaccard similarity to the citing fragments, take the
# top-5 candidates, and score them against the gold reference spans
# (precision / recall / F1 shown live on the progress bar).
# ---------------------------------------------------------------------------
dataset = load_all(DATA_ROOT)
articles = [x[1] for x in dataset]
n = 5  # NOTE(review): intended candidate-pool size, but the loop below uses a literal 5
tp = 0
fp = 0
fn = 0
avg_score = 0  # running mean similarity score over true positives
all_scores = []  # similarity score of every true positive found
accuracy = 0.
total = 0.
empty_citations = 0
empty_references = 0
pbar = tqdm(dataset)
for data in pbar:
    ref_article = data.ref
    citing_article = data.cite
    offsets = data.offsets
    citing_sentence_ids = offsets.cite
    true_ref_sentences = offsets.ref
    true_ref_sentence_ids = offsets.ref
    # Show running precision/recall/F1 on the progress bar once we have hits.
    if tp > 0:
        p = tp / (max(tp + fp, 1))
        r = tp / (max(tp + fn, 1))
        f1 = 2 * p * r / (p + r)
        pbar.set_description("Processing %.3f %.3f %.3f" % (p, r, f1))
    if citing_article.sentences:
        # new_ids = [c for c in citing_sentence_ids]
        # for c in citing_sentence_ids:
        #     # If additional context is reqd
        #     to_add = 0
        #     extra = range(max(1, c - to_add), c)
        #     new_ids.extend(extra)
        #     extra = range(c + 1, min(len(citing_article.sentences), c + to_add + 1))
        #     new_ids.extend(extra)
        # citing_sentence_ids = new_ids
        # joining the entire set of citing sentences into one big sentence
        complete_citing_sentence = " ".join([citing_article.sentences[c] for c in citing_sentence_ids])
        if len(complete_citing_sentence) > 10:
            citing_sentences = split_citing_sentence(citing_sentence_ids, citing_article.sentences)
        else:
            # NOTE(review): this branch leaves citing_sentences as a *string*,
            # so enumerate() below iterates characters -- likely unintended;
            # confirm whether a one-element list was meant.
            citing_sentences = complete_citing_sentence
        # similarity_score[i][ref_id] = similarity between citing fragment i
        # and reference sentence ref_id.
        similarity_score = {}
        for i, citing_sentence in enumerate(citing_sentences):
            similarity_score[i] = {}
            for ref_id, ref_sentence in ref_article.sentences.items():
                try:
                    similarity_score[i][ref_id] = j(ref_sentence, citing_sentence)
                except Exception as e:
                    print(e)
        if similarity_score:
            # Sort each fragment's candidates by descending score.
            for i in similarity_score.keys():
                sorted_similarity_score = sorted(similarity_score[i].items(), key=lambda item: -item[1])
                similarity_score[i] = sorted_similarity_score
            # Module-global top_n is filled incrementally by find_next().
            top_n = {}
            current_ids = {key: -1 for key in similarity_score.keys()}
            while len(top_n) < 5:
                find_next(similarity_score, current_ids)
            top_n = sorted(top_n.items(), key=lambda item: -item[1])
            # for i in range(len(top_n)):
            #     print(ref_article.sentences[top_n[i][0]])
            # print("\n")
            # for x in true_ref_sentence_ids:
            #     print(ref_article.sentences[x])
            # print("\n\n")
            # Assume all candidates are false positives, then reclassify the
            # ones that match a gold reference sentence as true positives.
            fp += len(top_n)
            top_n_ids = {x[0]: x[1] for x in top_n}
            for x in true_ref_sentence_ids:
                if x in top_n_ids:
                    avg_score = (tp * avg_score + top_n_ids[x]) / (max(tp, 1))
                    all_scores.append(top_n_ids[x])
                    fp -= 1
                    tp += 1
                else:
                    fn += 1
print(tp, fp, fn)
# Datum = namedtuple('Datum', 'ref cite offsets author is_test facet year')
# Offsets = namedtuple('Offsets', 'marker cite ref')
# Article = namedtuple('Article', 'xml sentences sections')
# articles = list of articles -> each article has a dict of sentences (sentence ID : actual sentence)
|
[
"kinjal@Kinjals-MacBook-Pro.local"
] |
kinjal@Kinjals-MacBook-Pro.local
|
80a886b3cc887cdf1aefb3525eaa35f1f6528e29
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/volume/volume_attributes.py
|
94f52a5e553ca733b3138d1b081bb226e35c66cc
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,794
|
py
|
from netapp.volume.volume_hybrid_cache_attributes import VolumeHybridCacheAttributes
from netapp.volume.volume_mirror_attributes import VolumeMirrorAttributes
from netapp.volume.volume_space_attributes import VolumeSpaceAttributes
from netapp.volume.volume_directory_attributes import VolumeDirectoryAttributes
from netapp.volume.volume_state_attributes import VolumeStateAttributes
from netapp.volume.volume_autosize_attributes import VolumeAutosizeAttributes
from netapp.volume.volume_flexcache_attributes import VolumeFlexcacheAttributes
from netapp.volume.volume_id_attributes import VolumeIdAttributes
from netapp.volume.volume_antivirus_attributes import VolumeAntivirusAttributes
from netapp.volume.volume_qos_attributes import VolumeQosAttributes
from netapp.volume.volume_transition_attributes import VolumeTransitionAttributes
from netapp.volume.volume_snapshot_attributes import VolumeSnapshotAttributes
from netapp.volume.volume_language_attributes import VolumeLanguageAttributes
from netapp.volume.volume_security_attributes import VolumeSecurityAttributes
from netapp.volume.volume_sis_attributes import VolumeSisAttributes
from netapp.volume.volume_performance_attributes import VolumePerformanceAttributes
from netapp.volume.volume_inode_attributes import VolumeInodeAttributes
from netapp.volume.volume_snapshot_autodelete_attributes import VolumeSnapshotAutodeleteAttributes
from netapp.volume.volume_vm_align_attributes import VolumeVmAlignAttributes
from netapp.volume.volume_64bit_upgrade_attributes import Volume64BitUpgradeAttributes
from netapp.volume.volume_clone_attributes import VolumeCloneAttributes
from netapp.volume.volume_infinitevol_attributes import VolumeInfinitevolAttributes
from netapp.volume.volume_export_attributes import VolumeExportAttributes
from netapp.netapp_object import NetAppObject
class VolumeAttributes(NetAppObject):
    """
    Attributes of a volume.
    When returned as part of the output, all elements of this typedef
    are reported, unless limited by a set of desired attributes
    specified by the caller.
    <p>
    When used as input to specify desired attributes to return,
    omitting a given element indicates that it shall not be returned
    in the output. In contrast, by providing an element (even with
    no value) the caller ensures that a value for that element will
    be returned, given that the value can be retrieved.
    <p>
    When used as input to specify queries, any element can be omitted
    in which case the resulting set of objects is not constrained by
    any specific value of that attribute.
    """
    # NOTE: setters compare with `is not None` (identity), the idiomatic and
    # safe None check — `!= None` invokes __ne__ and can misbehave for
    # objects with custom comparison operators.
    _volume_hybrid_cache_attributes = None
    @property
    def volume_hybrid_cache_attributes(self):
        """
        This field contains information on Flash Pool caching
        attributes on a volume
        """
        return self._volume_hybrid_cache_attributes
    @volume_hybrid_cache_attributes.setter
    def volume_hybrid_cache_attributes(self, val):
        if val is not None:
            self.validate('volume_hybrid_cache_attributes', val)
        self._volume_hybrid_cache_attributes = val
    _volume_mirror_attributes = None
    @property
    def volume_mirror_attributes(self):
        """
        This field contains information applying exclusive to
        volume mirror.
        """
        return self._volume_mirror_attributes
    @volume_mirror_attributes.setter
    def volume_mirror_attributes(self, val):
        if val is not None:
            self.validate('volume_mirror_attributes', val)
        self._volume_mirror_attributes = val
    _volume_space_attributes = None
    @property
    def volume_space_attributes(self):
        """
        This field contains information related to volume disk
        space management including on-disk layout.
        """
        return self._volume_space_attributes
    @volume_space_attributes.setter
    def volume_space_attributes(self, val):
        if val is not None:
            self.validate('volume_space_attributes', val)
        self._volume_space_attributes = val
    _volume_directory_attributes = None
    @property
    def volume_directory_attributes(self):
        """
        This field contains information related to directories in
        a volume.
        """
        return self._volume_directory_attributes
    @volume_directory_attributes.setter
    def volume_directory_attributes(self, val):
        if val is not None:
            self.validate('volume_directory_attributes', val)
        self._volume_directory_attributes = val
    _volume_state_attributes = None
    @property
    def volume_state_attributes(self):
        """
        This field contains information about the state or status
        of a volume or its features.
        """
        return self._volume_state_attributes
    @volume_state_attributes.setter
    def volume_state_attributes(self, val):
        if val is not None:
            self.validate('volume_state_attributes', val)
        self._volume_state_attributes = val
    _volume_autosize_attributes = None
    @property
    def volume_autosize_attributes(self):
        """
        This field contains information about the autosize
        settings of the volume.
        """
        return self._volume_autosize_attributes
    @volume_autosize_attributes.setter
    def volume_autosize_attributes(self, val):
        if val is not None:
            self.validate('volume_autosize_attributes', val)
        self._volume_autosize_attributes = val
    _volume_flexcache_attributes = None
    @property
    def volume_flexcache_attributes(self):
        """
        This field contains information applying exclusively to
        flexcache volumes.
        """
        return self._volume_flexcache_attributes
    @volume_flexcache_attributes.setter
    def volume_flexcache_attributes(self, val):
        if val is not None:
            self.validate('volume_flexcache_attributes', val)
        self._volume_flexcache_attributes = val
    _volume_id_attributes = None
    @property
    def volume_id_attributes(self):
        """
        This field contains identification information about the
        volume.
        """
        return self._volume_id_attributes
    @volume_id_attributes.setter
    def volume_id_attributes(self, val):
        if val is not None:
            self.validate('volume_id_attributes', val)
        self._volume_id_attributes = val
    _volume_antivirus_attributes = None
    @property
    def volume_antivirus_attributes(self):
        """
        This field contains information about Antivirus On-Access
        settings for the volume.
        """
        return self._volume_antivirus_attributes
    @volume_antivirus_attributes.setter
    def volume_antivirus_attributes(self, val):
        if val is not None:
            self.validate('volume_antivirus_attributes', val)
        self._volume_antivirus_attributes = val
    _volume_qos_attributes = None
    @property
    def volume_qos_attributes(self):
        """
        This field contains the information that relates to QoS.
        """
        return self._volume_qos_attributes
    @volume_qos_attributes.setter
    def volume_qos_attributes(self, val):
        if val is not None:
            self.validate('volume_qos_attributes', val)
        self._volume_qos_attributes = val
    _volume_transition_attributes = None
    @property
    def volume_transition_attributes(self):
        """
        This field contains information applying exclusively to
        transitioned or transitioning volumes.
        """
        return self._volume_transition_attributes
    @volume_transition_attributes.setter
    def volume_transition_attributes(self, val):
        if val is not None:
            self.validate('volume_transition_attributes', val)
        self._volume_transition_attributes = val
    _volume_snapshot_attributes = None
    @property
    def volume_snapshot_attributes(self):
        """
        This field contains information applying exclusively to
        all the snapshots in the volume. Volume disk
        space-related settings are excluded.
        """
        return self._volume_snapshot_attributes
    @volume_snapshot_attributes.setter
    def volume_snapshot_attributes(self, val):
        if val is not None:
            self.validate('volume_snapshot_attributes', val)
        self._volume_snapshot_attributes = val
    _volume_language_attributes = None
    @property
    def volume_language_attributes(self):
        """
        This field contains information about volume
        language-related settings.
        """
        return self._volume_language_attributes
    @volume_language_attributes.setter
    def volume_language_attributes(self, val):
        if val is not None:
            self.validate('volume_language_attributes', val)
        self._volume_language_attributes = val
    _volume_security_attributes = None
    @property
    def volume_security_attributes(self):
        """
        This field contains information about volume security
        settings.
        """
        return self._volume_security_attributes
    @volume_security_attributes.setter
    def volume_security_attributes(self, val):
        if val is not None:
            self.validate('volume_security_attributes', val)
        self._volume_security_attributes = val
    _volume_sis_attributes = None
    @property
    def volume_sis_attributes(self):
        """
        This field contains information about Deduplication, file
        clone, compression, etc.
        """
        return self._volume_sis_attributes
    @volume_sis_attributes.setter
    def volume_sis_attributes(self, val):
        if val is not None:
            self.validate('volume_sis_attributes', val)
        self._volume_sis_attributes = val
    _volume_performance_attributes = None
    @property
    def volume_performance_attributes(self):
        """
        This field contains information that relates to the
        performance of the volume.
        """
        return self._volume_performance_attributes
    @volume_performance_attributes.setter
    def volume_performance_attributes(self, val):
        if val is not None:
            self.validate('volume_performance_attributes', val)
        self._volume_performance_attributes = val
    _volume_inode_attributes = None
    @property
    def volume_inode_attributes(self):
        """
        This field contains information about inodes in a
        volume.
        """
        return self._volume_inode_attributes
    @volume_inode_attributes.setter
    def volume_inode_attributes(self, val):
        if val is not None:
            self.validate('volume_inode_attributes', val)
        self._volume_inode_attributes = val
    _volume_snapshot_autodelete_attributes = None
    @property
    def volume_snapshot_autodelete_attributes(self):
        """
        This field contains information about snapshot autodelete
        policy settings.
        """
        return self._volume_snapshot_autodelete_attributes
    @volume_snapshot_autodelete_attributes.setter
    def volume_snapshot_autodelete_attributes(self, val):
        if val is not None:
            self.validate('volume_snapshot_autodelete_attributes', val)
        self._volume_snapshot_autodelete_attributes = val
    _volume_vm_align_attributes = None
    @property
    def volume_vm_align_attributes(self):
        """
        This field contains information related to the Virtual
        Machine alignment settings on a volume
        """
        return self._volume_vm_align_attributes
    @volume_vm_align_attributes.setter
    def volume_vm_align_attributes(self, val):
        if val is not None:
            self.validate('volume_vm_align_attributes', val)
        self._volume_vm_align_attributes = val
    _volume_64bit_upgrade_attributes = None
    @property
    def volume_64bit_upgrade_attributes(self):
        """
        Information related to 64-bit upgrade. After 64-bit
        upgrade completes, this information is no longer
        available.
        """
        return self._volume_64bit_upgrade_attributes
    @volume_64bit_upgrade_attributes.setter
    def volume_64bit_upgrade_attributes(self, val):
        if val is not None:
            self.validate('volume_64bit_upgrade_attributes', val)
        self._volume_64bit_upgrade_attributes = val
    _volume_clone_attributes = None
    @property
    def volume_clone_attributes(self):
        """
        This field contains information applying exclusively to
        clone volumes.
        """
        return self._volume_clone_attributes
    @volume_clone_attributes.setter
    def volume_clone_attributes(self, val):
        if val is not None:
            self.validate('volume_clone_attributes', val)
        self._volume_clone_attributes = val
    _volume_infinitevol_attributes = None
    @property
    def volume_infinitevol_attributes(self):
        """
        This field contains information about the state of an
        Infinite Volume.
        """
        return self._volume_infinitevol_attributes
    @volume_infinitevol_attributes.setter
    def volume_infinitevol_attributes(self, val):
        if val is not None:
            self.validate('volume_infinitevol_attributes', val)
        self._volume_infinitevol_attributes = val
    _volume_export_attributes = None
    @property
    def volume_export_attributes(self):
        """
        This field contains information about export settings of
        the volume.
        """
        return self._volume_export_attributes
    @volume_export_attributes.setter
    def volume_export_attributes(self, val):
        if val is not None:
            self.validate('volume_export_attributes', val)
        self._volume_export_attributes = val
    @staticmethod
    def get_api_name():
        # Wire name of this typedef in the NetApp ONTAP API.
        return "volume-attributes"
    @staticmethod
    def get_desired_attrs():
        # API (hyphenated) names of all attributes that may be requested.
        return [
            'volume-hybrid-cache-attributes',
            'volume-mirror-attributes',
            'volume-space-attributes',
            'volume-directory-attributes',
            'volume-state-attributes',
            'volume-autosize-attributes',
            'volume-flexcache-attributes',
            'volume-id-attributes',
            'volume-antivirus-attributes',
            'volume-qos-attributes',
            'volume-transition-attributes',
            'volume-snapshot-attributes',
            'volume-language-attributes',
            'volume-security-attributes',
            'volume-sis-attributes',
            'volume-performance-attributes',
            'volume-inode-attributes',
            'volume-snapshot-autodelete-attributes',
            'volume-vm-align-attributes',
            'volume-64bit-upgrade-attributes',
            'volume-clone-attributes',
            'volume-infinitevol-attributes',
            'volume-export-attributes',
        ]
    def describe_properties(self):
        # Maps each Python attribute to its wrapper class and requiredness
        # for (de)serialization by the NetAppObject machinery.
        return {
            'volume_hybrid_cache_attributes': { 'class': VolumeHybridCacheAttributes, 'is_list': False, 'required': 'optional' },
            'volume_mirror_attributes': { 'class': VolumeMirrorAttributes, 'is_list': False, 'required': 'optional' },
            'volume_space_attributes': { 'class': VolumeSpaceAttributes, 'is_list': False, 'required': 'optional' },
            'volume_directory_attributes': { 'class': VolumeDirectoryAttributes, 'is_list': False, 'required': 'optional' },
            'volume_state_attributes': { 'class': VolumeStateAttributes, 'is_list': False, 'required': 'optional' },
            'volume_autosize_attributes': { 'class': VolumeAutosizeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_flexcache_attributes': { 'class': VolumeFlexcacheAttributes, 'is_list': False, 'required': 'optional' },
            'volume_id_attributes': { 'class': VolumeIdAttributes, 'is_list': False, 'required': 'optional' },
            'volume_antivirus_attributes': { 'class': VolumeAntivirusAttributes, 'is_list': False, 'required': 'optional' },
            'volume_qos_attributes': { 'class': VolumeQosAttributes, 'is_list': False, 'required': 'optional' },
            'volume_transition_attributes': { 'class': VolumeTransitionAttributes, 'is_list': False, 'required': 'optional' },
            'volume_snapshot_attributes': { 'class': VolumeSnapshotAttributes, 'is_list': False, 'required': 'optional' },
            'volume_language_attributes': { 'class': VolumeLanguageAttributes, 'is_list': False, 'required': 'optional' },
            'volume_security_attributes': { 'class': VolumeSecurityAttributes, 'is_list': False, 'required': 'optional' },
            'volume_sis_attributes': { 'class': VolumeSisAttributes, 'is_list': False, 'required': 'optional' },
            'volume_performance_attributes': { 'class': VolumePerformanceAttributes, 'is_list': False, 'required': 'optional' },
            'volume_inode_attributes': { 'class': VolumeInodeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_snapshot_autodelete_attributes': { 'class': VolumeSnapshotAutodeleteAttributes, 'is_list': False, 'required': 'optional' },
            'volume_vm_align_attributes': { 'class': VolumeVmAlignAttributes, 'is_list': False, 'required': 'optional' },
            'volume_64bit_upgrade_attributes': { 'class': Volume64BitUpgradeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_clone_attributes': { 'class': VolumeCloneAttributes, 'is_list': False, 'required': 'optional' },
            'volume_infinitevol_attributes': { 'class': VolumeInfinitevolAttributes, 'is_list': False, 'required': 'optional' },
            'volume_export_attributes': { 'class': VolumeExportAttributes, 'is_list': False, 'required': 'optional' },
        }
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
d536defe9b0153859e93dfe79e1890719bb316f7
|
8ff148371adb02a171c974e6bea4e6609fb72f04
|
/app/__init__.py
|
3c5a958ead1e5f4a6156bb84acd62984c6778dfa
|
[
"MIT"
] |
permissive
|
rmarshall10/Insight_App
|
0c20f00965c6317e8ffbb592a36e0f9f170381c5
|
f0276e79d0ceaf2787762a1602d62a19db1c1800
|
refs/heads/master
| 2023-02-22T17:49:44.064648
| 2020-06-22T23:39:42
| 2020-06-22T23:39:42
| 269,232,641
| 0
| 0
|
MIT
| 2023-02-15T23:45:48
| 2020-06-04T01:31:19
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
from flask import Flask
app = Flask(__name__)
# NOTE(review): UPLOAD_FOLDER needs a full (absolute) path; the relative
# value below would resolve against the current working directory, so it
# is left disabled until a proper path is configured.
#app.config['UPLOAD_FOLDER'] = 'app/uploads'
app.config['MAX_CONTENT_PATH'] = 10 * 1024 * 1024
app.config['ALLOWED_EXTENSIONS'] = {"MP4"}
# Imported last so route handlers see the fully-configured `app`
# (standard Flask circular-import pattern).
from app import routes
|
[
"ryanmarshall@Ryans-MacBook-Air.local"
] |
ryanmarshall@Ryans-MacBook-Air.local
|
02d48bd2c223636e35624a38576f0a5412d9f2f8
|
2e06c0df26e3fbccc2af052301e8b486fd17d84c
|
/Line3D/line3d_rectangular_projection.py
|
66b986387a063fbb644ba6817cebe039bc9a5c45
|
[
"MIT"
] |
permissive
|
d8ye/pyecharts-gallery
|
54f44c0a78d88608ae83a678c105424113866f25
|
07995a7f2600983282eb37b1e94da9af2f1a25b5
|
refs/heads/master
| 2020-07-03T13:04:42.093830
| 2019-08-13T04:14:13
| 2019-08-13T04:14:13
| 201,913,794
| 0
| 0
|
MIT
| 2019-08-12T11:04:10
| 2019-08-12T11:04:09
| null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import math
import pyecharts.options as opts
from pyecharts.charts import Line3D
"""
Gallery example for pyecharts 1.1.0.
Reference: https://echarts.baidu.com/examples/editor.html?c=line3d-orthographic&gl=1
Currently unimplemented features:
1. (none listed)
"""
week_en = "Saturday Friday Thursday Wednesday Tuesday Monday Sunday".split()
clock = (
    "12a 1a 2a 3a 4a 5a 6a 7a 8a 9a 10a 11a 12p "
    "1p 2p 3p 4p 5p 6p 7p 8p 9p 10p 11p".split()
)
# Sample a 3-D parametric spiral: a unit circle modulated by a
# high-frequency (75x) ripple, rising along z with the same ripple.
data = []
for t in range(0, 25000):
    _t = t / 1000
    x = (1 + 0.25 * math.cos(75 * _t)) * math.cos(_t)
    y = (1 + 0.25 * math.cos(75 * _t)) * math.sin(_t)
    z = _t + 2.0 * math.sin(75 * _t)
    data.append([x, y, z])
# Build the chart and render it to a standalone HTML file; the colour map
# is driven by the z dimension (dimension=2).
(
    Line3D()
    .add(
        "",
        data,
        xaxis3d_opts=opts.Axis3DOpts(data=clock, type_="value"),
        yaxis3d_opts=opts.Axis3DOpts(data=week_en, type_="value"),
        grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
    )
    .set_global_opts(
        visualmap_opts=opts.VisualMapOpts(
            dimension=2,
            max_=30,
            min_=0,
            range_color=[
                "#313695",
                "#4575b4",
                "#74add1",
                "#abd9e9",
                "#e0f3f8",
                "#ffffbf",
                "#fee090",
                "#fdae61",
                "#f46d43",
                "#d73027",
                "#a50026",
            ],
        )
    )
    .render("line3d_rectangular_projection.html")
)
|
[
"379978424@qq.com"
] |
379978424@qq.com
|
8555a9983267a8997ab3d10b7b8719a8858dc2bd
|
f518a44abf00aefbd6de048ccff9203aac3eef24
|
/code/exploratory_analysis.py
|
0c2e4702f587ef8f065ef744a11731cc52513dfc
|
[] |
no_license
|
vfulco/mining_the_common_app
|
f510d403b075218c953fb58ccffe346f0baabb52
|
46354664596959e38879a1c51a03ffa40b2eb27e
|
refs/heads/master
| 2021-06-24T05:49:57.944449
| 2017-06-21T05:26:15
| 2017-06-21T05:26:15
| 105,898,177
| 1
| 0
| null | 2017-10-05T14:12:53
| 2017-10-05T14:12:53
| null |
UTF-8
|
Python
| false
| false
| 6,715
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import re
def histogramSAT(df, year=None, before_after = 'before', lim_min=100, lim_max=2400):
    '''
    Plot a histogram of 'Highest Composite SAT Score'.

    If `year` is given, restrict to students who graduated before
    (default) or after that year depending on `before_after`;
    otherwise plot all rows. `lim_min`/`lim_max` bound the x range.
    '''
    # Idiom fix: compare against None with `is not None`, not `==`.
    if year is not None:
        if before_after == 'before':
            mask_year = df['Undergraduate Graduation Year'] < year
        else:
            mask_year = df['Undergraduate Graduation Year'] > year
        df[mask_year]['Highest Composite SAT Score'].hist(range=(lim_min, lim_max))
    else:
        df['Highest Composite SAT Score'].hist(range=(lim_min, lim_max))
def parseSAT(x):
    '''
    Parse a whitespace-separated SAT score string and normalise the
    composite to the 2400 scale. Returns np.nan when the string cannot
    be interpreted.
    '''
    scores = [float(token) for token in x.split()]
    last = scores[-1]
    breakdown = scores[:-1]

    if last <= 800:
        # No composite present: the whole list is the section breakdown.
        if len(scores) == 3:
            return sum(scores)                     # 2400-scale sections
        if len(scores) == 2:
            return sum(scores) / 1600. * 2400      # 1600-scale, rescale
        return np.nan                              # cannot be parsed

    if last > 1600:
        return last                                # already 2400-scale

    # 800 < last <= 1600: decide which scale the composite is on.
    if sum(breakdown) == last:
        if len(breakdown) == 3:
            return last                            # 2400-scale composite
        if len(breakdown) == 2:
            return last / 1600. * 2400             # 1600-scale composite
        return np.nan                              # cannot be parsed
    if sum(breakdown) < last:
        if len(breakdown) == 2:
            return last                            # assumed 2400-scale
        return np.nan                              # cannot be parsed
    # sum(breakdown) > last: unhandled in the original too — falls
    # through and yields None.
    return None
def finalizeSAT(df):
    '''
    Consolidate the two SAT columns into 'SAT_total_final' (in place).

    Takes the row-wise max of 'Highest Composite SAT Score' and
    'SAT_total_temp', then blanks faulty entries that are not multiples
    of 10 (real SAT scores always are).
    '''
    # Change NaNs to None in SAT_total_temp so we can apply a max function
    df['SAT_total_temp'] = df['SAT_total_temp'].apply(lambda x: None if str(x) == 'nan' else x)
    # Take the row-wise max of the two columns
    df['SAT_total_final'] = np.max(df[['Highest Composite SAT Score', 'SAT_total_temp']], axis=1)
    # BUG FIX: the result of apply() was previously discarded, so faulty
    # entries were never removed — assign it back.
    df['SAT_total_final'] = df['SAT_total_final'].apply(lambda x: None if x % 10 > 0 else x)
def parseEthnicity(x):
    '''
    Split a pipe-delimited ethnicity string into a lowercase list,
    dropping the 'prefer not to share' entry. Returns np.nan when
    nothing remains after dropping it.
    '''
    entries = x.lower().split('|')
    if 'prefer not to share' in entries:
        # remove() drops the first occurrence, same as del at index().
        entries.remove('prefer not to share')
    if not entries:
        return np.nan
    return entries
def showTopPhrases(df, col, n=50):
    '''
    Count phrases of the form "[..] [..] phrase" in the given column and
    return the n most common (phrase, count) pairs.
    '''
    counts = Counter()
    non_null = df[df[col].notnull()][col]
    for entry in non_null:
        counts.update(re.findall('[[]\S+[]]\s[[]\S+[]]\s(.+)', entry))
    return counts.most_common()[:n]
def parseECC(x, lst):
    '''
    Return 1 if the activity string x contains any keyword from lst,
    else 0. Non-string inputs (e.g. NaN) yield 0. Matching is done on
    the lowercased string, so keywords are expected lowercase.
    '''
    # Idiom fix: isinstance() is the proper type check (type(x) == str
    # fails for str subclasses).
    if not isinstance(x, str):
        return 0
    x = x.lower()
    for word in lst:
        # `in` is clearer than find() > -1 and behaves identically here.
        if word in x:
            return 1
    return 0
def getAllSports(x):
    '''
    Extract sport names from a "[..] [..] value" formatted string,
    skipping purely numeric values and Yes/No answers. Returns [] when
    the input is missing (None or np.nan).
    '''
    if x is None or x is np.nan:
        return []
    sports = []
    for value in re.findall('[[]\S+[]]\s[[]\S+[]]\s(.+)', x):
        try:
            int(value)
        except ValueError:
            # Not a number: keep it unless it is a Yes/No answer.
            if not (value[:2] == 'No' or value[:3] == 'Yes'):
                sports.append(value)
    return sports
def getUniqueSports(all_sports):
    '''
    Flatten a list of sport lists and return the unique sport names
    (order unspecified, as with the original set()).

    BUG FIX: the original used map() purely for its side effect, which
    is a no-op on Python 3 where map() is lazy — the function always
    returned []. Flatten explicitly instead.
    '''
    flattened = []
    for sports in all_sports:
        flattened.extend(sports)
    return list(set(flattened))
def makeSportsDummies(df, unique_sports):
    '''
    Add a zero-initialised dummy column 'sports_<name>' to df (in place)
    for every sport in unique_sports.
    '''
    for name in unique_sports:
        df['sports_' + name] = 0
def parseSports(df, unique_sports):
    '''
    Set the 'sports_<name>' dummy to 1 (in place) wherever the raw
    'High School Sports Played' text mentions that sport.
    '''
    for idx in df.index:
        raw = df.loc[idx, 'High School Sports Played']
        if raw is np.nan:
            continue  # missing entry — leave dummies untouched
        for name in unique_sports:
            if name in raw:
                df.loc[idx, 'sports_' + name] = 1
def parseVarsity(x, unique_sports, regexp):
    '''
    Return 1 when the sports text indicates varsity participation (the
    third field captured by `regexp` equals 'Yes'), else 0. Missing
    input (None/np.nan) yields 0. `unique_sports` is accepted for
    signature compatibility but not consulted.
    '''
    if x is None or x is np.nan:
        return 0
    fields = regexp.findall(x)
    return 1 if len(fields) > 3 and fields[2] == 'Yes' else 0
def parseCaptain(x, unique_sports, regexp):
    '''
    Return 1 when the sports text indicates the student was a team
    captain (the fourth field captured by `regexp` equals 'Yes'), else
    0. Missing input (None/np.nan) yields 0. `unique_sports` is
    accepted for signature compatibility but not consulted.
    '''
    if x is None or x is np.nan:
        return 0
    fields = regexp.findall(x)
    return 1 if len(fields) > 4 and fields[3] == 'Yes' else 0
def showNulls(df, col):
    '''
    Return the percentage of null values for every column of df.

    NOTE(review): `col` is unused — percentages are computed for all
    columns; confirm whether per-column filtering was intended.
    '''
    null_counts = df.isnull().sum(axis=0)
    return null_counts / len(df) * 100
def exploreCategorical(df, x, y, i, j):
    '''
    Explore rates of 'y' for each unique value in a particular variable
    INPUTS:
    df (dataframe): Pandas DataFrame object
    x (str): input variable
    y (str): output variable
    i (int): no of rows in plot grid
    j (int): no of cols in plot grid
    OUTPUT:
    Prints the normalised value counts of y for each unique value in x
    and plots a histogram of the same data per value.
    Note: Works better with categorical data (or data with a limited no
    of unique values)
    '''
    fig = plt.figure(figsize=(15, 6))
    for idx, val in enumerate(df[x].unique()):
        if str(val) == 'nan':
            continue  # skip missing category
        # BUG FIX: Python-2 print statements converted to print() calls
        # (valid and equivalent on both Python 2 and 3 for these forms).
        print(val)
        counts = df[df[x] == val][y].value_counts()
        print(counts / counts.sum())
        fig.add_subplot(i, j, idx + 1)
        plt.hist(df[df[x] == val][y])
        plt.title(str(val))
|
[
"yungmsh@gmail.com"
] |
yungmsh@gmail.com
|
98d99daa66e747ccc7397198c69bcb06378ea89a
|
df60b560266920c9326dee242ba140d42f97be4a
|
/ex09_lista02.py
|
fa2c2b17801ad93c7363dece17b5212b50ab8e49
|
[] |
no_license
|
carloseduardo1987/Python
|
664c74a2f5af1a12ccd6392b4faab4e040b57106
|
291cf3546faefa081e6d8c7fec1f5d826575a167
|
refs/heads/main
| 2023-01-20T12:17:29.565482
| 2020-11-27T07:54:25
| 2020-11-27T07:54:25
| 316,432,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
print("### Calculadora de desconto em compras ###")
valor = float(input('Digite o valor da compra: R$ '))
if valor <= 150 :
total = valor * 0.05
elif valor <= 300 :
total = valor * 0.07
elif valor <= 500 :
total = valor * 0.10
else:
total = valor * 0.20
print(f'O desconto na compra será R${ round(total, 2) }')
|
[
"noreply@github.com"
] |
noreply@github.com
|
3f82fbf0a527d496e2fb54cbcba4581c72e3fb91
|
401d074875b7878adb456c8a2f6ba55c45484600
|
/scripts/src/write2db.py
|
3f967cf842fb22bb67536f515e3581f7e8148477
|
[] |
no_license
|
novaxiaohui/electricity-filching-detection
|
1ad4842a045a533d2c49c9f26f29524f47bd0301
|
9feafbc0ea50d133cd4e33d9882665c6b0d3cbe0
|
refs/heads/master
| 2020-03-28T00:41:41.672757
| 2018-09-05T02:47:52
| 2018-09-05T02:47:52
| 147,441,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created on 2017/9/29 8:55
@author: Xiaohui
Function: preload data to database
"""
import os
import csv
from itertools import islice
from datetime import datetime
from collections import defaultdict
from ConfParser import ConfParser
from conndb import mysql
import traceback
from preprocess import *
import json
# Load DB connection settings from dbServer.conf located next to this script.
info = ConfParser(os.path.join(os.path.abspath(os.path.dirname(__file__)),'dbServer.conf')).getValue()
db = info['db']  # target database name
def put_userinfo_2db( sql,path= '../testdata/'): # load basic user info into the database
    """
    Read every CSV file under <path>\\低压抄表 (low-voltage meter-reading
    exports) and insert each previously unseen user's
    id/name/address/zone/station into the `userinfo` table via `sql`.

    NOTE(review): mixes a '/'-style default path with '\\' joins, so as
    written this is Windows-only — confirm the intended platform.
    Python 2-style code: .encode('utf-8') here assumes `line` fields are
    decoded text.
    """
    data = path + '\\低压抄表'
    filename = os.listdir(data)
    userlist = {}  # set of user_ids already inserted (dedupe across files)
    for filetemp in filename:
        file = csv.reader(open(data + '\\' + filetemp))
        param = []
        try:
            for line in islice(file, 1, None):  # islice(_, 1, None) skips the header row
                user_id = line[2][:-1].encode('utf-8')  # drop trailing char of the raw id field
                if user_id not in userlist:
                    userlist[user_id] = 1
                    user_name = line[3].encode('utf-8')
                    address = line[4].encode('utf-8')
                    station = line[1].encode('utf-8')
                    zone_name = line[12].encode('utf-8')
                    param.append((user_id,user_name,address,zone_name,station))
            sql.additem('userinfo(user_id,user_name,address,zone_name,station)',param, format='(%s,%s,%s,%s,%s)')
        except ValueError:
            # Best effort: log the bad file and keep processing the rest.
            traceback.print_exc()
            continue
#### # of user :34520
def put_yongdian_2db(figures, sql):  # load users' electricity consumption into the database
    """
    Insert each user's first 90 readings into `elecconsume` and the
    consecutive differences of the first 91 readings into `diffconsume`.

    figures: dict mapping user_id -> sequence of numeric readings.
    sql: DB helper exposing additem(table_spec, rows, format=...).

    BUG FIX: the original computed `reduce(lambda x,y: y-x, ...)` — a
    fold to a single scalar (map(str, <scalar>) would raise TypeError)
    — and then never used `diff_values` at all, loading `diffconsume`
    with the raw elec values. Compute pairwise differences and insert
    them into the correct table.
    """
    try:
        elec_param = []
        diff_param = []
        for user_id in figures:
            values = figures[user_id]
            elec_values = ','.join(map(str, values[:90]))
            window = values[:91]
            # Consecutive differences: reading[k+1] - reading[k].
            diff_values = ','.join(str(b - a) for a, b in zip(window, window[1:]))
            elec_param.append((user_id, elec_values))
            diff_param.append((user_id, diff_values))
        sql.additem('elecconsume(user_id,elec_value)', elec_param, format='(%s,%s)')
        sql.additem('diffconsume(user_id,diff_value)', diff_param, format='(%s,%s)')
    except ValueError:
        traceback.print_exc()
#### # of user :34520
def put_detectres_2db(efilch_res, sql):  # load detection results into the database
    """
    Replace the contents of `detectres` with the current detection
    results, e.g. efilch_res = {'3713505700': 0.9, '3718012008': 0.8}.
    """
    sql.truncate('detectres')  # clear the results table first
    rows = [(user_id, prop) for user_id, prop in efilch_res.items()]
    try:
        sql.additem('detectres(user_id,prop)', rows, format='(%s,%s)')
    except ValueError:
        traceback.print_exc()
def put_metadata_2db(starttime,endtime,zone_num,user_num,thief_num,sql): # load source-data metadata into the database
    """Insert one summary row about the source data into `metatable`."""
    row = (starttime, endtime, zone_num, user_num, thief_num)
    try:
        sql.additem('metatable(start_time,end_time,zone_num,user_num,thief_num )', [row],
                    format='(%s,%s,%s,%s,%s)')
    except ValueError:
        traceback.print_exc()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a6b41655f02282e4209fe8686428eda883251306
|
084b3a30a84f20e3eeda5c3846705695918374b0
|
/dbt_gen/py3env/lib/python3.5/site-packages/snowflake/connector/version.py
|
2597e133b042b02ff7e4fcda8de0ddcb01c7c120
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
norton120/dbt_gen
|
db3f77cb164e94870697ece7ef4e1093441d832d
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
refs/heads/master
| 2021-05-01T15:03:44.301147
| 2018-02-16T01:56:25
| 2018-02-16T01:56:25
| 121,029,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
# Update this tuple when releasing a new version.
# Keep the fourth element as None — it is a placeholder, not part of releases.
VERSION = (1, 5, 0, None)
|
[
"norton120@gmail.com"
] |
norton120@gmail.com
|
0255c053d28e22970556e6a2ff0f3ee1edab6c56
|
0578b6008faa202291aa23d29648c4b371656f62
|
/Connect4/Board.py
|
57fa577504d4298151c86e601c6cb8e306b959a4
|
[] |
no_license
|
mirzalorena/Fundamentals-Of-Programming
|
491d7e1ea575554e5ab160aafe3943fcaf4e7997
|
dbc668da2832bbdab28bc34de637e59e46d2f68c
|
refs/heads/master
| 2020-11-27T16:45:47.727316
| 2019-12-22T07:45:56
| 2019-12-22T07:45:56
| 229,533,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,736
|
py
|
'''
Created on Dec 20, 2018
@author: Lorena
'''
class Board:
    """Connect-4 style board: `width` columns x `height` rows.

    Cells hold " " when empty; row 0 is the top, columns are 1-based in
    the public API (valid_move/add_cell).
    """
    def __init__(self, width, height):
        self.board = [[" "] * width for _ in range(height)]
        self.width = width
        self.height = height

    def clear_board(self):
        """
        clears the board
        """
        self.board = [[" "] * self.width for _ in range(self.height)]

    def full_board(self):
        """
        Check whether the board is full (no empty cell left).
        output: True - if full, False - otherwise
        """
        for row in self.board:
            for cell in row:
                if cell == " ":
                    return False
        return True

    def print_board(self):
        ''' displays current board followed by 1-based column numbers
        '''
        print("-" * (2 * self.width), end=' ')
        print()
        for row in self.board:
            for cell in row:
                print(cell, end=' ')
            print()
        print("-" * (2 * self.width), end=' ')
        print()
        for col in range(self.width):
            print(col + 1, end=' ')

    def get_empty_cell(self):
        """
        Return the row index of every empty cell.
        BUG FIX: originally iterated range(self.board) (TypeError) and
        compared cells against 0 although empty cells hold " ".
        """
        res = []
        for i in range(len(self.board)):
            for j in range(len(self.board[i])):
                if self.board[i][j] == " ":
                    res.append(i)
        return res

    def valid_move(self, col):
        """
        Validate a 1-based column number.
        BUG FIX: previously used 0-based bounds (accepted 0, rejected
        `width`) while add_cell expects columns 1..width.
        """
        return 1 <= col <= self.width

    def add_cell(self, col, obj):
        """
        adds an object to the lowest empty cell of the 1-based column.
        Raises ValueError for an invalid or full column.
        """
        # Validate once up front (the original re-checked every iteration).
        if col <= 0:
            raise ValueError("Invalid column")
        if col > self.width:
            raise ValueError("Invalid column")
        for r in range(self.height - 1, -1, -1):
            if self.board[r][col - 1] == " ":
                self.board[r][col - 1] = obj
                break
        else:
            raise ValueError("Column full")

    def check_winner(self):
        """Return True if any four equal, non-empty cells line up."""
        b = self.board
        # checks horizontally
        for i in range(self.height):
            for j in range(self.width - 3):
                if b[i][j] != " " and b[i][j] == b[i][j + 1] == b[i][j + 2] == b[i][j + 3]:
                    return True
        # checks vertically — BUG FIX: j must cover every column
        # (the original stopped at width-3, missing the last 3 columns).
        for i in range(self.height - 3):
            for j in range(self.width):
                if b[i][j] != " " and b[i][j] == b[i + 1][j] == b[i + 2][j] == b[i + 3][j]:
                    return True
        # checks major (down-right) diagonal
        for i in range(self.height - 3):
            for j in range(self.width - 3):
                if b[i][j] != " " and b[i][j] == b[i + 1][j + 1] == b[i + 2][j + 2] == b[i + 3][j + 3]:
                    return True
        # checks minor (up-right) diagonal — BUG FIX: the original
        # re-checked the major diagonal with negative indices (which wrap
        # around in Python) and never examined the anti-diagonal.
        for i in range(3, self.height):
            for j in range(self.width - 3):
                if b[i][j] != " " and b[i][j] == b[i - 1][j + 1] == b[i - 2][j + 2] == b[i - 3][j + 3]:
                    return True
        return False
|
[
"noreply@github.com"
] |
noreply@github.com
|
9beb997cd9e41a5838e3e62286a18238532f2c0a
|
21c3daa3c4209314767ccd2cf4ad61d0e43b645f
|
/ex2.py
|
830394cc3cf37897799547cb425e55e18bb14c42
|
[] |
no_license
|
suneff/python-unipi
|
d6207b9d7862fa35ad5642aa62b50c7296b12926
|
43baed544286b336c0b0801390e4ef81eb118982
|
refs/heads/master
| 2021-01-21T10:45:53.222458
| 2017-02-28T21:58:40
| 2017-02-28T21:58:40
| 83,481,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# Python 2 script: reads a parenthesis sequence and prints True when it
# is balanced, False otherwise (prompt is in Greek).
inp=raw_input("Dwse mou mia akolouthia parenthesewn: \t")
length=len(inp)
flag=False
# A balanced sequence must have even length >= 2, start with "(" and
# end with ")".
if (length>=2) and ((length % 2) == 0):
    if inp[0]=="(" and inp[-1]==")":
        position=0
        count=0
        # Scan left to right; stop early if the running count goes
        # negative, i.e. a ")" appeared before its matching "(".
        while (position<length) and (count>=0):
            # Add 1 for every "(" and subtract 1 for every ")"; in a
            # valid sequence the counter must be 0 at the end.
            if inp[position]=="(":
                count+=1
            else:
                count-=1
            position+=1
        if (count==0):
            flag=True
print flag
|
[
"noreply@github.com"
] |
noreply@github.com
|
f262ce49a1b63243520cfc38fef089e4926dcf7f
|
5552481b05fb515c25cf86ce224cc4a25e296c41
|
/src/_old/comm_listener.py
|
feca642ace6635dd3213e502cc3fd5bb73f06746
|
[] |
no_license
|
theodorekoutros/comm_sender
|
dac64cdbbd81fee874fdf340907816a312f755ed
|
58e0fb53543c9305ee93a360c0c3166e898cb80b
|
refs/heads/master
| 2020-04-11T22:24:43.456608
| 2019-07-10T21:21:03
| 2019-07-10T21:21:03
| 162,135,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
class ListenerNode:
    """Minimal ROS node that logs every std_msgs/String seen on /key."""
    def __init__(self):
        # Node name is assigned by rospy.init_node() before construction.
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initialzing." %(self.node_name))
        # queue_size=1: keep only the latest message if callbacks lag.
        self.sub_key = rospy.Subscriber("/key", String, self.callback, queue_size=1)
    def callback(self,msg):
        # Invoked once per message published on /key.
        rospy.loginfo(rospy.get_caller_id() + 'I heard %s', msg.data)
if __name__=="__main__":
    # anonymous=False: the node name is fixed, so a second instance would clash.
    rospy.init_node('listener_node', anonymous=False)
    node = ListenerNode()
    rospy.spin()  # block until shutdown, servicing subscriber callbacks
|
[
"koutrost@student.ethz.ch"
] |
koutrost@student.ethz.ch
|
a35b19e6d1369853d8c70ee127b6d3dee278778a
|
70a0cf4d95ea70e9f24af69fa1a012f272f23330
|
/BOJ/02178-미로_탐색-yeonhee.py
|
17c3feef295e35d2cae6cc434ec35698967aed8c
|
[] |
no_license
|
devpla/algorithm
|
6ee75f0f4cf162674ac626766ce016de843f2fbb
|
b1d9bf9b9f79e4d7295784a6f63de0c0b07963ca
|
refs/heads/master
| 2023-08-28T14:09:30.706665
| 2021-10-13T14:36:27
| 2021-10-13T14:36:27
| 398,161,819
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
from collections import deque
DIRECTION = ((0, 1), (0, -1), (1, 0), (-1, 0))
def bfs():
    """Breadth-first search over the global maze `graph` (n rows, m cols).

    Cells holding 1 are unvisited open cells; each reachable cell is
    overwritten with its 1-based distance from (0, 0).  Returns the value
    stored in the bottom-right cell (the BOJ 2178 answer).
    """
    q = deque([(0, 0)])
    graph[0][0] = 1  # start cell: distance 1
    while q:
        r, c = q.popleft()
        for dr, dc in DIRECTION:
            nr = r + dr
            nc = c + dc
            # The start cell keeps the marker value 1, so without the explicit
            # exclusion it would look "unvisited" forever and get re-enqueued
            # with a corrupted distance.
            if (0 <= nr < n and 0 <= nc < m and graph[nr][nc] == 1
                    and (nr, nc) != (0, 0)):
                graph[nr][nc] = graph[r][c] + 1
                q.append((nr, nc))
    return graph[-1][-1]
# Board size, then n rows of contiguous digits: 1 = open cell, 0 = wall.
n, m = map(int, input().split())
graph = [list(map(int, input())) for _ in range(n)]
print(bfs())
|
[
"chaeyeonhee@kakao.com"
] |
chaeyeonhee@kakao.com
|
7140bca0b1e84c19e6a69277ccdfc52ae883fbdf
|
7ce07395e1b5ace33c68a2755a956bafddd3adb9
|
/contribstats_apis/apps.py
|
a1e5a36819d55d23c4f25d551b48046d087f5f14
|
[] |
no_license
|
Discovery-VSTS/codemetric
|
8014b75e7fddf86490cc29a72a297c913e2037ab
|
08df5f5a191279150b6f82c8e45b648e2f1c0073
|
refs/heads/master
| 2021-01-20T11:52:28.358595
| 2017-03-23T15:00:09
| 2017-03-23T15:00:09
| 80,984,988
| 0
| 1
| null | 2017-03-18T15:42:36
| 2017-02-05T10:15:37
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
from django.apps import AppConfig
class ContribstatsApisConfig(AppConfig):
    """Django application configuration for the contribstats_apis app."""
    name = 'contribstats_apis'
|
[
"minhlong.langos@gmail.com"
] |
minhlong.langos@gmail.com
|
5bcab2190075533c78ad6b2d00437877ca00c388
|
f8f7f2d5d74364805465fa56a2e89923265f8248
|
/devel/lib/python2.7/dist-packages/tnp/msg/_Coordinates.py
|
19d40aa0b6c5bf1a48436f1bd313f06aa86b7fec
|
[] |
no_license
|
kefalakis/qtnp
|
2c83a4a1381f0ff48bcfb6808cbed62d47f14e34
|
5066cc43bd654729469fa1346fa87cd012c8eabb
|
refs/heads/master
| 2021-01-13T13:06:39.828109
| 2016-07-17T15:27:23
| 2016-07-17T15:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,041
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tnp/Coordinates.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Coordinates(genpy.Message):
  """Autogenerated ROS message class for tnp/Coordinates: a placemark type
  plus parallel longitude/latitude float64 arrays.  Generated by genpy —
  do not hand-edit; the (de)serializers follow the standard ROS wire format
  (little-endian length prefixes, then packed payload)."""
  _md5sum = "5233f15c788dc5823ec41234899f903f"
  _type = "tnp/Coordinates"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """string placemark_type
float64[] longitude
float64[] latitude
"""
  __slots__ = ['placemark_type','longitude','latitude']
  _slot_types = ['string','float64[]','float64[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       placemark_type,longitude,latitude
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(Coordinates, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.placemark_type is None:
        self.placemark_type = ''
      if self.longitude is None:
        self.longitude = []
      if self.latitude is None:
        self.latitude = []
    else:
      self.placemark_type = ''
      self.longitude = []
      self.latitude = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # string field: 4-byte little-endian length prefix, then UTF-8 bytes
      _x = self.placemark_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # float64[] fields: 4-byte element count, then packed little-endian doubles
      length = len(self.longitude)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.pack(pattern, *self.longitude))
      length = len(self.latitude)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.pack(pattern, *self.latitude))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # (generated code: the parameter name `str` shadows the builtin)
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.placemark_type = str[start:end].decode('utf-8')
      else:
        self.placemark_type = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.longitude = struct.unpack(pattern, str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.latitude = struct.unpack(pattern, str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.placemark_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # numpy variant: arrays are written directly from their raw buffers
      length = len(self.longitude)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.longitude.tostring())
      length = len(self.latitude)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.latitude.tostring())
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.placemark_type = str[start:end].decode('utf-8')
      else:
        self.placemark_type = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.longitude = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.latitude = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
|
[
"fbalampanis@gmail.com"
] |
fbalampanis@gmail.com
|
959fa6cf6f372189e821fd5412854a9548fa18f2
|
fb657665c617aff5315f4b492807eb4291da61bf
|
/Time-Space Tradeoff/Hashing/double hashing.py
|
03a091eca5525891ce0c53a1174ed16ad0841250
|
[] |
no_license
|
AngelWings1997/Algorithm-and-Complexity
|
12348effc5f4d2cd41df81b2e0eaa0912cf4235b
|
147e7990359d2ec6060e255908e1b379bd4f85ce
|
refs/heads/master
| 2023-04-08T14:45:38.388958
| 2021-04-20T03:36:53
| 2021-04-20T03:36:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,385
|
py
|
"""
# -*- coding: utf-8 -*-
@author: Hongzhi Fu
"""
# implementation of hashing
# the purpose is to split items into hash table evenly,
# and minimize collisions as fewer as possible
# table size is typically 1.5 larger than the number of items
# modulo operation is chosen as a hash function
# collision resolution mechanism: double hashing
# average attempts for (un)successful search is calculated to evaluate efficiency of each mechanism
class HashTable(object):
    """Open-addressing hash table for non-negative integer keys, using
    double hashing for collision resolution.

    Primary hash: ``key % size``; probe step: ``size - 2 - key % (size - 2)``
    (always in [1, size-2], so probing always moves).  ``size`` should be
    prime so every probe sequence cycles through the whole table.
    """
    def __init__(self, size):
        self.size = size
        self.table = [None for i in range(size)]
    def function(self, key):
        # Primary hash function: simple modulo.
        return key % self.size
    def rehash(self, key):
        # Secondary hash: nonzero probe step; m - 2 should also be prime.
        m = self.size
        return m - 2 - key % (m - 2)
    def put(self, key):
        """Insert `key`, probing until a free slot is found.

        The table must not be completely full, otherwise probing never ends.
        """
        address = self.function(key)
        # Compare against None explicitly: a stored key of 0 is falsy, so the
        # previous `while self.table[address]:` treated that slot as empty and
        # silently overwrote it (get() already used `is not None`).
        while self.table[address] is not None:
            address = self.function(address+self.rehash(key))
        self.table[address] = key
    def get(self, key):
        """Return (found, attempts): whether `key` is stored and how many
        probes were needed to decide (an empty slot ends the search)."""
        value = self.function(key)
        cnt = 1 # one attempt in a minimum
        while self.table[value] is not None:
            if self.table[value] == key:
                return True, cnt
            value = self.function(value+self.rehash(key))
            cnt += 1
        return False, cnt
# Demo: build a table of 9 keys in a 13-slot table, then measure the average
# number of probes for successful and unsuccessful lookups.
keys = [58, 19, 75, 38, 29, 4, 60, 94, 84]
table_size = 13 # prime number is recommended
hashtable = HashTable(table_size)
# build hash table
print("Put items into hash table:\n", keys)
for key in keys:
    hashtable.put(key)
# search: 60 and 84 are stored, 22 is not
print("Get items from hash table:")
search_keys = [60, 84, 22]
for key in search_keys:
    status, attempt = hashtable.get(key)
    if status:
        print("\tKey {} has been found, with {} attempt(s).".format(key, attempt))
    else:
        print("\tKey {} has not been found.".format(key))
# average case: success — mean probe count over all stored keys
counter_success = 0
for key in keys:
    _, attempt = hashtable.get(key)
    counter_success += attempt
print("Average attempts for successful search is %.2f." % (counter_success/len(keys)))
# average case: fail — one absent key hashing to each home slot of the table
# NOTE(review): 75 below is also in `keys`, so that sample actually succeeds —
# confirm whether a different absent key (e.g. 88) was intended.
counter_fail = 0
failure_keys = [26, 1, 67, 42, 82, 31, 97, 59, 21, 87, 75, 63, 116] # failure keys for each position of hash table
for key in failure_keys:
    _, attempt = hashtable.get(key)
    counter_fail += attempt
print("Average attempts for unsuccessful search is %.2f." % (counter_fail/len(failure_keys)))
|
[
"Heartbeats1216@gmail.com"
] |
Heartbeats1216@gmail.com
|
151f79d6e5570854101663710bf6c53ec3afafdc
|
1a1a9906ab0e56335eebe543d1c145750caaa631
|
/modules/gateways/cloud-init.sh
|
deea05919cf5a455dea02f1277272b3b975473c8
|
[] |
no_license
|
bridgecrew-perf4/lab-in-a-box
|
840021e9b38ddc4450c0682831a32ac06dfca0d7
|
ad33a08b18cc3a5f3057afad0505603cee4b4422
|
refs/heads/main
| 2023-03-19T11:53:39.523806
| 2021-02-17T15:23:52
| 2021-02-17T15:23:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
sh
|
#!/usr/bin/python3 /etc/cloud_config.py
# Terraform template for the gateway's cloud-init payload: every ${...}
# placeholder is substituted by Terraform before boot, and the resulting
# key=value lines are presumably parsed by /etc/cloud_config.py (see shebang)
# — TODO confirm against that script.
installationType="${installation_type}"
allowUploadDownload="${allow_upload_download}"
# NOTE(review): stray space after '=' on the next line — as shell this would
# fail; harmless only if cloud_config.py's parser tolerates it. Verify.
osVersion= "${os_version}"
templateName="${template_name}"
templateVersion="${template_version}"
isBlink="${is_blink}"
bootstrapScript64="${bootstrap_script64}"
location="${location}"
sicKey="${sic_key}"
tenantId="${tenant_id}"
virtualNetwork="${virtual_network}"
clusterName="${cluster_name}"
externalPrivateAddresses="${external_private_addresses}"
customMetrics="${enable_custom_metrics}"
|
[
"stuartg@checkpoint.com"
] |
stuartg@checkpoint.com
|
ba0236d70ba44c3bbd27a29b3e9e66d95c26ab01
|
d245145b5e54fd1111b9e9a7349fe3aa045e0eeb
|
/queue-stack/MyCircularQueue.py
|
05831985759e7a96eb4b53dac1e4d1021ddd736b
|
[] |
no_license
|
scottfu001/leetcode
|
680d461d072fe70cb51a1342e98ab0f24f090855
|
70c43d67f6faae9d92f28da764a1fb32ca564fa5
|
refs/heads/main
| 2023-05-05T04:35:18.327905
| 2021-05-26T05:55:29
| 2021-05-26T05:55:29
| 363,279,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
class MyCircularQueue:
    """Fixed-capacity FIFO queue backed by a plain Python list.

    Mirrors the LeetCode 622 interface: enQueue/deQueue report success as
    booleans, Front/Rear return -1 when the queue is empty.
    """

    def __init__(self, k: int):
        self.q = []   # queued values, oldest first
        self.k = k    # maximum number of elements

    def enQueue(self, value: int) -> bool:
        """Append `value` at the rear; False when the queue is already full."""
        if self.isFull():
            return False
        self.q.append(value)
        return True

    def deQueue(self) -> bool:
        """Drop the front element; False when there is nothing to drop."""
        if self.isEmpty():
            return False
        del self.q[0]
        return True

    def Front(self) -> int:
        """Front element, or -1 if the queue is empty."""
        return -1 if self.isEmpty() else self.q[0]

    def Rear(self) -> int:
        """Rear element, or -1 if the queue is empty."""
        return -1 if self.isEmpty() else self.q[-1]

    def isEmpty(self) -> bool:
        """True when no elements are queued."""
        return len(self.q) == 0

    def isFull(self) -> bool:
        """True when the queue holds k (or more) elements."""
        return len(self.q) >= self.k
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
# param_4 = obj.Rear()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c9f187898d60ce3cc53e084f94baf1419abcf063
|
ab68f12dbfe8154564c3a10afb7b0cf36d5061d4
|
/financial_prepp.py
|
e273fd7387a4ed09072ce70245b4a16c6fe52e58
|
[] |
no_license
|
esiiol/financial-text-preprocessing
|
264e736833305beadad3b6f8b301553f5e03e3c6
|
de6e57cb6ae9a8b4871955472c5c932ab395792f
|
refs/heads/main
| 2023-02-22T10:19:09.039142
| 2021-01-28T14:00:15
| 2021-01-28T14:00:15
| 333,775,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,447
|
py
|
#### Author: EO
#### github: esiiol
import string
import re
import pycountry as pc
import pickle
# ---- Lookup tables used by FinancialReportsPreprocess ----------------------
# Ordinal abbreviations -> words.
NUMBERS_DICT = {'1st': 'first', '2nd': 'second', '3rd': 'third', '4th': 'fourth', '5th': 'fifth', '6th': 'sixth',
                '7th': 'seventh', '8th': 'eighth', '9th': 'ninth', '10th': 'tenth'}
# Mobile-network generation abbreviations.
STANDARDS_DICT = {'3g': 'third generation', '4g': 'four genereation', '5g': 'five generation'}
# Unit abbreviations loaded from a pickle in the working directory, then
# patched by hand below (finance-specific additions and removals).
with open('units_dict.pickle', 'rb') as handle:
    UNITS_DICT = pickle.load(handle)
UNITS_DICT['k'] = 'kilo'
UNITS_DICT['m'] = 'million'
UNITS_DICT['bn'] = 'billion'
UNITS_DICT['tn'] = 'trillion'
UNITS_DICT['x'] = '<rate>'
UNITS_DICT['bncapital'] = 'billion capital'
UNITS_DICT['bnequity'] = 'billion equity'
UNITS_DICT['bnextdividend'] = 'billion extra dividend'
UNITS_DICT['bnimpact'] = 'billion impact'
UNITS_DICT['ktonnes'] = 'kilo tonnes'
UNITS_DICT['eimpact'] = 'economic impact'
UNITS_DICT['mbit'] = 'mega bit'
UNITS_DICT['mbunker'] = 'million bunker'
UNITS_DICT['mcompleted'] = 'million completed'
UNITS_DICT['mconfirms'] = 'million confirms'
UNITS_DICT['mdistrict'] = 'million district'
UNITS_DICT['mestimate'] = 'million estimate'
UNITS_DICT['mextra'] = 'million extra'
UNITS_DICT['mhigher'] = 'million higher'
UNITS_DICT['mimpact'] = 'million impact'
UNITS_DICT['mlimited'] = 'million limited'
UNITS_DICT['moil'] = 'million oil'
UNITS_DICT['morder'] = 'million order'
UNITS_DICT['more'] = 'more'
UNITS_DICT['mreversed'] = 'million reversed'
UNITS_DICT['mstrong'] = 'million strong'
UNITS_DICT['mterminals'] = 'million terminals'
UNITS_DICT['mtonnes'] = 'million tonnes'
UNITS_DICT['musd'] = 'million usd'
UNITS_DICT['mhz'] = '<units>'
UNITS_DICT['¼'] = '<frac>'
UNITS_DICT['½'] = '<frac>'
# Drop entries that collide with ordinary English words / finance terms.
UNITS_DICT.pop('a')
UNITS_DICT.pop('am')
UNITS_DICT.pop('as')
UNITS_DICT.pop('ebit')
UNITS_DICT.pop('ex')
UNITS_DICT.pop('i')
UNITS_DICT.pop('min')
UNITS_DICT.pop('mill')
UNITS_DICT.pop('more')
UNITS_DICT.pop('no')
UNITS_DICT.pop('of')
UNITS_DICT.pop('per')
UNITS_DICT.pop('s')
# Financial abbreviations (pickled), plus a few manual additions.
with open('financial_abbreviations.pickle', 'rb') as handle:
    FINANTIAL_ABBRV_DICT = pickle.load(handle)
FINANTIAL_ABBRV_DICT['g&a'] = 'general and administration expense'
FINANTIAL_ABBRV_DICT['ir'] = 'interest rate'
FINANTIAL_ABBRV_DICT['kpi'] = 'key performance indicator'
FINANTIAL_ABBRV_DICT['kyc'] = 'know your customer'
FINANTIAL_ABBRV_DICT['mom'] = 'month over month'
# ISO 4217 currency codes (duplicates removed via set), lower-cased below.
CURRENCY_LIST = set('AFN EUR ALL DZD USD EUR AOA XCD XCD ARS AMD AWG AUD EUR AZN BSD BHD BDT BBD BYN EUR BZD XOF BMD INR BTN BOB BOV USD BAM BWP NOK BRL USD BND BGN XOF BIF CVE KHR XAF CAD KYD XAF XAF CLP CLF CNY AUD AUD COP COU KMF CDF XAF NZD CRC XOF HRK CUP CUC ANG EUR CZK DKK DJF XCD DOP USD EGP SVC USD XAF ERN EUR SZL ETB EUR FKP DKK FJD EUR EUR EUR XPF EUR XAF GMD GEL EUR GHS GIP EUR DKK XCD EUR USD GTQ GBP GNF XOF GYD HTG USD AUD EUR HNL HKD HUF ISK INR IDR XDR IRR IQD EUR GBP ILS EUR JMD JPY GBP JOD KZT KES AUD KPW KRW KWD KGS LAK EUR LBP LSL ZAR LRD LYD CHF EUR EUR MOP MKD MGA MWK MYR MVR XOF EUR USD EUR MRU MUR EUR XUA MXN MXV USD MDL EUR MNT EUR XCD MAD MZN MMK NAD ZAR AUD NPR EUR XPF NZD NIO XOF NGN NZD AUD USD NOK OMR PKR USD PAB USD PGK PYG PEN PHP NZD PLN EUR USD QAR EUR RON RUB RWF EUR SHP XCD XCD EUR EUR XCD WST EUR STN SAR XOF RSD SCR SLL SGD ANG XSU EUR EUR SBD SOS ZAR SSP EUR LKR SDG SRD NOK SEK CHF CHE CHW SYP TWD TJS TZS THB USD XOF NZD TOP TTD TND TRY TMT USD AUD UGX UAH AED GBP USD USD USN UYU UYI UYW UZS VUV VES VND USD USD XPF MAD YER ZMW ZWL XBA XBB XBC XBD XTS XXX XAU XPD XPT XAG'.split())
CURRENCY_LIST = list(map(lambda x: x.lower(), list(CURRENCY_LIST)))
# Lower-cased country names from pycountry.
COUNTRY_LIST = []
for item in list(pc.countries):
    COUNTRY_LIST.append(item.name.lower())
# Continents and their adjective forms.
CONTINENT_LIST = ['asia', 'north america', 'south america', 'africa', 'europe', 'oceania', 'antarctica',
                  'asian', 'north american', 'south american', 'african', 'european', 'oceanian', 'antarctican']
# Subregion phrases (noun + adjective pairs); matched before continents.
SUBREGION_LIST = ['northern africa', 'northern african',
                  'north africa', 'north african',
                  'eastern africa', 'eastern african',
                  'east africa', 'east african',
                  'middle africa', 'middle african',
                  'southern africa', 'southern african',
                  'south africa', 'south african',
                  'western africa', 'western african',
                  'west africa', 'west african',
                  'northern aisa', 'northern aisan',
                  'north aisa', 'north aisan',
                  'eastern aisa', 'eastern aisan',
                  'east aisa', 'east aisan',
                  'middle aisa', 'middle aisan',
                  'southern aisa', 'southern aisan',
                  'south aisa', 'south aisan',
                  'western aisa', 'western aisan',
                  'west aisa', 'west aisan',
                  'middle east',
                  'northern europe', 'northern european',
                  'north europe', 'north european',
                  'eastern europe', 'eastern european',
                  'east europe', 'east european',
                  'middle europe', 'middle european',
                  'southern europe', 'southern european',
                  'south europe', 'south european',
                  'western europe', 'western european',
                  'west europe', 'west european',
                  'scandinavia', 'scandinavian',
                  ]
# Company names (pickled), plus manual additions.
with open('company_names.pickle', 'rb') as handle:
    COMPANY_NAMES_LIST = pickle.load(handle)
COMPANY_NAMES_LIST.append('TOT')
COMPANY_NAMES_LIST.append('dtac')
COMPANY_NAMES_LIST.append('Dtac')
COMPANY_NAMES_LIST.append('CAT')
COMPANY_NAMES_LIST.append('Lehto')
class FinancialReportsPreprocess():
    """Text-normalization pipeline for financial report snippets.

    Strips HTML/URLs and punctuation, expands financial/unit/ordinal
    abbreviations, and replaces currencies, companies, countries, dates,
    percentages, years and plain numbers with placeholder tokens.  The
    replacement order inside preprocess() is significant (e.g. subregions
    before continents, numbers last).

    NOTE: the parameter name `string` used throughout shadows the stdlib
    `string` module imported at the top of the file (which is unused here).
    """
    def __init__(self, tokens_dict=None):
        # tokens_dict: optional mapping of token names ('company', 'currency',
        # 'date', ...) to custom replacement strings; None uses the defaults.
        self.tokens_dict = tokens_dict
        # Punctuation removed up-front (before tokenization) ...
        self.punctuations_pre = '!"#$()*/:;<=>?@[\\]^_`{|}~'
        self.trans_tbl_pre = ''.maketrans(self.punctuations_pre, len(self.punctuations_pre)*' ')
        # ... and punctuation removed at the very end (kept meanwhile because
        # tokens like <number_range> rely on '-', '.', ',').
        self.punctuations_post = '&.,-+\''
        self.trans_tbl_post = ''.maketrans(self.punctuations_post, len(self.punctuations_post)*' ')
    def preprocess(self, string):
        """
        input: string:str
            The text to get preprocessed
        output: string:str
            The preprocessed output
        """
        string = self.remove_html_tags(string)
        string = self.remove_urls(string)
        string = string.translate(self.trans_tbl_pre)
        # The two branches run the same pipeline; the else branch passes the
        # caller-supplied tokens instead of the defaults.
        if self.tokens_dict is None:
            string = self.insert_company_token(string)
            string = string.lower()
            string = self.insert_financial_unabbrv_words(string)
            string = self.insert_number_unabbrv_words(string)
            string = self.insert_standard_unabbrv_words(string)
            string = self.num_unit_seperator(string)
            string = self.insert_financial_unabbrv_words(string)
            string = self.insert_unit_unabbrv_words(string)
            string = self.insert_currency_token(string)
            string = self.insert_country_token(string)
            string = self.insert_continent_token(string)
            string = self.insert_subregion_token(string)
            string = self.remove_apostrophe_s(string)
            string = self.insert_Q1_token(string)
            string = self.insert_Q2_token(string)
            string = self.insert_Q3_token(string)
            string = self.insert_Q4_token(string)
            string = self.insert_date_token(string)
            string = self.insert_percentages_token(string)
            string = self.insert_positive_percentage_token(string)
            string = self.insert_negative_percentage_token(string)
            string = self.insert_percentage_token(string)
            string = self.insert_year_token(string)
            string = self.insert_years_token(string)
            string = self.insert_numbers_token(string)
            string = self.insert_number_token(string)
            string = re.sub('\n', ' ', string)
            string = re.sub('\t', ' ', string)
            string = string.translate(self.trans_tbl_post)
            return string
        else:
            string = self.insert_company_token(string, token=self.tokens_dict['company'])
            string = string.lower()
            string = self.insert_financial_unabbrv_words(string)
            string = self.insert_number_unabbrv_words(string)
            string = self.insert_standard_unabbrv_words(string)
            string = self.num_unit_seperator(string)
            string = self.insert_financial_unabbrv_words(string)
            string = self.insert_unit_unabbrv_words(string)
            string = self.insert_currency_token(string, token=self.tokens_dict['currency'])
            string = self.insert_country_token(string, token=self.tokens_dict['country'])
            string = self.insert_continent_token(string, token=self.tokens_dict['continent'])
            string = self.insert_subregion_token(string, token=self.tokens_dict['subregion'])
            string = self.remove_apostrophe_s(string)
            string = self.insert_Q1_token(string, token=self.tokens_dict['Q1'])
            string = self.insert_Q2_token(string, token=self.tokens_dict['Q2'])
            string = self.insert_Q3_token(string, token=self.tokens_dict['Q3'])
            string = self.insert_Q4_token(string, token=self.tokens_dict['Q4'])
            string = self.insert_date_token(string, token=self.tokens_dict['date'])
            string = self.insert_percentages_token(string, token=self.tokens_dict['percentages'])
            string = self.insert_positive_percentage_token(string, token=self.tokens_dict['positive_percentage'])
            string = self.insert_negative_percentage_token(string, token=self.tokens_dict['negative_percentage'])
            string = self.insert_percentage_token(string, token=self.tokens_dict['percentage'])
            string = self.insert_year_token(string, token=self.tokens_dict['year'])
            string = self.insert_years_token(string, token=self.tokens_dict['years'])
            string = self.insert_numbers_token(string, token=self.tokens_dict['numbers'])
            string = self.insert_number_token(string, token=self.tokens_dict['number'])
            string = re.sub('\n', ' ', string)
            string = re.sub('\t', ' ', string)
            string = string.translate(self.trans_tbl_post)
            return string
    def remove_html_tags(self, string):
        # Drops simple <tag ...>...</tag> spans (non-nested only).
        return re.sub(r'<([a-z]+)\s[^<]+<\/\1>', '', string)
    def remove_urls(self, string):
        return re.sub(r'((https?|ftp|smtp):\/\/(([a-zA-Z0-9.\-_=?&\\#]+\/?)+))', '', string)
    def num_unit_seperator(self, string): #ex: 34,000mt
        # Splits a number glued to its trailing unit: "34,000mt" -> "34,000 mt".
        return re.sub(r'(\b\d+([.,]\d+)?)([^-0-9\s]+\b)', r'\1 \3', string)
    def insert_currency_token(self, string, token='<currency>'): #ex: SEK
        # Pre-filter with find() to limit the number of regex passes.
        currencies = []
        for currency in CURRENCY_LIST:
            if string.find(currency) != -1:
                currencies.append(currency)
        for currency in currencies:
            string = re.sub('\\b' + currency + '\\b', token, string)
        return string
    def insert_company_token(self, string, token='<company>'): #ex: Zolando
        companies = []
        for company in COMPANY_NAMES_LIST:
            if string.find(company) != -1:
                companies.append(company)
        for company in companies:
            string = re.sub('\\b' + company + '\\b', token, string)
        return string
    def remove_apostrophe_s(self, string, token=''): #ex: company's
        # Handles both the typographic and the ASCII apostrophe.
        string = re.sub('’s\\b', token, string)
        string = re.sub("'s\\b", token, string)
        return string
    def insert_country_token(self, string, token='<country>'): #ex:china
        countries = []
        for country in COUNTRY_LIST:
            if string.find(country) != -1:
                countries.append(country)
        for country in countries:
            string = re.sub('\\b' + country + '\\b', token, string)
        return string
    def insert_continent_token(self, string, token='<continent>'): #ex:asia
        for continent in CONTINENT_LIST:
            string = re.sub('\\b' + continent + '\\b', token, string)
        return string
    def insert_subregion_token(self, string, token='<subregion>'): #ex:asia #IMPORTANT: this function should come before continent token replacements
        for subregion in SUBREGION_LIST:
            string = re.sub('\\b' + subregion + '\\b', token, string)
        return string
    def insert_Q1_token(self, string, token='first quarter'): #ex:q1
        return re.sub('\\bq1\\b', token, string)
    def insert_Q2_token(self, string, token='second quarter'): #ex:q2
        return re.sub('\\bq2\\b', token, string)
    def insert_Q3_token(self, string, token='third quarter'): #ex:q3
        return re.sub('\\bq3\\b', token, string)
    def insert_Q4_token(self, string, token='fourth quarter'): #ex:q4
        return re.sub('\\bq4\\b', token, string)
    def insert_date_token(self, string, token='<date>'): #ex:9 february
        string = re.sub('\d{1,2}\s(january|february|march|april|may|june|july|august|september|october|november|december)',
                        token,
                        string)
        string = re.sub('\\bjanuary\\b|\\bfebruary\\b|\\bmarch\\b|\\bapril\\b|\\bmay\\b|\\bjune\\b|\\bjuly\\b|\\baugust\\b|\\bseptember\\b|\\boctober\\b|\\bnovember\\b|\\bdecember\\b',
                        token,
                        string)
        return string
    def insert_percentage_token(self, string, token='<percent>'): #ex:3.2%
        return re.sub('\\b\d*\.?\d*%', token, string)
    def insert_percentages_token(self, string, token='<percents>'): #ex:3.2-4.5%
        return re.sub('\\b(\d+\.)?\d+-(\d+\.)?\d+%', token, string)
    def insert_positive_percentage_token(self, string, token='<positive_percent>'): #ex:+3.2%
        return re.sub('[+]\d*\.?\d*%', token, string)
    def insert_negative_percentage_token(self, string, token='<negative_percent>'): #ex:-3.2%
        return re.sub('[-]\d*\.?\d*%', token, string)
    def insert_year_token(self, string, token='<year>'): #ex: 2010
        return re.sub('\\b19\d{2}|20\d{2}\\b', token, string)
    def insert_years_token(self, string, token='<years>'): #ex: 2014-15
        return re.sub('\\b19\d{2}|20\d{2}-\d{2}\\b', token, string)
    def insert_number_token(self, string, token='<number>'): #ex:3.2 #IMPORTANT: This should come after all other token replacements
        return re.sub('[-+]?\d+([.,]\d+)?\\b', token, string)
    def insert_numbers_token(self, string, token='<number_range> '): #ex:34.3-56.4
        return re.sub('\\b(\d+\.)?\d+-(\d+\.)?\d+', token, string)
    def insert_financial_unabbrv_words(self, string):
        # Expand whole-word financial abbreviations found in the text.
        abbrvs = []
        for abbrv in FINANTIAL_ABBRV_DICT.keys():
            if string.find(abbrv) != -1:
                abbrvs.append(abbrv)
        for abbrv in abbrvs:
            string = re.sub('\\b' + abbrv + '\\b', FINANTIAL_ABBRV_DICT[abbrv], string)
        return string
    def insert_number_unabbrv_words(self, string):
        # Expand ordinals such as '1st' -> 'first'.
        abbrvs = []
        for abbrv in NUMBERS_DICT.keys():
            if string.find(abbrv) != -1:
                abbrvs.append(abbrv)
        for abbrv in abbrvs:
            string = re.sub('\\b' + abbrv + '\\b', NUMBERS_DICT[abbrv], string)
        return string
    def insert_standard_unabbrv_words(self, string):
        # Expand network standards such as '5g'.
        abbrvs = []
        for abbrv in STANDARDS_DICT.keys():
            if string.find(abbrv) != -1:
                abbrvs.append(abbrv)
        for abbrv in abbrvs:
            string = re.sub('\\b' + abbrv + '\\b', STANDARDS_DICT[abbrv], string)
        return string
    def insert_unit_unabbrv_words(self, string):
        # Expand unit abbreviations such as 'bn' -> 'billion'.
        abbrvs = []
        for abbrv in UNITS_DICT.keys():
            if string.find(abbrv) != -1:
                abbrvs.append(abbrv)
        for abbrv in abbrvs:
            string = re.sub('\\b' + abbrv + '\\b', UNITS_DICT[abbrv], string)
        return string
|
[
"noreply@github.com"
] |
noreply@github.com
|
12f03aa1dc75eaf7a286d0d8d710da31e00ccb2f
|
3bbda49b8ec6ca7de57f9ea3b6e26d17cfd2b7aa
|
/node_modules/fsevents/build/config.gypi
|
18f8e50cca791052039d23b428f9f450f61a30dd
|
[
"MIT"
] |
permissive
|
Skytim/Angular-Project
|
290795b1c1ca8eac6209d2d4903c35fac7960a2c
|
2e47ec60b5622d853d6cbe474781dca1e2283315
|
refs/heads/master
| 2020-07-28T18:29:22.529132
| 2016-11-13T17:38:08
| 2016-11-13T17:38:08
| 73,403,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_release_urlbase": "",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\TsaiYuShiang\\.node-gyp\\4.4.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "C:\\Users\\TsaiYuShiang\\angular-quickstart\\node_modules\\fsevents\\lib\\binding\\Release\\node-v46-win32-x64\\fse.node",
"module_name": "fse",
"module_path": "C:\\Users\\TsaiYuShiang\\angular-quickstart\\node_modules\\fsevents\\lib\\binding\\Release\\node-v46-win32-x64",
"ignore_optional": "",
"ignore_scripts": "",
"init_license": "MIT",
"init_version": "1.0.0",
"registry": "https://registry.yarnpkg.com",
"save_prefix": "^",
"strict_ssl": "true",
"user_agent": "yarn/0.16.1 npm/? node/v4.4.7 win32 x64",
"version_git_message": "v%s",
"version_git_sign": "",
"version_git_tag": "true",
"version_tag_prefix": "v"
}
}
|
[
"蔡宇祥"
] |
蔡宇祥
|
00fa86ee62a77e85179879f323d588c8c5ec3b54
|
6c3a9a3235592d44254f61ed51d025cf11071030
|
/task_3_2.py
|
ab57ed048f40bb25c4b12f16d8353796654cda0d
|
[] |
no_license
|
stas1803/Data_Engineer
|
caa6ba662db679b3b4f972c38af353043874d337
|
5bec8ddc0323c503bd39f70a4ad1abdbd19fcadc
|
refs/heads/main
| 2023-04-04T17:42:47.408875
| 2021-03-22T11:04:17
| 2021-03-22T11:04:17
| 349,364,995
| 0
| 0
| null | 2021-04-09T08:49:58
| 2021-03-19T09:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 367
|
py
|
def num_translate(number):
    """Print the Spanish translation of an English number word (one..five).

    A leading capital in the input is preserved in the output
    (e.g. 'One' -> 'Uno').  Unknown words print None, matching the
    original dictionary-lookup behaviour.
    """
    dig_2_str = {'one':'uno', 'two':'dos', 'three':'tres', 'four':'cuatro', 'five':'cinco'}
    number_low = number.lower()
    # number[:1] is safe on the empty string, unlike list(number)[0],
    # which raised IndexError for empty input.
    if number[:1].isupper() and number_low in dig_2_str:
        print(dig_2_str[number_low].capitalize())
    else:
        print(dig_2_str.get(number))
num_translate('One')
|
[
"stas1803@gmail.com"
] |
stas1803@gmail.com
|
26c28d596fb8b6712cc4ba60a88c42f88de634df
|
959d6f7027a965f609a0be2885960b63c6dc97bc
|
/facebook/likers/steps.py
|
96cfda296f3d581fbb757246dd37896ae0d2495a
|
[] |
no_license
|
ameetbora/facebook-comments
|
0bf57f8e5b4a8ef7804aa999fa86d9913b7ee99c
|
7649c808164f978b147a4410795eadf374e3d3dc
|
refs/heads/master
| 2020-04-12T14:39:23.733965
| 2018-10-30T06:17:42
| 2018-10-30T06:17:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import time
def login(driver, user_email: str, user_password: str):
    """Open facebook.com and sign in with the given credentials."""
    driver.get("https://www.facebook.com")
    # Locate the three login controls first (same lookup order as before),
    # then fill the form and submit.
    controls = {field: driver.find_element_by_id(field)
                for field in ("email", "pass", "loginbutton")}
    controls["email"].send_keys(user_email)
    controls["pass"].send_keys(user_password)
    controls["loginbutton"].click()
def keep_scrolling(driver, times: int = 99999999999):
    """Scroll the page until Facebook's 'End of results' marker appears.

    Scrolls at most `times` times, sleeping 3 s after each scroll so new
    content can load.  Returns True when the end marker is found and False
    when the scroll budget runs out (the original fell off the end and
    implicitly returned None, which is falsy but an inconsistent contract).
    """
    while times > 0:
        times -= 1
        results_end_notifiers = driver.find_elements_by_xpath("//div[text()='End of results']")
        if len(results_end_notifiers) > 0:
            print("Looks like we found all the likers.")
            return True
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight + 1000);")
        time.sleep(3)
    return False
def get_likers(driver):
    """Collect {'name', 'link'} dicts for every liker currently listed.

    Returns an empty list (after logging) when the scraped name and link
    collections do not line up.
    """
    anchor_xpath = "//table[@role='presentation']//tr//td[position()=2]//a[not(@class)]"
    links = [anchor.get_attribute("href")
             for anchor in driver.find_elements_by_xpath(anchor_xpath)]
    names = [node.text
             for node in driver.find_elements_by_xpath(anchor_xpath + "/div/div")]
    if names and len(names) == len(links):
        return [{"name": liker_name, "link": liker_link}
                for liker_name, liker_link in zip(names, links)]
    print("The names And links didn't match, something is wrong with our xpathing.")
    return []
def get_next_likers(driver):
    """Advance to the next page of likers if a pager link exists.

    Returns True when the 'see more' pager was clicked, False otherwise.
    """
    pager_links = driver.find_elements_by_xpath("//div[@id='see_more_pager']/a")
    if not pager_links:
        return False
    pager_links[0].click()
    return True
def get_facebook_warning(driver):
    """Return True when Facebook's feature-misuse warning banner is visible."""
    matches = driver.find_elements_by_xpath("//div[contains(text(), 'It looks like you’re using this feature in a way it wasn’t meant to be used.')]")
    return bool(matches)
|
[
"lewington@student.unimelb.edu.au"
] |
lewington@student.unimelb.edu.au
|
e2fe0f61764b67cc09280aa54e3523f4515f4dda
|
f1b095ea11282465f8129d457e4b84058b7de426
|
/practice.py
|
7fd5e57278587fedca3c2137958c638d7c7dd807
|
[] |
no_license
|
IvanSivchev/Homeworks
|
d8cf745004a02aee3cf1b05ee5c9c23993368632
|
2f31d9692262d37aa92f1ee6cc09cc4849ef8a09
|
refs/heads/master
| 2022-10-26T21:54:45.524436
| 2020-06-14T23:20:58
| 2020-06-14T23:20:58
| 272,292,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
class Basket:
    """A container with a limited capacity that shrinks as items go in."""

    name = 'Basket'  # display name used in putin() messages
    size = 10        # remaining capacity

    def putin(self, obj):
        """Place obj inside when it fits, reducing the remaining capacity."""
        if obj.size > self.size:
            # Too big for what is left — reject it.
            print('You cant put {} in!'.format(obj.name))
            return
        print(obj.name, 'in {}!'.format(self.name))
        self.size -= obj.size
class Pack(Basket):
    """A smaller Basket: inherits putin() unchanged, but starts at capacity 5."""
    name = 'Pack'  # display name used in inherited putin() messages
    size = 5  # remaining capacity, half a Basket's
class Object:
    """A simple named item with a size, suitable for putting into baskets."""

    def __init__(self, name, size):
        self.name = name
        self.size = size
# Demo: fill a Basket (capacity 10) and a Pack (capacity 5) with three items.
obj = Object('milk', 3)
obj1 = Object('beer', 2)
obj2 = Object('paper', 5)
basket = Basket()
pack = Pack()
basket.putin(obj)   # fits: 10 -> 7
basket.putin(obj1)  # fits: 7 -> 5
basket.putin(obj2)  # fits exactly: 5 -> 0
basket.putin(obj2)  # rejected: the basket is full
pack.putin(obj)     # fits: 5 -> 2
pack.putin(obj1)    # fits: 2 -> 0
pack.putin(obj2)    # rejected: the pack is full
|
[
"vanyasivchev@icloud.com"
] |
vanyasivchev@icloud.com
|
494251cb3ddb98b1b9da717223066f0ed6b69677
|
f2f0478171b5e6d2fc1897e4f40b05eba15a36b6
|
/ex5.py
|
1d0f78354447d159dfede956b8c7d0695518fb36
|
[] |
no_license
|
gssakib/python-projects
|
5930dc612eec8386864ad7b7730642c8155b75ce
|
70c1a356cb563e4b8040dbee7b46222b2b325cf0
|
refs/heads/master
| 2021-09-24T09:49:17.034756
| 2021-09-10T20:25:42
| 2021-09-10T20:25:42
| 57,471,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# Exercise 5: variables and %-style string interpolation.
# Modernised from Python 2 print statements (a syntax error on Python 3)
# to print() calls; the printed output is byte-for-byte identical.
name = 'Zed A. Shaw'
age = 35      # years
height = 74   # inches
weight = 180  # pounds
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'

# Unit-conversion factors derived from 1 inch and 1 pound-mass.
in_no_len = 1
in_no_mass = 1
in_cm = in_no_len * 2.54    # inches -> centimetres
lb_kg = in_no_mass * 0.45   # pounds-mass -> kilograms

print("Let's talk about %s." % name)
print("He's %d inches tall." % height)
print("He's %d pounds heavy." % weight)
print("Actually that's not too heavy.")
print("He's got %s eyes and %s hair." % (eyes, hair))
print("His teeth are usually %s depending on the coffee." % teeth)
print("If I add %d, %d, and %d I get %d." % (age, height, weight, age + height + weight))
print("The conversion factor converting from inches to centi-meter is 1 inch = %f centi-meter" % in_cm)
print("The conversion factor converting from pound-mass to kilo-grams is 1 pound-mass = %f kilo-grams" % lb_kg)
|
[
"sakibgazi9@gmail.com"
] |
sakibgazi9@gmail.com
|
7e03c5d6aaabd3d4ec2c4061fd2fb4562f4f896f
|
548a597a800fbe63e63b6d0bae7a0f000f82c3a7
|
/User/user.py
|
89b662838fbb87659a71f1398bce23e5df3bf810
|
[
"BSD-3-Clause"
] |
permissive
|
SaeGot/SaeGot-Engine_pygame
|
ecd804045915e70f55f95e1ea464f7a7f8f08ce7
|
e97a81d7eeb079164ecc231f4b9406141e84cb55
|
refs/heads/main
| 2023-03-05T15:57:04.201972
| 2021-02-13T17:59:39
| 2021-02-13T17:59:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import pygame
class User:
    """Holds the current user's id and logged-on state."""

    _id = None  # class-level default: no user id yet
    _logon = False  # class-level default: not logged on

    def __init__(self, id = None, logon = False):
        # NOTE(review): parameter `id` shadows the builtin id() — consider
        # renaming, but that would break callers passing it by keyword.
        self._id = id
        self._logon = logon

    def login(self, id):
        # Records the user's id.
        # NOTE(review): does not set _logon to True — confirm whether intended.
        self._id = id
|
[
"noreply@github.com"
] |
noreply@github.com
|
315cb972ca9bd6d4aa858a998327cbe1adc88343
|
3a7947b96c67d8b9360e7fe8d17511085dd399bf
|
/ระดับคำ/question_num_ans_predict.py
|
810958abbe6e47f3c70600f409802aa4abaff025
|
[] |
no_license
|
noratap09/QA_Question_type_1
|
97da7b987914928277d989e19d6e2b0d56db4a7e
|
9d276d26540097603e83abd837865d0e617eda3a
|
refs/heads/master
| 2020-11-25T10:06:33.361504
| 2020-01-29T12:47:04
| 2020-01-29T12:47:04
| 228,611,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,675
|
py
|
import deepcut
import json
# Load the QA dataset; utf-8-sig strips a possible BOM from the file.
my_file = open("data_set_fix.json",'r',encoding = 'utf-8-sig')
txt = my_file.read()
json_obj = json.loads(txt)
from pythainlp.tag import pos_tag, pos_tag_sents
from pythainlp.tag.named_entity import ThaiNameTagger
# Module-level Thai named-entity tagger, shared by the helpers below.
ner = ThaiNameTagger()
def ck_have_num(ans):
    """Return True when the module-level NER tagger finds a NUM entity in ans."""
    return any(tagged[1] == "NUM" for tagged in ner.get_ner(ans))
def predict_have_num(question):
    """Heuristic: does the question ask for a numeric answer?

    Looks for "how much/how many" phrasings: the substrings เท่าไร /
    เท่าไหร่ / เท่าใด anywhere in the raw text, or the token กี่ after
    word segmentation.
    """
    question_tokens = deepcut.tokenize(question)
    if any(phrase in question for phrase in ("เท่าไร", "เท่าไหร่", "เท่าใด")):
        return True
    return "กี่" in question_tokens
def predict_have_person_name(question):
    """Heuristic: the question asks "who" (contains ใคร)."""
    return "ใคร" in question
# Scan the first 15 000 records: count type-1 questions whose answer contains
# a NUM entity, score how many of those the question heuristic predicts, and
# collect the non-numeric tokens (presumably the units) around the numbers.
count = 0   # answers containing a NUM entity
score = 0   # of those, questions our heuristic flagged as numeric
all_num_unit = list()
for count_data , data in enumerate(json_obj['data'][0:15000],start=1):
    if(data['question_type']==1):
        ans = data['answer']
        question = data['question']
        question_id = data['question_id']
        # NOTE(review): ck_have_num/get_ner are fed the token LIST here,
        # not the raw string — confirm the tagger accepts pre-tokenized input.
        ans_troken = deepcut.tokenize(ans)
        question_troken = deepcut.tokenize(question)
        #print("QUS : ",question_troken)
        #print("ANS : ",ans_troken)
        if(ck_have_num(ans_troken)):
            count = count + 1
            #print("Question_ID : ",question_id)
            #print("QUS : ",question_troken)
            #print("ANS : ",ans_troken)
            """
            if(predict_have_person_name(question)):
            ans_ne = ner.get_ner(ans)
            print("Question_ID : ",question_id)
            print("QUS : ",question_troken)
            print("ANS : ",ans_ne)
            """
            if(predict_have_num(question)):
                score = score + 1
                ans_ne = ner.get_ner(ans_troken)
                print("Question_ID : ",question_id)
                print("QUS : ",question_troken)
                print("ANS : ",ans_ne)
                # Everything tagged as not-NUM near the number is kept as a
                # candidate unit word.
                for i in ans_ne:
                    if(i[1]!="NUM"):
                        all_num_unit.append(i[0])
            #if(predict_have_num(question) == False and ck_have_num(ans_troken) == True):
            #print("Question_ID : ",question_id)
            #print("QUS : ",question_troken)
            #print("ANS : ",ans_troken)
# Report recall of the heuristic and dump the deduplicated unit words,
# '|'-separated, to all_num_unit.txt.
print("Total : ",score,"/",count)
all_num_unit = list(set(all_num_unit))
print(all_num_unit)
all_num_unit = "|".join(all_num_unit)
file_all_num_unit = open("all_num_unit.txt","w",encoding = 'utf-8')
file_all_num_unit.write(all_num_unit)
file_all_num_unit.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
018e46a708e7f3628fb3cb3145bd3c952e687e30
|
702277b0be87a6b2f64e1e3e07194993ca5033e7
|
/app.py
|
a5619bcf73e15bc432c93404d49038b3c731ef59
|
[] |
no_license
|
angiesk/flask-101
|
03c3124ed2a9b7d6ebef989754de88ba65992839
|
2aa436a56e85e7fed43e9d2c095f049fa71d712a
|
refs/heads/master
| 2020-04-21T16:58:27.101681
| 2019-02-08T11:27:22
| 2019-02-08T11:27:22
| 169,720,536
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from flask import Flask

# Flask resolves templates/static files relative to this module's import name.
app = Flask(__name__)


@app.route('/')
def home():
    """Serve the landing page."""
    return "<h1> Hello World </h1>"


if __name__ == "__main__":
    # Development server only; debug=True enables the reloader and tracebacks.
    app.run(debug=True, port=8080)
|
[
"shivangi2197@hotmail.com"
] |
shivangi2197@hotmail.com
|
ee2c5b12e32aa25297b2a8707f08e7a5e99f3f03
|
46e1efa57240420cae94a2ce51f10159a169b4c9
|
/hello_celery/hello_celery/urls.py
|
df3b6ae3a1e9961941437b6eb2c41d40607d5d36
|
[
"MIT"
] |
permissive
|
rickding/HelloPython
|
c30d62e53018334e7b3b3b80ed2a7eb097799ac1
|
c3cb07f83642873a3460ffe489c82505923c3c1a
|
refs/heads/master
| 2021-06-16T17:42:29.636391
| 2021-01-31T08:08:25
| 2021-01-31T09:28:41
| 148,429,657
| 2
| 2
|
MIT
| 2020-08-16T08:01:30
| 2018-09-12T06:04:41
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
"""URL configuration for the hello_celery project.

Routes the site root and /job to the chk_job view, and /admin/ to the
built-in Django admin. For URL-dispatch details see:
    https://docs.djangoproject.com/en/2.1/topics/http/urls/
"""
from django.contrib import admin
from django.urls import path
from app.views import chk_job

urlpatterns = [
    path('', chk_job, name='chk'),  # site root -> chk_job view
    path('job', chk_job, name='chk_job'),  # same view, explicit /job URL
    path('admin/', admin.site.urls),  # Django admin site
]
|
[
"dingxl"
] |
dingxl
|
49091e1f2a89e3e9e7ef5ea6391ee62e7ad44bc5
|
19e5f32caa685ef994a3412062307ce5ff753d3f
|
/make_plots.py
|
e5e4505c64b73e80fc74db22bad76eaf53d9ff15
|
[] |
no_license
|
jsbridge/spatial_grism
|
64f21d58ca4fc71340fb6943684d21c799d39d24
|
7a98b7b64c7e8d5a74ce0f1b3d54228d0e34e270
|
refs/heads/master
| 2020-07-07T18:58:55.317728
| 2019-08-20T20:08:13
| 2019-08-20T20:08:13
| 203,446,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,334
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from astropy.io import fits
from glob import glob
from astropy.cosmology import FlatLambdaCDM
from astropy import units as u
from astropy.coordinates import SkyCoord
from scipy.ndimage.filters import gaussian_filter
import matplotlib.patches as patches
from matplotlib.patches import Circle
from scipy import stats
from sklearn.utils.extmath import weighted_mode
import os
import scipy.ndimage as ndi
from astropy.modeling import models
import photutils
import statmorph
from mpl_toolkits.axes_grid1 import make_axes_locatable
import image_diagnostics
reload(image_diagnostics)
from scipy import stats
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
from astropy.convolution import convolve, Box2DKernel, Gaussian2DKernel
def make_plots(way):
    """Plot F105W, [OIII], Hb line maps and [OIII]/Hb ratio maps for each
    CLEAR object listed in the selected catalogue.

    Parameters
    ----------
    way : str
        'up' or 'down' — selects the input ID catalogue and the output
        folder (ratio_plots_moveup / ratio_plots_movedown).

    Side effects: saves <folder>/<field_id>_lines.pdf and
    <folder>/<field_id>_ratio.pdf per object and prints the central ratio
    plus the surrounding-annulus ratios.
    NOTE(review): Python 2 print statements throughout — port before Py3.
    """
    # Pick the catalogue of wanted object IDs and the output folder.
    if way == 'up':
        want = np.genfromtxt('inner_global_moveup.dat', unpack=True, usecols=(0), dtype='S9')
        folder = 'ratio_plots_moveup'
    if way == 'down':
        want = np.genfromtxt('inner_global_movedown.dat', unpack=True, usecols=(0), dtype='S9')
        folder = 'ratio_plots_movedown'
    files = glob('CLEAR_v2.1/fits/*/*')
    for fil in files:
        # File names look like <field>_<num>.<ext>; field_id keeps '<field>_<num>'.
        name = fil.split('/')[-1]
        field_id = (name.split('.'))[0]
        name = (name.split('.'))[0].split('_')
        field = name[0]
        num = int(name[1])
        if field_id in want:
            print field_id
            hdu = fits.open(fil)
            try:
                # Central 16x16-pixel cutouts of the direct image and line maps;
                # *_lim keeps the full-frame map for smoothing / noise estimates.
                img = hdu['DSCI'].data[32:48, 32:48]
                OIII_lim = hdu['LINE', 'OIII'].data
                Hb_lim = hdu['LINE', 'Hb'].data
                OIII = hdu['LINE', 'OIII'].data[32:48, 32:48]
                Hb = hdu['LINE', 'Hb'].data[32:48, 32:48]
                OIII_err = hdu['LINEWHT', 'OIII'].data[32:48, 32:48] # These line weights are the inverse
                Hb_err = hdu['LINEWHT', 'Hb'].data[32:48, 32:48] # variance = (1/sigma^2), according to Gabe B.
            except KeyError:
                print fil+' generated a key error'
                continue
            # Convert inverse-variance weights to 1-sigma uncertainties.
            sigma_OIII = np.sqrt(1/OIII_err)
            sigma_Hb = np.sqrt(1/Hb_err)
            # Keep unsmoothed copies for the error propagation below.
            reg_OIII = np.copy(OIII)
            reg_Hb = np.copy(Hb)
            # Box-smooth the full frames, then re-cut the central region.
            OIII = convolve(OIII_lim, Box2DKernel(2))
            Hb = convolve(Hb_lim, Box2DKernel(2))
            OIII = OIII[32:48, 32:48]
            Hb = Hb[32:48, 32:48]
            ratio = OIII/Hb
            reg_ratio = reg_OIII/reg_Hb
            # Standard quotient error propagation on the unsmoothed maps.
            sigma_ratio = np.abs(reg_OIII/reg_Hb) * np.sqrt((sigma_OIII/reg_OIII)**2 + (sigma_Hb/reg_Hb)**2)
            ratio_wht = 1/sigma_ratio**2
            # Find the brightest pixel near the cutout centre in the direct
            # image (and, unused below, in the OIII map).
            img_center = [ratio.shape[0]/2., ratio.shape[1]/2.]
            img_cen_range = img[int(img_center[0])-2:int(img_center[0])+2,int(img_center[1])-4:int(img_center[1])+2]
            OIII_cen_range = OIII[int(img_center[0])-2:int(img_center[0])+2,int(img_center[1])-4:int(img_center[1])+2]
            center_OIII = np.where(OIII == np.nanmax(OIII_cen_range))
            center = np.where(img == np.nanmax(img_cen_range))
            # Hand-picked pixel offsets forming an annulus around the centre.
            idx = [[center[0]+2, center[1]-2],
                   [center[0]+2, center[1]-1],
                   [center[0]+2, center[1]],
                   [center[0]+2, center[1]+1],
                   [center[0]+2, center[1]+2],
                   [center[0]+1, center[1]-2],
                   [center[0]+1, center[1]+2],
                   [center[0], center[1]-2],
                   [center[0], center[1]+2],
                   [center[0]-2, center[1]-2],
                   [center[0]-2, center[1]-1],
                   [center[0]-2, center[1]],
                   [center[0]-2, center[1]+1],
                   [center[0]-2, center[1]+2],
                   [center[0]-1, center[1]-2],
                   [center[0]-1, center[1]+2],
                   [center[0]+3, center[1]-1],
                   [center[0]+3, center[1]],
                   [center[0]+3, center[1]+1],
                   [center[0]-3, center[1]-1],
                   [center[0]-3, center[1]],
                   [center[0]-3, center[1]+1],
                   [center[0]+1, center[1]-3],
                   [center[0], center[1]-3],
                   [center[0]-1, center[1]-3],
                   [center[0]+1, center[1]+3],
                   [center[0], center[1]+3],
                   [center[0]-1, center[1]+3]]
            y = 15  # NOTE(review): appears unused below — confirm before removing
            # --- Figure 1: direct image + both line maps, annulus outlined ---
            fig = plt.figure()
            ax1 = fig.add_subplot(132)
            ax2 = fig.add_subplot(133)
            ax4 = fig.add_subplot(131)
            im4 = ax4.imshow(img, origin='lower', vmin = 0, vmax =4*np.nanstd(img))
            ax4_divider = make_axes_locatable(ax4)
            cax4 = ax4_divider.append_axes("bottom", size = '6%', pad='3%')
            cb4 = fig.colorbar(im4, cax=cax4, orientation="horizontal")
            cb4.set_label(r'Counts')
            ax4.set_xticks([])
            ax4.set_yticks([])
            ax4.text(0.5, 13.5, 'F105W', color='white', fontweight='bold', fontsize=12)
            for r in idx:
                rect = patches.Rectangle((r[1]-0.5, r[0]-0.5), 1, 1, linewidth = 0.5, edgecolor='black',facecolor='none')
                ax4.add_patch(rect)
            ax4.add_patch(patches.Rectangle((center[1]-0.5, center[0]-0.5),1,1, linewidth = 0.5, edgecolor='k',facecolor='none'))
            # Colour scale is set from an empty corner of the full frame.
            im1 = ax1.imshow(OIII, origin='lower', vmin=0, vmax=2*np.std(OIII_lim[60:80, 0:20]))
            ax1_divider = make_axes_locatable(ax1)
            cax1 = ax1_divider.append_axes("bottom", size = '6%', pad='3%')
            cb1 = fig.colorbar(im1, cax=cax1, orientation="horizontal")
            cb1.set_label(r'10$^{-17}$ erg/s/cm$^2$')
            ax1.text(0.5, 13.5, '[OIII]', color='white', fontweight='bold', fontsize=12)
            ax1.set_xticks([])
            ax1.set_yticks([])
            for r in idx:
                rect = patches.Rectangle((r[1]-0.5, r[0]-0.5), 1, 1, linewidth = 0.5, edgecolor='black',facecolor='none')
                ax1.add_patch(rect)
            ax1.add_patch(patches.Rectangle((center[1]-0.5, center[0]-0.5),1,1, linewidth = 0.5, edgecolor='k',facecolor='none'))
            im2 = ax2.imshow(Hb, origin='lower', vmin=0, vmax=2*np.std(Hb_lim[60:80, 0:20]))
            ax2_divider = make_axes_locatable(ax2)
            cax2 = ax2_divider.append_axes("bottom", size = '6%', pad='3%')
            cb2 = fig.colorbar(im2, cax=cax2, orientation="horizontal")
            cb2.set_label(r'10$^{-17}$ erg/s/cm$^2$')
            ax2.text(0.5, 13.5, r'H$\beta$', color='white', fontweight='bold', fontsize=12)
            ax2.set_xticks([])
            ax2.set_yticks([])
            for r in idx:
                rect = patches.Rectangle((r[1]-0.5, r[0]-0.5), 1, 1, linewidth = 0.5, edgecolor='white',facecolor='none')
                ax2.add_patch(rect)
            ax2.add_patch(patches.Rectangle((center[1]-0.5, center[0]-0.5),1,1, linewidth = 0.5, edgecolor='k',facecolor='none'))
            fig.tight_layout()
            fig.subplots_adjust(wspace=0.1)
            plt.savefig(folder+'/'+field_id+'_lines.pdf', dpi=300)
            plt.close(fig)
            # --- Figure 2: unsmoothed vs smoothed [OIII]/Hb ratio maps ---
            fig = plt.figure(figsize=(6,3))
            ax3 = fig.add_subplot(121)
            ax2 = fig.add_subplot(122)
            im3 = ax3.imshow(reg_ratio, origin='lower', vmin=-1, vmax=8, cmap='magma')
            ax3.set_xticks([])
            ax3.set_yticks([])
            for r in idx:
                rect = patches.Rectangle((r[1]-0.5, r[0]-0.5), 1, 1, linewidth = 0.5, edgecolor='white',facecolor='none')
                ax3.add_patch(rect)
            ax3.add_patch(patches.Rectangle((center[1]-0.5, center[0]-0.5),1,1, linewidth = 0.5, edgecolor='k',facecolor='none'))
            im2 = ax2.imshow(ratio, origin='lower', vmin=-1, vmax=8, cmap='magma')
            ax2.set_xticks([])
            ax2.set_yticks([])
            for r in idx:
                rect = patches.Rectangle((r[1]-0.5, r[0]-0.5), 1, 1, linewidth = 0.5, edgecolor='white',facecolor='none')
                ax2.add_patch(rect)
            ax2.add_patch(patches.Rectangle((center[1]-0.5, center[0]-0.5),1,1, linewidth = 0.5, edgecolor='k',facecolor='none'))
            # Hacky code to get plots the same size - make colorbar for first subplot then delete it
            ax3_divider = make_axes_locatable(ax3)
            cax3 = ax3_divider.append_axes("right", size = '6%', pad='5%')
            cb3 = fig.colorbar(im3, cax = cax3)
            fig.delaxes(fig.axes[2])
            ax2_divider = make_axes_locatable(ax2)
            cax2 = ax2_divider.append_axes("right", size = '6%', pad='5%')
            cb2 = fig.colorbar(im2, cax=cax2)
            cb2.set_label(r'[OIII]/H$\beta$')
            fig.tight_layout()
            fig.subplots_adjust(wspace=-0.05)
            plt.savefig(folder+'/'+field_id+'_ratio.pdf', dpi=300)
            plt.close(fig)
            # Central (smoothed) ratio and the annulus ratios, skipping NaNs.
            in_ratio = (ratio[center[0],center[1]])[0]
            print 'inner ratio:', in_ratio
            ot, otw = [],[]
            for i in idx:
                if np.isnan(ratio_wht[i][0]) == True:
                    continue
                if np.isnan(ratio[i][0]) == True:
                    continue
                ot.append(ratio[i][0])
                otw.append(ratio_wht[i][0])
            print ot
|
[
"noreply@github.com"
] |
noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.