Dataset schema (one row per source file):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (3 to 281 chars)
- content_id: string (length 40)
- detected_licenses: list (0 to 57 items)
- license_type: string (2 classes)
- repo_name: string (6 to 116 chars)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (313 classes)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k to 668M, nullable)
- star_events_count: int64 (0 to 102k)
- fork_events_count: int64 (0 to 38.2k)
- gha_license_id: string (17 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (107 classes)
- src_encoding: string (20 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (4 to 6.02M)
- extension: string (78 classes)
- content: string (2 to 6.02M chars)
- authors: list (length 1)
- author: string (0 to 175 chars)
b3e4d56fe678942121ac590ac0eb21f54c0a1b1d
|
ae2907b2b1cc11ea6a991f7cf5dfc22bf6dca8f0
|
/DBpedia/RefineEntropy.py
|
699763e0c3863030695d98acd05d46239062db27
|
[] |
no_license
|
jxulie/ExtractInfoboxFromWikipedia
|
4cf6a8f3b51409633ba1cae84eeed3bbddf88c0a
|
0bc747ed73caa9d57f5240f0aa4f6f4bb1e3394c
|
refs/heads/master
| 2021-01-20T10:10:37.150290
| 2014-03-14T01:53:44
| 2014-03-14T01:53:44
| 15,223,866
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
#-*- coding:UTF-8 -*-
'''
Created on March 13, 2014
@author: Bo Xu <mailto:bolang1988@gmail.com>
@version: 1.0
@summary:
'''
menu_path = "D:\\xubo\\dbpedia\\"
entropy_file = open(menu_path + "filter_entropy.txt", 'r')
entropy_lines = entropy_file.readlines()
entropy_file.close()
refined_file = open(menu_path + "refine_entropy.txt", 'w')
for line in entropy_lines:
    try:
        line = line.rstrip()
        words = line.split("\t")
        category = words[0]
        attribute = words[2]
        value = words[4]
        if value in category:
            # mask every occurrence of the value inside the category string
            value = category.replace(value, "jxulie")
        refined_file.write("%s\t%s\t%s\n" % (category, attribute, value))
    except IndexError:
        # line has fewer than five tab-separated fields
        print("error", line)
refined_file.close()
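A minimal illustration of the rewrite rule above, using a hypothetical tab-separated input line (the real filter_entropy.txt layout is not shown in this file):

    words = "Person_birthPlace\t0.91\tbirthPlace\t0.88\tPlace".split("\t")
    category, attribute, value = words[0], words[2], words[4]
    # "Place" occurs inside the category string, so it gets masked:
    print(category.replace(value, "jxulie"))  # -> Person_birthjxulie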
|
[
"bolang1988@gmail.com"
] |
bolang1988@gmail.com
|
933b59e302f98e982ead78cbe8328132cfbe6402
|
6f04a6ef99c581ed2f0519c897f254a7b63fb61d
|
/rastervision/utils/zxy2geotiff.py
|
80210424cc55d2fa376a4eef16bfa35762587c46
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
dgketchum/raster-vision
|
18030c9a8bfe99386aa95adbf8e3ec51d204947f
|
fe74bef30daa5821023946576b00c584ddc56de8
|
refs/heads/master
| 2020-08-30T13:56:08.598240
| 2019-11-03T17:38:33
| 2019-11-03T17:38:33
| 218,400,435
| 3
| 1
|
NOASSERTION
| 2019-10-29T23:09:57
| 2019-10-29T23:09:57
| null |
UTF-8
|
Python
| false
| false
| 7,481
|
py
|
import tempfile
from PIL import Image
import numpy as np
import click
import mercantile
import rasterio
from rasterio.windows import Window
import pyproj
from rastervision.utils.files import (download_if_needed, get_local_path,
upload_or_copy)
from rastervision.command.aux.cogify_command import create_cog
def lnglat2merc(lng, lat):
"""Convert lng, lat point to x/y Web Mercator tuple."""
return pyproj.transform(
pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'), lng, lat)
def merc2lnglat(x, y):
"""Convert x, y Web Mercator point to lng/lat tuple."""
return pyproj.transform(
pyproj.Proj(init='epsg:3857'), pyproj.Proj(init='epsg:4326'), x, y)
def merc2pixel(tile_x, tile_y, zoom, merc_x, merc_y, tile_sz=256):
"""Convert Web Mercator point to pixel coordinates.
This is within the coordinate frame of a single ZXY tile.
Args:
tile_x: (int) x coordinate of ZXY tile
tile_y: (int) y coordinate of ZXY tile
zoom: (int) zoom level of ZXY tile
merc_x: (float) Web Mercator x axis of point
merc_y: (float) Web Mercator y axis of point
tile_sz: (int) size of ZXY tile
"""
tile_merc_bounds = mercantile.xy_bounds(tile_x, tile_y, zoom)
pix_y = int(
round(tile_sz * ((tile_merc_bounds.top - merc_y) /
(tile_merc_bounds.top - tile_merc_bounds.bottom))))
pix_x = int(
round(tile_sz * ((merc_x - tile_merc_bounds.left) /
(tile_merc_bounds.right - tile_merc_bounds.left))))
return (pix_x, pix_y)
def _zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog=False):
"""Generates a GeoTIFF of a bounded region from a ZXY tile server.
Args:
        tile_schema: (str) the URI schema for zxy tiles (i.e. a slippy map tile server)
of the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles
are assumed to be indexed using TMS coordinates, where the y axis starts
at the southernmost point. The URI can be for http, S3, or the local
file system.
zoom: (int) the zoom level to use when retrieving tiles
bounds: (list) a list of length 4 containing min_lat, min_lng,
max_lat, max_lng
        output_uri: (str) where to save the GeoTIFF. The URI can be for http, S3, or the
            local file system
        make_cog: (bool) if True, also convert the output to a Cloud Optimized
            GeoTIFF
    """
min_lat, min_lng, max_lat, max_lng = bounds
if min_lat >= max_lat:
raise ValueError('min_lat must be < max_lat')
if min_lng >= max_lng:
raise ValueError('min_lng must be < max_lng')
is_tms = False
if '{-y}' in tile_schema:
tile_schema = tile_schema.replace('{-y}', '{y}')
is_tms = True
tmp_dir_obj = tempfile.TemporaryDirectory()
tmp_dir = tmp_dir_obj.name
# Get range of tiles that cover bounds.
output_path = get_local_path(output_uri, tmp_dir)
tile_sz = 256
t = mercantile.tile(min_lng, max_lat, zoom)
xmin, ymin = t.x, t.y
t = mercantile.tile(max_lng, min_lat, zoom)
xmax, ymax = t.x, t.y
# The supplied bounds are contained within the "tile bounds" -- ie. the
# bounds of the set of tiles that covers the supplied bounds. Therefore,
# we need to crop out the imagery that lies within the supplied bounds.
# We do this by computing a top, bottom, left, and right offset in pixel
# units of the supplied bounds against the tile bounds. Getting the offsets
# in pixel units involves converting lng/lat to web mercator units since we
# assume that is the CRS of the tiles. These offsets are then used to crop
# individual tiles and place them correctly into the output raster.
nw_merc_x, nw_merc_y = lnglat2merc(min_lng, max_lat)
left_pix_offset, top_pix_offset = merc2pixel(xmin, ymin, zoom, nw_merc_x,
nw_merc_y)
se_merc_x, se_merc_y = lnglat2merc(max_lng, min_lat)
se_left_pix_offset, se_top_pix_offset = merc2pixel(xmax, ymax, zoom,
se_merc_x, se_merc_y)
right_pix_offset = tile_sz - se_left_pix_offset
bottom_pix_offset = tile_sz - se_top_pix_offset
uncropped_height = tile_sz * (ymax - ymin + 1)
uncropped_width = tile_sz * (xmax - xmin + 1)
height = uncropped_height - top_pix_offset - bottom_pix_offset
width = uncropped_width - left_pix_offset - right_pix_offset
transform = rasterio.transform.from_bounds(nw_merc_x, se_merc_y, se_merc_x,
nw_merc_y, width, height)
with rasterio.open(
output_path,
'w',
driver='GTiff',
height=height,
width=width,
count=3,
crs='epsg:3857',
transform=transform,
dtype=rasterio.uint8) as dataset:
out_x = 0
for xi, x in enumerate(range(xmin, xmax + 1)):
tile_xmin, tile_xmax = 0, tile_sz - 1
if x == xmin:
tile_xmin += left_pix_offset
if x == xmax:
tile_xmax -= right_pix_offset
window_width = tile_xmax - tile_xmin + 1
out_y = 0
for yi, y in enumerate(range(ymin, ymax + 1)):
tile_ymin, tile_ymax = 0, tile_sz - 1
if y == ymin:
tile_ymin += top_pix_offset
if y == ymax:
tile_ymax -= bottom_pix_offset
window_height = tile_ymax - tile_ymin + 1
# Convert from xyz to tms if needed.
# https://gist.github.com/tmcw/4954720
if is_tms:
y = (2**zoom) - y - 1
tile_uri = tile_schema.format(x=x, y=y, z=zoom)
tile_path = download_if_needed(tile_uri, tmp_dir)
img = np.array(Image.open(tile_path))
img = img[tile_ymin:tile_ymax + 1, tile_xmin:tile_xmax + 1, :]
window = Window(out_x, out_y, window_width, window_height)
dataset.write(
np.transpose(img[:, :, 0:3], (2, 0, 1)), window=window)
out_y += window_height
out_x += window_width
if make_cog:
create_cog(output_path, output_uri, tmp_dir)
else:
upload_or_copy(output_path, output_uri)
@click.command()
@click.argument('tile_schema')
@click.argument('zoom')
@click.argument('bounds')
@click.argument('output_uri')
@click.option('--make-cog', is_flag=True, default=False)
def zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog):
"""Generates a GeoTIFF of a bounded region from a ZXY tile server.
    TILE_SCHEMA: the URI schema for zxy tiles (i.e. a slippy map tile server) of
the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles are
assumed to be indexed using TMS coordinates, where the y axis starts at
the southernmost point. The URI can be for http, S3, or the local file
system.
ZOOM: the zoom level to use when retrieving tiles
BOUNDS: a space-separated string containing min_lat, min_lng, max_lat,
max_lng
OUTPUT_URI: where to save the GeoTIFF. The URI can be for http, S3, or the
local file system.
"""
bounds = [float(x) for x in bounds.split(' ')]
_zxy2geotiff(tile_schema, int(zoom), bounds, output_uri, make_cog=make_cog)
if __name__ == '__main__':
zxy2geotiff()
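A hypothetical invocation of the CLI defined above (tile URL, zoom, bounds, and output path are placeholders, not taken from the source):

    python zxy2geotiff.py 'https://tiles.example.com/{z}/{x}/{y}.png' 17 \
        '37.79 -122.42 37.80 -122.41' out.tif --make-cog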
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
c276d6fc997d227971b391f5b9a6cc0221128807
|
78712b3b864eb3d6b78308c7760da9bdd8c51c9d
|
/skillMatch/migrations/0011_auto_20190328_0200.py
|
a1bb1f4a4d156d5760104122793bf78f83fe3faa
|
[] |
no_license
|
UeharaHua/SkillMatch
|
b36071d79a15a43d06970671f3a15fcbf2678263
|
e4f9445ec9c637266c85e8b061563728a9fd20bc
|
refs/heads/master
| 2022-12-14T09:24:06.075625
| 2019-09-29T05:51:58
| 2019-09-29T05:51:58
| 211,557,817
| 0
| 0
| null | 2022-11-22T02:58:05
| 2019-09-28T20:28:27
|
CSS
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
# Generated by Django 2.1.5 on 2019-03-28 06:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('skillMatch', '0010_auto_20190325_1343'),
]
operations = [
migrations.AlterField(
model_name='post',
name='course',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='skillMatch.Class'),
),
]
|
[
"hlu8jm@virginia.edu"
] |
hlu8jm@virginia.edu
|
070700610e801fade392728e0d895aed5093ad8b
|
8a695f0abff49d4e7be050dd278c504b80015456
|
/orbit.py
|
3f0f082e184f53aa5b68bfbbf1a2fc41e1bc0e31
|
[] |
no_license
|
flucey/chem160module5
|
4e5015e053cd8021ae49264615ed157456ada177
|
399092ffa9a8a03921299f91578d7dca34414727
|
refs/heads/master
| 2020-07-30T03:34:59.441757
| 2019-09-22T01:07:40
| 2019-09-22T01:07:40
| 210,072,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from math import sqrt
from drawtraj import drawtraj
def force(x,y,m,mstar):
r2=x**2+y**2
r32=r2*sqrt(r2)
fx=-x*m*mstar/r32
fy=-y*m*mstar/r32
return fx,fy
def integrate(x,y,vx,vy,fx,fy,m,dt):
ax,ay=fx/m,fy/m
vx+=ax*dt
vy+=ay*dt
x+=vx*dt
y+=vy*dt
return x,y,vx,vy
# Main part of the program
mstar=100
m=1
nsteps=5000000
dt=0.01
r=50
x,y=0,r
vx,vy=0.2,0.4
trajx,trajy=[],[]
for t in range(nsteps):
fx,fy=force(x,y,m,mstar)
x,y,vx,vy=integrate(x,y,vx,vy,fx,fy,m,dt)
trajx.append(x)
trajy.append(y)
drawtraj(trajx,trajy,5*r)
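Because integrate updates the velocity before the position, this is the semi-implicit (symplectic) Euler method, which holds orbital energy far more stable than standard Euler over long runs. A small sketch for checking energy drift, reusing the same force law with G = 1 (an illustration, not part of the original script):

    def energy(x, y, vx, vy, m, mstar):
        # kinetic plus gravitational potential energy, G = 1
        r = sqrt(x**2 + y**2)
        return 0.5 * m * (vx**2 + vy**2) - m * mstar / r

Comparing energy(0, r, 0.2, 0.4, m, mstar) before the loop with the value after it quantifies the integration error.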
|
[
"noreply@github.com"
] |
noreply@github.com
|
c379cf67ebd5bf094587e4de80fa170eb40a8b11
|
27518b095787e1d5a6d4926a1849401b1dab5f5a
|
/pi_snake/drawers/pi_draw.py
|
04c3bc360ac1dcfc2d02e54ccc91c03b2947e101
|
[] |
no_license
|
mcmhav/pi-snake
|
94a3945f681be86ca3b93de8b3da02d2aa127763
|
0f672419e5d8f7b908cdbdfd61f3f529c74ac2e7
|
refs/heads/master
| 2020-12-01T22:37:10.306834
| 2020-02-16T22:46:38
| 2020-02-16T22:46:38
| 230,795,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
from typing import List
from .drawer import Drawer
B = (0, 0, 0)
R = (255, 0, 0)
G = (0, 255, 0)
class Pihat(Drawer):
def __init__(self):
from sense_hat import SenseHat # pylint: disable=import-error
self._sense = SenseHat()
self._sense.set_rotation(180)
def draw(self, board: List) -> None:
pixels = []
for row in board:
for cell in row:
if cell == 's':
pixels.append(R)
elif cell == 'g':
pixels.append(G)
else:
pixels.append(B)
self._sense.set_pixels(pixels)
def clear(self, game_summary: str) -> None:
self._sense.show_message(game_summary)
self._sense.clear()
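A sketch of how a caller might drive this drawer, assuming an 8x8 board (matching the Sense HAT's 8x8 LED matrix) with 's' marking snake cells and 'g' the goal, as the checks above imply; requires a Raspberry Pi with a Sense HAT attached:

    board = [['' for _ in range(8)] for _ in range(8)]
    board[3][4] = 's'  # snake segment -> red pixel
    board[6][1] = 'g'  # goal -> green pixel
    drawer = Pihat()
    drawer.draw(board)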
|
[
"mcmhav@gmail.com"
] |
mcmhav@gmail.com
|
7250bd6ebf4c7254ac0d435bf15e73ef32fb2375
|
49cba1fba19d5a89c45c118404aad05606d11903
|
/Weekly_182/question.py
|
9a65f31ca2ea82692a0cadf53d50c3b2aca94da1
|
[] |
no_license
|
bharadwajvaduguru8/leetcode_contests
|
9ec2f2ffca8f40c34f2bfb9bdaf0f4b0574f28f4
|
7faab423337e1bbb106e1b00ba723070fca946c1
|
refs/heads/main
| 2023-04-18T17:56:24.785735
| 2021-04-26T05:34:31
| 2021-04-26T05:34:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# python3
def nums(num):
    counts = {}
    # tally how many times each value appears in the input list
    for b in num:
        if b in counts:
            counts[b] += 1
        else:
            counts[b] = 1
    print(counts)

if __name__ == '__main__':
    num = list(map(int, input("Num: ").split()))
    nums(num)
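The same tally is available from the standard library, which is the idiomatic route:

    from collections import Counter
    print(Counter([1, 2, 2, 3]))  # Counter({2: 2, 1: 1, 3: 1})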
|
[
"noreply@github.com"
] |
noreply@github.com
|
5e01983cc6cc3c38d23b976ff0411fc7a18fe497
|
9ecee1c42c264d7fb26e56e9bb88ab969ff9e3c2
|
/ip.py
|
74b0b40cbcc69f3de03b0a965fe1db1d501dfab2
|
[
"Apache-2.0"
] |
permissive
|
kkirsche/whatismyip
|
a508dcba7bd428119367390704877cabac3e0971
|
b53bfb0a0dbc26b3d83bca4bfc893cb5f2afc803
|
refs/heads/master
| 2021-06-04T20:33:39.354527
| 2021-03-22T23:23:40
| 2021-03-22T23:23:40
| 66,574,312
| 1
| 2
|
Apache-2.0
| 2021-03-22T23:23:41
| 2016-08-25T16:19:43
|
CSS
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
import socket
from flask import Flask, request, render_template
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('VZIP_SETTINGS', silent=True)
@app.route('/')
def what_is_my_ip():
return render_template('index.html', request=request)
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
[
"kev.kirsche@gmail.com"
] |
kev.kirsche@gmail.com
|
63614eb49f87cdce16a2e82cd6cee95f4393abff
|
a2a772b2026c907a46da986d4a074255c79fc40c
|
/ResourceLib/BelowGround/Individual/OGS/helpers/__init__.py
|
170e9d41cde78b9c2dc06059452eeaa92cd54b0c
|
[
"MIT"
] |
permissive
|
mcwimm/pyMANGA
|
537663dc24b91a5555e1c379917aa19229a9ed1f
|
b17cff097114bc444a3e5bb3d11f756e0c09fb59
|
refs/heads/master
| 2023-08-09T14:06:40.721064
| 2023-08-04T10:32:37
| 2023-08-04T10:32:37
| 223,939,358
| 0
| 0
|
MIT
| 2019-11-25T12:05:53
| 2019-11-25T12:05:52
| null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018
@author: bathmann
"""
from .CellInformation import CellInformation
|
[
"jasper.bathmann@ufz.de"
] |
jasper.bathmann@ufz.de
|
e29d4d92cca7b4533c0b30e395a0f3b88edea30c
|
1cd9ea9920326561830ea31dde3c5507670a4dd8
|
/check_sudoku.py
|
ec9297f2f43443d4d76c2ad51d61de606be7c1c5
|
[] |
no_license
|
iamtraction/CS101-Udacity
|
d0b34174e088ccb8c6b6923a6afb6a151ae1bdc7
|
9842335bec48c45ee9f5e84d5ab064b7d8e2bcb3
|
refs/heads/master
| 2021-06-20T10:26:49.538644
| 2017-07-30T15:04:08
| 2017-07-30T15:04:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
# THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume that the input is square and contains at
# least one row and column.
correct = [[1, 2, 3],
[2, 3, 1],
[3, 1, 2]]
incorrect = [[1, 2, 3, 4],
[2, 3, 1, 3],
[3, 1, 2, 3],
[4, 4, 4, 4]]
incorrect2 = [[1, 2, 3, 4],
[2, 3, 1, 4],
[4, 1, 2, 3],
[3, 4, 1, 2]]
incorrect3 = [[1, 2, 3, 4, 5],
[2, 3, 1, 5, 6],
[4, 5, 2, 1, 3],
[3, 4, 5, 2, 1],
[5, 6, 4, 3, 2]]
incorrect4 = [['a', 'b', 'c'],
['b', 'c', 'a'],
['c', 'a', 'b']]
incorrect5 = [[1, 1.5],
[1.5, 1]]
def check_sudoku(square):
    """Checks if the given square list of lists representing an n x n
    sudoku puzzle solution is a valid sudoku solution."""
    length = len(square)
    digit = 1
    while digit <= length:
        i = 0
        while i < length: # Go through each row and column
            row_count = 0
            col_count = 0
            j = 0
            while j < length: # For each entry in the ith row/column
                if square[i][j] == digit:
                    row_count += 1
                if square[j][i] == digit:
                    col_count += 1
                j += 1
            if row_count != 1 or col_count != 1:
                return False
            i += 1
        digit += 1
    return True
print(check_sudoku(incorrect))
#>>> False
print(check_sudoku(correct))
#>>> True
print(check_sudoku(incorrect2))
#>>> False
print(check_sudoku(incorrect3))
#>>> False
print(check_sudoku(incorrect4))
#>>> False
print(check_sudoku(incorrect5))
#>>> False
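An equivalent, more idiomatic check: every row and every column must equal the set {1, ..., n} exactly. A sketch (not part of the original exercise):

    def check_sudoku_sets(square):
        expected = set(range(1, len(square) + 1))
        return (all(set(row) == expected for row in square) and
                all(set(col) == expected for col in zip(*square)))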
|
[
"snkrsn.kampa@gmail.com"
] |
snkrsn.kampa@gmail.com
|
05cc3ba1a5dd1f89efd2bab642e7f3ce8dc47824
|
2c0bc2b556fc0ca1d275ca683acdb677e07c1d22
|
/app/CC/migrations/0007_auto_20200512_1809.py
|
3a8433a89d251c2ba050acac3197a473ea303749
|
[] |
no_license
|
KolbeinnIn/Verklegt-namskeid-II
|
99b8cfe6c829693e06e8fa2af6404d4f1287ae21
|
56afe071ca6d92a48cca7126c31a5c189378ce3f
|
refs/heads/master
| 2022-07-17T08:52:06.005997
| 2020-05-16T11:04:08
| 2020-05-16T11:04:08
| 259,033,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# Generated by Django 3.0.6 on 2020-05-12 18:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CC', '0006_category_children'),
]
operations = [
migrations.AlterField(
model_name='category',
name='children',
field=models.ManyToManyField(related_name='_category_children_+', to='CC.Category'),
),
]
|
[
"bjarkithorjonsson@gmail.com"
] |
bjarkithorjonsson@gmail.com
|
f778fe4cf71a8dc151b39915e39e96516fc62612
|
00b20ac164caeea7eda33334b514741323034ab1
|
/Parsivel/RawParsivel.py
|
ad4cc877c2989e73a6388ad00ac7d1b7655494c8
|
[] |
no_license
|
joejoezz/PyOLYMPEX
|
1a081eac1ab8be54d06c02e8b3fabcb71ac33bc1
|
1ce7f51808ae6c0ff2a49c1ac636819793e3d417
|
refs/heads/master
| 2021-01-10T09:53:44.119668
| 2018-01-29T03:03:27
| 2018-01-29T03:03:27
| 48,900,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,093
|
py
|
import numpy as np
import pdb
import datetime
def read_parsivel(filenames):
'''
Takes an APU Parsivel file, returns a RawParsivel object
'''
# initialize class:
raw_parsivel = RawParsivel()
# read parsivel file
# takes array of filenames
raw_parsivel.read_parsivel_file(filenames)
raw_parsivel.convert_to_arrays()
    # This creates an object called "raw_parsivel" that contains the disdrometer
    # data for 1 hr at 10 s intervals (length 360, or 360x1024 for the matrix).
    # Variables can be accessed as "self.variable" within the class, or as
    # raw_parsivel.variable when passed to a different class.
return raw_parsivel
class RawParsivel(object):
'''
    The RawParsivel class takes a list of APU files (.dat) generated by the
NASA OLYMPEX Parsivels.
It returns an object with the data from the Parsivel file, in
10 second increments.
    Note that this class only reads the Parsivel data; it doesn't
apply any filtering
filenames: [apuxx_yyyymmddhh.dat]
apu: apuxx (apu01-apu11, apu12, apu30, hridge)
Filenames must be in correct order (use glob.glob)
'''
def __init__(self):
#self.filename = filename #filename
self.apu = [] #apu number
self.pytime = [] #time in datatime
self.error_code = [] #error code (0,1,2,3)
self.temperature = [] #Parsivel temperature
self.dbz = [] #Parsivel dBZ
self.rain = [] #Parsivel rain
self.ndrops = [] #Parsivel # particles (use this one)
self.visibility = [] #Parsivel visibility
self.wxcode = [] #Parsivel wx code
self.matrix = [] #Parsivel drop matrix
def read_parsivel_file(self,filenames):
#Figure out length of files (needed for np.matrix)
#8640 lines = full day
matrixdim = 0
for filename in filenames:
with open(filename) as f:
for line in f:
matrixdim += 1
self.matrix = np.zeros((matrixdim,1024))
dim = 0
#read through files again, this time filling lists and matrix
for filename in filenames:
with open(filename) as f:
for line in f:
data = line.split(',')
time = data[0].split(';')[0]
apu = str(data[0].split(';')[1])
self.pytime.append(datetime.datetime(int(time[0:4]),int(time[4:6]),\
int(time[6:8]),int(time[8:10]),int(time[10:12]),\
int(time[12:14])))
self.error_code.append(int(data[1]))
self.temperature.append(int(data[2]))
self.ndrops.append(int(data[3]))
self.visibility.append(int(data[6]))
self.rain.append(float(data[4]))
self.dbz.append(float(data[5]))
self.wxcode.append(int(data[8]))
                    try:
                        matrix_temp = np.array([int(i) for i in data[9:-1]])
                        self.matrix[dim,:] = matrix_temp
                        dim += 1
                    except ValueError:
                        # leaves the bad line as all zeros
                        print('bad line at: ' + str(self.pytime[-1]))
                        dim += 1
def convert_to_arrays(self):
self.pytime= np.array(self.pytime)
self.error_code = np.array(self.error_code)
self.temperature = np.array(self.temperature)
self.ndrops = np.array(self.ndrops)
self.visibility = np.array(self.visibility)
self.rain = np.array(self.rain)
self.dbz = np.array(self.dbz)
self.wxcode = np.array(self.wxcode)
    def info(self):
        print('Raw Parsivel: ')
        print('Pytime length: ' + str(len(self.pytime)))
diameter = [
0.06, 0.19, 0.32, 0.45, 0.58, 0.71, 0.84, 0.96, 1.09, 1.22, 1.42, 1.67,
1.93, 2.19, 2.45, 2.83, 3.35, 3.86, 4.38, 4.89, 5.66,
6.7, 7.72, 8.76, 9.78, 11.33, 13.39, 15.45, 17.51, 19.57, 22.15, 25.24]
spread = [
0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.257,
0.257, 0.257, 0.257, 0.257, 0.515, 0.515, 0.515, 0.515, 0.515, 1.030, 1.030,
1.030, 1.030, 1.030, 2.060, 2.060, 2.060, 2.060, 2.060, 3.090, 3.090]
v = [
0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.1, 1.3, 1.5, 1.7, 1.9,
2.2, 2.6, 3, 3.4, 3.8, 4.4, 5.2, 6.0, 6.8, 7.6, 8.8, 10.4, 12.0, 13.6, 15.2,
17.6, 20.8]
v_spread = [.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .2, .2, .2, .2, .2, .4,
.4, .4, .4, .4, .8, .8, .8, .8, .8, 1.6, 1.6, 1.6, 1.6, 1.6, 3.2, 3.2]
liquid_matrix = [
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
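A sketch of the intended call pattern, following the class docstring's note about file ordering (the glob pattern is a hypothetical example of the apuxx_yyyymmddhh.dat naming scheme):

    import glob

    filenames = sorted(glob.glob('apu01_2015111*.dat'))
    raw = read_parsivel(filenames)
    print(raw.pytime[0], raw.dbz[0], raw.matrix.shape)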
|
[
"joejoezz@gmail.com"
] |
joejoezz@gmail.com
|
d2b41f94eec0c608bee7477753703dafae3dccc8
|
98ca37f5dd2751efaa060cca19e0b83f871d7765
|
/sdk/metricsadvisor/azure-ai-metricsadvisor/tests/base_testcase_aad.py
|
a40496c6753e169137d95e6df644d3887e5da9a1
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
jayhebe/azure-sdk-for-python
|
5ea99732ebb9929d3f6f77c08cc640d5915970b1
|
f4455f85d9fe747fa4de2fdc691b975c07bfeea5
|
refs/heads/main
| 2023-06-24T01:22:06.602194
| 2021-07-28T02:12:25
| 2021-07-28T02:12:25
| 390,290,984
| 1
| 0
|
MIT
| 2021-07-28T09:23:46
| 2021-07-28T09:23:46
| null |
UTF-8
|
Python
| false
| false
| 20,507
|
py
|
# coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import datetime
from devtools_testutils import AzureTestCase
from azure_devtools.scenario_tests import (
ReplayableTest,
create_random_name
)
from azure.ai.metricsadvisor import (
MetricsAdvisorKeyCredential,
MetricsAdvisorAdministrationClient,
MetricsAdvisorClient,
)
from azure.ai.metricsadvisor.models import (
SqlServerDataFeedSource,
DataFeedSchema,
DataFeedMetric,
DataFeedDimension,
DataFeedGranularity,
DataFeedIngestionSettings,
DataFeedMissingDataPointFillSettings,
DataFeedRollupSettings,
MetricAlertConfiguration,
MetricAnomalyAlertScope,
MetricAnomalyAlertConditions,
MetricBoundaryCondition,
TopNGroupScope,
SeverityCondition,
MetricDetectionCondition,
MetricSeriesGroupDetectionCondition,
MetricSingleSeriesDetectionCondition,
SmartDetectionCondition,
SuppressCondition,
ChangeThresholdCondition,
HardThresholdCondition,
EmailNotificationHook,
WebNotificationHook,
)
from azure.identity import DefaultAzureCredential
class MockCredential():
def get_token(self, *scopes, **kwargs):
from azure.core.credentials import AccessToken
return AccessToken("fake-token", 0)
class TestMetricsAdvisorAdministrationClientBase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']
def __init__(self, method_name):
super(TestMetricsAdvisorAdministrationClientBase, self).__init__(method_name)
self.vcr.match_on = ["path", "method", "query"]
if self.is_live:
service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
self.sql_server_connection_string = self.get_settings_value("METRICS_ADVISOR_SQL_SERVER_CONNECTION_STRING")
self.azure_table_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_TABLE_CONNECTION_STRING")
self.azure_blob_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_BLOB_CONNECTION_STRING")
self.azure_cosmosdb_connection_string = self.get_settings_value("METRICS_ADVISOR_COSMOS_DB_CONNECTION_STRING")
self.application_insights_api_key = self.get_settings_value("METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY")
self.azure_data_explorer_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING")
self.influxdb_connection_string = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_CONNECTION_STRING")
self.influxdb_password = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_PASSWORD")
self.azure_datalake_account_key = self.get_settings_value("METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY")
self.mongodb_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_MONGO_DB_CONNECTION_STRING")
self.mysql_connection_string = self.get_settings_value("METRICS_ADVISOR_MYSQL_CONNECTION_STRING")
self.postgresql_connection_string = self.get_settings_value("METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING")
self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
self.data_feed_id = self.get_settings_value("METRICS_ADVISOR_DATA_FEED_ID")
self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
credential = DefaultAzureCredential()
self.scrubber.register_name_pair(
self.sql_server_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_table_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_blob_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_cosmosdb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.application_insights_api_key,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_data_explorer_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.influxdb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.influxdb_password,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_datalake_account_key,
"connectionstring"
)
self.scrubber.register_name_pair(
self.mongodb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.mysql_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.postgresql_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.metric_id,
"metric_id"
)
self.scrubber.register_name_pair(
self.data_feed_id,
"data_feed_id"
)
self.scrubber.register_name_pair(
self.anomaly_detection_configuration_id,
"anomaly_detection_configuration_id"
)
else:
service_endpoint = "https://endpointname.cognitiveservices.azure.com"
self.sql_server_connection_string = "SQL_SERVER_CONNECTION_STRING"
self.azure_table_connection_string = "AZURE_TABLE_CONNECTION_STRING"
self.azure_blob_connection_string = "AZURE_BLOB_CONNECTION_STRING"
self.azure_cosmosdb_connection_string = "COSMOS_DB_CONNECTION_STRING"
self.application_insights_api_key = "METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY"
self.azure_data_explorer_connection_string = "METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING"
self.influxdb_connection_string = "METRICS_ADVISOR_INFLUXDB_CONNECTION_STRING"
self.influxdb_password = "METRICS_ADVISOR_INFLUXDB_PASSWORD"
self.azure_datalake_account_key = "METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY"
self.mongodb_connection_string = "METRICS_ADVISOR_AZURE_MONGODB_CONNECTION_STRING"
self.mysql_connection_string = "METRICS_ADVISOR_MYSQL_CONNECTION_STRING"
self.postgresql_connection_string = "METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING"
self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
self.metric_id = "metric_id"
self.data_feed_id = "data_feed_id"
credential = MockCredential()
self.admin_client = MetricsAdvisorAdministrationClient(service_endpoint, credential)
def _create_data_feed(self, name):
name = create_random_name(name)
return self.admin_client.create_data_feed(
name=name,
source=SqlServerDataFeedSource(
connection_string=self.sql_server_connection_string,
query="select * from adsample2 where Timestamp = @StartTime"
),
granularity="Daily",
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings="2019-10-01T00:00:00Z",
)
def _create_data_feed_and_detection_config(self, name):
try:
data_feed = self._create_data_feed(name)
detection_config_name = create_random_name(name)
detection_config = self.admin_client.create_detection_configuration(
name=detection_config_name,
metric_id=data_feed.metric_ids['cost'],
description="testing",
whole_series_detection_condition=MetricDetectionCondition(
smart_detection_condition=SmartDetectionCondition(
sensitivity=50,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=5,
min_ratio=5
)
)
)
)
return detection_config, data_feed
except Exception as e:
self.admin_client.delete_data_feed(data_feed.id)
raise e
def _create_data_feed_for_update(self, name):
data_feed_name = create_random_name(name)
return self.admin_client.create_data_feed(
name=data_feed_name,
source=SqlServerDataFeedSource(
connection_string=self.sql_server_connection_string,
query=u"select * from adsample2 where Timestamp = @StartTime"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost", display_name="display cost", description="the cost"),
DataFeedMetric(name="revenue", display_name="display revenue", description="the revenue")
],
dimensions=[
DataFeedDimension(name="category", display_name="display category"),
DataFeedDimension(name="city", display_name="display city")
],
timestamp_column="Timestamp"
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
data_source_request_concurrency=0,
ingestion_retry_delay=-1,
ingestion_start_offset=-1,
stop_retry_after=-1,
),
admins=["yournamehere@microsoft.com"],
data_feed_description="my first data feed",
missing_data_point_fill_settings=DataFeedMissingDataPointFillSettings(
fill_type="SmartFilling"
),
rollup_settings=DataFeedRollupSettings(
rollup_type="NoRollup",
rollup_method="None",
),
viewers=["viewers"],
access_mode="Private",
action_link_template="action link template"
)
def _create_alert_config_for_update(self, name):
try:
detection_config, data_feed = self._create_data_feed_and_detection_config(name)
alert_config_name = create_random_name(name)
alert_config = self.admin_client.create_alert_configuration(
name=alert_config_name,
cross_metrics_operator="AND",
metric_alert_configurations=[
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="TopN",
top_n_group_in_scope=TopNGroupScope(
top=5,
period=10,
min_top_count=9
)
),
alert_conditions=MetricAnomalyAlertConditions(
metric_boundary_condition=MetricBoundaryCondition(
direction="Both",
companion_metric_id=data_feed.metric_ids['cost'],
lower=1.0,
upper=5.0
)
)
),
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="SeriesGroup",
series_group_in_scope={'city': 'Shenzhen'}
),
alert_conditions=MetricAnomalyAlertConditions(
severity_condition=SeverityCondition(
min_alert_severity="Low",
max_alert_severity="High"
)
)
),
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="WholeSeries"
),
alert_conditions=MetricAnomalyAlertConditions(
severity_condition=SeverityCondition(
min_alert_severity="Low",
max_alert_severity="High"
)
)
)
],
hook_ids=[]
)
return alert_config, data_feed, detection_config
except Exception as e:
self.admin_client.delete_data_feed(data_feed.id)
raise e
def _create_detection_config_for_update(self, name):
try:
data_feed = self._create_data_feed(name)
detection_config_name = create_random_name("testupdated")
detection_config = self.admin_client.create_detection_configuration(
name=detection_config_name,
metric_id=data_feed.metric_ids['cost'],
description="My test metric anomaly detection configuration",
whole_series_detection_condition=MetricDetectionCondition(
condition_operator="AND",
smart_detection_condition=SmartDetectionCondition(
sensitivity=50,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=5,
min_ratio=5
)
),
hard_threshold_condition=HardThresholdCondition(
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=5,
min_ratio=5
),
lower_bound=0,
upper_bound=100
),
change_threshold_condition=ChangeThresholdCondition(
change_percentage=50,
shift_point=30,
within_range=True,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=2,
min_ratio=2
)
)
),
series_detection_conditions=[MetricSingleSeriesDetectionCondition(
series_key={"city": "Shenzhen", "category": "Jewelry"},
smart_detection_condition=SmartDetectionCondition(
anomaly_detector_direction="Both",
sensitivity=63,
suppress_condition=SuppressCondition(
min_number=1,
min_ratio=100
)
)
)],
series_group_detection_conditions=[MetricSeriesGroupDetectionCondition(
series_group_key={"city": "Sao Paulo"},
smart_detection_condition=SmartDetectionCondition(
anomaly_detector_direction="Both",
sensitivity=63,
suppress_condition=SuppressCondition(
min_number=1,
min_ratio=100
)
)
)]
)
return detection_config, data_feed
except Exception as e:
self.admin_client.delete_data_feed(data_feed.id)
raise e
def _create_email_hook_for_update(self, name):
return self.admin_client.create_hook(
hook=EmailNotificationHook(
name=name,
emails_to_alert=["yournamehere@microsoft.com"],
description="my email hook",
external_link="external link"
)
)
def _create_web_hook_for_update(self, name):
return self.admin_client.create_hook(
hook=WebNotificationHook(
name=name,
endpoint="https://httpbin.org/post",
description="my web hook",
external_link="external link",
username="krista",
password="123"
)
)
class TestMetricsAdvisorClientBase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']
def __init__(self, method_name):
super(TestMetricsAdvisorClientBase, self).__init__(method_name)
self.vcr.match_on = ["path", "method", "query"]
if self.is_live:
service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
self.anomaly_alert_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_ALERT_CONFIGURATION_ID")
self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
self.incident_id = self.get_settings_value("METRICS_ADVISOR_INCIDENT_ID")
self.dimension_name = self.get_settings_value("METRICS_ADVISOR_DIMENSION_NAME")
self.feedback_id = self.get_settings_value("METRICS_ADVISOR_FEEDBACK_ID")
self.alert_id = self.get_settings_value("METRICS_ADVISOR_ALERT_ID")
credential = DefaultAzureCredential()
self.scrubber.register_name_pair(
self.anomaly_detection_configuration_id,
"anomaly_detection_configuration_id"
)
self.scrubber.register_name_pair(
self.anomaly_alert_configuration_id,
"anomaly_alert_configuration_id"
)
self.scrubber.register_name_pair(
self.metric_id,
"metric_id"
)
self.scrubber.register_name_pair(
self.incident_id,
"incident_id"
)
self.scrubber.register_name_pair(
self.dimension_name,
"dimension_name"
)
self.scrubber.register_name_pair(
self.feedback_id,
"feedback_id"
)
self.scrubber.register_name_pair(
self.alert_id,
"alert_id"
)
else:
service_endpoint = "https://endpointname.cognitiveservices.azure.com"
self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
self.anomaly_alert_configuration_id = "anomaly_alert_configuration_id"
self.metric_id = "metric_id"
self.incident_id = "incident_id"
self.dimension_name = "dimension_name"
self.feedback_id = "feedback_id"
self.alert_id = "alert_id"
credential = MockCredential()
self.client = MetricsAdvisorClient(service_endpoint, credential)
|
[
"noreply@github.com"
] |
noreply@github.com
|
29c49f467208a47e78ae1044871005b417e2ddde
|
955f9dc96abddd7593f9e68fef48c529512d34c4
|
/vote/urls.py
|
b6b85d7f0fc869083a4016272c06e27bbb2b3797
|
[] |
no_license
|
junsik-Lim/board_test
|
0443409941b0b58bd44602d0658cc485de82cccd
|
aae90b21dc31c3b77824d0e21b3810467fcacc2f
|
refs/heads/main
| 2023-08-29T11:57:13.344283
| 2021-10-03T04:57:28
| 2021-10-03T04:57:28
| 412,974,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from django.urls import path
from . import views
app_name = "vote"
urlpatterns = [
path('', views.index, name="index"),
path('create/', views.create, name="create"),
path('detail/<num>', views.detail, name="detail"),
path('vote/<conid>', views.vote, name="vote"),
]
|
[
"bluejun0321@naver.com"
] |
bluejun0321@naver.com
|
364356172473bbaedbfc17d1fa14802f452bec04
|
4493b3f0879000ab48cba198b1e059249198a740
|
/dataset_config.py
|
998d289cc0617edfa2f41a9eb2bfa6add1e656a3
|
[
"BSD-3-Clause"
] |
permissive
|
AGalassi/StructurePrediction18
|
7216a375d170bf27c12375af98c70a88f6f57985
|
ae739e492e4ad55908b0275c49e54d7586a481da
|
refs/heads/master
| 2023-06-08T15:54:04.424556
| 2023-05-24T09:17:21
| 2023-05-24T09:17:21
| 125,429,569
| 11
| 6
|
BSD-3-Clause
| 2023-03-24T22:24:10
| 2018-03-15T21:44:36
|
Python
|
UTF-8
|
Python
| false
| false
| 17,951
|
py
|
__author__ = "Andrea Galassi"
__copyright__ = "Copyright 2018, Andrea Galassi"
__license__ = "BSD 3-clause"
__version__ = "0.0.1"
__email__ = "a.galassi@unibo.it"
"""
output_units : (link classifier, relation classifier, source classifier, target classifier)
"""
dataset_info = {"AAEC_v2": {"output_units": (2, 5, 3, 3),
"min_text": 168,
"min_prop": 72,
"link_as_sum": [[0, 2], [1, 3, 4]],
"categorical_prop": {'Premise': [1, 0, 0, ],
'Claim': [0, 1, 0, ],
'MajorClaim': [0, 0, 1],
},
"categorical_link": {'supports': [1, 0, 0, 0, 0],
'inv_supports': [0, 1, 0, 0, 0],
'attacks': [0, 0, 1, 0, 0],
'inv_attacks': [0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 1],
},
"evaluation_headline_short": ("set\t" +
"F1 AVG all\tF1 AVG LP\tF1 Link\t" +
"F1 R AVG dir\tF1 R support\tF1 R attack\t" +
"F1 P AVG\t" +
"F1 P premise\tF1 P claim\tF1 P majclaim\t" +
"F1 P avg\n\n"),
"evaluation_headline": ("set\t" +
"F1 AVG all\tF1 AVG LP\t"
"F1 Link\tF1 R AVG dir\t"
"F1 R support\tF1 R attack\t" +
"F1 P AVG\tF1 P premise\tF1 P claim\tF1 P majclaim\tF1 P avg\t" +
"Pr P AVG\tPr P premise\tPr P claim\tPr P majclaim\tPr P avg\t" +
"Rec P AVG\tRec P premise\tRec P claim\tRec P majclaim\tRec P avg\t" +
"Supp P premise\tSupp P claim\tSupp P majclaim\t"
"F1 nonLink\tF1_R_avg\tF1_R_not-rel" +
"\n\n"),
"prop_types": ['Premise', 'Claim', 'MajorClaim'],
"rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"],
},
"ECHR2018": {"output_units": (2, 8, 2, 2),
"min_text": 168, # NO
"min_prop": 95,
"link_as_sum": [[0, 2], [1, 3, 4, 5, 6, 7]],
"categorical_prop": {'premise': [1, 0],
'claim': [0, 1],
},
"categorical_link": {'Support': [1, 0, 0, 0, 0, 0, 0, 0],
'inv_Support': [0, 1, 0, 0, 0, 0, 0, 0],
'Attack': [0, 0, 1, 0, 0, 0, 0, 0],
'inv_Attack': [0, 0, 0, 1, 0, 0, 0, 0],
'Citation': [0, 0, 0, 0, 1, 0, 0, 0],
'inv_Citation': [0, 0, 0, 0, 0, 1, 0, 0],
'Duplicate': [0, 0, 0, 0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 0, 0, 0, 1],
},
"evaluation_headline_short": ("set\t" +
"F1 AVG all\tF1 AVG LP\tF1 Link\t" +
"F1 R AVG dir\tF1 R support\tF1 R attack\t" +
"F1 P AVG\t" +
"F1 P premise\tF1 P claim\t" +
"F1 P avg\n\n"),
"prop_types": ['Premise', 'Claim'],
"rel_types": ['Support', 'inv_Support', 'Attack', 'inv_Attack',
'Citation', 'inv_Citation', 'Duplicate', 'None'],
},
"cdcp_ACL17": {"output_units": (2, 5, 5, 5),
"min_text": 552,
"min_prop": 153,
"link_as_sum": [[0, 2], [1, 3, 4]],
"categorical_prop": {'policy': [1, 0, 0, 0, 0],
'fact': [0, 1, 0, 0, 0],
'testimony': [0, 0, 1, 0, 0],
'value': [0, 0, 0, 1, 0],
'reference': [0, 0, 0, 0, 1],
},
"categorical_link": {'reasons': [1, 0, 0, 0, 0],
'inv_reasons': [0, 1, 0, 0, 0],
'evidences': [0, 0, 1, 0, 0],
'inv_evidences': [0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 1],
},
"evaluation_headline_short": ("set\t"
"F1 AVG all\tF1 AVG LP\tF1 Link\t"
"F1 R AVG dir\tF1 R reason\tF1 R evidence\t" +
"F1 P AVG\t" +
"F1 P policy\tF1 P fact\tF1 P testimony\t" +
"F1 P value\tF1 P reference\tF1 P avg\n\n"),
"evaluation_headline": ("set\t"
"F1 AVG all\t"
"F1 AVG LP\t"
"F1 Link\t"
"F1 R AVG dir\tF1 R reason\tF1 R evidence\tF1 P AVG\t"
"F1 P policy\tF1 P fact\tF1 P testimony\tF1 P value\tF1 P reference\tf1 P avg\t"
"PR P AVG\tPr P policy\tPr P fact\tPr P testimony\tPr P value\tPr P reference\tpr P avg\t"
"REC P AVG\tRec P policy\tRec P fact\tRec P testimony\tRec P value\tRec P reference\trec P avg\t"
"Supp P policy\tSupp P fact\tSupp P testimony\tSupp P value\tSupp P reference\t"
"F1 nonLink\tF1_R_avg\tF1_R_not-rel\n\n"),
"prop_types": ['policy', 'fact', 'testimony', 'value', 'reference'],
"rel_types": ['reasons', 'inv_reasons', 'evidences', 'inv_evidences', "None"],
},
"RCT": {"output_units": (2, 5, 2, 2),
"min_text": 2, # wrong, never measured
"min_prop": 181,
"link_as_sum": [[0, 2], [1, 3, 4]],
"categorical_prop": {'Premise': [1, 0, ],
'Claim': [0, 1, ]
},
"categorical_link": {'support': [1, 0, 0, 0, 0],
'inv_support': [0, 1, 0, 0, 0],
'attack': [0, 0, 1, 0, 0],
'inv_attack': [0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 1],
},
"evaluation_headline_short": ("set\t" +
"F1 AVG all\t" +
"F1 AVG LP\t" +
"F1 Link\t" +
"F1 R AVG dir\tF1 R support\tF1 R attack\t" +
"F1 P AVG\t" +
"F1 P premise\tF1 P claim\t" +
"F1 P avg\n\n"),
"evaluation_headline": ("set\t" +
"F1 AVG all\tF1 AVG LP\t"
"F1 Link\tF1 R AVG dir\t"
"F1 R support\tF1 R attack\t" +
"F1 P AVG\tF1 P premise\tF1 P claim\tF1 P avg\t" +
"Pr P AVG\tPr P premise\tPr P claim\tPr P avg\t" +
"Rec P AVG\tRec P premise\tRec P claim\tRec P avg\t" +
"Supp P premise\tSupp P claim\t"
"F1 nonLink\tF1_R_avg\tF1_R_not-rel" +
"\n\n"),
"prop_types": ['Premise', 'Claim',],
"rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"],
},
"scidtb_argmin_annotations": {"output_units": (2, 5, 6, 6),
"min_text": 2, # wrong, never measured
"min_prop": 95,
"link_as_sum": [[0, 2], [1, 3, 4]],
"categorical_prop": {'proposal': [1, 0, 0, 0, 0, 0],
'assertion': [0, 1, 0, 0, 0, 0],
'result': [0, 0, 1, 0, 0, 0],
'observation': [0, 0, 0, 1, 0, 0],
'means': [0, 0, 0, 0, 1, 0],
'description': [0, 0, 0, 0, 0, 1],
},
"categorical_link": {'support': [1, 0, 0, 0, 0],
'inv_support': [0, 1, 0, 0, 0],
'attack': [0, 0, 1, 0, 0],
'inv_attack': [0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 1],
},
# TODO: FIX THIS
"evaluation_headline_short": ("set\t"
"F1 AVG all\tF1 AVG LP\tF1 Link\t"
"F1 R AVG dir\tF1 R reason\tF1 R evidence\t" +
"F1 P AVG\t" +
"F1 P policy\tF1 P fact\tF1 P testimony\t" +
"F1 P value\tF1 P reference\tF1 P avg\n\n"),
"evaluation_headline": ("set\t"
"F1 AVG all\t"
"F1 AVG LP\t"
"F1 Link\t"
"F1 R AVG dir\tF1 R support\tF1 R attack\tF1 P AVG\t"
"F1 P proposal\tF1 P assertion\tF1 P result\tF1 P observation\tF1 P means\tF1 P description\t"
"f1 P avg\t"
"PR P AVG\t"
"Pr P proposal\tPr P assertion\tPr P result\tPr P observation\tPr P means\tPr P description\t"
"pr P avg\t"
"REC P AVG\t"
"Rec P proposal\tRec P assertion\tRec P result\tRec P observation\tRec P means\tRec P description\t"
"rec P avg\t"
"Supp P proposal\tSupp P assertion\tSupp P result\tSupp P observation\tSupp P means\tSupp P description\t"
"F1 nonLink\tF1_R_avg\tF1_R_not-rel\n\n"),
"prop_types": ['proposal', 'assertion', 'result', 'observation', 'means', 'description'],
"rel_types": ['supports', 'inv_supports', 'attacks', 'inv_attacks', "None"],
},
"DrInventor": {"output_units": (2, 6, 3, 3),
"min_text": 2, # wrong, never measured
"min_prop": 106,
"link_as_sum": [[0, 2, 4], [1, 3, 5]],
"categorical_prop": {'own_claim': [1, 0, 0,],
'background_claim': [0, 1, 0],
'data': [0, 0, 1],
},
"categorical_link": {'supports': [1, 0, 0, 0, 0, 0],
'inv_supports': [0, 1, 0, 0, 0, 0],
'contradicts': [0, 0, 1, 0, 0, 0],
'inv_contradicts': [0, 0, 0, 1, 0, 0],
'semantically_same': [0, 0, 0, 0, 1, 0],
None: [0, 0, 0, 0, 0, 1],
},
"evaluation_headline_short": ("set\t" +
"F1 AVG all\tF1 AVG LP\tF1 Link\t" +
"F1 R AVG dir\tF1 R supports\tF1 R contradicts\t" +
"F1 P avg\t" +
"F1 P own_claim\tF1 P background_claim\tF1 P data\t" +
"f1 P avg\n\n"),
"evaluation_headline": ("set\t" +
"F1 AVG all\tF1 AVG LP\tF1 Link\t" +
"F1 R AVG dir\tF1 R supports\tF1 R contradicts\tF1 R semsame\t" +
"F1 P avg\t" +
"F1 P own_claim\tF1 P background_claim\tF1 P data\t" +
"f1 P avg\t"
"PR P AVG\t" +
"Pr P own_claim\tPr P background_claim\tPr P data\t" +
"Pr P avg\t"
"REC P AVG\t" +
"Rec P own_claim\tRec P background_claim\tRec P data\t" +
"Rec P avg\t"
"Supp P own_claim\tSupp P background_claim\tSupp P data\t" +
"F1 non-Link\tF1_R_avg\tF1_R_not-rel"
"\n\n"),
"prop_types": ['own_claim', 'background_claim', 'data'],
"rel_types": ['supports', 'inv_supports', 'contradicts', 'inv_contradicts',
"semantically_same", "None"],
},
}
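A short sketch of how this table is consumed, using keys defined above:

    info = dataset_info["AAEC_v2"]
    link_units, rel_units, source_units, target_units = info["output_units"]  # (2, 5, 3, 3)
    claim_onehot = info["categorical_prop"]["Claim"]  # [0, 1, 0]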
|
[
"a.galaxy@outlook.it"
] |
a.galaxy@outlook.it
|
530b1900656472b2656291a8b0a6454f5800aa69
|
077617fac92c16de69c8ae2f0e38b44754a892e9
|
/udp.py
|
16ef2f967c3f02bf644549ca8fe65e3a11751d7d
|
[] |
no_license
|
ivanahepjuk/nbiot_rpi-shield
|
e8af1a249ef96184ad310e7d93ab0009f1e0ddf8
|
45d40dbc9a922c4f89e04c2f3cf2e2f1f387ff4a
|
refs/heads/master
| 2021-07-20T08:10:30.321922
| 2017-10-28T12:09:03
| 2017-10-28T12:09:03
| 108,380,921
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
#!/usr/bin/env python
import socket
import serial
import time
import math
#UDP settings:
UDP_IP = "89.103.47.53" #89.102.98.39"
UDP_PORT = 8089
#serialport settings:
#ser = serial.Serial('/dev/ttyACM3', 9600, timeout=61, xonxoff=False, rtscts=False, dsrdtr=False)
#serial flushing
time.sleep(0.5)
#ser.flushInput()
#ser.flushOutput()
inkrement = 15
smer = 0
index = 0
while True:
    # read one line terminated by the \n character:
    # "hodnoty,box=Adrspach temp=%.2f,hum=%.2f,pres=%.2f,pm1=%.2f,pm25=%.2f,pm10=%.2f,time=%d\r\n"
    # the two working serial-port lines:
    #UDP_MESSAGE = ser.readline()
    #UDP_MESSAGE = UDP_MESSAGE[:-2] # cut off the \r\n from the end of the data read from the serial port
UDP_MESSAGE = 'hodnoty,box=Adrspach hum=' +str(40 + (20 * math.sin(inkrement))) + ',temp=' +str(40 + (20 * math.cos(inkrement)))
#temp=21.21,hum=21.21,pres=500.50,pm1=12.32,pm25=12.32,pm10=12.33,time=1111111'
print ("UDP target IP:", UDP_IP)
print ("UDP target port:", UDP_PORT)
print ("message:", UDP_MESSAGE)
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(bytes(UDP_MESSAGE, "UTF-8"), (UDP_IP, UDP_PORT))
time.sleep(10)
inkrement += 0.1
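A minimal receiver for testing the sender above locally; the bind port mirrors UDP_PORT, but the receiver itself is an assumption, not part of the original repo:

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("0.0.0.0", 8089))  # same port as UDP_PORT above
    while True:
        data, addr = sock.recvfrom(1024)
        print(addr, data.decode("utf-8"))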
|
[
"ivanahepjuk@gmail.com"
] |
ivanahepjuk@gmail.com
|
cf6a0c4833d16887ee9ee3e5afefb8ed33431c13
|
eacff46eda2c6b509449979a16002b96d4645d8e
|
/Collections-a-installer/community-general-2.4.0/tests/integration/targets/launchd/files/ansible_test_service.py
|
87a23fc47d816bb4b2deacd93a3bcfb45fbf1a9f
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
d-amien-b/simple-getwordpress
|
5e6d4d15d5f87124ab591e46b63fec552998fdc3
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
refs/heads/master
| 2023-04-08T22:13:37.347545
| 2021-04-06T09:25:51
| 2021-04-06T09:25:51
| 351,698,069
| 0
| 0
|
MIT
| 2021-03-31T16:16:45
| 2021-03-26T07:30:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
if __name__ == '__main__':
if sys.version_info[0] >= 3:
import http.server
import socketserver
PORT = int(sys.argv[1])
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()
else:
import mimetypes
mimetypes.init()
mimetypes.add_type('application/json', '.json')
import SimpleHTTPServer
SimpleHTTPServer.test()
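The port is the single positional argument, so a test run looks like (port value hypothetical):

    python ansible_test_service.py 8000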
|
[
"test@burdo.fr"
] |
test@burdo.fr
|
374ed2b88911029a8ab121c1aa784973710303df
|
f1611fcb4029e59f9b1a16c88b4e7805659b952b
|
/entity extraction from speech/populate_form.py
|
d7cf9e30c1a8cba303b8a369d36675b576918a56
|
[] |
no_license
|
syntaxterrorr/Insurance-Fraud-Detection
|
c4505490ae14fdd0fefe3a9726ac82d5c47d5910
|
4989ef0cbde47b463292c4a1cfaa073f2e2bf919
|
refs/heads/master
| 2020-04-26T08:28:01.513387
| 2019-03-04T05:28:58
| 2019-03-04T05:28:58
| 173,423,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,692
|
py
|
from rasa_nlu.model import Interpreter
import json
import speech_recognition as sr
r = sr.Recognizer()
speech = sr.AudioFile('aamir.wav')
with speech as source:
audio = r.record(source)
text = r.recognize_google(audio)
# text = u"I am calling to claim insurance for my Honda Civic that suffered an accident two days ago when it came in the way of a multi vehical collision."
# This is a call to claim insurance for a [Honda](brand) [Accord](model) from [Karnataka](state). My [Son](relation) was driving his car on the 16th of february this year when he suffered a [front](collision) collision which resulted in [major](severity) loss. We called the [fire](authorities) fighters. It was in the [night](time_of_day). It was a [multi vehical](incident_type) crash.
interpreter = Interpreter.load('models/current/nlu')
result = interpreter.parse(text)
# print(json.dumps(result))
entities = {}
incident_hour = {'morning': 9, 'afternoon': 2, 'evening': 6, 'night': 10}
relative = {'child':'own-child', 'son':'own-child', 'daughter':'own-child', 'husband':'husband', 'wife':'wife', 'relative':'other-relative'}
for entity in result['entities']:
if entity['confidence'] < 0.35:
continue
extractor = entity['extractor']
if extractor == 'ner_crf':
key = entity['entity']
value = entity['value']
if key == 'time_of_day':
entities['incident_hour'] = incident_hour[value]
elif key == 'relation':
entities[key] = relative[value]
elif key == 'collision':
entities[key] = value.capitalize() + ' Collision'
else:
entities[key] = value.capitalize()
print(result['entities'])
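For reference, the shape of each entity this loop consumes, inferred from the field accesses above (the values are hypothetical):

    example_entity = {
        'entity': 'collision',   # slot name produced by the ner_crf extractor
        'value': 'front',        # extracted text span
        'confidence': 0.82,      # entities below 0.35 are skipped
        'extractor': 'ner_crf',
    }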
|
[
"aamir@darukhanawalla.com"
] |
aamir@darukhanawalla.com
|
6472b56dba93e6e79962129ca72af412695bb634
|
ae4441d5f79a46dbd99a575bd814842f5cbbc181
|
/django_azure_linebot/urls.py
|
f4c5b24b1eab528b630f191685df68c6840a7e6e
|
[] |
no_license
|
chrissmart/django_azure_linebot
|
938ee389316f99e897f0767c3fd9b003ae2e2d7b
|
1c2d88ea8c55e64dedb880dc45c9cd885e6a301e
|
refs/heads/master
| 2021-01-01T19:58:04.185177
| 2017-07-29T12:38:09
| 2017-07-29T12:38:09
| 98,731,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
"""django_azure_linebot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"mr.clare007@hotmail.com"
] |
mr.clare007@hotmail.com
|
8916c15193078890d423b88049af7ba61c106438
|
00d5a9202f3dde90b760dfef63932f862d8e1b52
|
/nsx.py
|
bea933d58b83a04e2357d640b48d57171f4131f7
|
[] |
no_license
|
sariego/blender-simulation-visualizer
|
2cc021d47f19e205d04ba9a650e7330375ff0c5c
|
b82ee509fee439b7d6a7e895ef98377d066a84ab
|
refs/heads/master
| 2021-05-27T23:19:14.350497
| 2014-11-05T02:02:11
| 2014-11-05T02:02:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
from t3dclasses import *
from spagheti import *
from itehgaps import *
root = Tk()
root.resizable(0,0)
root.title('Visor de trayectoria de particulas')
root.iconbitmap('xyz.ico')
note = Notebook(root)
note.pack()
##first = Frame(root)
##directorio = DirectoryGetter(first)
##frames = FramesGetter(first)
##limits = LimitsGetter(first)
##Separator(first).pack(fill=X)
##particles = ParticlesGetter(first,directorio)
##ExAndGo(first, directorio, particles, frames)
fideos = Frame(root)
directorio_f = DirectoryGetter(fideos)
frames_f = FramesGetter(fideos)
detalles = SpaDetails(fideos)
show = Show(fideos, directorio_f, detalles)
Separator(fideos).pack(fill=X)
Go(fideos, directorio_f, frames_f, detalles, show)
soedif = Frame(root)
directorio_s = DirectoryGetter(soedif)
frames_s = FramesGetter(soedif)
detalles_s = SpaDetails(soedif)
show_s = Show(soedif, directorio_s, detalles_s)
Separator(soedif).pack(fill=X)
Og(soedif, directorio_s, frames_s, detalles_s, show_s)
#status = StatusBar(root)
#note.add(first, text='Trayectoria 3D')
note.add(fideos, text='Spagheti')
note.add(soedif, text='itehgapS')
root.mainloop()
|
[
"pedrosariego@gmail.com"
] |
pedrosariego@gmail.com
|
655fc60113486788cda8dd340498234bbc42f5bc
|
0172c529b9490f18a40d3f068dd04d994fd164a7
|
/tests/test_util.py
|
fb6868a824e17ab1acdaa96c7d119a208b4ec171
|
[] |
no_license
|
tillahoffmann/variational_bayes
|
c8cbc5859d0a3ce9600d6f4f5551686a4865af47
|
590903fb4f2c39bd501ec1e42a2b1267439d2ffb
|
refs/heads/master
| 2021-01-20T01:22:31.908349
| 2018-08-21T12:15:19
| 2018-08-21T12:15:19
| 89,263,469
| 1
| 1
| null | 2017-08-22T13:47:53
| 2017-04-24T16:34:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import itertools as it
import numpy as np
import pytest
import variational_bayes as vb
@pytest.mark.parametrize('shape', [10, (3, 7)])
def test_softmax(shape):
x = np.random.normal(0, 1, shape)
proba = vb.softmax(x)
np.testing.assert_array_less(0, proba)
np.testing.assert_allclose(np.sum(proba, axis=-1), 1)
@pytest.mark.parametrize('num_blocks, block_size, offset',
it.product([1, 3, 7], [1, 5, 9], [0, 11]))
def test_pack_unpack_diag_roundtrip(num_blocks, block_size, offset):
blocks = np.random.normal(0, 1, (num_blocks, block_size, block_size))
packed = vb.pack_block_diag(blocks, offset)
unpacked = vb.unpack_block_diag(packed, block_size, offset)
np.testing.assert_allclose(blocks, unpacked)
def test_onehot():
z = np.random.randint(0, 5, 100)
onehot = vb.onehot(z, 5)
np.testing.assert_equal(np.argmax(onehot, 1), z)
def test_cluster_order():
z = np.random.randint(0, 5, 100)
onehot = vb.onehot(z)
order = np.random.permutation(5)
np.testing.assert_equal(vb.cluster_order(onehot[:, order], onehot), order)
|
[
"tillahoffmann@gmail.com"
] |
tillahoffmann@gmail.com
|
30459cc5e6a093410d325a173ea9cba76452b99a
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/HotelSoldOrdersIncrementGetRequest.py
|
08229a2b1227b49f5b2a06b967fb59b0da52b1e9
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570
| 2016-03-04T09:48:24
| 2016-03-04T09:48:24
| 45,093,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class HotelSoldOrdersIncrementGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.end_modified = None
self.need_guest = None
self.need_message = None
self.page_no = None
self.page_size = None
self.start_modified = None
self.status = None
self.use_has_next = None
def getapiname(self):
return 'taobao.hotel.sold.orders.increment.get'
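
# Illustrative call sketch (assumes the official TOP Python SDK; the app key, secret and
# session below are placeholders, and set_app_info()/getResponse() are assumptions about
# the SDK's RestApi base class, not confirmed by this file):
# req = HotelSoldOrdersIncrementGetRequest()
# req.set_app_info(top.appinfo('appkey', 'secret'))
# req.page_no = 1
# req.page_size = 40
# resp = req.getResponse('sessionkey')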
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
3e9b1c5df00f5f386c3f78b3a37146483923a0fc
|
69c5ff8edc25452a6732caf74de020eedaaf5a69
|
/signal_test.py
|
cfe0e1dca1294e184fdc5d764f61b1d61333f0a0
|
[] |
no_license
|
ree-rishun/IoH
|
e7a97dfe3858b0d88237b0f84468f41b67d7110c
|
af66c25e17e00b5da46cd0c99bafab395c85bae4
|
refs/heads/master
| 2020-07-03T05:57:20.152574
| 2019-08-12T05:53:47
| 2019-08-12T05:53:47
| 201,810,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
# -*- coding: utf-8 -*-
# import
import time
import sys
import wiringpi
import RPi.GPIO as GPIO
# Declarations
# GPIO Pin
SPICLK = 11
SPIMOSI = 10
SPIMISO = 9
SPICS = 8
# GPIO config
GPIO.setmode(GPIO.BCM)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICS, GPIO.OUT)
# ADC read function
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if adcnum > 7 or adcnum < 0:
return -1
GPIO.output(cspin, GPIO.HIGH)
GPIO.output(clockpin, GPIO.LOW)
GPIO.output(cspin, GPIO.LOW)
commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # shift so that bit 8 (from the LSB) is sent first
for i in range(5):
        # send bits 8 down to 4, counting from the LSB
if commandout & 0x80:
GPIO.output(mosipin, GPIO.HIGH)
else:
GPIO.output(mosipin, GPIO.LOW)
commandout <<= 1
GPIO.output(clockpin, GPIO.HIGH)
GPIO.output(clockpin, GPIO.LOW)
adcout = 0
    # read 13 bits (one null bit + 12 data bits)
for i in range(13):
GPIO.output(clockpin, GPIO.HIGH)
GPIO.output(clockpin, GPIO.LOW)
adcout <<= 1
if i>0 and GPIO.input(misopin)==GPIO.HIGH:
adcout |= 0x1
GPIO.output(cspin, GPIO.HIGH)
return adcout
# main loop
while True:
try:
        # read a value from the ADC
inputVal = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
print(inputVal)
except (KeyboardInterrupt, SystemExit):
exit()
|
[
"ree0432@gmail.com"
] |
ree0432@gmail.com
|
1d7c655cada6cc584bc771570befda0a77c18d18
|
0f0570125ecbbdfda0acaed9d3ec0a297616f79a
|
/day4/part1.py
|
d4665159f88161442f8958dc01d42adf0a30d047
|
[] |
no_license
|
na-wu/2019-aoc
|
3b3689d13cab22a4200f177fd6e28adada36a303
|
ad91fb704c00b590dcd89c1f67d9d1738b866fbd
|
refs/heads/master
| 2020-09-23T10:28:28.497778
| 2019-12-24T22:33:46
| 2019-12-24T22:33:46
| 225,477,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
RANGE1 = 124075
RANGE2 = 580770
# Transform @param num into int array
# iteratively check if each element has an identical neighbour
# Return True immediately if found, presence of more identical neighbors ignored
def checkAdjacent(num: int) -> bool:
arr = list(map(int, str(num)))
for j in range(len(arr) - 1):
if arr[j] == arr[j+1]:
return True
return False
# Create 2 copies of @param num, and transform them into int arrays
# If sorting one does not change the array structure,
# it means they were initially sorted
def checkIncreasing(num: int) -> bool:
expectedArr = list(map(int, str(num)))
arr = list(map(int, str(num)))
if sorted(arr) == expectedArr:
return True
else:
return False
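
# Illustrative spot-checks for the two predicates (hypothetical values):
#   checkAdjacent(122345)   -> True   (the adjacent pair "22")
#   checkIncreasing(122345) -> True   (digits never decrease)
#   checkIncreasing(132450) -> False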
def foo():
numPossibilities = 0
for i in range(RANGE1, RANGE2):
if checkAdjacent(i) and checkIncreasing(i): # If both conditions are satisfied, it is a possible password
numPossibilities = numPossibilities + 1
print(numPossibilities)
def main():
foo()
if __name__ == '__main__':
main()
|
[
"nwu1018@gmail.com"
] |
nwu1018@gmail.com
|
24bce9adfd9986c448487e74e16658ad17c265dd
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/poster/tags/V00-00-01/SConscript
|
b190c174d8a3bdc4f9abefb1be153557a06627e1
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,985
|
#------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package poster
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
import os
from SConsTools.standardExternalPackage import standardExternalPackage
#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving few keyword arguments. Here is a complete list of arguments:
#
# PREFIX - top directory of the external package
# INCDIR - include directory, absolute or relative to PREFIX
# INCLUDES - include files to copy (space-separated list of patterns)
# PYDIR - Python src directory, absolute or relative to PREFIX
# LINKPY - Python files to link (patterns), or all files if not present
# PYDIRSEP - if present and evaluates to True installs python code to a
# separate directory arch/$SIT_ARCH/python/<package>
# LIBDIR - libraries directory, absolute or relative to PREFIX
# COPYLIBS - library names to copy
# LINKLIBS - library names to link, or all libs if LINKLIBS and COPYLIBS are empty
# BINDIR - binaries directory, absolute or relative to PREFIX
# LINKBINS - binary names to link, or all binaries if not present
# PKGLIBS - names of libraries that have to be linked for this package
# DEPS - names of other packages that we depend upon
# PKGINFO - package information, such as RPM package name
# here is an example setting up a fictional package
pkg = "poster"
pkg_ver = "0.8.1"
PREFIX = os.path.join('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = os.path.join("lib", '$PYTHON', "site-packages", pkg)
PYDIRSEP = True
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage ( pkg, **locals() )
|
[
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
|
ce6adbff5e4323bd42d9c2f2edf80e4909dc4551
|
177fd37d63ab37aa66b5054f74a018aa788a6183
|
/trips-backend/besttrips/wsgi.py
|
b4e04dbbfbc8bb7a769961f2d2cf3656db4bec99
|
[
"MIT"
] |
permissive
|
pgarr/best-trips
|
504475adfb3c72d558c5c4d036a8eb447296cf64
|
edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310
|
refs/heads/main
| 2023-03-29T01:32:03.285668
| 2021-04-13T17:19:33
| 2021-04-13T17:19:33
| 342,875,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for besttrips project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'besttrips.settings')
application = get_wsgi_application()
|
[
"garlej.p@gmail.com"
] |
garlej.p@gmail.com
|
dc5f47e41dd896ee44f05aa76d5189db027ffe70
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/ABC146/a.py
|
c10b5d53d83522345cefe135c52ff627ef03099c
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072
| 2022-06-13T14:39:07
| 2022-06-13T14:39:07
| 235,240,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
week = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
day = str(input())
if day == "SUN":
result = 7
else:
result = week.index("SUN") - week.index(day)
print(result)
|
[
"so.eng.eng.1rou@gmail.com"
] |
so.eng.eng.1rou@gmail.com
|
b4f5e476d9ed960c5b7c75ae3306646d0f653c4a
|
b519ebf4af176a53036373ba7c59552ed8a86505
|
/modelo/datosVehiculo.py
|
003af032d1b96ea3042c0e9a1cfb6e9f5e5274d8
|
[] |
no_license
|
cesarmcuellar/LavaOpita
|
f88dd260b306977c2bb92a5f95ede61eb1e04a89
|
70b0efa0b209b265fe93552b13c0fe5fd7c38c9c
|
refs/heads/main
| 2022-12-27T01:09:16.940067
| 2020-10-10T22:25:34
| 2020-10-10T22:25:34
| 301,786,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
from flask_mysqldb import MySQL
class DatosVehiculo():
def __init__(self, mysql):
self.mysql=mysql
self.cursor=self.mysql.connection.cursor()
def consultarPorPlaca(self, placa):
consulta="select * from vehiculos where vehPlaca= %s"
self.cursor.execute(consulta,(placa,))
resultado = self.cursor.fetchone()
self.cursor.close()
return resultado
|
[
"noreply@github.com"
] |
noreply@github.com
|
19e5431649b0241c81c88ff498959a3fad797237
|
4f179fdd48108020f49064be6686abcaac69d1ef
|
/Medium/11_container_with_most_water.py
|
95dfcdd654e6cd1fec0561ba2c9be6f804acfe7a
|
[] |
no_license
|
RuchitDoshi/LeetCode_Practice
|
b1e4fc64b9e8b5b60b1d4c115d7f1477b83fa6dc
|
48dd00abc4b71e83b475ecdac23bc3ddbe55641e
|
refs/heads/master
| 2023-03-04T07:45:28.978099
| 2021-02-14T04:46:14
| 2021-02-14T04:46:14
| 283,289,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
class Solution:
def maxArea(self, height) -> int:
area=0
head=0
tail=len(height)-1
while(head!=tail):
if (height[head] < height[tail]):
temp=(tail-head)*(height[head])
head+=1
else:
temp=(tail-head)*height[tail]
tail-=1
if temp > area :
area=temp
return area
|
[
"doshiruchit12@gmail.com"
] |
doshiruchit12@gmail.com
|
9217751689c20a44cbffa776fd1f9c8aabb36593
|
5a396f14b3689273aaf1a6e20dcb0853d78a9f04
|
/GetSharedWithDomainTeamDriveACLs.py
|
0c114d9b0daa023ea4ef045d01a424197485f1cf
|
[] |
no_license
|
NosIreland/GAM-Scripts3
|
642b4dd827189352afd8357a41b576d6acf159bc
|
de3ee3007e6906c5b6d28fef8aea27827646db00
|
refs/heads/master
| 2023-03-04T21:58:44.594405
| 2021-02-18T14:39:20
| 2021-02-18T14:39:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,744
|
py
|
#!/usr/bin/env python3
"""
# Purpose: For a Google Drive User(s), delete all drive file ACLs for Team Drive files shared with a list of specified domains
# Note: This script requires Advanced GAM:
# https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST and DESIRED_ALLOWFILEDISCOVERY
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
# $ python -V or python3 -V
# Python 3.x.y
# Usage:
# For all Team Drives, start at step 1; For Team Drives selected by user/group/OU, start at step 7
# All Team Drives
# 1: Get all Team Drives.
# $ gam redirect csv ./TeamDrives.csv print teamdrives fields id,name
# 2: Get ACLs for all Team Drives
# $ gam redirect csv ./TeamDriveACLs.csv multiprocess csv TeamDrives.csv gam print drivefileacls ~id fields emailaddress,role,type
# 3: Customize GetTeamDriveOrganizers.py for this task:
# Set DOMAIN_LIST as required
# Set ONE_ORGANIZER = True
# Set SHOW_GROUP_ORGANIZERS = False
# Set SHOW_USER_ORGANIZERS = True
# 4: From that list of ACLs, output a CSV file with headers "id,name,organizers"
# that shows the organizers for each Team Drive
# $ python3 GetTeamDriveOrganizers.py TeamDriveACLs.csv TeamDrives.csv TeamDriveOrganizers.csv
# 5: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDriveOrganizers.csv gam user ~organizers print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
# 6: Go to step 11
# Selected Team Drives
# 7: If want Team Drives for a specific set of organizers, replace <UserTypeEntity> with your user selection in the command below
# $ gam redirect csv ./AllTeamDrives.csv <UserTypeEntity> print teamdrives role organizer fields id,name
# 8: Customize DeleteDuplicateRows.py for this task:
# Set ID_FIELD = 'id'
# 9: Delete duplicate Team Drives (some may have multiple organizers).
# $ python3 DeleteDuplicateRows.py ./AllTeamDrives.csv ./TeamDrives.csv
# 10: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type domain em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type domain allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type domain allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDrives.csv gam user ~User print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions pm type domain em
# Common code
# 11: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,permissionId,role,domain,allowFileDiscovery"
# that lists the driveFileIds and permissionIds for all ACLs shared with the selected domains.
# (n.b., driveFileTitle, role, domain and allowFileDiscovery are not used in the next step, they are included for documentation purposes)
# $ python3 GetSharedWithDomainTeamDriveACLs.py filelistperms.csv deleteperms.csv
# 12: Inspect deleteperms.csv, verify that it makes sense and then proceed
# 13: Delete the ACLs
# $ gam csv deleteperms.csv gam user "~Owner" delete drivefileacl "~driveFileId" "~permissionId"
"""
import csv
import re
import sys
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
# If you want to limit finding ACLs to a specific list of domains, use the list below,
# e.g., DOMAIN_LIST = ['domain.com',] or DOMAIN_LIST = ['domain1.com', 'domain2.com',]
DOMAIN_LIST = []
# Specify desired value of allowFileDiscovery field: True, False, Any (matches True and False)
DESIRED_ALLOWFILEDISCOVERY = 'Any'
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
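# e.g. PERMISSIONS_N_TYPE.match('permissions.3.type').group(1) -> '3'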
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'permissionId', 'role', 'domain', 'allowFileDiscovery'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
inputFile = sys.stdin
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
for k, v in iter(row.items()):
mg = PERMISSIONS_N_TYPE.match(k)
if mg and v == 'domain':
permissions_N = mg.group(1)
domain = row[f'permissions.{permissions_N}.domain']
allowFileDiscovery = row.get(f'permissions.{permissions_N}.allowFileDiscovery', str(row.get(f'permissions.{permissions_N}.withLink') == 'False'))
if (not DOMAIN_LIST or domain in DOMAIN_LIST) and (DESIRED_ALLOWFILEDISCOVERY in ('Any', allowFileDiscovery)):
outputCSV.writerow({'Owner': row['Owner'],
'driveFileId': row['id'],
'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
'permissionId': f'id:{row[f"permissions.{permissions_N}.id"]}',
'role': row[f'permissions.{permissions_N}.role'],
'domain': domain,
'allowFileDiscovery': allowFileDiscovery})
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
|
[
"ross.scroggs@gmail.com"
] |
ross.scroggs@gmail.com
|
acef9f70b1845e351623099172b72e8864af5bd7
|
ca1342a596280070a1b1f7f702f15dc382569eb7
|
/guessingGame.py
|
4c66b69ad167916d11659f4f074c3cff3383a1a9
|
[] |
no_license
|
Lakshya7312/python_number_guesser
|
0de0e939f86f6bd66232a38ec589a08df889e343
|
0a7156074de4506ab9374befc7822d76840aeec5
|
refs/heads/main
| 2023-02-26T21:33:39.038699
| 2021-02-02T08:39:44
| 2021-02-02T08:39:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
# Import the random module for generating random numbers
import random
# Message to start game & instructions
print("\nA number between 1 and 9 has been generated!\nYou have only 5 chances to guess the number!\nStart Guessing!\n")
# Initializing variables
number = random.randint(1, 9)
guessCount = 0
# Main guessing loop: at most 5 attempts
while guessCount < 5:
guess = int(input("Enter your guess: "))
guessCount += 1
if number > guess:
print("\nYour guess is smaller than the number, try guessing higher!\n")
elif number < guess:
print("\nYour guess is higher than the number, try guessing smaller!\n")
    if number == guess:
        break

if guess == number:
    print("\nYaY! You guessed the number in " + str(guessCount) + " try(s)!")
else:
    print("Your 5 tries are over! You couldn't guess the number\nThe number was " + str(number))
|
[
"noreply@github.com"
] |
noreply@github.com
|
4cbf6e3fcafd24fc240850a479e41ddfe6d770ac
|
d5b339d5b71c2d103b186ed98167b0c9488cff09
|
/marvin/cloudstackAPI/deleteCondition.py
|
e6c1d13261e1a92194f4e5a345cf1351557e1bd8
|
[
"Apache-2.0"
] |
permissive
|
maduhu/marvin
|
3e5f9b6f797004bcb8ad1d16c7d9c9e26a5e63cc
|
211205ae1da4e3f18f9a1763f0f8f4a16093ddb0
|
refs/heads/master
| 2020-12-02T17:45:35.685447
| 2017-04-03T11:32:11
| 2017-04-03T11:32:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
"""Removes a condition"""
from baseCmd import *
from baseResponse import *
class deleteConditionCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""the ID of the condition."""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
self.required = ["id", ]
class deleteConditionResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""any text associated with the success or failure"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""true if operation is executed successfully"""
self.success = None
self.typeInfo['success'] = 'boolean'
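
# Illustrative usage sketch (assumes a marvin API client instance named `apiclient`;
# the UUID is a placeholder):
# cmd = deleteConditionCmd()
# cmd.id = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
# response = apiclient.deleteCondition(cmd)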
|
[
"int-mccd_jenkins@schubergphilis.com"
] |
int-mccd_jenkins@schubergphilis.com
|
2f6d6fbdfda9f1bfcda1e71694f8a0666de5b492
|
8c0674a5e31d4de10235e6533145ab2e15a834d9
|
/0x03-python-data_structures/8-multiple_returns.py
|
43d1dafa6c062b66ad3d561443d9c24e91bf0136
|
[] |
no_license
|
jdanielue/holbertonschool-higher_level_programming
|
1815f86b15490998c7ca7a9a22450c8cb6968f60
|
aff477e3c7629da333e0675395e9a24fdaf4dc73
|
refs/heads/master
| 2023-04-16T23:01:28.961579
| 2021-05-06T13:48:48
| 2021-05-06T13:48:48
| 319,405,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!/usr/bin/python3
def multiple_returns(sentence):
if len(sentence) == 0:
return 0, None
return len(sentence), sentence[0]
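
# Examples:
#   multiple_returns("Hello") -> (5, 'H')
#   multiple_returns("")      -> (0, None)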
|
[
"jdurregoe@gmail.com"
] |
jdurregoe@gmail.com
|
b1595331ebd43ebd2cf3e52cfd1d6589b83e28b2
|
391dbe903b191fd2d439947a5bf5f73d3a19db7a
|
/pyocd/utility/hex.py
|
01040beddb958dc07d06ca5ef2fef96a41ca14c0
|
[
"MIT"
] |
permissive
|
XIVN1987/RTTView
|
eb45b2e5c5d7fed3178980f5f118e91af640aa5f
|
05d237f0baa8dd1107018a9d560eb205c6e5432e
|
refs/heads/master
| 2023-03-06T07:52:47.345165
| 2023-02-08T09:53:43
| 2023-02-08T09:53:43
| 138,362,483
| 80
| 27
|
MIT
| 2021-10-19T05:09:31
| 2018-06-23T01:40:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
# pyOCD debugger
# Copyright (c) 2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def format_hex_width(value, width):
if width == 8:
return "%02x" % value
elif width == 16:
return "%04x" % value
elif width == 32:
return "%08x" % value
else:
raise ValueError("unrecognized register width (%d)" % width)
def dump_hex_data(data, startAddress=0, width=8, output=None):
if output is None:
output = sys.stdout
i = 0
while i < len(data):
output.write("%08x: " % (startAddress + (i * (width // 8))))
while i < len(data):
d = data[i]
i += 1
if width == 8:
output.write("%02x " % d)
if i % 4 == 0:
output.write(" ")
if i % 16 == 0:
break
elif width == 16:
output.write("%04x " % d)
if i % 8 == 0:
break
elif width == 32:
output.write("%08x " % d)
if i % 4 == 0:
break
output.write("\n")
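
# Illustrative usage (hypothetical data; output shown approximately):
#   dump_hex_data([0xde, 0xad, 0xbe, 0xef], startAddress=0x20000000, width=8)
# prints a line like "20000000: de ad be ef"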
|
[
"XIVN1987@163.com"
] |
XIVN1987@163.com
|
f15b490903261f2afb007cd9301a5d6ac771bbba
|
23e9bc3c6810b3cfa897653eefe74dadf5817d6b
|
/2020/day1-2.py
|
f7d4a87409d28ecaa1760d00e9dca9c543fe3416
|
[] |
no_license
|
LordBrom/advent-of-code
|
3f68789b0fd31e22f840b26c7984cccdfc26e4c8
|
f7165af7f03b3fb0d3db01191cdcff8a95474b8f
|
refs/heads/master
| 2023-07-31T19:19:09.062740
| 2021-09-05T19:58:16
| 2021-09-05T19:58:16
| 319,400,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
inputsArray = open("day1.in", "r").read().split("\n")
inputsArray.pop()
for i in range(len(inputsArray)):
for j in range(i, len(inputsArray)):
for k in range(j, len(inputsArray)):
if int(inputsArray[i]) + int(inputsArray[j]) + int(inputsArray[k]) == 2020:
print(int(inputsArray[i]) * int(inputsArray[j]) * int(inputsArray[k]))
|
[
"mills.nate@gmail.com"
] |
mills.nate@gmail.com
|
2f5f68b3f678d84bdc6d49307107c175f3c16b8f
|
4454e19d52e71e5fd1435e2a37dcfd074f944f83
|
/utils/ms_ssim.py
|
549732d887c62f8b861c54854093f878b64c3ca6
|
[] |
no_license
|
Emr03/deepInfoMax
|
854d6c3289ae13b7aa1e8783a7a4db9f9946499f
|
b5c0182d71c88c1af872fcd78c51cffdbf10106e
|
refs/heads/master
| 2022-11-02T16:06:27.899770
| 2020-06-16T21:21:53
| 2020-06-16T21:21:53
| 248,629,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
import torch
import math
def ms_ssim(X_a, X_b, window_size=11, size_average=True, C1=0.01**2, C2=0.03**2):
"""
Taken from Po-Hsun-Su/pytorch-ssim
"""
channel = X_a.size(1)
def gaussian(sigma=1.5):
gauss = torch.Tensor(
[math.exp(-(x - window_size // 2) **
2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window():
_1D_window = gaussian(window_size).unsqueeze(1)
_2D_window = _1D_window.mm(
_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = torch.Tensor(
_2D_window.expand(channel, 1, window_size,
window_size).contiguous())
return window.cuda()
window = create_window()
mu1 = torch.nn.functional.conv2d(X_a, window,
padding=window_size // 2, groups=channel)
mu2 = torch.nn.functional.conv2d(X_b, window,
padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = torch.nn.functional.conv2d(
X_a * X_a, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = torch.nn.functional.conv2d(
X_b * X_b, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = torch.nn.functional.conv2d(
X_a * X_b, window, padding=window_size // 2, groups=channel) - mu1_mu2
ssim_map = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) /
((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
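
# Minimal usage sketch (create_window() calls .cuda(), so a CUDA device is assumed):
# X_a = torch.rand(1, 3, 64, 64).cuda()
# X_b = torch.rand(1, 3, 64, 64).cuda()
# score = ms_ssim(X_a, X_b)  # scalar tensor; closer to 1 for more similar images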
|
[
"elsa.riachi@mail.mcgill.ca"
] |
elsa.riachi@mail.mcgill.ca
|
b89514586d2c8ef0a1fa99979861bca2c7fc1001
|
6d486cc1fc08b79111c701f5c9b0e74abf809e4f
|
/test/test_dml.py
|
382d8f939676d32a8c02a99c4d958a5cf0f8e799
|
[
"MIT"
] |
permissive
|
albertvisser/a-propos
|
1d16c122a7698e6e32a55767a1af9acbbf6c3ca2
|
31c89bc4560b93635a79ab3ab7d224b8d8f756fb
|
refs/heads/master
| 2023-04-30T19:28:40.235949
| 2023-04-15T13:29:44
| 2023-04-15T13:29:44
| 219,269,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
import pytest
from apropos import dml
def test_get_apofile(monkeypatch, capsys):
assert dml.get_apofile('') == dml.pathlib.Path('apropos.apo')
assert dml.get_apofile('test') == dml.pathlib.Path('test.apo')
assert dml.get_apofile('testing.pck') == dml.pathlib.Path('testing.apo')
def test_load_notes_file_not_found(tmp_path):
"""run the load_notes method with no existing file
"""
# monkeypatch.setattr(dml.pathlib.Path, 'exists', lambda *x: False)
apofile = tmp_path / 'apropos.apo'
opts, apodata = dml.load_notes(apofile)
assert opts == {"AskBeforeHide": True, "ActiveTab": 0, 'language': 'eng',
'NotifyOnSave': True, 'NotifyOnLoad': True}
assert apodata == {}
def test_load_notes_not_a_pickle(tmp_path):
"""run the load_notes method with a non-pickle file
"""
apofile = tmp_path / 'apropos.apo'
with apofile.open(mode='w') as f:
f.write("oihgyavjjvjdvj diidn dnni")
f.write("\n")
    # only checks implicitly that load_notes survives a non-pickle file without raising
    opts, apodata = dml.load_notes(apofile)
def test_load_notes_happy_flow(tmp_path):
"""run the load_notes method on a "correct" file
"""
apofile = tmp_path / 'apropos.apo'
with apofile.open(mode='wb') as f:
dml.pickle.dump({0: 'opts', 1: 'apodata'}, f, protocol=2)
opts, apodata = dml.load_notes(apofile)
assert opts == 'opts'
assert apodata == {1: 'apodata'}
def test_save_notes_happy_flow(tmp_path):
"""save the notes and check if it's readable correctly
"""
apofile = tmp_path / 'apropos.apo'
opts = 'opts'
apodata = {1: 'apodata'}
dml.save_notes(apofile, opts, apodata)
with apofile.open(mode='rb') as f:
data = dml.pickle.load(f)
assert data == {0: 'opts', 1: 'apodata'}
|
[
"albert.visser@gmail.com"
] |
albert.visser@gmail.com
|
a39d2cecc8e96bfe1de6ff608258dea96d977463
|
a267b269c261656bdefe99586c7dc3fb9d7f4ad1
|
/lesson1/price.py
|
cec3719bc3d56c01438dbc07d524f51f49849deb
|
[] |
no_license
|
Sandello76/learn-python
|
2e6785b9d716c8b32a24a2f2712091f2a82fea60
|
7309da4fa50ddb1ca311df46fa493d69024a84ca
|
refs/heads/master
| 2023-05-06T17:39:46.223060
| 2021-05-29T00:31:59
| 2021-05-29T00:31:59
| 367,744,547
| 0
| 0
| null | 2021-05-29T00:32:00
| 2021-05-15T22:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
'''price = 100
discount = 5
price_with_discount = price - price * discount / 100
print(price_with_discount)
'''
def discounted(price, discount, max_discount=20):
price = abs(float(price))
discount = abs(float(discount))
max_discount = abs(float(max_discount))
    if max_discount > 99:
        raise ValueError('Maximum discount is too large')
if discount >= max_discount:
return price
else:
return price - (price * discount / 100)
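
# Examples (illustrative):
#   discounted(100, 5)   -> 95.0   (5% off, under the default 20% cap)
#   discounted(100, 25)  -> 100.0  (discount >= max_discount, so price is unchanged)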
|
[
"raue76@gmail.com"
] |
raue76@gmail.com
|
95284482bae6b27e575c9dba31d4eb4b90091235
|
7a9b0a37b5f7519898e58237a46542b17dbdd0b1
|
/bookmark/views.py
|
9527727871930c2332f09f63d8769cdf47f12003
|
[] |
no_license
|
DDIPONG/devops
|
39dc1c228dba441253839669eb9c922f43e1d0f1
|
40ffd9ab074bfc635ca1865dddac18c283569b64
|
refs/heads/master
| 2022-12-27T12:37:23.992065
| 2020-10-06T06:46:42
| 2020-10-06T06:46:42
| 299,507,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView, DetailView
# To use class-based generic views, import ListView and DetailView
from bookmark.models import Bookmark
# Import the model class so the views can query the Bookmark table.
class BookmarkLV(ListView):
model = Bookmark
# BookmarkLV shows the list of Bookmark records.
# It inherits from the generic ListView.
class BookmarkDV(DetailView):
model = Bookmark
# BookmarkDV shows the detail of a single Bookmark record.
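
# Illustrative URLconf wiring (hypothetical urls.py, not part of this file):
# from django.urls import path
# from bookmark.views import BookmarkLV, BookmarkDV
# urlpatterns = [
#     path('bookmark/', BookmarkLV.as_view(), name='index'),
#     path('bookmark/<int:pk>/', BookmarkDV.as_view(), name='detail'),
# ]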
|
[
"root@ip-192-168-114-129.ap-northeast-2.compute.internal"
] |
root@ip-192-168-114-129.ap-northeast-2.compute.internal
|
8f80e791626212567e6315fb22f46ac16819c206
|
59f2c160c6497ad60c5eb30ba9408942b46aabf1
|
/smartcab/simulator.py
|
a6cd662855505b76698d98072e3aca0cd2ba276a
|
[] |
no_license
|
atrij/ai_cabdriver
|
bfd11568471e9dc4d5e8e252e11d87526dae09ae
|
5cbc5e6305ceaf1ff9eac10720de1d2f6a70ba29
|
refs/heads/master
| 2021-01-21T15:03:55.952261
| 2017-06-25T18:04:10
| 2017-06-25T18:04:10
| 95,374,182
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,166
|
py
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
###########################################
import os
import time
import random
import importlib
import csv
class Simulator(object):
"""Simulates agents in a dynamic smartcab environment.
Uses PyGame to display GUI, if available.
"""
colors = {
'black' : ( 0, 0, 0),
'white' : (255, 255, 255),
'red' : (255, 0, 0),
'green' : ( 0, 255, 0),
'dgreen' : ( 0, 228, 0),
'blue' : ( 0, 0, 255),
'cyan' : ( 0, 200, 200),
'magenta' : (200, 0, 200),
'yellow' : (255, 255, 0),
'mustard' : (200, 200, 0),
'orange' : (255, 128, 0),
'maroon' : (200, 0, 0),
'crimson' : (128, 0, 0),
'gray' : (155, 155, 155)
}
def __init__(self, env, size=None, update_delay=2.0, display=True, log_metrics=False, optimized=False):
self.env = env
self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 2) * self.env.block_size)
self.width, self.height = self.size
self.road_width = 44
self.bg_color = self.colors['gray']
self.road_color = self.colors['black']
self.line_color = self.colors['mustard']
self.boundary = self.colors['black']
self.stop_color = self.colors['crimson']
self.quit = False
self.start_time = None
self.current_time = 0.0
self.last_updated = 0.0
self.update_delay = update_delay # duration between each step (in seconds)
self.display = display
if self.display:
try:
self.pygame = importlib.import_module('pygame')
self.pygame.init()
self.screen = self.pygame.display.set_mode(self.size)
self._logo = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "logo.png")), (self.road_width, self.road_width))
self._ew = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "east-west.png")), (self.road_width, self.road_width))
self._ns = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "north-south.png")), (self.road_width, self.road_width))
self.frame_delay = max(1, int(self.update_delay * 1000)) # delay between GUI frames in ms (min: 1)
self.agent_sprite_size = (32, 32)
self.primary_agent_sprite_size = (42, 42)
self.agent_circle_radius = 20 # radius of circle, when using simple representation
for agent in self.env.agent_states:
if agent.color == 'white':
agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.primary_agent_sprite_size)
else:
agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())
self.font = self.pygame.font.Font(None, 20)
self.paused = False
except ImportError as e:
self.display = False
print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
except Exception as e:
self.display = False
print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)
# Setup metrics to report
self.log_metrics = log_metrics
self.optimized = optimized
if self.log_metrics:
a = self.env.primary_agent
# Set log files
if a.learning:
if self.optimized: # Whether the user is optimizing the parameters and decay functions
self.log_filename = os.path.join("logs", "sim_improved-learning.csv")
self.table_filename = os.path.join("logs","sim_improved-learning.txt")
else:
self.log_filename = os.path.join("logs", "sim_default-learning.csv")
self.table_filename = os.path.join("logs","sim_default-learning.txt")
self.table_file = open(self.table_filename, 'wb')
else:
self.log_filename = os.path.join("logs", "sim_no-learning.csv")
self.log_fields = ['trial', 'testing', 'parameters', 'initial_deadline', 'final_deadline', 'net_reward', 'actions', 'success']
self.log_file = open(self.log_filename, 'wb')
self.log_writer = csv.DictWriter(self.log_file, fieldnames=self.log_fields)
self.log_writer.writeheader()
def run(self, tolerance=0.05, n_test=0):
""" Run a simulation of the environment.
'tolerance' is the minimum epsilon necessary to begin testing (if enabled)
'n_test' is the number of testing trials simulated
Note that the minimum number of training trials is always 20. """
self.quit = False
# Get the primary agent
a = self.env.primary_agent
total_trials = 1
testing = False
trial = 1
while True:
# Flip testing switch
if not testing:
if total_trials > 20: # Must complete minimum 20 training trials
if a.learning:
if a.epsilon < tolerance: # assumes epsilon decays to 0
testing = True
trial = 1
else:
testing = True
trial = 1
# Break if we've reached the limit of testing trials
else:
if trial > n_test:
break
# Pretty print to terminal
print
print "/-------------------------"
if testing:
print "| Testing trial {}".format(trial)
else:
print "| Training trial {}".format(trial)
print "\-------------------------"
print
self.env.reset(testing,trial)
self.current_time = 0.0
self.last_updated = 0.0
self.start_time = time.time()
while True:
try:
# Update current time
self.current_time = time.time() - self.start_time
# Handle GUI events
if self.display:
for event in self.pygame.event.get():
if event.type == self.pygame.QUIT:
self.quit = True
elif event.type == self.pygame.KEYDOWN:
if event.key == 27: # Esc
self.quit = True
elif event.unicode == u' ':
self.paused = True
if self.paused:
self.pause()
# Update environment
if self.current_time - self.last_updated >= self.update_delay:
self.env.step()
self.last_updated = self.current_time
# Render text
self.render_text(trial, testing)
# Render GUI and sleep
if self.display:
self.render(trial, testing)
self.pygame.time.wait(self.frame_delay)
except KeyboardInterrupt:
self.quit = True
finally:
if self.quit or self.env.done:
break
if self.quit:
break
# Collect metrics from trial
if self.log_metrics:
self.log_writer.writerow({
'trial': trial,
'testing': self.env.trial_data['testing'],
'parameters': self.env.trial_data['parameters'],
'initial_deadline': self.env.trial_data['initial_deadline'],
'final_deadline': self.env.trial_data['final_deadline'],
'net_reward': self.env.trial_data['net_reward'],
'actions': self.env.trial_data['actions'],
'success': self.env.trial_data['success']
})
# Trial finished
if self.env.success == True:
print "\nTrial Completed!"
print "Agent reached the destination."
else:
print "\nTrial Aborted!"
print "Agent did not reach the destination."
# Increment
total_trials = total_trials + 1
trial = trial + 1
# Clean up
if self.log_metrics:
if a.learning:
f = self.table_file
f.write("/-----------------------------------------\n")
f.write("| State-action rewards from Q-Learning\n")
f.write("\-----------------------------------------\n\n")
for state in a.Q:
f.write("{}\n".format(state))
for action, reward in a.Q[state].iteritems():
f.write(" -- {} : {:.2f}\n".format(action, reward))
f.write("\n")
self.table_file.close()
self.log_file.close()
print "\nSimulation ended. . . "
# Report final metrics
if self.display:
self.pygame.display.quit() # shut down pygame
def render_text(self, trial, testing=False):
""" This is the non-GUI render display of the simulation.
Simulated trial data will be rendered in the terminal/command prompt. """
status = self.env.step_data
if status and status['waypoint'] is not None: # Continuing the trial
# Previous State
if status['state']:
print "Agent previous state: {}".format(status['state'])
else:
print "!! Agent state not been updated!"
# Result
if status['violation'] == 0: # Legal
if status['waypoint'] == status['action']: # Followed waypoint
print "Agent followed the waypoint {}. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['action'] == None:
if status['light'] == 'red': # Stuck at red light
print "Agent properly idled at a red light. (rewarded {:.2f})".format(status['reward'])
else:
print "Agent idled at a green light with oncoming traffic. (rewarded {:.2f})".format(status['reward'])
else: # Did not follow waypoint
print "Agent drove {} instead of {}. (rewarded {:.2f})".format(status['action'], status['waypoint'], status['reward'])
else: # Illegal
if status['violation'] == 1: # Minor violation
print "Agent idled at a green light with no oncoming traffic. (rewarded {:.2f})".format(status['reward'])
elif status['violation'] == 2: # Major violation
print "Agent attempted driving {} through a red light. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['violation'] == 3: # Minor accident
print "Agent attempted driving {} through traffic and cause a minor accident. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['violation'] == 4: # Major accident
print "Agent attempted driving {} through a red light with traffic and cause a major accident. (rewarded {:.2f})".format(status['action'], status['reward'])
# Time Remaining
if self.env.enforce_deadline:
time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
print "{:.0f}% of time remaining to reach destination.".format(time)
else:
print "Agent not enforced to meet deadline."
# Starting new trial
else:
a = self.env.primary_agent
print "Simulating trial. . . "
if a.learning:
print "epsilon = {:.4f}; alpha = {:.4f}".format(a.epsilon, a.alpha)
else:
print "Agent not set to learn."
def render(self, trial, testing=False):
""" This is the GUI render display of the simulation.
Supplementary trial data can be found from render_text. """
# Reset the screen.
self.screen.fill(self.bg_color)
# Draw elements
# * Static elements
# Boundary
self.pygame.draw.rect(self.screen, self.boundary, ((self.env.bounds[0] - self.env.hang)*self.env.block_size, (self.env.bounds[1]-self.env.hang)*self.env.block_size, (self.env.bounds[2] + self.env.hang/3)*self.env.block_size, (self.env.bounds[3] - 1 + self.env.hang/3)*self.env.block_size), 4)
for road in self.env.roads:
# Road
self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
# Center line
self.pygame.draw.line(self.screen, self.line_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), 2)
for intersection, traffic_light in self.env.intersections.iteritems():
self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), self.road_width/2)
if traffic_light.state: # North-South is open
self.screen.blit(self._ns,
self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2), 2)
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size + self.road_width/2), 2)
else:
self.screen.blit(self._ew,
self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), 2)
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), 2)
# * Dynamic elements
self.font = self.pygame.font.Font(None, 20)
for agent, state in self.env.agent_states.iteritems():
# Compute precise agent location here (back from the intersection some)
agent_offset = (2 * state['heading'][0] * self.agent_circle_radius + self.agent_circle_radius * state['heading'][1] * 0.5, \
2 * state['heading'][1] * self.agent_circle_radius - self.agent_circle_radius * state['heading'][0] * 0.5)
agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
agent_color = self.colors[agent.color]
if hasattr(agent, '_sprite') and agent._sprite is not None:
# Draw agent sprite (image), properly rotated
rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
self.screen.blit(rotated_sprite,
self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
agent._sprite_size[0], agent._sprite_size[1]))
else:
# Draw simple agent (circle with a short line segment poking out to indicate heading)
self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)
if state['destination'] is not None:
self.screen.blit(self._logo,
self.pygame.rect.Rect(state['destination'][0] * self.env.block_size - self.road_width/2, \
state['destination'][1]*self.env.block_size - self.road_width/2, \
state['destination'][0]*self.env.block_size + self.road_width/2, \
state['destination'][1]*self.env.block_size + self.road_width/2))
# * Overlays
self.font = self.pygame.font.Font(None, 50)
if testing:
self.screen.blit(self.font.render("Testing Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))
else:
self.screen.blit(self.font.render("Training Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))
self.font = self.pygame.font.Font(None, 30)
# Status text about each step
status = self.env.step_data
if status:
# Previous State
if status['state']:
self.screen.blit(self.font.render("Previous State: {}".format(status['state']), True, self.colors['white'], self.bg_color), (350, 10))
if not status['state']:
self.screen.blit(self.font.render("!! Agent state not updated!", True, self.colors['maroon'], self.bg_color), (350, 10))
# Action
if status['violation'] == 0: # Legal
if status['action'] == None:
self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
else:
self.screen.blit(self.font.render("Agent drove {}. (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
else: # Illegal
if status['action'] == None:
self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
else:
self.screen.blit(self.font.render("{} attempted (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
# Result
if status['violation'] == 0: # Legal
if status['waypoint'] == status['action']: # Followed waypoint
self.screen.blit(self.font.render("Agent followed the waypoint!", True, self.colors['dgreen'], self.bg_color), (350, 70))
elif status['action'] == None:
if status['light'] == 'red': # Stuck at a red light
self.screen.blit(self.font.render("Agent idled at a red light!", True, self.colors['dgreen'], self.bg_color), (350, 70))
else:
self.screen.blit(self.font.render("Agent idled at a green light with oncoming traffic.", True, self.colors['mustard'], self.bg_color), (350, 70))
else: # Did not follow waypoint
self.screen.blit(self.font.render("Agent did not follow the waypoint.", True, self.colors['mustard'], self.bg_color), (350, 70))
else: # Illegal
if status['violation'] == 1: # Minor violation
self.screen.blit(self.font.render("There was a green light with no oncoming traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 2: # Major violation
self.screen.blit(self.font.render("There was a red light with no traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 3: # Minor accident
self.screen.blit(self.font.render("There was traffic with right-of-way.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 4: # Major accident
self.screen.blit(self.font.render("There was a red light with traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
# Time Remaining
if self.env.enforce_deadline:
time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
self.screen.blit(self.font.render("{:.0f}% of time remaining to reach destination.".format(time), True, self.colors['black'], self.bg_color), (350, 100))
else:
self.screen.blit(self.font.render("Agent not enforced to meet deadline.", True, self.colors['black'], self.bg_color), (350, 100))
# Denote whether a trial was a success or failure
if (state['destination'] != state['location'] and state['deadline'] > 0) or (self.env.enforce_deadline is not True and state['destination'] != state['location']):
self.font = self.pygame.font.Font(None, 40)
if self.env.success == True:
self.screen.blit(self.font.render("Previous Trial: Success", True, self.colors['dgreen'], self.bg_color), (10, 50))
if self.env.success == False:
self.screen.blit(self.font.render("Previous Trial: Failure", True, self.colors['maroon'], self.bg_color), (10, 50))
if self.env.primary_agent.learning:
self.font = self.pygame.font.Font(None, 22)
self.screen.blit(self.font.render("epsilon = {:.4f}".format(self.env.primary_agent.epsilon), True, self.colors['black'], self.bg_color), (10, 80))
self.screen.blit(self.font.render("alpha = {:.4f}".format(self.env.primary_agent.alpha), True, self.colors['black'], self.bg_color), (10, 95))
# Reset status text
else:
self.pygame.rect.Rect(350, 10, self.width, 200)
self.font = self.pygame.font.Font(None, 40)
self.screen.blit(self.font.render("Simulating trial. . .", True, self.colors['white'], self.bg_color), (400, 60))
# Flip buffers
self.pygame.display.flip()
def pause(self):
""" When the GUI is enabled, this function will pause the simulation. """
abs_pause_time = time.time()
self.font = self.pygame.font.Font(None, 30)
pause_text = "Simulation Paused. Press any key to continue. . ."
self.screen.blit(self.font.render(pause_text, True, self.colors['red'], self.bg_color), (400, self.height - 30))
self.pygame.display.flip()
print pause_text
while self.paused:
for event in self.pygame.event.get():
if event.type == self.pygame.KEYDOWN:
self.paused = False
self.pygame.time.wait(self.frame_delay)
self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (400, self.height - 30))
self.start_time += (time.time() - abs_pause_time)
|
[
"atrij.singhal@gmail.com"
] |
atrij.singhal@gmail.com
|
9fe7a328b27380a9afc1f19106fa9edd8aa1033c
|
21208873652ce9a35035801cea488004e337b07b
|
/data_loader/__init__.py
|
784c4dc41287ec0e8680637c3b93983f20eae44f
|
[
"Apache-2.0"
] |
permissive
|
zlszhonglongshen/crnn.pytorch
|
55321a6764a6143be7ab9d2c6b3bcafcdd9470e7
|
bf7a7c62376eee93943ca7c68e88e3d563c09aa8
|
refs/heads/master
| 2022-11-07T22:57:28.983335
| 2020-06-19T03:01:35
| 2020-06-19T03:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
# -*- coding: utf-8 -*-
# @Time : 18-11-16 下午5:46
# @Author : zhoujun
import copy
from torch.utils.data import DataLoader
from torchvision import transforms
def get_dataset(data_path, module_name, transform, dataset_args):
"""
获取训练dataset
:param data_path: dataset文件列表,每个文件内以如下格式存储 ‘path/to/img\tlabel’
:param module_name: 所使用的自定义dataset名称,目前只支持data_loaders.ImageDataset
:param transform: 该数据集使用的transforms
:param dataset_args: module_name的参数
:return: 如果data_path列表不为空,返回对应的Dataset对象,否则None
"""
from . import dataset
s_dataset = getattr(dataset, module_name)(transform=transform, data_path=data_path, **dataset_args)
return s_dataset
def get_transforms(transforms_config):
tr_list = []
for item in transforms_config:
if 'args' not in item:
args = {}
else:
args = item['args']
cls = getattr(transforms, item['type'])(**args)
tr_list.append(cls)
tr_list = transforms.Compose(tr_list)
return tr_list
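
# Illustrative transforms_config (hypothetical values), which would build
# transforms.Compose([transforms.Resize(size=(32, 100)), transforms.ToTensor()]):
# transforms_config = [
#     {'type': 'Resize', 'args': {'size': (32, 100)}},
#     {'type': 'ToTensor'},
# ]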
def get_dataloader(module_config, num_label):
if module_config is None:
return None
config = copy.deepcopy(module_config)
dataset_args = config['dataset']['args']
dataset_args['num_label'] = num_label
    if 'transforms' in dataset_args:
        img_transforms = get_transforms(dataset_args.pop('transforms'))
    else:
        img_transforms = None
    # build the dataset(s)
dataset_name = config['dataset']['type']
data_path_list = dataset_args.pop('data_path')
if 'data_ratio' in dataset_args:
data_ratio = dataset_args.pop('data_ratio')
else:
data_ratio = [1.0]
_dataset_list = []
for data_path in data_path_list:
        _dataset_list.append(get_dataset(data_path=data_path, module_name=dataset_name, dataset_args=dataset_args, transform=img_transforms))
    if len(data_ratio) > 1 and len(data_ratio) == len(_dataset_list):  # data_ratio was already popped from dataset_args above
from . import dataset
loader = dataset.Batch_Balanced_Dataset(dataset_list=_dataset_list, ratio_list=data_ratio, loader_args=config['loader'])
else:
_dataset = _dataset_list[0]
loader = DataLoader(dataset=_dataset, **config['loader'])
loader.dataset_len = len(_dataset)
return loader
|
[
"572459439@qq.com"
] |
572459439@qq.com
|
b8954b6cea35abb939ed06c8276b23e8b81f83d3
|
b2e340f22a7f613dc33ea361ba87a393d65b723c
|
/LogicAnalyzer/config/config.py
|
f19d2b4e3d649ece283274df9b734d2dc8094f99
|
[
"MIT"
] |
permissive
|
CospanDesign/logic-analyzer
|
6369cfc423f3fae050f9ab784a6ae94003422654
|
284ea339c001b4845a46fcb0672511487271c9c3
|
refs/heads/master
| 2021-01-20T18:58:53.477152
| 2016-06-24T02:22:04
| 2016-06-24T02:22:04
| 61,488,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,335
|
py
|
import logging
import json
import sys
TRIGGER = "trigger"
TRIGGER_MASK = "trigger_mask"
TRIGGER_EDGE = "trigger_edge"
TRIGGER_BOTH_EDGE = "both_edges"
TRIGGER_REPEAT = "repeat"
TRIGGER_AFTER = "trigger_after"
CAPABILITY_NAMES = [
TRIGGER,
TRIGGER_MASK,
TRIGGER_EDGE,
TRIGGER_BOTH_EDGE,
TRIGGER_REPEAT,
TRIGGER_AFTER
]
CALLBACK_START = "start"
CALLBACK_STOP = "stop"
CALLBACK_FORCE = "force"
CALLBACK_UPDATE = "update"
CALLBACK_GET_SIZE = "get_size"
CALLBACK_CLOSE = "close"
CALLBACK_NAMES = [
CALLBACK_START,
CALLBACK_STOP,
CALLBACK_FORCE,
CALLBACK_UPDATE,
CALLBACK_GET_SIZE,
CALLBACK_CLOSE
]
class Config(object):
@staticmethod
def get_name():
return "Invalid Config, make your own!!"
def __init__(self):
self.log = logging.getLogger("LAX")
self.caps = {}
self.callbacks = {}
self.channels = []
for name in CAPABILITY_NAMES:
self.caps[name] = None
for name in CALLBACK_NAMES:
self.callbacks[name] = None
def get_channel_dict(self):
"""
Return a dictionary that maps names to channel(s)
"""
return self.channels
def get_capabilities(self):
"""
Return a list of capabilities (strings) that this device supports
"""
names = []
for name in self.caps:
if self.caps[name] is not None:
names.append(name)
return names
def has_capability(self, name):
"""
Return true if the device has the capabilities
"""
return self.caps[name] is not None
def get_value(self, name):
"Get the value of a capability"
if not self.has_capability(name):
raise AssertionError("LAX Does not have capability")
else:
return self.caps[name]
def set_callback(self, name, func):
self.log.debug("Setting callback for: %s" % name)
self.callbacks[name] = func
def ready(self):
"""The controller tells the config interface it's ready"""
raise AssertionError("%s not implemented" % sys._getframe().f_code.co_name)
def captured(self):
"""callback when capture occurs"""
raise AssertionError("%s not implemented" % sys._getframe().f_code.co_name)
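
# Minimal subclass sketch (hypothetical analyzer, illustrative only):
# class DemoConfig(Config):
#     @staticmethod
#     def get_name():
#         return "Demo Analyzer"
#     def ready(self):
#         self.log.debug("controller is ready")
#     def captured(self):
#         self.log.debug("capture complete")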
|
[
"cospan@gmail.com"
] |
cospan@gmail.com
|
3f88cdeb167581de419c97c7d69a29b54ad556fd
|
c0c84529d07551bd6cac4fce3bbb44bb51e25ff1
|
/CO1PG15.py
|
25dd00b91fdba7afe96ab964fb945cf9cc1f1e42
|
[] |
no_license
|
Amalajoy/ProgrammingLab-Amala
|
1c73d282afe922be4eb5541a34b525643e55c7e5
|
9229a764b40a589a19b5ee7e17e49c3b9b201d0d
|
refs/heads/main
| 2023-03-13T02:58:36.138084
| 2021-02-17T18:34:36
| 2021-02-17T18:34:36
| 321,900,750
| 1
| 0
| null | 2021-01-11T07:26:15
| 2020-12-16T07:17:29
| null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
color_list_1 = set(["White", "Violet", "Indigo","Blue"])
color_list_2 = set(["Blue", "Green","Yellow","White"])
print(color_list_1.difference(color_list_2))
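# Expected output (set ordering may vary): {'Violet', 'Indigo'}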
|
[
"noreply@github.com"
] |
noreply@github.com
|
6063109ef967c55b97de6c5daf44a5c8e88ac02e
|
11cfe900a2cd2363bca41c6e9fa45710e7c578cc
|
/backend/admin.py
|
b4bafb653197350b06e2e7238d5f15c9180d8a59
|
[] |
no_license
|
RENZOje/remote_learning
|
2dd5a353733a49a080769740afad0ffe513731e5
|
8ce4ee4b2a832bea921be94f630101434024ec06
|
refs/heads/master
| 2023-06-10T21:31:29.423609
| 2021-06-11T15:58:54
| 2021-06-11T15:58:54
| 347,067,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from django.contrib import admin
from django import forms
from .models import *
from ckeditor_uploader.widgets import CKEditorUploadingWidget
# Register your models here.
admin.site.register(Quiz)
admin.site.register(Grade)
admin.site.register(Teacher)
admin.site.register(Section)
admin.site.register(Student)
admin.site.register(Course)
admin.site.register(Group_custom)
admin.site.register(Assignment)
admin.site.register(UploadAssignment)
class AnswerInline(admin.TabularInline):
model = Answer
class QuestionAdmin(admin.ModelAdmin):
inlines = [AnswerInline]
admin.site.register(Question, QuestionAdmin)
admin.site.register(Answer)
admin.site.register(ResultQuiz)
admin.site.register(ResultAssignment)
class ArticleAdminForm(forms.ModelForm):
description = forms.CharField(widget=CKEditorUploadingWidget())
class Meta:
model = Article
fields = '__all__'
class ArticleAdmin(admin.ModelAdmin):
    form = ArticleAdminForm
admin.site.register(Article, ArticleAdmin)
|
[
"RENZOje@users.noreply.github.com"
] |
RENZOje@users.noreply.github.com
|
b8b31f2f8c5f2ae17653cf45b579c862992df144
|
f5ba09db506abc4d356ad7b86fdb5ffa88de61dd
|
/homework/HW3.py
|
041df4deff63f18a6cdddb586f5fc477b69eb366
|
[] |
no_license
|
bmlee-99/my_project
|
698367a9ca7aed0bb765a326e9bd2e213f93be6a
|
b46ad8f2c3ea3bb995b124d2ba501a8f3aec86d2
|
refs/heads/main
| 2023-02-20T15:50:42.085015
| 2021-01-21T10:35:42
| 2021-01-21T10:35:42
| 322,453,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.dbsparta
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&rtm=N&ymd=20200713', headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
trs = soup.select('#body-content > div.newest-list > div > table > tbody > tr')
for tr in trs:
rank = tr.select_one('td.number').text[0:3].strip()
    # this could also be done with text.split()[0]
title = tr.select_one('td.info > a.title.ellipsis').text.strip()
artist = tr.select_one('td.info > a.artist.ellipsis').text
doc = {
'rank': rank,
'title':title,
'artist':artist
}
    # build the document as a dictionary
db.genieranking.insert_one(doc)
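# Illustrative read-back of the documents inserted above (not part of the
# original homework). Note `rank` was stored as a string, so this sort is
# lexicographic rather than numeric.
for song in db.genieranking.find().sort('rank', 1):
    print(song['rank'], song['title'], song['artist'])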
|
[
"bm.lee@erpper.com"
] |
bm.lee@erpper.com
|
aa5eece40af1223be935846770e0a2e01e7a3ec4
|
732a0dcf738d220b320369cdcb7bd49368fde3fe
|
/PythonWebServerTemplate/src/controller/index.py
|
7d942fc7e2404a9a0b9c534b381a9c8f990bbe46
|
[] |
no_license
|
RogerLai/Tools
|
03c50dfecf3f730210aa0e89a63ba5e34f686f4a
|
1e679ea4fa07cfe618d01037f171ad6bfcfc8ae0
|
refs/heads/master
| 2021-01-10T15:29:19.311679
| 2015-12-23T15:15:48
| 2015-12-23T15:15:48
| 44,651,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
#!/usr/bin/env Python
#coding=utf-8
'''
Created on Apr 13, 2015
@author: rogerlai
'''
import tornado.web
from common import process
from common.config import TEMPLATE_PATH, STATIC_HOST, WEB_SERVER_ADDR
loader = tornado.web.template.Loader(TEMPLATE_PATH)
class WebGetIndexHandler(tornado.web.RequestHandler):
    def get_handler(self):
param_dict = {}
param_dict['title'] = u'随机分组'
param_dict['static_host'] = STATIC_HOST
param_dict['web_server'] = WEB_SERVER_ADDR
response = loader.load("index.html").generate(params = param_dict)
return response
def get(self):
self.write(process.process_request(self.request, lambda: WebGetIndexHandler.get_handler(self), 'html'))
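# Illustrative wiring (assumed; the real route table lives elsewhere in this
# project): mount the handler and start the IOLoop when run directly.
if __name__ == '__main__':
    import tornado.ioloop
    app = tornado.web.Application([(r"/", WebGetIndexHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()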
|
[
"laixingrong@egeio.com"
] |
laixingrong@egeio.com
|
0fbd545a8ceab1cdd8f34f23bbb35f2cfe068d26
|
c08ffbe1a032611b32c865391b3a1043101f3182
|
/DigitalJournal.py
|
6414e25bb1ea873d6830a40bb5525b26985ee9f8
|
[] |
no_license
|
bseibo61/LastfmLocation
|
3d35e5f2c84b788713818f577e266f4a486a7b03
|
612ae59231dad20b104cd4c1f4f5335d0767a717
|
refs/heads/master
| 2022-08-04T14:13:36.083976
| 2022-07-31T22:30:43
| 2022-07-31T22:30:43
| 231,705,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,431
|
py
|
import pandas as pd
import glob, os, json
import datetime
import bisect
import itertools
import xml.etree.ElementTree as et
location_path = r'Data\GoogleData1-1-2020\Location History\Semantic Location History\2019'
smsPath = r'smsData'
all_files = glob.glob(os.path.join('', "*.json"))  # NOTE: unused below
def findkeys(node, kv):
if isinstance(node, list):
for i in node:
for x in findkeys(i, kv):
yield x
elif isinstance(node, dict):
if kv in node:
yield node[kv]
for j in node.values():
for x in findkeys(j, kv):
yield x
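# e.g. list(findkeys({'a': {'b': 1}, 'c': [{'b': 2}]}, 'b')) -> [1, 2]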
# print interesting bits of full_json (the match/case below needs Python 3.10+)
def printFinalJson(json):
for i in json:
match i['name']:
case 'text':
print("{0}: {1}".format(i['contactName'], i['body']))
case 'activitySegment':
print(i['activityType'])
case 'placeVisit':
print(i['placeName'])
case _:
raise Exception("Trying to print unrecognized name")
# Read location json
full_location_json = []
year_list = ['2020']
for year in year_list:
    # NOTE: the active glob ignores `year` and always reads location_path (the
    # 2019 folder); the commented line is the per-year variant.
    # for f in glob.glob(os.path.join(r'Data\GoogleData1-1-2020\Location History\Semantic Location History\{0}'.format(year), "*.json")):
    for f in glob.glob(os.path.join(location_path, "*.json")):
with open(f, encoding="utf8") as i:
full_location_json.append(json.load(i))
# Read lastFM csv
music_df = pd.read_csv(r'Data/lastfm.csv', names=['artist', 'album', 'song', 'date'])
# Read SMS xml
sms_xml = open('Data/SmsDataTest.xml', 'r', encoding="utf8").read()
root = et.XML(sms_xml)
sms_json = []
for child in root:
if child.tag == 'sms':
sms_json.append({
'name':'text', 'body': child.attrib['body'], 'startTime': child.attrib['date'], 'type': child.attrib['type'], 'contactName': child.attrib['contact_name']
})
# Convert lastfm time string to timestamp like google has
music_df['timestamp'] = music_df['date'].apply(lambda date: datetime.datetime.timestamp(datetime.datetime.strptime(date, '%d %b %Y %H:%M')))
temp_location_json = []
for month in full_location_json:
for activity in month['timelineObjects']:
segmentName = next(iter(activity))
startTime = list(findkeys(activity, 'startTimestampMs'))[0]
activityType = ''
placeAddress = ''
placeName = ''
# get activity type
if(segmentName == 'activitySegment'):
activityType = activity['activitySegment']['activityType']
if(segmentName == 'placeVisit'):
placeName = activity['placeVisit']['location']['name']
            # need .get because some places don't have addresses
placeAddress = activity['placeVisit']['location'].get('address')
temp_location_json.append(
{
'name': segmentName, 'activityType': activityType, 'startTime': startTime, 'placeName': placeName, 'placeAddress': placeAddress
})
full_location_json = temp_location_json
full_json = full_location_json + sms_json
# Sort months chronologically
full_json = sorted(full_json, key = lambda i: i['startTime'] )
# TODO add in markers for each new day, get lastfm songs in full_json, look into making a webpage to display everything
# more text analytics? like most used words with people, texting frequency with people ect
printFinalJson(full_json)
|
[
"brseibol@buffalo.edu"
] |
brseibol@buffalo.edu
|
f80411d77a0e6127a5f80503c618a43932da409a
|
29c39568880658d341ebc61202253a9a242327a8
|
/sku/views.py
|
169a8e90bf60b0bc666e1a53c19185a4d1b4457c
|
[] |
no_license
|
thomaslzb/warehouse
|
e524937203452ddee0761ed296c6fd6a1605b3ec
|
0dabda06e5898e0b82e63641f6055d229d83d4e8
|
refs/heads/main
| 2023-04-30T12:06:30.488162
| 2021-05-20T07:09:28
| 2021-05-20T07:09:28
| 287,015,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,003
|
py
|
import datetime
import math
import xlrd
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponseRedirect
from django.shortcuts import render, reverse
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django.db import transaction
from menu.views import get_user_grant_list
from quote.models import EuroCountry
from quote.public_func import parcel
from .forms import SkuUKForm, SkuEuroForm, SkuForm
from .models import Sku, SkuFileUpload
MY_MENU_LOCAL = 'MY_SKU'
def valid_file(req):
error = ''
    if not len(req.FILES):  # check whether a file was selected
        error = 'Must select a file to upload.'
try:
uploaded_file = req.FILES['document']
        # use the file extension to check whether the selected file is an Excel file
        if not uploaded_file.name.split('.')[-1].upper() in ['XLS', 'XLSX']:
            error = 'Only Excel files can be uploaded.'
        # check whether the selected file is larger than 5 MB (1 MB = bytes / 1000000)
        if uploaded_file.size / 1000000 > 5:
            error = 'File size = ' + format(uploaded_file.size / 1000000, "4.2") + 'M. File size cannot be more than 5M.'
except:
        error = 'Must select a file to upload.'
return error
def valid_excel_data(excel_table):
error = False
    n_rows = excel_table.nrows  # number of rows
for i in range(1, n_rows):
rowValues = excel_table.row_values(i)
try:
if float(rowValues[2]) <= 0:
error = True
if float(rowValues[3]) <= 0:
error = True
if float(rowValues[4]) <= 0:
error = True
if float(rowValues[5]) <= 0:
error = True
except:
error = True
if error:
break
return error
class SkuCreateView(CreateView):
model = Sku
form_class = SkuForm
template_name = 'sku_create.html'
success_url = '/sku/sku-list'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
    def form_invalid(self, form):  # page rendered when the submitted form fails validation
response = super().form_invalid(form)
return response
class SkuSaveAndAnotherView(SkuCreateView):
success_url = '/sku/add'
class SkuUpdateView(UpdateView):
model = Sku
form_class = SkuForm
template_name = 'sku_edit.html'
success_url = '/sku/sku-list'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
    def form_invalid(self, form):  # page rendered when the submitted form fails validation
response = super().form_invalid(form)
return response
class SkuListView(ListView):
model = Sku
template_name = 'sku_list.html'
paginate_by = 10
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
def get_queryset(self):
query_status = self.request.GET.get('status')
query_sku = self.request.GET.get('s_sku')
query_product = self.request.GET.get('s_product')
if query_status or query_sku or query_product:
if query_status == '':
return Sku.objects.filter(sku_no__icontains=query_sku,
sku_name__icontains=query_product,
custom_id=self.request.user.id,
)
else:
return Sku.objects.filter(is_ok__exact=query_status, sku_no__icontains=query_sku,
sku_name__icontains=query_product,
custom_id=self.request.user.id,
)
else:
return Sku.objects.filter(custom_id=self.request.user.id)
class SkuUKDetail(DetailView):
model = Sku
template_name = 'sku_detail_uk.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
class SkuEuroDetail(DetailView):
model = Sku
template_name = 'sku_detail_euro.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
all_euro_queryset = EuroCountry.objects.all().order_by('country')
context['all_euro'] = all_euro_queryset
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
class SkuQuoteUK(View):
def post(self, request, slug):
sku_uk_form = SkuUKForm(request.POST)
sku_queryset = Sku.objects.filter(id__exact=slug)
if sku_uk_form.is_valid():
length = int(math.ceil(sku_queryset[0].sku_length))
width = int(math.ceil(sku_queryset[0].sku_width))
high = int(math.ceil(sku_queryset[0].sku_high))
            # put length, width, high in the correct order: length > width > high
list_sort = [length, width, high]
list_sort.sort()
high = list_sort[0]
width = list_sort[1]
length = list_sort[2]
is_uk = True
            weight = math.ceil(sku_queryset[0].sku_weight)
qty = int(request.POST.get("qty", 0))
postcode = request.POST.get("postcode", "").upper()
address_type = request.POST.get("addresstype", "").upper()
user_id = request.user.id
company_code = 'HERM'
l_hermes = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
company_code = 'PASC'
l_pacelforce = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
company_code = 'DHL'
l_dhl = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
company_code = 'DPD'
l_dpd = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
company_code = 'UPS'
l_ups = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk)
if (not l_hermes[10]) and (not l_pacelforce[10]) and (not l_dhl[10]) and (not l_dpd[10]) and (
not l_ups[10]):
return render(request, 'quote_error.html', {'go': 'UK',
'length': length,
'width': width,
'high': high,
'weight': weight,
'qty': qty,
'postcode': postcode,
'address_type': address_type,
"quote_uk_form": sku_uk_form,
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'sku_no': sku_queryset[0].sku_no,
'sku_name': sku_queryset[0].sku_name,
})
l_hermes = l_hermes[:-1]
l_pacelforce = l_pacelforce[:-1]
l_dhl = l_dhl[:-1]
l_dpd = l_dpd[:-1]
l_ups = l_ups[:-1]
return render(request, 'list_price.html', {
'hermes': l_hermes,
'parcelforce': l_pacelforce,
'dhl': l_dhl,
'dpd': l_dpd,
'ups': l_ups,
'length': length,
'width': width,
'high': high,
'weight': weight,
'qty': qty,
'postcode': postcode,
'address_type': address_type,
'is_uk': is_uk,
'now': datetime.datetime.now(),
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'sku_no': sku_queryset[0].sku_no,
'sku_name': sku_queryset[0].sku_name,
})
return render(request, "sku_detail_uk.html", {
'sku_uk_form': sku_uk_form,
'object': sku_queryset[0],
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
})
class SkuQuoteEURO(View):
def post(self, request, slug):
all_euro = EuroCountry.objects.all().filter(belong='EURO')
sku_euro_form = SkuEuroForm(request.POST)
sku_queryset = Sku.objects.filter(id__exact=slug)
if sku_euro_form.is_valid():
length = int(math.ceil(sku_queryset[0].sku_length))
width = int(math.ceil(sku_queryset[0].sku_width))
high = int(math.ceil(sku_queryset[0].sku_high))
            # put length, width, high in the correct order: length > width > high
list_sort = [length, width, high]
list_sort.sort()
high = list_sort[0]
width = list_sort[1]
length = list_sort[2]
is_uk = False
            weight = math.ceil(sku_queryset[0].sku_weight)
qty = int(request.POST.get("qty", 0))
postcode = request.POST.get("euro", "")
address_type = request.POST.get("addresstype", "").upper()
user_id = request.user.id
company_code = 'HERM'
l_hermes = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
company_code = 'PASC'
l_pacelforce = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
company_code = 'DHL'
l_dhl = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
company_code = 'DPD'
l_dpd = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
company_code = 'UPS'
l_ups = parcel(company_code, length, width, high, weight, postcode, qty, user_id, is_uk, )
if (not l_hermes[10]) and (not l_pacelforce[10]) \
and (not l_dhl[10]) and (not l_dpd[10]) and (not l_ups[10]):
return render(request, 'quote_error.html', {'go': 'EURO',
'length': length,
'width': width,
'high': high,
'weight': weight,
'qty': qty,
'postcode': postcode,
'address_type': address_type,
"quote_uk_form": sku_euro_form,
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'sku_no': sku_queryset[0].sku_no,
'sku_name': sku_queryset[0].sku_name,
})
l_hermes = l_hermes[:-1]
l_pacelforce = l_pacelforce[:-1]
l_dhl = l_dhl[:-1]
l_dpd = l_dpd[:-1]
l_ups = l_ups[:-1]
return render(request, 'list_price.html', {
'hermes': l_hermes,
'parcelforce': l_pacelforce,
'dhl': l_dhl,
'dpd': l_dpd,
'ups': l_ups,
'length': length,
'width': width,
'high': high,
'weight': weight,
'qty': qty,
'postcode': postcode,
'address_type': address_type,
'is_uk': is_uk,
'now': datetime.datetime.now(),
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'sku_no': sku_queryset[0].sku_no,
'sku_name': sku_queryset[0].sku_name,
})
return render(request, "sku_detail_euro.html", {
'sku_uk_form': sku_euro_form,
'object': sku_queryset[0],
'all_euro': all_euro,
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
})
class SkuDeleteView(DeleteView):
model = Sku
template_name = "sku_confirm_delete.html"
def get_object(self, queryset=None):
""" Hook to ensure object is owned by request.user. """
obj = super(SkuDeleteView, self).get_object()
# if not obj.op_user == self.request.user.id:
# raise Http404
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
def get_success_url(self):
return reverse('sku:sku-list')
class SkuFileUploadView(View):
def get(self, request):
return render(request, 'sku_upload.html', {
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
})
def post(self, request):
error = valid_file(request)
if error:
return render(request, 'sku_upload.html', {
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'error': error,
})
uploaded_file = request.FILES['document']
excel_data = xlrd.open_workbook(filename=None, file_contents=uploaded_file.read())
table = excel_data.sheet_by_index(0)
        n_rows = table.nrows  # number of rows
if valid_excel_data(table):
            error = 'Upload failure. length/width/high/weight must be greater than zero. ' \
                    'There are errors in the uploaded file - ' + \
                    uploaded_file.name + '. '
return render(request, 'sku_upload.html', {
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'error': error,
})
try:
with transaction.atomic():
for i in range(1, n_rows):
rowValues = table.row_values(i)
Sku.objects.create(sku_no=rowValues[0],
sku_name=rowValues[1],
sku_length=rowValues[2],
sku_width=rowValues[3],
sku_high=rowValues[4],
sku_weight=rowValues[5],
is_ok='1',
custom_id=request.user.id
)
except Exception as e:
            error = 'Sku No cannot be duplicated. There are errors in the uploaded file - ' + \
                    uploaded_file.name + '. '
return render(request, 'sku_upload.html', {
'menu_active': MY_MENU_LOCAL,
'menu_grant': get_user_grant_list(request.user.id),
'error': error,
})
return HttpResponseRedirect(reverse('sku:sku-list'))
class UserListView(ListView):
model = Sku
template_name = 'sku_list.html'
paginate_by = 10
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['menu_active'] = MY_MENU_LOCAL
context['menu_grant'] = get_user_grant_list(self.request.user.id)
return context
|
[
"thomaslzbuk@gmail.com"
] |
thomaslzbuk@gmail.com
|
2fea730fbc2ed8ead8cdf20b0fe1527890efd6c7
|
eee6dd18897d3118f41cb5e6f93f830e06fbfe2f
|
/venv/lib/python3.6/site-packages/scipy/sparse/bsr.py
|
1627132c92f71edcadf2178965702a5a2e4adba9
|
[] |
no_license
|
georgeosodo/ml
|
2148ecd192ce3d9750951715c9f2bfe041df056a
|
48fba92263e9295e9e14697ec00dca35c94d0af0
|
refs/heads/master
| 2020-03-14T11:39:58.475364
| 2018-04-30T13:13:01
| 2018-04-30T13:13:01
| 131,595,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,024
|
py
|
"""Compressed Block Sparse Row matrix format"""
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats, spmatrix
from .sputils import isshape, getdtype, to_native, upcast, get_index_dtype
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
bsr_matmat_pass2, bsr_transpose, bsr_sort_indices)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
sub matrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
format = 'bsr'
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if isspmatrix_bsr(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.tobsr(blocksize=blocksize)
self._set_self(arg1)
elif isinstance(arg1,tuple):
if isshape(arg1):
# it's a tuple of matrix dimensions (M,N)
self.shape = arg1
M,N = self.shape
# process blocksize
if blocksize is None:
blocksize = (1,1)
else:
if not isshape(blocksize):
raise ValueError('invalid blocksize=%s' % blocksize)
blocksize = tuple(blocksize)
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
raise ValueError('shape must be multiple of blocksize')
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
self.indices = np.zeros(0, dtype=idx_dtype)
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
elif len(arg1) == 2:
# (data,(row,col)) format
from .coo import coo_matrix
self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))
elif len(arg1) == 3:
# (data,indices,indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = 1
if shape is not None:
maxval = max(shape)
if blocksize is not None:
maxval = max(maxval, max(blocksize))
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError('unrecognized bsr_matrix constructor usage')
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
self._set_self(arg1)
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
M = len(self.indptr) - 1
N = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
R,C = self.blocksize
self.shape = (M*R,N*C)
if self.shape is None:
if shape is None:
# TODO infer shape here
raise ValueError('need to infer shape')
else:
self.shape = shape
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
*Parameters*:
full_check:
True - rigorous check, O(N) operations : default
False - basic check, O(1) operations
"""
M,N = self.shape
R,C = self.blocksize
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indices, self.indptr))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError("indices, and indptr should be 1-D")
if self.data.ndim != 3:
raise ValueError("data should be 3-D")
# check index pointer
if (len(self.indptr) != M//R + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), M//R + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= N//C:
raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
if self.indices.min() < 0:
raise ValueError("column index values must be >= 0")
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices(check_first=False)
def _get_blocksize(self):
return self.data.shape[1:]
blocksize = property(fget=_get_blocksize)
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for BSR format")
R,C = self.blocksize
return int(self.indptr[-1] * R * C)
getnnz.__doc__ = spmatrix.getnnz.__doc__
def __repr__(self):
format = _formats[self.getformat()][1]
return ("<%dx%d sparse matrix of type '%s'\n"
"\twith %d stored elements (blocksize = %dx%d) in %s format>" %
(self.shape + (self.dtype.type, self.nnz) + self.blocksize +
(format,)))
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
M,N = self.shape
R,C = self.blocksize
y = np.empty(min(M,N), dtype=upcast(self.dtype))
_sparsetools.bsr_diagonal(M//R, N//C, R, C,
self.indptr, self.indices,
np.ravel(self.data), y)
return y
##########################
# NotImplemented methods #
##########################
def getdata(self,ind):
raise NotImplementedError
def __getitem__(self,key):
raise NotImplementedError
def __setitem__(self,key,val):
raise NotImplementedError
######################
# Arithmetic methods #
######################
def matvec(self, other):
return self * other
def matmat(self, other):
return self * other
def _mul_vector(self, other):
M,N = self.shape
R,C = self.blocksize
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
bsr_matvec(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
other, result)
return result
def _mul_multivector(self,other):
R,C = self.blocksize
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
bsr_matvecs(M//R, N//C, n_vecs, R, C,
self.indptr, self.indices, self.data.ravel(),
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
R,n = self.blocksize
# convert to this format
if isspmatrix_bsr(other):
C = other.blocksize[1]
else:
C = 1
from .csr import isspmatrix_csr
if isspmatrix_csr(other) and n == 1:
other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion
else:
other = other.tobsr(blocksize=(n,C))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=(M//R)*(N//C))
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
csr_matmat_pass1(M//R, N//C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
indptr)
bnnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=bnnz)
indptr = indptr.astype(idx_dtype)
indices = np.empty(bnnz, dtype=idx_dtype)
data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
bsr_matmat_pass2(M//R, N//C, R, C, n,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
np.ravel(self.data),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
data = data.reshape(-1,R,C)
# TODO eliminate zeros
return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix into Block Sparse Row Format.
With copy=False, the data/indices may be shared between this
matrix and the resultant bsr_matrix.
If blocksize=(R, C) is provided, it will be used for determining
block size of the bsr_matrix.
"""
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
def tocsr(self, copy=False):
return self.tocoo(copy=False).tocsr(copy=copy)
# TODO make this more efficient
tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
return self.tocoo(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=True):
"""Convert this matrix to COOrdinate format.
When copy=False the data array will be shared between
this matrix and the resultant coo_matrix.
"""
M,N = self.shape
R,C = self.blocksize
indptr_diff = np.diff(self.indptr)
if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
# Check for potential overflow
indptr_diff_limited = indptr_diff.astype(np.intp)
if np.any(indptr_diff_limited != indptr_diff):
raise ValueError("Matrix too big to convert")
indptr_diff = indptr_diff_limited
row = (R * np.arange(M//R)).repeat(indptr_diff)
row = row.repeat(R*C).reshape(-1,R,C)
row += np.tile(np.arange(R).reshape(-1,1), (1,C))
row = row.reshape(-1)
col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
col += np.tile(np.arange(C), (R,1))
col = col.reshape(-1)
data = self.data.reshape(-1)
if copy:
data = data.copy()
from .coo import coo_matrix
return coo_matrix((data,(row,col)), shape=self.shape)
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
R, C = self.blocksize
M, N = self.shape
NBLK = self.nnz//(R*C)
if self.nnz == 0:
return bsr_matrix((N, M), blocksize=(C, R),
dtype=self.dtype, copy=copy)
indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
indices = np.empty(NBLK, dtype=self.indices.dtype)
data = np.empty((NBLK, C, R), dtype=self.data.dtype)
bsr_transpose(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
indptr, indices, data.ravel())
return bsr_matrix((data, indices, indptr),
shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
R,C = self.blocksize
M,N = self.shape
mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks
nonzero_blocks = mask.nonzero()[0]
if len(nonzero_blocks) == 0:
return # nothing to do
self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
# modifies self.indptr and self.indices *in place*
_sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
self.indices, mask)
self.prune()
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
        This is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
R, C = self.blocksize
M, N = self.shape
# port of _sparsetools.csr_sum_duplicates
n_row = M // R
nnz = 0
row_end = 0
for i in range(n_row):
jj = row_end
row_end = self.indptr[i+1]
while jj < row_end:
j = self.indices[jj]
x = self.data[jj]
jj += 1
while jj < row_end and self.indices[jj] == j:
x += self.data[jj]
jj += 1
self.indices[nnz] = j
self.data[nnz] = x
nnz += 1
self.indptr[i+1] = nnz
self.prune() # nnz may have changed
self.has_canonical_format = True
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if self.has_sorted_indices:
return
R,C = self.blocksize
M,N = self.shape
bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
def _binopt(self, other, op, in_shape=None, out_shape=None):
"""Apply the binary operation fn to two sparse matrices."""
# Ideally we'd take the GCDs of the blocksize dimensions
# and explode self and other to match.
other = self.__class__(other, blocksize=self.blocksize)
# e.g. bsr_plus_bsr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
R,C = self.blocksize
max_bnnz = len(self.data) + len(other.data)
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=max_bnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(max_bnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(R*C*max_bnnz, dtype=np.bool_)
else:
data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
fn(self.shape[0]//R, self.shape[1]//C, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
actual_bnnz = indptr[-1]
indices = indices[:actual_bnnz]
data = data[:R*C*actual_bnnz]
if actual_bnnz < max_bnnz/2:
indices = indices.copy()
data = data.copy()
data = data.reshape(-1,R,C)
return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
# # to remove redudancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
return isinstance(x, bsr_matrix)
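# Quick construction sketch (illustrative, mirroring the class docstring):
# the blocksize (R, C) must evenly divide the shape (M, N).
# >>> data = np.ones((2, 2, 2)); indices = np.array([0, 1]); indptr = np.array([0, 1, 2])
# >>> bsr_matrix((data, indices, indptr), shape=(4, 4)).blocksize
# (2, 2)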
|
[
"georgeosodo2010@gmail.com"
] |
georgeosodo2010@gmail.com
|
acb4347eff7bdd262db2056c03fe1aae4543962b
|
30e2b17d954a73d6a6eade6ba12c6fdb3af57068
|
/HackerRanck_Python_Challenges/Solutions/stringClassif.py
|
5dcb1bc07c994dea19461e9dc93709d8759922a5
|
[] |
no_license
|
rock-feller/HackerRanck_Python_Challenges
|
9c8a1447af7ee50faba9a05875e4f9eb1345d1eb
|
35e176d0a498f537bf2b2f32907755c9a0f07a75
|
refs/heads/master
| 2020-07-02T08:59:51.201894
| 2019-08-09T14:09:35
| 2019-08-09T14:09:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
"""author = Rockefeller"""
import string
def unik(li):
    # return the unique elements of li, preserving first-seen order
    el = []
    for i in li:
        if i not in el:
            el.append(i)
    return el
def perm(x , y):
    # compare alphabet-index offsets between x and y to classify the pair
    al = string.ascii_lowercase
ind = []
ind2= []
s_x =unik(x)
s_y =unik(y)
for i in range(min(len(s_x) , len(s_y))):
ind.append(al.index(s_x[i]) - al.index(s_y[i]))
for x_i , y_i in zip(x , y):
ind2.append(al.index(x_i) - al.index(y_i))
if ind==unik(ind2):
print ("same class")
else:
print("different class")
|
[
"noreply@github.com"
] |
noreply@github.com
|
88e5196904ade4057d99bc9906f441c9cf1edb74
|
93257ad72659cf766e9d99fe24666f434c4ae40d
|
/users/migrations/0027_auto_20190814_1449.py
|
8885e223728fd46844deeb633bb1aac30e809e89
|
[] |
no_license
|
AkshatLal16/myblog
|
6ce01acabb91e75fe78b23a9b2ce7ed746fe2a08
|
66c71e16b8979711f5006d70895234dba034b7c5
|
refs/heads/master
| 2020-07-15T13:09:28.915594
| 2019-09-27T07:37:41
| 2019-09-27T07:37:41
| 205,569,274
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
# Generated by Django 2.2.1 on 2019-08-14 09:19
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0026_auto_20190813_1522'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='email',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.RemoveField(
model_name='profile',
name='password',
),
migrations.RemoveField(
model_name='profile',
name='username',
),
migrations.AddField(
model_name='profile',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='story',
name='date_posted',
field=models.DateTimeField(default=datetime.datetime(2019, 8, 14, 9, 19, 33, 292612, tzinfo=utc)),
),
]
|
[
"akshat@henryharvin.in"
] |
akshat@henryharvin.in
|
5a405a2cc937b4a389075317ae656d5cd0d8aaeb
|
02bf59df060b4a680d4c4cdbd6c8780952f02d48
|
/GetReferralCodeByDeviceId.py
|
26f93e4e59278934ac76108f332d96691d81f001
|
[] |
no_license
|
EdwardWuYiHsuan/Referral-Code
|
c874b020899cf356214b436d6948cd532a9d446e
|
88fb041b32eb9f494094dc1469aae2aa9f36e26f
|
refs/heads/main
| 2023-06-09T19:10:32.555450
| 2021-07-01T09:23:18
| 2021-07-01T09:23:18
| 370,632,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import datetime
import redis

endpoint = "elasticache-savedeviceidandreferralcode.akto78.ng.0001.apne1.cache.amazonaws.com"

def lambda_handler(event, context):
    print("[Info] Request : {}".format(event))
    timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
    deviceId = event.get("device_id")
    if deviceId is None:
        return {
            "code": "0001",
            "desc": "Invalid 'device_id'",
            "timestamp": timestamp
        }
    try:
        redisClient = redis.StrictRedis(host=endpoint, port=6379, db=0, socket_timeout=1)
        referralCode = redisClient.get(deviceId)
        if referralCode is None:
            return {
                "code": "0004",
                "desc": "Referral code not found",
                "timestamp": timestamp
            }
        # redis-py returns bytes; decode so the value is JSON-serializable
        referralCode = referralCode.decode('utf-8')
    except Exception:
        return {
            "code": "0005",
            "desc": "Failed to connect to redis",
            "timestamp": timestamp
        }
    return {
        "code": "0",
        "desc": "success",
        "timestamp": timestamp,
        "data": {
            "referral_code": referralCode
        }
    }
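# Illustrative local invocation (assumes a reachable redis at `endpoint`):
# print(lambda_handler({"device_id": "abc-123"}, None))
# -> {"code": "0", "desc": "success", "timestamp": ..., "data": {"referral_code": ...}}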
|
[
"edwardwu@xrex.io"
] |
edwardwu@xrex.io
|
02745dd02ec7954ea531da8ddfb292e43a976771
|
8a102033a266d39128e4b64aa0780cf67055e196
|
/1330.py
|
3fe9a718323d5727aeb4c2c1501dafb25b860ada
|
[] |
no_license
|
yuseungwoo/baekjoon
|
4dec0798b8689b9378121b9d178713c9cf14a53f
|
099031e2c4401e27edcdc05bd6c9e6a558b09bb9
|
refs/heads/master
| 2020-09-03T15:25:40.764723
| 2018-10-08T02:35:27
| 2018-10-08T02:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# coding: utf-8
a, b = map(int, input().split())
if a > b:
    print('>')
elif a < b:
    print('<')
else:
    print('==')
|
[
"blacksangi14@naver.com"
] |
blacksangi14@naver.com
|
fa8bf7a05bf66710fd0d1de3652d77a24a921dc4
|
732ea9c77c138ed29ac259618f3f75c2cfea7ae3
|
/ds/subset.py
|
36def3b8da5734584523f0155ee15379b968874e
|
[] |
no_license
|
sharmak/python
|
842ae452434247b62f2f43f25dce9ec88dc0ceff
|
3218a8bad7a02bfde622d922e9e3fe8d5359e20a
|
refs/heads/master
| 2020-04-28T00:28:11.692739
| 2015-11-29T17:13:17
| 2015-11-29T17:13:17
| 24,114,794
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 29 06:00:54 2014
@author: kishor
"""
# Subset problem
# Given a set find all the subset of the given set
# e.g. {1,2,3} => {{1},{2}, {3}, {1,2}, {1,3} {2,3}, {1,2,3}, {}}
# Solve the problem using backtracking
def is_subset_solution(n, k):
return n == k
def generate_subset_candidates(n, k):
# Kth element can be either present or not
# present in the subset solution
return [True, False]
def process_subset_solution(a, data):
values = list()
    for i in range(len(a)):  # range works on both Python 2 and 3 (was xrange)
if a[i]:
values.append(data[i])
print(values)
def subset_backtrack(a, n, k, data):
#print(n, k)
if is_subset_solution(n, k):
process_subset_solution(a, data)
else:
k = k + 1
#print(k)
#print(n)
candidates = generate_subset_candidates(n, k)
for c in candidates:
a[k-1] = c
subset_backtrack(a, n, k, data)
subset_backtrack([False,False,False], 3, 0, [1,2,3])
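# Expected output of the call above, in recursion order:
# [1, 2, 3]
# [1, 2]
# [1, 3]
# [1]
# [2, 3]
# [2]
# [3]
# []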
|
[
"kishor.iitr@gmail.com"
] |
kishor.iitr@gmail.com
|
d8f85667c90be108d54a8e925957d55331a8e7d5
|
5fc3a5fb73e4fef5a022f9f2fee83775af3d7b71
|
/ems-cloud-mgmt-sdk/python/setup.py
|
d71a3cde5bf7d1107297182b72a15b7f587676ae
|
[
"MIT"
] |
permissive
|
byblakeorriver/coinapi-sdk
|
f030267f4283a8ae9217217668bdbcfc75c2b89c
|
d786aed283c562b030a13d4efd3adbebe53d9b27
|
refs/heads/master
| 2023-03-10T21:31:38.381408
| 2023-02-23T04:54:38
| 2023-02-23T04:54:38
| 292,376,196
| 0
| 0
|
MIT
| 2020-12-31T17:03:42
| 2020-09-02T19:29:17
|
C++
|
UTF-8
|
Python
| false
| false
| 7,715
|
py
|
# coding: utf-8
"""
EMS - REST API
This section will provide necessary information about the `CoinAPI EMS REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540) ### Endpoints <table> <thead> <tr> <th>Deployment method</th> <th>Environment</th> <th>Url</th> </tr> </thead> <tbody> <tr> <td>Managed Cloud</td> <td>Production</td> <td>Use <a href=\"#ems-docs-sh\">Managed Cloud REST API /v1/locations</a> to get specific endpoints to each server site where your deployments span</td> </tr> <tr> <td>Managed Cloud</td> <td>Sandbox</td> <td><code>https://ems-gateway-aws-eu-central-1-dev.coinapi.io/</code></td> </tr> <tr> <td>Self Hosted</td> <td>Production</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> <tr> <td>Self Hosted</td> <td>Sandbox</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> </tbody> </table> ### Authentication If the software is deployed as `Self-Hosted` then API do not require authentication as inside your infrastructure, your company is responsible for the security and access controls. <br/><br/> If the software is deployed in our `Managed Cloud`, there are 2 methods for authenticating with us, you only need to use one: 1. Custom authorization header named `X-CoinAPI-Key` with the API Key 2. Query string parameter named `apikey` with the API Key 3. <a href=\"#certificate\">TLS Client Certificate</a> from the `Managed Cloud REST API` (/v1/certificate/pem endpoint) while establishing a TLS session with us. #### Custom authorization header You can authorize by providing additional custom header named `X-CoinAPI-Key` and API key as its value. Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY`, then the authorization header you should send to us will look like: <br/><br/> `X-CoinAPI-Key: 73034021-THIS-IS-SAMPLE-KEY` <aside class=\"success\">This method is recommended by us and you should use it in production environments.</aside> #### Query string authorization parameter You can authorize by providing an additional parameter named `apikey` with a value equal to your API key in the query string of your HTTP request. Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY` and that you want to request all balances, then your query string should look like this: <br/><br/> `GET /v1/balances?apikey=73034021-THIS-IS-SAMPLE-KEY` <aside class=\"notice\">Query string method may be more practical for development activities.</aside> # noqa: E501
The version of the OpenAPI document: v1
Contact: support@coinapi.io
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "openapi-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"certifi >= 14.5.14",
"frozendict ~= 2.3.4",
"python-dateutil ~= 2.7.0",
"setuptools >= 21.0.0",
"typing_extensions ~= 4.3.0",
"urllib3 ~= 1.26.7",
]
setup(
name=NAME,
version=VERSION,
description="EMS - REST API",
author="COINAPI LTD",
author_email="support@coinapi.io",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "EMS - REST API"],
python_requires=">=3.7",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="28961",
long_description="""\
This section will provide necessary information about the `CoinAPI EMS REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540) ### Endpoints <table> <thead> <tr> <th>Deployment method</th> <th>Environment</th> <th>Url</th> </tr> </thead> <tbody> <tr> <td>Managed Cloud</td> <td>Production</td> <td>Use <a href=\"#ems-docs-sh\">Managed Cloud REST API /v1/locations</a> to get specific endpoints to each server site where your deployments span</td> </tr> <tr> <td>Managed Cloud</td> <td>Sandbox</td> <td><code>https://ems-gateway-aws-eu-central-1-dev.coinapi.io/</code></td> </tr> <tr> <td>Self Hosted</td> <td>Production</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> <tr> <td>Self Hosted</td> <td>Sandbox</td> <td>IP Address of the <code>ems-gateway</code> container/excecutable in the closest server site to the caller location</td> </tr> </tbody> </table> ### Authentication If the software is deployed as `Self-Hosted` then API do not require authentication as inside your infrastructure, your company is responsible for the security and access controls. <br/><br/> If the software is deployed in our `Managed Cloud`, there are 2 methods for authenticating with us, you only need to use one: 1. Custom authorization header named `X-CoinAPI-Key` with the API Key 2. Query string parameter named `apikey` with the API Key 3. <a href=\"#certificate\">TLS Client Certificate</a> from the `Managed Cloud REST API` (/v1/certificate/pem endpoint) while establishing a TLS session with us. #### Custom authorization header You can authorize by providing additional custom header named `X-CoinAPI-Key` and API key as its value. Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY`, then the authorization header you should send to us will look like: <br/><br/> `X-CoinAPI-Key: 73034021-THIS-IS-SAMPLE-KEY` <aside class=\"success\">This method is recommended by us and you should use it in production environments.</aside> #### Query string authorization parameter You can authorize by providing an additional parameter named `apikey` with a value equal to your API key in the query string of your HTTP request. Assuming that your API key is `73034021-THIS-IS-SAMPLE-KEY` and that you want to request all balances, then your query string should look like this: <br/><br/> `GET /v1/balances?apikey=73034021-THIS-IS-SAMPLE-KEY` <aside class=\"notice\">Query string method may be more practical for development activities.</aside> # noqa: E501
"""
)
|
[
"support@coinapi.io"
] |
support@coinapi.io
|
bf91de8bc79c94d76bf93ea0cc534b567dc2e161
|
4d9bd7874fc5a4f2ec56bb172f4e93a9601c4c83
|
/main.py
|
4864dd4bbecc043b09c96f4fb427a06e03a0c031
|
[] |
no_license
|
liziniu/Model-Uncertainty-in-Neural-Networks
|
ff65009b3c165c4fd82efb9759cb26d41f914a2e
|
67c6042c52dd7e7a918ab42d34764bbb9a88c8a2
|
refs/heads/master
| 2020-05-04T00:26:47.315086
| 2019-04-06T03:19:47
| 2019-04-06T03:19:47
| 178,884,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
from model1.default import get_config
from model1.model import Model
from utli import load_data, get_session, update_para
import argparse
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=int, default=1)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--num_units", type=int, default=100)
parser.add_argument("--pi", type=float, default=0.25)
parser.add_argument("--mu1", type=float, default=0.0)
parser.add_argument("--std1", type=float, default=0.5)
parser.add_argument("--mu2", type=float, default=0.0)
parser.add_argument("--std2", type=float, default=1.5)
parser.add_argument("--train", action="store_true", default=False)
parser.add_argument("--load_path", type=str, default="logs/model1/")
return parser.parse_args()
def main(args):
sess = get_session()
default_para = get_config()
para = update_para(default_para, args)
model = Model(sess, para)
x_train, x_test, y_train, y_test = load_data()
x_train_ = x_train[:-5000]
y_train_ = y_train[:-5000]
x_valid = x_train[-5000:]
y_valid = y_train[-5000:]
if args.train:
model.train(x_train_, y_train_, x_valid, y_valid)
else:
model.load(args.load_path)
model.test(x_test, y_test)
if __name__ == "__main__":
args = arg_parse()
main(args)
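# Illustrative invocations (flag names taken from arg_parse above):
#   python main.py --train --epochs 50 --batch_size 64
#   python main.py --load_path logs/model1/   # evaluate a saved model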
|
[
"374387855@qq.com"
] |
374387855@qq.com
|
69fa3adebed46d0c5a9509edfcf765554631cdb7
|
a16d3c43e455298c371ff853e14acd8eea1db8fd
|
/test/test_seq_util.py
|
fe03b10b9322e58fb0cd788fb1afa0724283ba64
|
[] |
no_license
|
TyloRoberts/fosvis
|
a17eebe9dc29a589ac38c902bf1b06841b0a9e53
|
992535b2799dc04334b7be97f81b64e986dcd0cf
|
refs/heads/master
| 2023-02-19T00:38:36.931003
| 2021-01-21T22:19:05
| 2021-01-21T22:19:05
| 316,780,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,596
|
py
|
from fosvis import seq_util
import unittest
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import filecmp
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import difflib
# Done
class test_remove_too_small_contigs(unittest.TestCase):
def test_remove_too_small_contigs(self):
input_file = 'test/data_for_testing/Fosmid_Size_Selection_Tests/test_remove_too_small_contigs_input.fasta'
result = seq_util.remove_too_small_contigs(input_file, 100)
seq3_len_100 = SeqRecord(
Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
id="seq3_len_100",
name="seq3_len_100",
description="seq3_len_100")
seq4_len_101 = SeqRecord(
Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
id="seq4_len_101",
name="seq4_len_101",
description="seq4_len_101")
seq5_len_200 = SeqRecord(
Seq("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"),
id="seq5_len_200",
name="seq5_len_200",
description="seq5_len_200")
expected_result = [seq3_len_100, seq4_len_101, seq5_len_200]
self.assertEqual(len(result), len(expected_result))
self.assertTrue(result[0].seq == expected_result[0].seq)
self.assertTrue(result[1].seq == expected_result[1].seq)
self.assertTrue(result[2].seq == expected_result[2].seq)
self.assertTrue(result[0].id == expected_result[0].id)
self.assertTrue(result[1].id == expected_result[1].id)
self.assertTrue(result[2].id == expected_result[2].id)
# Done
# class test_write_seqs_to_file(unittest.TestCase):
#
# def test_write_seqs_to_file(self):
# file_to_write = 'test/data_for_testing/Fosmid_Size_Selection_Tests/write_seqs_to_file_actual_output.fasta'
# seq1 = SeqRecord(
# Seq("AAGGTTCC"),
# id="seq1",
# name="seq1",
# description="seq1")
# seq2 = SeqRecord(
# Seq("GGAACCTT"),
# id="seq2",
# name="seq2",
# description="seq2")
# seqs_to_write = [seq1,seq2]
#
# seq_util.write_seqs_to_file(seqs_to_write, file_to_write)
#
# expected_output_file = 'test/data_for_testing/Fosmid_Size_Selection_Tests/test_write_seqs_to_file_expected_output.fasta'
#
# self.assertTrue(filecmp.cmp(expected_output_file, file_to_write, shallow=False))
#
# os.remove(file_to_write)
# Done
class test_get_karyotype_data(unittest.TestCase):
def test_get_karyotype_data(self):
seq1_len_8 = SeqRecord(
Seq("AAGGTTCC"),
id="seq1_len_8",
name="seq1_len_8",
description="seq1_len_8")
seq2_len_18 = SeqRecord(
Seq("GGAACCTTGGAACCTT"),
id="seq1_len_8",
name="seq1_len_8",
description="seq1_len_8")
seqs = [seq1_len_8,seq2_len_18]
result = seq_util.get_karyotype_data(seqs)
expected_result_data = {'chr_prefix':['chr', 'chr'], '-prefix':['-', '-'], 'variable_name':['seq1_len_8', 'seq1_len_8'], 'diagram_label':['1', '2'], 'start':[1, 1], 'end':[9, 17], 'color':['rgb(120,120,120,0.4)', 'rgb(120,120,120,0.4)']}
expected_result_df = pd.DataFrame(expected_result_data)
self.assertEqual(assert_frame_equal(result, expected_result_df, check_dtype=False), None)
# Done
class test_gc_interval(unittest.TestCase):
def test_gc_interval(self):
interval_10_input_file = 'test/data_for_testing/gc_interval_tests/gc_content_test_interval_10_input.fasta'
interval_3_input_file = 'test/data_for_testing/gc_interval_tests/gc_content_test_interval_3_input.fasta'
interval_10_result = seq_util.gc_interval(interval_10_input_file, 10)
interval_3_result = seq_util.gc_interval(interval_3_input_file, 3)
expected_interval_10_result = {'contig':['interval_10_mix', 'interval_10_mix', 'interval_10_mix', 'interval_10_mix',
'interval_10_all_GC', 'interval_10_all_GC', 'interval_10_not_divis_by_10'],
'interval_start':[1, 11, 21, 31, 1, 11, 1], 'interval_end':[11,21,31,41, 11, 21, 5], 'gc_content':[100,50,50,0,100,100,50]}
expected_interval_10_result_df = pd.DataFrame(expected_interval_10_result)
expected_interval_3_result = {'contig':['interval_3_mix', 'interval_3_mix', 'interval_3_mix', 'interval_3_mix',
'interval_3_not_divis_by_3', 'interval_3_not_divis_by_3', 'interval_3_not_divis_by_3', 'interval_3_not_divis_by_3'],
'interval_start':[1,4,7,10, 1, 4, 7, 10],
'interval_end':[4, 7, 10, 13, 4, 7, 10, 11],
'gc_content':[((2/3)*100), 0, 100, (1/3)*100, 100,100,0,100]}
expected_interval_3_result_df = pd.DataFrame(expected_interval_3_result)
self.assertEqual(assert_frame_equal(interval_10_result, expected_interval_10_result_df, check_dtype=False), None)
self.assertEqual(assert_frame_equal(interval_3_result, expected_interval_3_result_df, check_dtype=False), None)
if __name__ == "__main__":
unittest.main()
|
[
"troberts@shamwow.microbiology.ubc.ca"
] |
troberts@shamwow.microbiology.ubc.ca
|
1125e6f6ae45ed4a3a5edb239f269df89b29130e
|
7937031274c8ebebd6f8391af245216421338cc7
|
/myenv/bin/easy_install-3.8
|
dd98319436e38a41ec73653bac068144f5feac69
|
[] |
no_license
|
ISLAMTU/Flappy_bird
|
fa621bfcf523ab7b255d416e9eb049abd448bdc5
|
37caacf3cbdab084ab5a65727abb46b802fd47c8
|
refs/heads/master
| 2023-03-10T07:22:43.186020
| 2021-02-16T21:25:39
| 2021-02-16T21:25:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
8
|
#!/home/mohamed/Desktop/bird_game/myenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"mohamedhisham694@gmail.com"
] |
mohamedhisham694@gmail.com
|
1935bfa537c5f257092b4e5689d56e2394be68bb
|
a09c10c29478fed167c94d83d5dff9371f9a1680
|
/Client.py
|
ec5149235aa6288eed9ea16cd6590f770fc45567
|
[] |
no_license
|
batra98/Distributed-Web-Cache
|
83e208689b18b95724dd0ba657b4ef89e9054d2a
|
7e08dfe4dd6739c779c59da3ab7301f3cb33af6a
|
refs/heads/master
| 2022-11-28T05:21:33.220922
| 2020-08-07T10:15:32
| 2020-08-07T10:15:32
| 285,793,260
| 2
| 0
| null | 2020-08-07T09:41:56
| 2020-08-07T09:41:56
| null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import socket
import sys
def send(ip, port, message):
    # open a TCP connection, send one command, and return the server's
    # reply split on whitespace into at most [status, payload]
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ip, port))
try:
sock.sendall(bytes(message, 'ascii'))
response = str(sock.recv(1024), 'ascii')
print("Received: {}".format(response))
return response.split(None, 1)
finally:
sock.close()
def get(ip, port, key):
return send(ip, port, "get {0}".format(key))
def add(ip, port, key, data):
return send(ip, port, "add {0} {1}".format(key, data))
def add_node(ip, port, key):
return send(ip, port, "addnode {0}".format(key))
def rm_node(ip, port, key):
return send(ip, port, "rmnode {0}".format(key))
def stats(ip, port):
return send(ip, port, "stats")
def performance(ip,port):
return send(ip,port, "performance")
def test_load_balancing(ip,port,num_node,num_data):
return send(ip,port, "test {0} {1}".format(num_node,num_data))
def clean(ip,port):
return send(ip,port,"clean")
if __name__ == "__main__":
ip, port = sys.argv[1], int(sys.argv[2])
while True:
command = input("> ")
send(ip, port, command)
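# Example session (addresses and replies are illustrative, not from the repo):
#   python Client.py 127.0.0.1 4000
#   > add mykey myvalue     # sends "add mykey myvalue" to the cache server
#   > get mykey             # sends "get mykey"; prints "Received: <reply>"
#   > stats                 # asks the server for its node statistics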
|
[
"sarthak.singhal@students.iiit.ac.in"
] |
sarthak.singhal@students.iiit.ac.in
|
356a84ee74b58014400d3f8cd8a3a8ef23a75015
|
46738f59d6358042f879567788cfd8ae8192a7b6
|
/lambda/save_sample_data/save_sample_data.py
|
6e94006a92e7fed7df4f113536d6fbdd063da3cc
|
[] |
no_license
|
perezbarbosa/hosting-compare
|
8fb2d6f45b37d7101da592950749dbf3f2c30667
|
9c0cd1ed0e274b568277d153ab2f22bf45a8e75d
|
refs/heads/master
| 2023-05-06T02:52:35.792340
| 2021-06-03T17:18:31
| 2021-06-03T17:18:31
| 273,340,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
import boto3
import json
from pprint import pprint
import sys
def handler(event, context):
""" Loads sample data into local database
Expects to receive a payload with a list of json objects
formatted as dynamodb.put_item expects
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.put_item
"""
    # guard against a missing/empty body; an empty list simply writes nothing
    body = json.loads(event['body']) if event.get('body') else []
out = {}
out['headers'] = {
'Content-Type': 'application/json',
}
# Using a local docker network to access to dynamodb container by its name
dynamodb = boto3.client('dynamodb', endpoint_url='http://dynamodb:8000')
try:
for entry in body:
pprint(entry)
response = dynamodb.put_item(
TableName='HostingList',
Item=entry,
)
out['statusCode'] = 200
out['body'] = {
'message': response,
}
    except Exception:
        print("Unexpected error")
pprint(sys.exc_info())
out['statusCode'] = 500
out['body'] = {
'message': 'Unexpected error',
}
return out
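# Example payload (made-up values) in the dynamodb put_item Item format the
# docstring points to:
#   [{"name": {"S": "host-a"}, "price": {"N": "9.99"}}]
# POSTing that body writes one item per entry into the HostingList table.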
|
[
"noreply@github.com"
] |
noreply@github.com
|
ec48f6b5a8782524afec24561f65b13e75302679
|
0d3f9dc797c0ad1d72bfe25390284a7a2231ef75
|
/chap4/45.py
|
ece8c684a747cd9a410d081d8c61fbdde63d8aad
|
[] |
no_license
|
mocas-usr/tensorflow_primary
|
b71b90cc4c0690316ebafd20aa7f7644bc16e839
|
e854f11026aea863a701e5691e1068b2648b4002
|
refs/heads/master
| 2022-11-02T18:04:31.538146
| 2019-11-01T08:25:35
| 2019-11-01T08:25:35
| 218,905,003
| 0
| 1
| null | 2022-10-24T03:15:05
| 2019-11-01T03:17:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 14:59:46 2019
@author: HAX
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
plotdata = { "batchsize":[], "loss":[] }
def moving_average(a, w=10):
if len(a) < w:
return a[:]
return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]
# generate simulated data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3  # y = 2x plus noise
# plot the training data
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()
tf.reset_default_graph()  # release all resources of the default graph
# define the IPs and ports
strps_hosts='localhost:1681'
strworker_hosts='localhost:1682,localhost:1683'
# define the role name
strjob_name='ps'
task_index=0
# split the strings into lists
ps_hosts=strps_hosts.split(',')
worker_hosts=strworker_hosts.split(',')
print(ps_hosts)
cluster_spec=tf.train.ClusterSpec({'ps':ps_hosts,'worker':worker_hosts})
# create the server
server=tf.train.Server({'ps':ps_hosts,'worker':worker_hosts},job_name=strjob_name,task_index=task_index)
# the ps role just waits with join()
if strjob_name=='ps':
    print('wait')
    server.join()
# the source breaks off mid-statement here; a minimal completion of the
# standard replica_device_setter pattern (the device string is an assumption):
with tf.device(tf.train.replica_device_setter(
        worker_device='/job:worker/task:%d' % task_index,
        cluster=cluster_spec)):
    pass  # the model definition would go here
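# How this script is meant to be run (a sketch, not part of the original file):
# start one process per role with matching constants, e.g.
#   strjob_name='ps',     task_index=0  -> serves localhost:1681 and blocks in join()
#   strjob_name='worker', task_index=0  -> builds the model on /job:worker/task:0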
|
[
"wangyuhang_mocas@163.com"
] |
wangyuhang_mocas@163.com
|
78ce4126165b88ced683f46c15d42dfb92e4f168
|
f7c19bd02cfc09992d804ae35e293323d2ea99e4
|
/classviews/migrations/0001_initial.py
|
fdb331a483de7545d040ba72ef362511027e225c
|
[] |
no_license
|
jesusjamz100/cms_blog
|
e98c927c5f93ca12ef6f71748620b0168f25dcb8
|
c08d6e1a063236bfd59d11e630254aa0043eaf17
|
refs/heads/master
| 2020-04-18T19:19:11.057386
| 2019-02-02T23:25:07
| 2019-02-02T23:25:07
| 167,709,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Generated by Django 2.1.5 on 2019-01-28 02:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('address', models.CharField(max_length=100)),
('phone', models.CharField(max_length=50)),
],
),
]
|
[
"jesusjamz100@gmail.com"
] |
jesusjamz100@gmail.com
|
014a6b6fc7c93c425ce7da5ad70dfce4b7273ee8
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/largestTime_20200903122053.py
|
697fb00998e96926352a6433e5a6da6d088d57dd
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
from itertools import permutations
def Time(A):
    # getting the different permutations
    # keep the ones that form a valid 24-hour time (00:00 to 23:59)
    # then place the colon in the proper place
    # otherwise return an empty string
    A = [str(i) for i in A]
    valid = []
    for p in permutations(A):
        string = "".join(p)
        hours, minutes = int(string[:2]), int(string[2:])
        if hours <= 23 and minutes <= 59:
            valid.append(string)
    if not valid:
        return ""
    latest = max(valid)
    return latest[:2] + ":" + latest[2:]
print(Time([1, 2, 3, 4]))
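# Expected behaviour of the fixed version (worked examples):
#   Time([1, 2, 3, 4]) -> "23:41"   # the largest valid 24-hour time
#   Time([5, 5, 5, 5]) -> ""        # no permutation forms a valid time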
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
41812c6d4cc481ed2d7caedd0323b6ca88aa5b06
|
e7003f13ad2e4b8cfeeb3d7bf6c5a393e8f38bf7
|
/custom_components/sbanken/config_flow.py
|
da9dfdbc7429a3cbf5f541836a10efc984187ceb
|
[
"MIT"
] |
permissive
|
toringer/home-assistant-sbanken
|
5a620d49abb1807bbda968d2c561eec84f0361c7
|
7540960042df455cc7ce672d7891c134a9976251
|
refs/heads/master
| 2023-02-21T13:07:14.543789
| 2023-02-12T14:36:08
| 2023-02-12T14:36:08
| 124,766,061
| 3
| 5
|
MIT
| 2022-03-17T14:25:03
| 2018-03-11T14:42:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,249
|
py
|
"""Config flow for Sbanken integration."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResult
from homeassistant.core import callback
from .sbanken_api import SbankenApi
from .const import (
DOMAIN,
CONF_CLIENT_ID,
CONF_NUMBER_OF_TRANSACTIONS,
CONF_SECRET,
TITLE,
CannotConnect,
InvalidAuth,
)
_LOGGER = logging.getLogger(__name__)
STEP_USER_DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_CLIENT_ID): str,
vol.Required(CONF_SECRET): str,
vol.Required(CONF_NUMBER_OF_TRANSACTIONS, default=10): int,
}
)
async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]:
"""Validate the user input allows us to connect."""
api = SbankenApi(data[CONF_CLIENT_ID], data[CONF_SECRET])
session = await hass.async_add_executor_job(api.get_session)
if not session.authorized:
raise InvalidAuth
customer_info = await hass.async_add_executor_job(api.get_customer_information)
return {"title": TITLE, "customer_id": customer_info["customerId"]}
@config_entries.HANDLERS.register(DOMAIN)
class SbankenConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Sbanken."""
VERSION = 1
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
)
errors = {}
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
unique_id = info["customer_id"]
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=info["title"],
data=user_input,
options=user_input,
)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
return SbankenOptionsFlowHandler(config_entry)
class SbankenOptionsFlowHandler(config_entries.OptionsFlow):
"""Sbanken config flow options handler."""
def __init__(self, config_entry):
self.options = config_entry.options
self.data = config_entry.data
async def async_step_init(self, _user_input=None):
"""Manage the options."""
return await self.async_step_user()
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_CLIENT_ID,
default=user_input[CONF_CLIENT_ID],
): str,
vol.Required(
CONF_SECRET,
default=user_input[CONF_SECRET],
): str,
vol.Required(
CONF_NUMBER_OF_TRANSACTIONS,
default=user_input[CONF_NUMBER_OF_TRANSACTIONS],
): int,
}
),
errors=errors,
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_CLIENT_ID,
default=self.options.get(CONF_CLIENT_ID),
): str,
vol.Required(
CONF_SECRET,
default=self.options.get(CONF_SECRET),
): str,
vol.Required(
CONF_NUMBER_OF_TRANSACTIONS,
default=self.options.get(CONF_NUMBER_OF_TRANSACTIONS),
): int,
}
),
)
|
[
"toringe@redalen.no"
] |
toringe@redalen.no
|
fab423ac20ec004dccb37459a3fcde5478b15d1e
|
ac5042582ec4fb8f128a97ead4bb59d6bbf6cef5
|
/tests/utils/test_prolang.py
|
f11790d225e44c2626e5efd7ca18fd0c49a53acc
|
[
"MIT"
] |
permissive
|
optittm/bugprediction
|
4be0e4c532e06c27ac4c9a3e7812bc9fc4f5a113
|
161628d504627d0623b584e7e92bb3130a24b8ef
|
refs/heads/main
| 2023-08-22T04:20:57.039589
| 2023-07-19T09:39:46
| 2023-07-19T09:39:46
| 522,447,765
| 2
| 2
|
MIT
| 2023-09-14T10:10:08
| 2022-08-08T07:28:57
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
from tests.__fixtures__ import *
def test_guess_programing_language():
"""
Guess what is the programming language from a file extension
>>>guess_programing_language("php")
PHP
>>>guess_programing_language(".php")
PHP
>>>guess_programing_language(".hidden/test.h")
C
>>>guess_programing_language("")
None
>>>guess_programing_language("java")
Java
>>>guess_programing_language("c++")
C++
>>>guess_programing_language("c")
C
>>>guess_programing_language("class")
None
>>>guess_programing_language("cpp")
C++
"""
pass
|
[
"benjamin.balet@gmail.com"
] |
benjamin.balet@gmail.com
|
cadcaf82134eb1b16eeb3991d6a5f168d62aaa56
|
714301f86767b075dd7a9132535e25689b7a7e4a
|
/Windows/src/LaZagne/softwares/databases/dbvis.py
|
02efcb16ebc7402c9d8b1ab802b48ea5dfb0ee4c
|
[] |
no_license
|
eddgomez/LaZagne
|
ebac0cc9a57786a064922380090fb3850492f876
|
aafee7e88361db1466d3b5148838700b5fab36bc
|
refs/heads/master
| 2021-01-15T21:39:36.390397
| 2015-02-20T15:18:27
| 2015-02-20T15:18:27
| 31,222,312
| 1
| 0
| null | 2015-02-23T18:24:15
| 2015-02-23T18:24:15
| null |
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
from Crypto.Hash import MD5
from Crypto.Cipher import DES
import binascii, array, hashlib
import base64, re, os
import xml.etree.cElementTree as ET
from config.write_output import print_output, print_debug
from config.constant import *
from config.header import Header
class Dbvisualizer():
# ---- functions used to decrypt the password ----
def get_salt(self):
salt_array = [-114,18,57,-100,7,114,111,90]
salt = array.array('b', salt_array)
hexsalt = binascii.hexlify(salt)
return binascii.unhexlify(hexsalt)
def get_iteration(self):
return 10
def get_derived_key(self, password, salt, count):
key = bytearray(password) + salt
for i in range(count):
m = hashlib.md5(key)
key = m.digest()
return (key[:8], key[8:])
def decrypt(self, salt, msg, password):
enc_text = base64.b64decode(msg)
(dk, iv) = self.get_derived_key(password, salt, self.get_iteration())
crypter = DES.new(dk, DES.MODE_CBC, iv)
text = crypter.decrypt(enc_text)
return re.sub(r'[\x01-\x08]','',text)
def get_passphrase(self):
return 'qinda'
# ---- end of the functions block ----
def get_infos(self, path, passphrase, salt):
xml_file = path + os.sep + 'config70/dbvis.xml'
if os.path.exists(xml_file):
tree = ET.ElementTree(file=xml_file)
pwdFound = []
for e in tree.findall('Databases/Database'):
values = {}
try:
values['Connection Name'] = e.find('Alias').text
except:
pass
try:
values['Userid'] = e.find('Userid').text
except:
pass
try:
ciphered_password = e.find('Password').text
try:
password = self.decrypt(salt, ciphered_password, passphrase)
values['Password'] = password
passwordFound = True
except:
pass
except:
pass
try:
values['Driver'] = e.find('UrlVariables//Driver').text.strip()
except:
pass
try:
elem = e.find('UrlVariables')
for ee in elem.getchildren():
for ele in ee.getchildren():
if 'Server' == ele.attrib['UrlVariableName']:
values['Server'] = str(ele.text)
if 'Port' == ele.attrib['UrlVariableName']:
values['Port'] = str(ele.text)
if 'SID' == ele.attrib['UrlVariableName']:
values['SID'] = str(ele.text)
except:
pass
if len(values) > 0:
pwdFound.append(values)
# print the results
print_output("DbVisualizer", pwdFound)
def get_mainPath(self):
if 'HOMEPATH' in os.environ:
path = os.environ['HOMEPATH'] + os.sep + '.dbvis'
if os.path.exists(path):
return path
else:
return 'DBVIS_NOT_EXISTS'
else:
return 'var_Env_Not_Found'
def retrieve_password(self):
# print title
Header().title_debug('Dbvisualizer')
mainPath = self.get_mainPath()
if mainPath == 'DBVIS_NOT_EXISTS':
print_debug('INFO', 'Dbvisualizer not installed.')
elif mainPath == 'var_Env_Not_Found':
			print_debug('ERROR', 'The HOMEPATH environment variable is not defined.')
else:
passphrase = self.get_passphrase()
salt = self.get_salt()
self.get_infos(mainPath, passphrase, salt)
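# Note on the scheme above (an observation, not project documentation):
# get_derived_key() is the PKCS#5 v1.5 PBE construction - MD5 iterated
# `count` times over password+salt, split into an 8-byte DES key and an
# 8-byte IV - i.e. the Java-style "PBEWithMD5AndDES" cipher, here driven
# by the hardcoded salt and the passphrase 'qinda'.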
|
[
"zanni.alessandro@gmail.com"
] |
zanni.alessandro@gmail.com
|
23e6461e758dbf032f5c96edf2252ec91d06e177
|
cbd3c85d385d065ff48bfa8a6321d4bf160acb09
|
/abjadext/cli/get_text_editor.py
|
923d3bb47c4ecdda2a782bea3166a3c3b3177547
|
[
"MIT"
] |
permissive
|
DaviRaubach/abjad-ext-cli
|
4b36424a9930bc9d04e53b763c8c63804baede2b
|
4c2fa22f317ef0a3d5f0d6a8ea92f4375b8fc8eb
|
refs/heads/master
| 2022-10-17T21:27:42.324333
| 2020-06-18T19:29:10
| 2020-06-18T19:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
import os
import abjad
def get_text_editor():
text_editor = abjad.configuration["text_editor"]
if text_editor is not None:
return text_editor
elif os.name == "posix":
return "vi"
else:
return "edit"
|
[
"trevor.baca@gmail.com"
] |
trevor.baca@gmail.com
|
703a8e40bd746970ed7d5c2e13f250617fe1a660
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02574/s746022410.py
|
9331bad8a322a0b5502729d4fc4e2aa050191d05
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
import math,itertools,fractions,heapq,collections,bisect,sys,queue,copy
sys.setrecursionlimit(10**7)
inf=10**20
mod=10**9+7
dd=[(-1,0),(0,1),(1,0),(0,-1)]
ddn=[(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
# def LF(): return [float(x) for x in sys.stdin.readline().split()]
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def LS(): return sys.stdin.readline().split()
def S(): return input()
def main():
N=I()
A=LI()
g=0
for x in A:
g=math.gcd(g,x)
if g>1:
return 'not coprime'
sosu=[0]*1000100
for x in A:
if x==1:
continue
sosu[x]+=1
if sosu[x]>1:
return 'setwise coprime'
for y in range(2,int(math.sqrt(x))+1):
if x%y!=0:
continue
z=x//y
if y==z:
sosu[y]+=1
if sosu[y]>1:
return 'setwise coprime'
else:
sosu[y]+=1
if sosu[y]>1:
return 'setwise coprime'
sosu[z]+=1
if sosu[z]>1:
return 'setwise coprime'
return 'pairwise coprime'
# main()
print(main())
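# Worked classification examples (not part of the submission):
#   A = [4, 9, 25]  -> every prime factor (2, 3, 5) appears once: 'pairwise coprime'
#   A = [6, 10, 15] -> overall gcd is 1 but 6 and 10 share the prime 2: 'setwise coprime'
#   A = [4, 8]      -> gcd of the whole set is 4: 'not coprime'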
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b6c2f9db34a01f882c5076597ae485bcdb696cf9
|
b597aca9f8ed351075ab6609a654ecf4d6f3d07e
|
/calc_pppkes_ind_stats.py
|
4511c06677c688b277f1d104ecb8c3f01720a3d8
|
[] |
no_license
|
mcbarlowe/daily_scrape
|
e44b6d55304c63f2fa70a946cc5de49e9dcf56ed
|
209f415179b40b1252a0ebc0eea045e3def34c30
|
refs/heads/master
| 2022-01-08T18:17:34.765264
| 2018-12-20T03:40:03
| 2018-12-20T03:40:03
| 185,601,600
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50,181
|
py
|
'''
This script calculates individual stats for both home and away teams given
the strength state passed to the functions. It works for all strength states
except "all situations". Harry Shomer's skater totals include the goalie, so
if you wanted 5v5 you would actually pass 6 for each side. In cases where
the strength state is uneven, the first number passed is the first number
of the strength state, i.e. 5 and 6 is equivalent to 4v5 and 6 and 5 is
5v4, etc.
'''
import pandas as pd
import numpy as np
import calc_all_sits_ind_stats as es_metrics
def calc_adj_ind_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num):
'''
function to calculate individual shot metrics and return a data
frame with them
Inputs:
pbp_df - play by play dataframe
    Outputs:
ind_shots_df - df with calculated iSF, iCF, iFF need to add ixG to
this later once xg model is finished
'''
corsi = ['SHOT', 'BLOCK', 'MISS', 'GOAL']
fenwick = ['SHOT', 'MISS', 'GOAL']
shot = ['SHOT', 'GOAL']
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_corsi = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_corsi'].sum().reset_index()
home_fenwick = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_fenwick'].sum().reset_index()
home_xg = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_xg'].sum().reset_index()
home_shot = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_shot'].sum().reset_index()
home_corsi.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iCF']
home_fenwick.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iFF']
home_shot.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iSF']
home_xg.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'ixg']
home_metrics_df = home_corsi.merge(home_fenwick,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.merge(home_shot,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.merge(home_xg,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.fillna(0)
away_corsi = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_corsi'].sum().reset_index()
away_fenwick = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_fenwick'].sum().reset_index()
away_xg = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['adj_xg'].sum().reset_index()
away_shot = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_shot'].sum().reset_index()
away_corsi.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iCF']
away_fenwick.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iFF']
away_shot.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iSF']
away_xg.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'ixg']
away_metrics_df = away_corsi.merge(away_fenwick,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.merge(away_shot,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.merge(away_xg,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.fillna(0)
metrics_df = pd.concat([away_metrics_df, home_metrics_df], sort=False)
metrics_df.loc[:, ('player_id', 'game_id','iCF', 'iFF', 'iSF')] = \
metrics_df.loc[:, ('player_id', 'game_id', 'iCF', 'iFF', 'iSF')].astype(int)
return metrics_df
def calc_ind_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num):
'''
function to calculate individual shot metrics and return a data
frame with them
Inputs:
pbp_df - play by play dataframe
    Outputs:
ind_shots_df - df with calculated iSF, iCF, iFF need to add ixG to
this later once xg model is finished
'''
corsi = ['SHOT', 'BLOCK', 'MISS', 'GOAL']
fenwick = ['SHOT', 'MISS', 'GOAL']
shot = ['SHOT', 'GOAL']
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_corsi = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_corsi'].sum().reset_index()
home_fenwick = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_fenwick'].sum().reset_index()
home_xg = home_5v4_df[(home_5v4_df.event.isin(fenwick)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['xg'].sum().reset_index()
home_shot = home_5v4_df[(home_5v4_df.event.isin(corsi)) &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_shot'].sum().reset_index()
home_corsi.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iCF']
home_fenwick.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iFF']
home_shot.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iSF']
home_xg.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'ixg']
home_metrics_df = home_corsi.merge(home_fenwick,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.merge(home_shot,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.merge(home_xg,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_metrics_df = home_metrics_df.fillna(0)
away_corsi = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_corsi'].sum().reset_index()
away_fenwick = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_fenwick'].sum().reset_index()
away_xg = away_5v4_df[(away_5v4_df.event.isin(fenwick)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['xg'].sum().reset_index()
away_shot = away_5v4_df[(away_5v4_df.event.isin(corsi)) &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_shot'].sum().reset_index()
away_corsi.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iCF']
away_fenwick.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iFF']
away_shot.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iSF']
away_xg.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'ixg']
away_metrics_df = away_corsi.merge(away_fenwick,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.merge(away_shot,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.merge(away_xg,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_metrics_df = away_metrics_df.fillna(0)
metrics_df = pd.concat([away_metrics_df, home_metrics_df], sort=False)
metrics_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF')] = \
metrics_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF')].astype(int)
return metrics_df
def calc_ind_hits(pbp_df, pp_skaters_num, pk_skaters_num):
'''
function calculates hits for and against from the pbp_df.
Input:
pbp_df - play by play dataframe
Output:
hit_df - dataframe of each players hits stats
'''
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_hit_for = home_5v4_df[(home_5v4_df.event == 'HIT') &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
home_hit_for.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iHF']
home_hit_against = home_5v4_df[(home_5v4_df.event == 'HIT') &
((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
home_hit_against.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iHA']
home_hit_df = home_hit_for.merge(home_hit_against,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_hit_df = home_hit_df.fillna(0)
away_hit_for = away_5v4_df[(away_5v4_df.event == 'HIT') &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
away_hit_for.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iHF']
away_hit_against = away_5v4_df[(away_5v4_df.event == 'HIT') &
((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
away_hit_against.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iHA']
away_hit_df = away_hit_for.merge(away_hit_against,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_hit_df = away_hit_df.fillna(0)
hit_df_list = [home_hit_df, away_hit_df]
hit_df = pd.concat(hit_df_list, sort=False).reset_index()
hit_df = hit_df[['season', 'game_id', 'date',
'player_id', 'player_name', 'iHF', 'iHA']]
hit_df.loc[:, ('season', 'game_id', 'player_id', 'iHF', 'iHA')] = \
hit_df.loc[:, ('season', 'game_id', 'player_id', 'iHF', 'iHA')].astype(int)
return hit_df
def calc_pp_gata(pbp_df, pp_skaters_num, pk_skaters_num):
'''
function calculates giveaways and takeaways from the pbp_df.
Input:
pbp_df - play by play dataframe
Output:
hit_df - dataframe of each players GA/TA stats
'''
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_ga = home_5v4_df[(home_5v4_df.event == 'GIVE') &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
home_ga.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iGA']
home_ta = home_5v4_df[(home_5v4_df.event == 'TAKE') &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
home_ta.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iTA']
home_gata = home_ga.merge(home_ta, on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
home_gata = home_gata.fillna(0)
away_ga = away_5v4_df[(away_5v4_df.event == 'GIVE') &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
away_ga.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iGA']
away_ta = away_5v4_df[(away_5v4_df.event == 'TAKE') &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
away_ta.columns = ['season', 'game_id', 'date', 'player_id',
'player_name', 'iTA']
away_gata = away_ga.merge(away_ta, on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_gata = away_gata.fillna(0)
gata = [home_gata, away_gata]
gata_df = pd.concat(gata, sort=False)
gata_df.loc[:, ('season', 'game_id', 'player_id', 'iGA', 'iTA')] = \
gata_df.loc[:, ('season', 'game_id', 'player_id', 'iGA', 'iTA')].astype(int)
gata_df = gata_df[['season', 'game_id', 'date', 'player_id',
'player_name', 'iGA', 'iTA']]
return gata_df
def calc_pp_blocks(pbp_df, pp_skaters_num, pk_skaters_num):
'''
function to calculate a players blocks while on the pp
Inputs:
pbp_df - dataframe of play by play data
Outputs:
blk_df - dataframe of blocks by players on the power play
'''
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_blk_df = home_5v4_df[(home_5v4_df.event == 'BLOCK') &
((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
away_blk_df = away_5v4_df[(away_5v4_df.event == 'BLOCK') &
((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
blk_list = [home_blk_df, away_blk_df]
blk_df = pd.concat(blk_list, sort=False)
blk_df.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'BLK']
blk_df.loc[:, ('season', 'game_id', 'player_id', 'BLK')] = \
blk_df.loc[:, ('season', 'game_id', 'player_id', 'BLK')].astype(int)
return blk_df
def calc_pp_faceoffs(pbp_df, pp_skaters_num, pk_skaters_num):
'''
    calculate the faceoffs won and lost by a player whose team is on the
    power play
Inputs:
pbp_df - play by play dataframe
pp_skaters_num - number of skaters for team on the power play
pk_skaters_num - number of skaters for team on the penalty kill
Outputs
fo_df - dataframe of FOW and FOL for teams on the PP
'''
home_5v4_df = pbp_df[(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_5v4_df = pbp_df[(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(~pbp_df.away_goalie.isna())]
home_fo_won = home_5v4_df[(home_5v4_df.event == 'FAC') &
((home_5v4_df.p1_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p1_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
home_fo_won.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'FOW']
home_fo_lost = home_5v4_df[(home_5v4_df.event == 'FAC') &
((home_5v4_df.p2_id == home_5v4_df.homeplayer1_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer2_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer3_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer4_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer5_id) |
(home_5v4_df.p2_id == home_5v4_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
home_fo_lost.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'FOL']
home_5v4_fo_df = home_fo_won.merge(home_fo_lost,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_fo_won = away_5v4_df[(away_5v4_df.event == 'FAC') &
((away_5v4_df.p1_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p1_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name']).size().\
reset_index()
away_fo_won.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'FOW']
away_fo_lost = away_5v4_df[(away_5v4_df.event == 'FAC') &
((away_5v4_df.p2_id == away_5v4_df.awayplayer1_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer2_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer3_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer4_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer5_id) |
(away_5v4_df.p2_id == away_5v4_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name']).size().\
reset_index()
away_fo_lost.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'FOL']
away_5v4_fo_df = away_fo_won.merge(away_fo_lost,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
fo_dfs = [home_5v4_fo_df, away_5v4_fo_df]
fo_5v4 = pd.concat(fo_dfs, sort=False)
fo_5v4 = fo_5v4.fillna(0)
fo_5v4 = fo_5v4[['season', 'game_id', 'date', 'player_id',
'player_name', 'FOW', 'FOL']]
fo_5v4.loc[:, ('season', 'game_id', 'player_id', 'FOW', 'FOL')] = \
fo_5v4.loc[:, ('season', 'game_id', 'player_id', 'FOW', 'FOL')].astype(int)
return fo_5v4
def calc_pp_ind_points(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    This function calculates the individual goals and assists scored while
    the scoring player's team is on the power play at the given strength state
    Input:
        pbp_df - play by play dataframe
    Output:
        pts_pp_df - dataframe of individual goals and assists on the power play
    '''
home_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.home_team) &
(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(~pbp_df.home_goalie.isna())]
away_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.away_team) &
(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
                        (~pbp_df.away_goalie.isna())]
home_pp_points = es_metrics.calc_ind_points(home_pp_df)
away_pp_points = es_metrics.calc_ind_points(away_pp_df)
pts_pp = [home_pp_points, away_pp_points]
pts_pp_df = pd.concat(pts_pp, sort=False)
pts_pp_df = pts_pp_df[['season', 'game_id', 'date', 'player_id',
'player_name', 'g', 'a1', 'a2']]
pts_pp_df.loc[:, ('season', 'game_id')] = pts_pp_df.loc[:, ('season', 'game_id')].astype(int)
return pts_pp_df
def calc_pp_penalties(pbp_df, pp_skaters_num, pk_skaters_num):
    '''
    function to calculate penalties drawn and taken for teams on the
    power play
    '''
home_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.home_team) &
(pbp_df.home_players == pp_skaters_num) &
(pbp_df.away_players == pk_skaters_num) &
(pbp_df.is_penalty > 0) &
(~pbp_df.home_goalie.isna())]
away_pp_df = pbp_df[(pbp_df.ev_team == pbp_df.away_team) &
(pbp_df.home_players == pk_skaters_num) &
(pbp_df.away_players == pp_skaters_num) &
(pbp_df.is_penalty > 0) &
                        (~pbp_df.away_goalie.isna())]
home_pent = home_pp_df[(home_pp_df.event == 'PENL') &
((home_pp_df.p1_id == home_pp_df.homeplayer1_id) |
(home_pp_df.p1_id == home_pp_df.homeplayer2_id) |
(home_pp_df.p1_id == home_pp_df.homeplayer3_id) |
(home_pp_df.p1_id == home_pp_df.homeplayer4_id) |
(home_pp_df.p1_id == home_pp_df.homeplayer5_id) |
(home_pp_df.p1_id == home_pp_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_penalty'].sum().\
reset_index()
home_pent.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iPENT']
home_pend = home_pp_df[(home_pp_df.event == 'PENL') &
((home_pp_df.p2_id == home_pp_df.homeplayer1_id) |
(home_pp_df.p2_id == home_pp_df.homeplayer2_id) |
(home_pp_df.p2_id == home_pp_df.homeplayer3_id) |
(home_pp_df.p2_id == home_pp_df.homeplayer4_id) |
(home_pp_df.p2_id == home_pp_df.homeplayer5_id) |
(home_pp_df.p2_id == home_pp_df.homeplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name'])['is_penalty'].sum().\
reset_index()
home_pend.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iPEND']
home_pp_penl = home_pent.merge(home_pend,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
away_pent = away_pp_df[(away_pp_df.event == 'PENL') &
((away_pp_df.p1_id == away_pp_df.awayplayer1_id) |
(away_pp_df.p1_id == away_pp_df.awayplayer2_id) |
(away_pp_df.p1_id == away_pp_df.awayplayer3_id) |
(away_pp_df.p1_id == away_pp_df.awayplayer4_id) |
(away_pp_df.p1_id == away_pp_df.awayplayer5_id) |
(away_pp_df.p1_id == away_pp_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p1_id', 'p1_name'])['is_penalty'].sum().\
reset_index()
away_pent.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iPENT']
away_pend = away_pp_df[(away_pp_df.event == 'PENL') &
((away_pp_df.p2_id == away_pp_df.awayplayer1_id) |
(away_pp_df.p2_id == away_pp_df.awayplayer2_id) |
(away_pp_df.p2_id == away_pp_df.awayplayer3_id) |
(away_pp_df.p2_id == away_pp_df.awayplayer4_id) |
(away_pp_df.p2_id == away_pp_df.awayplayer5_id) |
(away_pp_df.p2_id == away_pp_df.awayplayer6_id))].\
groupby(['season', 'game_id', 'date',
'p2_id', 'p2_name'])['is_penalty'].sum().\
reset_index()
away_pend.columns = ['season', 'game_id', 'date',
'player_id', 'player_name', 'iPEND']
away_pp_penl = away_pent.merge(away_pend, on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
penl_dfs = [home_pp_penl, away_pp_penl]
pp_penl_dfs = pd.concat(penl_dfs, sort=False)
pp_penl_dfs = pp_penl_dfs.fillna(0)
pp_penl_dfs = pp_penl_dfs[['season', 'game_id', 'date', 'player_id',
'player_name', 'iPENT', 'iPEND']]
pp_penl_dfs.loc[:, ('season', 'game_id', 'player_id', 'iPENT', 'iPEND')] = \
pp_penl_dfs.loc[:, ('season', 'game_id', 'player_id', 'iPENT', 'iPEND')].astype(int)
return pp_penl_dfs
def calc_ppespk_ind_metrics(pbp_df, pp_skaters_num,
pk_skaters_num, calc_blk=calc_pp_blocks, \
calc_fo=calc_pp_faceoffs,
calc_points=calc_pp_ind_points,
calc_penalties=calc_pp_penalties,
calc_hits=calc_ind_hits,
calc_shot_metrics=calc_ind_shot_metrics,
calc_gata=calc_pp_gata):
    '''
    this function calculates the individual metrics of each player's
    contribution during the game
    Input:
        pbp_df - play by play df
        pp_skaters_num - skater count (goalie included) for the team on the
                         power play: for 5v5 you would pass 6, for 4v5 pass 5
        pk_skaters_num - skater count (goalie included) for the team on the
                         penalty kill: for 5v5 you would pass 6, for 4v5 pass 6
    Output:
        player_df - individual player stats df
    '''
#calculate each individual stats data frames and then join them all together
#will pull in teams with the on ice measures
points_df = calc_points(pbp_df, pp_skaters_num, pk_skaters_num)
metrics_df = calc_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num)
penalty_df = calc_penalties(pbp_df, pp_skaters_num, pk_skaters_num)
hit_df = calc_hits(pbp_df, pp_skaters_num, pk_skaters_num)
gata_df = calc_gata(pbp_df, pp_skaters_num, pk_skaters_num)
fo_df = calc_fo(pbp_df, pp_skaters_num, pk_skaters_num)
blk_df = calc_blk(pbp_df, pp_skaters_num, pk_skaters_num)
ind_stats_df = metrics_df.merge(points_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(penalty_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(hit_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(gata_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(fo_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(blk_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.fillna(0)
ind_stats_df.loc[:, ('player_id', 'iCF', 'iFF', 'iSF', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK')] = \
ind_stats_df.loc[:, ('player_id', 'iCF', 'iFF', 'iSF', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK')].astype(int)
ind_stats_df = ind_stats_df[['season', 'game_id', 'date', 'player_id', 'player_name',
'iCF', 'iFF', 'iSF', 'ixg', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK']]
ind_stats_df = ind_stats_df[ind_stats_df.player_id != 0]
ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.home_goalie_id.unique())]
ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.away_goalie_id.unique())]
return ind_stats_df.reset_index(drop=True)
def calc_adj_ppespk_ind_metrics(pbp_df, pp_skaters_num,
pk_skaters_num, calc_blk=calc_pp_blocks, \
calc_fo=calc_pp_faceoffs,
calc_points=calc_pp_ind_points,
calc_penalties=calc_pp_penalties,
calc_hits=calc_ind_hits,
calc_shot_metrics=calc_adj_ind_shot_metrics,
calc_gata=calc_pp_gata):
    '''
    this function calculates the individual metrics of each player's
    contribution during the game
    Input:
        pbp_df - play by play df
        pp_skaters_num - skater count (goalie included) for the team on the
                         power play: for 5v5 you would pass 6, for 4v5 pass 5
        pk_skaters_num - skater count (goalie included) for the team on the
                         penalty kill: for 5v5 you would pass 6, for 4v5 pass 6
    Output:
        player_df - individual player stats df
    '''
#calculate each individual stats data frames and then join them all together
#will pull in teams with the on ice measures
points_df = calc_points(pbp_df, pp_skaters_num, pk_skaters_num)
metrics_df = calc_shot_metrics(pbp_df, pp_skaters_num, pk_skaters_num)
penalty_df = calc_penalties(pbp_df, pp_skaters_num, pk_skaters_num)
hit_df = calc_hits(pbp_df, pp_skaters_num, pk_skaters_num)
gata_df = calc_gata(pbp_df, pp_skaters_num, pk_skaters_num)
fo_df = calc_fo(pbp_df, pp_skaters_num, pk_skaters_num)
blk_df = calc_blk(pbp_df, pp_skaters_num, pk_skaters_num)
ind_stats_df = metrics_df.merge(points_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(penalty_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(hit_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(gata_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(fo_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.merge(blk_df,
on=['season', 'game_id', 'date',
'player_id', 'player_name'],
how='outer')
ind_stats_df = ind_stats_df.fillna(0)
ind_stats_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK')] = \
ind_stats_df.loc[:, ('game_id', 'player_id', 'iCF', 'iFF', 'iSF', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK')].astype(int)
ind_stats_df = ind_stats_df[['season', 'game_id', 'date', 'player_id', 'player_name',
'iCF', 'iFF', 'iSF', 'ixg', 'g',
'a1', 'a2', 'iPENT', 'iPEND', 'iHF', 'iHA',
'iGA', 'iTA', 'FOW', 'FOL', 'BLK']]
ind_stats_df = ind_stats_df[ind_stats_df.player_id != 0]
ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.home_goalie_id.unique())]
ind_stats_df = ind_stats_df[~ind_stats_df.player_id.isin(pbp_df.away_goalie_id.unique())]
return ind_stats_df.reset_index(drop=True)
def main():
return
if __name__ == '__main__':
main()
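# Usage sketch (hypothetical file name; per the module docstring, skater
# counts include the goalie, so 5v4 power-play stats are pp_skaters_num=6
# against pk_skaters_num=5):
#   import pandas as pd
#   pbp = pd.read_csv('pbp_20182019.csv')          # an assumed scraped pbp file
#   pp_stats = calc_ppespk_ind_metrics(pbp, 6, 5)  # individual 5v4 PP stats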
|
[
"mcbarlowe@gmail.com"
] |
mcbarlowe@gmail.com
|
47213a723487f5382748a8d76a7546ee674ea1f5
|
a26d91163fe40924c7c4f9d94fcd973989b68983
|
/watchlist_app/migrations/0003_alter_movie_description.py
|
bed775bad8d30539b2b34f84c48a3511902e2b22
|
[] |
no_license
|
rcoffie/Django-Rest-Tut
|
a840ecb838098ed2d525c1b5321f042e0d29c5fb
|
9925bfb11b92a49aa6973e3929b2d05d9528ee27
|
refs/heads/master
| 2023-08-25T06:43:41.019767
| 2021-10-27T15:27:06
| 2021-10-27T15:27:06
| 409,567,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.2.5 on 2021-09-23 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watchlist_app', '0002_rename_movies_movie'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='description',
field=models.TextField(),
),
]
|
[
"rcoffie22@yahoo.com"
] |
rcoffie22@yahoo.com
|
e5d23091a1fc43d867a7d92b6d2aeaccf1d37eca
|
09724c51f0012474eb322a676fd112d1bc102bb6
|
/CodeBuilder/Valids.py
|
bb671f0840c37c2cab29f8c586e3505cbaf0c0cc
|
[] |
no_license
|
danmarshall208/CodeBuilder
|
53d4b72476d704a8290aeb5925975f10a184817f
|
526f0d37b9ef11484eada691275b9ce71f5c2fab
|
refs/heads/master
| 2021-05-09T07:53:15.845690
| 2018-01-29T11:33:51
| 2018-01-29T11:33:51
| 119,374,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
valid_var_names = ['a', 'b', 'c', 'd', 'e']
valid_func_names = ['A', 'B', 'C', 'D', 'E']
valid_funcs = ['chr', 'float', 'ord', 'print', 'round', 'str', 'sum']
#valid_funcs = ['abs', 'bool', 'chr', 'float', 'id', 'len', 'ord', 'print', 'round', 'str', 'sum', 'type']
operations = ['object', 'add', 'subtract', 'multiply', 'divide']
#operations = ['object', 'add', 'subtract', 'multiply', 'divide', 'equals', 'not_equals']
objects = ['int', 'string', 'var', 'function']
|
[
"danmarshall208@hotmail.com"
] |
danmarshall208@hotmail.com
|
6bac759d5e99e5cea3ef506eb72cad61410bf459
|
2a1b7c51756d692bc2d06d57f5e99d89e114a2e9
|
/ANPP_code/preprocess/AliRepeat/convert_to_norm.py
|
aed6cc67fbb72482cd3a9a074e6657c8e216beb3
|
[] |
no_license
|
AnonymousOpenResearch/ANPP
|
fb66b506af9eef2f714d537c87af40f2a3256129
|
e5a416a69998fdeda91aa767b0a430892f47c53c
|
refs/heads/master
| 2023-01-11T12:29:15.289027
| 2020-11-06T13:51:05
| 2020-11-06T13:51:05
| 271,594,404
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
import numpy as np
import pandas as pd
import os
import sys
import pickle
file_user_log = 'user_log_format1.csv'
file_user_purchase = "user_purchase.csv"
file_item_info = "item.csv"
min_user_count = 5
min_item_count = 5
purchase_action_type = 2
arr = sys.argv
folder_dataset = arr[1]
file_pkl_review = arr[2]
file_pkl_meta = arr[3]
year = '2014'
def norm_timestamp(t):
s = str(t)
y = year
if(len(s) < 4):
s = "0" + s
m = s[0:2]
d = s[-2:]
return "-".join([y, m, d])
def filt_purchase(folder, file_src, file_dst):
print("filt_purchase...")
file_src = os.path.join(folder, file_src)
file_dst = os.path.join(folder, file_dst)
df = pd.read_csv(file_src)
    df = df[df['action_type'] == purchase_action_type]
df.rename(columns={'user_id': 'user', 'item_id': 'item', 'cat_id': 'categories', 'brand_id': 'brand', 'time_stamp': 'timestamp'}, inplace=True)
df = df[['user', 'item', 'timestamp', 'categories', 'brand']]
df['timestamp'] = df['timestamp'].map(lambda x: norm_timestamp(x))
df = df.sort_values(['user', 'timestamp'])
df.to_csv(file_dst, index=False)
print("filt_purchase done!")
def norm_order_data(folder, file_src, file_pkl_review, file_pkl_meta):
print("norm_order_data...")
file_src = os.path.join(folder, file_src)
df = pd.read_csv(file_src)
df['title'] = ""
    # filter out special-date data (keep purchases before 2014-11-01)
    df = df[df['timestamp'] < '2014-11-01']
    # filter out infrequent items and users
df = df.groupby('item').filter(lambda x: len(x) >= min_item_count)
df = df.groupby('user').filter(lambda x: len(x) >= min_user_count)
user_df = df[['user', 'item', 'timestamp']].drop_duplicates()
meta_df = df[['item', 'categories', 'title', 'brand']].drop_duplicates()
file_pkl_review = os.path.join(folder, file_pkl_review)
with open(file_pkl_review, 'wb') as f:
pickle.dump(user_df, f, pickle.HIGHEST_PROTOCOL)
file_pkl_meta = os.path.join(folder, file_pkl_meta)
with open(file_pkl_meta, 'wb') as f:
pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)
print("norm_order_data done!")
def main():
#filt_purchase(folder_dataset, file_user_log, file_user_purchase)
norm_order_data(folder_dataset, file_user_purchase, file_pkl_review, file_pkl_meta)
main()
|
[
"guyulongthu@gmail.com"
] |
guyulongthu@gmail.com
|
1259e4c9fd64611ee9165faa91df8c43bf9b224d
|
d77d0433c9db4b0f529c4066011c6b5eb037981b
|
/mpi-proxy-split/test/mana_test.py
|
b4683cd4ea33c53253eadb04c1fbe541d3d38ddc
|
[] |
no_license
|
mpickpt/mana
|
bb11f4da82e01c2f6b37f0cb357bf1303cd720c1
|
8f341500f259187588dbede15bf152a0771a7022
|
refs/heads/main
| 2023-09-01T08:42:51.518528
| 2023-08-14T21:38:16
| 2023-08-31T18:25:31
| 179,736,099
| 26
| 23
| null | 2023-09-14T03:12:09
| 2019-04-05T18:37:56
|
C++
|
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
#!/usr/bin/env python3
import argparse
import sys
import subprocess
'''
This util is designed to be an argument parsing utility for C/C++ tests.
'''
class CustomParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def main():
parser = CustomParser(description='Run a MANA Test')
parser.add_argument('-i', '--iterations', metavar='I',
help='Number of iterations for test')
parser.add_argument('test', metavar='T', help='Path to test case to run')
parser.add_argument('-n','--num_ranks', metavar='N', help='Number of ranks\
for test', required=True)
parser.add_argument('-m','--mana_bin', metavar='M', help='Absolute \
path to mana_bin folder', default=
'', required=False)
parser.add_argument('-r','--mpirun', help='Use mpirun instead\
of srun', action="store_true")
parser.add_argument('-a', '--args', help='Arguments to pass to test',
default='')
args = parser.parse_args()
if args.mana_bin == '':
        mana_coordinator_path = 'mana_coordinator'
        mana_launch_path = 'mana_launch'
else:
mana_coordinator_path = f'{args.mana_bin}/mana_coordinator'
mana_launch_path = f'{args.mana_bin}/mana_launch'
print(f'{mana_coordinator_path}')
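    # Start the MANA coordinator before launching any ranks.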
coord_child = subprocess.run([f'{mana_coordinator_path}'])
run = 'srun'
if args.mpirun:
run = 'mpirun'
    if args.iterations is None:
        print(f'{run} -n {args.num_ranks} {mana_launch_path} '
              f'{args.test}.mana.exe {args.args}')
        test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
                                     f'{mana_launch_path}',
                                     f'{args.test}.mana.exe',
                                     f'{args.args}'], stdout=subprocess.DEVNULL)
else:
if args.args == '':
print(f'{run} -n {args.num_ranks} {mana_launch_path} '
f'{args.test}.mana.exe {args.iterations} {args.args}')
test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
f'{mana_launch_path}',
f'{args.test}.mana.exe',
f'{args.iterations}', f'{args.args}'],
stdout = subprocess.DEVNULL)
else:
print(f'{run} -n {args.num_ranks} {mana_launch_path} '
f'{args.test}.mana.exe -i {args.iterations} {args.args}')
test_child = subprocess.run([f'{run}', '-n', f'{args.num_ranks}',
f'{mana_launch_path}',
                                         f'{args.test}.mana.exe', '-i',
                                         f'{args.iterations}', f'{args.args}'],
stdout = subprocess.DEVNULL)
if __name__ == "__main__":
main()
|
[
"107428972+chirag-singh-memverge@users.noreply.github.com"
] |
107428972+chirag-singh-memverge@users.noreply.github.com
|
f825ef4df82e8584a89a77987d0e9dca9f38a446
|
23d43f570f5c99c9fea510bda5579116ea7fd1e5
|
/main.py
|
9d4fa83ab7f5e9cfda00a02b770230fe512e5cea
|
[
"Apache-2.0"
] |
permissive
|
AM6SoftwareCompany/Azkhar
|
4647e8469d5511c06b2ea6895fd4b7034b4f6057
|
d86a4bd58ac956640bb1bee5fce64ff647c94a1b
|
refs/heads/main
| 2023-06-09T22:57:08.494783
| 2021-07-09T10:01:04
| 2021-07-09T10:01:04
| 353,202,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,861
|
py
|
import time
import datetime
import webbrowser
import pyperclip
import pyautogui
AzkharAlsabah = [
"اللَّهُمَّ أنْتَ رَبِّي لا إلَهَ إلَّا أنْتَ، خَلَقْتَنِي وأنا عَبْدُكَ، وأنا علَى عَهْدِكَ ووَعْدِكَ ما اسْتَطَعْتُ، أعُوذُ بكَ مِن شَرِّ ما صَنَعْتُ، أبُوءُ لكَ بنِعْمَتِكَ عَلَيَّ، وأَبُوءُ لكَ بذَنْبِي فاغْفِرْ لِي، فإنَّه لا يَغْفِرُ الذُّنُوبَ إلَّا أنْت",
'أَصبَحْنا على فِطرةِ الإسلامِ، وعلى كَلِمةِ الإخلاصِ، وعلى دِينِ نَبيِّنا محمَّدٍ صلَّى اللهُ عليه وسلَّمَ، وعلى مِلَّةِ أبِينا إبراهيمَ، حَنيفًا مُسلِمًا، وما كان مِنَ المُشرِكينَ',
'سبحانَ اللَّهِ وبحمدِه لا قوَّةَ إلَّا باللَّهِ ما شاءَ اللَّهُ كانَ وما لم يشأ لم يَكن أعلمُ أنَّ اللَّهَ على كلِّ شيءٍ قديرٌ وأنَّ اللَّهَ قد أحاطَ بِكلِّ شيءٍ علمًا',
'قال رسول الله صلى الله عليه وسلم: (مَن قال: بسمِ اللهِ الذي لا يَضرُ مع اسمِه شيءٌ في الأرضِ ولا في السماءِ وهو السميعُ العليمِ، ثلاثُ مراتٍ، لم تصبْه فجأةُ بلاءٍ حتى يُصبحَ)',
'قال رسول الله صلى الله عليه وسلم: (مَن قالَ حينَ يصبحُ وحينَ يُمسي: سبحانَ اللَّهِ وبحمدِهِ مائةَ مرَّةٍ: لم يأتِ أحدٌ يومَ القيامةِ بأفضلَ ممَّا جاءَ بِهِ، إلَّا أحدٌ قالَ مثلَ ما قالَ، أو زادَ علَيهِ)',
'اللهمَّ إني أسألُك العفوَ والعافيةَ، في الدنيا والآخرةِ، اللهمَّ إني أسألُك العفوَ والعافيةَ، في دِيني ودنيايَ وأهلي ومالي، اللهمَّ استُرْ عوراتي، وآمِنْ روعاتي، واحفظني من بين يدي، ومن خلفي، وعن يميني، وعن شمالي، ومن فوقي، وأعوذُ بك أن أُغْتَالَ من تحتي',
'للَّهمَّ بِكَ أصبَحنا، وبِكَ أمسَينا، وبِكَ نحيا وبِكَ نموتُ وإليكَ المصيرُ',
'اللهمَّ إنِّي أعوذُ بك من الهمِّ والحزنِ، والعجزِ والكسلِ، والبُخلِ والجُبنِ، وضَلَعِ الدَّينِ، وغَلَبَةِ الرجالِ',
'اللَّهمَّ إنِّي أسألُكَ خيرَ هذا اليومِ فتحَه، ونصرَه، ونورَه، وبرَكتَه، وَهدايتَهُ، وأعوذُ بِكَ من شرِّ ما فيهِ وشرِّ ما بعدَه',
'اللَّهُمَّ إنِّي أسألُكَ العافيةَ في الدُّنيا والآخِرةِ، اللَّهُمَّ إنِّي أسألُكَ العَفوَ والعافيةَ في دِيني ودُنيايَ، وأهْلي ومالي، اللَّهُمَّ استُرْ عَوْراتي، وآمِنْ رَوْعاتي، اللَّهُمَّ احْفَظْني من بينِ يَدَيَّ، ومن خَلْفي، وعن يَميني، وعن شِمالي، ومن فَوْقي، وأعوذُ بعَظَمتِكَ أنْ أُغْتالَ من تَحْتي',
'اللهم إنا نعوذُ بك من أن نُشرِكَ بك شيئًا نعلَمُه، و نستغفرُك لما لا نعلمُه',
'يا حيُّ يا قيُّومُ، برَحمتِكَ أستَغيثُ، أصلِح لي شأني كُلَّهُ، ولا تَكِلني إلى نَفسي طرفةَ عينٍ',
'اللَّهمَّ ما أصبحَ بي من نعمةٍ أو بأحدٍ من خلقِكَ فمنكَ وحدَكَ لا شريكَ لكَ فلكَ الحمدُ ولكَ الشُّكرُ',
'اللَّهمَّ عالِمَ الغَيبِ والشَّهادةِ، فاطرَ السَّمواتِ والأرضِ، رَبَّ كلِّ شيءٍ ومَليكَهُ، أشهدُ أن لا إلَهَ إلَّا أنتَ، أعوذُ بِكَ مِن شرِّ نفسي وشرِّ الشَّيطانِ وشِركِهِ',
'(حَسبيَ اللهُ لا إلهَ إلَّا هو، عليه تَوكَّلْتُ، وهو ربُّ العَرشِ العَظيمِ)، سَبعَ مراتٍ',
'(سُبْحَانَ اللهِ وَبِحَمْدِهِ، عَدَدَ خَلْقِهِ وَرِضَا نَفْسِهِ وَزِنَةَ عَرْشِهِ وَمِدَادَ كَلِمَاتِهِ)، وهي تُقال ثلاث مرات',
'سبحانَ اللَّهِ وبحمدِهِ وهي تُقال مئةَ مرَّةٍ',
'اللَّهُمَّ إنِّي أصبَحتُ أُشهِدُك، وأُشهِدُ حَمَلةَ عَرشِكَ، ومَلائِكَتَك، وجميعَ خَلقِكَ: أنَّكَ أنتَ اللهُ لا إلهَ إلَّا أنتَ، وأنَّ مُحمَّدًا عبدُكَ ورسولُكَ',
'رَضيتُ باللَّهِ ربًّا، وبالإسلامِ دينًا، وبِمُحمَّدٍ رسولًا',
'اللَّهمَّ عافِني في بدَني اللَّهمَّ عافِني في سمعي اللَّهمَّ عافِني في بصري لا إلهَ إلَّا أنت. اللَّهمَّ إنِّي أعوذُ بِكَ منَ الكُفْرِ والفقرِ اللَّهمَّ إنِّي أعوذُ بكَ من عذابِ القبرِ لا إلهَ إلَّا أنت تعيدُها ثَلاثَ مرَّاتٍ',
'أَصْبَحْنَا وَأَصْبَحَ المُلْكُ لِلَّهِ وَالْحَمْدُ لِلَّهِ لا إلَهَ إلَّا اللَّهُ، وَحْدَهُ لا شَرِيكَ له . له المُلْكُ وَلَهُ الحَمْدُ وَهو علَى كُلِّ شيءٍ قَدِيرٌ، رَبِّ أَسْأَلُكَ خَيْرَ ما في هذِه اللَّيْلَةِ وَخَيْرَ ما بَعْدَهَا، وَأَعُوذُ بكَ مِن شَرِّ ما في هذِه اللَّيْلَةِ وَشَرِّ ما بَعْدَهَا، رَبِّ أَعُوذُ بكَ مِنَ الكَسَلِ وَسُوءِ الكِبَرِ، رَبِّ أَعُوذُ بكَ مِن عَذَابٍ في النَّارِ وَعَذَابٍ في القَبْرِ',
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا صَلَّيْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ، اللَّهُمَّ بَارِكْ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا بَارَكْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ (مَن صلى عَلَيَّ حين يُصْبِحُ عَشْرًا ، وحين يُمْسِي عَشْرًا أَدْرَكَتْه شفاعتي يومَ القيامةِ)',
'أستغفرُ اللهَ العظيمَ الذي لا إلهَ إلَّا هو الحيَّ القيومَ وأتوبُ إليه',
'اللَّهمَّ إنِّي أسألُكَ عِلمًا نافعًا ورزقًا طيِّبًا وعملًا متقبَّلًا',
'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق',
'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق',
'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق',
'من قال إذا أصبَح: لا إلهَ إلَّا اللهُ وحدَه لا شريكَ له له الملكُ وله الحمدُ وهو على كلِّ شيءٍ قديرٌ عشْرَ مرَّاتٍ كُتِب له بهنَّ عشْرُ حسناتٍ ومُحي بهنَّ عنه عشْرُ سيِّئاتٍ ورُفِع له بهن عشْرُ درجاتٍ وكُنَّ له عَدْلَ عِتاقةِ أربعِ رقابٍ وكُنَّ له حرَسًا مِن الشَّيطانِ حتَّى يُمسيَ',
'آية الكرسي: (اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ الْحَيُّ الْقَيُّومُ ۚ لَا تَأْخُذُهُ سِنَةٌ وَلَا نَوْمٌ ۚ لَّهُ مَا فِي السَّمَاوَاتِ وَمَا فِي الْأَرْضِ ۗ مَن ذَا الَّذِي يَشْفَعُ عِندَهُ إِلَّا بِإِذْنِهِ ۚ يَعْلَمُ مَا بَيْنَ أَيْدِيهِمْ وَمَا خَلْفَهُمْ ۖ وَلَا يُحِيطُونَ بِشَيْءٍ مِّنْ عِلْمِهِ إِلَّا بِمَا شَاءَ ۚ وَسِعَ كُرْسِيُّهُ السَّمَاوَاتِ وَالْأَرْضَ ۖ وَلَا يَئُودُهُ حِفْظُهُمَا ۚ وَهُوَ الْعَلِيُّ الْعَظِيمُ)',
"سورة الإخلاص: (قُلْ هُوَ اللَّهُ أَحَدٌ* اللَّهُ الصَّمَدُ* لَمْ يَلِدْ وَلَمْ يُولَدْ* وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ) ثلاثا",
'سورة الفلق: (قُلْ أَعُوذُ بِرَبِّ الْفَلَقِ* مِن شَرِّ مَا خَلَقَ* وَمِن شَرِّ غَاسِقٍ إِذَا وَقَبَ* وَمِن شَرِّ النَّفَّاثَاتِ فِي الْعُقَدِ* وَمِن شَرِّ حَاسِدٍ إِذَا حَسَدَ) ثلاثا',
'سورة الناس: (قُلْ أَعُوذُ بِرَبِّ النَّاسِ* مَلِكِ النَّاسِ* إِلَٰهِ النَّاسِ* مِن شَرِّ الْوَسْوَاسِ الْخَنَّاسِ* الَّذِي يُوَسْوِسُ فِي صُدُورِ النَّاسِ* مِنَ الْجِنَّةِ وَالنَّاسِ) ثلاثا',
'قوله تعالى: (رَبِّ أَعُوذُ بِكَ مِنْ هَمَزَاتِ الشَّيَاطِينِ وَأَعُوذُ بِكَ رَبِّ أَنْ يَحْضُرُونِ)',
'قوله تعالى: (حَسْبِيَ اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ ۖ عَلَيْهِ تَوَكَّلْتُ ۖ وَهُوَ رَبُّ الْعَرْشِ الْعَظِيمِ).'
]
# =======================================================================================================================================================================================================================================================================================================================================================
AzkharAlMasaa = [
'اللَّهمَّ إنِّي عَبدُك، وابنُ عبدِك، وابنُ أمتِك، ناصِيَتي بيدِكَ، ماضٍ فيَّ حكمُكَ، عدْلٌ فيَّ قضاؤكَ، أسألُكَ بكلِّ اسمٍ هوَ لكَ سمَّيتَ بهِ نفسَك، أو أنزلْتَه في كتابِكَ، أو علَّمتَه أحدًا من خلقِك، أو استأثرتَ بهِ في علمِ الغيبِ عندَك، أن تجعلَ القُرآنَ ربيعَ قلبي، ونورَ صَدري، وجَلاءَ حَزَني، وذَهابَ هَمِّي',
'اللَّهمَّ إنِّي أسأَلُكَ مِن الخيرِ كلِّه عاجلِه وآجلِه ما علِمْتُ منه وما لَمْ أعلَمْ وأعوذُ بكَ مِن الشَّرِّ كلِّه عاجلِه وآجلِه ما علِمْتُ منه وما لَمْ أعلَمْ، اللَّهمَّ إنِّي أسأَلُكَ مِن الخيرِ ما سأَلكَ عبدُك ونَبيُّكَ وأعوذُ بكَ مِن الشَّرِّ ما عاذ به عبدُك ونَبيُّكَ وأسأَلُكَ الجنَّةَ وما قرَّب إليها مِن قولٍ وعمَلٍ وأعوذُ بكَ مِن النَّارِ وما قرَّب إليها مِن قولٍ وعمَلٍ وأسأَلُكَ أنْ تجعَلَ كلَّ قضاءٍ قضَيْتَه لي خيرًا',
'(بسمِ اللهِ الذي لا يَضرُ مع اسمِه شيءٌ في الأرضِ ولا في السماءِ وهو السميعُ العليمِ)، وتُقال ثلاث مرات',
'رَضِيتُ بِاللهِ رَبًّا، وَبِالْإِسْلَامِ دِينًا، وَبِمُحَمَّدٍ صَلَّى اللهُ عَلَيْهِ وَسَلَّمَ نَبِيًّا وَرَسُولًا',
'اللَّهمَّ بِكَ أمسَينا وبِكَ أصبَحنا وبِكَ نحيا وبِكَ نموتُ وإليكَ المصير',
'اللَّهمَّ ما أمسى بي مِن نعمةٍ أو بأحَدٍ مِن خَلْقِكَ، فمنكَ وحدَكَ لا شريكَ لكَ، فلَكَ الحمدُ ولكَ الشُّكرُ، فقد أدى شُكْرَ ذلكَ اليومِ',
'سبحانَ اللَّهِ وبحمدِهِ وهي تُقال مئةَ مرَّةٍ',
'(سُبْحَانَ اللهِ وَبِحَمْدِهِ، عَدَدَ خَلْقِهِ وَرِضَا نَفْسِهِ وَزِنَةَ عَرْشِهِ وَمِدَادَ كَلِمَاتِهِ)، وهي تُقال ثلاث مرات',
'اللَّهُمَّ إنِّي أمسيت أُشهِدُك، وأُشهِدُ حَمَلةَ عَرشِكَ، ومَلائِكَتَك، وجميعَ خَلقِكَ: أنَّكَ أنتَ اللهُ لا إلهَ إلَّا أنتَ، وأنَّ مُحمَّدًا عبدُكَ ورسولُكَ',
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا صَلَّيْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ، اللَّهُمَّ بَارِكْ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ، كَمَا بَارَكْتَ عَلَى إِبْرَاهِيمَ وَعَلَى آلِ إِبْرَاهِيمَ، إِنَّكَ حَمِيدٌ مَجِيدٌ (مَن صلى عَلَيَّ حين يُصْبِحُ عَشْرًا ، وحين يُمْسِي عَشْرًا أَدْرَكَتْه شفاعتي يومَ القيامةِ)',
'لا إلهَ إلَّا اللهُ وحدَه لا شريكَ له له الملكُ وله الحمدُ وهو على كلِّ شيءٍ قديرٌ',
'أمسَيْنا على فِطرةِ الإسلامِ وعلى كَلِمةِ الإخلاصِ وعلى دينِ نبيِّنا محمَّدٍ صلَّى اللهُ عليه وسلَّم وعلى مِلَّةِ أبينا إبراهيمَ حنيفًا مسلمًا وما كان مِنَ المشركينَ',
'(اللَّهمَّ عافِني في بدَني اللَّهمَّ عافِني في سمعي اللَّهمَّ عافِني في بصري لا إلهَ إلَّا أنت، اللَّهمَّ إنِّي أعوذُ بِكَ منَ الكُفْرِ والفقرِ اللَّهمَّ إنِّي أعوذُ بكَ من عذابِ القبرِ لا إلهَ إلَّا أنت) وتقال ثَلاثَ مرَّاتٍ',
'اللهم إنا نعوذُ بك من أن نُشرِكَ بك شيئًا نعلَمُه، و نستغفرُك لما لا نعلمُه',
'أستغفرُ اللهَ العظيمَ الذي لا إلهَ إلَّا هو الحيَّ القيومَ وأتوبُ إليه',
'اللَّهمَّ إنِّي أسألُكَ عِلمًا نافعًا ورزقًا طيِّبًا وعملًا متقبَّلًا',
'يا حيُّ يا قيُّومُ، برَحمتِكَ أستَغيثُ، أصلِح لي شأني كُلَّهُ، ولا تَكِلني إلى نَفسي طرفةَ عينٍ',
'اللَّهمَّ عالِمَ الغَيبِ والشَّهادةِ، فاطرَ السَّمواتِ والأرضِ، رَبَّ كلِّ شيءٍ ومَليكَهُ، أشهدُ أن لا إلَهَ إلَّا أنتَ، أعوذُ بِكَ مِن شرِّ نفسي وشرِّ الشَّيطانِ وشِركِهِ',
'اللهمَّ فاطرَ السمواتِ والأرضِ، عالمَ الغيبِ والشهادةِ، لا إلهَ إلَّا أنتَ ربَّ كلِّ شيءٍ ومَليكَه، أعوذُ بك من شرِّ نفسي ومن شرِّ الشيطانِ وشرَكِه، وأنْ أقترفَ على نفسي سوءًا أو أجرَّهُ إلى مسلمٍ',
'اللهمَّ إنِّي أعوذُ بك من الهمِّ والحزنِ، والعجزِ والكسلِ، والبُخلِ والجُبنِ، وضَلَعِ الدَّينِ، وغَلَبَةِ الرجالِ',
'أعوذُ بكلماتِ اللهِ التَّامَّاتِ مِن شرِّ ما خلَق',
'اللهمَّ إني أسألُك العفوَ والعافيةَ، في الدنيا والآخرةِ، اللهمَّ إني أسألُك العفوَ والعافيةَ، في دِيني ودنيايَ وأهلي ومالي، اللهمَّ استُرْ عوراتي، وآمِنْ روعاتي، واحفظني من بين يدي، ومن خلفي، وعن يميني، وعن شمالي، ومن فوقي، وأعوذُ بك أن أُغْتَالَ من تحتي',
'أَمْسَيْنَا وَأَمْسَى المُلْكُ لِلَّهِ، وَالْحَمْدُ لِلَّهِ لا إلَهَ إلَّا اللَّهُ، وَحْدَهُ لا شَرِيكَ له، له المُلْكُ وَلَهُ الحَمْدُ وَهو علَى كُلِّ شيءٍ قَدِيرٌ، رَبِّ أَسْأَلُكَ خَيْرَ ما في هذِه اللَّيْلَةِ وَخَيْرَ ما بَعْدَهَا، وَأَعُوذُ بكَ مِن شَرِّ ما في هذِه اللَّيْلَةِ وَشَرِّ ما بَعْدَهَا، رَبِّ أَعُوذُ بكَ مِنَ الكَسَلِ وَسُوءِ الكِبَرِ، رَبِّ أَعُوذُ بكَ مِن عَذَابٍ في النَّارِ وَعَذَابٍ في القَبْرِ',
'اللَّهُمَّ أنْتَ رَبِّي لا إلَهَ إلَّا أنْتَ، خَلَقْتَنِي وأنا عَبْدُكَ، وأنا علَى عَهْدِكَ ووَعْدِكَ ما اسْتَطَعْتُ، أعُوذُ بكَ مِن شَرِّ ما صَنَعْتُ، أبُوءُ لكَ بنِعْمَتِكَ عَلَيَّ، وأَبُوءُ لكَ بذَنْبِي فاغْفِرْ لِي، فإنَّه لا يَغْفِرُ الذُّنُوبَ إلَّا أنْتَ',
'اللَّهمَّ إنِّي أسألُكَ خيرَ هذه الليلة فتحَها، ونصرَها، ونورَها، وبرَكتَها، وَهداها، وأعوذُ بِكَ من شرِّ ما فيها وشرِّ ما بعدَها',
'آية الكرسي: (اللَّهُ لَا إِلَٰهَ إِلَّا هُوَ الْحَيُّ الْقَيُّومُ ۚ لَا تَأْخُذُهُ سِنَةٌ وَلَا نَوْمٌ ۚ لَّهُ مَا فِي السَّمَاوَاتِ وَمَا فِي الْأَرْضِ ۗ مَن ذَا الَّذِي يَشْفَعُ عِندَهُ إِلَّا بِإِذْنِهِ ۚ يَعْلَمُ مَا بَيْنَ أَيْدِيهِمْ وَمَا خَلْفَهُمْ ۖ وَلَا يُحِيطُونَ بِشَيْءٍ مِّنْ عِلْمِهِ إِلَّا بِمَا شَاءَ ۚ وَسِعَ كُرْسِيُّهُ السَّمَاوَاتِ وَالْأَرْضَ ۖ وَلَا يَئُودُهُ حِفْظُهُمَا ۚ وَهُوَ الْعَلِيُّ الْعَظِيمُ)',
"قال تعالى في سورة البقرة أيضاً: (آمَنَ الرَّسُولُ بِمَا أُنزِلَ إِلَيْهِ مِن رَّبِّهِ وَالْمُؤْمِنُونَ ۚ كُلٌّ آمَنَ بِاللَّهِ وَمَلَائِكَتِهِ وَكُتُبِهِ وَرُسُلِهِ لَا نُفَرِّقُ بَيْنَ أَحَدٍ مِّن رُّسُلِهِ ۚ وَقَالُوا سَمِعْنَا وَأَطَعْنَا ۖ غُفْرَانَكَ رَبَّنَا وَإِلَيْكَ الْمَصِيرُ*لَا يُكَلِّفُ اللَّهُ نَفْسًا إِلَّا وُسْعَهَا ۚ لَهَا مَا كَسَبَتْ وَعَلَيْهَا مَا اكْتَسَبَتْ ۗ رَبَّنَا لَا تُؤَاخِذْنَا إِن نَّسِينَا أَوْ أَخْطَأْنَا ۚ رَبَّنَا وَلَا تَحْمِلْ عَلَيْنَا إِصْرًا كَمَا حَمَلْتَهُ عَلَى الَّذِينَ مِن قَبْلِنَا ۚ رَبَّنَا وَلَا تُحَمِّلْنَا مَا لَا طَاقَةَ لَنَا بِهِ ۖ وَاعْفُ عَنَّا وَاغْفِرْ لَنَا وَارْحَمْنَا ۚ أَنتَ مَوْلَانَا فَانصُرْنَا عَلَى الْقَوْمِ الْكَافِرِينَ)",
"سورة الإخلاص: (قُلْ هُوَ اللَّهُ أَحَدٌ* اللَّهُ الصَّمَدُ* لَمْ يَلِدْ وَلَمْ يُولَدْ* وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ) ثلاثا",
'سورة الفلق: (قُلْ أَعُوذُ بِرَبِّ الْفَلَقِ* مِن شَرِّ مَا خَلَقَ* وَمِن شَرِّ غَاسِقٍ إِذَا وَقَبَ* وَمِن شَرِّ النَّفَّاثَاتِ فِي الْعُقَدِ* وَمِن شَرِّ حَاسِدٍ إِذَا حَسَدَ) ثلاثا',
'سورة الناس: (قُلْ أَعُوذُ بِرَبِّ النَّاسِ* مَلِكِ النَّاسِ* إِلَٰهِ النَّاسِ* مِن شَرِّ الْوَسْوَاسِ الْخَنَّاسِ* الَّذِي يُوَسْوِسُ فِي صُدُورِ النَّاسِ* مِنَ الْجِنَّةِ وَالنَّاسِ) ثلاثا'
]
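# Post one Facebook story through Creator Studio by driving the mouse/keyboard at
# fixed screen coordinates; the browser window must already be laid out as expected.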
def story(PageName, Text):
pyautogui.moveTo(950, 300, duration=1)
time.sleep(2)
pyautogui.click()
pyautogui.moveTo(900, 200, duration=1)
time.sleep(2)
pyautogui.click()
pyautogui.write(PageName)
time.sleep(2)
pyautogui.moveTo(970, 270, duration=1)
time.sleep(6)
pyautogui.click()
pyautogui.moveTo(1000, 500, duration=1)
time.sleep(2)
pyautogui.click()
pyautogui.moveTo(150, 400, duration=1)
time.sleep(2)
pyautogui.click()
# Store our string to the clipboard
pyperclip.copy(Text)
# Hotkey the paste command
pyautogui.hotkey("ctrl", "v")
pyautogui.moveTo(250, 700, duration=1)
time.sleep(2)
pyautogui.click()
x = int(input('Enter the type (0 for test, 1 for AzkharAlsabah, 2 for AzkharAlMasaa): '))
if x == 0:
webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
time.sleep(10)
story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Done on {datetime.datetime.now().time()}✔')
elif x == 1:
webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
time.sleep(10)
story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Starts')
for i in AzkharAlsabah:
story('apocryphon', i)
time.sleep(2)
story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlsabah Done on {datetime.datetime.now().time()}✔')
elif x == 2:
webbrowser.open_new('https://business.facebook.com/creatorstudio/home')
time.sleep(10)
story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlMasaa Starts')
for i in AzkharAlMasaa:
story('apocryphon', i)
time.sleep(2)
story('apocryphon', f'{datetime.datetime.now().date()} AzkharAlMasaa Done on {datetime.datetime.now().time()}✔')
|
[
"ahmed.abo.sita66@gmail.com"
] |
ahmed.abo.sita66@gmail.com
|
9207f63b377b4990be34f2882127edcd256361e6
|
70e1d7c3e375ecff09df36e5a4ceda5691221968
|
/tmp.py
|
1936e8cb7425e44fc01199bc2937e82f0e03ce0a
|
[
"Apache-2.0"
] |
permissive
|
doublechenching/ship_detection
|
5b91aa4a7fbe6eb5a88389d1a517641a53740890
|
1ba4926e0d28043863df05ae8afc3d5b336b350d
|
refs/heads/master
| 2020-04-06T17:53:23.855070
| 2018-11-15T08:47:02
| 2018-11-15T08:47:02
| 157,676,999
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
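# Label distribution table (presumably the Human Protein Atlas classes):
# subcellular location, image count, fraction of the dataset.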
Nucleoplasm 12885 0.25
Cytosol 8228 0.16
Plasma membrane 3777 0.07
Nucleoli 3621 0.07
Mitochondria 2965 0.06
Golgi apparatus 2822 0.06
Nuclear bodies 2513 0.05
Nuclear speckles 1858 0.04
Nucleoli fibrillar center 1561 0.03
Centrosome 1482 0.03
Nuclear membrane 1254 0.02
Intermediate filaments 1093 0.02
Microtubules 1066 0.02
Endoplasmic reticulum 1008 0.02
Microtubule organizing center 902 0.02
Cell junctions 802 0.02
Actin filaments 688 0.01
Focal adhesion sites 537 0.01
Cytokinetic bridge 530 0.01
Cytoplasmic bodies 328 0.01
Aggresome 322 0.01
Mitotic spindle 210 0.00
Lipid droplets 172 0.00
Peroxisomes 53 0.00
Endosomes 45 0.00
Lysosomes 28 0.00
Microtubule ends 21 0.00
Rods & rings 11 0.00
|
[
"vichenqin@gmail.com"
] |
vichenqin@gmail.com
|
e7f2596520d3677c4a237c7632d8e1b51aad8246
|
dcefbb67cfdc837a5b1016ea674ead66263f0af2
|
/algorithm/BOJ_2442.py
|
8e5b65e98dfb9ba6de506f7620715ecfc2770547
|
[] |
no_license
|
SeungYeopB/weekend-study
|
0a5d5bdbb00a7d81f2ec7c6c5b2fc7b96d92c296
|
02651855bb91e26784611bbed34a01023f4ef307
|
refs/heads/master
| 2023-06-23T15:52:54.475077
| 2021-07-23T07:57:16
| 2021-07-23T07:57:16
| 382,514,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
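# BOJ 2442: print a centered star pyramid of height N (row i has N-i leading spaces and 2*i-1 stars).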
N = int(input())
for i in range(1,N+1):
for j in range(N-i):
print(" ",end="")
for j in range(2*i-1):
print("*",end="")
print()
|
[
"study0610@naver.com"
] |
study0610@naver.com
|
92ad7068066c80a09e52e4efc9930ae95ea93b66
|
0223a85b6a3fecd453b9bd61368cb51c2978c30f
|
/Courses/1 month/4 week/day 2/Задача №2.py
|
7b91a00c66d20af8e6336a4673389637c2148b5c
|
[
"MIT"
] |
permissive
|
emir-naiz/first_git_lesson
|
4cb502d13ead7f459a8e57333581d5256fd8de39
|
1fecf712290f6da3ef03deff518870d91638eb69
|
refs/heads/main
| 2023-01-05T12:29:02.905718
| 2020-11-10T06:00:19
| 2020-11-10T06:00:19
| 306,527,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
# Function that collects all even numbers below the given number
def find_even_nums(number):
even_list = []
for i in range(1, number):
if i % 2 == 0:
even_list.append(i)
return even_list
print(find_even_nums(10))
|
[
"naizabekoff@mail.ru"
] |
naizabekoff@mail.ru
|
2f56eca6cc281d7aae80bae6b6e73ce086e853ba
|
64564ad34eb88c9b366b23c21e90684dbbf0ba94
|
/gen-py/livetex/livetex_service/__init__.py
|
e790cd5d759e1a7017e92a7f4b883ce35961affd
|
[] |
no_license
|
hobbit-vt/livetex-fake-services
|
53481fcc02190a14648ac6950eeb39b31087f9f2
|
b595d912307148d1bc6b162f63608369256d9c43
|
refs/heads/master
| 2021-01-25T07:34:36.583297
| 2014-10-17T09:51:11
| 2014-10-17T09:51:11
| 23,706,391
| 1
| 1
| null | 2014-10-23T15:16:52
| 2014-09-05T14:42:37
|
Python
|
UTF-8
|
Python
| false
| false
| 58
|
py
|
# -*- coding: utf-8 -*-
__all__ = ['ttypes', 'constants']
|
[
"viktor.g@livetex.ru"
] |
viktor.g@livetex.ru
|
5a11bc517beebdc39fe586b47fc5f2a103dbdbb6
|
a5de047e66a44459cbde9062d8d8d6c2da11d750
|
/db_session.py
|
84820499406b2f08a6647a194a09f4b0d3ee539a
|
[] |
no_license
|
MrRomacka/diary
|
515b1f4fc7732c936464827c1431ab14f300b32d
|
219f9c0472ed933d71b7412e79b8ea6a8a77eb5d
|
refs/heads/main
| 2023-01-22T14:23:02.251413
| 2020-11-13T13:41:42
| 2020-11-13T13:41:42
| 312,364,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
import sqlalchemy.ext.declarative as dec
SqlAlchemyBase = dec.declarative_base()
__factory = None
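# Initialise the SQLAlchemy engine and session factory for the given SQLite file (subsequent calls are no-ops).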
def global_init(db_file: str):
global __factory
if __factory:
return
if not db_file or not db_file.strip():
        raise Exception('A database file must be specified.')
conn_str = f'sqlite:///{db_file.strip()}?check_same_thread=False'
print(f"Подключение к базе данных по адресу {conn_str}")
engine = sa.create_engine(conn_str, echo=False)
__factory = orm.sessionmaker(bind=engine)
SqlAlchemyBase.metadata.create_all(engine)
def create_session() -> Session:
global __factory
return __factory()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b1718080973873c3be8fe613fdb2207a4ef59185
|
86bebcb32810b3af337f6fe0e4ce30ebb7fa7a16
|
/ServerlessDjango/asgi.py
|
a30b4fcadfd0186624b55569e6e08e9bf7709d32
|
[] |
no_license
|
NimishVerma/ServerlessDjangoTemplate
|
7bc665d043d656c4abaf1b076a38935f98390d41
|
9f1f8fba7a37867f90369feac0d4f215ec3ab53d
|
refs/heads/master
| 2023-04-01T12:07:31.012179
| 2021-04-05T02:54:02
| 2021-04-05T02:54:02
| 354,104,152
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
ASGI config for ServerlessDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ServerlessDjango.settings')
application = get_asgi_application()
|
[
"nimishverma@ymail.com"
] |
nimishverma@ymail.com
|
3f0e72af94251d7862f5e10f270251b0a1d37a48
|
d3f0a0a8a4508dbc7ddd3c6251760672368c2cc9
|
/word2vec_classif.py
|
e798ef6bcbba52c0ac03982abdb4582e689926b2
|
[] |
no_license
|
Jey1kRey/projet6
|
13883b1eac53dcea6639bde8461b48977e1428d1
|
d7ae9b798fc631277fb62d1e14b57f25393b2822
|
refs/heads/master
| 2020-04-20T17:11:51.781883
| 2019-02-03T19:04:06
| 2019-02-03T19:04:06
| 168,981,810
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 08:42:39 2018
@author: Jérôme
"""
import pandas as pd
import gensim
import numpy as np
from gensim.models import word2vec
import re
from nltk.corpus import stopwords
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
#model=gensim.models.Word2Vec.load('model_wv_150.bin')
model=gensim.models.Word2Vec.load('model_wv_complet.bin')
#w2v = dict(zip(model.wv.index2word, model.wv.syn0))
vocabulaire=model.wv.syn0
#print(len(w2v))
#print(len(model.wv.index2word))
#print(len(model.wv.vocab.keys()))
#df=pd.read_csv('base_texte.csv',sep=',')
df=pd.read_csv('base_totale.csv',sep=',', engine='python')
dftags=pd.read_csv('dico_tags.csv', sep=',')
df_test=pd.read_csv('base_test.csv', sep=',', engine='python')
df=df.fillna('')
questions=df['Body'].iloc[0:20]
indices=questions.index
df_tags=df['Tags'].iloc[0:20]
''' text normalisation '''
def dico_tag(texte):
dico=[]
for x in texte :
dico.append(x)
return dico
dico=dico_tag(dftags.TagName)
def tokeniz(texte):
regex=re.compile("[^a-zA-Z]")
text_modif=regex.sub(" ", texte)
texte_lower=text_modif.lower()
phrase=texte_lower.split()
for i in list(phrase):
if i in stopwords.words('english'):
phrase.remove(i)
for i in list(phrase):
if i not in dico:
phrase.remove(i)
mots=" ".join(phrase)
return mots
def nettoyage_dataframe(data):
texte=data.apply(tokeniz)
return texte
df_question=nettoyage_dataframe(questions)
question_test=df_question.iloc[0]
print(question_test)
def net_tg(texte):
regex=re.compile("[^a-zA-Z]")
text_modif=regex.sub(" ", texte)
texte_lower=text_modif.lower()
return texte_lower
def net_df_tg(data):
texte=data.apply(net_tg)
return texte
y_tg=net_df_tg(df_tags)
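# Accumulate word2vec vectors over the corpus: for each token known to the model,
# add its vector to the running sum and append that sum to the output list.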
def recup_vecteurs(corpus, model):
index2word_set = set(model.wv.vocab.keys()) # words known to model
featureVec = np.zeros(model.vector_size, dtype="float32")
liste_vecteur=[]
for word in corpus :
if word in index2word_set:
featureVec = np.add(featureVec, model[word])
liste_vecteur.append(featureVec)
return liste_vecteur
def creation_corpus(questions):
liste_questions=[]
for element in questions :
mots_wv=recup_vecteurs(element, model)
liste_questions.append(mots_wv)
return liste_questions
x_train, x_test, y_train, y_test = train_test_split(questions, y_tg, train_size=0.7)
liste_xtrain=creation_corpus(x_train)
liste_xtest=creation_corpus(x_test)
train=np.vstack(liste_xtrain)
test=np.vstack(liste_xtest)
#essai=pd.DataFrame(liste_finale, index=indices)
#print(essai)
#vecteurs_corpus=recup_vecteurs(question_test, model)
#print(len(vecteurs_corpus))
#print(vecteurs_corpus)
#x_train, x_test, y_train, y_test = train_test_split(test, y_tg, train_size=0.7)
foret=OneVsRestClassifier(RandomForestClassifier())
foret.fit(train, y_train)
#print(foret.score(x_test,y_test))
#print(foret.predict(x_test[10:15]))
#print(y_test[10:15])
'''
x_train, x_test, y_train, y_test = train_test_split(vocabulaire, y_tg, train_size=0.7)
foret=OneVsRestClassifier(RandomForestClassifier())
foret.fit(x_train, y_train)
print(foret.score(x_test,y_test))
print(foret.predict(x_test[10:15]))
print(y_test[10:15])
'''
|
[
"jeyonizuka@hotmail.com"
] |
jeyonizuka@hotmail.com
|
5d360aaf3e6c4ec30b0ffd85d0c0fbd4ac6c452c
|
501d8f799185f58af7ad864962f29ba3066a3a66
|
/python/locate.py
|
3dc7eb55c09872ed04993bce4e9ebd5b41d9887e
|
[] |
no_license
|
liuyujyyz/learning
|
55c866c3a0c4db3d4d5475c049785a3ad856aec3
|
299914b27fad9e3fc9175d68905760d939b06d2a
|
refs/heads/master
| 2021-01-19T08:10:37.553429
| 2020-08-13T07:42:14
| 2020-08-13T07:42:14
| 87,607,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
import cv2
import numpy as np
from decorators import timer
from cluster import Kmeans
from tqdm import tqdm
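# Summarise a 16x16-resized image channel by its first right-singular vector (top SVD component).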
def extract(img):
img = cv2.resize(img, (16, 16))
U, S, V = np.linalg.svd(img)
return V[0]
@timer
def divide(img, window, stride=1):
h, w, _ = img.shape
parth, partw = window
out = []
outImg = []
steph = (h-parth)//stride
stepw = (w-partw)//stride
boxes = []
for i in range(steph):
for j in range(stepw):
tmpImg = img[stride*i:stride*i+parth,stride*j:stride*j+partw]
U = np.concatenate([extract(tmpImg[:,:,0]),extract(tmpImg[:,:,1]),extract(tmpImg[:,:,2])], axis=0)
#U = extract(tmpImg[:,:,0])+extract(tmpImg[:,:,1])+extract(tmpImg[:,:,2])
out.append(U)
outImg.append(tmpImg)
boxes.append((stride*i, stride*j, stride*i+parth, stride*j+partw))
out = np.array(out)
outImg = np.array(outImg)
boxes = np.array(boxes)
return out, outImg, boxes
def get_rep(filename, ID):
img = cv2.imread(filename)
rep, imgset, boxes = divide(img, (45, 45), 10)
rep2, imgset2, boxes2 = divide(img, (90, 90), 20)
rep3, imgset3, boxes3 = divide(img, (30, 30), 10)
rep4, imgset4, boxes4 = divide(img, (60, 60), 20)
rep = np.concatenate([rep, rep2, rep3, rep4], axis=0)
boxes = np.concatenate([boxes, boxes2, boxes3, boxes4], axis=0)
fileIndex = ID*np.ones((rep.shape[0],), dtype='int')
return img, rep, boxes, fileIndex
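# True when fewer than half of the samples carry label 1, i.e. cluster 1 is the
# minority cluster (the index argument is unused).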
def findBackground(cato, index):
return (cato.sum()*2 < cato.shape[0])
if __name__ == '__main__':
reps = []
imgsets = []
boxess = []
fileIndexs = []
imgs = []
dists = []
numImg = 10
for i in tqdm(range(numImg)):
img, rep, boxes, fileIndex = get_rep('../data/cat/2/pic%s.jpg'%(i), i)
imgs.append(img)
for j in range(3):
cato, dist = Kmeans(rep, 2)
if cato.sum() == 0:
from IPython import embed
embed()
tag = int(cato.sum()*2 < cato.shape[0])
if j > 0:
tag = 1 - tag
idx = np.where(cato == tag)[0]
rep = rep[idx]
boxes = boxes[idx]
fileIndex = fileIndex[idx]
dist = dist[idx]
reps.append(rep)
boxess.append(boxes)
fileIndexs.append(fileIndex)
dists.append(dist)
rep = np.concatenate(reps, axis=0)
boxes = np.concatenate(boxess, axis=0)
fileIndex = np.concatenate(fileIndexs, axis=0)
dist = np.concatenate(dists, axis=0)
while True:
if rep.shape[0] < 10 * numImg:
break
cato, dist = Kmeans(rep, 2)
tag = findBackground(cato, fileIndex)
tag = 1 - tag
print(set(cato), tag)
idx = np.where(cato == tag)[0]
nrep = rep[idx]
nbox = boxes[idx]
nfile = fileIndex[idx]
ndist = dist[idx]
count = [(nfile==i).sum() for i in range(numImg)]
if min(count) > 0:
rep = nrep
boxes = nbox
fileIndex = nfile
            dist = ndist
else:
print(count)
break
maxi = dist.max()
mini = dist.min()
mean = dist.mean()
ratio = 255 * (dist - mini) / (mean - mini)
for i in range(rep.shape[0]):
if dist[i] > mean:
continue
cv2.rectangle(imgs[fileIndex[i]], (boxes[i][1], boxes[i][0]), (boxes[i][3], boxes[i][2]), (int(ratio[i]),0,0), 1)
for i in range(numImg):
cv2.imshow('x', imgs[i])
cv2.waitKey(0)
|
[
"liuyujyyz@gmail.com"
] |
liuyujyyz@gmail.com
|
49125a103d0ef8ad23344162256cf34b29c740c5
|
5c0506e42fc7f0325728994223f1b0be4f1187fc
|
/summa_py_textrank.py
|
2fd1d59fa66724ab7ba0f6a9607be02ff57006a6
|
[] |
no_license
|
Trevahok/summarizer
|
602d492385c3130c6c9f11dd82e71177541ede73
|
cfd134e79ec5dfac3530081c6863421ab667207d
|
refs/heads/master
| 2020-03-19T20:36:21.680650
| 2018-06-12T06:54:36
| 2018-06-12T06:54:36
| 136,908,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
from urllib.request import urlopen
from summa.summarizer import summarize
from sys import argv
from bs4 import BeautifulSoup as bs
import PyPDF2
def from_link():
page=urlopen(argv[1])
soup=bs(page,'lxml')
text=soup.find_all('p')
text='\n'.join([ i.text for i in text])
print(summarize(text,ratio=0.2))
def from_pdf():
pdfdoc = open(argv[1], 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfdoc)
count = pdfReader.numPages
for i in range(count):
page = pdfReader.getPage(i)
print('Page Number: ',i,'\n')
print(summarize(page.extractText(),ratio=0.2))
print('\n\n')
def from_txt():
file=open(argv[1],'r')
text=file.read()
print(summarize(text,ratio=0.2))
if __name__=="__main__":
try:
filetype = argv[2]
if filetype=='url':
from_link()
elif filetype=='pdf':
from_pdf()
else:
from_txt()
except IndexError:
print("\nUsage:\n \tsummarize 'http:// url.to.summarize' url \n or \n \tsummarize 'path/to/file/file.pdf' pdf \n or \n \tsummarize 'path/to/file/file.txt' txt ")
|
[
"vighneshss@gmail.com"
] |
vighneshss@gmail.com
|
c9708fe103af2012e13994b656c45ba4a852077c
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/8kyu-interpreters-hq9-plus/Python/solution1.py
|
bdd53cce40278d9d04a75b8b2e61e0cc09d79511
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Python - 3.6.0
gets = lambda i: 's' if i != 1 else ''
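# Map each HQ9+ instruction to its output; '9' expands to the full "99 Bottles of Beer" lyrics.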
HQ9 = {
'H': 'Hello World!',
'Q': 'Q',
'9': '\n'.join(
f'{i} bottle{gets(i)} of beer on the wall, {i} bottle{gets(i)} of beer.\nTake one down and pass it around, {i - 1 if i > 1 else "no more"} bottle{gets(i - 1)} of beer on the wall.' for i in range(99, 0, -1)
) + '\nNo more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the wall.'
}.get
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
77f295368703e4c0c9e03efee7b637781e2efdc9
|
17ed1551bbb4435b6d816408bb69eafb27c7cc6d
|
/growl-http-proxy.py
|
f0d9a9ed5fe38e8c708b8549e25685afdb332c93
|
[
"WTFPL"
] |
permissive
|
brunobord/growl-http-proxy
|
8622a2dc20e0bbcbf5c7481c659ab517c83a8be9
|
4e575e1ae9d395055eab9c3ef7f693068de88c1a
|
refs/heads/master
| 2021-03-12T19:19:58.518156
| 2012-10-15T22:14:27
| 2012-10-15T22:14:27
| 6,206,709
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import argparse
from flask import Flask, request, abort, render_template
import Growl
import os
app = Flask(__name__)
APP_NAME = 'growl-http-proxy'
NOTIFICATIONS = ['update']
DEFAULT_ICON_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'icon.png')
def send_notification(title, message, sticky, icon, notification, priority):
notifier = Growl.GrowlNotifier(APP_NAME, NOTIFICATIONS, applicationIcon=icon)
notifier.register()
notifier.notify(notification, title, message, sticky=sticky, priority=priority)
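# POST / expects JSON or form data containing at least 'title' and 'message';
# 'sticky', 'notification' and 'priority' are optional.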
@app.route('/', methods=['POST'])
def send():
data = request.json or request.form or {}
if 'title' not in data or 'message' not in data:
abort(400) # Bad request
sticky = data.get('sticky') or False
icon_path = DEFAULT_ICON_PATH
icon = Growl.Image.imageFromPath(icon_path)
notification = data.get('notification') or 'update'
priority = int(data.get('priority') or 1)
send_notification(data.get('title'), data.get('message'),
sticky, icon, notification, priority)
return 'Message sent\n'
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Growl HTTP Proxy')
parser.add_argument('-d', '--debug', action="store_true", default=False)
args = parser.parse_args()
app.run(debug=args.debug)
|
[
"bruno@jehaisleprintemps.net"
] |
bruno@jehaisleprintemps.net
|
874570360011745971d66be0be7251d3126ff31a
|
658b10bae84ea77a13d25273266170d499bdad59
|
/blog/migrations/0001_initial.py
|
45e06cefd195c38b8cf0542f61d8802de048fd2c
|
[] |
no_license
|
du4ok/my-first-blog
|
9f1e117f41d4f8de4ccec82993d4498d4c464dd6
|
a5b4a3ab870fa17b9e67793c89811500be56ea0e
|
refs/heads/master
| 2020-07-20T17:46:00.119085
| 2016-11-14T20:31:22
| 2016-11-14T20:31:22
| 73,744,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-02 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=30)),
('title', models.CharField(max_length=100)),
('bodytext', models.TextField()),
('timestamp', models.DateTimeField()),
],
),
]
|
[
"iriver87@gmail.com"
] |
iriver87@gmail.com
|
b2432c7ce576836fc769e1c9a990bb2a1b00d91c
|
ef243d91a1826b490e935fa3f3e6c29c3cc547d0
|
/cv2/cv2/MergeExposures.py
|
7d68ec4c5e8da4d27c6ad8ddb544c23ea3973a7e
|
[] |
no_license
|
VentiFang/Python_local_module
|
6b3d0b22399e817057dfd15d647a14bb1e41980e
|
c44f55379eca2818b29732c2815480ee755ae3fb
|
refs/heads/master
| 2020-11-29T11:24:54.932967
| 2019-12-25T12:57:14
| 2019-12-25T12:57:14
| 230,101,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
# encoding: utf-8
# module cv2.cv2
# from F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2  # F:\Python\Python36\lib\site-packages\cv2\cv2.cp36-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2 as __cv2
class MergeExposures(__cv2.Algorithm):
# no doc
def process(self, src, times, response, dst=None): # real signature unknown; restored from __doc__
"""
process(src, times, response[, dst]) -> dst
. @brief Merges images.
.
. @param src vector of input images
. @param dst result image
. @param times vector of exposure time values for each image
. @param response 256x1 matrix with inverse camera response function for each pixel value, it should
. have the same number of channels as images.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
|
[
"5149528+ventifang@user.noreply.gitee.com"
] |
5149528+ventifang@user.noreply.gitee.com
|
05a6db46acd058f904eeb21596b3a1e9c2fec67c
|
62d489ced99e830920da02a0ba62572f144833cd
|
/Ques_21.py
|
122805da78c4a6a67adf44ea9bf2a8962c96565d
|
[] |
no_license
|
HarshSharma12/ProjectEulerPython
|
e8cc11d44a4ea917b5e7669667161aa1c635a0cd
|
c300a3d34fc99a0aa08047f1195eceaefd7d68c9
|
refs/heads/master
| 2016-09-06T17:07:23.183383
| 2014-10-12T13:56:51
| 2014-10-12T13:56:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 11 12:48:17 2014
@author: Harsh Sharma
Amicable numbers
Problem 21
Published on Friday, 5th July 2002, 06:00 pm; Solved by 74130
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b
are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20,
22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are
1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Answer = 31626
"""
from math import sqrt
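# Proper divisors of n: 1 plus every factor pair (i, n/i) for i up to sqrt(n).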
def divisors(n):
sqRoot = int(sqrt(n))
arr = [1]
for i in range(2,sqRoot+1):
if(n%i==0):
arr.append(i)
arr.append(n/i)
arr.sort()
return arr
def divSum(n):
d = divisors(n)
x=0
for i in d:
x+=i
return x
total = 0
for i in range(1, 10000):
newNum = divSum(i)
if (divSum(newNum) == i and newNum!=i):
total+=i
print 'New = ', newNum
print 'Original = ', i
print 'Sum = ', total
print '\n'
print 'Answer = ', total
|
[
"mail.hs.harsh@gmail.com"
] |
mail.hs.harsh@gmail.com
|
a21f249ea161330a50ec05a4b83dc19d497cf0ba
|
c174895f234affe017233c64d90dd522b77aea46
|
/core_1024/items.py
|
29a4f00bd290f9661d10266ee66b1ef537477602
|
[] |
no_license
|
starplanet/core_1024
|
316321efd7c8aa0cbd1b6a90e0efa41f1c2386ca
|
d83e618a3f01d08e9c06ff0c79a6c617f226c07a
|
refs/heads/master
| 2016-09-13T20:28:48.621859
| 2016-05-06T08:43:44
| 2016-05-06T08:43:44
| 58,192,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Core1024Item(scrapy.Item):
# define the fields for your item here like:
page_url = scrapy.Field()
title = scrapy.Field()
name = scrapy.Field()
av_girl = scrapy.Field()
av_format = scrapy.Field()
av_size = scrapy.Field()
image_urls = scrapy.Field()
images = scrapy.Field()
download_url = scrapy.Field()
image_path = scrapy.Field()
|
[
"zhangjinjie@jinjiedeMacBook-Pro.local"
] |
zhangjinjie@jinjiedeMacBook-Pro.local
|
9c0bf6f14b76ace7d83c6afd94b31c12d698f62b
|
dea486be1f69bc984632ff554e8c00280750bb49
|
/lib/environ.py
|
c6af26b607d4e8225e2f1e4870889ba1446cbf7c
|
[] |
no_license
|
cmkwong/RL_code
|
b20354a188c08d0a2c0cab78fb39577b7b425fa1
|
8b930747f4e9e54af6e8bf97b6bf109f7ab39b99
|
refs/heads/master
| 2022-04-13T14:46:40.094865
| 2020-03-14T14:25:15
| 2020-03-14T14:25:15
| 234,214,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,315
|
py
|
import gym
import gym.spaces
from gym.utils import seeding
import enum
import numpy as np
import collections
from . import data
DEFAULT_BARS_COUNT = 20
DEFAULT_COMMISSION_PERC = 0.1
class Actions(enum.Enum):
Skip = 0
Buy = 1
Close = 2
class State:
def __init__(self, bars_count, commission_perc, reset_on_close, reward_on_close=True, volumes=True, train_mode=True):
assert isinstance(bars_count, int)
assert bars_count > 0
assert isinstance(commission_perc, float)
assert commission_perc >= 0.0
assert isinstance(reset_on_close, bool)
assert isinstance(reward_on_close, bool)
self.bars_count = bars_count
self.commission_perc = commission_perc
self.reset_on_close = reset_on_close
self.reward_on_close = reward_on_close
self.volumes = volumes
self.train_mode = train_mode
def reset(self, data, date, extra_set, offset):
assert isinstance(data, dict)
assert offset >= self.bars_count - 1
self.have_position = False
self.open_price = 0.0
self._data = data
self._date = date
        self._extra_set = extra_set  # empty dict if there is no extra data
self.extra_indicator = False
self._offset = offset
def normalised_trend_data(self):
start = self._offset - self.bars_count + 1
end = self._offset + 1
# normalise the data from an array
x = 0
y = 0
target_data = np.ndarray(shape=(self.bars_count, self.extra_trend_size), dtype=np.float64)
for indicator in self._extra_set['trend'].values():
y = y + indicator.encoded_size
target_data[:, x:y] = indicator.normalise(start, end, self.train_mode)
x = y
y = x
return target_data
def normalised_status_data(self):
start = self._offset - self.bars_count + 1
end = self._offset + 1
target_data = np.ndarray(shape=(1, self.extra_status_size), dtype=np.float64)
# normalise the data from an array
x = 0
y = 0
for indicator in self._extra_set['status'].values():
y = y + indicator.encoded_size
target_data[0, x:y] = indicator.normalise(start, end, self.train_mode)
x = y
y = x
return target_data
@property
def shape_data(self):
# bars * (h, l, c, bc_o, v) + position_flag + rel_profit (since open)
self.extra_trend_size = 0
        if len(self._extra_set) != 0:
            if len(self._extra_set['trend']) != 0:
for trend_name in list(self._extra_set['trend'].keys()):
self.extra_trend_size += self._extra_set['trend'][trend_name].encoded_size
if self.volumes:
self.base_trend_size = 5
return (self.bars_count, self.base_trend_size + self.extra_trend_size)
else:
self.base_trend_size = 4
return (self.bars_count, self.base_trend_size + self.extra_trend_size)
@property
def shape_status(self):
self.base_status_size = 2
self.extra_status_size = 0
        if len(self._extra_set) != 0:
            if len(self._extra_set['status']) != 0:
for status_name in list(self._extra_set['status'].keys()):
self.extra_status_size += self._extra_set['status'][status_name].encoded_size
return (1, self.base_status_size + self.extra_status_size)
def encode(self): # p.336
"""
Convert current state into numpy array.
"""
encoded_data = collections.namedtuple('encoded_data', field_names=['data', 'status'])
data = np.ndarray(shape=self.shape_data, dtype=np.float64)
status = np.ndarray(shape=self.shape_status, dtype=np.float64)
shift_r = 0
# data stacking
        base_volume = self._data['volume'][self._offset - self.bars_count + 1]
for bar_idx in range(-self.bars_count + 1, 1):
shift_c = 0
data[shift_r, shift_c] = (self._data['high'][self._offset + bar_idx] - self._data['open'][self._offset + bar_idx]) / \
self._data['open'][self._offset + bar_idx]
shift_c += 1
data[shift_r, shift_c] = (self._data['low'][self._offset + bar_idx] - self._data['open'][self._offset + bar_idx]) / \
self._data['open'][self._offset + bar_idx]
shift_c += 1
data[shift_r, shift_c] = (self._data['close'][self._offset + bar_idx] - self._data['open'][self._offset + bar_idx]) / \
self._data['open'][self._offset + bar_idx]
shift_c += 1
data[shift_r, shift_c] = (self._data['close'][(self._offset - 1) + bar_idx] - self._data['open'][self._offset + bar_idx]) / \
self._data['open'][self._offset + bar_idx]
shift_c += 1
if self.volumes:
                data[shift_r, shift_c] = self._data['volume'][self._offset + bar_idx] / base_volume
shift_c += 1
shift_r += 1
# status stacking
status[0,0] = float(self.have_position)
if not self.have_position:
status[0,1] = 0.0
else:
status[0,1] = (self._data['close'][self._offset] - self.open_price) / self.open_price
# extra_data
normal_array = np.ndarray(shape=(self.bars_count, self.extra_trend_size), dtype=np.float64)
        if len(self._extra_set) != 0:
            if len(self._extra_set['trend']) != 0:
                normal_array = self.normalised_trend_data()
                data[:, self.base_trend_size:] = normal_array
            if len(self._extra_set['status']) != 0:
normal_array = self.normalised_status_data()
status[0, self.base_status_size:] = normal_array
return encoded_data(data=data, status=status)
def _cur_close(self):
"""
Calculate real close price for the current bar
"""
open = self._data['open'][self._offset]
rel_close = self._data['close'][self._offset]
return open * (1.0 + rel_close)
def step(self, action):
"""
Perform one step in our price, adjust offset, check for the end of prices
and handle position change
:param action:
:return: reward, done
"""
assert isinstance(action, Actions)
reward = 0.0
done = False
# don't need self._cur_close() because it is not relative price
close = self._data['close'][self._offset]
if action == Actions.Buy and not self.have_position:
self.have_position = True
self.open_price = close
reward -= self.commission_perc
elif action == Actions.Close and self.have_position:
reward -= self.commission_perc
done |= self.reset_on_close # done if reset_on_close
if self.reward_on_close:
reward += 100.0 * (close - self.open_price) / self.open_price
self.have_position = False
self.open_price = 0.0
self._offset += 1
prev_close = close
close = self._data['close'][self._offset]
done |= self._offset >= self._data['close'].shape[0]-1 # done if reached limit
if self.have_position and not self.reward_on_close:
reward += 100.0 * (close - prev_close) / prev_close # change with respect to last day close-price
return reward, done
class StocksEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, data, date, extra_set, bars_count=DEFAULT_BARS_COUNT,
commission=DEFAULT_COMMISSION_PERC, reset_on_close=True,
random_ofs_on_reset=True, reward_on_close=False, volumes=False, train_mode=True):
assert isinstance(data, dict)
self.universe_data = data
self.universe_date = date
self.universe_extra_set = extra_set # empty dict if there is no extra data
self._state = State(bars_count, commission, reset_on_close, reward_on_close=reward_on_close, volumes=volumes, train_mode=train_mode)
self.random_ofs_on_reset = random_ofs_on_reset
self.train_mode = train_mode
self.seed()
# get the shape first for creating the net
self.get_data_shape()
self.action_space = gym.spaces.Discrete(n=len(Actions))
self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf,
shape=(self._state.bars_count, self.data_size), dtype=np.float64)
def get_data_shape(self):
self.reset()
self.price_size = self._state.base_trend_size
self.trend_size = self._state.extra_trend_size
self.data_size = self.price_size + self.trend_size
self.status_size = self._state.base_status_size + self._state.extra_status_size
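    # Choose a start offset past the warm-up region required by the extra indicators (their max invalid_len).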
def offset_modify(self, prices, extra_set, train_mode):
available_start = 0
        if len(extra_set) != 0:
            # collect the invalid lengths, then take the max as the earliest usable start
            invalid_length = []
            if len(extra_set['trend']) != 0:
                for key in list(extra_set['trend'].keys()):
                    invalid_length.append(extra_set['trend'][key].invalid_len)
            if len(extra_set['status']) != 0:
for key in list(extra_set['status'].keys()):
invalid_length.append(extra_set['status'][key].invalid_len)
available_start = np.max(invalid_length)
bars = self._state.bars_count
if self.random_ofs_on_reset:
if train_mode:
offset = self.np_random.choice(range(available_start, prices['high'].shape[0] - bars * 10)) + bars
else:
offset = self.np_random.choice(prices['high'].shape[0] - bars * 10) + bars
else:
if train_mode:
offset = bars + available_start
else:
offset = bars
return offset
def reset(self):
# make selection of the instrument and it's offset. Then reset the state
self._instrument = self.np_random.choice(list(self.universe_data.keys()))
data = self.universe_data[self._instrument]
date = self.universe_date[self._instrument]
extra_set_ = {}
        if len(self.universe_extra_set) != 0:
extra_set_ = self.universe_extra_set[self._instrument]
offset = self.offset_modify(data, extra_set_, self.train_mode) # train_mode=False, random offset is different
self._state.reset(data, date, extra_set_, offset)
return self._state.encode()
def step(self, action_idx):
action = Actions(action_idx)
reward, done = self._state.step(action)
obs = self._state.encode()
info = {"instrument": self._instrument, "offset": self._state._offset}
return obs, reward, done, info
def render(self, mode='human', close=False):
pass
def close(self):
pass
def seed(self, seed=None):
self.np_random, seed1 = seeding.np_random(seed)
seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
return [seed1, seed2]
|
[
"47665725+cmkwong@users.noreply.github.com"
] |
47665725+cmkwong@users.noreply.github.com
|
a34e7441458961ad75e663aec37218d89eee0fd6
|
d400110ac8637883daa86aff7bce7fe49ad7f916
|
/option.py
|
f5dae12031a9a66329aae84b829bfd4ab96ab63d
|
[
"MIT"
] |
permissive
|
zdddw/SISN-Face-Hallucination
|
59e7ba74f60c57e115bda26b15a7cdd543d7fef6
|
a36d189e6e890f3a01e2a027ec54eec7b2db23a0
|
refs/heads/main
| 2023-09-03T20:36:51.860967
| 2021-11-08T08:07:20
| 2021-11-08T08:07:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=1)
# models
parser.add_argument("--pretrain", type=str)
parser.add_argument("--model", type=str, default="SISN")
# augmentations
parser.add_argument("--use_moa", action="store_true")
parser.add_argument("--augs", nargs="*", default=["none"])
parser.add_argument("--prob", nargs="*", default=[1.0])
parser.add_argument("--mix_p", nargs="*")
parser.add_argument("--alpha", nargs="*", default=[1.0])
parser.add_argument("--aux_prob", type=float, default=1.0)
parser.add_argument("--aux_alpha", type=float, default=1.2)
# dataset
parser.add_argument("--dataset_root", type=str, default="dataset/FFHQ/1024X1024")
parser.add_argument("--dataset", type=str, default="FSR")
parser.add_argument("--train_val_range", type=str, default="1-850/851-950")
parser.add_argument("--scale", type=int, default=4)
# training setups
parser.add_argument("--lr", type=float, default=2e-4)
parser.add_argument("--decay", type=str, default="25-50-75")
parser.add_argument("--gamma", type=int, default=0.5)
parser.add_argument("--patch_size", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=10)
parser.add_argument("--max_steps", type=int, default=700000)
parser.add_argument("--eval_steps", type=int, default=1000)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--gclip", type=int, default=0)
# misc
parser.add_argument("--test_only", action="store_true")
parser.add_argument("--save_result", action="store_true")
parser.add_argument("--ckpt_root", type=str, default="./pt")
parser.add_argument("--save_root", type=str, default="./output")
return parser.parse_args()
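# Overwrite parsed options with fixed model/training defaults; --use_moa enables
# the full mixture-of-augmentations policy.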
def make_template(opt):
opt.strict_load = opt.test_only
opt.num_groups = 10
opt.num_blocks = 10
opt.num_channels = 64
opt.reduction = 16
opt.res_scale = 1.0
opt.max_steps = 1000000
opt.decay = "50-100-150-200-250-300-350-400"
opt.gclip = 0.5 if opt.pretrain else opt.gclip
# evaluation setup
opt.crop = 6 if "FSR" in opt.dataset else 0
opt.crop += opt.scale
opt.eval_y_only = False
# default augmentation policies
if opt.use_moa:
opt.augs = ["blend", "rgb", "mixup", "cutout", "cutmix", "cutmixup", "cutblur"]
opt.prob = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
opt.alpha = [0.6, 1.0, 1.2, 0.001, 0.7, 0.7, 0.7]
opt.aux_prob, opt.aux_alpha = 1.0, 1.2
opt.mix_p = None
def get_option():
opt = parse_args()
make_template(opt)
return opt
|
[
"noreply@github.com"
] |
noreply@github.com
|
d40d4c0886ebeb7c5e6c46de7f421799756c92b7
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_318/ch23_2019_03_27_15_00_44_973644.py
|
e268beaac58b3819e4295d3aa5d048c89b2d4156
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
def verifica_idade(x):
    if(x>20):
        print("Allowed in the USA and BRAZIL")
        return x
    if(x<21 and x>17):
        print("Allowed in BRAZIL")
        return x
    if(x<18):
        print("Not allowed")
        return x
|
[
"you@example.com"
] |
you@example.com
|
c46b8a2458a636eea1bde0cc9df07da0126d1e1c
|
0c13891448e6c3136e2f651c776d1d11edee2577
|
/src/template_method.py
|
91d30a13953d467cfa30df4a7500ae59f97997f2
|
[
"MIT"
] |
permissive
|
MrRezoo/design-patterns-python
|
31cb7b73ae05c5bd361eb3455df234c20529f465
|
8f8e2501ad8e05f1a75ce5be659d926c0ec99698
|
refs/heads/master
| 2023-08-01T22:01:01.186910
| 2021-10-02T07:57:49
| 2021-10-02T07:57:49
| 349,936,987
| 8
| 1
|
MIT
| 2021-04-07T14:55:10
| 2021-03-21T08:13:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
"""
Behavioral pattern:
Template method
Example:
    use one ABC class to hold the fixed sequence of steps that several concrete classes share
"""
from abc import ABC, abstractmethod
class Top(ABC):
def template_method(self):
self.first_common()
self.second_common()
self.third_require()
self.fourth_require()
self.hook()
def first_common(self):
print('I am first common...')
def second_common(self):
print('I am second common')
@abstractmethod
def third_require(self):
pass
@abstractmethod
def fourth_require(self):
pass
def hook(self):
pass
class One(Top):
def third_require(self):
print('This is Third require from One...')
def fourth_require(self):
print('This is Fourth require from One...')
def hook(self):
print('This is Hook from One')
class Two(Top):
def third_require(self):
print('This is Third require from Two...')
def fourth_require(self):
print('This is Fourth require from Two...')
def client(class_):
class_.template_method()
if __name__ == '__main__':
client(Two())
|
[
"rezam578@gmail.com"
] |
rezam578@gmail.com
|
f87b02a9fd6d6f6edb041ee8e26bca60f2574e32
|
786f29dc7be151b716433bbe14db7552a683b8af
|
/bk3d_to_huxing_test.py
|
75d1d7350a78108c6e2451b03995df5d9b457c98
|
[] |
no_license
|
Hengle/my_test_project
|
bfea2e32734b921ee7ff6da8d18956a24f0ad64f
|
a9f30f883a7bd906c7ed346c79bc8f1fd53db11d
|
refs/heads/master
| 2023-03-22T08:36:32.102048
| 2021-03-08T08:10:25
| 2021-03-08T08:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79,682
|
py
|
#!/usr/bin/env python
# encoding: utf-8
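# Sample Beike Rushi (贝壳·如视) VR scan payload, presumably fixture input for the
# bk3d-to-floorplan (huxing) conversion test.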
bk_3d_data = {
"defaultMode": "",
"work": {
"user_id": 1000000022111887,
"project_id": "auto3d--dWuf-5Gjg_UBUYzi65czt",
"name": "贝壳·如视 | 真实,如你所视",
"is_active": True,
"title": "34a9d4021c7dfe48b2516fd9a98f2330",
"description": "",
"picture_url": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg",
"picture_checksum": "31bcf5cc3b7b3a22cafd028d9fa57a02",
"initial": {
"pano_index": 0,
"heading": 0,
"longitude": 3.0838206627680336,
"latitude": 0,
"fov": 95,
"flag_position": [
0.27164421074434364,
-1.148597545783536,
1.5910047808386651
],
"pano": 0
},
"machine_id": "RA01838",
"machine_version": "3",
"title_picture_url": "http://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc1_GyzGyfF73_1.jpg",
"title_picture_checksum": "c3f0057fc8499dde008f15eca52c335d",
"house_layout": {
"bedroom_amount": 3,
"parlor_amount": 1,
"cookroom_amount": 1,
"toilet_amount": 1
},
"standard_floor_plan_url": "",
"standard_floor_plan_checksum": "",
"hierarchy_floor_plan": [
{
"url": "https://vrlab-public.ljcdn.com/release/vrcustomer/hierarchy_0_5c614d16-be70-4ee5-afd2-3f88e189ee3e.png",
"index": 0,
"checksum": "2be749db94a85fad8266cf0779d4978e"
}
],
"outline_floor_plan": [
{
"url": "https://vrlab-public.ljcdn.com/release/vrcustomer/outline_0_5c614d16-be70-4ee5-afd2-3f88e189ee3e.png",
"index": 0,
"checksum": "66b6fe144c6dabeb357a76a17b8a9234"
}
],
"auto_floor_plan_url": "http://vrint.api.lianjia.com/floornet/auto3d--dWuf-5Gjg_UBUYzi65czt.json",
"auto_floor_plan_checksum": "2a0356149aaf636bece331f041ca745b",
"status": 1,
"floor_plan_type": 0,
"editor_type": 2,
"panorama_url": "",
"create_time": "2020年07月05日",
"work_code": "eQOd3gK40NL3m2Yy",
"house_info": {
"relation_id": 7231622,
"city_id": 310000,
"source": "ALLIANCE",
"house_id": "0",
"house_code": "107102634627"
},
"model": {
"file_url": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/model/auto3d--dWuf-5Gjg_UBUYzi65czt.at3d",
"material_base_url": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/",
"material_textures": [
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_0.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_1.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_2.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_3.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_4.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_5.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_6.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_7.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_8.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_9.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_10.jpg",
"https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/materials/texture_11.jpg"
],
"score": 77.91,
"type": 0,
"work_code": "eQOd3gK40NL3m2Yy"
},
"panorama": {
"count": 17,
"base_url": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/",
"type": 0,
"list": [
{
"index": 0,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/0/91879c5ff9eb70ace0cc2fb6947e347f/0_b.jpg"
},
{
"index": 1,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/1/ab78298dc32b8500acd3cd096dc00ad2/1_b.jpg"
},
{
"index": 2,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/2/362cabc1bcf48643e6a204d250846ba7/2_b.jpg"
},
{
"index": 3,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/3/7ec346ec5572b13b9003bfb86c6c03ca/3_b.jpg"
},
{
"index": 4,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/4/a04f7cd712f7f38eb72e0d4443146339/4_b.jpg"
},
{
"index": 5,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/5/5059f332863f645130d796b9b625627d/5_b.jpg"
},
{
"index": 6,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/6/52c8a20c5fb82b48a9773ae6ee733980/6_b.jpg"
},
{
"index": 7,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/7/27d10f5733b2eb03c37751535424c76d/7_b.jpg"
},
{
"index": 8,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/8/84f79bad3300746ca5750392426a6367/8_b.jpg"
},
{
"index": 9,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/9/1df3172b098d5a9a5629f429b1208f68/9_b.jpg"
},
{
"index": 10,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/10/319dc3a45d06a2021f9e7431f274d47e/10_b.jpg"
},
{
"index": 11,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/11/95a070c9736c72a8e94ccff200c37522/11_b.jpg"
},
{
"index": 12,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/12/6d4bad25fa92e9461056c1eaadc93e70/12_b.jpg"
},
{
"index": 13,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/13/2b9b1a2b485e4dd683ce5d41ee9a0496/13_b.jpg"
},
{
"index": 14,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_f_BjInUD.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/14/3e70bda28ffb996d55ae195bd6b2778f/14_b.jpg"
},
{
"index": 15,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_l_qgwXXM.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/15/ed7f794182dcbe686bd3ac6a839f3ecf/15_b.jpg"
},
{
"index": 16,
"up": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_u.jpg",
"down": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_d.jpg",
"left": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_l.jpg",
"right": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_r.jpg",
"front": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_f.jpg",
"back": "https://vrlab-public.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/images/cube_2048/16/3870e7385a96a9851cdd38ca05b58517/16_b.jpg"
}
],
"work_code": "eQOd3gK40NL3m2Yy"
},
"observers": [
{
"visible_nodes": [
1,
2,
3,
5,
6,
7,
8,
9,
12
],
"accessible_nodes": [
1,
2,
3,
5,
6,
7,
8,
9,
12
],
"index": 0,
"quaternion": {
"w": -0.03691231474028851,
"x": -0.017175484923684377,
"y": -0.9991705628587692,
"z": 0.0008194003178188448
},
"standing_position": [
-0.4384010136127472,
-1.148597545783536,
-1.1174700260162354
],
"position": [
-0.4384010136127472,
-0.07818260043859482,
-1.1174700260162354
],
"floor_index": 0,
"offset_point_count": 938,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
2,
6,
7,
8,
10
],
"accessible_nodes": [
0,
2,
6,
7,
8,
10
],
"index": 1,
"quaternion": {
"w": -0.9173821543284494,
"x": -0.004080708476985418,
"y": -0.3979596399723817,
"z": -0.004632122139030195
},
"standing_position": [
-0.22841699421405792,
-1.14646548323973,
0.7902659773826599
],
"position": [
-0.22841699421405792,
-0.07862100005149841,
0.7902659773826599
],
"floor_index": 0,
"offset_point_count": 2434,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
1,
3,
4,
6,
7,
10
],
"accessible_nodes": [
0,
1,
3,
4,
6,
7,
10
],
"index": 2,
"quaternion": {
"w": -0.901708990654677,
"x": -0.009752170104207425,
"y": -0.4321979135862475,
"z": -0.005545547782353119
},
"standing_position": [
0.8500419855117798,
-1.1450893139625224,
2.230370044708252
],
"position": [
0.8500419855117798,
-0.07716090232133865,
2.230370044708252
],
"floor_index": 0,
"offset_point_count": 1783,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
2,
4,
7,
8
],
"accessible_nodes": [
0,
2,
4,
7,
8
],
"index": 3,
"quaternion": {
"w": -0.981662950060749,
"x": -0.012802354861816396,
"y": -0.19008506250833188,
"z": -0.006451386738944062
},
"standing_position": [
0.9633380174636841,
-1.1438182371956096,
3.3688900470733643
],
"position": [
0.9633380174636841,
-0.07024019956588745,
3.3688900470733643
],
"floor_index": 0,
"offset_point_count": 2802,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
2,
3
],
"accessible_nodes": [
2,
3
],
"index": 4,
"quaternion": {
"w": -0.9923227548397924,
"x": -0.009719494712311852,
"y": -0.12313786532770522,
"z": -0.0061766570681094065
},
"standing_position": [
0.37716901302337646,
-1.142371180838059,
4.507740020751953
],
"position": [
0.37716901302337646,
-0.0767567977309227,
4.507740020751953
],
"floor_index": 0,
"offset_point_count": 8209,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
6,
7,
12,
14,
15
],
"accessible_nodes": [
0,
6,
7,
12,
14,
15
],
"index": 5,
"quaternion": {
"w": -0.9943867511762224,
"x": -0.007193857968871499,
"y": -0.10503210069601222,
"z": -0.01055993056167469
},
"standing_position": [
-2.6099700927734375,
-1.1481385566220215,
-1.1990200281143188
],
"position": [
-2.6099700927734375,
-0.08056870102882385,
-1.1990200281143188
],
"floor_index": 0,
"offset_point_count": 7328,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
1,
2,
5,
7,
10,
11,
14,
15
],
"accessible_nodes": [
0,
1,
2,
5,
7,
10,
11,
14,
15
],
"index": 6,
"quaternion": {
"w": 0.007681727724638756,
"x": -0.013421419675251883,
"y": -0.999875537756284,
"z": 0.003125210785454978
},
"standing_position": [
-1.9352400302886963,
-1.1498299360240667,
-2.700309991836548
],
"position": [
-1.9352400302886963,
-0.072222501039505,
-2.700309991836548
],
"floor_index": 0,
"offset_point_count": 2542,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
1,
2,
3,
5,
6,
8,
9
],
"accessible_nodes": [
0,
1,
2,
3,
5,
6,
8,
9
],
"index": 7,
"quaternion": {
"w": -0.18508161356716557,
"x": -0.013675264302156726,
"y": -0.9826193548461816,
"z": 0.004120625158308685
},
"standing_position": [
-0.8597019910812378,
-1.1502490169119766,
-2.804450035095215
],
"position": [
-0.8597019910812378,
-0.07288269698619843,
-2.804450035095215
],
"floor_index": 0,
"offset_point_count": 673,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
1,
3,
7,
9,
14
],
"accessible_nodes": [
0,
1,
3,
7,
9,
14
],
"index": 8,
"quaternion": {
"w": 0.09462107959356941,
"x": 0.006155912741776892,
"y": -0.9953863287212414,
"z": 0.014663402372020356
},
"standing_position": [
-0.7484850287437439,
-1.151657936938551,
-3.8622100353240967
],
"position": [
-0.7484850287437439,
-0.07736130058765411,
-3.8622100353240967
],
"floor_index": 0,
"offset_point_count": 783,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
7,
8
],
"accessible_nodes": [
0,
7,
8
],
"index": 9,
"quaternion": {
"w": 0.15628516565026537,
"x": 0.006095048776918799,
"y": -0.9876574707627201,
"z": 0.008397191176492545
},
"standing_position": [
-0.6069440245628357,
-1.1535471678674418,
-5.456759929656982
],
"position": [
-0.6069440245628357,
-0.06334959715604782,
-5.456759929656982
],
"floor_index": 0,
"offset_point_count": 170,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
1,
2,
6,
11
],
"accessible_nodes": [
1,
2,
6,
11
],
"index": 10,
"quaternion": {
"w": 0.5458379970807644,
"x": 0.01959252850313683,
"y": -0.8376602042755396,
"z": 0.0015465959212839072
},
"standing_position": [
-2.338629961013794,
-1.152141388380851,
-4.635739803314209
],
"position": [
-2.338629961013794,
-0.05199360102415085,
-4.635739803314209
],
"floor_index": 0,
"offset_point_count": 782,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
6,
10
],
"accessible_nodes": [
6,
10
],
"index": 11,
"quaternion": {
"w": 0.5287496647322149,
"x": 0.028867335985526186,
"y": -0.8482851341244358,
"z": 0.0016776736820170012
},
"standing_position": [
-2.3377299308776855,
-1.153534165225507,
-5.852550029754639
],
"position": [
-2.3377299308776855,
-0.00972772017121315,
-5.852550029754639
],
"floor_index": 0,
"offset_point_count": 43,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
0,
5,
13
],
"accessible_nodes": [
0,
5,
13
],
"index": 12,
"quaternion": {
"w": 0.856249544369355,
"x": 0.006195543134385364,
"y": -0.5164934560808041,
"z": 0.005729135410147732
},
"standing_position": [
-4.057559967041016,
-1.1479340572919843,
-1.3465700149536133
],
"position": [
-4.057559967041016,
-0.06651479750871658,
-1.3465700149536133
],
"floor_index": 0,
"offset_point_count": 238,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
12
],
"accessible_nodes": [
12
],
"index": 13,
"quaternion": {
"w": 0.5707795072109211,
"x": -0.010381427471893303,
"y": -0.8209927916956682,
"z": 0.008591230817930149
},
"standing_position": [
-4.630559921264648,
-1.148490922924874,
-1.9553600549697876
],
"position": [
-4.630559921264648,
-0.07490730285644531,
-1.9553600549697876
],
"floor_index": 0,
"offset_point_count": 774,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
5,
6,
8,
15,
16
],
"accessible_nodes": [
5,
6,
8,
15,
16
],
"index": 14,
"quaternion": {
"w": -0.9338613948482004,
"x": -0.0007719763136449893,
"y": -0.3572759723132398,
"z": -0.016005316906433954
},
"standing_position": [
-2.9427900314331055,
-1.1460521774279584,
0.5403990149497986
],
"position": [
-2.9427900314331055,
-0.0819661021232605,
0.5403990149497986
],
"floor_index": 0,
"offset_point_count": 1028,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
5,
6,
14,
16
],
"accessible_nodes": [
5,
6,
14,
16
],
"index": 15,
"quaternion": {
"w": -0.9953331231057195,
"x": -0.009705864629332006,
"y": -0.09596429306927408,
"z": -0.002936113323321471
},
"standing_position": [
-3.02662992477417,
-1.1431833536804858,
3.031290054321289
],
"position": [
-3.02662992477417,
-0.07429129630327225,
3.031290054321289
],
"floor_index": 0,
"offset_point_count": 386,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
},
{
"visible_nodes": [
14,
15
],
"accessible_nodes": [
14,
15
],
"index": 16,
"quaternion": {
"w": 0.8111314768740711,
"x": 0.0036216260428032303,
"y": -0.5848109732902798,
"z": 0.006981715679511507
},
"standing_position": [
-4.644269943237305,
-1.1455537872979487,
0.59791100025177
],
"position": [
-4.644269943237305,
-0.07728219777345657,
0.59791100025177
],
"floor_index": 0,
"offset_point_count": 910,
"device_info": None,
"work_code": "eQOd3gK40NL3m2Yy"
}
],
"modelAsync": True
},
"title_info": {
"format": "saas",
"title": "山鑫公寓 3室2厅",
"sell_type": "",
"labels": [
{
"label": "售价",
"icon": "price",
"baseValue": 1420000,
"value": "约142万",
"block": False,
"blockFour": False,
"isRed": True
},
{
"label": "单价",
"icon": "unit_price",
"baseValue": 17318,
"value": "17,318元/平",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "户型",
"icon": "house_model",
"baseValue": "3室2厅",
"value": "3室2厅",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "面积",
"icon": "area",
"baseValue": "82㎡",
"value": "82㎡",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "挂牌",
"icon": "se_ctime",
"baseValue": "2020年06月06日",
"value": "2020年06月06日",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "朝向",
"icon": "orientation",
"baseValue": "南 北",
"value": "南 北",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "年代",
"icon": "building_finish_year",
"baseValue": "1997年",
"value": "1997年",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "楼型",
"icon": "building_type",
"baseValue": "板楼",
"value": "板楼",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "城市",
"icon": "city_name",
"baseValue": "上海",
"value": "上海",
"block": False,
"blockFour": False,
"isRed": False
},
{
"label": "楼层",
"icon": "floor_state",
"baseValue": "高楼层/6层",
"value": "高楼层/6层",
"block": False,
"blockFour": False,
"isRed": False
}
],
"schema": "lianjiabeike://ershou/detail?houseCode=107102634627",
"url": "https://m.ke.com/sh/ershoufang/107102634627.html",
"pc_url": "https://sh.ke.com/ershoufang/107102634627.html",
"more_text": "去贝壳查看更多",
"more_text_icon_type": "shell",
"mapping_title": "房源信息",
"agent": {
"avatar": "https://image1.ljcdn.com/usercenter/images/uc_ehr_avatar/3a87fc0a-5a8e-41b4-ad03-57707b632a0d.jpg.200x.jpg?v=1",
"id": "1000000026661940",
"name": "王鑫鹏",
"title": "买卖经纪人",
"virtual_phone": "4008896792,80359",
"im_enable": True,
"is_async_virtual_phone": False,
"certificate": None,
"card": {
"service_score": "",
"brand_name": "上海链家",
"store_name": "蒙山中学总店B分行",
"recent_sales_number": "暂无",
"average_sale_cycle_number": "暂无",
"recent_home_tours_number": "8"
}
},
"coordinates": {
"longitude": "121.3456377",
"latitude": "30.73340704"
},
"loading_logo_url": "https://vrlab-public.ljcdn.com/release/web/loading/beike.0c74b987.png",
"watermark_logo_url": "https://vrlab-public.ljcdn.com/frontend/watermark_logo_beike.png",
"platfond_url": "https://vrlab-public.ljcdn.com/frontend/platfond.png",
"recommends": [],
"introduction": {
"type": "2",
"pic_url": "https://vrlab-image4.ljcdn.com/release/web/information/mobile.146166d0.png",
"content": "如视是国内首个大规模将VR技术应用于不动产领域的服务提供商,为全行业提供三维空间的采集、展示能力。打造了VR看房、AI讲房、VR带看等一系列低成本、高效的看房产品。并且推出贝壳未来家服务,通过AI技术和海量真实空间数据,为用户在看房同时提供个性化的优质设计方案,满足更多用户选房和装修设计的双重需求。",
"title": "玩转黑科技·用VR记录家",
"more_text": "前往官网",
"logo_url": "https://vrlab-public.ljcdn.com/frontend/intro_logo.png",
"url": "https://realsee.com/",
"download_url": "https://h5.realsee.com/appDownload",
"lastContent": "如视是国内首个大规模将VR技术应用于不动产领域的服务提供商,为全行业提供三维空间的采集、展示能力..."
},
"realsee_introduction": {
"title": "产品简介",
"logo_url": "https://vrlab-public.ljcdn.com/frontend/intro_logo.png",
"content": "如视是国内首个大规模将VR技术应用于不动产领域的服务提供商。旗下现有产品VR看房和VR讲房。VR看房通过智能扫描设备和三维场景构建算法,实现了对海量多类房源的规模化数据采集,真实还原房屋的三维结构、户型朝向、装修内饰等信息。VR讲房在VR看房的基础上引入专业经纪人的语音讲解,给用户提供关于房屋的户型分析、周边配套等参考信息,旨在打造出更加高效便捷的看房新体验。",
"url": "https://realsee.com/",
"more_text": "去官网了解更多",
"summary": "本网站\"VR看房\"影像服务由如视团队提供技术支持",
"summary_logo_url": ""
},
"declaration": {
"title": "权责声明",
"type": "shell"
},
"page_title_prefix": "贝壳·VR看房",
"page_title": "山鑫公寓 3室2厅",
"share_config": {
"title_prefix": "VR看房",
"title": "山鑫公寓 3室2厅",
"description": "沙发上,地铁上,说看房,就看房!",
"picture_url": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/200x/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjIwMHgyMDAucG5n/gravity/center/",
"mini_program_picture_url": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/600x/crop/600x480/gravity/center/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjYwMHg0ODAucG5n/gravity/center/",
"shareURL": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/"
},
"config": {
"measurement_enable": False,
"wall_card_enable": False,
"floor_plan_area_enable": True,
"side_map_enable": True,
"declaration_enable": True,
"watermark_logo_click_enable": True,
"tag_enable": True,
"lite_version_enable": False,
"scale_outline_enable": False,
"im_enable": True,
"webp_enable": False,
"new_live_enable": False,
"lazy_loading_phone_enable": False,
"decoration_enable": True,
"tv_video_enable": False,
"guidance_enable": True,
"blocked_reload_enable": True,
"image_mogr2_enable": True,
"agent_navigation_enable": False,
"live_enable": True,
"minimize_enable": True,
"resblock_panorama_enable": False,
"high_resolution_enable": True,
"hidden_wechat_icon_enable": False,
"hidden_weibo_icon_enable": False,
"hidden_measurement_icon_enable": False,
"hidden_tag_icon_enable": False,
"hidden_glasses_mode_icon_enable": False,
"hidden_feedback_icon_enable": False,
"hidden_render_icon_enable": True,
"panorama_min_latitude_degree": "-45",
"panorama_max_latitude_degree": "45",
"max_fov": 100,
"min_fov": 40,
"decoration_url": "https://myhome.realsee.com/ke/nQ6OwRkJSwgZ26rw/eEJwPYmZE6saPYDy/4rJ5MwQyVDQnSVcES4S6SOvl32jYn0Po/",
"is_sold": False,
"is_valid": True,
"vr_object_enable": True,
"new_ai_navigation_enable": True,
"proxy_cache_enable": True,
"new_year_activity_enable": False,
"photo_cdn_enable": False,
"websocket_endpoint": "wss://ws-access.realsee.com/endpoint/v1?open_app_id=1007&real_uri=single/v1/websocket",
"location_outage": None,
"live_lottery_enable": False
},
"area": "82㎡",
"business_info": {},
"decoration_picture_url": "https://vrlab-image4.ljcdn.com/release/auto3dhd/614e8790f193a997293e1d14f58ab4d8/screenshot/1593936770_0/pc0_6WdHBuNZx.jpg?imageMogr2/quality/50/thumbnail/750x",
"frame_list": [],
"resblock_id": "",
"frame_id": "",
"resblock_image_list": [],
"resblock_vr_list": [],
"related_vr_list": [],
"voice_type": "10002",
"http2_enable": False,
"project_name": "",
"floor_plan_area_enable": True,
"minimize_enable": True,
"hidden_render_icon_enable": True,
"callback_info": "{\"ad_id\":\"100010425\",\"medium_id\":\"100000035\",\"dig_v\":\"{\\\"u\\\":1000000026661940,\\\"v\\\":\\\"V1\\\",\\\"s\\\":\\\"NATURAL\\\",\\\"adId\\\":100010425,\\\"flow\\\":\\\"natural\\\",\\\"b\\\":\\\"HouseApproBrokerAgentBuilder\\\",\\\"p\\\":\\\"\\\",\\\"g\\\":\\\"\\\",\\\"sid\\\":\\\"1000000026661940_5011000018526\\\",\\\"rid\\\":\\\"5389733740111428065\\\"}\"}",
"ai_entry_url": "",
"resblock_panorama_enable": False,
"photo_cdn_enable": False,
"location_outage": None,
"websocket_endpoint": "wss://ws-access.realsee.com/endpoint/v1?open_app_id=1007&real_uri=single/v1/websocket",
"max_fov": 100,
"min_fov": 40,
"viewCount": 0
},
"agentInfo": {
"avatar": "https://image1.ljcdn.com/usercenter/images/uc_ehr_avatar/3a87fc0a-5a8e-41b4-ad03-57707b632a0d.jpg.200x.jpg?v=1",
"id": "1000000026661940",
"name": "王鑫鹏",
"title": "买卖经纪人",
"virtual_phone": "4008896792,80359",
"im_enable": True,
"is_async_virtual_phone": False,
"certificate": None,
"card": {
"service_score": "",
"brand_name": "上海链家",
"store_name": "蒙山中学总店B分行",
"recent_sales_number": "暂无",
"average_sale_cycle_number": "暂无",
"recent_home_tours_number": "8"
}
},
"minimap": {
"outline": [
{
"url": "https://vrlab-public.ljcdn.com/release/vrcustomer/outline_0_5c614d16-be70-4ee5-afd2-3f88e189ee3e.png",
"index": 0,
"checksum": "66b6fe144c6dabeb357a76a17b8a9234"
}
],
"bounding": {
"min": {
"x": 30921.125,
"y": 30989.942307692312
},
"max": {
"x": 40084.125,
"y": 45076.94230769231
},
"origin": {
"x": 37500,
"y": 37500
}
},
"roomLabels": [
[
{
"x": 37266.603759765625,
"y": 37387.442138671875,
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"x": 34459.5,
"y": 42412,
"name": "卧室B",
"localName": "卧室B",
"size": 10065329.142857075
},
{
"x": 36963.5,
"y": 42412,
"name": "厨房",
"localName": "厨房",
"size": 5229956.571428537
},
{
"x": 33085,
"y": 39383,
"name": "卫生间",
"localName": "卫生间",
"size": 5763998.571428537
},
{
"x": 33696.5,
"y": 35803.5,
"name": "卧室A",
"localName": "卧室A",
"size": 14784179
},
{
"x": 37634.20361328125,
"y": 33422.26025390625,
"name": "书房",
"localName": "书房",
"size": 9539899.57776177
}
]
]
},
"tags": [],
"roomObservers": [
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-a1f1075f-5592-4034-ca0e-f5c959343774",
"name": "书房",
"localName": "书房",
"size": 9539899.57776177
},
{
"id": "u-a1f1075f-5592-4034-ca0e-f5c959343774",
"name": "书房",
"localName": "书房",
"size": 9539899.57776177
},
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-5e8b1a54-264b-43c6-c5d1-4023f1925d68",
"name": "客厅",
"localName": "客厅",
"size": 21196149.901331306
},
{
"id": "u-c5875c68-fd7f-449e-d8b9-b202a0694cc0",
"name": "厨房",
"localName": "厨房",
"size": 5229956.571428537
},
{
"id": "u-c5875c68-fd7f-449e-d8b9-b202a0694cc0",
"name": "厨房",
"localName": "厨房",
"size": 5229956.571428537
},
{
"id": "u-760a1504-8770-4b59-a643-f6d64fbbd411",
"name": "卧室B",
"localName": "卧室B",
"size": 10065329.142857075
},
{
"id": "u-760a1504-8770-4b59-a643-f6d64fbbd411",
"name": "卧室B",
"localName": "卧室B",
"size": 10065329.142857075
},
{
"id": "u-4ea5a799-6005-4bfd-def7-7e0006593a01",
"name": "卫生间",
"localName": "卫生间",
"size": 5763998.571428537
},
{
"id": "u-4ea5a799-6005-4bfd-def7-7e0006593a01",
"name": "卫生间",
"localName": "卫生间",
"size": 5763998.571428537
},
{
"id": "u-eb18dc95-3ea4-4a8b-c2f0-8592b5ba9fac",
"name": "卧室A",
"localName": "卧室A",
"size": 14784179
},
{
"id": "u-eb18dc95-3ea4-4a8b-c2f0-8592b5ba9fac",
"name": "卧室A",
"localName": "卧室A",
"size": 14784179
},
{
"id": "u-eb18dc95-3ea4-4a8b-c2f0-8592b5ba9fac",
"name": "卧室A",
"localName": "卧室A",
"size": 14784179
}
],
"roomRules": {
"0,1,2,5,6,7": [
{
"x": 1.504013222587011,
"z": -1.5289423076910533,
"observers": [
0,
1,
2,
5
]
},
{
"x": 1.4963865693360276,
"z": 2.568710607656314,
"observers": [
0,
1,
2,
5,
7
]
},
{
"x": -1.967875,
"z": 2.60580437981509,
"observers": [
0,
1,
2,
6,
7
]
},
{
"x": -1.967875,
"z": -0.5789423076923121,
"observers": [
0,
1,
2,
5,
6,
7
]
},
{
"x": -3.1904464285714274,
"z": -0.5789423076923121,
"observers": [
0,
5,
6,
7
]
},
{
"x": -3.1904464285714274,
"z": -3.188942307692312,
"observers": [
0,
1,
5,
6,
7
]
},
{
"x": 0.279125,
"z": -3.188942307692312,
"observers": [
0,
1,
5,
6,
7
]
},
{
"x": 0.279125,
"z": -1.528942307692312,
"observers": [
0,
1,
2,
5,
6,
7
]
},
{
"x": 1.504013222587011,
"z": -1.528942307692312,
"observers": [
0,
1,
2,
5
]
}
],
"10,11": [
{
"x": -4.608732142857145,
"z": -6.516942307692312,
"observers": [
10,
11
]
},
{
"x": -1.4711607142857102,
"z": -6.516942307692312,
"observers": [
10,
11
]
},
{
"x": -1.4711607142857102,
"z": -3.308942307692312,
"observers": [
10,
11
]
},
{
"x": -4.608732142857145,
"z": -3.308942307692312,
"observers": [
10,
11
]
},
{
"x": -4.608732142857145,
"z": -6.516942307692312,
"observers": [
10,
11
]
}
],
"8,9": [
{
"x": 0.279125,
"z": -6.516942307692312,
"observers": [
8,
9
]
},
{
"x": 0.279125,
"z": -3.308942307692312,
"observers": [
8,
9
]
},
{
"x": -1.35116071428571,
"z": -3.308942307692312,
"observers": [
8,
9
]
},
{
"x": -1.35116071428571,
"z": -6.516942307692312,
"observers": [
8,
9
]
},
{
"x": 0.279125,
"z": -6.516942307692312,
"observers": [
8,
9
]
}
],
"12,13": [
{
"x": -4.668732142857145,
"z": -3.188942307692312,
"observers": [
12,
13
]
},
{
"x": -3.3104464285714275,
"z": -3.188942307692312,
"observers": [
12,
13
]
},
{
"x": -3.3104464285714275,
"z": -0.5789423076923121,
"observers": [
12,
13
]
},
{
"x": -5.518875,
"z": -0.5789423076923121,
"observers": [
12,
13
]
},
{
"x": -5.518875,
"z": -3.188942307692312,
"observers": [
12,
13
]
},
{
"x": -4.668732142857145,
"z": -3.188942307692312,
"observers": [
12,
13
]
}
],
"14,15,16": [
{
"x": -5.518875,
"z": 3.8500576923076877,
"observers": [
14,
15,
16
]
},
{
"x": -5.518875,
"z": -0.45894230769231215,
"observers": [
14,
15,
16
]
},
{
"x": -2.087875,
"z": -0.45894230769231215,
"observers": [
14,
15,
16
]
},
{
"x": -2.087875,
"z": 3.8500576923076877,
"observers": [
14,
15,
16
]
},
{
"x": -5.518875,
"z": 3.8500576923076877,
"observers": [
14,
15,
16
]
}
],
"3,4": [
{
"x": -1.967875,
"z": 3.9100576923076877,
"observers": [
3,
4
]
},
{
"x": -1.967875,
"z": 2.725811258722293,
"observers": [
3,
4
]
},
{
"x": 1.496859350721359,
"z": 2.688712424231977,
"observers": [
3,
4
]
},
{
"x": 1.5235424148050005,
"z": 5.450057692307986,
"observers": [
3,
4
]
},
{
"x": -1.967875,
"z": 5.450057692307688,
"observers": [
3,
4
]
},
{
"x": -1.967875,
"z": 3.9100576923076877,
"observers": [
3,
4
]
}
]
},
"navigation": [],
"navigationResblock": {
"source": "ALLIANCE",
"resblock_latitude": 30.73340704,
"resblock_longitude": 121.3456377,
"buildings": [
{
"name": "07",
"latitude": 30.74040703999991,
"longitude": 121.35263770000003
},
{
"name": "73",
"latitude": 30.733306,
"longitude": 121.347156
},
{
"name": "19",
"latitude": 30.737407039999905,
"longitude": 121.34963770000002
},
{
"name": "50",
"latitude": 30.738407039999906,
"longitude": 121.35063770000002
},
{
"name": "08",
"latitude": 30.743407039999912,
"longitude": 121.35563770000005
},
{
"name": "02",
"latitude": 30.74240703999991,
"longitude": 121.35463770000004
},
{
"name": "01",
"latitude": 30.74140703999991,
"longitude": 121.35363770000004
},
{
"name": "13",
"latitude": 30.746407039999916,
"longitude": 121.35863770000006
},
{
"name": "12",
"latitude": 30.745407039999915,
"longitude": 121.35763770000005
},
{
"name": "104",
"latitude": 30.744407039999913,
"longitude": 121.35663770000005
},
{
"name": "35",
"latitude": 30.74840703999992,
"longitude": 121.36063770000007
},
{
"name": "24",
"latitude": 30.747407039999917,
"longitude": 121.35963770000006
},
{
"name": "43",
"latitude": 30.733733,
"longitude": 121.344281
},
{
"name": "41",
"latitude": 30.752407039999923,
"longitude": 121.36463770000009
},
{
"name": "40",
"latitude": 30.751407039999922,
"longitude": 121.36363770000008
},
{
"name": "38",
"latitude": 30.75040703999992,
"longitude": 121.36263770000008
},
{
"name": "36",
"latitude": 30.74940703999992,
"longitude": 121.36163770000007
},
{
"name": "99",
"latitude": 30.754407039999926,
"longitude": 121.3666377000001
},
{
"name": "66",
"latitude": 30.753407039999924,
"longitude": 121.3656377000001
},
{
"name": "46",
"latitude": 30.733407,
"longitude": 121.34447
},
{
"name": "47",
"latitude": 30.788407039999967,
"longitude": 121.40063770000026
},
{
"name": "67",
"latitude": 30.78940703999997,
"longitude": 121.40163770000026
},
{
"name": "70",
"latitude": 30.731568,
"longitude": 121.345332
},
{
"name": "42",
"latitude": 30.732732,
"longitude": 121.345467
},
{
"name": "20",
"latitude": 30.786407039999965,
"longitude": 121.39863770000025
},
{
"name": "45",
"latitude": 30.787407039999966,
"longitude": 121.39963770000026
},
{
"name": "112",
"latitude": 30.784407039999962,
"longitude": 121.39663770000024
},
{
"name": "14",
"latitude": 30.785407039999964,
"longitude": 121.39763770000025
},
{
"name": "109",
"latitude": 30.78240703999996,
"longitude": 121.39463770000023
},
{
"name": "49",
"latitude": 30.796407039999977,
"longitude": 121.4086377000003
},
{
"name": "111",
"latitude": 30.78340703999996,
"longitude": 121.39563770000024
},
{
"name": "96",
"latitude": 30.794407039999975,
"longitude": 121.40663770000029
},
{
"name": "98",
"latitude": 30.795407039999976,
"longitude": 121.4076377000003
},
{
"name": "48",
"latitude": 30.797407039999978,
"longitude": 121.4096377000003
},
{
"name": "72",
"latitude": 30.792407039999972,
"longitude": 121.40463770000028
},
{
"name": "80",
"latitude": 30.793407039999973,
"longitude": 121.40563770000028
},
{
"name": "44",
"latitude": 30.735407039999902,
"longitude": 121.3476377
},
{
"name": "68",
"latitude": 30.79040703999997,
"longitude": 121.40263770000027
},
{
"name": "69",
"latitude": 30.79140703999997,
"longitude": 121.40363770000027
},
{
"name": "78",
"latitude": 30.736407039999904,
"longitude": 121.34863770000001
},
{
"name": "74",
"latitude": 30.778407039999955,
"longitude": 121.39063770000021
},
{
"name": "10",
"latitude": 30.780407039999957,
"longitude": 121.39263770000022
},
{
"name": "107",
"latitude": 30.78140703999996,
"longitude": 121.39363770000023
},
{
"name": "23",
"latitude": 30.733632,
"longitude": 121.346186
},
{
"name": "03",
"latitude": 30.779407039999956,
"longitude": 121.39163770000022
},
{
"name": "101",
"latitude": 30.76740703999994,
"longitude": 121.37963770000016
},
{
"name": "102",
"latitude": 30.768407039999943,
"longitude": 121.38063770000016
},
{
"name": "09",
"latitude": 30.734237,
"longitude": 121.346006
},
{
"name": "17",
"latitude": 30.771407039999946,
"longitude": 121.38363770000018
},
{
"name": "21",
"latitude": 30.772407039999948,
"longitude": 121.38463770000018
},
{
"name": "106",
"latitude": 30.769407039999944,
"longitude": 121.38163770000017
},
{
"name": "16",
"latitude": 30.770407039999945,
"longitude": 121.38263770000017
},
{
"name": "54",
"latitude": 30.77540703999995,
"longitude": 121.3876377000002
},
{
"name": "65",
"latitude": 30.776407039999953,
"longitude": 121.3886377000002
},
{
"name": "22",
"latitude": 30.77340703999995,
"longitude": 121.38563770000019
},
{
"name": "39",
"latitude": 30.77440703999995,
"longitude": 121.3866377000002
},
{
"name": "82",
"latitude": 30.800407039999982,
"longitude": 121.41263770000032
},
{
"name": "84",
"latitude": 30.801407039999983,
"longitude": 121.41363770000032
},
{
"name": "71",
"latitude": 30.79840703999998,
"longitude": 121.41063770000031
},
{
"name": "79",
"latitude": 30.79940703999998,
"longitude": 121.41163770000031
},
{
"name": "97",
"latitude": 30.802407039999984,
"longitude": 121.41463770000033
},
{
"name": "100",
"latitude": 30.7344070399999,
"longitude": 121.3466377
},
{
"name": "15",
"latitude": 30.739407039999907,
"longitude": 121.35163770000003
},
{
"name": "04",
"latitude": 30.777407039999954,
"longitude": 121.38963770000021
},
{
"name": "95",
"latitude": 30.76640703999994,
"longitude": 121.37863770000016
},
{
"name": "83",
"latitude": 30.76540703999994,
"longitude": 121.37763770000015
},
{
"name": "81",
"latitude": 30.764407039999938,
"longitude": 121.37663770000015
},
{
"name": "37",
"latitude": 30.763407039999937,
"longitude": 121.37563770000014
},
{
"name": "18",
"latitude": 30.762407039999935,
"longitude": 121.37463770000014
},
{
"name": "110",
"latitude": 30.761407039999934,
"longitude": 121.37363770000013
},
{
"name": "11",
"latitude": 30.760407039999933,
"longitude": 121.37263770000013
},
{
"name": "108",
"latitude": 30.75940703999993,
"longitude": 121.37163770000012
},
{
"name": "105",
"latitude": 30.75840703999993,
"longitude": 121.37063770000012
},
{
"name": "103",
"latitude": 30.75740703999993,
"longitude": 121.36963770000011
},
{
"name": "06",
"latitude": 30.756407039999928,
"longitude": 121.36863770000011
},
{
"name": "05",
"latitude": 30.755407039999927,
"longitude": 121.3676377000001
}
],
"building_images": [],
"images": [
{
"id": 5020044192944750,
"url": "https://ke-image.ljcdn.com/hdic-resblock/95d97ba8-972a-46ac-832d-05e158412cfa.jpg"
},
{
"id": 5020044192944792,
"url": "https://ke-image.ljcdn.com/hdic-resblock/1516b3df-8bcb-45f7-b6d1-a8df02aa8f47.jpg"
},
{
"id": 5020044192944797,
"url": "https://ke-image.ljcdn.com/hdic-resblock/64855879-5560-4336-9ef1-bae37697e6ab.jpg"
},
{
"id": 5020044192944824,
"url": "https://ke-image.ljcdn.com/hdic-resblock/7783c8ea-5b92-400e-b904-4adbc32de2f0.jpg"
},
{
"id": 5020044192944877,
"url": "https://ke-image.ljcdn.com/hdic-resblock/70e1d1c1-a84c-40e7-a4d2-597c583e79b2.jpg"
},
{
"id": 5020044192944916,
"url": "https://ke-image.ljcdn.com/hdic-resblock/3d1d50eb-df64-4f6f-aad7-72374abab5d6.jpg"
},
{
"id": 5020044192944928,
"url": "https://ke-image.ljcdn.com/hdic-resblock/54a8051b-378f-43d7-a89f-87bbc3e7d1ac.jpg"
},
{
"id": 5020044192944933,
"url": "https://ke-image.ljcdn.com/hdic-resblock/52af26a0-913d-4c57-a0f8-c23fc371c342.jpg"
},
{
"id": 5020044192944938,
"url": "https://ke-image.ljcdn.com/hdic-resblock/233369fe-5f5c-4297-a5f6-22fd3cb2fc51.jpg"
},
{
"id": 5020044192944987,
"url": "https://ke-image.ljcdn.com/hdic-resblock/120f52dc-5300-45c5-873c-9a54d00f3cdd.jpg"
}
]
},
"mainWall": [],
"entrance": {
"point": {
"x": 0.9840130147377276,
"z": -1.5889423076923122
},
"area": "客厅",
"localArea": "客厅",
"floor": 0,
"rad": 1.5708
},
"roomInfo": {
"客厅": {
"position": {
"x": 1.5602005934604821,
"z": 0.519565234014808
},
"rotation": 1.5688963267948965,
"data": {
"title": "客厅",
"localTitle": "客厅",
"size": 21196149.901331306,
"direction": [],
"attachments": []
}
},
"卧室B": {
"position": {
"x": -1.41116071428571,
"z": -4.912942307692312
},
"rotation": 1.5707963267948966,
"data": {
"title": "卧室B",
"localTitle": "卧室B",
"size": 10065329.142857075,
"direction": [
"北"
],
"attachments": []
}
},
"厨房": {
"position": {
"x": 0.339125,
"z": -4.912942307692312
},
"rotation": 1.5707963267948966,
"data": {
"title": "厨房",
"localTitle": "厨房",
"size": 5229956.571428537,
"direction": [
"北"
],
"attachments": []
}
},
"卫生间": {
"position": {
"x": -5.578875,
"z": -1.883942307692312
},
"rotation": 4.712396326794897,
"data": {
"title": "卫生间",
"localTitle": "卫生间",
"size": 5763998.571428537,
"direction": [
"北"
],
"attachments": []
}
},
"卧室A": {
"position": {
"x": -5.578875,
"z": 1.6955576923076878
},
"rotation": 4.712396326794897,
"data": {
"title": "卧室A",
"localTitle": "卧室A",
"size": 14784179,
"direction": [
"南"
],
"attachments": []
}
},
"书房": {
"position": {
"x": 1.5702005934604821,
"z": 4.069065234014808
},
"rotation": 1.5804963267948966,
"data": {
"title": "书房",
"localTitle": "书房",
"size": 9539899.57776177,
"direction": [
"南"
],
"attachments": []
}
}
},
"doors": {
"客厅": [
{
"name": "书房",
"localName": "书房",
"rad": -1.5601,
"point": {
"x": 1.0312415179150267,
"z": 2.633694613448992,
"y": -1.1350893139625224
}
},
{
"name": "卧室A",
"localName": "卧室A",
"rad": -1.5708,
"point": {
"x": -2.6460508241758216,
"z": -0.5189423076923122,
"y": -1.1381385566220215
}
},
{
"name": "卫生间",
"localName": "卫生间",
"rad": 3.1416,
"point": {
"x": -3.2504464285714274,
"z": -1.10570054945055,
"y": -1.1381385566220215
}
},
{
"name": "卧室B",
"localName": "卧室B",
"rad": 1.5708,
"point": {
"x": -1.959924450549457,
"z": -3.248942307692312,
"y": -1.1398299360240667
}
},
{
"name": "厨房",
"localName": "厨房",
"rad": 1.5708,
"point": {
"x": -0.9511607142857101,
"z": -3.248942307692312,
"y": -1.1402490169119766
}
}
],
"卧室B": [
{
"name": "客厅",
"localName": "客厅",
"rad": -1.5708,
"point": {
"x": -1.959924450549457,
"z": -3.248942307692312,
"y": -1.1421413883808509
}
}
],
"厨房": [
{
"name": "客厅",
"localName": "客厅",
"rad": -1.5708,
"point": {
"x": -0.9511607142857101,
"z": -3.248942307692312,
"y": -1.1416579369385509
}
}
],
"卫生间": [
{
"name": "客厅",
"localName": "客厅",
"rad": 0,
"point": {
"x": -3.2504464285714274,
"z": -1.10570054945055,
"y": -1.1379340572919843
}
}
],
"卧室A": [
{
"name": "客厅",
"localName": "客厅",
"rad": 1.5708,
"point": {
"x": -2.6460508241758216,
"z": -0.5189423076923122,
"y": -1.1360521774279584
}
}
],
"书房": [
{
"name": "客厅",
"localName": "客厅",
"rad": 1.5815,
"point": {
"x": 1.0312415179150267,
"z": 2.633694613448992,
"y": -1.1338182371956096
}
}
]
},
"realtime": "",
"isLive": False,
"dig": {
"house_id": "0",
"housedel_id": "107102634627",
"agent_ucid": "",
"request_id": "46664baf-f651-41e1-bd19-e97a0dec6984",
"referer": "",
"biz_belong": "deyou",
"bu_unit": "beike",
"platform": "pc",
"ucid": "",
"fb_expo_id": "",
"vrpage_type": "vr_housedel"
},
"floorNames": [
"一层"
],
"atadGvsPaminim": {
"gvsPaminim": [
[
[
{
"x": 39004.01322258701,
"y": 39028.94230769105
},
{
"x": 38996.38656933603,
"y": 34931.289392343686
},
{
"x": 35532.125,
"y": 34894.19562018491
},
{
"x": 35532.125,
"y": 38078.94230769231
},
{
"x": 34309.55357142857,
"y": 38078.94230769231
},
{
"x": 34309.55357142857,
"y": 40688.94230769231
},
{
"x": 37779.125,
"y": 40688.94230769231
},
{
"x": 37779.125,
"y": 39028.94230769231
},
{
"x": 39004.01322258701,
"y": 39028.94230769231
}
],
[
{
"x": 32891.267857142855,
"y": 44016.94230769231
},
{
"x": 36028.83928571429,
"y": 44016.94230769231
},
{
"x": 36028.83928571429,
"y": 40808.94230769231
},
{
"x": 32891.267857142855,
"y": 40808.94230769231
},
{
"x": 32891.267857142855,
"y": 44016.94230769231
}
],
[
{
"x": 37779.125,
"y": 44016.94230769231
},
{
"x": 37779.125,
"y": 40808.94230769231
},
{
"x": 36148.83928571429,
"y": 40808.94230769231
},
{
"x": 36148.83928571429,
"y": 44016.94230769231
},
{
"x": 37779.125,
"y": 44016.94230769231
}
],
[
{
"x": 32831.267857142855,
"y": 40688.94230769231
},
{
"x": 34189.55357142857,
"y": 40688.94230769231
},
{
"x": 34189.55357142857,
"y": 38078.94230769231
},
{
"x": 31981.125,
"y": 38078.94230769231
},
{
"x": 31981.125,
"y": 40688.94230769231
},
{
"x": 32831.267857142855,
"y": 40688.94230769231
}
],
[
{
"x": 31981.125,
"y": 33649.94230769231
},
{
"x": 31981.125,
"y": 37958.94230769231
},
{
"x": 35412.125,
"y": 37958.94230769231
},
{
"x": 35412.125,
"y": 33649.94230769231
},
{
"x": 31981.125,
"y": 33649.94230769231
}
],
[
{
"x": 35532.125,
"y": 33589.94230769231
},
{
"x": 35532.125,
"y": 34774.18874127771
},
{
"x": 38996.85935072136,
"y": 34811.28757576802
},
{
"x": 39023.542414805,
"y": 32049.942307692014
},
{
"x": 35532.125,
"y": 32049.942307692312
},
{
"x": 35532.125,
"y": 33589.94230769231
}
]
]
],
"semanMoor": [
"客厅",
"卧室B",
"厨房",
"卫生间",
"卧室A",
"书房"
]
},
"shareConfig": {
"onMenuShareAppMessageData": {
"title": "VR看房 | 山鑫公寓 3室2厅",
"desc": "沙发上,地铁上,说看房,就看房!",
"link": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/",
"imgUrl": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/200x/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjIwMHgyMDAucG5n/gravity/center/"
},
"onMenuShareTimelineData": {
"title": "VR看房 | 山鑫公寓 3室2厅",
"link": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/",
"imgUrl": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/200x/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjIwMHgyMDAucG5n/gravity/center/"
},
"miniProgramShareData": {
"data": {
"command": "share",
"type": "rushi",
"image": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/600x/crop/600x480/gravity/center/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjYwMHg0ODAucG5n/gravity/center/",
"title": "VR看房 | 山鑫公寓 3室2厅",
"url": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/"
}
},
"jsbridgeShareData": {
"articleTitle": "VR看房 | 山鑫公寓 3室2厅",
"articleDiscription": "沙发上,地铁上,说看房,就看房!",
"requestUrl": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/",
"headImageUrl": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/200x/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjIwMHgyMDAucG5n/gravity/center/"
},
"shareTitle": "VR看房 | 山鑫公寓 3室2厅",
"shareDesc": "沙发上,地铁上,说看房,就看房!",
"shareURL": "https://realsee.com/ke/eQOd3gK40NL3m2Yy/B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX/",
"shareImg": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/200x/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjIwMHgyMDAucG5n/gravity/center/",
"shareminiProgramImg": "https://vrlab-image4.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg?imageMogr2/thumbnail/600x/crop/600x480/gravity/center/%7Cwatermark/1/image/aHR0cDovL3ZybGFiLXB1YmxpYy0xMjU0MjM2MjY1LmNvcy5hcC1iZWlqaW5nLm15cWNsb3VkLmNvbS9yZWxlYXNlL3dhdGVybWFyay9yZWFsc2VlLjYwMHg0ODAucG5n/gravity/center/"
},
"moduleConfig": {
"viewport": {
"ruleSwitch": False,
"tagSwitch": True,
"panoramaMinLatitude": -0.7853981633974483,
"panoramaMaxLatitude": 0.7853981633974483,
"doorSwitch": True,
"roomLabelSwitch": True,
"minimapSwitch": {
"roomname": True
}
}
},
"realtimePermission": True,
"code": "eQOd3gK40NL3m2Yy",
"house_code": "107102634627",
"hash": "B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX",
"source": "alliance",
"platform": "shell",
"latitude": None,
"longitude": None,
"pano_index": None,
"fov": None,
"title": "山鑫公寓 3室2厅",
"title_picture": "http://vrlab-image.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc1_GyzGyfF73_1.jpg",
"preview_picture": "https://vrlab-image.ljcdn.com/release/auto3dhd/34a9d4021c7dfe48b2516fd9a98f2330/screenshot/1593936770_0/pc0_amGqmIMmT.jpg",
"northRad": 7.784172137099813,
"agentID": "1000000026661940",
"autoplay": 0,
"autoplayType": 0,
"playid": None,
"user_terminal": "pc",
"showfps": False,
"agentAudioEnable": False,
"aiAudioEnable": True,
"ifNeedTopview": False,
"isVisionApp": False,
"isAgentView": False,
"isEmbed": False,
"isErShou": True,
"params": "B7yw51J9rXlhYhjhGT1Vo2Oi6WeEqjNX",
"isLottery": False,
"isReplayPage": False,
"isSalesOffice": False,
"isResblockPage": False,
"isLitePage": False,
"isI18n": False,
"baiduSwan": False,
"isZiroomVRApp": False,
"isZiroomMiniprogram": False,
"openTime": 1610088134465
}
|
[
"zhoukun@ehousechina.com"
] |
zhoukun@ehousechina.com
|
df2331761715f4d8f86e9f4e234a7697f4d42221
|
b3e95a6c6f7ce283c82c6357d2137ee7db3dbe11
|
/hamming/hamming.py
|
7e206bff426b98dda6e0689cf3f076f32fb778ec
|
[] |
no_license
|
tarunkant/exercism_python
|
f3338353fc885c508f976bcb7ccd1565c7439522
|
1772acb5bc500ffa4cadc4be967f5bbd980b6295
|
refs/heads/master
| 2021-01-12T01:51:23.186012
| 2017-03-29T12:47:44
| 2017-03-29T12:47:44
| 78,438,642
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
str1 = raw_input("enter alphabets for comparing ")
str2 = raw_input("enter alphabets from which you want to compare ")
count = 0
# Only compare positions that exist in both strings; the original iterated
# over len(str2) in both branches, which raised an IndexError whenever the
# first string was shorter. The stray `i=+1` / `i+=1` else-clauses did
# nothing (the for loop already advances i) and are dropped.
for i in range(min(len(str1), len(str2))):
    if str1[i] == str2[i]:
        count += 1
print "Counting of alphabets which are the same is: ", count
|
[
"tarunkant05@gmail.com"
] |
tarunkant05@gmail.com
|
1b7268552686962f7e73c57ce2e1e80d69a6e9a8
|
bfbe8a27ce6f46a7f2d03731b1de1e80cc6056c9
|
/projects/programmers/easy/x_만큼_간격이_있는_n_개의_숫자.py
|
1e54da1f59097679b655cb4c4b31a202adbfe45a
|
[] |
no_license
|
paige0701/algorithms-and-more
|
95175a18fd9d4a41659c50e3c5e314fe2bb23b8b
|
763a4009f8fa87c24552b5e77375c72896672b58
|
refs/heads/master
| 2021-06-11T04:19:29.559758
| 2021-04-07T11:29:01
| 2021-04-07T11:29:01
| 184,376,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
def test(a, b):
    # Print the first b multiples of a: [a, 2a, ..., a*b].
    print([a * i for i in range(1, b + 1)])


def main():
    a = int(input('input a: \n'))
    b = int(input('input b: \n'))
    test(a, b)


if __name__ == '__main__':
    main()
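# The filename is the programmers.co.kr problem "n numbers with interval x";
# the judge expects the list to be returned rather than printed. A sketch,
# assuming the site's usual solution(x, n) signature:
def solution(x, n):
    # First n multiples of x; works for negative and zero x as well.
    return [x * i for i in range(1, n + 1)]

assert solution(2, 5) == [2, 4, 6, 8, 10]
assert solution(-4, 2) == [-4, -8]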
|
[
"paigechoi0701@gmail.com"
] |
paigechoi0701@gmail.com
|
07e1e0e0c8bbfd406ceef507eed838493dee9eca
|
e82f28c2ff6312b027d6bfbda9c5c6585864dffd
|
/intown/apps/institutions/migrations/0005_auto_20150825_1927.py
|
2cd6bbd9634fecb5d23b89b5905f2747c245fd82
|
[
"MIT"
] |
permissive
|
Elika-/intown-1
|
a30435670c1bfcd6e3777ba41649cc045af31e7d
|
50742a903cff7710cda305557ca33760b3fb2a0e
|
refs/heads/master
| 2021-01-18T05:50:26.122554
| 2015-09-02T16:11:41
| 2015-09-02T16:11:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0004_auto_20150825_1909'),
        ('institutions', '0004_auto_20150825_1920'),
    ]

    operations = [
        migrations.CreateModel(
            name='InstituteAddress',
            fields=[
                ('address_ptr', models.OneToOneField(primary_key=True, to='core.Address', parent_link=True, auto_created=True, serialize=False)),
            ],
            bases=('core.address',),
        ),
        migrations.RemoveField(
            model_name='institute',
            name='address',
        ),
        migrations.AddField(
            model_name='instituteaddress',
            name='institute_fk',
            field=models.ForeignKey(to='institutions.Institute'),
        ),
    ]
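# A sketch of the model state this migration implies: InstituteAddress uses
# multi-table inheritance over core.Address (hence the auto-created
# address_ptr), and the FK moves off Institute onto the address. The import
# path and the Institute fields are assumptions, not the project's code;
# on_delete is shown for modern Django and was implicit in the 1.x era.
from django.db import models
from core.models import Address  # hypothetical app layout


class Institute(models.Model):
    name = models.CharField(max_length=255)  # placeholder field


class InstituteAddress(Address):
    # One row per address, each pointing back at its institute.
    institute_fk = models.ForeignKey('Institute', on_delete=models.CASCADE)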
|
[
"ablx@posteo.de"
] |
ablx@posteo.de
|
fc788246df6e41781ddf6e555e8b07bfc9c393f7
|
4ac0643056138d9caf903b9ad051c75cf1447df6
|
/src/learn/__init__.py
|
47ea3bb4ca97dc7623be8755d2c7d20aca658fc8
|
[
"MIT"
] |
permissive
|
ssd04/ml-project-template
|
361a5123518259020811f0b0a760b6e55ae81148
|
cea040176c620fa27b7537c7c9ced50a78fb591e
|
refs/heads/master
| 2023-08-25T18:34:57.973080
| 2021-10-26T21:23:05
| 2021-10-26T21:23:05
| 414,691,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
from .classification.randomforest import RandomForest
from .classification.xgboost import XGBoost
from .regression.logistic import LogisticR
from .regression.xgboost import XGBoostR
# Dummy was referenced below but never imported, so the "dummy_classifier"
# branch raised NameError; the module path here is an assumption.
from .classification.dummy import Dummy


class GetModel:

    @classmethod
    def get_model(cls, alg, conf=None):
        if alg == "random_forest":
            model = RandomForest(conf=conf)
        elif alg == "logistic_regression":
            model = LogisticR(conf=conf)
        elif alg == "dummy_classifier":
            model = Dummy(conf=conf)
        elif alg == "xgboost":
            model = XGBoost(conf=conf)
        elif alg == "xgboost_regressor":
            model = XGBoostR(conf=conf)
        else:
            raise ValueError(alg)
        return model
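# A dict-based registry is a common alternative to the if/elif chain above;
# a self-contained sketch using a stand-in class (the real model wrappers
# live in the submodules imported at the top):
class _Stub:
    def __init__(self, conf=None):
        self.conf = conf

REGISTRY = {"random_forest": _Stub, "xgboost": _Stub}

def get_model(alg, conf=None):
    try:
        return REGISTRY[alg](conf=conf)
    except KeyError:
        raise ValueError(alg)

print(type(get_model("xgboost", conf={"n_estimators": 100})).__name__)  # _Stub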
|
[
"dariussuirab@gmail.com"
] |
dariussuirab@gmail.com
|
3f982e8a36a779567542f4c382cd555febeef961
|
ed10dc841d5b4f6a038e8f24f603750992d9fae9
|
/lldb/test/API/lang/objc/foundation/TestFoundationDisassembly.py
|
bf9a40fc8da9b49c77e740cb835ab78aef313bfc
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
WYK15/swift-Ollvm10
|
90c2f0ade099a1cc545183eba5c5a69765320401
|
ea68224ab23470963b68dfcc28b5ac769a070ea3
|
refs/heads/main
| 2023-03-30T20:02:58.305792
| 2021-04-07T02:41:01
| 2021-04-07T02:41:01
| 355,189,226
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,449
|
py
|
"""
Test the lldb disassemble command on foundation framework.
"""
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipUnlessDarwin
class FoundationDisassembleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@expectedFailureDarwin('rdar://problem/54977700')
@skipIfAsan
def test_foundation_disasm(self):
"""Do 'disassemble -n func' on each and every 'Code' symbol entry from the Foundation.framework."""
self.build()
# Enable synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
foundation_framework = None
for module in target.modules:
if module.file.basename == "Foundation":
foundation_framework = module.file.fullpath
break
self.assertTrue(
foundation_framework is not None,
"Foundation.framework path located")
self.runCmd("image dump symtab '%s'" % foundation_framework)
raw_output = self.res.GetOutput()
# Now, grab every 'Code' symbol and feed it into the command:
# 'disassemble -n func'.
#
# The symbol name is on the last column and trails the flag column which
# looks like '0xhhhhhhhh', i.e., 8 hexadecimal digits.
codeRE = re.compile(r"""
\ Code\ {9} # ' Code' followed by 9 SPCs,
.* # the wildcard chars,
0x[0-9a-f]{8} # the flag column, and
\ (.+)$ # finally the function symbol.
""", re.VERBOSE)
for line in raw_output.split(os.linesep):
match = codeRE.search(line)
if match:
func = match.group(1)
self.runCmd('image lookup -s "%s"' % func)
self.runCmd('disassemble -n "%s"' % func)
@skipIfAsan
def test_simple_disasm(self):
"""Test the lldb 'disassemble' command"""
self.build()
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Stop at +[NSString stringWithFormat:].
symbol_name = "+[NSString stringWithFormat:]"
break_results = lldbutil.run_break_set_command(
self, "_regexp-break %s" % (symbol_name))
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name=symbol_name,
num_locations=1)
# Stop at -[MyString initWithNSString:].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString initWithNSString:]',
num_expected_locations=1,
sym_exact=True)
# Stop at the "description" selector.
lldbutil.run_break_set_by_selector(
self,
'description',
num_expected_locations=1,
module_name='a.out')
# Stop at -[NSAutoreleasePool release].
break_results = lldbutil.run_break_set_command(
self, "_regexp-break -[NSAutoreleasePool release]")
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name='-[NSAutoreleasePool release]',
num_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# First stop is +[NSString stringWithFormat:].
self.expect(
"thread backtrace",
"Stop at +[NSString stringWithFormat:]",
substrs=["Foundation`+[NSString stringWithFormat:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for +[NSString stringWithFormat:].
self.runCmd("process continue")
# Followed by a.out`-[MyString initWithNSString:].
self.expect(
"thread backtrace",
"Stop at a.out`-[MyString initWithNSString:]",
substrs=["a.out`-[MyString initWithNSString:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Followed by -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for -[MyString description].
self.runCmd("process continue")
# Followed by -[NSAutoreleasePool release].
self.expect("thread backtrace", "Stop at -[NSAutoreleasePool release]",
substrs=["Foundation`-[NSAutoreleasePool release]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
|
[
"wangyankun@ishumei.com"
] |
wangyankun@ishumei.com
|
971c56d879a5de0f10578db694c2688ca82f4d73
|
87a83b426988e5d0762a48cffb73fe48deb985d9
|
/roadrepair.py
|
9b996b5d99df7e0dd25ffbaeb7963240f4d4db23
|
[] |
no_license
|
biswassampad/coding_challenges
|
82049f31ffc87ffc8bd3d920cb0983e1ea711cf8
|
2a1d0e70231a7553b2c8093209741332df14b4be
|
refs/heads/master
| 2022-12-31T21:54:12.358748
| 2020-10-26T07:17:49
| 2020-10-26T07:17:49
| 307,289,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
def main():
    crew_id = int(input("get the crewId: "))
    job_id = int(input("get the task id: "))
    distance = getMinCost(crew_id, job_id)
    # Report the computed distance instead of discarding it; the original
    # printed 'success' and never used the result.
    print('distance:', distance)


def getMinCost(crew_id, job_id):
    # Cost is modelled as the signed gap between crew and job positions.
    return job_id - crew_id


if __name__ == "__main__":
    main()
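# If "minimum cost" is meant as a distance, it should not go negative when
# the crew sits past the job; a sketch of that variant (an assumption, since
# the original returns a signed gap):
def get_min_cost_abs(crew_id, job_id):
    # Absolute gap: a crew 7 units ahead costs the same as 7 units behind.
    return abs(job_id - crew_id)

assert get_min_cost_abs(3, 10) == 7
assert get_min_cost_abs(10, 3) == 7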
|
[
"biswa.satpathy@iserveu.in"
] |
biswa.satpathy@iserveu.in
|
dc7ba5b213781e5b0e36e68d28b93bc27e52c663
|
d9503a748d51d6dbef6a76513382e19ad1c3107f
|
/第8日目/quantumWell_withBarrier_StarkEffect.py
|
df70897cc286330e945298578d845a0e9d51b218
|
[] |
no_license
|
quantumshiro/quantumcompute_python
|
0446fbea0a79b7fba1b26b811763d776fcf8ccdb
|
1092ceeea2737ddc6ad325ac603ae7618b13b12b
|
refs/heads/main
| 2023-02-10T05:41:29.803979
| 2021-01-05T12:17:07
| 2021-01-05T12:17:07
| 326,984,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,544
|
py
|
############################################################################
# Electronic states of an infinitely deep quantum well with a potential
# barrier under an applied static electric field (Stark effect)
############################################################################
import math
import cmath
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import scipy.integrate as integrate
import numpy as np
import numpy.linalg as LA

# Global settings
plt.rcParams['font.family'] = 'Times New Roman'  # font
plt.rcParams['font.size'] = 12                   # font size

# Imaginary unit
I = 0.0 + 1.0j

######################################
# Physical constants
######################################
# Planck constant
h = 6.6260896 * 10**-34
hbar = h / (2.0 * math.pi)
# Electron mass
me = 9.10938215 * 10**-31
# Electron volt
eV = 1.60217733 * 10**-19
# Elementary charge
e = 1.60217733 * 10**-19

######################################
# Physical system setup
######################################
# Width of the quantum well
L = 1.0 * 10**-9
# Computation interval
x_min = -L / 2.0
x_max = L / 2.0
# Number of basis states
n_max = 30
# Matrix dimension
DIM = n_max + 1
# Number of spatial divisions
NX = 500
# Spatial step
dx = 1.0 * 10**-9
# Barrier thickness
W = L / 5
# Maximum barrier height
V_max = 30.0 * eV
# Electric field strength
Ex_max = 1.0 * 10**8
# Number of field-strength divisions
NEx = 10
# Ground state and first excited state
N = 2

# Eigenfunctions of the unperturbed well
def varphi(n, x):
    kn = math.pi * (n + 1) / L
    return math.sqrt(2.0 / L) * math.sin(kn * (x + L / 2.0))

# Potential term
def V(x, Ex):
    if (abs(x) <= W / 2.0):
        return e * Ex * x + V_max
    else:
        return e * Ex * x

# Eigenenergies
def Energy(n):
    kn = math.pi * (n + 1) / L
    return hbar * hbar * kn * kn / (2.0 * me)

# Integrand (for matrix elements)
def integral_matrixElement(x, n1, n2, Ex):
    return varphi(n1, x) * V(x, Ex) * varphi(n2, x) / eV

# Integrand (for the expectation value of x)
def average_x(x, a):
    sum = 0
    for n in range(n_max + 1):
        sum += a[n] * varphi(n, x)
    return x * sum**2

# Initialize eigenvalues and eigenvectors
eigenvalues = [0] * (NEx + 1)
vectors = [0] * (NEx + 1)
for nEx in range(NEx + 1):
    eigenvalues[nEx] = []
    vectors[nEx] = []

# Initialize arrays for plotting the probability distributions
xs = []
phi = [0] * (NEx + 1)
for nEx in range(NEx + 1):
    phi[nEx] = [0] * N
    for n in range(len(phi[nEx])):
        phi[nEx][n] = [0] * (NX + 1)

# Initialize arrays for the field dependence of the centre position
averageX = [0] * N
for n in range(len(averageX)):
    averageX[n] = [0] * (NEx + 1)

# For each static field strength
for nEx in range(NEx + 1):
    print("Field strength: " + str(nEx * 100 / NEx) + "%")
    # Set the static field strength
    Ex = Ex_max / NEx * nEx
    # Hermitian matrix (as a list of rows)
    matrix = []
    ### Compute the matrix elements
    for n1 in range(n_max + 1):
        col = []
        for n2 in range(n_max + 1):
            # Gauss-Legendre quadrature
            result = integrate.quad(
                integral_matrixElement,  # integrand
                x_min, x_max,            # lower and upper integration limits
                args=(n1, n2, Ex)        # extra arguments for the integrand
            )
            real = result[0]
            imag = 0j
            # Zero-field eigenenergy (diagonal elements)
            En = Energy(n1) / eV if (n1 == n2) else 0
            # Append the element to the row
            col.append(En + real)
        # Append the row
        matrix.append(col)
    # list -> matrix
    matrix = np.array(matrix)
    ### Compute eigenvalues and eigenvectors
    result = LA.eig(matrix)
    eig = result[0]  # eigenvalues
    vec = result[1]  # eigenvectors
    # Indices that sort the eigenvalues in ascending order
    index = np.argsort(eig)
    # Sort the eigenvalues ascending
    eigenvalues[nEx] = eig[index]
    # Transpose so each row is an eigenvector
    vec = vec.T
    # Reorder the eigenvectors to match
    vectors[nEx] = vec[index]
    ### Sanity check: MA - EA = 0 ?
    sum = 0
    for i in range(DIM):
        v = matrix @ vectors[nEx][i] - eigenvalues[nEx][i] * vectors[nEx][i]
        for j in range(DIM):
            sum += abs(v[j])**2
    print("|MA-EA| =" + str(sum))
    ### Spatial distribution of the eigenfunctions
    for nx in range(NX + 1):
        x = x_min + (x_max - x_min) / NX * nx
        if (nEx == 0): xs.append(x / dx)
        for n in range(len(phi[nEx])):
            for m in range(n_max + 1):
                phi[nEx][n][nx] += vectors[nEx][n][m] * varphi(m, x)
            # Shape the data for plotting
            phi[nEx][n][nx] = abs(phi[nEx][n][nx])**2 / (1.0 * 10**9)
    for n in range(len(averageX)):
        # Gauss-Legendre quadrature
        result = integrate.quad(
            average_x,               # integrand
            x_min, x_max,            # lower and upper integration limits
            args=(vectors[nEx][n],)  # coefficient vector for the integrand
        )
        # Store the result
        averageX[n][nEx] = result[0] * (1.0 * 10**9)

# Plot: energy eigenvalues
fig1 = plt.figure(figsize=(10, 6))
plt.title("Energy at Electric field strength")
plt.xlabel("Electric field strength[V/m]")
plt.ylabel("Energy[eV]")
# Plot range
plt.xlim([0, 10])
# x axis
exs = range(NEx + 1)
# y axis
En_0 = []
En_1 = []
for nEx in range(NEx + 1):
    En_0.append(eigenvalues[nEx][0])
    En_1.append(eigenvalues[nEx][1])
    # print(str(nV) + " " + str(eigenvalues[nV][0]) + " " + str(eigenvalues[nV][1]))
# Plot the ground state and the first excited state
plt.plot(exs, En_0, marker="o", linewidth=3)
plt.plot(exs, En_1, marker="o", linewidth=3)

# Plot: ground state
fig2 = plt.figure(figsize=(10, 6))
plt.title("Existence probability at Position (n=0)")
plt.xlabel("Position[nm]")
plt.ylabel("|phi|^2")
# Plot range
plt.xlim([-0.5, 0.5])
plt.ylim([0, 5.0])
# For each field strength
for nEx in range(NEx + 1):
    plt.plot(xs, phi[nEx][0], linewidth=3)

# Plot: first excited state
fig3 = plt.figure(figsize=(10, 6))
plt.title("Existence probability at Position (n=1)")
plt.xlabel("Position[nm]")
plt.ylabel("|phi|^2")
# Plot range
plt.xlim([-0.5, 0.5])
plt.ylim([0, 5.0])
for nEx in range(NEx + 1):
    plt.plot(xs, phi[nEx][1], linewidth=3)

'''
# Plot: expectation value of the position
fig4 = plt.figure(figsize=(10, 6))
plt.title("Position at Electric field strength")
plt.xlabel("Electric field strength[V/m]")
plt.ylabel("Position[nm]")
# Plot range
plt.xlim([0, 10])
# x axis
exs = range(NV + 1)
plt.plot(exs, averageX[0], marker="o", linewidth=3)
plt.plot(exs, averageX[1], marker="o", linewidth=3)
# Show the figures
'''
plt.show()
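# The Hamiltonian assembled above is real-symmetric, so numpy.linalg.eigh is
# the more natural solver: it guarantees real eigenvalues and returns them
# already sorted ascending, so the argsort/reorder steps disappear. A minimal
# self-contained sketch of that variant on a toy 2x2 two-level Hamiltonian:
import numpy as np

def solve_levels(H):
    # eigh assumes a symmetric/Hermitian matrix, which H is by construction.
    eigvals, eigvecs = np.linalg.eigh(H)
    return eigvals, eigvecs.T  # rows = eigenvectors, matching the code above

E, A = solve_levels(np.array([[1.0, 0.5], [0.5, 2.0]]))
print(E)  # [0.7928..., 2.2071...]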
|
[
"noreply@github.com"
] |
noreply@github.com
|