| Column | Type | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
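The records below are preview rows from this split. The schema above can also be checked programmatically; the following is a minimal sketch assuming a shard of the split has been exported to a local Parquet file. The file name `sample.parquet` is a placeholder, and pandas is used only for illustration.

```python
# Hypothetical inspection of an exported Parquet shard of this dataset.
import pandas as pd

df = pd.read_parquet("sample.parquet")  # "sample.parquet" is a placeholder name

# The columns and dtypes should match the schema table above.
print(df.dtypes)

# github_id and the gha_* timestamp columns are nullable (marked ⌀ above).
print(df["github_id"].isna().sum(), "rows have no GitHub id")

# Every row carries the raw file text in `content`; `length_bytes` is its size.
row = df.iloc[0]
print(row["repo_name"], row["path"], row["length_bytes"])
```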
867c39b81f0bd2f14694cd585a733a351b7c50fa
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_spinier.py
|
0482c9921fbb9ca2d0ffe46b31b1181f2f50e5f1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from xai.brain.wordbase.adjectives._spiny import _SPINY
# class header
class _SPINIER(_SPINY):
    def __init__(self):
        _SPINY.__init__(self)
        self.name = "SPINIER"
        self.specie = 'adjectives'
        self.basic = "spiny"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
00a109d7ceb3af65458a2708817bd3fcbd90c405
|
38f19ae4963df9be7a851458e63ffb94d824eb03
|
/stellar_sdk/__version__.py
|
328854e119791d65c471bc64c026b784c86512b9
|
[
"Apache-2.0"
] |
permissive
|
brunodabo/py-stellar-base
|
e033da687e3a2a76076cfed88e82c7951ae4e57e
|
7897a23bc426324cb389a7cdeb695dfce10a673f
|
refs/heads/master
| 2022-11-05T12:35:37.140735
| 2020-06-03T13:41:30
| 2020-06-03T13:54:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
"""
_____ _______ ______ _ _ _____ _____ _____ _ __
/ ____|__ __| ____| | | | /\ | __ \ / ____| __ \| |/ /
| (___ | | | |__ | | | | / \ | |__) |____| (___ | | | | ' /
\___ \ | | | __| | | | | / /\ \ | _ /______\___ \| | | | <
____) | | | | |____| |____| |____ / ____ \| | \ \ ____) | |__| | . \
|_____/ |_| |______|______|______/_/ \_\_| \_\ |_____/|_____/|_|\_\
"""
__title__ = "stellar-sdk"
__description__ = "The Python Stellar SDK library provides APIs to build transactions and connect to Horizon."
__url__ = "https://github.com/StellarCN/py-stellar-base"
__issues__ = "{}/issues".format(__url__)
__version__ = "2.5.2"
__author__ = "Eno, overcat"
__author_email__ = "appweb.cn@gmail.com, 4catcode@gmail.com"
__license__ = "Apache License 2.0"
|
[
"4catcode@gmail.com"
] |
4catcode@gmail.com
|
ceb03e78d5da369eaa15cfadb539d06f2ad3979b
|
c3e2f56672e01590dc7dc7e184f30c2884ce5d3a
|
/Programs/MyPythonXII/Unit1/PyChap03/summatrix.py
|
a00722d07d1ce3b80baa6858b737eb411c6ad68e
|
[] |
no_license
|
mridulrb/Basic-Python-Examples-for-Beginners
|
ef47e830f3cc21cee203de2a7720c7b34690e3e1
|
86b0c488de4b23b34f7424f25097afe1874222bd
|
refs/heads/main
| 2023-01-04T09:38:35.444130
| 2020-10-18T15:59:29
| 2020-10-18T15:59:29
| 305,129,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
# File name: ...\\MyPythonXII\Unit1\PyChap03\summatrix.py
# Program to add two matrices
# Declaration of three 10 x 10 matrices
A = [[0 for x in range(10)] for x in range(10)]
B = [[0 for x in range(10)] for x in range(10)]
C = [[0 for x in range(10)] for x in range(10)]
print("Enter the number of Rows of matrix A: ", end='')
r = int(input())
print("Enter the number of Columns of matrix A: ", end='')
c = int(input())
print("Enter the number of Rows of matrix B: ", end='')
r1 = int(input())
print("Enter the number of Columns of matrix B: ", end='')
c1 = int(input())
# Before accepting the Elements Check if no of
# rows and columns of both matrices is equal
if (r == r1 and c == c1):
    # Accept the Elements for matrix A
    for i in range(r):
        for j in range(c):
            print("Enter the element A[%d][%d]: " % (i, j), end='')
            A[i][j] = int(input())
    # Accept the Elements for matrix B
    for i in range(r):
        for j in range(c):
            print("Enter the element B[%d][%d]: " % (i, j), end='')
            B[i][j] = int(input())
    # Addition of two matrices
    for i in range(r):
        for j in range(c):
            C[i][j] = A[i][j] + B[i][j]
    # First matrix
    print("Matrix A:")
    for i in range(r):
        print(" "*5, end="")
        for j in range(c):
            print("{0:^3}".format(A[i][j]), end=' ')
        print()
    print("Matrix B:")
    for i in range(r):
        print(" "*5, end="")
        for j in range(c):
            print("{0:^3}".format(B[i][j]), end=' ')
        print()
    # Print out the Resultant Matrix C
    print("The Addition of two Matrices C is : ")
    for i in range(r):
        print(" "*5, end="")
        for j in range(c):
            print("{0:^3}".format(C[i][j]), end=' ')
        print()
else:
    print("Order of two matrices is not same ")
|
[
"mridurb@gmail.com"
] |
mridurb@gmail.com
|
5581f1877e0859073d8bad360dbf8d6e6b5ed449
|
43949d3f05bf1d1212cc25fd5766a47940723f7b
|
/generators/gen.py
|
5202bb3d33b604cfc3856de4de253eacca2d802f
|
[] |
no_license
|
asing177/python_programs
|
f6aa5b53b6f0b2d8c824c8b796ca77d8a1121110
|
dfa18f7ec6bd0eb29311a393e011d89dac1a7e26
|
refs/heads/master
| 2020-12-21T14:39:27.761117
| 2020-02-06T08:12:32
| 2020-02-06T08:12:32
| 236,462,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
def my_gen():
    n = 1
    print('This is printed first')
    # Generator function contains yield statements
    yield n
    n += 1
    print('This is printed second')
    yield n
    n += 1
    print('This is printed at last')
    yield n

a = my_gen()
next(a)
next(a)
next(a)
next(a)  # the generator is exhausted here, so this fourth call raises StopIteration
|
[
"adityasingh27@hotmail.com"
] |
adityasingh27@hotmail.com
|
1b58373ac66d0ddb7dc9aeda27c62602f8569f74
|
de707c94c91f554d549e604737b72e6c86eb0755
|
/math/0x01-plotting/2-change_scale.py
|
480bf1b1af07c3d6f3b678dbbd1083ba7969a535
|
[] |
no_license
|
ejonakodra/holbertonschool-machine_learning-1
|
885cf89c1737573228071e4dc8e26304f393bc30
|
8834b201ca84937365e4dcc0fac978656cdf5293
|
refs/heads/main
| 2023-07-10T09:11:01.298863
| 2021-08-11T03:43:59
| 2021-08-11T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#!/usr/bin/env python3
""" plots x, y as a line graph where y-axis is scaled logarithmically """
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 28651, 5730)
r = np.log(0.5)
t = 5730
y = np.exp((r / t) * x)
plt.plot(x, y)
plt.xlabel('Time (years)')
plt.ylabel('Fraction Remaining')
plt.title("Exponential Decay of C-14")
plt.yscale("log")
plt.xlim((0, 28650))
plt.show()
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
1c3fe0cc2873b5858a438ae7dbeaf43f1ace5c25
|
8c6466e12bb3351031c25677127dc86d13bd9b19
|
/Project data Modelling with Postgress/sql_queries.py
|
09f272907dc0ece9579ce135decdb08810006f0f
|
[] |
no_license
|
andreodendaal/udacity_data_engineering
|
ac8eb889db002014b3ccf1fe15b16f77361b8d55
|
03524ffbd1830d168761fcc996cab329dd064977
|
refs/heads/master
| 2020-05-05T09:24:02.921194
| 2020-01-15T20:22:14
| 2020-01-15T20:22:14
| 179,902,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS songplays;"
# CREATE TABLES
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS songplays (songplay_id varchar, start_time timestamp, user_id varchar, level varchar, song_id varchar, artist_id varchar, session_id varchar, location varchar, user_agent varchar, PRIMARY KEY (songplay_id));""")
user_table_create = ("""CREATE TABLE IF NOT EXISTS users (user_id varchar, first_name varchar, last_name varchar, gender varchar, level varchar, PRIMARY KEY (user_id));
""")
song_table_create = ("""CREATE TABLE IF NOT EXISTS songs (song_id varchar, title varchar, artist_id varchar, year int, duration int, PRIMARY KEY (song_id));
""")
#song_id title artist_id year duration
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists (artist_id varchar, name varchar, location varchar, lattitude float, longitude float, PRIMARY KEY (artist_id));
""")
# https://www.postgresql.org/docs/9.1/functions-datetime.html
time_table_create = ("""CREATE TABLE IF NOT EXISTS time (start_time timestamp, hour int, day int, week int, month int, year int, weekday int, PRIMARY KEY (start_time));
""")
# INSERT RECORDS
songplay_table_insert = ("""INSERT INTO songplays (songplay_id, start_time, user_id, level, song_id, artist_id, session_id, location, user_agent) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s);""")
user_table_insert = ("""INSERT INTO users (user_id, first_name, last_name, gender, level) VALUES(%s, %s, %s, %s, %s ) ON CONFLICT (user_id) DO NOTHING;""")
song_table_insert = ("""INSERT INTO songs (song_id, title, artist_id, year, duration) VALUES(%s, %s, %s, %s, %s);""")
artist_table_insert = ("""INSERT INTO artists (artist_id, name, location, lattitude, longitude) VALUES(%s, %s, %s, %s, %s);""")
time_table_insert = ("""INSERT INTO time (start_time, hour, day, week, month, year, weekday) VALUES(%s, %s, %s, %s, %s, %s, %s);""")
# FIND SONGS
song_select = ("""
""")
# QUERY LISTS
#create_table_queries = [songplay_table_create]
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
#drop_table_queries = [songplay_table_drop]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
|
[
"aodendaal.direct@gmail.com"
] |
aodendaal.direct@gmail.com
|
d485cc88de5469d66c7dbc503dbb0e3206144138
|
b1ffcbd977595bccf15dd56e965bda62867d1e10
|
/omrdatasettools/downloaders/PrintedMusicSymbolsDatasetDownloader.py
|
818cb93eca0478aa48e520f084374eba04cbd048
|
[
"CC-BY-NC-SA-4.0",
"GPL-2.0-only",
"CC-BY-SA-3.0",
"MIT",
"GPL-1.0-or-later",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"AGPL-3.0-only"
] |
permissive
|
fzalkow/OMR-Datasets
|
7ded5bb9278e47c84a16de01081876d6bb2e6dbe
|
c9e7a986199998d6a735875503e6dcce5fdf1193
|
refs/heads/master
| 2020-09-14T15:30:45.824800
| 2020-01-06T12:07:52
| 2020-01-06T12:07:52
| 223,169,792
| 0
| 0
|
MIT
| 2019-11-21T12:32:31
| 2019-11-21T12:32:30
| null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
import argparse
import os
from omrdatasettools.downloaders.DatasetDownloader import DatasetDownloader
class PrintedMusicSymbolsDatasetDownloader(DatasetDownloader):
    """ Loads the Printed Music Symbols dataset
        https://github.com/apacha/PrintedMusicSymbolsDataset
        Copyright 2017 by Alexander Pacha under MIT license
    """

    def get_dataset_download_url(self) -> str:
        # If this link does not work anymore, find the images at https://github.com/apacha/PrintedMusicSymbolsDataset
        return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/PrintedMusicSymbolsDataset.zip"

    def get_dataset_filename(self) -> str:
        return "PrintedMusicSymbolsDataset.zip"

    def download_and_extract_dataset(self, destination_directory: str):
        if not os.path.exists(self.get_dataset_filename()):
            print("Downloading Printed Music Symbol dataset...")
            self.download_file(self.get_dataset_download_url(), self.get_dataset_filename())
        print("Extracting Printed Music Symbol dataset...")
        absolute_path_to_temp_folder = os.path.abspath('PrintedMusicSymbolsDataset')
        self.extract_dataset(absolute_path_to_temp_folder)
        DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "PrintedMusicSymbolsDataset"),
                                   os.path.abspath(destination_directory))
        self.clean_up_temp_directory(absolute_path_to_temp_folder)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_directory",
        type=str,
        default="../data/printed_images",
        help="The directory, where the extracted dataset will be copied to")
    flags, unparsed = parser.parse_known_args()
    dataset = PrintedMusicSymbolsDatasetDownloader()
    dataset.download_and_extract_dataset(flags.dataset_directory)
|
[
"alexander.pacha@gmail.com"
] |
alexander.pacha@gmail.com
|
333c48d27ec8d5b0ea5633bffadd6e27638c0522
|
315450354c6ddeda9269ffa4c96750783963d629
|
/CMSSW_7_0_4/src/Configuration/TotemCommon/python/__init__.py
|
6cf341a440a2c973942ba5aad7ebac154203e274
|
[] |
no_license
|
elizamelo/CMSTOTEMSim
|
e5928d49edb32cbfeae0aedfcf7bd3131211627e
|
b415e0ff0dad101be5e5de1def59c5894d7ca3e8
|
refs/heads/master
| 2021-05-01T01:31:38.139992
| 2017-09-12T17:07:12
| 2017-09-12T17:07:12
| 76,041,270
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/Configuration/TotemCommon/',1)[0])+'/cfipython/slc6_amd64_gcc481/Configuration/TotemCommon')
|
[
"eliza@cern.ch"
] |
eliza@cern.ch
|
5ddfb724efcc821a79c4e342fe9315c9e87c4d99
|
038e6e41d117431869edad4952a5b1463d5131bc
|
/donations/urls.py
|
f1cd42016e4516213c853992d4476f9cab832f42
|
[
"MIT"
] |
permissive
|
MikaelSantilio/aprepi-django
|
c49290855b7c83ecaf08de82ee9eedf8e8baa15a
|
5e2b5ecffb287eab929c0759ea35ab073cc19d96
|
refs/heads/master
| 2023-06-19T00:18:15.986920
| 2021-06-15T20:15:59
| 2021-06-15T20:15:59
| 329,428,268
| 0
| 1
|
MIT
| 2021-02-05T16:21:45
| 2021-01-13T20:50:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
from django.urls import path
from donations import views
app_name = "donations"
urlpatterns = [
    path('', views.MakeDonation.as_view(), name='unique-donation'),
    path('historico/', views.DonationListView.as_view(), name='list'),
    # path('checkout/<str:value>', views.MPCheckout.as_view(), name='mp-checkout'),
    # path('anonima/', views.MakeAnonymousDonation.as_view(), name='anonymous-donation'),
    # path('recorrente/', views.MakeRecurringDonation.as_view(), name='recurring-donation'),
    path('obrigado/', views.ThankYouView.as_view(), name='thankyou'),
    # path('cartoes/', views.CreditCardListView.as_view(), name='list-cc'),
    # path('cartoes/cadastrar', views.CreditCardCreateView.as_view(), name='create-cc'),
    # path('cartoes/<int:pk>', views.CreditCardDetailView.as_view(), name='detail-cc'),
    # path('cartoes/atualizar/<int:pk>',
    #      views.CreditCardUpdateView.as_view(), name='update-cc'),
    # path('cartoes/apagar/<int:pk>',
    #      views.CreditCardDeleteView.as_view(), name='delete-cc')
]
|
[
"mikael.santilio@gmail.com"
] |
mikael.santilio@gmail.com
|
427ad4d206db8a5e4f376c716b47b039b82fba5a
|
033da72a51c76e5510a06be93229a547a538cf28
|
/Data Engineer with Python Track/03. Streamlined Data Ingestion with Pandas/Chapter/03. Importing Data from Databases/03-Selecting columns with SQL.py
|
9ce77ea8c2536cbf0133ba6d5c5008e87273de10
|
[] |
no_license
|
ikhwan1366/Datacamp
|
d5dcd40c1bfeb04248977014260936b1fb1d3065
|
7738614eaebec446842d89177ae2bc30ab0f2551
|
refs/heads/master
| 2023-03-06T13:41:06.522721
| 2021-02-17T22:41:54
| 2021-02-17T22:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
'''
Selecting columns with SQL
Datasets can contain columns that are not required for an analysis, like the weather table in data.db does. Some, such as elevation, are redundant, since all observations occurred at the same place, while others contain variables we are not interested in. After making a database engine, you'll write a query to SELECT only the date and temperature columns, and pass both to read_sql() to make a data frame of high and low temperature readings.
pandas has been loaded as pd, and create_engine() has been imported from sqlalchemy.
Note: The SQL checker is quite picky about column positions and expects fields to be selected in the specified order.
Instructions
100 XP
- Create a database engine for data.db.
- Write a SQL query that SELECTs the date, tmax, and tmin columns from the weather table.
- Make a data frame by passing the query and engine to read_sql() and assign the resulting data frame to temperatures.
'''
# pandas (as pd) and create_engine are preloaded in the exercise environment;
# the explicit imports below are added so the snippet runs standalone
import pandas as pd
from sqlalchemy import create_engine

# Create database engine for data.db
engine = create_engine('sqlite:///data.db')
# Write query to get date, tmax, and tmin from weather
query = """
SELECT date,
tmax,
tmin
FROM weather;
"""
# Make a data frame by passing query and engine to read_sql()
temperatures = pd.read_sql(query, engine)
# View the resulting data frame
print(temperatures)
|
[
"surel.chandrapratama@gmail.com"
] |
surel.chandrapratama@gmail.com
|
27c0b921e96a11906286be5d2fb8bac1c678ad1c
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc152/abc152_f/9661160.py
|
040bdabbffd7805e7f362fb6eff11285789dc375
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886
| 2021-03-01T12:52:26
| 2021-03-01T12:52:26
| 227,364,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
import collections
from itertools import combinations
N = int(input())
ab = [tuple(map(int, input().split())) for i in range(N-1)]
M = int(input())
uv = [tuple(map(int, input().split())) for i in range(M)]
def popcount(x):
    x = x - ((x >> 1) & 0x5555555555555555)
    x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
    x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
    x = x + (x >> 8)
    x = x + (x >> 16)
    x = x + (x >> 32)
    return x & 0x0000007f

G = [[-1]*N for i in range(N)]
for i in range(N-1):
    a, b = ab[i]
    a -= 1
    b -= 1
    G[a][b] = i
    G[b][a] = i

q = collections.deque()
G2 = [[0 for j in range(N)] for i in range(N)]
for i in range(N):
    q.append((i, 0))
    reached = [False]*N
    reached[i] = True
    while(q):
        x, s = q.popleft()
        for y in range(N):
            if G[x][y] == -1 or reached[y]:
                continue
            else:
                G2[i][y] = s | (1 << G[x][y])
                q.append((y, s | 1 << G[x][y]))
                reached[y] = True

ans = 2**(N-1)
ans2 = 0
for i in range(1, 2**M):
    tmp = 2**(N-1) - 1
    for j in range(M):
        if (i >> j) % 2 == 1:
            u, v = uv[j]
            u -= 1
            v -= 1
            tmp &= ~G2[u][v]
    ans2 += ((-1)**(popcount(i)-1)) * (1 << popcount(tmp))
    # print(ans2, i)
print(ans-ans2)
|
[
"kouhei.k.0116@gmail.com"
] |
kouhei.k.0116@gmail.com
|
f37bf6d7e69087bad285023e03ef4723bb8ba36b
|
397e125e94f4f139f2bf5055824d81f24b8b1757
|
/ABC/145/B.py
|
c96cb419eb5372858a0ff144fa21e734a83f0e9e
|
[] |
no_license
|
tails1434/Atcoder
|
ecbab6ee238e3f225551297db961b1b502841fa4
|
e7c7fed36be46bbaaf020a70997842240ba98d62
|
refs/heads/master
| 2021-07-07T00:31:49.235625
| 2020-09-30T01:42:01
| 2020-09-30T01:42:01
| 189,009,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def main():
    N = int(input())
    S = input()
    if N % 2 != 0:
        print('No')
    else:
        A = S[:N//2]
        B = S[N//2:N]
        if A == B:
            print('Yes')
        else:
            print('No')


if __name__ == "__main__":
    main()
|
[
"sososo1333@gmail.com"
] |
sososo1333@gmail.com
|
9900fb23966c7170f49463766fb9144b67096696
|
6323bd983f6304d95e62909bfc4883d2f9ef1a14
|
/Leetcode/Medium/Range Sum query.py
|
4e7d5a0537c1ad2d9022d5981e76015b68d98328
|
[] |
no_license
|
akshay-sahu-dev/PySolutions
|
4c2d67d5f66fe83a6e302e1742a5bf17dafe2b99
|
83552962805768914034a284bf39197f52ca5017
|
refs/heads/master
| 2023-06-17T06:36:50.252943
| 2021-07-09T17:28:53
| 2021-07-09T17:28:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
## https://leetcode.com/problems/range-sum-query-immutable
from typing import List  # needed to run outside the LeetCode environment


class NumArray:
    def __init__(self, nums: List[int]):
        self.nums = nums

    def sumRange(self, i: int, j: int) -> int:
        return sum(self.nums[i:j+1])


# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
|
[
"akki5233@gmail.com"
] |
akki5233@gmail.com
|
d81bf82845c2f8e12980533f9d59d8e047901438
|
e07da133c4efa517e716af2bdf67a46f88a65b42
|
/hub20/apps/blockchain/management/commands/sync_blockchain.py
|
b28399a2a20dc1fa41b00b74038c1e23e9e449e6
|
[
"MIT"
] |
permissive
|
cryptobuks1/hub20
|
be1da5f77a884f70068fd41edaa45d5e65b7c35e
|
3a4d9cf16ed9d91495ac1a28c464ffb05e9f837b
|
refs/heads/master
| 2022-04-19T21:26:15.386567
| 2020-04-19T07:17:47
| 2020-04-19T07:17:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
import asyncio
import logging
from django.core.management.base import BaseCommand
from hub20.apps.blockchain.app_settings import START_BLOCK_NUMBER
from hub20.apps.blockchain.models import Block, make_web3
logger = logging.getLogger(__name__)
def split_block_lists(block_numbers, group_size=25):
    for n in range(0, len(block_numbers), group_size):
        yield block_numbers[n : n + group_size]


async def make_blocks_in_range(w3, start, end, speed=25):
    chain_id = int(w3.net.version)
    chain_blocks = Block.objects.filter(chain=chain_id)
    block_range = (start, end)
    recorded_block_set = set(
        chain_blocks.filter(number__range=block_range).values_list("number", flat=True)
    )
    range_set = set(range(*block_range))
    missing_blocks = list(range_set.difference(recorded_block_set))[::-1]
    counter = 0
    logger.info(f"{len(missing_blocks)} missing blocks between {start} and {end}")
    for block_list in split_block_lists(missing_blocks, group_size=speed):
        for block_number in block_list:
            counter += 1
            if (counter % speed) == 0:
                await asyncio.sleep(1)
            Block.make_all(block_number, w3)
        else:
            await asyncio.sleep(1)


async def save_new_blocks(w3):
    current_block_number = w3.eth.blockNumber
    while True:
        logger.info(f"Current block number: {current_block_number}")
        block_number = w3.eth.blockNumber
        if block_number > current_block_number:
            Block.make_all(block_number, w3)
            current_block_number = block_number
        else:
            await asyncio.sleep(5)


async def backfill(w3):
    SCAN_SIZE = 5000
    end = w3.eth.blockNumber
    while end > START_BLOCK_NUMBER:
        start = max(end - SCAN_SIZE, START_BLOCK_NUMBER)
        await make_blocks_in_range(w3, start, end)
        end = start
    logger.info(f"Backfill complete. All blocks from {end} now recorded")


class Command(BaseCommand):
    help = "Listens to new blocks and transactions on event loop and saves on DB"

    def handle(self, *args, **options):
        w3 = make_web3()
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(asyncio.gather(save_new_blocks(w3), backfill(w3)))
        finally:
            loop.close()
|
[
"raphael@lullis.net"
] |
raphael@lullis.net
|
ade677f8e988685507a1c948ac73be652ce39b49
|
f0d3b759d9b0d2000cea2c291a4974e157651216
|
/apps/goods/migrations/0001_initial.py
|
303ea309f8cf6f7ee582bdc2901bd642b7490841
|
[] |
no_license
|
PYBPYB/Fresh-every-day
|
526265ae0a9b1fe8e8f8944e0320ea8a47b8571c
|
5b62fda9effe327a5da9ce45644bf44ee9d7108f
|
refs/heads/master
| 2020-04-12T14:39:31.325736
| 2019-05-31T02:31:54
| 2019-05-31T02:31:54
| 162,558,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,310
|
py
|
# Generated by Django 2.1.3 on 2018-11-26 09:10
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='商品SPU名称')),
('detail', tinymce.models.HTMLField(blank=True, verbose_name='商品详情')),
],
options={
'verbose_name': '商品SPU',
'verbose_name_plural': '商品SPU',
'db_table': 'df_goods',
},
),
migrations.CreateModel(
name='GoodsImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('image', models.ImageField(upload_to='goods', verbose_name='图片路径')),
],
options={
'verbose_name': '商品图片',
'verbose_name_plural': '商品图片',
'db_table': 'df_goods_image',
},
),
migrations.CreateModel(
name='GoodsSKU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='商品名称')),
('desc', models.CharField(max_length=250, verbose_name='商品简介')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='商品价格')),
('unite', models.CharField(max_length=20, verbose_name='商品单位')),
('image', models.ImageField(upload_to='goods', verbose_name='商品图片')),
('stock', models.IntegerField(default=1, verbose_name='商品库存')),
('sales', models.IntegerField(default=0, verbose_name='商品销量')),
('status', models.SmallIntegerField(choices=[(0, '下架'), (1, '上架')], default=1, verbose_name='是否上架')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品SPU')),
],
options={
'verbose_name': '商品',
'verbose_name_plural': '商品',
'db_table': 'df_goods_sku',
},
),
migrations.CreateModel(
name='GoodsType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='种类名称')),
('logo', models.CharField(max_length=20, verbose_name='标识')),
('image', models.ImageField(upload_to='type', verbose_name='商品类型图片')),
],
options={
'verbose_name': '商品种类',
'verbose_name_plural': '商品种类',
'db_table': 'df_goods_type',
},
),
migrations.CreateModel(
name='IndexGoodsBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('image', models.ImageField(upload_to='banner', verbose_name='图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品')),
],
options={
'verbose_name': '首页轮播商品',
'verbose_name_plural': '首页轮播商品',
'db_table': 'df_index_banner',
},
),
migrations.CreateModel(
name='IndexPromotionBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('name', models.CharField(max_length=20, verbose_name='活动名称')),
('url', models.URLField(verbose_name='活动链接')),
('image', models.ImageField(upload_to='banner', verbose_name='活动图片')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
],
options={
'verbose_name': '主页促销活动',
'verbose_name_plural': '主页促销活动',
'db_table': 'df_index_promotion',
},
),
migrations.CreateModel(
name='IndexTypeBanner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('display_type', models.SmallIntegerField(choices=[(0, '不展示'), (1, '展示')], default=1, verbose_name='展示表示')),
('index', models.SmallIntegerField(default=0, verbose_name='展示顺序')),
('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品SKU')),
('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsType', verbose_name='商品类型')),
],
options={
'verbose_name': '主页分类展示商品',
'verbose_name_plural': '主页分类展示商品',
'db_table': 'df_index_type_goods',
},
),
migrations.AddField(
model_name='goodssku',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsType', verbose_name='商品种类'),
),
migrations.AddField(
model_name='goodsimage',
name='sku',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsSKU', verbose_name='商品'),
),
    ]
|
[
"you@example.com"
] |
you@example.com
|
e3656c3a8b753864e8154ec4f8a46ac7e789e3b0
|
9decd5901a491d08e9235abc7fb8dade362d215e
|
/pastepwn/database/__init__.py
|
d67e426275658725e14ea82b809b9e95828cb0b9
|
[
"MIT"
] |
permissive
|
jonahrosenblum/pastepwn
|
b4e7644fefd289d8ffb2a1cc6e77224dd1545c46
|
26c9e426a195d403894f00638eca6c5687cbd959
|
refs/heads/master
| 2021-01-02T22:03:26.922322
| 2020-02-04T23:36:08
| 2020-02-04T23:36:08
| 239,809,524
| 0
| 0
|
MIT
| 2020-02-11T16:27:06
| 2020-02-11T16:27:05
| null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
from .abstractdb import AbstractDB
from .mongodb import MongoDB
from .mysqldb import MysqlDB
from .sqlitedb import SQLiteDB
__all__ = ('AbstractDB', 'MongoDB', 'SQLiteDB', 'MysqlDB')
|
[
"d-Rickyy-b@users.noreply.github.com"
] |
d-Rickyy-b@users.noreply.github.com
|
be0d795ee4a482be60cebd7782452cdb1ec3243e
|
5593b35f326748f18053e7ea042c98fe6b70a850
|
/tqt/function/_utils.py
|
fcfe437056c27c6c9f5efbfe6e9d8517486bdff4
|
[
"BSD-3-Clause"
] |
permissive
|
sicdl/TQT
|
7dfe3bce2bb5dace9a467945512e65525a0c3be9
|
27b73fcf27ddfb67cd28f6ed27e49341f27c9f16
|
refs/heads/main
| 2023-04-14T18:28:23.224689
| 2021-04-22T14:46:46
| 2021-04-22T14:46:46
| 362,503,682
| 0
| 0
|
BSD-3-Clause
| 2021-04-28T14:45:14
| 2021-04-28T14:45:13
| null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
import torch
def number_to_tensor(x, t):
    r'''
    Turn x in to a tensor with data type like tensor t.
    '''
    return torch.tensor(x).type_as(t)
|
[
"you@example.com"
] |
you@example.com
|
f7322bfe24f366e1da7e22987d6cb7ed70e9b213
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-SecurityInterface/PyObjCTest/test_sfchooseidentitypanel.py
|
128a8fe463da4b1756ea2b16a7730993712ab6e7
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
from PyObjCTools.TestSupport import *
import objc  # used below for objc._C_NSInteger
import SecurityInterface


class TestSFChooseIdentityPanelHelper (SecurityInterface.NSObject):
    def chooseIdentityPanelShowHelp_(self, v): return 1


class TestSFChooseIdentityPanel (TestCase):
    def test_classes(self):
        SecurityInterface.SFChooseIdentityPanel

    def test_methods(self):
        self.assertArgIsSEL(SecurityInterface.SFChooseIdentityPanel.beginSheetForWindow_modalDelegate_didEndSelector_contextInfo_identities_message_, 2, b'v@:@'+objc._C_NSInteger+b'^v')
        self.assertArgIsBOOL(SecurityInterface.SFChooseIdentityPanel.setShowsHelp_, 0)
        self.assertResultIsBOOL(SecurityInterface.SFChooseIdentityPanel.showsHelp)
        self.assertResultIsBOOL(TestSFChooseIdentityPanelHelper.chooseIdentityPanelShowHelp_)


if __name__ == "__main__":
    main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
307afce7174d1f60914d4a08060660c34b82e628
|
794be26e4ab7bdd9af017ce1d0c6ce1f087d968d
|
/functional_tests/test_create.py
|
b0b5e0c8678060723a0834273615afdbe0ad3866
|
[
"Apache-2.0",
"LGPL-3.0-only"
] |
permissive
|
jasinner/elliott
|
02fcc2f67b56d4e16eef28f0323d276fbd954593
|
67d77913517d0f7954dc02d918eb96ba78ec1ea8
|
refs/heads/master
| 2021-06-18T19:59:45.878716
| 2021-04-29T21:33:51
| 2021-04-29T21:33:51
| 215,217,286
| 0
| 0
|
Apache-2.0
| 2019-10-15T05:52:13
| 2019-10-15T05:52:13
| null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import subprocess
from functional_tests import constants
class GreateTestCase(unittest.TestCase):
    def test_create_rhba(self):
        out = subprocess.check_output(
            constants.ELLIOTT_CMD
            + [
                "--group=openshift-4.2", "create", "--type=RHBA", "--impetus=standard", "--kind=rpm",
                "--date=2020-Jan-1", "--assigned-to=openshift-qe-errata@redhat.com", "--manager=vlaad@redhat.com", "--package-owner=lmeyer@redhat.com"
            ]
        )
        self.assertIn("Would have created advisory:", out.decode("utf-8"))
|
[
"yuxzhu@redhat.com"
] |
yuxzhu@redhat.com
|
38f612204aaf7a5bb92d2ddfc8514649d07bdcad
|
a73cc710aa370be94b70248f2268d9c3b14059d0
|
/server/src/weblab/core/web/quickadmin.py
|
1fba2f6ba62d2380539e03e37e0669230626b289
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
weblabdeusto/weblabdeusto
|
05692d4cc0a36287191544551d4a1113b3d95164
|
62e488afac04242a68efa4eb09fd91d7e999d4dd
|
refs/heads/master
| 2023-05-10T23:14:05.407266
| 2022-08-31T14:16:23
| 2022-08-31T14:16:23
| 5,719,299
| 19
| 23
|
BSD-2-Clause
| 2023-05-01T20:18:53
| 2012-09-07T16:24:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,417
|
py
|
from __future__ import print_function, unicode_literals
import datetime
import calendar
from flask import render_template, request, send_file, Response, url_for
from functools import wraps, partial
from weblab.core.web import weblab_api
from weblab.core.db import UsesQueryParams
def check_credentials(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        expected_token = weblab_api.config.get('quickadmin_token', None)
        if expected_token:
            token = request.args.get('token')
            if not token:
                return Response("You must provide a token like ?token=something")
            if token != expected_token:
                return Response("Invalid token")
        return func(*args, **kwargs)
    return wrapper

def get_url_for():
    existing_args = dict(request.args)
    existing_args.pop('page', None)
    my_url_for = partial(url_for, **existing_args)
    if 'token' in request.args:
        return partial(my_url_for, token = request.args['token'])
    return my_url_for

def create_query_params(**kwargs):
    params = {}
    for potential_arg in 'login', 'experiment_name', 'category_name', 'ip', 'country':
        if potential_arg in request.args:
            params[potential_arg] = request.args[potential_arg]
    for potential_arg in 'start_date', 'end_date':
        if potential_arg in request.args:
            try:
                params[potential_arg] = datetime.datetime.strptime(request.args[potential_arg], "%Y-%m-%d").date()
            except ValueError:
                pass
    for potential_arg in 'page',:
        if potential_arg in request.args:
            try:
                params[potential_arg] = int(request.args[potential_arg])
            except ValueError:
                pass
    if 'page' not in params or params['page'] <= 0:
        params['page'] = 1
    for potential_arg in 'date_precision',:
        if potential_arg in request.args:
            if request.args[potential_arg] in ('day', 'month', 'year', 'week'):
                params[potential_arg] = request.args[potential_arg]
    if 'date_precision' not in params:
        params['date_precision'] = 'month'
    params.update(kwargs)
    query_params = UsesQueryParams(**params)
    metadata = weblab_api.db.quickadmin_uses_metadata(query_params)
    params['count'] = metadata['count']
    if 'start_date' in params:
        params['min_date'] = params['start_date']
    else:
        params['min_date'] = metadata['min_date']
    if 'end_date' in params:
        params['max_date'] = params['end_date']
    else:
        params['max_date'] = metadata['max_date']
    return UsesQueryParams(**params)

@weblab_api.route_web('/quickadmin/')
@check_credentials
def index():
    return render_template("quickadmin/index.html", url_for = get_url_for())

LIMIT = 20

@weblab_api.route_web('/quickadmin/uses')
@check_credentials
def uses():
    query_params = create_query_params()
    uses = weblab_api.db.quickadmin_uses(LIMIT, query_params)
    return render_template("quickadmin/uses.html", limit = LIMIT, uses = uses, filters = query_params.filterdict(), arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Uses', endpoint = '.uses')

@weblab_api.route_web('/quickadmin/use/<int:use_id>')
@check_credentials
def use(use_id):
    return render_template("quickadmin/use.html", param_url_for = get_url_for(), **weblab_api.db.quickadmin_use(use_id = use_id))

@weblab_api.route_web('/quickadmin/file/<int:file_id>')
@check_credentials
def file(file_id):
    file_path = weblab_api.db.quickadmin_filepath(file_id = file_id)
    if file_path is None:
        return "File not found", 404
    return send_file(file_path, as_attachment = True)

@weblab_api.route_web('/quickadmin/uses/map')
@check_credentials
def uses_map():
    query_params = create_query_params()
    per_country = weblab_api.db.quickadmin_uses_per_country(query_params)
    per_time = _per_country_by_to_d3(weblab_api.db.quickadmin_uses_per_country_by(query_params))
    return render_template("quickadmin/uses_map.html", per_country = per_country, per_time = per_time, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Uses map', endpoint = '.uses_map')

@weblab_api.route_web('/quickadmin/demos')
@check_credentials
def demos():
    group_names = weblab_api.config.get_value('login_default_groups_for_external_users', [])
    query_params = create_query_params(group_names = group_names)
    uses = weblab_api.db.quickadmin_uses(LIMIT, query_params)
    return render_template("quickadmin/uses.html", limit = LIMIT, uses = uses, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Demo uses', endpoint = '.demos')

@weblab_api.route_web('/quickadmin/demos/map')
@check_credentials
def demos_map():
    group_names = weblab_api.config.get_value('login_default_groups_for_external_users', [])
    query_params = create_query_params(group_names = group_names)
    per_country = weblab_api.db.quickadmin_uses_per_country(query_params)
    per_time = _per_country_by_to_d3(weblab_api.db.quickadmin_uses_per_country_by(query_params))
    return render_template("quickadmin/uses_map.html", per_country = per_country, per_time = per_time, arguments = query_params.pubdict(), param_url_for = get_url_for(), title = 'Demo uses map', endpoint = '.demos_map')

def _per_country_by_to_d3(per_time):
    new_per_time = [
        # {
        #     key : country,
        #     values : [
        #         [
        #             time_in_milliseconds,
        #             value
        #         ]
        #     ]
        # }
    ]
    total_per_country = [
        # (country, number)
    ]
    for country in per_time:
        total_per_country.append( (country, sum([ value for key, value in per_time[country] ]) ))
    total_per_country.sort(lambda x, y: cmp(x[1], y[1]), reverse = True)
    top_countries = [ country for country, value in total_per_country[:10] ]
    max_value = max([value for country, value in total_per_country[:10] ] or [0])
    key_used = 'month'
    times_in_millis = {
        # millis : datetime
    }
    for country in top_countries:
        for key in [ key for key, value in per_time[country] ]:
            if len(key) == 1:
                if isinstance(key[0], datetime.date):
                    key_used = 'day'
                    date_key = key[0]
                else:
                    key_used = 'year'
                    date_key = datetime.date(year = key[0], month = 1, day = 1)
            elif len(key) == 2:
                key_used = 'month'
                date_key = datetime.date(year = key[0], month = key[1], day = 1)
            else:
                continue
            time_in_millis = calendar.timegm(date_key.timetuple()) * 1000
            times_in_millis[time_in_millis] = key
    for country in per_time:
        if country not in top_countries:
            continue
        country_data = {'key' : country, 'values' : []}
        country_time_data = dict(per_time[country])
        for time_in_millis in sorted(times_in_millis):
            key = times_in_millis[time_in_millis]
            value = country_time_data.get(key, 0)
            country_data['values'].append([time_in_millis, value])
        new_per_time.append(country_data)
    return { 'key_used' : key_used, 'per_time' : new_per_time, 'max_value' : max_value}
|
[
"pablo.orduna@deusto.es"
] |
pablo.orduna@deusto.es
|
a463d23256ed3b7f0178434ea5256ff915ef0430
|
4bb1a23a62bf6dc83a107d4da8daefd9b383fc99
|
/work/abc034_d2.py
|
4afb3860d7f983c4de267f774fec7425d98c023d
|
[] |
no_license
|
takushi-m/atcoder-work
|
0aeea397c85173318497e08cb849efd459a9f6b6
|
f6769f0be9c085bde88129a1e9205fb817bb556a
|
refs/heads/master
| 2021-09-24T16:52:58.752112
| 2021-09-11T14:17:10
| 2021-09-11T14:17:10
| 144,509,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
n,k = map(int, input().split())
wpl = [list(map(int, input().split())) for _ in range(n)]
def f(w,p,x):
    p = p/100
    return p*w - w*x

def check(x):
    l = [f(wpl[i][0], wpl[i][1], x) for i in range(n)]
    l.sort(reverse=True)
    return sum(l[:k])>=0

ok = 0
ng = 1
while abs(ng-ok)>10**-7:
    mid = (ok+ng)/2
    if check(mid):
        ok = mid
    else:
        ng = mid
print(ok*100)
|
[
"takushi-m@users.noreply.github.com"
] |
takushi-m@users.noreply.github.com
|
061e1a704629d8949be1743454ac0c89316349fb
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc025/C/2618079.py
|
c2927f9e67892662b56299b7a9fff478e70376c2
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
n = int(input())
l = []
r = []
for i in range(n):
    lt, rt = map(int,input().split())
    l.append(lt)
    r.append(rt)
l.append(0)
r.append(0)
l.sort()
r.sort()
l.reverse()
ans = 0
i = 0
while r[i]<l[i]:
    ans += 2*(l[i] - r[i])
    i+=1
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
594eaa6cce6464e3ce1165188820b67175525a11
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/opsworks_write_f/rds-db-instance_update.py
|
3257b4557a10fe6a781cb6c9086bdfa3b85a8b86
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    deregister-rds-db-instance : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/deregister-rds-db-instance.html
    describe-rds-db-instances : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/describe-rds-db-instances.html
    register-rds-db-instance : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/opsworks/register-rds-db-instance.html
    """
    write_parameter("opsworks", "update-rds-db-instance")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
766973ba9748fa74c5378e42398721badd887cf3
|
2612f336d667a087823234daf946f09b40d8ca3d
|
/python/lib/Lib/site-packages/django/utils/decorators.py
|
17f2ea30b337f624c0f984698e31aebbb19f6d37
|
[
"Apache-2.0"
] |
permissive
|
tnorbye/intellij-community
|
df7f181861fc5c551c02c73df3b00b70ab2dd589
|
f01cf262fc196bf4dbb99e20cd937dee3705a7b6
|
refs/heads/master
| 2021-04-06T06:57:57.974599
| 2018-03-13T17:37:00
| 2018-03-13T17:37:00
| 125,079,130
| 2
| 0
|
Apache-2.0
| 2018-03-13T16:09:41
| 2018-03-13T16:09:41
| null |
UTF-8
|
Python
| false
| false
| 4,290
|
py
|
"Functions that help with dynamically creating decorators for views."
try:
    from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
except ImportError:
    from django.utils.functional import wraps, update_wrapper, WRAPPER_ASSIGNMENTS  # Python 2.4 fallback.

class classonlymethod(classmethod):
    def __get__(self, instance, owner):
        if instance is not None:
            raise AttributeError("This method is available only on the view class.")
        return super(classonlymethod, self).__get__(instance, owner)

def method_decorator(decorator):
    """
    Converts a function decorator into a method decorator
    """
    # 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined it.
    def _dec(func):
        def _wrapper(self, *args, **kwargs):
            @decorator
            def bound_func(*args2, **kwargs2):
                return func(self, *args2, **kwargs2)
            # bound_func has the signature that 'decorator' expects i.e. no
            # 'self' argument, but it is a closure over self so it can call
            # 'func' correctly.
            return bound_func(*args, **kwargs)
        # In case 'decorator' adds attributes to the function it decorates, we
        # want to copy those. We don't have access to bound_func in this scope,
        # but we can cheat by using it on a dummy function.
        @decorator
        def dummy(*args, **kwargs):
            pass
        update_wrapper(_wrapper, dummy)
        # Need to preserve any existing attributes of 'func', including the name.
        update_wrapper(_wrapper, func)
        return _wrapper
    update_wrapper(_dec, decorator)
    # Change the name to aid debugging.
    _dec.__name__ = 'method_decorator(%s)' % decorator.__name__
    return _dec

def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but returns a function
    that accepts the arguments to be passed to the middleware_class.
    Use like::

        cache_page = decorator_from_middleware_with_args(CacheMiddleware)
        # ...
        @cache_page(3600)
        def my_view(request):
            # ...
    """
    return make_middleware_decorator(middleware_class)

def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    return make_middleware_decorator(middleware_class)()

def available_attrs(fn):
    """
    Return the list of functools-wrappable attributes on a callable.
    This is required as a workaround for http://bugs.python.org/issue3445.
    """
    return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))

def make_middleware_decorator(middleware_class):
    def _make_decorator(*m_args, **m_kwargs):
        middleware = middleware_class(*m_args, **m_kwargs)
        def _decorator(view_func):
            def _wrapped_view(request, *args, **kwargs):
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception, e:
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            return result
                    raise
                if hasattr(middleware, 'process_response'):
                    result = middleware.process_response(request, response)
                    if result is not None:
                        return result
                return response
            return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
        return _decorator
    return _make_decorator
|
[
"dmitry.trofimov@jetbrains.com"
] |
dmitry.trofimov@jetbrains.com
|
e1a24bee538f55419b12446f7f37bc4f25bc8e38
|
03c8d75d11dd34a253d265ce5b44bf7984311bab
|
/root2yoda
|
ddf417c19d865e0e7c684d83a4ebafd3e9738188
|
[] |
no_license
|
raggleton/QGAnalysisRIVET
|
e8a57fbfa1380e1c67365b0d5a944119f715813b
|
0703bdf81bf27f5fc91d8eedb6e44651d978749a
|
refs/heads/master
| 2021-06-08T19:29:53.683282
| 2021-04-06T07:22:56
| 2021-04-06T07:22:56
| 142,179,672
| 0
| 1
| null | 2020-11-03T17:19:58
| 2018-07-24T15:40:48
|
Gnuplot
|
UTF-8
|
Python
| false
| false
| 1,656
|
#! /usr/bin/env python
"""\
%prog rootfile [yodafile]
Convert a ROOT data file to the YODA data format.
"""
import yoda, os, sys, optparse
from yoda.script_helpers import parse_x2y_args, filter_aos
parser = optparse.OptionParser(usage=__doc__)
parser.add_option("-m", "--match", dest="MATCH", metavar="PATT", default=None,
                  help="Only write out histograms whose path matches this regex")
parser.add_option("-M", "--unmatch", dest="UNMATCH", metavar="PATT", default=None,
                  help="Exclude histograms whose path matches this regex")
opts, args = parser.parse_args()

in_out = parse_x2y_args(args, ".root", ".yoda")
if not in_out:
    sys.stderr.write("You must specify the ROOT and YODA file names\n")
    sys.exit(1)

import ROOT

for i, o in in_out:
    print "opening", i
    rf = ROOT.TFile(i)
    rootobjects_raw = list(yoda.root.getall(rf))
    rootobjects = [(path, ro) for (path, ro) in rootobjects_raw if not isinstance(ro, ROOT.TH1F)]
    th1f = [(path, ro) for (path, ro) in rootobjects_raw if isinstance(ro, ROOT.TH1F)]
    print rootobjects
    print th1f
    # Conversion of TH1F into TH1D
    for path, ro in th1f:
        temp = ROOT.TH1D()
        ro.Copy(temp)
        rootobjects.append((path, temp))
    def to_yoda(path, ro):
        print path, ro
        ao = yoda.root.to_yoda(ro)
        ao.path = path
        return ao
    analysisobjects = [to_yoda(path, ro) for (path, ro) in rootobjects]
    rf.Close()
    analysisobjects = [ao for ao in analysisobjects if ao is not None]
    filter_aos(analysisobjects, opts.MATCH, opts.UNMATCH)
    yoda.writeYODA(analysisobjects, o)
|
[
"robin.aggleton@cern.ch"
] |
robin.aggleton@cern.ch
|
|
e0b51af08de583fc6d2449bff3c69e61e59ce414
|
3f3f2b3eaab992d3cc8f49fcd03e4824a11fddab
|
/diamond.releng.jenkins/job.scripts/email_owners_of_submittable_changes.py
|
7bfd821419fa2722938f0131ed624a2ce5f2ba3e
|
[] |
no_license
|
DiamondLightSource/diamond-releng
|
7bff1926e3fd2f9df3c056d8af5521b4e74aaf41
|
ba15336e7f7d3c160d3c3bc28316817cb4585305
|
refs/heads/master
| 2021-01-25T03:19:25.403769
| 2019-01-02T16:05:28
| 2019-01-02T16:05:28
| 19,986,689
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,159
|
py
|
#!/usr/bin/env python3
###
### Requires Python 3
###
'''
Identify Gerrit changes that are ready to submit, and email the owners
'''
from email.message import EmailMessage
from email.headerregistry import Address
import datetime
import itertools
import json
import logging
import operator
import os
import os.path
import smtplib
import stat
import sys
import time
import urllib.request
import urllib.parse
import urllib.error
GERRIT_HOST = 'gerrit.diamond.ac.uk'
# define module-wide logging
logger = logging.getLogger(__name__)
def setup_logging():
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%d %H:%M:%S")
    # create console handler
    logging_console_handler = logging.StreamHandler()
    logging_console_handler.setFormatter(formatter)
    logger.addHandler(logging_console_handler)
    logger.setLevel(logging.INFO)
    # logger.setLevel(logging.DEBUG)

class SubmittableChangesProcessor():
    def __init__(self):
        setup_logging()
        self.logger = logger
        self.gerrit_url_base = 'https://' + GERRIT_HOST + '/'  # when using the REST API, this is the base URL to use
        self.gerrit_url_browser = self.gerrit_url_base  # when generating links, this is the base URL to use
        # since the Gerrit REST API has been secured, then we need to use basic authentication
        self.gerrit_url_base += 'a/'
        # note: the original used the Python 2 urllib2 names here; the
        # urllib.request equivalents are substituted so the script runs under Python 3
        handler = urllib.request.HTTPBasicAuthHandler()
        handler.add_password('Gerrit Code Review', self.gerrit_url_base, *self.get_gerrit_http_username_password())
        opener = urllib.request.build_opener(handler)
        urllib.request.install_opener(opener)

    @staticmethod
    def get_gerrit_http_username_password():
        ''' the token required to authenticate to Gerrit is stored in a file
            the file, in addition to comment and empty lines, contains a single line of the format
                username:password
        '''
        token_filename = os.path.abspath(os.path.expanduser('~/passwords/http-password_Gerrit_for-REST.txt'))
        assert os.path.isfile(token_filename)
        assert os.stat(token_filename).st_mode == stat.S_IRUSR + stat.S_IFREG  # permissions must be user-read + regular-file
        last_nonempty_line = ''
        with open(token_filename, 'r') as token_file:
            for line in token_file:  # standard OS terminator is converted to \n
                line = line.rstrip('\n')  # remove trailing newline
                if line:
                    last_nonempty_line = line
        if last_nonempty_line:
            return last_nonempty_line.split(':', 1)
        raise Exception('File %s appears empty' % token_filename)

    def gerrit_REST_api(self, relative_url, accept404=False):
        ''' Call the Gerrit REST API
        '''
        url = self.gerrit_url_base + relative_url
        request = urllib.request.Request(url, headers={'Accept': 'application/json', 'Accept-Charset': 'utf-8'})  # header specifies compact json, which is more efficient
        self.logger.debug('gerrit_REST_api retrieving: %s' % (url,))
        try:
            rest_json = urllib.request.urlopen(request).read()
        except (urllib.error.HTTPError) as err:
            if accept404 and (err.code == 404):
                self.logger.debug('Invalid response from Gerrit server reading %s: %s' % (url, err))
                return None
            self.logger.critical('Invalid response from Gerrit server reading %s: %s' % (url, err))
            return None
        gerrit_magic_prefix_line = b")]}'\n"
        if not rest_json[:len(gerrit_magic_prefix_line)] == gerrit_magic_prefix_line:
            self.logger.critical('Invalid response from Gerrit server reading %s: magic prefix line not found' % (url,))
            return None
        standard_json = json.loads(rest_json[len(gerrit_magic_prefix_line):].decode('utf-8'))  # strip off the magic prefix line returned by Gerrit
        # self.logger.debug(json.dumps(standard_json, indent=2))
        return standard_json

    def get_submittable_changes_from_gerrit(self):
        ''' Queries Gerrit to get a list of ChangeInfo records for the changes that can be submitted
        '''
        url = 'changes/?q=%s&o=CURRENT_REVISION&o=DETAILED_ACCOUNTS' % (urllib.parse.quote('is:open label:Code-Review+2 label:Verified+1 NOT label:Code-Review-2 NOT label:Verified-1'),)
        changeinfos = self.gerrit_REST_api(url)
        longest_string = {}
        longest_string['_number'] = max(itertools.chain((len(str(ci['_number'])) for ci in changeinfos), (len('Change'),)))
        longest_string['project'] = max(itertools.chain((len(ci['project']) for ci in changeinfos), (len('Project'),)))
        longest_string['branch'] = max(itertools.chain((len(ci['branch']) for ci in changeinfos), (len('Branch'),)))
        longest_string['owner'] = max(itertools.chain((len(ci['owner']['name']) for ci in changeinfos), (len('Owner'),)))
        format = ('%' + str(longest_string['_number']) + 's ' +
                  '%-' + str(longest_string['project']) + 's ' +
                  '%-' + str(longest_string['branch']) + 's ' +
                  '%-' + str(longest_string['owner']) + 's ' +
                  '%-16s ' +  # for the time last updated
                  '%s\n')  # for the subject
        emails = set()
        report = format % ('Change', 'Project', 'Branch', 'Owner', 'Updated', 'Subject')
        # use a sort key that transforms Firstname.Lastname@example.com to lastname.firstname
        for ci in sorted(changeinfos, key=lambda ci:
                         '.'.join(operator.itemgetter(2,0)(ci['owner']['email'].partition('@')[0].lower().partition('.'))) +
                         os.path.basename(ci['project'])):  # there can be multiple changeinfos
            report += format % (ci['_number'], ci['project'], ci['branch'], ci['owner']['name'], ci['updated'][:16], ci['subject'])
            emails.add(ci['owner']['email'])
        self.emails = sorted(emails)
        self.report = report
        return

    def make_email(self):
        body = 'Below is a list of changes in Gerrit that have been verified and reviewed, but are still waiting for the change owner to submit them' + \
               ', as of ' + time.strftime("%a, %Y/%m/%d %H:%M:%S %Z") + '.\n'
        body += '''
PLEASE CONSIDER EITHER:
    Submit your change, if you still want it
    Abandon your change, if it is no longer required
'''
        body += self.report
        body += '\n<<end report>>\n'
        # we are going to create an email message with ASCII characters, so convert any non-ASCII ones
        # note that this is really a hack, we should be smarter about constructing an email message
        body = body.replace("’", "'").replace('“', '"').replace('”', '"')
        message = EmailMessage()
        message['Subject'] = 'Report on Gerrit changes waiting for the owner to submit'
        message['From'] = Address('Jenkins Build Server (Diamond Light Source)', 'gerrit-no-reply@diamond.ac.uk')
        message['List-Id'] = 'Gerrit awaiting submit <gerrit-awaiting-submit.jenkins.diamond.ac.uk>'
        # use a sort key that transforms Firstname.Lastname@example.com to lastname.firstname
        message['To'] = [Address(addr_spec=committer) for committer in sorted(
            self.emails,
            key=lambda email_addr: '.'.join(operator.itemgetter(2,0)(email_addr.partition('@')[0].lower().partition('.')))
            ) if '@' in committer]
        message['CC'] = ('matthew.webber@diamond.ac.uk',)
        message.set_content(body)
        email_expires_days = 5
        if email_expires_days:
            message['Expiry-Date'] = (datetime.datetime.utcnow() + datetime.timedelta(days=email_expires_days)).strftime("%a, %d %b %Y %H:%M:%S +0000")
        self.logger.info("Sending email ...")
        with smtplib.SMTP('localhost') as smtp:
            smtp.send_message(message)
        return message

if __name__ == '__main__':
    scp = SubmittableChangesProcessor()
    scp.get_submittable_changes_from_gerrit()
    message = scp.make_email()
    print(message)
    sys.exit(0)
|
[
"matthew.webber@diamond.ac.uk"
] |
matthew.webber@diamond.ac.uk
|
44a16f28b318d131dbeefaf200012cfa5e1bd8de
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/tests/pytype_test.py
|
ee7ac0bb9cb9d9175b955f913e9188cc8bbc75a2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,915
|
py
|
#!/usr/bin/env python3
"""Test runner for typeshed.
Depends on pytype being installed.
If pytype is installed:
1. For every pyi, do nothing if it is in pytype_blacklist.txt.
2. Otherwise, call 'pytype.io.parse_pyi'.
Option two will load the file and all of its builtin and typeshed dependencies. This
will also discover incorrect usage of imported modules.
"""
import argparse
import itertools
import os
import re
import subprocess
import traceback
from typing import List, Match, Optional, Sequence, Tuple
from pytype import config as pytype_config, io as pytype_io
TYPESHED_SUBDIRS = ["stdlib", "third_party"]
TYPESHED_HOME = "TYPESHED_HOME"
UNSET = object() # marker for tracking the TYPESHED_HOME environment variable
def main() -> None:
args = create_parser().parse_args()
typeshed_location = args.typeshed_location or os.getcwd()
subdir_paths = [os.path.join(typeshed_location, d) for d in TYPESHED_SUBDIRS]
check_subdirs_discoverable(subdir_paths)
check_python_exes_runnable(python27_exe_arg=args.python27_exe, python36_exe_arg=args.python36_exe)
files_to_test = determine_files_to_test(typeshed_location=typeshed_location, subdir_paths=subdir_paths)
run_all_tests(
files_to_test=files_to_test,
typeshed_location=typeshed_location,
python27_exe=args.python27_exe,
python36_exe=args.python36_exe,
print_stderr=args.print_stderr,
dry_run=args.dry_run,
)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Pytype/typeshed tests.")
parser.add_argument("-n", "--dry-run", action="store_true", default=False, help="Don't actually run tests")
# Default to '' so that symlinking typeshed subdirs in cwd will work.
parser.add_argument("--typeshed-location", type=str, default="", help="Path to typeshed installation.")
# Set to true to print a stack trace every time an exception is thrown.
parser.add_argument(
"--print-stderr", action="store_true", default=False, help="Print stderr every time an error is encountered."
)
# We need to invoke python2.7 and 3.6.
parser.add_argument("--python27-exe", type=str, default="python2.7", help="Path to a python 2.7 interpreter.")
parser.add_argument("--python36-exe", type=str, default="python3.6", help="Path to a python 3.6 interpreter.")
return parser
class PathMatcher:
def __init__(self, patterns: Sequence[str]) -> None:
self.matcher = re.compile(r"({})$".format("|".join(patterns))) if patterns else None
def search(self, path: str) -> Optional[Match[str]]:
if not self.matcher:
return None
return self.matcher.search(path)
def load_blacklist(typeshed_location: str) -> List[str]:
filename = os.path.join(typeshed_location, "tests", "pytype_blacklist.txt")
skip_re = re.compile(r"^\s*([^\s#]+)\s*(?:#.*)?$")
skip = []
with open(filename) as f:
for line in f:
skip_match = skip_re.match(line)
if skip_match:
skip.append(skip_match.group(1))
return skip
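# For illustration, a blacklist line such as
#     stdlib/2/os/__init__.pyi  # parse error
# yields "stdlib/2/os/__init__.pyi" via group 1 of skip_re, while blank
# lines and comment-only lines produce no match and are skipped.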
def run_pytype(*, filename: str, python_version: str, python_exe: str, typeshed_location: str) -> Optional[str]:
"""Runs pytype, returning the stderr if any."""
options = pytype_config.Options.create(
filename,
module_name=_get_module_name(filename),
parse_pyi=True,
python_version=python_version,
python_exe=python_exe)
old_typeshed_home = os.environ.get(TYPESHED_HOME, UNSET)
os.environ[TYPESHED_HOME] = typeshed_location
try:
pytype_io.parse_pyi(options)
except Exception:
stderr = traceback.format_exc()
else:
stderr = None
if old_typeshed_home is UNSET:
del os.environ[TYPESHED_HOME]
else:
os.environ[TYPESHED_HOME] = old_typeshed_home
return stderr
def _get_relative(filename: str) -> str:
top = 0
for d in TYPESHED_SUBDIRS:
try:
top = filename.index(d)
except ValueError:
continue
else:
break
return filename[top:]
def _get_module_name(filename: str) -> str:
"""Converts a filename {subdir}/m.n/module/foo to module.foo."""
return ".".join(_get_relative(filename).split(os.path.sep)[2:]).replace(".pyi", "").replace(".__init__", "")
def can_run(exe: str, *, args: List[str]) -> bool:
try:
subprocess.run([exe] + args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except OSError:
return False
else:
return True
def _is_version(path: str, version: str) -> bool:
return any("{}{}{}".format(d, os.path.sep, version) in path for d in TYPESHED_SUBDIRS)
def check_subdirs_discoverable(subdir_paths: List[str]) -> None:
for p in subdir_paths:
if not os.path.isdir(p):
raise SystemExit("Cannot find typeshed subdir at {} (specify parent dir via --typeshed-location)".format(p))
def check_python_exes_runnable(*, python27_exe_arg: str, python36_exe_arg: str) -> None:
for exe, version_str in zip([python27_exe_arg, python36_exe_arg], ["27", "36"]):
if can_run(exe, args=["--version"]):
continue
formatted_version = ".".join(list(version_str))
script_arg = "--python{}-exe".format(version_str)
raise SystemExit(
"Cannot run Python {version}. (point to a valid executable via {arg})".format(
version=formatted_version, arg=script_arg
)
)
def determine_files_to_test(*, typeshed_location: str, subdir_paths: Sequence[str]) -> List[Tuple[str, int]]:
"""Determine all files to test, checking if it's in the blacklist and which Python versions to use.
Returns a list of pairs of the file path and Python version as an int."""
skipped = PathMatcher(load_blacklist(typeshed_location))
files = []
for root, _, filenames in itertools.chain.from_iterable(os.walk(p) for p in subdir_paths):
for f in sorted(f for f in filenames if f.endswith(".pyi")):
f = os.path.join(root, f)
rel = _get_relative(f)
if skipped.search(rel):
continue
if _is_version(f, "2and3"):
files.append((f, 2))
files.append((f, 3))
elif _is_version(f, "2"):
files.append((f, 2))
elif _is_version(f, "3"):
files.append((f, 3))
else:
print("Unrecognized path: {}".format(f))
return files
def run_all_tests(
*,
files_to_test: Sequence[Tuple[str, int]],
typeshed_location: str,
python27_exe: str,
python36_exe: str,
print_stderr: bool,
dry_run: bool
) -> None:
bad = []
errors = 0
total_tests = len(files_to_test)
print("Testing files with pytype...")
for i, (f, version) in enumerate(files_to_test):
stderr = (
run_pytype(
filename=f,
python_version="2.7" if version == 2 else "3.6",
python_exe=python27_exe if version == 2 else python36_exe,
typeshed_location=typeshed_location,
)
if not dry_run
else None
)
if stderr:
if print_stderr:
print(stderr)
errors += 1
stacktrace_final_line = stderr.rstrip().rsplit("\n", 1)[-1]
bad.append((_get_relative(f), stacktrace_final_line))
runs = i + 1
if runs % 25 == 0:
print(" {:3d}/{:d} with {:3d} errors".format(runs, total_tests, errors))
print("Ran pytype with {:d} pyis, got {:d} errors.".format(total_tests, errors))
for f, err in bad:
print("{}: {}".format(f, err))
if errors:
raise SystemExit("\nRun again with --print-stderr to get the full stacktrace.")
if __name__ == "__main__":
main()
|
[
"srusskih@users.noreply.github.com"
] |
srusskih@users.noreply.github.com
|
3092c08a731b61558189665e7d2e63d08603ab03
|
d9eafd325ab775b7b32af2dd0b63afc7310be53d
|
/pfwra/home/migrations/0004_auto_20210323_0728.py
|
3678a7c488fe83d6dd909f1c2f80b1f809a9fe79
|
[
"MIT"
] |
permissive
|
johnkellehernz/pfwra
|
54b0db7debaed629d6003e0826a15bde2fd4a197
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
refs/heads/main
| 2023-05-01T14:39:42.419993
| 2021-05-13T11:00:07
| 2021-05-13T11:00:07
| 353,514,688
| 0
| 0
|
MIT
| 2021-03-31T23:15:32
| 2021-03-31T23:15:31
| null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
# Generated by Django 3.0.11 on 2021-03-23 07:28
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20210219_0827'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='featured',
field=wagtail.core.fields.StreamField([('cards', wagtail.core.blocks.StructBlock([('link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link label', required=True)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Choose a page to link to', label='Page', required=False)), ('external_url', wagtail.core.blocks.URLBlock(help_text='Or choose an external URL to link to', label='External URL', required=False))], required=False)), ('header', wagtail.core.blocks.CharBlock(label='Header text')), ('text', wagtail.core.blocks.TextBlock(help_text='Write an introduction for the card', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(required=False))]))], blank=True, help_text='Featured cards'),
),
migrations.AlterField(
model_name='homepage',
name='hero_cta',
field=models.CharField(blank=True, help_text='Text to display on Call to Action', max_length=255, null=True, verbose_name='Hero CTA'),
),
migrations.AlterField(
model_name='homepage',
name='hero_text',
field=models.CharField(blank=True, help_text='Write an introduction for the homepage', max_length=255, null=True),
),
]
|
[
"jordi.joan@gmail.com"
] |
jordi.joan@gmail.com
|
43b9efcb67283c12ab78d41bf4a139edda32f6a5
|
8101c599bdf68e0fcc2dbc8188640abfebc4a790
|
/test/test.py
|
f651e500372ecdf139f269049f79c37f139d61d8
|
[
"BSD-3-Clause"
] |
permissive
|
symbooglix/boogie-runner
|
2a39ddc86d1fee8e3750db6c07f3d20363195390
|
01e1fe993d5eacf7055f1d950a209583c0405fd6
|
refs/heads/master
| 2021-01-21T04:37:04.636241
| 2016-04-05T16:28:27
| 2016-04-05T16:28:27
| 28,610,541
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,484
|
py
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import logging
import os
import pprint
import re
import shutil
import subprocess
import sys
import yaml
testDir = os.path.dirname(os.path.abspath(__file__))
repoDir = os.path.dirname(testDir)
# Hack
sys.path.insert(0, repoDir)
from BoogieRunner import ProgramListLoader
# Another Hack
sys.path.insert(0, os.path.join(repoDir, 'analysis'))
from br_util import FinalResultType, classifyResult
class RunnerTool:
def __init__(self, configFile, listFile, relativePathPrefix, workDir, yamlOutput):
self.configFile = configFile
self.listLocation = listFile
self.relativePathPrefix = relativePathPrefix
self.workDir = workDir
self.yamlOutput = yamlOutput
assert os.path.exists(self.listLocation)
def doCleanUp(self):
shutil.rmtree(self.workDir)
os.remove(self.yamlOutput)
def getFileList(self):
return ProgramListLoader.load(self.listLocation, self.relativePathPrefix)
class BatchRunnerTool(RunnerTool):
def __init__(self, configFile, listFile, relativePathPrefix, workDir, yamlOutput):
super(BatchRunnerTool, self).__init__(configFile, listFile, relativePathPrefix, workDir, yamlOutput)
self.numJobs = 1
def setNumJobs(self, count):
assert count > 0
self.numJobs = count
def getResults(self, testFiles, clean=True):
if os.path.exists(self.yamlOutput):
os.remove(self.yamlOutput)
exitCode = subprocess.call([self.tool,
"--jobs={}".format(self.numJobs),
self.configFile,
self.listLocation,
self.workDir,
self.yamlOutput
])
if exitCode != 0:
logging.error('Tool failed')
sys.exit(1)
if not os.path.exists(self.yamlOutput):
logging.error('cannot find yaml output')
sys.exit(1)
results = None
with open(self.yamlOutput, 'r') as y:
results = yaml.load(y)
if clean:
self.doCleanUp()
return results
@property
def tool(self):
return os.path.join(repoDir, 'boogie-batch-runner.py')
class SingleRunTool(RunnerTool):
def getResults(self, testFiles, clean=False):
logging.warning('clean directive ignored')
# Run over the tests
results = [ ]
for testFile in testFiles.keys():
exitCode = subprocess.call([self.tool,
self.configFile,
testFile,
self.workDir,
self.yamlOutput
])
if exitCode != 0:
logging.error('Tool failed')
sys.exit(1)
if not os.path.exists(self.yamlOutput):
logging.error('Yaml output is missing')
sys.exit(1)
with open(self.yamlOutput, 'r') as f:
results.extend(yaml.load(f))
self.doCleanUp()
return results
@property
def tool(self):
return os.path.join(repoDir, 'boogie-runner.py')
def main(args):
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("-j", "--jobs", type=int, default=1,
help='jobs to run in parallel. Only works when using batch mode')
parser.add_argument("-k", "--keep-files", dest='keep_files',
action='store_true', default=False)
parser.add_argument("-l", "--list-file", dest='list_file',
type=str, default="simple_boogie_programs.txt")
parser.add_argument("config_file")
parser.add_argument("mode", choices=['single', 'batch'], help="Front end to use. Valid options %(choices)s")
pargs = parser.parse_args(args)
if pargs.mode != 'batch' and pargs.jobs > 1:
logging.error('Can only specify jobs when using "batch" mode')
return 1
# Compute some paths
workDir = os.path.join(testDir, 'working_dir')
yamlOutput = os.path.join(testDir, 'result.yml')
if not os.path.exists(pargs.config_file):
logging.error('Could not find config_file {}'.format(pargs.config_file))
return 1
listFile = os.path.join(testDir, pargs.list_file)
if not os.path.exists(listFile):
logging.error('Could not find list file "{}".'.format(listFile))
return 1
if pargs.mode == 'single':
runnerConstructor = SingleRunTool
elif pargs.mode == 'batch':
runnerConstructor = BatchRunnerTool
else:
logging.error('Invalid mode')
return 1
runner = runnerConstructor(pargs.config_file, listFile, testDir, workDir, yamlOutput)
if pargs.jobs > 1:
runner.setNumJobs(pargs.jobs)
if not os.path.exists(runner.tool):
logging.error('Cannot find {}'.format(runner.tool))
return 1
if os.path.exists(yamlOutput):
logging.error('Yaml output file "{}" exists. Remove it'.format(yamlOutput))
return 1
# Find all the tests
testFiles = {}
filenames = runner.getFileList()
for potentialTest in filenames:
if not os.path.exists(potentialTest):
logging.error('Could not find file "{}"'.format(potentialTest))
return 1
r = re.compile(r'^//\s*(\w+)')
# Read expected test result from first line of file
with open(potentialTest, 'r') as testFile:
line = testFile.readline()
m = r.match(line)
      if m is None:
logging.error('Failed to find result on first line of file {}'.format(potentialTest))
return 1
expectedResultStr = m.group(1)
expectedResultEnum = FinalResultType[expectedResultStr]
logging.info('Found test:{} - {}'.format(potentialTest, expectedResultEnum))
testFiles[potentialTest] = expectedResultEnum
# Run tests
if os.path.exists(workDir):
logging.info('removing {}'.format(workDir))
shutil.rmtree(workDir)
os.mkdir(workDir)
results = runner.getResults(testFiles, clean= not pargs.keep_files)
# Check the results against the testFiles
logging.info('Got results:\n{}'.format(pprint.pformat(results)))
for result in results:
filename = result['program']
actualClassification = classifyResult(result)
expectedClassification = testFiles[filename]
if actualClassification != expectedClassification:
logging.error('Result mismatch for {}, expected {}, got {}'.format(
filename, expectedClassification, actualClassification))
return 1
logging.info('SUCCESS!')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"daniel.liew@imperial.ac.uk"
] |
daniel.liew@imperial.ac.uk
|
814bbf98eeee530f21372492d0a0d9f8a9ce62d1
|
d8f7b9943049bd483189fe58fd4abf37163866dd
|
/GUI Code/search.py
|
9d83c91ad738a58d3a07107996a978d96e19663f
|
[] |
no_license
|
NagahShinawy/python-data-structures-algorithms
|
d14ecd478caa13e36c4f2dcdf942e5f9e9f351e5
|
c254f12dca78444e3b2bbd667d4508a699b9fb89
|
refs/heads/main
| 2023-05-12T17:26:23.477742
| 2021-05-10T07:08:30
| 2021-05-10T07:08:30
| 365,436,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
"""
Python Data Structures - A Game-Based Approach
Robin Andrews - https://compucademy.net/
Search Algorithms for use in GUI.
"""
import config
import heapq
import helper_functions as helpers
from collections import deque
class PriorityQueue:
def __init__(self):
self.elements = []
def is_empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
def dfs(board, start, goal):
stack = [start]
visited = set()
full_path = []
while stack:
current = stack.pop()
full_path.append(current)
if current == goal:
return full_path
for direction in ["up", "right", "down", "left"]: # Other orders are fine too.
row_offset, col_offset = config.offsets[direction]
neighbour = (current[0] + row_offset, current[1] + col_offset)
if helpers.is_legal_pos(board, neighbour) and neighbour not in visited:
stack.append(neighbour)
visited.add(neighbour)
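# Note: dfs (above) and bfs (below) return full_path, the order in which
# cells were expanded up to the goal, rather than a reconstructed
# start-to-goal path; presumably the GUI animates this expansion order.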
def bfs(board, start, goal):
queue = deque()
queue.append(start)
visited = set()
full_path = []
while queue:
current = queue.popleft()
full_path.append(current)
if current == goal:
return full_path
for direction in ["up", "right", "down", "left"]:
row_offset, col_offset = config.offsets[direction]
neighbour = (current[0] + row_offset, current[1] + col_offset)
if helpers.is_legal_pos(board, neighbour) and neighbour not in visited:
queue.append(neighbour)
visited.add(neighbour)
def heuristic(a, b):
x1, y1 = a
x2, y2 = b
return abs(x1 - x2) + abs(y1 - y2)
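# Worked example: heuristic((0, 0), (2, 3)) == |0 - 2| + |0 - 3| == 5.
# On a 4-directional grid the Manhattan distance never overestimates the
# remaining cost, which keeps the A* search below admissible.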
def a_star(board, start_pos, goal_pos):
pq = PriorityQueue()
pq.put(start_pos, 0)
g_values = {}
g_values[start_pos] = 0
full_path = []
while not pq.is_empty():
current_cell_pos = pq.get()
full_path.append(current_cell_pos)
if current_cell_pos == goal_pos:
return full_path
for direction in ["up", "right", "down", "left"]:
row_offset, col_offset = config.offsets[direction]
neighbour = (
current_cell_pos[0] + row_offset,
current_cell_pos[1] + col_offset,
)
new_cost = (
g_values[current_cell_pos] + 1
) # Would be edge weight in a weighted graph
if helpers.is_legal_pos(board, neighbour):
# Second check only applies to weighted graph.
if neighbour not in g_values or new_cost < g_values[neighbour]:
g_values[neighbour] = new_cost
f_value = new_cost + heuristic(goal_pos, neighbour)
pq.put(neighbour, f_value)
|
[
"E-n.Shinawy@lean.sa"
] |
E-n.Shinawy@lean.sa
|
4dee0daf77fa48f37448dd8cf7d857f94c9426d5
|
e91ba13a71dc8757e4c6f483d300bb32db8947d4
|
/kubernetes-mastery/slides/markmaker.py
|
d7ef7a0356e368ba4cf696f9414f7f69f63ba6cc
|
[
"Apache-2.0"
] |
permissive
|
sijoonlee/kubernetes_study
|
752788d4ecf542072436e13ad98b9c67c3b3db2c
|
668abacf4f855b55f23562486e420d29397bbe6d
|
refs/heads/master
| 2022-12-22T06:52:51.224364
| 2020-09-30T17:38:18
| 2020-09-30T17:38:18
| 276,719,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,687
|
py
|
#!/usr/bin/env python
# transforms a YAML manifest into an HTML workshop file
import glob
import logging
import os
import re
import string
import subprocess
import sys
import yaml
logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
class InvalidChapter(ValueError):
def __init__(self, chapter):
ValueError.__init__(self, "Invalid chapter: {!r}".format(chapter))
def anchor(title):
title = title.lower().replace(' ', '-')
title = ''.join(c for c in title if c in string.ascii_letters+'-')
return "toc-" + title
def interstitials_generator():
images = [url.strip() for url in open("interstitials.txt") if url.strip()]
while True:
for image in images:
yield image
interstitials = interstitials_generator()
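# Note: interstitials behaves like itertools.cycle(images) -- it yields the
# interstitial image URLs forever, so insertslide() can pull a fresh one
# for every generated title slide.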
def insertslide(markdown, title):
title_position = markdown.find("\n# {}\n".format(title))
slide_position = markdown.rfind("\n---\n", 0, title_position+1)
logging.debug("Inserting title slide at position {}: {}".format(slide_position, title))
before = markdown[:slide_position]
toclink = "toc-chapter-{}".format(title2path[title][0])
_titles_ = [""] + all_titles + [""]
currentindex = _titles_.index(title)
previouslink = anchor(_titles_[currentindex-1])
nextlink = anchor(_titles_[currentindex+1])
interstitial = interstitials.next()
extra_slide = """
---
class: pic
.interstitial[![]({interstitial})]
---
name: {anchor}
class: title
{title}
.nav[
[Previous section](#{previouslink})
|
[Back to table of contents](#{toclink})
|
[Next section](#{nextlink})
]
.debug[(automatically generated title slide)]
""".format(anchor=anchor(title), interstitial=interstitial, title=title, toclink=toclink, previouslink=previouslink, nextlink=nextlink)
after = markdown[slide_position:]
return before + extra_slide + after
def flatten(titles):
for title in titles:
if isinstance(title, list):
for t in flatten(title):
yield t
else:
yield title
def generatefromyaml(manifest, filename):
manifest = yaml.load(manifest)
markdown, titles = processchapter(manifest["chapters"], filename)
logging.debug("Found {} titles.".format(len(titles)))
toc = gentoc(titles)
markdown = markdown.replace("@@TOC@@", toc)
for title in flatten(titles):
markdown = insertslide(markdown, title)
exclude = manifest.get("exclude", [])
logging.debug("exclude={!r}".format(exclude))
if not exclude:
logging.warning("'exclude' is empty.")
exclude = ",".join('"{}"'.format(c) for c in exclude)
# Insert build info. This is super hackish.
markdown = markdown.replace(
".debug[",
".debug[\n```\n{}\n```\n\nThese slides have been built from commit: {}\n\n".format(dirtyfiles, commit),
1)
markdown = markdown.replace("@@TITLE@@", manifest["title"].replace("\n", "<br/>"))
html = open("workshop.html").read()
html = html.replace("@@MARKDOWN@@", markdown)
html = html.replace("@@EXCLUDE@@", exclude)
html = html.replace("@@CHAT@@", manifest["chat"])
html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
return html
# Maps a section title (the string just after "^# ") to its position
# in the table of content (as a (chapter,part,subpart,...) tuple).
title2path = {}
path2title = {}
all_titles = []
# "tree" is a list of titles, potentially nested.
def gentoc(tree, path=()):
if not tree:
return ""
if isinstance(tree, str):
title = tree
title2path[title] = path
path2title[path] = title
all_titles.append(title)
logging.debug("Path {} Title {}".format(path, title))
return "- [{}](#{})".format(title, anchor(title))
if isinstance(tree, list):
if len(path) == 0:
return "\n---\n".join(gentoc(subtree, path+(i+1,)) for (i,subtree) in enumerate(tree))
elif len(path) == 1:
chapterslide = "name: toc-chapter-{n}\n\n## Chapter {n}\n\n".format(n=path[0])
for (i,subtree) in enumerate(tree):
chapterslide += gentoc(subtree, path+(i+1,)) + "\n\n"
chapterslide += ".debug[(auto-generated TOC)]"
return chapterslide
else:
return "\n\n".join(gentoc(subtree, path+(i+1,)) for (i,subtree) in enumerate(tree))
# Arguments:
# - `chapter` is a string; if it has multiple lines, it will be used as
# a markdown fragment; otherwise it will be considered as a file name
# to be recursively loaded and parsed
# - `filename` is the name of the file that we're currently processing
# (to generate inline comments to facilitate edition)
# Returns: (expandedmarkdown, [list of titles])
# The list of titles can be nested.
def processchapter(chapter, filename):
if isinstance(chapter, unicode):
return processchapter(chapter.encode("utf-8"), filename)
if isinstance(chapter, str):
if "\n" in chapter:
titles = re.findall("^# (.*)", chapter, re.MULTILINE)
slidefooter = ".debug[{}]".format(makelink(filename))
chapter = chapter.replace("\n---\n", "\n{}\n---\n".format(slidefooter))
chapter += "\n" + slidefooter
return (chapter, titles)
if os.path.isfile(chapter):
return processchapter(open(chapter).read(), chapter)
if isinstance(chapter, list):
chapters = [processchapter(c, filename) for c in chapter]
markdown = "\n---\n".join(c[0] for c in chapters)
titles = [t for (m,t) in chapters if t]
return (markdown, titles)
raise InvalidChapter(chapter)
# Try to figure out the URL of the repo on GitHub.
# This is used to generate "edit me on GitHub"-style links.
try:
if "REPOSITORY_URL" in os.environ:
repo = os.environ["REPOSITORY_URL"]
else:
repo = subprocess.check_output(["git", "config", "remote.origin.url"])
repo = repo.strip().replace("git@github.com:", "https://github.com/")
if "BRANCH" in os.environ:
branch = os.environ["BRANCH"]
else:
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
branch = branch.strip()
base = subprocess.check_output(["git", "rev-parse", "--show-prefix"])
base = base.strip().strip("/")
urltemplate = ("{repo}/tree/{branch}/{base}/{filename}"
.format(repo=repo, branch=branch, base=base, filename="{}"))
except:
logging.exception("Could not generate repository URL; generating local URLs instead.")
urltemplate = "file://{pwd}/{filename}".format(pwd=os.environ["PWD"], filename="{}")
try:
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
except:
logging.exception("Could not figure out HEAD commit.")
commit = "??????"
try:
dirtyfiles = subprocess.check_output(["git", "status", "--porcelain"])
except:
logging.exception("Could not figure out repository cleanliness.")
dirtyfiles = "?? git status --porcelain failed"
def makelink(filename):
if os.path.isfile(filename):
url = urltemplate.format(filename)
return "[{}]({})".format(filename, url)
else:
return filename
if len(sys.argv) != 2:
logging.error("This program takes one and only one argument: the YAML file to process.")
else:
filename = sys.argv[1]
if filename == "-":
filename = "<stdin>"
manifest = sys.stdin
else:
manifest = open(filename)
logging.info("Processing {}...".format(filename))
sys.stdout.write(generatefromyaml(manifest, filename))
logging.info("Processed {}.".format(filename))
|
[
"shijoonlee@gmail.com"
] |
shijoonlee@gmail.com
|
0c78396cacf3dcb777ca52b8bb646c14114b8fd8
|
b323fe5968aea700322428ba6bd239b45bc88c00
|
/sohpen/website/migrations/0004_auto_20170518_0707.py
|
9cbdfebe44d099d22afdb59741aada8fb2fc3ec3
|
[] |
no_license
|
aakashres/sophen
|
a1862be0fe4aaac51a03f111c1943c1e44f517cb
|
d84b8e8640f10eef22a79b8afba3e226405f9e5d
|
refs/heads/master
| 2022-11-08T01:46:05.697691
| 2017-11-06T11:10:22
| 2017-11-06T11:10:22
| 273,651,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-18 07:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0003_auto_20170518_0544'),
]
operations = [
migrations.AlterField(
model_name='menu',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='website.Menu'),
),
]
|
[
"aakash.shres@gmail.com"
] |
aakash.shres@gmail.com
|
50faf2e04d91afe1be4128df90c192dd546b38fe
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_06_01/models/application_gateway_probe.py
|
d04d03bf97211b720086089e3307cafdb95580c8
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,616
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: The protocol used for the probe. Possible values are
'Http' and 'Https'. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
:param timeout: the probe timeout in seconds. Probe marked as failed if
valid response is not received with this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
UnhealthyThreshold. Acceptable values are from 1 second to 20.
:type unhealthy_threshold: int
:param pick_host_name_from_backend_http_settings: Whether the host header
should be picked from the backend http settings. Default value is false.
:type pick_host_name_from_backend_http_settings: bool
:param min_servers: Minimum number of servers that are always marked
healthy. Default value is 0.
:type min_servers: int
:param match: Criterion for classifying a healthy probe response.
:type match:
~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProbeHealthResponseMatch
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the probe that is unique within an Application
Gateway.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
'min_servers': {'key': 'properties.minServers', 'type': 'int'},
'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayProbe, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.host = kwargs.get('host', None)
self.path = kwargs.get('path', None)
self.interval = kwargs.get('interval', None)
self.timeout = kwargs.get('timeout', None)
self.unhealthy_threshold = kwargs.get('unhealthy_threshold', None)
self.pick_host_name_from_backend_http_settings = kwargs.get('pick_host_name_from_backend_http_settings', None)
self.min_servers = kwargs.get('min_servers', None)
self.match = kwargs.get('match', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
self.type = kwargs.get('type', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
30aec9891a46dbbe643a92b765ac61393ad4a416
|
56bd9b3518f21080a0493f5330249bf5e85289fd
|
/engineering/common/econstants.py
|
6c98788eeb4a4ac58e6294df9a553babe863a6f2
|
[
"Apache-2.0"
] |
permissive
|
kevin-zhangsen/badam
|
da680bf8669722b5bc922381537bc4762fa5c228
|
6823f7dcd7c1b54c3b38edeffe59c16317598a2c
|
refs/heads/master
| 2020-04-01T13:43:03.300155
| 2015-10-29T01:07:46
| 2015-10-29T01:07:46
| 45,371,347
| 2
| 0
| null | 2015-11-02T04:02:50
| 2015-11-02T04:02:47
| null |
UTF-8
|
Python
| false
| false
| 7,858
|
py
|
__author__ = 'nash.xiejun'
import os
class OperationType(object):
CFG_ALL_IN_ONE = 'cfg-all-in-one'
CFG_HOST_NAME = 'cfg-hostname'
DEPLOY_CASCADING = 'deploy-cascade-openstack'
DEPLOY_HYBRID_CLOUD = 'deploy-hybrid-cloud'
class EndpointType(object):
COMPUTE = 'compute'
VOLUME = 'volume'
VOLUME2 = 'volumev2'
IMAGE = 'image'
NETWORK = 'network'
ORCHESTRATION = 'orchestration'
EC2 = 'ec2'
METERING = 'metering'
class EndpointURL(object):
COMPUTE = 'http://%s:8774/v2/$(tenant_id)s'
VOLUME = 'http://%s:8776/v1/$(tenant_id)s'
VOLUME2 = 'http://%s:8776/v2/$(tenant_id)s'
IMAGE = 'http://%s:9292/'
NETWORK = 'http://%s:9696/'
ORCHESTRATION = 'http://%s:8004/v1/$(tenant_id)s'
EC2 = 'http://%s:8773/services/Cloud'
METERING = 'http://%s:8777/'
class ServiceName(object):
NOVA = 'nova'
CINDER = 'cinder'
GLANCE = 'glance'
NEUTRON = 'neutron'
KEYSTONE = 'keystone'
class PathConfigFile(object):
ROOT = os.path.sep
ETC = 'etc'
PLUGINS = 'plugins'
ML_2 = 'ml2'
ML2_CONF = 'ml2_conf.ini'
NOVA_CONF = 'nova.conf'
#etc/nova/nova.conf
NOVA = os.path.join(ETC, ServiceName.NOVA, NOVA_CONF)
NOVA_COMPUTE_CONF = 'nova-compute.conf'
#etc/nova/nova-compute.conf
NOVA_COMPUTE = os.path.join(ETC, ServiceName.NOVA, NOVA_COMPUTE_CONF)
NEUTRON_CONF = 'neutron.conf'
#etc/neutron/neutron.conf
NEUTRON = os.path.join(ETC, ServiceName.NEUTRON, NEUTRON_CONF)
# etc/neutron/plugins/ml2/ml2_conf.ini
ML2 = os.path.join(ETC, ServiceName.NEUTRON, PLUGINS, ML_2, ML2_CONF)
L3_PROXY_INI = 'l3_proxy.ini'
# etc/neutron/l3_proxy.ini
L3_PROXY = os.path.join(ETC, ServiceName.NEUTRON, L3_PROXY_INI)
#etc/keystone/keystone.conf
KEYSTONE_CONF = 'keystone.conf'
KEYSTONE = os.path.join(ETC, ServiceName.KEYSTONE, KEYSTONE_CONF)
#etc/glance/glance.conf
GLANCE_CONF = 'glance.conf'
GLANCE = os.path.join(ETC, ServiceName.GLANCE, GLANCE_CONF)
#etc/cinder/cinder.conf
CINDER_CONF = 'cinder.conf'
CINDER = os.path.join(ETC, ServiceName.CINDER, CINDER_CONF)
class PathTriCircle(object):
TRICIRCLE = 'tricircle-master'
JUNO_PATCHES = 'juno-patches'
NOVA_PROXY = 'novaproxy'
CINDER_PROXY = 'cinderproxy'
NEUTRON_PROXY = 'neutronproxy'
L2_PROXY = 'l2proxy'
L3_PROXY = 'l3proxy'
GLANCE_SYNC = 'glancesync'
GLANCE_STORE = 'glance_store'
PATCH_CINDER_CASCADED_TIMESTAMP = 'timestamp-query-patch'
PATCH_GLANCE_LOCATION = 'glance_location_patch'
PATCH_GLANCE_STORE = 'glance_store_patch'
PATCH_NEUTRON_CASCADED_BIG2LAYER = 'neutron_cascaded_big2layer_patch'
PATCH_NEUTRON_CASCADED_L3 = 'neutron_cascaded_l3_patch'
PATCH_NEUTRON_CASCADED_TIMESTAMP = 'neutron_timestamp_cascaded_patch'
PATCH_NEUTRON_CASCADING_BIG2LAYER = 'neutron_cascading_big2layer_patch'
PATCH_NEUTRON_CASCADING_L3 = 'neutron_cascading_l3_patch'
PATCH_NOVA_SCHEDULING = 'nova_scheduling_patch'
# tricircle-master/glancesync
PATH_CASCADING_GLANCE_SYNC = os.path.join(TRICIRCLE, GLANCE_SYNC)
# tricircle-master/cinderproxy
PATH_PROXY_CINDER = os.path.join(TRICIRCLE, CINDER_PROXY)
# tricircle-master/neutronproxy/l2proxy
PATH_PROXY_NEUTRON_L2 = os.path.join(TRICIRCLE, NEUTRON_PROXY, L2_PROXY)
# tricircle-master/neutronproxy/l3proxy
PATH_PROXY_NEUTRON_L3 = os.path.join(TRICIRCLE, NEUTRON_PROXY, L3_PROXY)
# tricircle-master/novaproxy
PATH_PROXY_NOVA = os.path.join(TRICIRCLE, NOVA_PROXY)
# tricircle-master/juno-patches/cinder/timestamp-query-patch
PATH_PATCH_CINDER_CASCADED_TIMESTAMP = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.CINDER, PATCH_CINDER_CASCADED_TIMESTAMP)
# tricircle-master/juno-patches/glance/glance_location_patch
PATH_PATCH_GLANCE_LOCATION = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.GLANCE, PATCH_GLANCE_LOCATION)
# tricircle-master/juno-patches/glance_store/glance_store_patch/
PATH_PATCH_GLANCE_STORE = os.path.join(TRICIRCLE, JUNO_PATCHES, GLANCE_STORE, PATCH_GLANCE_STORE)
# tricircle-master/juno-patches/neutron/neutron_cascaded_big2layer_patch
PATH_PATCH_NEUTRON_CASCADED_BIG2LAYER = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_BIG2LAYER)
# tricircle-master/juno-patches/neutron/neutron_cascaded_l3_patch
PATH_PATCH_NEUTRON_CASCADED_L3 = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_L3)
# tricircle-master/juno-patches/neutron/neutron_cascading_big2layer_patch
PATH_PATCH_NEUTRON_CASCADING_BIG2LAYER = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADING_BIG2LAYER)
# tricircle-master/juno-patches/neutron/neutron_cascading_l3_patch
PATH_PATCH_NEUTRON_CASCADING_L3 = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADING_L3)
# tricircle-master/juno-patches/neutron/neutron_timestamp_cascaded_patch
PATH_PATCH_NEUTRON_CASCADED_TIMESTAMP = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NEUTRON, PATCH_NEUTRON_CASCADED_TIMESTAMP)
# tricircle-master/juno-patches/nova/nova_scheduling_patch
PATH_PATCH_NOVA_SCHEDULING = os.path.join(TRICIRCLE, JUNO_PATCHES, ServiceName.NOVA, PATCH_NOVA_SCHEDULING)
PATCH_TO_PATH = {
PATCH_NOVA_SCHEDULING : PATH_PATCH_NOVA_SCHEDULING,
PATCH_NEUTRON_CASCADING_BIG2LAYER : PATH_PATCH_NEUTRON_CASCADING_BIG2LAYER,
PATCH_NEUTRON_CASCADING_L3 : PATH_PATCH_NEUTRON_CASCADING_L3,
PATCH_NEUTRON_CASCADED_BIG2LAYER : PATH_PATCH_NEUTRON_CASCADED_BIG2LAYER,
PATCH_NEUTRON_CASCADED_L3 : PATH_PATCH_NEUTRON_CASCADED_L3,
PATCH_NEUTRON_CASCADED_TIMESTAMP : PATH_PATCH_NEUTRON_CASCADED_TIMESTAMP,
PATCH_CINDER_CASCADED_TIMESTAMP : PATH_PATCH_CINDER_CASCADED_TIMESTAMP,
NOVA_PROXY : PATH_PROXY_NOVA,
CINDER_PROXY : PATH_PROXY_CINDER,
L2_PROXY : PATH_PROXY_NEUTRON_L2,
L3_PROXY : PATH_PROXY_NEUTRON_L3
}
class PathHybridCloud(object):
HYBRID_CLOUD_PATCHES = 'hybrid_cloud_patches'
THIRD_LIB = '3rd_lib'
PYTHON = 'python'
JAVA = 'java'
OPENSTACK_DASHBOARD = 'openstack_dashboard'
WSGI = 'wsgi'
ROOT = os.path.sep
#/usr/share/openstack-dashboard/openstack_dashboard/
# hybrid_cloud_patches/3rd_lib/java
PATH_THIRD_LIB_JAVA = os.path.join(HYBRID_CLOUD_PATCHES, THIRD_LIB, JAVA)
# hybrid_cloud_patches/3rd_lib/python
PATH_THIRD_LIB_PYTHON = os.path.join(HYBRID_CLOUD_PATCHES, THIRD_LIB, PYTHON)
# hybrid_cloud_patches/java
PATH_PATCHES_JAVA = os.path.join(HYBRID_CLOUD_PATCHES, JAVA)
# hybrid_cloud_patches/python
PATH_PATCHES_PYTHON = os.path.join(HYBRID_CLOUD_PATCHES, PYTHON)
# hybrid_cloud_patches/wsgi
PATH_PATCHES_OPENSTACK_DASHBOARD = os.path.join(HYBRID_CLOUD_PATCHES, WSGI)
# /usr/share/openstack-dashboard/
PATH_INSTALL_PATCH_OPENSTACK_DASHBOARD = ''.join([ROOT, os.path.join('usr', 'share', 'openstack-dashboard')])
class PathTricircleConfigFile(object):
PROXY_CINDER = os.path.join(PathTriCircle.PATH_PROXY_CINDER, PathConfigFile.CINDER)
PROXY_NEUTRON_L2 = os.path.join(PathTriCircle.PATH_PROXY_NEUTRON_L2, PathConfigFile.ML2)
PROXY_NEUTRON_L3 = os.path.join(PathTriCircle.PATH_PROXY_NEUTRON_L3, PathConfigFile.L3_PROXY)
PROXY_NOVA_COMPUTE = os.path.join(PathTriCircle.PATH_PROXY_NOVA, PathConfigFile.NOVA_COMPUTE)
PROXY_NOVA = os.path.join(PathTriCircle.PATH_PROXY_NOVA, PathConfigFile.NOVA)
class ConfigReplacement(object):
REGION_NAME = 'region_name'
CASCADED_NODE_IP = 'cascaded_node_ip'
CASCADING_NODE_IP = 'cascading_node_ip'
CINDER_TENANT_ID = 'cinder_tenant_id'
AVAILABILITY_ZONE = 'availability_zone'
CASCADING_OS_REGION_NAME = 'cascading_os_region_name'
ML2_LOCAL_IP = 'ml2_local_ip'
|
[
"nash.xiejun@gmail.com"
] |
nash.xiejun@gmail.com
|
200a81f58579323116fcf06d8ac860193ba85b33
|
c954904d3a3259f0bee4bc3942998c30f4714e68
|
/shortener/shorturl/__init__.py
|
841083c46d1e89eca6a52cddcb079e6658197c16
|
[] |
no_license
|
Alodhaib/django-shortener-example
|
9443e51191086fa1321468eb3fdefa137c25e330
|
d037c913ed18e0a7b24865b7f4f5aaf68df2cca3
|
refs/heads/master
| 2021-01-24T10:06:40.965556
| 2013-05-11T16:01:13
| 2013-05-11T16:01:13
| 69,673,280
| 0
| 0
| null | 2016-09-30T14:22:22
| 2016-09-30T14:22:22
| null |
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
#!/usr/bin/env python
#
# Converts any integer into a base [BASE] number. I have chosen 62
# as it is meant to represent the integers using all the alphanumeric
# characters, [no special characters] = {0..9}, {A..Z}, {a..z}
#
# I plan on using this to shorten the representation of possibly long ids,
# a la url shorteners
#
# saturate() takes the base 62 key, as a string, and turns it back into an integer
# dehydrate() takes an integer and turns it into the base 62 string
#
import math
import sys
BASE = 62
UPPERCASE_OFFSET = 55
LOWERCASE_OFFSET = 61
DIGIT_OFFSET = 48
def true_ord(char):
"""
Turns a digit [char] in character representation
from the number system with base [BASE] into an integer.
"""
if char.isdigit():
return ord(char) - DIGIT_OFFSET
elif 'A' <= char <= 'Z':
return ord(char) - UPPERCASE_OFFSET
elif 'a' <= char <= 'z':
return ord(char) - LOWERCASE_OFFSET
else:
raise ValueError("%s is not a valid character" % char)
def true_chr(integer):
"""
Turns an integer [integer] into digit in base [BASE]
as a character representation.
"""
if integer < 10:
return chr(integer + DIGIT_OFFSET)
elif 10 <= integer <= 35:
return chr(integer + UPPERCASE_OFFSET)
elif 36 <= integer < 62:
return chr(integer + LOWERCASE_OFFSET)
else:
raise ValueError("%d is not a valid integer in the range of base %d" % (integer, BASE))
def saturate(key):
"""
Turn the base [BASE] number [key] into an integer
"""
int_sum = 0
reversed_key = key[::-1]
for idx, char in enumerate(reversed_key):
int_sum += true_ord(char) * int(math.pow(BASE, idx))
return int_sum
def dehydrate(integer):
"""
Turn an integer [integer] into a base [BASE] number
in string representation
"""
# we won't step into the while if integer is 0
# so we just solve for that case here
if integer == 0:
return '0'
string = ""
while integer > 0:
remainder = integer % BASE
string = true_chr(remainder) + string
integer /= BASE
return string
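# Worked example: dehydrate(97) == '1Z' because 97 == 1*62 + 35 and
# true_chr(35) == 'Z'; saturate('1Z') inverts this and returns 97.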
if __name__ == '__main__':
    # not really unit tests, just a rough check to see if anything is way off
if sys.argv[1] == '-tests':
passed_tests = True
for i in xrange(0, 1000):
passed_tests &= (i == saturate(dehydrate(i)))
print passed_tests
else:
user_input = sys.argv[2]
try:
if sys.argv[1] == '-s':
print saturate(user_input)
elif sys.argv[1] == '-d':
print dehydrate(int(user_input))
else:
print "I don't understand option %s" % sys.argv[1]
except ValueError as e:
print e
|
[
"allisson@gmail.com"
] |
allisson@gmail.com
|
6b8e063df39d1bc4647cc63b5d37bbb741026f94
|
84856442c382b0b670246636d378beb095effa0a
|
/dev_cloud/cc1/pkg/node/usr/sbin/cc1_node_update_config
|
e017f54f299318ea27deee56590d47e9bbbd9034
|
[
"Apache-2.0",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
Dev-Cloud-Platform/Dev-Cloud
|
f50cc3292245156c4cf55942e4426fda22443fd6
|
b2fb9f4318aeb6dde1e8babca32da527943f1fb4
|
refs/heads/master
| 2020-12-29T02:43:14.022401
| 2017-05-05T07:18:21
| 2017-05-05T07:18:21
| 28,969,864
| 1
| 1
| null | 2015-01-14T16:46:57
| 2015-01-08T14:36:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @cond LICENSE
#
# Copyright [2010-2013] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @endcond LICENSE
"""
@author Maciej Nabozny <mn@mnabozny.pl>
"""
import sys
def set_value(key, value):
print "NODE: Updating config: %s:%s" % (key, value)
config = open('/etc/cc1/node/config.py', 'r')
lines = []
for line in config.readlines():
if line[-1] == '\n':
line = line[:-1]
if line.startswith(key):
lines.append(key + '="' + str(value) + '"')
else:
lines.append(line)
config.close()
config = open('/etc/cc1/node/config.py', 'w')
config.write('\n'.join(lines))
config.close()
return 0
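# Usage sketch (the key name is illustrative):
#   cc1_node_update_config CLM_ADDRESS 10.0.0.1
# rewrites every line of /etc/cc1/node/config.py that starts with
# CLM_ADDRESS to CLM_ADDRESS="10.0.0.1".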
if __name__ == "__main__":
try:
if len(sys.argv) == 3:
sys.exit(set_value(sys.argv[1], sys.argv[2]))
else:
print "Usage: %s [key] new_value" % sys.argv[0]
sys.exit(1)
except Exception as e:
print >> sys.stderr, "ERROR: %s" % str(e)
sys.exit(10)
|
[
"michal.szczygiel@wp.pl"
] |
michal.szczygiel@wp.pl
|
|
71bc398215f05023c66de7b67055c6c4452211b3
|
71dfa5d568d408fd8464a1313f87c1133e3d061c
|
/ATS/urls.py
|
b3d6e11901bbb899f6983c3d21e1d181763c2df1
|
[] |
no_license
|
harshdonga/Alumni-Tracking-System
|
3819e26e82145ca2cf277c1f260494cb6a6fbd4c
|
f0c836d5fb405f8b61fb73d78acc4c47802a9c11
|
refs/heads/master
| 2020-12-29T17:48:13.989148
| 2020-02-07T04:27:59
| 2020-02-07T04:27:59
| 238,687,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('alumni.urls')),
]
|
[
"harshdonga99@gmail.com"
] |
harshdonga99@gmail.com
|
5c107d3057995ffc314bc3eebe9f4fdb39227a36
|
321e58ab3e6b2385bb3549aaaefd56a58c2a51e7
|
/python/atpic/atcookies.py
|
1d2ea4668a4d530a17d2d2233e73e24b0279454c
|
[] |
no_license
|
alexmadon/atpic_photosharing
|
7829118d032344bd9a67818cd50e2c27a228d028
|
9fdddeb78548dadf946b1951aea0d0632e979156
|
refs/heads/master
| 2020-06-02T15:00:29.282979
| 2017-06-12T17:09:52
| 2017-06-12T17:09:52
| 94,095,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# cookies library to process
# lang, format, resolution, wiki
# file:///home/madon/doc/python-3.3a0-docs-html/library/http.cookies.html
from http import cookies
C = cookies.SimpleCookie()
C["fig"] = "newton"
C["sugar"] = "wafer"
print(C)
C = cookies.SimpleCookie()
C["rocky"] = "road"
C["rocky"]["path"] = "/cookie"
print(C.output(header="Cookie:"))
print(C["rocky"].value)
print(C.output(header=''))
print(dir(C["rocky"]))
print(C["rocky"].values())
print(C["rocky"].output())
print(C["rocky"].coded_value)
print(C["rocky"].OutputString())
C = cookies.SimpleCookie()
C.load("chips=ahoy; vienna=finger")
print(C.keys())
|
[
"alex.madon@gmail.com"
] |
alex.madon@gmail.com
|
ff3179806be417683f17da0629967ff753f5acd1
|
c06d18ac5b87b3b82fc486454c422b119d6c1ee9
|
/src/demo/NLPBook/chapter5/stanford.py
|
e7a74651d98ea5d99c3a6e7fc528c3f3e51716fd
|
[] |
no_license
|
tangermi/nlp
|
b3a4c9612e6049463bf12bc9abb7aff06a084ace
|
aa36b8b20e8c91807be73a252ff7799789514302
|
refs/heads/master
| 2022-12-09T12:33:15.009413
| 2020-04-03T04:03:24
| 2020-04-03T04:03:24
| 252,056,010
| 0
| 0
| null | 2022-12-08T07:26:55
| 2020-04-01T02:55:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
# -*- coding: utf-8 -*-
import sys
import os
# CoreNLP 3.6 jar files and the Chinese model package
# ejml-0.23.jar
# javax.json.jar
# joda-time.jar
# jollyday.jar
# protobuf.jar
# slf4j-api.jar
# slf4j-simple.jar
# stanford-corenlp-3.6.0.jar
# xom.jar
class StanfordCoreNLP(object):
def __init__(self,jarpath):
self.root = jarpath
self.tempsrcpath = "tempsrc" # 输入临时文件路径
self.jarlist = ["ejml-0.23.jar","javax.json.jar","jollyday.jar","joda-time.jar","protobuf.jar","slf4j-api.jar","slf4j-simple.jar","stanford-corenlp-3.6.0.jar","xom.jar"]
self.jarpath = ""
self.buildjars()
    def buildjars(self): # build the full path of every jar from the root path
for jar in self.jarlist:
self.jarpath += self.root+jar+";"
def savefile(self,path,sent):
fp = open(path,"wb")
fp.write(sent)
fp.close()
    # delete the temporary file
def delfile(self,path):
os.remove(path)
class StanfordPOSTagger(StanfordCoreNLP):
def __init__(self,jarpath,modelpath):
StanfordCoreNLP.__init__(self,jarpath)
        self.modelpath = modelpath # path to the model file
self.classfier = "edu.stanford.nlp.tagger.maxent.MaxentTagger"
self.delimiter = "/"
self.__buildcmd()
    def __buildcmd(self): # build the tagging command line
self.cmdline = 'java -mx1g -cp "'+self.jarpath+'" '+self.classfier+' -model "'+self.modelpath+'" -tagSeparator '+self.delimiter
    def tag(self,sent): # tag a single sentence
self.savefile(self.tempsrcpath,sent)
        tagtxt = os.popen(self.cmdline+" -textFile "+self.tempsrcpath,'r').read() # run the tagger command line
self.delfile(self.tempsrcpath)
return tagtxt
    def tagfile(self,inputpath,outpath): # tag a whole file
        # the tagger reads inputpath directly, so no temporary file is needed
        os.system(self.cmdline+' -textFile '+inputpath+' > '+outpath)
    def __buildprop(self): # build the command that generates a properties file
self.propline = 'java -mx1g -cp "'+self.jarpath+'" '+self.classfier+' -genprops'
    def genpropfile(self,propath): # generate and save a properties file
self.__buildprop()
propfile = os.popen(self.propline,'r').read()
self.savefile(propath,propfile)
print "save properties to ",propath
    def __buildtrain(self,propspath): # build the training command line
self.trainline = 'java -mx4g -cp "'+self.jarpath+'" '+self.classfier +' -props "'+propspath+'"'
    def trainmodel(self,propspath): # train a tagger model
self.__buildtrain(propspath)
os.system(self.trainline)
print "save model to model.tagger"
|
[
"n10057862@qut.edu.au"
] |
n10057862@qut.edu.au
|
8a16091fafc3f2319884a057c8e434ab0e79a775
|
7759c0ad152fe9c369b074a24601e54806b0afa8
|
/backend/event/api/v1/viewsets.py
|
4cba77ae5ad9f884a97fa17647488092f6c46aa1
|
[] |
no_license
|
crowdbotics-apps/covidcheck-15163
|
be59f495e31b50948725fb332429751749f9b611
|
a2f80fc2541bbc069cf3ec6a7f4d740aa665c77b
|
refs/heads/master
| 2023-02-08T22:01:35.813215
| 2020-03-29T16:50:54
| 2020-03-29T16:50:54
| 250,877,575
| 0
| 0
| null | 2023-01-24T01:47:09
| 2020-03-28T19:33:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,066
|
py
|
from rest_framework import authentication
from event.models import (
Category,
Faq,
Favorites,
Location,
MySchedule,
Presenter,
Schedule,
Sponsor,
Vendor,
VendorDetail,
)
from .serializers import (
CategorySerializer,
FaqSerializer,
FavoritesSerializer,
LocationSerializer,
MyScheduleSerializer,
PresenterSerializer,
ScheduleSerializer,
SponsorSerializer,
VendorSerializer,
VendorDetailSerializer,
)
from rest_framework import viewsets
class MyScheduleViewSet(viewsets.ModelViewSet):
serializer_class = MyScheduleSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = MySchedule.objects.all()
class CategoryViewSet(viewsets.ModelViewSet):
serializer_class = CategorySerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Category.objects.all()
class LocationViewSet(viewsets.ModelViewSet):
serializer_class = LocationSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Location.objects.all()
class PresenterViewSet(viewsets.ModelViewSet):
serializer_class = PresenterSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Presenter.objects.all()
class VendorViewSet(viewsets.ModelViewSet):
serializer_class = VendorSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Vendor.objects.all()
class FaqViewSet(viewsets.ModelViewSet):
serializer_class = FaqSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Faq.objects.all()
class ScheduleViewSet(viewsets.ModelViewSet):
serializer_class = ScheduleSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Schedule.objects.all()
class SponsorViewSet(viewsets.ModelViewSet):
serializer_class = SponsorSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Sponsor.objects.all()
class VendorDetailViewSet(viewsets.ModelViewSet):
serializer_class = VendorDetailSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = VendorDetail.objects.all()
class FavoritesViewSet(viewsets.ModelViewSet):
serializer_class = FavoritesSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Favorites.objects.all()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9b3e528238dee10f5bdee6ca543158322d95ff6a
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/scattergl/_xsrc.py
|
2c107c6b510e42d803ee235a58aa1eabf4f21690
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="xsrc", parent_name="scattergl", **kwargs):
super(XsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
4aa11ff5954703255ef2662ebc7538a8a164e33c
|
0eb6c70503c680ebec415016ff1b0cfac92486ca
|
/lincdm/views/sitemap.py
|
f33aa93f833be7422fb0e7b0f58bb61365b8d717
|
[] |
no_license
|
alexliyu/lincdm
|
c8b473946f59aca9145b3291890635474f144583
|
eab93285f0b03217ea041a7910edae7e00095cd8
|
refs/heads/master
| 2020-12-30T10:50:05.248988
| 2011-08-09T15:52:38
| 2011-08-09T15:52:38
| 1,464,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
"""Views for entry sitemap"""
from django.views.generic.simple import direct_to_template
from lincdm.entry.models import Entry
from lincdm.entry.models import Category
def sitemap(*ka, **kw):
"""Wrapper around the direct to template generic view to
force the update of the extra context"""
kw['extra_context'] = {'entries': Entry.published.all(),
'categories': Category.tree.all()}
return direct_to_template(*ka, **kw)
|
[
"alexliyu2012@gmail.com"
] |
alexliyu2012@gmail.com
|
e7102e8a75f1b70c301e29ea4054d292404bf23c
|
709bd5f2ecc69a340da85f6aed67af4d0603177e
|
/tests/test_analytics.py
|
d6818e163a670b22b8bcf46edc578302d57d81ae
|
[
"BSD-3-Clause"
] |
permissive
|
Kenstogram/opensale
|
41c869ee004d195bd191a1a28bf582cc6fbb3c00
|
5102f461fa90f2eeb13b9a0a94ef9cb86bd3a3ba
|
refs/heads/master
| 2022-12-15T02:48:48.810025
| 2020-03-10T02:55:10
| 2020-03-10T02:55:10
| 163,656,395
| 8
| 0
|
BSD-3-Clause
| 2022-12-08T01:31:09
| 2018-12-31T09:30:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
from decimal import Decimal
from saleor.core.analytics import (
get_order_payloads, get_view_payloads, report_order, report_view)
def test_get_order_payloads(order_with_lines):
order = order_with_lines
generator = get_order_payloads(order)
data = list(generator)
assert len(data) == order.lines.count() + 1
transaction = data[0]
assert transaction['ti'] == order.pk
assert transaction['cu'] == order.total.currency
assert Decimal(transaction['tr']) == order.total.gross.amount
assert Decimal(transaction['tt']) == order.total.tax.amount
assert Decimal(transaction['ts']) == order.shipping_price.net.amount
for i, line in enumerate(order):
item = data[i + 1]
assert item['ti'] == order.pk
assert item['in'] == line.product_name
assert item['ic'] == line.product_sku
assert item['iq'] == str(int(line.quantity))
assert item['cu'] == line.unit_price.currency
assert Decimal(item['ip']) == line.unit_price.gross.amount
def test_report_order_has_no_errors(order_with_lines):
report_order('', order_with_lines)
def test_get_view_payloads():
headers = {'HTTP_HOST': 'getsaleor.com', 'HTTP_REFERER': 'example.com'}
generator = get_view_payloads('/test-path/', 'en-us', headers)
data = list(generator)[0]
assert data['dp'] == '/test-path/'
assert data['dh'] == 'getsaleor.com'
assert data['dr'] == 'example.com'
assert data['ul'] == 'en-us'
def test_report_view_has_no_errors():
headers = {'HTTP_HOST': 'getsaleor.com', 'HTTP_REFERER': 'example.com'}
report_view('', '/test-path/', 'en-us', headers)
|
[
"Kenstogram@gmail.com"
] |
Kenstogram@gmail.com
|
51a44f03eb696ececa3a9e650a63d3177d62f625
|
976a21364b7c54e7bccddf1c9deec74577ce8bb8
|
/build/rob_control/catkin_generated/pkg.develspace.context.pc.py
|
ae505c4d62a69cc0f460001a4541d25f31f1d6e7
|
[] |
no_license
|
jinweikim/catkin_ws
|
f0168b17c04863a6e5472f6199a4a9c525e0f3aa
|
268ce7e348a162019e90d0e4527de4c9140ac0f8
|
refs/heads/master
| 2023-01-02T17:23:06.834527
| 2020-10-23T12:03:49
| 2020-10-23T12:03:49
| 262,527,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rob_control"
PROJECT_SPACE_DIR = "/home/jinwei/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"jinweikim@gmail.com"
] |
jinweikim@gmail.com
|
fbca50ebf0262d7c137ebc41118f7bd0b71c47de
|
674649dc02390c4a60b9c62b586b81d405969047
|
/network/__init__.py
|
19f1226938b882bcbc77dc6b13f7782a531efc6f
|
[] |
no_license
|
weijiawu/Pytorch_Classification
|
709513be3e019a896ef11a1739829a97bb99c9db
|
7609a1d809590c1423f4ed0ee1f0d918954355a9
|
refs/heads/master
| 2022-12-06T00:51:26.716590
| 2020-09-01T07:38:22
| 2020-09-01T07:38:22
| 285,811,133
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
from __future__ import absolute_import
"""The models subpackage contains definitions for the following model for CIFAR10/CIFAR100
architectures:
- `AlexNet`_
- `VGG`_
- `ResNet`_
- `SqueezeNet`_
- `DenseNet`_
You can construct a model with random weights by calling its constructor:
.. code:: python
import torchvision.models as models
resnet18 = models.resnet18()
alexnet = models.alexnet()
squeezenet = models.squeezenet1_0()
densenet = models.densenet_161()
We provide pre-trained models for the ResNet variants and AlexNet, using the
PyTorch :mod:`torch.utils.model_zoo`. These can be constructed by passing
``pretrained=True``:
.. code:: python
import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
ImageNet 1-crop error rates (224x224)
======================== ============= =============
Network Top-1 error Top-5 error
======================== ============= =============
ResNet-18 30.24 10.92
ResNet-34 26.70 8.58
ResNet-50 23.85 7.13
ResNet-101 22.63 6.44
ResNet-152 21.69 5.94
Inception v3 22.55 6.44
AlexNet 43.45 20.91
VGG-11 30.98 11.37
VGG-13 30.07 10.75
VGG-16 28.41 9.62
VGG-19 27.62 9.12
SqueezeNet 1.0 41.90 19.58
SqueezeNet 1.1 41.81 19.38
Densenet-121 25.35 7.83
Densenet-169 24.00 7.00
Densenet-201 22.80 6.43
Densenet-161 22.35 6.20
======================== ============= =============
.. _AlexNet: https://arxiv.org/abs/1404.5997
.. _VGG: https://arxiv.org/abs/1409.1556
.. _ResNet: https://arxiv.org/abs/1512.03385
.. _SqueezeNet: https://arxiv.org/abs/1602.07360
.. _DenseNet: https://arxiv.org/abs/1608.06993
"""
# from .alexnet import *
# from .vgg import *
# from .resnet import *
# from .resnext import *
# from .wrn import *
# from .preresnet import *
# from .densenet import *
|
[
"wwj123@zju.edu.cn"
] |
wwj123@zju.edu.cn
|
38c685e5b3daa3c48549492e8305d7c6ec9b4a63
|
12b41c3bddc48a6df5e55bd16f7b2792ed6e4848
|
/k8_vmware/vsphere/VM.py
|
7d14aa69fb7f78566e20968c659a9f980499f6e4
|
[
"Apache-2.0"
] |
permissive
|
NourEddineX/k8-vmware
|
b128b03b988f8a94d6029458c5415cdd68e12b0a
|
80f2a6d56021a1298919487c8372a88aff3f1fb9
|
refs/heads/main
| 2023-01-23T22:59:41.767216
| 2020-12-11T12:33:11
| 2020-12-11T12:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,321
|
py
|
import pyVmomi
from osbot_utils.utils.Misc import wait
from k8_vmware.vsphere.VM_Keystroke import VM_Keystroke
class VM:
def __init__(self, vm):
self.vm = vm
def config(self):
return self.summary().config
def controller_scsi(self):
controllers = self.devices_SCSI_Controllers()
if len(controllers) > 0:
return controllers[0] # default to returning the first one
def controller_ide(self):
controllers = self.devices_IDE_Controllers()
if len(controllers) > 0:
return controllers[0] # default to returning the first one
def controller_ide_free_slot(self):
controllers = self.devices_IDE_Controllers()
for controller in controllers:
if len(controller.device) < 2:
return controller
def devices(self):
return self.vm.config.hardware.device
def devices_IDE_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualIDEController )
def devices_Cdroms (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualCdrom )
def devices_Disks (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualDisk )
def devices_AHCI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualAHCIController )
def devices_PCNet_32s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualPCNet32 )
def devices_Vmxnet_2s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet2 )
def devices_Vmxnet_3s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualVmxnet3 )
def devices_E1000s (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000 )
def devices_E1000es (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualE1000e )
def devices_SCSI_Controllers (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSCSIController )
def devices_Sriov_EthernetCards (self): return self.devices_of_type(pyVmomi.vim.vm.device.VirtualSriovEthernetCard )
def devices_of_type(self, type):
devices = []
for device in self.devices():
if isinstance(device, type):
devices.append(device)
return devices
def devices_indexed_by_label(self):
devices = {}
for device in self.devices():
key = device.deviceInfo.label
value = device
devices[key] = value
return devices
def guest(self):
return self.summary().guest
def info(self):
        summary = self.summary() # cache this: each reference to self.vm.summary makes a REST call to the server
        #print(summary)
        config = summary.config # these values are retrieved on the initial call to self.vm.summary
        guest = summary.guest # using self.vm.summary.guest here would have resulted in two more REST calls
runtime = summary.runtime
info = {
"Annotation" : config.annotation ,
"BootTime" : str(runtime.bootTime) ,
"ConnectionState" : runtime.connectionState,
"GuestId" : config.guestId ,
"GuestFullName" : config.guestFullName ,
"Host" : runtime.host ,
"HostName" : guest.hostName ,
"IP" : guest.ipAddress ,
"MemorySizeMB" : config.memorySizeMB ,
"MOID" : self.vm._moId ,
"Name" : config.name ,
"MaxCpuUsage" : runtime.maxCpuUsage ,
"MaxMemoryUsage" : runtime.maxMemoryUsage ,
"NumCpu" : config.numCpu ,
"PathName" : config.vmPathName ,
"StateState" : runtime.powerState ,
"Question" : None ,
"UUID" : config.uuid
}
# if guest != None: info['IP'] = guest.ipAddress
        if runtime.question is not None: info['Question'] = runtime.question.text
return info
def hardware(self):
return self.vm.config.hardware
def host_name(self):
return self.guest().hostName
def ip(self):
return self.guest().ipAddress
def name(self):
return self.config().name
def moid(self):
return self.vm._moId
def powered_state(self):
return self.runtime().powerState
def power_on(self):
return self.task().power_on()
def power_off(self):
return self.task().power_off()
def powered_on(self):
return self.powered_state() == 'poweredOn'
def powered_off(self):
return self.powered_state() == 'poweredOff'
def screenshot(self, target_file=None):
from k8_vmware.vsphere.VM_Screenshot import VM_Screenshot
return VM_Screenshot(self, target_file=target_file).download()
def send_text(self, text):
VM_Keystroke(self).send_text(text)
return self
def send_key(self, text):
        VM_Keystroke(self).send_key(text)
return self
def send_enter(self):
VM_Keystroke(self).enter()
return self
def summary(self):
return self.vm.summary # will make REST call to RetrievePropertiesEx
def task(self):
from k8_vmware.vsphere.VM_Task import VM_Task # have to do this import here due to circular dependencies (i.e. VM_Task imports VM)
return VM_Task(self)
def runtime(self):
return self.vm.summary.runtime
def uuid(self):
return self.config().uuid
def wait(self, seconds): # to help with fluent code
wait(seconds)
return self
def __str__(self):
return f'[VM] {self.name()}'
|
[
"dinis.cruz@owasp.org"
] |
dinis.cruz@owasp.org
|
0584747d8f65280307db8e8f7a973bf9d702eb19
|
39f1ae1e3b95d717f6d103b3ac534b468090c36f
|
/py_blackbox_backend/py_blackbox_backend/settings.py
|
52046da510da0ace923632f0a9d29badf4aa06f4
|
[] |
no_license
|
arron1993/blackbox.arron.id
|
5d532af4e9557986f8af5c9018d9d789bbd03470
|
4da60f3dd524bd0afbdc3613767a818bcab1cd8d
|
refs/heads/master
| 2023-05-04T05:19:15.792063
| 2021-05-20T20:34:28
| 2021-05-20T20:34:28
| 346,052,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,974
|
py
|
"""
Django settings for py_blackbox_backend project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import datetime
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'DJANGO_SECRET_KEY',
'9qyc-ncc0jq(y*4y6j4w88bffe!isuzf)1e0*sxu4w1d=k4xxo')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
}
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": datetime.timedelta(days=7),
"REFRESH_TOKEN_LIFETIME": datetime.timedelta(days=14),
}
INSTALLED_APPS = [
'fuel.apps.FuelConfig',
'circuit.apps.CircuitConfig',
'car.apps.CarConfig',
'session.apps.SessionConfig',
'session_type.apps.SessionTypeConfig',
'metrics.apps.MetricsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'py_blackbox_backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'py_blackbox_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "live-blackbox",
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"arron.moore93@gmail.com"
] |
arron.moore93@gmail.com
|
b0a4fdf7a72979e444fdfa623f69a8f29cd809db
|
13a179f6251d8354b058ff02b3101d904b606f0b
|
/src/shepherd_simu/src/sailboat_sim.py
|
55da970d98847cf36e6b2ca0d542b0e025b062a6
|
[
"MIT"
] |
permissive
|
ENSTA-Bretagne-Shepherd/Shepherd-Ros-Structure
|
4bb2ecb146e9fbc0897a780980634a711dc1788b
|
6ce33426911fc50dfd61f165d73efe9702c2009b
|
refs/heads/master
| 2021-01-12T08:36:38.511501
| 2017-02-22T13:20:17
| 2017-02-22T13:20:17
| 76,635,278
| 1
| 0
| null | 2017-02-10T16:13:33
| 2016-12-16T08:16:52
|
CMake
|
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
#!/usr/bin/env python
import rospy
from models.sailboat import Sailboat
from shepherd_reg.msg import SailboatCmd
from shepherd_disp.msg import SailboatPose
from std_msgs.msg import Float64
def update_cmd(msg):
global cmd
print 'Updated cmd:', msg.rudder_angle, msg.sail_angle
cmd = [msg.rudder_angle, msg.sail_angle]
def update_wind_direction(msg):
global wind_direction
wind_direction = msg.data
def update_wind_force(msg):
global wind_force
wind_force = msg.data
rospy.init_node('sailboat_simu')
sailboat = Sailboat(theta=0.1, v=3)
# Sailboat pose publisher
pose_pub = rospy.Publisher('sailboat/pose_real', SailboatPose, queue_size=1)
# Subscribe to the command of the sailboat
sub = rospy.Subscriber('sailboat/cmd', SailboatCmd, update_cmd)
# Subscribe to the wind
rospy.Subscriber('env/wind_direction', Float64, update_wind_direction)
rospy.Subscriber('env/wind_force', Float64, update_wind_force)
# Command
cmd = [0, 0]
wind_force = 3
wind_direction = 0
# rate
rate = rospy.Rate(10)
while not rospy.is_shutdown():
sailboat.simulate(cmd, wind_force, wind_direction)
pose = SailboatPose()
pose.pose.x = sailboat.x
pose.pose.y = sailboat.y
pose.pose.theta = sailboat.theta
pose_pub.publish(pose)
rate.sleep()
|
[
"ejalaa12@gmail.com"
] |
ejalaa12@gmail.com
|
3b5faf029aed4be7d85694ac734b8aed784d187a
|
b156aad4624ec6dbc2efcca93181bbb948d16cc6
|
/utils/utils.py
|
c128221bb9fdc076717f2e26e232be3b58d048cc
|
[] |
no_license
|
itang85/bookshop-django
|
d191e2af002db94073ee8c59eeb768002443958f
|
b136629b4e5b1dc7f0661e4b06618f31c95d7ede
|
refs/heads/master
| 2023-03-28T02:59:06.729909
| 2021-03-05T15:41:49
| 2021-03-05T15:41:49
| 332,227,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
import base64, json, re, jwt, datetime, time, hashlib, random
from calendar import timegm
# Import the Google Authenticator (TOTP) module
# import pyotp
# Import the cache module
# from django.core.cache import cache
from rest_framework.throttling import BaseThrottle
from django.conf import settings
from conf.area.area_list import area_dict
from utils.settings import api_settings
def jwt_payload_handler(account):
payload = {
'id': account.pk,
        'exp': datetime.datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA  # expiration time
}
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.datetime.utcnow().utctimetuple()
)
if api_settings.JWT_AUDIENCE is not None:
payload['aud'] = api_settings.JWT_AUDIENCE
if api_settings.JWT_ISSUER is not None:
payload['iss'] = api_settings.JWT_ISSUER
return payload
def jwt_get_user_id_from_payload_handler(payload):
return payload.get('id')
def jwt_encode_handler(payload):
return jwt.encode(
payload,
api_settings.JWT_PRIVATE_KEY or api_settings.JWT_SECRET_KEY,
api_settings.JWT_ALGORITHM
)
def jwt_decode_handler(token):
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
return jwt.decode(
token,
api_settings.JWT_PUBLIC_KEY or api_settings.JWT_SECRET_KEY,
[api_settings.JWT_ALGORITHM],
options=options,
verify=api_settings.JWT_VERIFY,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER
)
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token
}
# Rate-limiting (throttle) component
VISIT_RECORD = {}
class VisitThrottle(BaseThrottle):
def __init__(self):
self.history = None
def allow_request(self, request, view):
remote_addr = request.META.get('HTTP_X_REAL_IP')
        # print('Requesting IP:', remote_addr)
ctime = time.time()
if remote_addr not in VISIT_RECORD:
VISIT_RECORD[remote_addr] = [ctime,]
return True
history = VISIT_RECORD.get(remote_addr)
self.history = history
while history and history[-1] < ctime - 60:
history.pop()
print(VISIT_RECORD)
        if len(history) < 100:  # rate cap: the same IP may hit this endpoint at most 100 times per minute
history.insert(0, ctime)
return True
else:
return False
def wait(self):
ctime = time.time()
return 60 - (ctime-self.history[-1])
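# A minimal usage sketch (assumption: ExampleView is an illustrative DRF view;
# any APIView can opt in the same way):
#
#   from rest_framework.views import APIView
#   class ExampleView(APIView):
#       throttle_classes = [VisitThrottle]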
def get_region_cn(code):
province = area_dict['province_list'][code[0:2] + '0000']
city = area_dict['city_list'][code[0:4] + '00']
county = area_dict['county_list'][code]
return province + '-' + city + '-' + county
|
[
"1094252227@qq.com"
] |
1094252227@qq.com
|
55058d7c8d58c89e603d4127debeb4b8df5bd25a
|
70730512e2643833e546e68761ee6cd3d7b95e1d
|
/01-python基础/code/day03/day02_exercise/exercise03.py
|
fadfaaf9e18f4ab6a6bc3a1b8aadc81dd9936e0a
|
[] |
no_license
|
Yuchen1995-0315/review
|
7f0b0403aea2da62566642c6797a98a0485811d1
|
502859fe11686cc59d2a6d5cc77193469997fe6a
|
refs/heads/master
| 2020-08-26T23:16:33.193952
| 2019-10-24T00:30:32
| 2019-10-24T00:30:32
| 217,177,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
(扩展)在控制台中录入一个秒,计算是几小时零几分钟零几秒钟.
"""
# 10:40
total_second = int(input("请输入总秒数:"))
second = total_second % 60
hour = total_second // 60 // 60
minute = total_second // 60 % 60
# Inserting variables into a string:
# "...x..."  ->  "..." + variable + "..."
print(str(hour) + " hours, " + str(minute) + " minutes and " + str(second) + " seconds")
|
[
"2456830920@qq.com"
] |
2456830920@qq.com
|
4fe27b358e04b2dd76cba83b1b138fdd6e369026
|
29fd3daff8c31764c00777e67d2cc9b3e94ba761
|
/examples/ch05_examples/mandelbrot/mandelbrot/cython_pure_python/setup.py
|
440f9ca05742dda490c59ab9d203bd5d6f221906
|
[] |
no_license
|
mwoinoski/crs1906
|
06a70a91fc99e2d80e2ed3cea5724afa22dce97d
|
202f7cc4cae684461f1ec2c2c497ef20211b3e5e
|
refs/heads/master
| 2023-06-23T17:13:08.163430
| 2023-06-12T21:44:39
| 2023-06-12T21:44:39
| 39,789,380
| 1
| 2
| null | 2022-01-26T20:43:18
| 2015-07-27T17:54:56
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# for notes on compiler flags e.g. using
# export CFLAGS=-O2
# so gcc has -O2 passed (even though it doesn't make the code faster!)
# http://docs.python.org/install/index.html
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("calculate_z", ["calculate_z.pyx"])]
)
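# A typical build invocation for this script (assumption: Cython and a C
# compiler are installed; run from this directory):
#
#   python setup.py build_ext --inplace
#
# which compiles calculate_z.pyx into an importable calculate_z extension.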
|
[
"michaelw@articulatedesign.us.com"
] |
michaelw@articulatedesign.us.com
|
951e54a1ed72d5527bcb0dd1b534c6ef1079a65b
|
2cc84af3d2a146b4dbb04bed3cfd542fa0622489
|
/image-tools/image_clustering/tile_clustering.py
|
1047091f0afa7d8cb8376a27b8df124a3fda22b4
|
[
"MIT"
] |
permissive
|
flegac/deep-experiments
|
e6a05b1a58eadf4c39580e95bb56d311e3dfa0ac
|
e1b12e724f2c8340cbe9c51396cf3f42e3b4e934
|
refs/heads/master
| 2020-04-09T00:20:15.132255
| 2019-10-11T16:39:47
| 2019-10-11T16:39:47
| 159,862,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
import glob
import os
from typing import List, Callable
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import to_rgb
from scipy.stats import wasserstein_distance
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from image_clustering.tiler import GridTiler
from mydeep_api.tensor import Tensor
TagComputer = Callable[[Tensor], int]
HistComputer = Callable[[Tensor], Tensor]
class Params(object):
def __init__(self, bins: int = 64, pca_components: int = 64, tile_size: int = 64):
self.bins = bins
self.pca_components = pca_components
self.tiler = GridTiler(tile_size=tile_size)
def hist_computer(self, img: Tensor):
        # cv2.imread returns channel-last BGR images, so take per-channel
        # histograms over the last axis and L2-normalise each one
        r, _ = np.histogram(img[:, :, 2], bins=self.bins, range=[0, 256])
        r = r / np.linalg.norm(r)
        g, _ = np.histogram(img[:, :, 1], bins=self.bins, range=[0, 256])
        g = g / np.linalg.norm(g)
        b, _ = np.histogram(img[:, :, 0], bins=self.bins, range=[0, 256])
        b = b / np.linalg.norm(b)
        return np.hstack((r, g, b))
class ClusterTagComputer(TagComputer):
def __init__(self, path: str, hist_computer: HistComputer):
self.hist_computer = hist_computer
self.clusters = [
[hist_computer(cv2.imread(img_path)) for img_path in glob.glob('{}/{}/*.png'.format(path, _))]
for _ in os.listdir(path)
]
self.stats()
def stats(self):
for _ in self.clusters:
for c in _:
bins = np.array(range(len(c)))
prob = c / np.sum(c)
image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
plt.imshow(image, 'gray')
def __call__(self, data: Tensor):
hist = self.hist_computer(data)
d2 = [min([wasserstein_distance(hist, _) for _ in c]) for c in self.clusters]
return int(np.argmin(d2))
class KmeanTagComputer(TagComputer):
def __init__(self, p: Params, images: List[str], cluster_number: int):
self.hist_computer = p.hist_computer
self.model = KMeans(n_clusters=cluster_number, n_init=20)
dataset = []
for _ in images:
img = cv2.imread(_)
boxes = GridTiler(tile_size=32).tiles(img.shape[:2])
histograms = [p.hist_computer(box.cut(img)) for box in boxes]
dataset.extend(histograms)
self.pipeline = Pipeline(steps=[
('pca', PCA(n_components=p.pca_components)),
('clustering', self.model),
])
self.pipeline.fit(dataset)
# self.stats()
def stats(self):
centers = (self.model.cluster_centers_ + 1) / 2
for c in centers:
bins = np.array(range(len(c))) * 4
prob = c / np.sum(c)
image = np.sort(np.random.choice(bins, size=128 * 128, replace=True, p=prob)).reshape((128, 128))
plt.imshow(image, 'gray')
def __call__(self, data: Tensor):
hist = self.hist_computer(data)
return self.pipeline.predict([hist])[0]
def tile_clustering(img: Tensor, tag_computer: TagComputer, tiler: GridTiler):
out = img.copy()
k = 8
for box in tiler.tiles(img.shape[:2]):
flag = tag_computer(box.cut(img))
pt1 = (box.left + k, box.top + k)
pt2 = (box.right - k, box.bottom - k)
cv2.rectangle(out, pt1, pt2, tuple(256 * _ for _ in to_rgb(COLORS[flag])), 2)
return out
COLORS = ['red', 'blue', 'green', 'white', 'yellow',
'orange', 'purple', 'cyan', 'magenta', 'gray']
P = Params(
bins=128,
pca_components=128,
tile_size=128
)
if __name__ == '__main__':
dataset = 'cs'
images = glob.glob('../tests/20190802_export_s2_it1/{}/*_?.png'.format(dataset))
model_tag_computer = KmeanTagComputer(P, images, cluster_number=4)
cluster_tag_computer = ClusterTagComputer('../image_editor/tiles', P.hist_computer)
os.makedirs(dataset, exist_ok=True)
for _ in images:
name = os.path.basename(_).replace('.tif', '')
img = cv2.imread(_)
img1 = tile_clustering(img, model_tag_computer, P.tiler)
cv2.imwrite('{}/{}_model.png'.format(dataset, name), img1)
# img2 = tile_clustering(img, cluster_tag_computer, P.tiler)
# cv2.imwrite('{}/{}_clusters.png'.format(dataset, name), img2)
|
[
"florent.legac@gmail.com"
] |
florent.legac@gmail.com
|
d6ce1c57d5d48ad3fcd540884b07b83997ecc399
|
4c3e992678341ccaa1d4d14e97dac2e0682026d1
|
/addons/mass_mailing/tests/test_mail.py
|
09822f1c111822ef55b13830edbcad40590accf1
|
[] |
no_license
|
gahan-corporation/wyatt
|
3a6add8f8f815bd26643e1e7c81aea024945130d
|
77e56da362bec56f13bf0abc9f8cf13e98461111
|
refs/heads/master
| 2021-09-03T18:56:15.726392
| 2018-01-08T02:54:47
| 2018-01-08T02:54:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from gerp.addons.mail.tests.common import TestMail
class test_message_compose(TestMail):
def test_OO_mail_mail_tracking(self):
""" Tests designed for mail_mail tracking (opened, replied, bounced) """
pass
|
[
"duchess@gahan-corporation.com"
] |
duchess@gahan-corporation.com
|
c3814fd79b1a1d8c165a84db0088b1cace467d56
|
417e6eb589d3441c3c8b9901e2d35873dd35f097
|
/src/structural/observer.py
|
0ea844bf4d99f8480fb048987da3a1e944975507
|
[] |
no_license
|
vmgabriel/pattern-python
|
4fc6127ebdb521d0a4a7b10b4b68880f691ee630
|
74f1cd1314a79060d1df1a6df018c39572bc2b4c
|
refs/heads/master
| 2023-04-24T06:45:16.773415
| 2021-05-10T21:14:51
| 2021-05-10T21:14:51
| 365,394,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
"""Observer Pattern"""
# Libraries
from abc import ABCMeta, abstractmethod
class Publisher(metaclass=ABCMeta):
    @abstractmethod
    def add_observer(self, observer):
        pass
    @abstractmethod
    def remove_observer(self, observer):
        pass
    @abstractmethod
    def notify_all(self):
        pass
    @abstractmethod
    def write_post(self, text):
        pass
class PlatziForum(Publisher):
def __init__(self):
self.users_list = []
self.post = None
def add_observer(self, observer):
if observer not in self.users_list:
self.users_list.append(observer)
def remove_observer(self, observer):
self.users_list.remove(observer)
def notify_all(self):
for observer in self.users_list:
observer.notify(self.post)
def write_post(self, text):
self.post = text
self.notify_all()
class Subscriber:
def notify(self, post):
pass
class UserA(Subscriber):
def __init__(self):
pass
def notify(self, post):
        print('User A has been notified - {}'.format(post))
class UserB(Subscriber):
def __init__(self):
pass
def notify(self, post):
        print('User B has been notified - {}'.format(post))
if __name__ == '__main__':
foro = PlatziForum()
user1 = UserA()
user2 = UserB()
foro.add_observer(user1)
foro.add_observer(user2)
    foro.write_post('Post on Platzi')
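    # Expected output:
    #   User A has been notified - Post on Platzi
    #   User B has been notified - Post on Platzi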
|
[
"vmgabriel96@gmail.com"
] |
vmgabriel96@gmail.com
|
be868d1d34aa3dad1df6b4c850a30a4565685c4c
|
e22390ec9aa1a842626075113472f81076e1bf5f
|
/pullenti/semantic/SemFragment.py
|
3954b163c1693d239274475b64e5e3f1cd4930fb
|
[] |
no_license
|
pullenti/PullentiPython
|
ba9f450f3f49786732e80f34d0506d4a6d41afc3
|
815d550b99f113034c27f60d97493ce2f8e4cfcc
|
refs/heads/master
| 2021-06-22T17:12:36.771479
| 2020-12-11T06:10:23
| 2020-12-11T06:10:23
| 161,268,453
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,299
|
py
|
# Copyright (c) 2013, Pullenti. All rights reserved.
# Non-Commercial Freeware and Commercial Software.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project.
# The latest version of the code is available on the site www.pullenti.ru
import typing
from pullenti.unisharp.Utils import Utils
from pullenti.ner.core.GetTextAttr import GetTextAttr
from pullenti.ner.core.MiscHelper import MiscHelper
from pullenti.semantic.ISemContainer import ISemContainer
from pullenti.semantic.SemGraph import SemGraph
from pullenti.semantic.SemFragmentType import SemFragmentType
from pullenti.semantic.SemObjectType import SemObjectType
class SemFragment(ISemContainer):
""" Фрагмент блока (предложение)
"""
def __init__(self, blk : 'SemBlock') -> None:
self.__m_graph = SemGraph()
self.m_higher = None;
self.typ = SemFragmentType.UNDEFINED
self.is_or = False
self.begin_token = None;
self.end_token = None;
self.tag = None;
self.m_higher = blk
@property
def graph(self) -> 'SemGraph':
""" Объекты фрагмента (отметим, что часть объектов, связанных с этим блоком,
могут находиться в графах вышележащих уровней).
"""
return self.__m_graph
@property
def higher(self) -> 'ISemContainer':
return self.m_higher
@property
def block(self) -> 'SemBlock':
""" Владелец фрагмента """
return self.m_higher
@property
def root_objects(self) -> typing.List['SemObject']:
""" Список объектов SemObject, у которых нет связей. При нормальном разборе
такой объект должен быть один - это обычно предикат. """
res = list()
for o in self.__m_graph.objects:
if (len(o.links_to) == 0):
res.append(o)
return res
@property
def can_be_error_structure(self) -> bool:
cou = 0
vcou = 0
for o in self.__m_graph.objects:
if (len(o.links_to) == 0):
if (o.typ == SemObjectType.VERB):
vcou += 1
cou += 1
if (cou <= 1):
return False
return vcou < cou
@property
def spelling(self) -> str:
""" Текст фрагмента """
return MiscHelper.get_text_value(self.begin_token, self.end_token, GetTextAttr.KEEPREGISTER)
@property
def begin_char(self) -> int:
return (0 if self.begin_token is None else self.begin_token.begin_char)
@property
def end_char(self) -> int:
return (0 if self.end_token is None else self.end_token.end_char)
def __str__(self) -> str:
if (self.typ != SemFragmentType.UNDEFINED):
return "{0}: {1}".format(Utils.enumToString(self.typ), Utils.ifNotNull(self.spelling, "?"))
else:
return Utils.ifNotNull(self.spelling, "?")
|
[
"alex@alexkuk.ru"
] |
alex@alexkuk.ru
|
a39f0bac82f84873a6dbf8cfd3f6a437ad45d06c
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/stock_account/wizard/stock_picking_return.py
|
5d54d201e6b6a357cd694038a75f20b33ff45cc4
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
# -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import api, fields, models
class StockReturnPicking(models.TransientModel):
_inherit = "stock.return.picking"
@api.model
def default_get(self, default_fields):
res = super(StockReturnPicking, self).default_get(default_fields)
for i, k, vals in res.get('product_return_moves', []):
vals.update({'to_refund': True})
return res
def _create_returns(self):
new_picking_id, pick_type_id = super(StockReturnPicking, self)._create_returns()
new_picking = self.env['stock.picking'].browse([new_picking_id])
for move in new_picking.move_lines:
return_picking_line = self.product_return_moves.filtered(lambda r: r.move_id == move.origin_returned_move_id)
if return_picking_line and return_picking_line.to_refund:
move.to_refund = True
return new_picking_id, pick_type_id
class StockReturnPickingLine(models.TransientModel):
_inherit = "stock.return.picking.line"
to_refund = fields.Boolean(string="Update quantities on SO/PO", default=True,
help='Trigger a decrease of the delivered/received quantity in the associated Sale Order/Purchase Order')
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
f9ae3dfa9e5cae2982f31a833e426773e239ed40
|
e77732bce61e7e97bad5cee1b07d1b5f9b6fa590
|
/cat/utils/data/exclude_corpus.py
|
08cc3af28cfc90a11efade2a6bdf4941e1369fb7
|
[
"Apache-2.0"
] |
permissive
|
entn-at/CAT
|
9f28f5ff75b37ac90baf63609226deb99d73dbe2
|
fc74841e8f6b7eb2f2f88bb7c09b30ad5a8c16f4
|
refs/heads/master
| 2023-04-10T13:32:31.333889
| 2023-02-27T16:50:43
| 2023-02-27T17:29:07
| 236,718,892
| 0
| 0
| null | 2020-01-28T11:24:01
| 2020-01-28T11:24:00
| null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
# Author: Huahuan Zheng (maxwellzh@outlook.com)
#
# Fetch n lines from source corpus and exclude part of the source if needed.
#
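# Example invocation (assumption: the file names here are illustrative; the
# filtered corpus goes to stdout):
#
#   python exclude_corpus.py corpus.txt --exclude-corpus dev.txt -n 10000 > train.txt
#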
import sys
import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("corpus", type=str,
help="Path to the source text corpus.")
parser.add_argument("--exclude-corpus", type=str, dest="f_exc",
help="Add this option if you want to exclude it from source corpus, take first column as index.")
parser.add_argument("-n", "--num-lines", type=int,
help="Number of lines to be prepared, if not specified, would take all of them (after excluded).")
args = parser.parse_args()
if not os.path.isfile(args.corpus):
raise FileNotFoundError(f"--corpus={args.corpus} is not a valid file.")
if args.f_exc is not None and not os.path.isfile(args.f_exc):
raise FileNotFoundError(
f"--exclude-corpus={args.f_exc} is not a valid file.")
if args.num_lines is not None:
if args.num_lines < 0:
raise ValueError(
f"--num-lines={args.num_lines} < 0 is invalid, expected valud >= 0")
num_lines = args.num_lines
else:
num_lines = sum(1 for _ in open(args.corpus, 'r'))
# prepare excluding list
excluding_list = set()
if args.f_exc is not None:
with open(args.f_exc, 'r') as fi:
for line in fi:
line = line.strip()
if ' ' in line or '\t' in line:
uid, _ = line.split(maxsplit=1)
else:
uid = line
excluding_list.add(uid)
cnt = 0
with open(args.corpus, 'r') as fi:
try:
for line in fi:
line = line.strip()
if ' ' in line or '\t' in line:
uid, _ = line.split(maxsplit=1)
else:
uid = line
if uid in excluding_list:
continue
if cnt >= num_lines:
break
sys.stdout.write(f"{line}\n")
cnt += 1
except IOError:
exit(0)
if cnt < num_lines and args.num_lines is not None:
raise RuntimeError(
f"Source corpus text doesn't have enough unique lines to export: {cnt} in total, expect {num_lines}")
|
[
"maxwellzh@outlook.com"
] |
maxwellzh@outlook.com
|
6c249c704fb9dcad286b896aac14b4023e741304
|
98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3
|
/bingads/v13/bulk/entities/audiences/bulk_campaign_negative_product_audience_association.py
|
80d8c4fc61c6babc61466c3ac50597c9c0a847f1
|
[
"MIT"
] |
permissive
|
BingAds/BingAds-Python-SDK
|
a2f9b0c099b574a4495d0052218f263af55cdb32
|
373a586402bf24af7137b7c49321dbc70c859fce
|
refs/heads/main
| 2023-07-27T15:31:41.354708
| 2023-07-10T03:21:03
| 2023-07-10T03:21:03
| 31,927,550
| 105
| 182
|
NOASSERTION
| 2023-09-04T06:51:20
| 2015-03-09T23:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 586
|
py
|
from bingads.v13.bulk.entities.audiences.bulk_campaign_negative_audience_association import *
class BulkCampaignNegativeProductAudienceAssociation(BulkCampaignNegativeAudienceAssociation):
""" Represents an Campaign Negative Product Audience Association that can be read or written in a bulk file.
For more information, see Campaign Negative Product Audience Association at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
|
[
"qitia@microsoft.com"
] |
qitia@microsoft.com
|
a1cf5368c4eea778d041c5af86d0bf3a3f4abd62
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_sodden.py
|
9a58a5d33739918fea93b36c32416d0d46ba6316
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# class header
class _SODDEN():
def __init__(self,):
self.name = "SODDEN"
self.definitions = [u'(of something that can absorb water) extremely wet: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
282399609317833cf35a9418fdac25bece55fe85
|
5afc3043b9b43a0e72bc94a90ed832a9576bb580
|
/base/skill_59/py_06/py_44_copyreg.py
|
d527bd7b1839bbf108f061884156436b5976dfb3
|
[] |
no_license
|
JR1QQ4/python
|
629e7ddec7a261fb8a59b834160ceea80239a0f7
|
a162a5121fdeeffbfdad9912472f2a790bb1ff53
|
refs/heads/main
| 2023-08-25T00:40:25.975915
| 2021-11-07T14:10:20
| 2021-11-07T14:10:20
| 311,769,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Item 44: Use copyreg to make pickle reliable
# The built-in pickle module is only suitable for serializing and deserializing
# objects between programs that trust each other.
# Once the usage gets more complex, the pickle module's behaviour can break down.
# Combining the built-in copyreg module with pickle lets us supply values for
# attributes missing from old data, manage class versioning,
# and give serialized data a stable import path.
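# A minimal runnable sketch of the idea (the GameState class below is
# illustrative only, not taken from the book or any existing project code):
import copyreg
import pickle
class GameState(object):
    def __init__(self, level=0, lives=4):
        self.level = level
        self.lives = lives
def unpickle_game_state(kwargs):
    # Rebuild through the constructor, so attributes added later with default
    # values get filled in even when loading old serialized data.
    return GameState(**kwargs)
def pickle_game_state(game_state):
    # Reduce the object to (callable, args); pickle records the import path of
    # unpickle_game_state together with the constructor kwargs.
    kwargs = game_state.__dict__
    return unpickle_game_state, (kwargs,)
copyreg.pickle(GameState, pickle_game_state)
if __name__ == '__main__':
    state = GameState()
    state.level += 1
    serialized = pickle.dumps(state)  # now routed through pickle_game_state
    restored = pickle.loads(serialized)
    print(restored.__dict__)  # {'level': 1, 'lives': 4}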
|
[
"chenjunrenyx@163.com"
] |
chenjunrenyx@163.com
|
627b37547257bf028218c394028ba638d78fb0a6
|
bdd8fe60144b364dade0c383ba9ac7a400457c69
|
/freight/api/task_log.py
|
1f03bac3b2cbdff6275b4d8d6b4886d30a94c799
|
[
"Apache-2.0"
] |
permissive
|
thoas/freight
|
61eda7cb397696eb2c3a7504d03f2f4654ad7e8f
|
9934cfb3c868b5e4b813259ca83c748676d598a0
|
refs/heads/master
| 2021-01-18T17:24:25.758448
| 2015-09-03T20:45:35
| 2015-09-03T20:45:36
| 41,413,179
| 1
| 0
| null | 2015-08-26T08:13:07
| 2015-08-26T08:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
from __future__ import absolute_import
from flask_restful import reqparse
from freight.api.base import ApiView
from freight.config import db
from freight.models import LogChunk
from .task_details import TaskMixin
class TaskLogApiView(ApiView, TaskMixin):
get_parser = reqparse.RequestParser()
get_parser.add_argument('offset', location='args', type=int, default=0)
get_parser.add_argument('limit', location='args', type=int)
def get(self, **kwargs):
"""
Retrieve task log.
"""
task = self._get_task(**kwargs)
if task is None:
return self.error('Invalid task', name='invalid_resource', status_code=404)
args = self.get_parser.parse_args()
queryset = db.session.query(
LogChunk.text, LogChunk.offset, LogChunk.size
).filter(
LogChunk.task_id == task.id,
).order_by(LogChunk.offset.asc())
if args.offset == -1:
# starting from the end so we need to know total size
tail = db.session.query(LogChunk.offset + LogChunk.size).filter(
LogChunk.task_id == task.id,
).order_by(LogChunk.offset.desc()).limit(1).scalar()
if tail is None:
logchunks = []
else:
if args.limit:
queryset = queryset.filter(
(LogChunk.offset + LogChunk.size) >= max(tail - args.limit + 1, 0),
)
else:
if args.offset:
queryset = queryset.filter(
LogChunk.offset >= args.offset,
)
if args.limit:
queryset = queryset.filter(
LogChunk.offset < args.offset + args.limit,
)
logchunks = list(queryset)
if logchunks:
next_offset = logchunks[-1].offset + logchunks[-1].size
else:
next_offset = args.offset
links = self.build_cursor_link('next', next_offset)
context = {
'text': ''.join(l.text for l in logchunks),
'nextOffset': next_offset,
}
return self.respond(context, links=links)
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
7e5e4f719b75a501b9e069ca581e0344b89df260
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17r_1_01a/brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/__init__.py
|
c69003159bbe860e7c9e51d4eb0d278fd1ce133e
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,711
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/clear-mpls-auto-bandwidth-statistics-lsp/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'clear-mpls-auto-bandwidth-statistics-lsp', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'clear-mpls-auto-bandwidth-statistics-lsp', u'input']
def _get_lsp_name(self):
"""
Getter method for lsp_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/lsp_name (string)
YANG Description: LSP Name
"""
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
"""
Setter method for lsp_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp/input/lsp_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_name() directly.
YANG Description: LSP Name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..64']}), is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
lsp_name = __builtin__.property(_get_lsp_name, _set_lsp_name)
_pyangbind_elements = {'lsp_name': lsp_name, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
8dcecbb2db91f0781c70434f88392b4d940ba544
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/ankiandroid/testcase/firstcases/testcase4_014.py
|
be1af9489a3d62454bebe7063c442ec02f4fe4d7
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,621
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.ichi2.anki',
'appActivity' : 'com.ichi2.anki.IntentHandler',
'resetKeyboard' : True,
'androidCoverage' : 'com.ichi2.anki/com.ichi2.anki.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase014
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_sync\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"You must log in to a third party account to use the cloud sync service. You can create one in the next step.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Default\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"0\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.ichi2.anki:id/action_sync\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Custom study session\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Options\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Define custom steps\")", "new UiSelector().className(\"android.widget.TextView\").instance(11)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\" rated:1:1\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys(" rated:1:1");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("99999");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("0");
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/edit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("1");
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_014\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.ichi2.anki'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
ce641dbede6f04804b41a0a8460de2268bda2a1e
|
b87f66b13293782321e20c39aebc05defd8d4b48
|
/maps/build/TraitsGUI/enthought/pyface/key_pressed_event.py
|
0072c81aca8d415f585dffb9111cb439544649df
|
[
"BSD-3-Clause"
] |
permissive
|
m-elhussieny/code
|
5eae020932d935e4d724c2f3d16126a0d42ebf04
|
5466f5858dbd2f1f082fa0d7417b57c8fb068fad
|
refs/heads/master
| 2021-06-13T18:47:08.700053
| 2016-11-01T05:51:06
| 2016-11-01T05:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
""" The event that is generated when a key is pressed. """
# Enthought library imports.
from enthought.traits.api import Bool, HasTraits, Int, Any
class KeyPressedEvent(HasTraits):
""" The event that is generated when a key is pressed. """
#### 'KeyPressedEvent' interface ##########################################
# Is the alt key down?
alt_down = Bool
# Is the control key down?
control_down = Bool
# Is the shift key down?
shift_down = Bool
# The keycode.
key_code = Int
# The original toolkit specific event.
event = Any
#### EOF ######################################################################
|
[
"fspaolo@gmail.com"
] |
fspaolo@gmail.com
|
d94eac8d709bc2eb7d4ac8bf5765e1247c5dc9c7
|
67ceb35320d3d02867350bc6d460ae391e0324e8
|
/practice/easy/0231-Power_of_Two.py
|
2937bb2cf84cca61705c0d77d1f846cbe4ef3766
|
[] |
no_license
|
mattjp/leetcode
|
fb11cf6016aef46843eaf0b55314e88ccd87c91a
|
88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70
|
refs/heads/master
| 2023-01-22T20:40:48.104388
| 2022-12-26T22:03:02
| 2022-12-26T22:03:02
| 184,347,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
while n > 0 and n % 2 == 0:
n /= 2
        return n == 1
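    # An equivalent constant-time check, added here as an alternative sketch:
    # a positive power of two has exactly one set bit.
    def isPowerOfTwoBitwise(self, n: int) -> bool:
        return n > 0 and n & (n - 1) == 0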
|
[
"noreply@github.com"
] |
mattjp.noreply@github.com
|
52140a927a1c45b88b007bd1af0bfe4d2d942003
|
50402cc4388dfee3a9dbe9e121ef217759ebdba8
|
/django_wk/Mikesite/pubApp/pubmanager.py
|
fec872afed9ea1d9834c4cc18521c7088d2b2c74
|
[] |
no_license
|
dqyi11/SVNBackup
|
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
|
9ad38e38453ef8539011cf4d9a9c0a363e668759
|
refs/heads/master
| 2020-03-26T12:15:01.155873
| 2015-12-10T01:11:36
| 2015-12-10T01:11:36
| 144,883,382
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
'''
Created on 2013-12-29
@author: Walter
'''
from pubApp.models import Paper
class PubManager(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.type = "all"
self.year = "all"
self.type_all = "all"
self.type_article = "article"
self.type_proceeding = "inproceedings"
self.year_all = "all"
self.typeIdx = 0
def getYearList(self):
years = []
        publications = Paper.objects.all()
for p in publications:
if not (str(p.year) in years):
years.append(str(p.year))
years.sort(reverse=True)
return years
def getPubList(self):
papers = []
if self.type == "all" and self.year =="all":
papers = Paper.objects.all().order_by('-year','title')
elif self.type == "all":
papers = Paper.objects.filter(year=self.year).order_by('-year','title')
elif self.year == "all":
papers = Paper.objects.filter(type=self.type).order_by('-year','title')
else:
papers = Paper.objects.filter(type=self.type).filter(year=self.year).order_by('-year','title')
self.typeIdx = self.getTypeIndex()
pub_years = []
for p in papers:
ys = [y for y in pub_years if p.year==y[0]]
if len(ys) == 0:
pub_years.append((p.year, []))
for p in papers:
year = next(y for y in pub_years if p.year==y[0])
year[1].append(p)
return pub_years
def getTypeIndex(self):
if self.type == self.type_article:
return 1
if self.type == self.type_proceeding:
return 2
return 0
def getYearIndex(self):
if self.year == self.year_all:
return 0
return int(self.year)
|
[
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] |
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
|
3a42023dfd9ac8cc3bbee4b8459c832bd62732a1
|
9e38b45f555ffa08fe036b7b0429871ccdd85303
|
/Python/string_split_and_join.py
|
8b17eac33966a7d952c106de079a896dbe6307f7
|
[] |
no_license
|
shayaankhan05/HackerRank
|
b066969b0514046bd8620b55d0458d8284a12005
|
a975fac85af80310ec2ec5f6275c94ceefe3715b
|
refs/heads/master
| 2023-06-01T09:06:23.374474
| 2021-06-24T08:10:38
| 2021-06-24T08:10:38
| 294,485,980
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def split_and_join(line):
    # split on spaces, then re-join the tokens with hyphens
    return "-".join(line.split(" "))
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
|
[
"you@example.com"
] |
you@example.com
|
8dbad1d4354d63d045a7b9f71ef8405a05615120
|
e16cc78f0e05e50d589558535ae0fc5e414dd4a0
|
/IM5.4.0_timing/ztest_e_send5_video.py
|
1c13ecf0caf88ccbce3128b36482d9396bea79b6
|
[] |
no_license
|
wenqiang1990/wenqiang_code
|
df825b089e3bd3c55bcff98f4946f235f50f2f3d
|
3c9d77e0a11af081c60a5b1f4c72ecd159945864
|
refs/heads/master
| 2020-06-19T04:38:39.052037
| 2019-12-18T03:40:39
| 2019-12-18T03:40:39
| 196,561,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,642
|
py
|
#coding:utf-8
import time
import datetime
import unittest
from appium.webdriver.common.touch_action import TouchAction
from robot.utils.asserts import *
from appium import webdriver
from public import login
from public import logout
from clear_massage import clear_massage
from set_driver import set_driver
class Imtest(unittest.TestCase):
def setUp(self):
wq=set_driver()
self.driver=wq.get_driver()
self.verificationErrors = []
self.driver.implicitly_wait(10)
def test_send_video(self):
        '''A group member sends a short-video message'''
clear_massage(self,name="groupname1")
clear_massage(self,name=u"系统通知")
driver = self.driver
with open('F:\Appium\group\groupID.txt','r') as f:
el=f.read()
driver.find_element_by_id("com.yuntongxun.eckuailiao:id/btn_address_list").click()#点击联系人
driver.find_element_by_id("com.yuntongxun.eckuailiao:id/tv_head_group").click()#点击群组
driver.find_element_by_id("com.yuntongxun.eckuailiao:id/p_list").click()#点击群组列表
el=u"群组id:"+el
driver.find_element_by_name(el).click()#点击群组id,以后改为读取上一条用例创建群组的id
#群成员发送图片
self.driver.find_element_by_id("chatting_attach_btn").click()#点击加号
self.driver.find_element_by_name(u"短视频").click()
time.sleep(2)
action1 = TouchAction(self.driver)
el = self.driver.find_element_by_id("com.yuntongxun.eckuailiao:id/start")
action1.long_press(el,duration=10000).perform()
self.driver.find_element_by_id("com.yuntongxun.eckuailiao:id/ok").click()#点击发送
time.sleep(5)
el=self.driver.find_element_by_id("tv_read_unread").get_attribute("text")
assert_equal(el, u"已读", msg=u"状态验证失败")
print el+u" 阅读状态验证成功"
el = self.driver.find_element_by_id("tv_read_unread")#状态
action1 = TouchAction(self.driver)
action1.long_press(el,duration=5000).perform()
self.driver.find_element_by_name(u"删除").click()
self.driver.find_element_by_id("dilaog_button3").click()#确认删除
time.sleep(2)
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # build the test suite
suite = unittest.TestSuite()
suite.addTest(Imtest("test_send_video"))
    # run the tests
runner = unittest.TextTestRunner()
runner.run(suite)
|
[
"1058099258@qq.com"
] |
1058099258@qq.com
|
088973a0dbd18923e03afc75a341c75a61a348e9
|
cb80ebc49bc92c350f6d6f039a6a4f0efa6b4c60
|
/EnvironmentVariables/EnvironmentVariables.py
|
9e6a9451d078f7f1d2002410d1982b10be2b1a30
|
[] |
no_license
|
rabramley/pythonTrials
|
9708ef1b39011c8c08909808132114ff3b30d34a
|
bbc93a9f69afbe3cd045de5835ad3c8a4a557050
|
refs/heads/master
| 2021-01-15T23:07:48.074817
| 2015-06-22T14:11:20
| 2015-06-22T14:11:20
| 32,924,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
#!/usr/bin/env python
import os
print os.environ['HOME']
# using get will return `None` if a key is not present rather than raise a `KeyError`
print os.environ.get('KEY_THAT_MIGHT_EXIST')
default_value = 'Use this instead'
# os.getenv is equivalent, and can also give a default value instead of `None`
print os.getenv('KEY_THAT_MIGHT_EXIST', default_value)
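# Setting a value (illustrative addition; the key name below is made up):
# assignments to os.environ affect only this process and any children it
# spawns afterwards.
os.environ['SOME_NEW_KEY'] = 'some value'
print os.environ['SOME_NEW_KEY']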
|
[
"rabramley@gmail.com"
] |
rabramley@gmail.com
|
745f90f519853d1de410ac75ee637f5d3b14f3a6
|
070b693744e7e73634c19b1ee5bc9e06f9fb852a
|
/python/problem-tree/maximum_width_of_binary_tree.py
|
a18203e5b1c59a32be6b1e9d83fef22553353874
|
[] |
no_license
|
rheehot/practice
|
a7a4ce177e8cb129192a60ba596745eec9a7d19e
|
aa0355d3879e61cf43a4333a6446f3d377ed5580
|
refs/heads/master
| 2021-04-15T22:04:34.484285
| 2020-03-20T17:20:00
| 2020-03-20T17:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
# https://leetcode.com/problems/maximum-width-of-binary-tree
# https://leetcode.com/problems/maximum-width-of-binary-tree/solution
from TreeNode import TreeNode
class Solution:
# Wrong Answer
def widthOfBinaryTree0(self, root):
if root is None:
return 0
print(root)
width, nodes, curDepth, q = 0, [], 0, [(0, root)]
while q:
depth, node = q.pop(0)
if depth != curDepth:
curDepth = depth
while nodes[0] is None:
nodes.pop(0)
while nodes[-1] is None:
nodes.pop()
width = max(width, len(nodes))
nodes = [node]
else:
nodes.append(node)
if node:
if node.left or node.right:
q.append((depth + 1, node.left))
q.append((depth + 1, node.right))
print(nodes)
while nodes[0] is None:
nodes.pop(0)
while nodes[-1] is None:
nodes.pop()
width = max(width, len(nodes))
return width
# Wrong Answer
def widthOfBinaryTree1(self, root):
if root is None:
return 0
if root.left is None and root.right is None:
return 1
def getWidth(width, minW, maxW):
if minW == maxW:
return max(width, 1)
return max(width, maxW - minW + 1)
width, curDepth, minW, maxW, q = 0, 0, 0, 0, [(0, 1, root)]
while q:
depth, pos, node = q.pop(0)
print(depth, pos, node.val)
if curDepth != depth:
width = getWidth(width, minW, maxW)
curDepth, minW, maxW = depth, pos, pos
else:
maxW = pos
if node.left:
q.append((depth + 1, pos * 2, node.left))
if node.right:
q.append((depth + 1, pos * 2 + 1, node.right))
width = getWidth(width, minW, maxW)
return width
# runtime; 40ms, 100.00%
# memory; 13MB, 100.00%
def widthOfBinaryTree(self, root):
if root is None:
return 0
nodesDict, prevDepth, q = {}, -1, [(0, 1, root)]
while q:
depth, pos, node = q.pop(0)
if prevDepth != depth:
prevDepth = depth
nodesDict[depth] = [pos, pos]
else:
nodesDict[depth][1] = pos
if node.left:
q.append((depth + 1, pos * 2, node.left))
if node.right:
q.append((depth + 1, pos * 2 + 1, node.right))
print(nodesDict)
return max([maxPos - minPos + 1 for minPos, maxPos in nodesDict.values()])
s = Solution()
root1 = TreeNode(1)
root1.left = TreeNode(3)
root1.right = TreeNode(2)
root1.left.left = TreeNode(5)
root1.left.right = TreeNode(3)
root1.right.right = TreeNode(9)
root2 = TreeNode(1)
root2.left = TreeNode(3)
root2.left.left = TreeNode(5)
root2.left.right = TreeNode(3)
root3 = TreeNode(1)
root3.left = TreeNode(3)
root3.right = TreeNode(2)
root3.left.left = TreeNode(5)
root4 = TreeNode(1)
root4.left = TreeNode(1)
root4.right = TreeNode(1)
root4.left.left = TreeNode(1)
root4.right.right = TreeNode(1)
root4.left.left.left = TreeNode(1)
root4.right.right.right = TreeNode(1)
root5 = TreeNode(1)
root6 = TreeNode(1)
root6.left = TreeNode(2)
root7 = TreeNode(1)
root7.left = TreeNode(3)
root7.right = TreeNode(2)
root7.left.left = TreeNode(5)
data = [(root1, 4),
(root2, 2),
(root3, 2),
(root4, 8),
(root5, 1),
(root6, 1),
(root7, 2),
]
for root, expected in data:
real = s.widthOfBinaryTree(root)
print('{}, expected {}, real {}, result {}'.format(root, expected, real, expected == real))
|
[
"agapelover4u@yahoo.co.kr"
] |
agapelover4u@yahoo.co.kr
|
e3f4e1bc264e4e9e928ef3ebb533de57033f0c84
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/tools/perf/measurements/power.py
|
58551ae3207e2de8e876bea951fc32323d5b63c9
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 1,961
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import network
from metrics import power
from telemetry.core import util
from telemetry.page import legacy_page_test
class Power(legacy_page_test.LegacyPageTest):
"""Measures power draw and idle wakeups during the page's interactions."""
def __init__(self):
super(Power, self).__init__()
self._power_metric = None
self._network_metric = None
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
self._network_metric = network.NetworkMetric(platform)
def WillNavigateToPage(self, page, tab):
self._network_metric.Start(page, tab)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
self._network_metric.Stop(page, tab)
self._power_metric.Stop(page, tab)
self._network_metric.AddResults(tab, results)
self._power_metric.AddResults(tab, results)
def DidRunPage(self, platform):
del platform # unused
self._power_metric.Close()
class LoadPower(Power):
def WillNavigateToPage(self, page, tab):
self._network_metric.Start(page, tab)
self._power_metric.Start(page, tab)
def DidNavigateToPage(self, page, tab):
pass
class QuiescentPower(legacy_page_test.LegacyPageTest):
"""Measures power draw and idle wakeups after the page finished loading."""
# Amount of time to measure, in seconds.
SAMPLE_TIME = 30
def ValidateAndMeasurePage(self, page, tab, results):
if not tab.browser.platform.CanMonitorPower():
return
util.WaitFor(tab.HasReachedQuiescence, 60)
metric = power.PowerMetric(tab.browser.platform)
metric.Start(page, tab)
time.sleep(QuiescentPower.SAMPLE_TIME)
metric.Stop(page, tab)
metric.AddResults(tab, results)
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
c144c9ebf7ac40827af104a5950dc340e65e4004
|
83d947dd8683ed447b6bdb9d15683109ca0195bc
|
/git_sew/ui/cli/containers/App.py
|
b38e83e444cd4ff51285d171725911c2c7266b75
|
[
"MIT"
] |
permissive
|
fcurella/git-sew
|
dda6b84a3b522bb1fc5982bfa610b174159cb691
|
920bc26125a127e257be3e37a9bf10cb90aa5368
|
refs/heads/master
| 2020-07-23T14:51:39.476225
| 2019-09-09T22:56:11
| 2019-09-10T15:45:11
| 207,599,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import urwid
from urwid_pydux import ConnectedComponent
from git_sew.ui.cli.components.gitlogs import Footer, Loading
class App(ConnectedComponent):
def map_state_to_props(self, state, own_props):
return {"body": state["box"]}
def render_component(self, props):
if props["body"] is None:
body = Loading()
else:
body = props["body"]
return urwid.Padding(urwid.Frame(body, footer=Footer()), left=2, right=2)
|
[
"flavio.curella@gmail.com"
] |
flavio.curella@gmail.com
|
17f859589c603a22117367624010559c8063f80b
|
f6a8d93c0b764f84b9e90eaf4415ab09d8060ec8
|
/Functions/orders.py
|
7fef95d38a0f0303db054de91a1ee188f9750e62
|
[] |
no_license
|
DimoDimchev/SoftUni-Python-Fundamentals
|
90c92f6e8128b62954c4f9c32b01ff4fbb405a02
|
970360dd6ffd54b852946a37d81b5b16248871ec
|
refs/heads/main
| 2023-03-18T17:44:11.856197
| 2021-03-06T12:00:32
| 2021-03-06T12:00:32
| 329,729,960
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
def order(product, times_ordered):
final_price = 0
    for _ in range(times_ordered):
if product == "coffee":
final_price += 1.50
elif product == "water":
final_price += 1.00
elif product == "coke":
final_price += 1.40
elif product == "snacks":
final_price += 2.00
return final_price
product_input = input()
number = int(input())
print('{0:.2f}'.format(order(product_input, number)))
|
[
"noreply@github.com"
] |
DimoDimchev.noreply@github.com
|
3f5447789a83847dcf555f556eaf2067a532731c
|
c06c2c4e084dad8191cbb6fb02227f7b05ba86e7
|
/chat/extras/output/output_adapter.py
|
33c6cd8c7c3458943de637f070c279efad3b0687
|
[] |
no_license
|
thiagorocha06/chatbot
|
053c525d0c6d037570851411618f3cb1186b32b4
|
2d22a355926c50d9b389d3db883f435950b47a77
|
refs/heads/master
| 2020-03-24T14:31:59.134462
| 2018-07-29T14:42:55
| 2018-07-29T14:42:55
| 142,770,645
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
from chat.extras.adapters import Adapter
class OutputAdapter(Adapter):
"""
A generic class that can be overridden by a subclass to provide extended
functionality, such as delivering a response to an API endpoint.
"""
def process_response(self, statement, session_id=None):
"""
Override this method in a subclass to implement customized functionality.
:param statement: The statement that the chat bot has produced in response to some input.
:param session_id: The unique id of the current chat session.
:returns: The response statement.
"""
return statement
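class PrintOutputAdapter(OutputAdapter):
    """
    A hypothetical subclass sketch (not part of the original package):
    delivers the response to the console before returning it.
    """

    def process_response(self, statement, session_id=None):
        print(statement)
        return statement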
|
[
"thiagorocha06@gmail.com"
] |
thiagorocha06@gmail.com
|
e2c63fd44222cfa6dd178d152e811377be48d2ef
|
25873da962b0acdcf2c46b60695866d29008c11d
|
/test/programrtest/aiml_tests/learn_tests/test_learn_aiml.py
|
face1b7f8aacdcafc02f4548da2466da762b9c4a
|
[] |
no_license
|
LombeC/program-r
|
79f81fa82a617f053ccde1115af3344369b1cfa5
|
a7eb6820696a2e5314d29f8d82aaad45a0dc0362
|
refs/heads/master
| 2022-12-01T14:40:40.208360
| 2020-08-10T21:10:30
| 2020-08-10T21:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
import unittest
import os
from programr.context import ClientContext
from programrtest.aiml_tests.client import TestClient
class LearnTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(LearnTestClient, self).load_configuration(arguments)
self.configuration.client_configuration.brain_config[0].brain_config[0].files.aiml_files._files = [os.path.dirname(__file__)]
class LearnAIMLTests(unittest.TestCase):
def setUp(self):
client = LearnTestClient()
self._client_context = client.create_client_context("testid")
def test_learn(self):
response = self._client_context.bot.ask_question(self._client_context, "MY NAME IS FRED")
self.assertIsNotNone(response)
self.assertEqual(response, "OK, I will remember your name is FRED")
response = self._client_context.bot.ask_question(self._client_context, "WHAT IS MY NAME")
self.assertIsNotNone(response)
self.assertEqual(response, "YOUR NAME IS FRED")
def test_learn_x_is_y(self):
response = self._client_context.bot.ask_question(self._client_context, "LEARN THE SUN IS HOT")
self.assertIsNotNone(response)
self.assertEqual(response, "OK, I will remember THE SUN is HOT")
response = self._client_context.bot.ask_question(self._client_context, "LEARN THE SKY IS BLUE")
self.assertIsNotNone(response)
self.assertEqual(response, "OK, I will remember THE SKY is BLUE")
response = self._client_context.bot.ask_question(self._client_context, "LEARN THE MOON IS GREY")
self.assertIsNotNone(response)
self.assertEqual(response, "OK, I will remember THE MOON is GREY")
response = self._client_context.bot.ask_question(self._client_context, "WHAT IS THE SUN")
self.assertIsNotNone(response)
self.assertEqual(response, "HOT")
response = self._client_context.bot.ask_question(self._client_context, "WHAT IS THE SKY")
self.assertIsNotNone(response)
self.assertEqual(response, "BLUE")
response = self._client_context.bot.ask_question(self._client_context, "WHAT IS THE MOON")
self.assertIsNotNone(response)
self.assertEqual(response, "GREY")
|
[
"hilbert.cantor@gmail.com"
] |
hilbert.cantor@gmail.com
|
39fe66b6f1dcefaec65de082d6af8a0c15789557
|
e77a7cc1ed343a85662f0ad3c448a350ab776261
|
/data_structures/array/number_of_1_in_sorted_array.py
|
79689872fbb5d35fdec0a24168779d5ce80f4454
|
[
"MIT"
] |
permissive
|
M4cs/python-ds
|
9dcecab10291be6a274130c42450319dc112ac46
|
434c127ea4c49eb8d6bf65c71ff6ee10361d994e
|
refs/heads/master
| 2020-08-10T03:40:22.340529
| 2019-10-10T17:52:28
| 2019-10-10T17:52:28
| 214,247,733
| 2
| 0
|
MIT
| 2019-10-10T17:43:31
| 2019-10-10T17:43:30
| null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# The array is sorted in decreasing order
def count(arr):
start = 0
end = len(arr) - 1
while start <= end:
mid = (start + end) // 2
        if arr[mid] == 1 and (mid == end or arr[mid + 1] == 0):
return mid + 1
if arr[mid] == 1:
start = mid + 1
else:
end = mid - 1
return 0
arr = [0,0,0,0]
print(count(arr))
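# Extra sanity checks (illustrative):
assert count([1, 1, 1, 0]) == 3
assert count([1, 1, 1, 1]) == 4
assert count([1, 0, 0, 0]) == 1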
|
[
"prabhupant09@gmail.com"
] |
prabhupant09@gmail.com
|
37c6df9686c851389868af110179898a2a55def7
|
8775aac665c4011cc743d737c12342e1b08d8f41
|
/config/hosts.py
|
3766ccf2d2f934ea128736f30b19f3dc8166cf79
|
[] |
no_license
|
kongp3/sys_deploy
|
734dfa3815c93305eca77f5d3f9488968c90ef6f
|
8cd750c4df3f3f64515e3b0051038569d6e8bce2
|
refs/heads/master
| 2020-04-09T06:53:01.340569
| 2018-12-03T04:13:22
| 2018-12-03T04:13:22
| 160,131,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# -*- coding: utf-8 -*-
from config import *
SERVER_HOSTS = [
SERVER1_USER + '@' + SERVER1_IP,
SERVER2_USER + '@' + SERVER2_IP,
SERVER3_USER + '@' + SERVER3_IP,
]
|
[
"kongp3@outlook"
] |
kongp3@outlook
|
7b57c6b8f00aee7146f0fe59c37715e1d98abd23
|
360558c34098ef95077e70a318cda7cb3895c6d9
|
/tests/test_observable/test_windowwithtimeorcount.py
|
266b08416b4ddd813c1b2536d83e66bbad25aa6f
|
[
"Apache-2.0"
] |
permissive
|
AlexMost/RxPY
|
8bcccf04fb5a0bab171aaec897e909ab8098b117
|
05cb14c72806dc41e243789c05f498dede11cebd
|
refs/heads/master
| 2021-01-15T07:53:20.515781
| 2016-03-04T04:53:10
| 2016-03-04T04:53:10
| 53,108,280
| 0
| 1
| null | 2016-03-04T04:50:00
| 2016-03-04T04:49:59
| null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
import unittest
from datetime import timedelta
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestWindowWithTime(unittest.TestCase):
def test_window_with_time_or_count_basic(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
def projection(w, i):
def inner_proj(x):
return "%s %s" % (i, x)
return w.map(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).map(projection).merge_observable()
results = scheduler.start(create)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"), on_completed(600))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_or_count_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_error(600, ex))
def create():
def projection(w, i):
def inner_proj(x):
return "%s %s" % (i, x)
return w.map(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).map(projection).merge_observable()
results = scheduler.start(create)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"), on_error(600, ex))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_or_count_disposed(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
def projection(w, i):
def inner_proj(x):
return "%s %s" % (i, x)
return w.map(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).map(projection).merge_observable()
results = scheduler.start(create, disposed=370)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"))
xs.subscriptions.assert_equal(subscribe(200, 370))
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
f12ce4028eef8a875d3961103e02377c34e07746
|
7a1a65b0cda41ea204fad4848934db143ebf199a
|
/automatedprocesses_firststage/adsym_core_last60_test.py
|
c36727a8b0dc54c680f0dc9be9f8cf1ac23510a5
|
[] |
no_license
|
bpopovich44/ReaperSec
|
4b015e448ed5ce23316bd9b9e33966373daea9c0
|
22acba4d84313e62dbbf95cf2a5465283a6491b0
|
refs/heads/master
| 2021-05-02T18:26:11.875122
| 2019-06-22T15:02:09
| 2019-06-22T15:02:09
| 120,664,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
#!/usr/bin/python2.7
import json
from mysql.connector import MySQLConnection, Error
from python_dbconfig import read_db_config
import aol_api_R_test
def connect():
# """Gets AOL Data and writes them to a MySQL table"""
db = "mysql_sl"
api = "adsym"
# Connect To DB:
db_config = read_db_config(db)
    conn = None
    try:
print('Connecting to database...')
conn = MySQLConnection(**db_config)
if conn.is_connected():
print('connection established.')
cursor = conn.cursor()
sql = "DROP TABLE IF EXISTS adsym_core_last60"
cursor.execute(sql)
sql = "CREATE TABLE adsym_core_last60 (date varchar(50), inventory_source varchar(255), ad_opportunities bigint, \
market_opportunities bigint, ad_attempts bigint, ad_impressions bigint, ad_errors bigint, ad_revenue decimal(15, 5), \
aol_cost decimal(15, 5), epiphany_gross_revenue decimal(15, 5), adsym_revenue decimal(15, 5), total_clicks int, \
iab_viewability_measurable_ad_impressions bigint, iab_viewable_ad_impressions bigint, platform int)"
cursor.execute(sql)
# calls get_access_token function and starts script
logintoken = aol_api_R_test.get_access_token(api)
print(logintoken)
result = aol_api_R_test.run_existing_report(logintoken, "161186")
#print(result)
info = json.loads(result)
#print(info)
for x in json.loads(result)['data']:
date = x['row'][0]
inventory_source = x['row'][1]
ad_opportunities = x['row'][2]
market_opportunities = x['row'][3]
ad_attempts = x['row'][4]
ad_impressions = x['row'][5]
ad_errors = x['row'][6]
ad_revenue = x['row'][7]
aol_cost = x['row'][7]
epiphany_gross_revenue = x['row'][7]
adsym_revenue = x['row'][7]
total_clicks = x['row'][8]
iab_viewability_measurable_ad_impressions = "0"
iab_viewable_ad_impressions = "0"
platform = '4'
            record = (date, inventory_source, ad_opportunities, market_opportunities, ad_attempts, ad_impressions, \
                ad_errors, ad_revenue, aol_cost, epiphany_gross_revenue, adsym_revenue, total_clicks, \
                iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, platform)
            #print(record)
sql = """INSERT INTO adsym_core_last60 VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s"*.20, "%s"*.56, \
"%s"*.24, "%s", "%s", "%s", "%s")""" % (date, inventory_source, ad_opportunities, market_opportunities, \
ad_attempts, ad_impressions, ad_errors, ad_revenue, aol_cost, epiphany_gross_revenue, adsym_revenue, \
total_clicks, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, platform)
cursor.execute(sql)
cursor.execute('commit')
else:
print('connection failed.')
except Error as error:
print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Connection closed.')
if __name__ == '__main__':
connect()
|
[
"bpopovich4@gmail.com"
] |
bpopovich4@gmail.com
|
6e4dd4e629e9a48bb151508f9ec6c2120f4cb676
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/angle_peaks.py
|
3f18f3fab246542885ea6329ac9dc15a38b0f1c8
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692
| 2013-06-05T04:53:08
| 2013-06-05T04:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#!/usr/bin/env python
"""
Create a data set of nn_dist peak distances vs rs & P, for a given neighbor
"""
import os, sys, commands, glob
# RS is a list of all the names of the rs directories
global RS
RS = commands.getoutput('ls -1 | grep "1\." | grep -v c').split()
def main():
# Open the output file
out = open('angle_EV.dat','w')
out.write('# rs, <angle>, P(GPa)\n')
for rs in RS:
# Get pressure
try:
P = commands.getoutput("tail -2 "+rs+"/analysis/pressure.blocker | head -1 | awk '{print $4}'").strip()
except:
P = '--------'
# Get location of peak
try:
EV = float(commands.getoutput("expectation_value.py "+rs+"/analysis/CO2_angles.dat 1 2").split()[-1])
except:
EV = '--------'
# Write to the output file
if '--' in P or '--' in str(EV):
out.write('#')
out.write(rs+' '+str(EV)+' '+P+'\n')
out.close()
if __name__ == '__main__':
main()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
d0b528903a9a1e72d759138a3f5ab4c43d124a28
|
494b763f2613d4447bc0013100705a0b852523c0
|
/cnn/answer/M1_cp32_3_m_d512.py
|
f18fed9ba897d5083554c7a56ca3c5934c11fd9c
|
[] |
no_license
|
DL-DeepLearning/Neural-Network
|
dc4a2dd5efb1b4ef1a3480a1df6896c191ae487f
|
3160c4af78dba6bd39552bb19f09a699aaab8e9e
|
refs/heads/master
| 2021-06-17T05:16:22.583816
| 2017-06-07T01:21:39
| 2017-06-07T01:21:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,704
|
py
|
# libraries & packages
import numpy
import math
import sys
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from os import listdir
from os.path import isfile, join
# this function is provided from the official site
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
# from PIL import Image
# def ndarray2image (arr_data, image_fn):
# img = Image.fromarray(arr_data, 'RGB')
# img.save(image_fn)
from scipy.misc import imsave
def ndarray2image (arr_data, image_fn):
imsave(image_fn, arr_data)
# set dataset path
dataset_path = '../cifar_10/'
# define the information of images which can be obtained from official website
height, width, dim = 32, 32, 3
classes = 10
''' read training data '''
# get the file names which start with "data_batch" (training data)
train_fns = [fn for fn in listdir(dataset_path) if isfile(join(dataset_path, fn)) & fn.startswith("data_batch")]
# list sorting
train_fns.sort()
# make a glace about the training data
fn = train_fns[0]
raw_data = unpickle(dataset_path + fn)
# type of raw data
type(raw_data)
# <type 'dict'>
# check keys of training data
raw_data_keys = raw_data.keys()
# output ['data', 'labels', 'batch_label', 'filenames']
# check dimensions of ['data']
raw_data['data'].shape
# (10000, 3072)
# concatenate pixel (px) data into one ndarray [img_px_values]
# concatenate label data into one ndarray [img_lab]
img_px_values = 0
img_lab = 0
for fn in train_fns:
raw_data = unpickle(dataset_path + fn)
if fn == train_fns[0]:
img_px_values = raw_data['data']
img_lab = raw_data['labels']
else:
img_px_values = numpy.vstack((img_px_values, raw_data['data']))
img_lab = numpy.hstack((img_lab, raw_data['labels']))
print img_px_values
print img_lab
c = raw_input("...")
# convert 1d-ndarray (0:3072) to 3d-ndarray(32,32,3)
X_train = numpy.asarray([numpy.dstack((r[0:(width*height)].reshape(height,width),
r[(width*height):(2*width*height)].reshape(height,width),
r[(2*width*height):(3*width*height)].reshape(height,width)
)) for r in img_px_values])
Y_train = np_utils.to_categorical(numpy.array(img_lab), classes)
# check is same or not!
# lab_eql = numpy.array_equal([(numpy.argmax(r)) for r in Y_train], numpy.array(img_lab))
# draw one image from the pixel data
ndarray2image(X_train[0],"test_image.png")
# print the dimension of training data
print 'X_train shape:', X_train.shape
print 'Y_train shape:', Y_train.shape
''' read testing data '''
# get the file names which start with "test_batch" (testing data)
test_fns = [fn for fn in listdir(dataset_path) if isfile(join(dataset_path, fn)) & fn.startswith("test_batch")]
# read testing data
raw_data = unpickle(dataset_path + test_fns[0])
# type of raw data
type(raw_data)
# check keys of testing data
raw_data_keys = raw_data.keys()
# ['data', 'labels', 'batch_label', 'filenames']
img_px_values = raw_data['data']
# check dimensions of data
print "dim(data)", numpy.array(img_px_values).shape
# dim(data) (10000, 3072)
img_lab = raw_data['labels']
# check dimensions of labels
print "dim(labels)",numpy.array(img_lab).shape
# dim(data) (10000,)
X_test = numpy.asarray([numpy.dstack((r[0:(width*height)].reshape(height,width),
r[(width*height):(2*width*height)].reshape(height,width),
r[(2*width*height):(3*width*height)].reshape(height,width)
)) for r in img_px_values])
Y_test = np_utils.to_categorical(numpy.array(raw_data['labels']), classes)
# scale image data to range [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
# print the dimension of training data
print 'X_test shape:', X_test.shape
print 'Y_test shape:', Y_test.shape
# normalize inputs from 0-255 to 0.0-1.0
'''CNN model'''
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=X_train[0].shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('softmax'))
'''setting optimizer'''
learning_rate = 0.01
learning_decay = 0.01/32
sgd = SGD(lr=learning_rate, decay=learning_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# check parameters of every layers
model.summary()
''' training'''
batch_size = 128
epoch = 32
# validation data comes from training data
# model.fit(X_train, Y_train, batch_size=batch_size,
# nb_epoch=epoch, validation_split=0.1, shuffle=True)
# validation data comes from testing data
fit_log = model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=epoch, validation_data=(X_test, Y_test), shuffle=True)
'''saving training history'''
import csv
history_fn = 'cp32_3_m_d512.csv'
with open(history_fn, 'wb') as csv_file:
w = csv.writer(csv_file)
temp = numpy.array(fit_log.history.values())
w.writerow(fit_log.history.keys())
for i in range(temp.shape[1]):
w.writerow(temp[:,i])
'''saving model'''
from keras.models import load_model
model.save('cp32_3_m_d512.h5')
del model
'''loading model'''
model = load_model('cp32_3_m_d512.h5')
'''prediction'''
pred = model.predict_classes(X_test, batch_size, verbose=0)
ans = [numpy.argmax(r) for r in Y_test]
# caculate accuracy rate of testing data
acc_rate = sum(pred-ans == 0)/float(pred.shape[0])
print "Accuracy rate:", acc_rate
|
[
"teinhonglo@gmail.com"
] |
teinhonglo@gmail.com
|
88743421203b00b54d21f449bdbbc3fddf47d0a0
|
faea85c8583771933ffc9c2807aacb59c7bd96e6
|
/python/pencilnew/visu/internal/MinorSymLogLocator.py
|
1e83f3c925453c62d8eeb6f112a86c81dcdb0538
|
[] |
no_license
|
JosephMouallem/pencil_code
|
1dc68377ecdbda3bd3dd56731593ddb9b0e35404
|
624b742369c09d65bc20fdef25d2201cab7f758d
|
refs/heads/master
| 2023-03-25T09:12:02.647416
| 2021-03-22T02:30:54
| 2021-03-22T02:30:54
| 350,038,447
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
##
## symlog tick helper
from matplotlib.ticker import Locator
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
        otherwise it is logarithmic.
"""
self.linthresh = linthresh
def __call__(self):
import numpy as np
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
majorlocs = np.append(majorlocs, majorlocs[-1]*10.)
majorlocs = np.append(majorlocs[0]*0.1, majorlocs)
# iterate through minor locs
minorlocs = []
# handle the lowest part
for i in xrange(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i-1]
if abs(majorlocs[i-1] + majorstep/2) < self.linthresh:
ndivs = 10
else:
ndivs = 9
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
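if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): attach the locator to a
    # symlog-scaled axis so minor ticks fill the decades between major
    # ticks. Older matplotlib releases spell the threshold kwarg
    # 'linthreshy'; newer ones use 'linthresh'.
    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    x = np.linspace(-50.0, 50.0, 500)
    ax.plot(x, x ** 3)
    ax.set_yscale('symlog', linthreshy=1.0)
    ax.yaxis.set_minor_locator(MinorSymLogLocator(1.0))
    plt.show()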
|
[
"j3mouall@uwaterloo.ca"
] |
j3mouall@uwaterloo.ca
|
a69df7f43308fc5480efdd170214dcdb43a9bc12
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03288/s994428906.py
|
8f675834b3b195d3bece521191e038e01a6a4385
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def main():
n = int(input())
if n < 1200:
print("ABC")
elif 1200 <= n < 2800:
print("ARC")
else:
print("AGC")
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
942bad6052ac0e1168ff2fd57652246ca6e3a2fd
|
3416464630bc3322dd677001811de1a6884c7dd0
|
/others/q14_longest_common_prefix/__init__.py
|
2e8fe4c2d56cc68aee767b789c99e32124d7ef6d
|
[] |
no_license
|
ttomchy/LeetCodeInAction
|
f10403189faa9fb21e6a952972d291dc04a01ff8
|
14a56b5eca8d292c823a028b196fe0c780a57e10
|
refs/heads/master
| 2023-03-29T22:10:04.324056
| 2021-03-25T13:37:01
| 2021-03-25T13:37:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: __init__.py.py
Description:
Author: Barry Chow
Date: 2020/12/4 10:45 PM
Version: 0.1
"""
|
[
"zhouenguo@163.com"
] |
zhouenguo@163.com
|
9fbd4414790f6c01e7a84591c3d5093412933571
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/1293ec85dd68dfc31183ae9ec654333301103660-<test_distribution_version>-fix.py
|
fb83f3cec75b577fead88273ad25d671dc08b97c
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
@pytest.mark.parametrize('testcase', TESTSETS, ids=(lambda x: x['name']))
def test_distribution_version(testcase):
    '''tests the distribution parsing code of the Facts class

    testsets have
    * a name (for output/debugging only)
    * input files that are faked
      * those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions
      * all files that are not listed here are assumed to not exist at all
    * the output of pythons platform.dist()
    * results for the ansible variables distribution* and os_family
    '''
from ansible.module_utils import basic
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={
}))
with swap_stdin_and_argv(stdin_data=args):
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(argument_spec=dict())
_test_one_distribution(facts, module, testcase)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c7097703377e37843c47abd796ec4f333f4d2e77
|
fde31c14f7a31bc98221e3959748748c32bfc7ad
|
/stock/tests.py
|
6c296e822db3a61d931e039b2f4a94f5f04f77dd
|
[] |
no_license
|
schoolofnetcom/django-avancado
|
d557d05a96db6bc8471fec6cfc1bc80b78ea2266
|
0a9e0c92c437928caf3e647b7d9a35a0633d1ff2
|
refs/heads/master
| 2021-05-04T19:47:14.636565
| 2018-12-19T22:13:39
| 2018-12-19T22:13:39
| 106,818,135
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
from django.contrib.auth.models import User
from django.test import TestCase
# Create your tests here.
from django.test.testcases import SimpleTestCase
from django.urls.base import reverse
from stock.models import Product, TimestampableMixin, StockEntry
class ProductTest(SimpleTestCase):
def test_value_initial_stock_field(self):
product = Product()
self.assertEquals(0, product.stock)
# self.assertEquals(1, product.stock)
def test_product_has_timestampable(self):
product = Product()
self.assertIsInstance(product, TimestampableMixin)
def test_exception_when_stock_less_zero(self):
product = Product()
with self.assertRaises(ValueError) as exception:
product.stock = 10
product.decrement(11)
self.assertEquals('Sem estoque disponível', str(exception.exception))
class ProductDatabaseTest(TestCase):
fixtures = ['data.json']
def setUp(self):
self.product = Product.objects.create(
name="Produto YY", stock_max=200, price_sale=50.50, price_purchase=25.25,
)
def test_product_save(self):
self.assertEquals('Produto YY', self.product.name)
self.assertEquals(0, self.product.stock)
def test_if_user_exists(self):
user = User.objects.all().first()
self.assertIsNotNone(user)
class StockEntryHttpTest(TestCase):
fixtures = ['data.json']
def test_list(self):
response = self.client.get('/stock_entries/')
self.assertEquals(200, response.status_code)
self.assertIn('Produto A', str(response.content))
def test_create(self):
url = reverse('entries_create')
self.client.post(url, {'product': 1, 'amount': 20})
entry = StockEntry.objects.filter(amount=20, product_id=1).first()
self.assertIsNotNone(entry)
self.assertEquals(31, entry.product.stock)
|
[
"argentinaluiz@gmail.com"
] |
argentinaluiz@gmail.com
|
7e5bb7c4fd4c0b14d3a1b3190ac870bc303b7697
|
a20c2e03720ac51191c2807af29d85ea0fa23390
|
/vowelorconsonant.py
|
18231b547eae391f8d07f55d552ba6abc0453b56
|
[] |
no_license
|
KishoreKicha14/Guvi1
|
f71577a2c16dfe476adc3640dfdd8658da532e0d
|
ddea89224f4f20f92ebc47d45294ec79040e48ac
|
refs/heads/master
| 2020-04-29T23:32:13.628601
| 2019-08-05T17:48:18
| 2019-08-05T17:48:18
| 176,479,262
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
n = input()
a = ord(n)
f = 0
if (a in range(97, 123)) or (a in range(65, 91)):
    v = [65, 69, 73, 79, 85, 97, 101, 105, 111, 117]
    for i in v:
        if i == a:
            f = 1
            print("Vowel")
            break
    if f == 0:
        print("Consonant")
else:
    print("invalid")
|
[
"noreply@github.com"
] |
KishoreKicha14.noreply@github.com
|
3d48ec33045fb784ead90b898d450e65020f22cd
|
40ce4d7545309ca57f0670a3aa27573d43b18552
|
/com.ppc.Microservices/intelligence/daylight/location_midnight_microservice.py
|
c6ef5c09622b3603af1f31da45b9aac04e419e01
|
[
"Apache-2.0"
] |
permissive
|
slrobertson1/botlab
|
769dab97cca9ee291f3cccffe214544663d5178e
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
refs/heads/master
| 2020-07-28T06:45:37.316094
| 2019-09-18T15:34:08
| 2019-09-18T15:34:08
| 209,341,818
| 0
| 0
|
Apache-2.0
| 2019-09-18T15:23:37
| 2019-09-18T15:23:37
| null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
'''
Created on February 25, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
class LocationMidnightMicroservice(Intelligence):
"""
Announce midnight throughout the microservices framework
"""
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
self.parent.track(botengine, "midnight")
if schedule_id == "MIDNIGHT":
self.parent.distribute_datastream_message(botengine, "midnight_fired", None, internal=True, external=False)
|
[
"dmoss@peoplepowerco.com"
] |
dmoss@peoplepowerco.com
|
bee0d4a64d8b86383ac57f7631cf62041079a8ed
|
b66bf5a58584b45c76b9d0c5bf828a3400ecbe04
|
/week-04/4-recursion/6.py
|
757f22bf72fb91301f22cd313315ff7f695c6926
|
[] |
no_license
|
greenfox-velox/szepnapot
|
1196dcb4be297f12af7953221c27cd1a5924cfaa
|
41c3825b920b25e20b3691a1680da7c10820a718
|
refs/heads/master
| 2020-12-21T08:11:41.252889
| 2016-08-13T10:07:15
| 2016-08-13T10:07:15
| 58,042,932
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# 6. We have bunnies standing in a line, numbered 1, 2, ... The odd bunnies
# (1, 3, ..) have the normal 2 ears. The even bunnies (2, 4, ..) we'll say
# have 3 ears, because they each have a raised foot. Recursively return the
# number of "ears" in the bunny line 1, 2, ... n (without loops or
# multiplication).
def handicap_bunny_ears(n):
if n == 1:
return 2
elif n % 2 == 0:
return 3 + handicap_bunny_ears(n - 1)
else:
return 2 + handicap_bunny_ears(n - 1)
print(handicap_bunny_ears(6))
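# Worked check (illustrative): bunnies 1..6 contribute 2+3+2+3+2+3 = 15 ears.
assert handicap_bunny_ears(6) == 15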
|
[
"silentpocok@gmail.com"
] |
silentpocok@gmail.com
|
7ac8563d6aacb10540b4737d547e048a5b5d34cb
|
8723e6a6104e0aa6d0a1e865fcaaa8900b50ff35
|
/util/test_registration.py
|
552ee309cf404634bd31b78fa6016c8364671422
|
[] |
no_license
|
ejeschke/ginga-plugin-template
|
9c4324b7c6ffaa5009cce718de8ea2fc5172bc81
|
545c785a184aedb1535d161d3c5ca5e7bf5bed6e
|
refs/heads/master
| 2022-11-22T17:50:57.503956
| 2022-11-10T23:20:09
| 2022-11-10T23:20:09
| 78,906,928
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
"""
This program allows you to test whether your plugin will register
itself correctly.
"""
from pkg_resources import iter_entry_points
groups = ['ginga.rv.plugins']
available_methods = []
for group in groups:
for entry_point in iter_entry_points(group=group, name=None):
available_methods.append(entry_point.load())
d = dict(name='Name', ptype='Type', klass='Class', module='Module')
print("%(name)14.14s %(ptype)6.6s %(klass)20s %(module)20s" % d)
for method in available_methods:
spec = method()
# for debugging
#print(spec)
d = dict(spec)
d.setdefault('name', spec.get('name', spec.get('menu', spec.get('klass', spec.get('module')))))
d.setdefault('klass', spec.get('module'))
d.setdefault('ptype', 'local')
print("%(name)14.14s %(ptype)6.6s %(klass)20s %(module)20s" % d)
|
[
"eric@naoj.org"
] |
eric@naoj.org
|
4ab0aeb36367fb0df10d5a681a0fd858593a06fd
|
5d13e9565779b4123e1b72815396eee0051d4980
|
/parse_titles_with_dictionary.py
|
136868340d5c6c8b92b10d3a91f34b556fdbc104
|
[] |
no_license
|
futuresystems-courses/475-Dictionary-Based-Analysis-of-PubMed-Article-Titles-for-Mental-Disorders-Kia
|
b455574908971e487b99a175d8fdbd48d13f8c60
|
1ab992b1859905ee57e51a258777192b2ec32339
|
refs/heads/master
| 2021-01-10T11:06:37.112800
| 2015-12-28T17:03:00
| 2015-12-28T17:03:00
| 48,702,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,328
|
py
|
import sys
import nltk
import csv
import ast
from operator import itemgetter
import random
stopwords = nltk.corpus.stopwords.words('english')
disorder_phrases = {}
DICTIONARY_FILE = 'mental diseases dictionary.csv'
try:
    TITLES_FILE = sys.argv[1]
except IndexError:
    print "\nERROR: No file name given. Please add the filename of the titles file; e.g.:\n\n    python parse_titles_with_dictionary.py parsed_titles.txt\n"
    sys.exit(0)
#parse target mental disorder phrases, but keep original version of phrase
with open(DICTIONARY_FILE, 'rb') as csvfile:
dictreader = csv.reader(csvfile, delimiter='\t')
for row in dictreader:
#print row
term_words = [w.lower() for w in nltk.RegexpTokenizer(r'\w+').tokenize(row[0])]
        # drop stopwords with a list comprehension; calling remove() while
        # iterating over the same list skips elements
        term_words = [word for word in term_words if word not in stopwords]
disorder_phrases[row[0]] = term_words
#print disorder_phrases
with open(TITLES_FILE, 'rb') as csvfile:
file_id = str(random.randint(1,99999)) # add a random number to filenames to make them unique
with open('tagging_titles_' + file_id + '.txt', 'wb') as pubfile:
titlesreader = csv.reader(csvfile, delimiter='|', escapechar='\\')
recordwriter = csv.writer(pubfile, delimiter='|', quotechar='"', escapechar='\\')
recordwriter.writerow(["pmid","pubyear","lang","title","match_found","best_full_match","best_window_match"])
'''
run through titles in file
file format is "pmid|year|title|language"
'''
for row in titlesreader:
title = ''
match_found = ''
best_full_match = ''
best_window_match = ''
pmid = row[0]
pubyear = row[1]
title = row[2] #third column of original file
lang = row[3]
print title
#split and tokenize the title
title_words = [w.lower() for w in nltk.RegexpTokenizer(r'\w+').tokenize(title)]
full_title_scores = {}
small_window_scores = {}
# test paper titles for each word of disorder phrase from dictionary
# keep scores for all disorder phrases
for phrase in disorder_phrases:
word_matches = {}
for word in disorder_phrases[phrase]:
'''
for each word in disorder phrase, find the index of that word in the title
'''
indexes = [i for i, j in enumerate(title_words) if j == word]
'''
if word was found at least once, keep track of it and the indexes
'''
if len(indexes) > 0:
word_matches[word] = indexes
'''
if found as many words in the title as started with
in the disorder phrase, keep it as at least a "full title"
match, and possibly also a "within window" match
'''
if len(word_matches) == len(disorder_phrases[phrase]):
print "potential match (all words found in title):", phrase
full_title_scores[phrase] = len(disorder_phrases[phrase])
'''
if phrase is only one word long, it also counts as a small_window_match
'''
if len(disorder_phrases[phrase]) == 1:
small_window_scores[phrase] = len(disorder_phrases[phrase])
print "single word phrase; stored!"
else:
'''
if phrase is longer than one word, find min and max indexes
by sorting the matched words by the indexes (in descending
order), then taking the first and last elements
'''
sorted_word_matches = sorted(word_matches.items(), key=itemgetter(1), reverse=True)
max_index = sorted_word_matches[0][1][0] #first index value of first element
min_index = sorted_word_matches[-1][1][0] #first index value of last element
'''
if difference between max and min index is no more than 2
more than the length of the original phrase (that is, if there
are no more than 2 additional words intermingled in the disorder
phrase words), count it as a match
'''
if (max_index - min_index) <= len(disorder_phrases[phrase]) + 2:
small_window_scores[phrase] = len(disorder_phrases[phrase])
print "in window!"
else:
print "not in window!"
'''
Take all of the potential "full title" and "small window" matches.
If more than one match has been found in either category, sort
the matches by the length of the phrase, and take the longest phrase
as the best match.
'''
if len(full_title_scores) > 0 and len(small_window_scores) > 0:
match_found = 'Y'
print "all phrases checked; testing for best one"
if len(full_title_scores) == 1:
best_full_match = full_title_scores.keys()[0]
print "only one full match:", best_full_match
else:
sorted_full_scores = sorted(full_title_scores.items(), key=itemgetter(1), reverse=True)
best_full_match = sorted_full_scores[0][0]
print "found best full match:", best_full_match
#but need to check if first and second are tied
if len(small_window_scores) == 1:
best_window_match = small_window_scores.keys()[0]
print "only one window match:", best_window_match
else:
sorted_window_scores = sorted(small_window_scores.items(), key=itemgetter(1), reverse=True)
best_window_match = sorted_window_scores[0][0]
print "found best window match:", best_window_match
#but need to check if first and second are tied
else:
print "no matches found."
match_found = 'N'
recordwriter.writerow([pmid,pubyear,lang,title,match_found,best_full_match,best_window_match])
|
[
"hroe.lee@gmail.com"
] |
hroe.lee@gmail.com
|
dec11357af01c915a67ab8710067b1a3a55ad0ad
|
3f90f4f6876f77d6b43e4a5759b20e2e8d20e684
|
/ex10/ex10.py
|
89d4ec1d631136bdb021736fec2f1dbed4325af0
|
[] |
no_license
|
kwangilahn/kwang
|
00d82d1bdca45000ee44fa2be55bdb8bff182c91
|
df2fa199648ae894cbcf18952f6924a9b897d639
|
refs/heads/master
| 2021-01-21T04:47:02.719819
| 2016-06-07T03:59:57
| 2016-06-07T03:59:57
| 54,316,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
tabby_cat = "\t I'm tabbed in"
persian_cat = "I'm split\non a line"
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishes
\t* catnip\n\t* Grass
"""
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
|
[
"CAD Client"
] |
CAD Client
|
6ac47a2b80b00b8628a0e5edd7e2a578430cde8a
|
7d2442279b6dbaae617e2653ded92e63bb00f573
|
/neupy/layers/transformations.py
|
ee9240912d7a646656b8944c68acbf4b57ff406b
|
[
"MIT"
] |
permissive
|
albertwy/neupy
|
c830526859b821472592f38033f8475828f2d389
|
a8a9a8b1c11b8039382c27bf8f826c57e90e8b30
|
refs/heads/master
| 2021-06-03T21:23:37.636005
| 2016-05-24T21:18:25
| 2016-05-24T21:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
import numpy as np
import theano.tensor as T
from neupy.core.properties import ProperFractionProperty, TypedListProperty
from .base import BaseLayer
__all__ = ('Dropout', 'Reshape')
class Dropout(BaseLayer):
""" Dropout layer
Parameters
----------
proba : float
Fraction of the input units to drop. Value needs to be
between 0 and 1.
"""
proba = ProperFractionProperty(required=True)
def __init__(self, proba, **options):
options['proba'] = proba
super(Dropout, self).__init__(**options)
@property
def size(self):
return self.relate_to_layer.size
def output(self, input_value):
        # Use a NumPy seed to make the Theano code easily reproducible
max_possible_seed = 4e9
seed = np.random.randint(max_possible_seed)
theano_random = T.shared_randomstreams.RandomStreams(seed)
proba = (1.0 - self.proba)
mask = theano_random.binomial(n=1, p=proba,
size=input_value.shape,
dtype=input_value.dtype)
return (mask * input_value) / proba
def __repr__(self):
return "{name}(proba={proba})".format(
name=self.__class__.__name__,
proba=self.proba
)
class Reshape(BaseLayer):
""" Gives a new shape to an input value without changing
its data.
Parameters
----------
shape : tuple or list
New feature shape. ``None`` value means that feature
will be flatten in 1D vector. If you need to get the
output feature with more that 2 dimensions then you can
set up new feature shape using tuples. Defaults to ``None``.
"""
shape = TypedListProperty()
def __init__(self, shape=None, **options):
if shape is not None:
options['shape'] = shape
super(Reshape, self).__init__(**options)
def output(self, input_value):
""" Reshape the feature space for the input value.
Parameters
----------
input_value : array-like or Theano variable
"""
new_feature_shape = self.shape
input_shape = input_value.shape[0]
if new_feature_shape is None:
output_shape = input_value.shape[1:]
new_feature_shape = T.prod(output_shape)
output_shape = (input_shape, new_feature_shape)
else:
output_shape = (input_shape,) + new_feature_shape
return T.reshape(input_value, output_shape)
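# Shape sketch (illustrative): with ``shape=None`` an input of shape
# (batch, 8, 8) is flattened to (batch, 64); with ``shape=(4, 16)`` it
# becomes (batch, 4, 16).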
|
[
"mail@itdxer.com"
] |
mail@itdxer.com
|
b63a247f0dd508911578fa7843a6d083a5623821
|
ab79f8297105a7d412303a8b33eaa25038f38c0b
|
/mutif_all_vit_addons/vit_order_analysis/sale_order_analysis.py
|
c0ecfe10c045ce7610f50f2b9d062756034a7600
|
[] |
no_license
|
adahra/addons
|
41a23cbea1e35079f7a9864ade3c32851ee2fb09
|
c5a5678379649ccdf57a9d55b09b30436428b430
|
refs/heads/master
| 2022-06-17T21:22:22.306787
| 2020-05-15T10:51:14
| 2020-05-15T10:51:14
| 264,167,002
| 1
| 0
| null | 2020-05-15T10:39:26
| 2020-05-15T10:39:26
| null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
from openerp import tools
from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
import time
import logging
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class sale_order_analysis(osv.osv):
_name = "vit_order_analysis.sale_order_analysis"
_columns = {
'order_id' : fields.many2one('sale.order', 'Sale Order'),
'order_date' : fields.related('order_id', 'date_order' ,
type="char", relation="sale.order", string="Order Date", store=True),
'product_id' : fields.many2one('product.product', 'Product'),
'categ_id' : fields.related('product_id', 'categ_id' , type="many2one",
relation="product.category", string="Category", store=True),
'name_template' : fields.char("Product Name"),
'real_order' : fields.float("Real Order"),
'qty_order' : fields.float("Qty Order"),
'delivered' : fields.float("Delivered"),
'back_order' : fields.float("Back Order"),
'unfilled' : fields.float("Un-Filled"),
'partner_id' : fields.many2one('res.partner','Partner',readonly=True),
'age' : fields.integer('Age (Days)'),
'status' : fields.related('order_id', 'state' ,
type="char", string="Status", store=True),
'qty_invoice' : fields.float("Qty in Invoice"),
}
|
[
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] |
prog1@381544ba-743e-41a5-bf0d-221725b9d5af
|
86c3fa0419163a6f6c84f50c5591118e84995339
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_233/ch29_2020_03_04_11_52_21_685286.py
|
86e106c02ae7810c6ae8a20278ff5d4c498791af
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
inicial = float(input())
taxa = float(input())
aumento = 0
for i in range(24):
valor = inicial * taxa ** i
aumento += valor - inicial
    decimos = int(valor * 10 - int(valor) * 10)
    centesimos = int(valor * 100 - decimos * 10 - int(valor) * 100)
    print('%d,%d%d' % (int(valor), decimos, centesimos))
decimos = int(aumento * 10 - int(aumento) * 10)
centesimos = int(aumento * 100 - decimos * 10 - int(aumento) * 100)
print('%d,%d%d' % (int(aumento), decimos, centesimos))
|
[
"you@example.com"
] |
you@example.com
|
4f349f89eb2be66ea2a6218beb51cb31cef6cd36
|
8e62465c912ccbe41e322006a5c62b883e39143d
|
/src/boot/commands.py
|
91a37e3fd7c203c3e16b96d277ed893a5ada17b4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
JayHeng/NXP-MCUBootFlasher
|
b41dd2ffe0bf2cde61b9deacdb6353835e9e4538
|
a7643b2de6429481c3bf54bc2508d7e76c76562d
|
refs/heads/master
| 2022-05-27T06:26:49.532173
| 2022-03-21T07:23:01
| 2022-03-21T07:23:01
| 176,099,535
| 37
| 16
|
Apache-2.0
| 2022-03-14T06:28:23
| 2019-03-17T12:42:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
#! /usr/bin/env python
# Copyright 2021 NXP
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from collections import namedtuple
# Command constants.
kCommandTag_FlashEraseAll = 0x01
kCommandTag_FlashEraseRegion = 0x02
kCommandTag_ReadMemory = 0x03
kCommandTag_WriteMemory = 0x04
kCommandTag_FillMemory = 0x05
kCommandTag_FlashSecurityDisable = 0x06
kCommandTag_GetProperty = 0x07
kCommandTag_ReceiveSBFile = 0x08
kCommandTag_Execute = 0x09
kCommandTag_Call = 0x0a
kCommandTag_Reset = 0x0b
kCommandTag_SetProperty = 0x0c
kCommandTag_FlashEraseAllUnsecure = 0x0d
kCommandTag_FlashProgramOnce = 0x0e
kCommandTag_FlashReadOnce = 0x0f
kCommandTag_FlashReadResource = 0x10
kCommandTag_ConfigureMemory = 0x11
kCommandTag_ReliableUpdate = 0x12
kCommandTag_GenerateKeyBlob = 0x13
kCommandTag_KeyProvisoning = 0x15
Command = namedtuple('Command', 'tag, propertyMask, name')
Commands = {
kCommandTag_FlashEraseAll : Command(kCommandTag_FlashEraseAll, 0x00000001, 'flash-erase-all'),
kCommandTag_FlashEraseRegion : Command(kCommandTag_FlashEraseRegion, 0x00000002, 'flash-erase-region'),
kCommandTag_ReadMemory : Command(kCommandTag_ReadMemory, 0x00000004, 'read-memory'),
kCommandTag_WriteMemory : Command(kCommandTag_WriteMemory, 0x00000008, 'write-memory'),
kCommandTag_FillMemory : Command(kCommandTag_FillMemory, 0x00000010, 'fill-memory'),
kCommandTag_FlashSecurityDisable : Command(kCommandTag_FlashSecurityDisable, 0x00000020, 'flash-security-disable'),
kCommandTag_GetProperty : Command(kCommandTag_GetProperty, 0x00000040, 'get-property'),
kCommandTag_ReceiveSBFile : Command(kCommandTag_ReceiveSBFile, 0x00000080, 'receive-sb-file'),
kCommandTag_Execute : Command(kCommandTag_Execute, 0x00000100, 'execute'),
kCommandTag_Call : Command(kCommandTag_Call, 0x00000200, 'call'),
kCommandTag_Reset : Command(kCommandTag_Reset, 0x00000400, 'reset'),
kCommandTag_SetProperty : Command(kCommandTag_SetProperty, 0x00000800, 'set-property'),
kCommandTag_FlashEraseAllUnsecure : Command(kCommandTag_FlashEraseAllUnsecure, 0x00001000, 'flash-erase-all-unsecure'),
kCommandTag_FlashProgramOnce : Command(kCommandTag_FlashProgramOnce, 0x00002000, 'flash-program-once'),
kCommandTag_FlashReadOnce : Command(kCommandTag_FlashReadOnce, 0x00004000, 'flash-read-once'),
kCommandTag_FlashReadResource : Command(kCommandTag_FlashReadResource, 0x00008000, 'flash-read-resource'),
kCommandTag_ConfigureMemory : Command(kCommandTag_ConfigureMemory, 0x00010000, 'configure-memory'),
kCommandTag_ReliableUpdate : Command(kCommandTag_ReliableUpdate, 0x00100000, 'reliable-update'),
kCommandTag_GenerateKeyBlob : Command(kCommandTag_GenerateKeyBlob, 0x00200000, 'generate-key-blob'),
kCommandTag_KeyProvisoning : Command(kCommandTag_KeyProvisoning, 0x00400000, 'key-provisioning'),
}
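if __name__ == '__main__':
    # Illustrative lookup (not part of the original file): resolve a
    # command's name and property mask from its tag via the table above.
    cmd = Commands[kCommandTag_GetProperty]
    print(cmd.name, hex(cmd.propertyMask))  # -> get-property 0x40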
|
[
"jie.heng@nxp.com"
] |
jie.heng@nxp.com
|
747707c028e314a5eff983e4a9e35bede5aae0c0
|
fb133bb72cbc965f405e726796e02d01ef8905e2
|
/combinatorics/permutations.py
|
25809abf4e33cb533feaca93aa0fc997e499ce67
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
eklitzke/algorithms
|
a90b470c6ea485b3b6227fe74b23f40109cfd1f5
|
170b49c7aaeb06f0a91142b1c04e47246ec52fd1
|
refs/heads/master
| 2021-01-22T11:15:52.029559
| 2017-05-28T19:39:32
| 2017-05-28T19:39:32
| 92,677,550
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
"""Implementation of permutations.
This uses the "interleaving" technique, which I find the most intuitive. It's
not the most efficient algorithm.
"""
def interleave(x, xs):
"""Interleave x into xs."""
for pos in range(len(xs) + 1):
yield xs[:pos] + [x] + xs[pos:]
def permutations(xs):
"""Generate the permuations of xs."""
if len(xs) == 0:
yield []
else:
for subperm in permutations(xs[1:]):
for inter in interleave(xs[0], subperm):
yield inter
def list_permutations(xs):
"""Permutations as a list."""
return list(permutations(xs))
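if __name__ == '__main__':
    # Illustrative usage (not part of the original module): the six
    # permutations of [1, 2, 3], in interleaving order.
    print(list_permutations([1, 2, 3]))
    # -> [[1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 3, 2], [3, 1, 2], [3, 2, 1]]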
|
[
"evan@eklitzke.org"
] |
evan@eklitzke.org
|
0d4217a71c1443b49fbe2b08c76d0a241f2633fd
|
c237dfae82e07e606ba9385b336af8173d01b251
|
/lib/python/Products/ZCTextIndex/tests/mailtest.py
|
e8852d178ca4296bb828459acad38e1cdfcd1f25
|
[
"ZPL-2.0"
] |
permissive
|
OS2World/APP-SERVER-Zope
|
242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff
|
dedc799bd7eda913ffc45da43507abe2fa5113be
|
refs/heads/master
| 2020-05-09T18:29:47.818789
| 2014-11-07T01:48:29
| 2014-11-07T01:48:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,988
|
py
|
"""Test an index with a Unix mailbox file.
usage: python mailtest.py [options] <data.fs>
options:
-v -- verbose
Index Generation
-i mailbox
-n NNN -- max number of messages to read from mailbox
-t NNN -- commit a transaction every NNN messages (default: 1)
-p NNN -- pack <data.fs> every NNN messages (default: 500), and at end
-p 0 -- don't pack at all
-x -- exclude the message text from the data.fs
Queries
-q query
-b NNN -- return the NNN best matches (default: 10)
-c NNN -- context; if -v, show the first NNN lines of results (default: 5)
The script either indexes or queries depending on whether -q or -i is
passed as an option.
For -i mailbox, the script reads mail messages from the mailbox and
indexes them. It indexes one message at a time, then commits the
transaction.
For -q query, it performs a query on an existing index.
If both are specified, the index is performed first.
You can also interact with the index after it is completed. Load the
index from the database:
import ZODB
from ZODB.FileStorage import FileStorage
fs = FileStorage(<data.fs>
db = ZODB.DB(fs)
index = cn.open().root()["index"]
index.search("python AND unicode")
"""
import ZODB
import ZODB.FileStorage
from Products.ZCTextIndex.Lexicon import \
Lexicon, CaseNormalizer, Splitter, StopWordRemover
from Products.ZCTextIndex.ZCTextIndex import ZCTextIndex
from BTrees.IOBTree import IOBTree
from Products.ZCTextIndex.QueryParser import QueryParser
import sys
import mailbox
import time
def usage(msg):
print msg
print __doc__
sys.exit(2)
class Message:
total_bytes = 0
def __init__(self, msg):
subject = msg.getheader('subject', '')
author = msg.getheader('from', '')
if author:
summary = "%s (%s)\n" % (subject, author)
else:
summary = "%s\n" % subject
self.text = summary + msg.fp.read()
Message.total_bytes += len(self.text)
class Extra:
pass
def index(rt, mboxfile, db, profiler):
global NUM
idx_time = 0
pack_time = 0
start_time = time.time()
lexicon = Lexicon(Splitter(), CaseNormalizer(), StopWordRemover())
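    # ZCTextIndex normally reads its configuration off Zope objects; the bare
    # "Extra" attribute bags below stand in for them in this test script.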
extra = Extra()
extra.lexicon_id = 'lexicon'
extra.doc_attr = 'text'
extra.index_type = 'Okapi BM25 Rank'
caller = Extra()
caller.lexicon = lexicon
rt["index"] = idx = ZCTextIndex("index", extra, caller)
if not EXCLUDE_TEXT:
rt["documents"] = docs = IOBTree()
else:
docs = None
get_transaction().commit()
mbox = mailbox.UnixMailbox(open(mboxfile, 'rb'))
if VERBOSE:
print "opened", mboxfile
if not NUM:
NUM = sys.maxint
if profiler:
itime, ptime, i = profiler.runcall(indexmbox, mbox, idx, docs, db)
else:
itime, ptime, i = indexmbox(mbox, idx, docs, db)
idx_time += itime
pack_time += ptime
get_transaction().commit()
if PACK_INTERVAL and i % PACK_INTERVAL != 0:
if VERBOSE >= 2:
print "packing one last time..."
p0 = time.clock()
db.pack(time.time())
p1 = time.clock()
if VERBOSE:
print "pack took %s sec" % (p1 - p0)
pack_time += p1 - p0
if VERBOSE:
finish_time = time.time()
print
print "Index time", round(idx_time / 60, 3), "minutes"
print "Pack time", round(pack_time / 60, 3), "minutes"
print "Index bytes", Message.total_bytes
rate = (Message.total_bytes / idx_time) / 1024
print "Index rate %.2f KB/sec" % rate
print "Indexing began", time.ctime(start_time)
print "Indexing ended", time.ctime(finish_time)
print "Wall clock minutes", round((finish_time - start_time)/60, 3)
def indexmbox(mbox, idx, docs, db):
idx_time = 0
pack_time = 0
i = 0
while i < NUM:
_msg = mbox.next()
if _msg is None:
break
i += 1
msg = Message(_msg)
if VERBOSE >= 2:
print "indexing msg", i
i0 = time.clock()
idx.index_object(i, msg)
if not EXCLUDE_TEXT:
docs[i] = msg
if i % TXN_SIZE == 0:
get_transaction().commit()
i1 = time.clock()
idx_time += i1 - i0
if VERBOSE and i % 50 == 0:
print i, "messages indexed"
print "cache size", db.cacheSize()
if PACK_INTERVAL and i % PACK_INTERVAL == 0:
if VERBOSE >= 2:
print "packing..."
p0 = time.clock()
db.pack(time.time())
p1 = time.clock()
if VERBOSE:
print "pack took %s sec" % (p1 - p0)
pack_time += p1 - p0
return idx_time, pack_time, i
def query(rt, query_str, profiler):
idx = rt["index"]
docs = rt["documents"]
start = time.clock()
if profiler is None:
results, num_results = idx.query(query_str, BEST)
else:
if WARM_CACHE:
print "Warming the cache..."
idx.query(query_str, BEST)
start = time.clock()
results, num_results = profiler.runcall(idx.query, query_str, BEST)
elapsed = time.clock() - start
print "query:", query_str
print "# results:", len(results), "of", num_results, \
"in %.2f ms" % (elapsed * 1000)
tree = QueryParser(idx.lexicon).parseQuery(query_str)
qw = idx.index.query_weight(tree.terms())
for docid, score in results:
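        # Express the raw score as a percentage of the maximum possible
        # weight for this query, giving a relevance-style figure.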
scaled = 100.0 * score / qw
print "docid %7d score %6d scaled %5.2f%%" % (docid, score, scaled)
if VERBOSE:
msg = docs[docid]
ctx = msg.text.split("\n", CONTEXT)
del ctx[-1]
print "-" * 60
print "message:"
for l in ctx:
print l
print "-" * 60
def main(fs_path, mbox_path, query_str, profiler):
f = ZODB.FileStorage.FileStorage(fs_path)
db = ZODB.DB(f, cache_size=CACHE_SIZE)
cn = db.open()
rt = cn.root()
if mbox_path is not None:
index(rt, mbox_path, db, profiler)
if query_str is not None:
query(rt, query_str, profiler)
cn.close()
db.close()
f.close()
if __name__ == "__main__":
import getopt
NUM = 0
VERBOSE = 0
PACK_INTERVAL = 500
EXCLUDE_TEXT = 0
CACHE_SIZE = 10000
TXN_SIZE = 1
BEST = 10
CONTEXT = 5
WARM_CACHE = 0
query_str = None
mbox_path = None
profile = None
old_profile = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'vn:p:i:q:b:c:xt:w',
['profile=', 'old-profile='])
except getopt.error, msg:
usage(msg)
if len(args) != 1:
usage("exactly 1 filename argument required")
for o, v in opts:
if o == '-n':
NUM = int(v)
elif o == '-v':
VERBOSE += 1
elif o == '-p':
PACK_INTERVAL = int(v)
elif o == '-q':
query_str = v
elif o == '-i':
mbox_path = v
elif o == '-b':
BEST = int(v)
elif o == '-x':
EXCLUDE_TEXT = 1
elif o == '-t':
TXN_SIZE = int(v)
elif o == '-c':
CONTEXT = int(v)
elif o == '-w':
WARM_CACHE = 1
elif o == '--profile':
profile = v
elif o == '--old-profile':
old_profile = v
fs_path, = args
if profile:
import hotshot
profiler = hotshot.Profile(profile, lineevents=1, linetimings=1)
elif old_profile:
import profile
profiler = profile.Profile()
else:
profiler = None
main(fs_path, mbox_path, query_str, profiler)
if profile:
profiler.close()
elif old_profile:
import pstats
profiler.dump_stats(old_profile)
stats = pstats.Stats(old_profile)
stats.strip_dirs().sort_stats('time').print_stats(20)
|
[
"martin@os2world.com"
] |
martin@os2world.com
|
4f3c73e27f6f55f81e9f77fb85fc19fbed7f387b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02928/s256920562.py
|
c89b4ce6cab3fe975f18c176f86c82863230a7df
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
# 1st Japan's Strongest Programmer Student Championship - Qualifiers - Problem B: Kleene Inversion
"""
Count the inversions in the sequence formed by repeating the list k times.
First count the inversions purely within the given list, then add the
between-copy inversion count once for each ordered pair of copies
(1 + 2 + ... + (k-1) times in total).
"""
import sys
readline = sys.stdin.buffer.readline
def even(n): return 1 if n%2==0 else 0
mod = 10**9+7
n,k = map(int,readline().split())
lst1 = list(map(int,readline().split()))
fall = 0
fall_al = 0
for i in range(n-1):
for j in range(i+1,n):
if lst1[i] > lst1[j]:
fall += 1
lst1.sort(reverse=True)
for i in range(n-1):
for j in range(i+1,n):
if lst1[i] > lst1[j]:
fall_al += 1
def reydeoro(n):
return n*(n+1)//2
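# Answer = fall * k          (inversions inside each of the k copies)
#        + fall_al * C(k,2)  (inversions between every ordered pair of copies),
# where reydeoro(k-1) = (k-1)*k//2 = C(k,2) is the (k-1)-th triangular number.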
print((fall*k+fall_al*reydeoro(k-1))%mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
96fd7941d77aa61220eefc52fc789617803291c0
|
eaba398a0ca5414c10dd1890e662fdcd87e157b6
|
/tests/steps/basic.py
|
1ca0ec3034bcc7987e7c7eee0bf0ef965a096c27
|
[
"MIT"
] |
permissive
|
coddingtonbear/jirafs
|
a78f47e59836d9a6024bc287ea2a1247fb297e62
|
778cba9812f99eeaf726a77c1bca5ae2650a35e9
|
refs/heads/development
| 2023-06-16T00:06:33.262635
| 2022-09-20T04:06:26
| 2022-09-20T04:06:26
| 21,588,191
| 125
| 17
|
MIT
| 2023-06-02T05:48:53
| 2014-07-07T21:54:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,047
|
py
|
from __future__ import print_function
import collections
import json
import os
import shutil
import subprocess
from behave import *
from jira.client import JIRA
@given("jirafs is installed and configured")
def installed_and_configured(context):
pass
@given("a cloned ticket with the following fields")
def cloned_ticket_with_following_fields(context):
jira_client = JIRA(
{
"server": context.integration_testing["url"],
"verify": False,
"check_update": False,
},
basic_auth=(
context.integration_testing["username"],
context.integration_testing["password"],
),
)
issue_data = {
"project": {"key": context.integration_testing["project"]},
"issuetype": {
"name": "Task",
},
}
for row in context.table:
issue_data[row[0]] = json.loads(row[1])
issue = jira_client.create_issue(issue_data)
if not "cleanup_steps" in context:
context.cleanup_steps = []
context.cleanup_steps.append(lambda context: issue.delete())
context.execute_steps(
u"""
when the command "jirafs clone {url}" is executed
and we enter the ticket folder for "{url}"
""".format(
url=issue.permalink()
)
)
@when('the command "{command}" is executed')
def execute_command(context, command):
command = command.format(**context.integration_testing)
env = os.environ.copy()
env["JIRAFS_GLOBAL_CONFIG"] = context.integration_testing["config_path"]
env["JIRAFS_ALLOW_USER_INPUT__BOOL"] = "0"
proc = subprocess.Popen(
command.encode("utf-8"),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
stdout, stderr = proc.communicate()
if not hasattr(context, "executions"):
context.executions = collections.deque()
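    # appendleft keeps the most recent execution at index 0, which is what
    # the assertion steps below read via context.executions[0].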
context.executions.appendleft(
{
"command": command,
"stdout": stdout.decode("utf-8"),
"stderr": stderr.decode("utf-8"),
"return_code": proc.returncode,
}
)
@when('we enter the ticket folder for "{url}"')
def enter_ticket_folder(context, url):
url = url.format(**context.integration_testing)
os.chdir(os.path.join(os.getcwd(), url.split("/")[-1]))
@then('the directory will contain a file named "{filename}"')
def directory_contains_file(context, filename):
assert filename in os.listdir("."), "%s not in folder" % filename
@then('the output will contain the text "{expected}"')
def output_will_contain(context, expected):
expected = expected.format(**context.integration_testing)
assert expected in context.executions[0]["stdout"], "%s not in %s" % (
expected,
context.executions[0]["stdout"],
)
@step("print execution results")
def print_stdout(context):
print(json.dumps(context.executions[0], indent=4, sort_keys=True))
@step("debugger")
def debugger(context):
import ipdb
ipdb.set_trace()
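# A hypothetical feature-file snippet these steps would back (illustrative
# only; the field name, value, and expected output text are assumptions):
#
#   Scenario: Cloning a ticket pulls its fields to disk
#     Given jirafs is installed and configured
#     And a cloned ticket with the following fields
#       | summary | "Example ticket" |
#     When the command "jirafs status" is executed
#     Then the output will contain the text "Example ticket"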
|
[
"me@adamcoddington.net"
] |
me@adamcoddington.net
|