blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25a966ceab5d2deb560acac18c7d2c9729e93236
|
be999cad30c28d0370a57b73057cb734fdffbf23
|
/workshop_corona19/corona19_07_여행력.py
|
899ed6dd8d5552a0e7aa1dc68988569ffc65f5fa
|
[] |
no_license
|
choicoding1026/ML
|
341181d5b1088f48fa0336a6db773ed7cfbecc21
|
69db5fcd559a7a41ce9fb0ece543d3cf9b44d5bb
|
refs/heads/master
| 2023-01-01T07:35:09.655664
| 2020-10-15T07:41:50
| 2020-10-15T07:41:50
| 303,907,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,833
|
py
|
'''
Seoul COVID-19 data collection and analysis
26. Travel history (여행력)
'''
import pandas as pd
import numpy as np

file_name = "seoul_corona_10_11_.csv"
df = pd.read_csv(file_name, encoding="utf-8")  # encoding handles Korean text

# 1. Sort ascending by the serial-number column ('연번').
#    BUG FIX: the original passed ascending=False, contradicting both this
#    stated intent and the printed label below.
df = df.sort_values(by="연번", ascending=True)
print("1. '연번' 기준으로 오름차순 정렬:\n", df.head())

# 2. Frequency of each confirmation date ('확진일') — shows on which date the
#    most cases were confirmed. value_counts() already sorts descending.
print("2. 확진일의 빈도수: \n", df["확진일"].value_counts())

# 3. Add a '확진일자' (confirmation date) column in 2020-10-11 format.
#    The existing '확진일' values are strings like "10.11", so:
#      1) "10.11" -> "10-11"    2) prefix "2020-"    3) pd.to_datetime
#    regex=False: '.' must be replaced literally, not treated as a regex
#    wildcard (the regex default would clobber every character).
df["확진일자"] = pd.to_datetime("2020-" + df["확진일"].str.replace(".", "-", regex=False))
print("3. '확진일자' 컬럼 추가: \n", df.head())

# 4. Month column derived from the datetime column.
df["월"] = df["확진일자"].dt.month
print("4. '월' 컬럼 추가: \n", df.head())

# 5. ISO week-of-year column (which week of the year each case falls in).
df["주"] = df["확진일자"].dt.isocalendar().week
print("5. '주' 컬럼 추가: \n", df.head())

# 6. 'MM-DD' column: last five characters of the ISO date string.
df["월-일"] = df["확진일자"].astype(str).map(lambda x: x[-5:])
print("6. '월-일' 컬럼 추가: \n", df.head())
print("6. '월-일' 컬럼 추가: \n", df.tail())

########################################################################
# 26. Travel history
print(df["여행력"])
print(df["여행력"].unique())
print(df["여행력"].value_counts())

# Normalization plan:
#   1. map "-" (no travel history) to NaN
#   2. collapse variant names to one common label:
#        '아랍에미리트', 'UAE'                                   -> 아랍에미리트
#        '중국 청도','우한교민','우한 교민','중국 우한시','중국' -> 중국
#        European countries                                      -> 유럽
#        South American countries                                -> 남미

## Normalize names into a new '해외' column, then visualize.
df["해외"] = df["여행력"]
# na=False keeps the masks boolean: rows whose travel history is NaN must
# yield False rather than NaN (a NaN-containing mask breaks .loc).
print(df["해외"].str.contains('아랍에미리트|UAE', na=False))
df.loc[df["해외"].str.contains('아랍에미리트|UAE', na=False), "해외"] = "아랍에미리트"
df.loc[df["해외"].str.contains('우한|중국', na=False), "해외"] = "중국"
# BUG FIX: the original pattern used '독일,스페인' (comma) and '영국\폴란드'
# (backslash) instead of '|' separators, so those four countries never
# matched as separate alternatives.
df.loc[df["해외"].
       str.contains('체코|헝가리|오스트리아|이탈리아|프랑스|모로코|독일|스페인|영국|폴란드|터키|아일랜드', na=False),
       "해외"] = "유럽"
df.loc[df["해외"].str.contains('브라질|아르헨티아|칠레|볼리비아|멕시코|페루', na=False), "해외"] = "남미"

## Map "-" to NaN.
df["해외"] = df["해외"].replace("-", np.nan)
print(df["해외"].unique())
print(df["해외"].value_counts())

# Visualize only the top 15 origins.
import matplotlib.pyplot as plt
plt.rc("font", family="Malgun Gothic")  # font that can render Korean labels
plt.rc("ytick", labelsize=8)
plt.rc("xtick", labelsize=8)
plt.style.use("fivethirtyeight")
g = df["해외"].value_counts().head(15).sort_values().plot.barh(title="xxxx", figsize=(16, 4))
plt.show()
|
[
"justin6130@gmail.com"
] |
justin6130@gmail.com
|
156d6f7fc512c8f3ba50b7135ffd548e1d30f08e
|
8e75843fc2b27d50e1f8a95f0367a3a96a3dae30
|
/Code/python_quote.py
|
a1cb9f69bbba8935805a704b36ca94ea7291b786
|
[] |
no_license
|
franklin-phan/CS-2-Tweet-Generator
|
5f122e2aab7a6ee749feb888d094c8057671a7ee
|
fedb9ba46be3f31a1586f8d64986ec92c58296b6
|
refs/heads/master
| 2021-07-14T14:37:13.404088
| 2020-03-06T07:08:03
| 2020-03-06T07:08:03
| 236,772,553
| 0
| 0
| null | 2021-03-20T02:58:02
| 2020-01-28T15:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
import random

quotes = ("It's just a flesh wound.",
          "He's not the Messiah. He's a very naughty boy!",
          "THIS IS AN EX-PARROT!!")


def random_python_quote():
    """Return one quote chosen uniformly at random from `quotes`."""
    # random.choice is the idiomatic form of randint-into-index.
    return random.choice(quotes)


if __name__ == '__main__':
    quote = random_python_quote()
    # BUG FIX: the original ended with a bare `print` (a no-op name
    # reference in Python 3) and never printed the chosen quote.
    print(quote)
|
[
"franklin.phan123@gmail.com"
] |
franklin.phan123@gmail.com
|
7e6aaa5e69e03122dd3e0dec7476a9bc38d155c2
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Difference/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Difference_MovingMedian_7__20.py
|
3857b72f05dfa8e9071a105b318bb037455121e2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Auto-generated benchmark case: N=128 samples, daily frequency, seed 0,
# MovingMedian trend, cycle length 7, Difference transform, no noise
# (sigma=0.0), 20 exogenous series, AR order 0.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
e39687a83d7901840b63d3e947089e5c408f944d
|
11137bde91389c04a95df6f6fdaf64f7f49f5f80
|
/secondTest/introduction_MIT2/5_1.py
|
47272dfbccb9fef4086d3fd1feaa61aff6aa3068
|
[] |
no_license
|
starschen/learning
|
cf3c5a76c867567bce73e9cacb2cf0979ba053d9
|
34decb8f9990117a5f40b8db6dba076a7f115671
|
refs/heads/master
| 2020-04-06T07:02:56.444233
| 2016-08-24T08:11:49
| 2016-08-24T08:11:49
| 39,417,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#encoding:utf8
def findDivisors(n1, n2):
    """Return a tuple of every common divisor of n1 and n2, ascending."""
    limit = min(n1, n2)
    return tuple(i for i in range(1, limit + 1)
                 if n1 % i == 0 and n2 % i == 0)
# Sum of all common divisors of 20 and 200.
divisors=findDivisors(20,200)
# print divisors
# IDIOM: replaced the manual accumulation loop with the builtin sum().
total = sum(divisors)
# print total
def findExtremeDivisors(n1, n2):
    """Return (smallest, largest) common divisor of n1 and n2 greater than 1.

    Returns (None, None) when 1 is the only common divisor.
    """
    minVal, maxVal = None, None
    for i in range(2, min(n1, n2) + 1):
        if n1 % i == 0 and n2 % i == 0:
            # IDIOM FIX: compare against None with `is`, not `==`.
            if minVal is None:
                minVal = i  # first hit is the smallest
            maxVal = i      # last hit seen so far is the largest
    return (minVal, maxVal)
# minVal,maxVal=findExtremeDivisors(100,200)
# print 'minVal=',minVal
# print 'maxVal=',maxVal
# PORTABILITY FIX: parenthesized print is valid in both Python 2 and 3
# and produces identical output for a single argument; the original bare
# print statement is a SyntaxError under Python 3.
print(findExtremeDivisors(100,200))
|
[
"stars_chenjiao@163.com"
] |
stars_chenjiao@163.com
|
b3659978c254246c6d5f9ff0bb961a8029d82c3e
|
30e2a85fc560165a16813b0486a862317c7a486a
|
/tensorflow/test/misc/graph.py
|
f141c134d6cf1435e3b25c0f9515954553e7ee26
|
[] |
no_license
|
muryliang/python_prac
|
2f65b6fdb86c3b3a44f0c6452a154cd497eb2d01
|
0301e8f523a2e31e417fd99a968ad8414e9a1e08
|
refs/heads/master
| 2021-01-21T11:03:48.397178
| 2017-09-18T04:13:27
| 2017-09-18T04:13:27
| 68,801,688
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
import tensorflow as tf

# Demo: two independent graphs, each defining its own variable "v" with a
# different initializer (zeros in g1, ones in g2). Uses the TF1 API.
g1 = tf.Graph()
with g1.as_default():
    v = tf.get_variable(
        "v", initializer=tf.zeros_initializer(shape=[1]))

g2 = tf.Graph()
with g2.as_default():
    v = tf.get_variable(
        "v", initializer=tf.ones_initializer(shape=[1]))

with tf.Session(graph=g1) as sess:
    tf.initialize_all_variables().run()
    with tf.variable_scope("", reuse=True):
        # BUG FIX: the original line was missing its closing parenthesis
        # (a SyntaxError). Written as a print() call, which is valid and
        # equivalent under both Python 2 and 3 for a single argument.
        print(sess.run(tf.get_variable("v")))
|
[
"muryliang@gmail.com"
] |
muryliang@gmail.com
|
ad62f9feb1c07f0e0d3a9a0db8defb334439b636
|
a2fdcd5252741bdd3ad96f20944c07d80bd57dc8
|
/class_sample.py
|
ca23e1669eeab4e7a15a44c5a304dc1c92735155
|
[] |
no_license
|
chaossky/Python2019
|
7cd11aab7cecf23acb42b7635f8bfb506555c856
|
fd63563f6a175a6aef1f3248aefb3f754f6658e1
|
refs/heads/master
| 2021-07-31T09:15:14.430835
| 2019-08-16T12:13:45
| 2019-08-16T12:13:45
| 200,347,544
| 0
| 0
| null | 2019-08-05T21:54:10
| 2019-08-03T07:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
class Ball:
    """A ball with a color and a speed (class-level defaults: "" and 0)."""

    color = ""
    speed = 0

    def setSpeed(self, value):
        # Store the speed on the instance, shadowing the class default.
        self.speed = value
# Demo: two Ball instances hold independent per-instance state.
ball01=Ball()
ball01.color="Red"
ball01.setSpeed(10)
ball02=Ball()
ball02.color="Blue"
ball02.setSpeed(20)
# %s also formats the integer speeds via str().
print("Ball01 color:%s" %ball01.color)
print("Ball01 speed:%s" %ball01.speed)
print("Ball02 color:%s" %ball02.color)
print("Ball02 speed:%s" %ball02.speed)
|
[
"user@email.mail"
] |
user@email.mail
|
38910cfa0d829421a6d14748e1081970a606abe0
|
2734b77a68f6d7e22e8b823418ad1c59fe1a34af
|
/opengever/dossier/upgrades/20170307184059_reindex_searchable_text_for_dossier_templates/upgrade.py
|
436c02f98340c2800590927a5f6bf366f0ad4ab2
|
[] |
no_license
|
4teamwork/opengever.core
|
5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1
|
a01bec6c00d203c21a1b0449f8d489d0033c02b7
|
refs/heads/master
| 2023-08-30T23:11:27.914905
| 2023-08-25T14:27:15
| 2023-08-25T14:27:15
| 9,788,097
| 19
| 8
| null | 2023-09-14T13:28:56
| 2013-05-01T08:28:16
|
Python
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
from ftw.upgrade import UpgradeStep


class ReindexSearchableTextForDossierTemplates(UpgradeStep):
    """Reindex SearchableText for dossier templates.
    """
    def __call__(self):
        # Apply this step's generated upgrade profile, then rebuild only
        # the SearchableText index for all dossier-template objects.
        self.install_upgrade_profile()
        self.catalog_reindex_objects(
            {'portal_type': 'opengever.dossier.dossiertemplate'},
            idxs=['SearchableText'])
|
[
"david.erni@4teamwork.ch"
] |
david.erni@4teamwork.ch
|
445c2230f975dd0e1e6f4f7c980b766500609f3a
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/mirrativ.py
|
0a8ee0c3a52eeff28f2d9e679e0ae5913bc34970
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
from .common import InfoExtractor
from ..utils import (
ExtractorError,
dict_get,
traverse_obj,
try_get,
)
class MirrativBaseIE(InfoExtractor):
    def assert_error(self, response):
        """Raise ExtractorError when the API response carries an error message."""
        message = traverse_obj(response, ('status', 'error'))
        if not message:
            return
        raise ExtractorError('Mirrativ says: %s' % message, expected=True)
class MirrativIE(MirrativBaseIE):
    """Extract a single Mirrativ live stream or its archive."""
    IE_NAME = 'mirrativ'
    _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/live/(?P<id>[^/?#&]+)'
    # CONSISTENCY FIX: the test list must be named _TESTS (leading
    # underscore, as MirrativUserIE below already does); a bare TESTS
    # attribute is ignored by the extractor test runner.
    _TESTS = [{
        'url': 'https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw',
        'info_dict': {
            'id': 'UQomuS7EMgHoxRHjEhNiHw',
            'title': 'ねむいぃ、。『参加型』🔰jcが初めてやるCOD✨初見さん大歓迎💗',
            'is_live': True,
            'description': 'md5:bfcd8f77f2fab24c3c672e5620f3f16e',
            'thumbnail': r're:https?://.+',
            'uploader': '# あ ち ゅ 。💡',
            'uploader_id': '118572165',
            'duration': None,
            'view_count': 1241,
            'release_timestamp': 1646229192,
            'timestamp': 1646229167,
            'was_live': False,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://mirrativ.com/live/POxyuG1KmW2982lqlDTuPw',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage('https://www.mirrativ.com/live/%s' % video_id, video_id)
        live_response = self._download_json(f'https://www.mirrativ.com/api/live/live?live_id={video_id}', video_id)
        self.assert_error(live_response)
        # Prefer the archive URL; fall back to the live HLS stream.
        hls_url = dict_get(live_response, ('archive_url_hls', 'streaming_url_hls'))
        is_live = bool(live_response.get('is_live'))
        if not hls_url:
            raise ExtractorError('Neither archive nor live is available.', expected=True)
        formats = self._extract_m3u8_formats(
            hls_url, video_id,
            ext='mp4', entry_protocol='m3u8_native',
            m3u8_id='hls', live=is_live)
        return {
            'id': video_id,
            # BUG FIX: _search_regex requires the *name* argument; the
            # original omitted it (TypeError at runtime). default=None
            # keeps the `or` fallback chain working when the regex misses.
            'title': self._og_search_title(webpage, default=None) or self._search_regex(
                r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage, 'title', default=None) or live_response.get('title'),
            'is_live': is_live,
            'description': live_response.get('description'),
            'formats': formats,
            'thumbnail': live_response.get('image_url'),
            'uploader': traverse_obj(live_response, ('owner', 'name')),
            'uploader_id': traverse_obj(live_response, ('owner', 'user_id')),
            # Duration is only meaningful once the broadcast has ended.
            'duration': try_get(live_response, lambda x: x['ended_at'] - x['started_at']) if not is_live else None,
            'view_count': live_response.get('total_viewer_num'),
            'release_timestamp': live_response.get('started_at'),
            'timestamp': live_response.get('created_at'),
            'was_live': bool(live_response.get('is_archive')),
        }
class MirrativUserIE(MirrativBaseIE):
    """Playlist extractor for all broadcasts in a Mirrativ user's history."""
    IE_NAME = 'mirrativ:user'
    _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/user/(?P<id>\d+)'
    _TESTS = [{
        # Live archive is available up to 3 days
        # see: https://helpfeel.com/mirrativ/%E9%8C%B2%E7%94%BB-5e26d3ad7b59ef0017fb49ac (Japanese)
        'url': 'https://www.mirrativ.com/user/110943130',
        'note': 'multiple archives available',
        'only_matching': True,
    }]
    def _entries(self, user_id):
        # Page through the user's live history until the API returns no
        # next_page (or an empty page), yielding one url_result per live.
        page = 1
        while page is not None:
            api_response = self._download_json(
                f'https://www.mirrativ.com/api/live/live_history?user_id={user_id}&page={page}', user_id,
                note=f'Downloading page {page}')
            self.assert_error(api_response)
            lives = api_response.get('lives')
            if not lives:
                break
            for live in lives:
                if not live.get('is_archive') and not live.get('is_live'):
                    # neither archive nor live is available, so skip it
                    # or the service will ban your IP address for a while
                    continue
                live_id = live.get('live_id')
                url = 'https://www.mirrativ.com/live/%s' % live_id
                yield self.url_result(url, video_id=live_id, video_title=live.get('title'))
            page = api_response.get('next_page')
    def _real_extract(self, url):
        user_id = self._match_id(url)
        user_info = self._download_json(
            f'https://www.mirrativ.com/api/user/profile?user_id={user_id}', user_id,
            note='Downloading user info', fatal=False)
        # NOTE(review): with fatal=False this download can yield a false-y
        # value on failure, in which case user_info.get(...) below would
        # raise AttributeError — confirm against _download_json's contract.
        self.assert_error(user_info)
        return self.playlist_result(
            self._entries(user_id), user_id,
            user_info.get('name'), user_info.get('description'))
|
[
"noreply@github.com"
] |
yt-dlp.noreply@github.com
|
4403759cc3a6535b10eb3e09928d293cb9555aad
|
bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d
|
/problemSet/591C_Median_Smoothing.py
|
9436f6108c5e3ab88ea40e68a7cd92378f7749a0
|
[] |
no_license
|
yamaton/codeforces
|
47b98b23da0a3a8237d9021b0122eaa498d98628
|
e0675fd010df852c94eadffdf8b801eeea7ad81b
|
refs/heads/master
| 2021-01-10T01:22:02.338425
| 2018-11-28T02:45:04
| 2018-11-28T03:21:45
| 45,873,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
"""
Codeforces Round #327 (Div. 2)
Problem 591 C. Median Smoothing
@author yamaton
@date 2015-11-06
"""
def reduce_consec(iterable):
    """
    Collapse runs of consecutive integers into (start, length) pairs.

    [1, 2, 3, 6, 7, 9, 10, 11, 12, 13, 20]
        --> (1, 3), (6, 2), (9, 5), (20, 1)

    :param iterable: iterable of int
    :return: generator of (run_start, run_length) pairs
    """
    run = []
    for value in iterable:
        if run and run[-1] + 1 != value:
            # Current run broken: emit it, then start a fresh one.
            yield (run[0], len(run))
            run = [value]
        else:
            run.append(value)
    if run:
        # Flush the final run (if the input was non-empty).
        yield (run[0], len(run))
def alternating_indices(xs):
    """Yield each interior index i where xs[i] differs from both neighbours."""
    for i in range(1, len(xs) - 1):
        if xs[i] != xs[i - 1] and xs[i] != xs[i + 1]:
            yield i
def alternating_position_and_length(xs):
    # NOTE(review): dead stub — iterates xs doing nothing and implicitly
    # returns None. Nothing in this file calls it; solve() uses
    # alternating_indices + reduce_consec instead.
    for x in xs:
        pass
def solve(xs, n):
    # O(n) solution: find the maximal alternating (zigzag) runs, then
    # settle each run toward its stable anchors. Returns
    # (number_of_smoothing_passes, final_sequence).
    # zigzag = [] # alternating part
    # for i, x in enumerate(xs):
    #     if i == 0 or i == n - 1:
    #         continue
    #     if xs[i-1] != x and xs[i+1] != x:
    #         zigzag.append(i)
    zigzag = alternating_indices(xs)
    zigzag_start_length_pairs = reduce_consec(zigzag)
    count = 0
    result = xs[:]
    # WARNING: the loop rebinds n (now the run length), shadowing the
    # parameter n — safe only because the parameter is not used afterwards.
    for (i, n) in zigzag_start_length_pairs:
        n_half = n // 2
        # A run of length n stabilises after ceil(n/2) smoothing passes.
        count = max(count, (n + 1) // 2)
        if n % 2 == 0:
            # Even run: left half copies the left anchor xs[i-1], right
            # half copies the opposite value (the right anchor).
            for j in range(i, i + n_half):
                result[j] = xs[i-1]
            for j in range(i + n_half, i + n):
                result[j] = 1 - xs[i-1]
        else:
            # Odd run: both anchors agree, the whole run takes their value.
            for j in range(i, i + n):
                result[j] = xs[i-1]
    return count, result
def solve_bruteforce(xs, n):
    """Apply median smoothing repeatedly until reaching a fixed point.

    Returns (number_of_passes, fixed_point_tuple); returns (-1, state) if
    a non-trivial cycle were ever detected (the original notes it cannot
    actually happen).
    """
    def smooth(state):
        # One pass: endpoints are kept; every interior element becomes the
        # median of itself and its neighbours (for 0/1 values: 1 iff at
        # least two of the three are 1).
        out = []
        for i in range(n):
            if i == 0 or i == n - 1:
                out.append(state[i])
            else:
                out.append(int(state[i - 1] + state[i] + state[i + 1] >= 2))
        return tuple(out)

    current = tuple(xs)
    seen = {current}
    steps = 0
    smoothed = smooth(current)
    while smoothed != current:
        if smoothed in seen:
            # Defensive cycle guard, mirroring the original.
            return -1, current
        current = smoothed
        seen.add(current)
        steps += 1
        smoothed = smooth(current)
    return steps, current
def main():
    # Read n, then the n-element 0/1 sequence, from stdin; print the
    # number of smoothing passes needed and the final stable sequence.
    n = int(input())
    xs = [int(i) for i in input().strip().split()]
    count, seq = solve(xs, n)
    print(count)
    # NOTE: the generator variable named n shadows the count read above —
    # confusing but harmless since n is not needed again.
    print(' '.join(str(n) for n in seq))
if __name__ == '__main__':
    main()
|
[
"yamaton@gmail.com"
] |
yamaton@gmail.com
|
c8bca5286d0a9ad049f59155f5a9114d8f06dd8c
|
b92eee41d665314bc42043d1ff46c608af5ffdfd
|
/sesion_3/prog.4.py
|
eda17bf266e753571861d3d45fc42db362032da6
|
[] |
no_license
|
badillosoft/python-economics
|
40efe8326558a8fb93f84fdbd2137428844ee5f3
|
82af43c7a47297ce186dc0e23e30620d46e6693a
|
refs/heads/master
| 2021-01-11T18:55:15.762752
| 2017-05-09T01:15:59
| 2017-05-09T01:15:59
| 79,656,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
from openpyxl import load_workbook
from geg import *

# Load (X, Y) points from the active sheet of puntos.xlsx and print
# f(x, y) = x^2 + y^2 for each row. Python 2 script (print statement).
wb = load_workbook("puntos.xlsx")
ws = wb.active
# assumes automatic_load_data (from geg) returns mappings keyed "X"/"Y",
# reading from cell A2 downwards — TODO confirm against geg's docs.
puntos = automatic_load_data(ws, "A2")
def f(x, y):
    # Squared Euclidean norm of (x, y).
    return x**2 + y**2
for p in puntos:
    x = p["X"]
    y = p["Y"]
    z = f(x, y)
    print "%f, %f, %f" %(x, y, z)
|
[
"kmmx@hsoft.local"
] |
kmmx@hsoft.local
|
713e56b0dfc1b28ab55d67e75f8720cff692e593
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=49/params.py
|
fcbfbdfe45d4c06dbfe8c250d00b2d4aa9ae3364
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
# Auto-generated parameter record for one scheduling experiment
# (MRSP resource protocol, RUN scheduler, trial 49). The file body is a
# bare Python dict literal — presumably read back with
# eval()/ast.literal_eval() by the experiment tooling; confirm against
# the scripts that load params.py.
{'cpus': 4,
 'duration': 30,
 'final_util': '3.557024',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'RUN',
 'trial': 49,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
c0cf0962495662ae563a1a6b07d1ec6c2b8f5619
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/9549174/snippet.py
|
312f68f442a31f1ee8acc642c7594905cdeb8ac0
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
import random
import sys
def print_grid(grid):
    # Python 2 print statement: builds the row separator
    # ("----+----+----+----") and joins the formatted rows with it.
    # Empty (zero) cells render as four spaces, tiles as %4d.
    print ("\n%s\n" % "+".join([('-' * 4)] * 4)).join(
        ["|".join(["%4d" % item if item > 0 else " " * 4 for item in line]) for line in grid])
def get_available_cells(grid):
    """Return (row, col) coordinates of every empty (zero) cell."""
    cells = []
    for row in range(4):
        for col in range(4):
            if not grid[row][col]:
                cells.append((row, col))
    return cells
def insert_new_item(grid):
    """Drop a new tile (2 with probability 0.9, else 4) on a random empty cell.

    Returns False when the board is full, True otherwise.
    """
    free = get_available_cells(grid)
    if not free:
        return False
    row, col = random.choice(free)
    grid[row][col] = 4 if random.random() >= 0.9 else 2
    return True
def is_legal_position(y, x):
    """True when (y, x) lies on the 4x4 board (both in 0..3)."""
    return min(y, x) >= 0 and max(y, x) <= 3
def get_next_position(y, x, offset):
    """Return the cell one step from (y, x) along *offset*.

    *offset* is a (dy, dx) pair. PORTABILITY FIX: the original used
    Python-2-only tuple parameter unpacking in the signature; unpacking in
    the body keeps every call site unchanged while being valid Python 3.
    """
    y_offset, x_offset = offset
    return y + y_offset, x + x_offset
def get_next_nonzero_cell(grid, y, x, direction):
    """Walk from (y, x) along *direction* to the next non-empty cell.

    Returns that cell's (row, col), or (None, None) when the walk leaves
    the board first. *direction* is a (dy, dx) pair. PORTABILITY FIX:
    Python-2-only tuple parameters rewritten so the function is valid
    Python 3; all call sites are unchanged.
    """
    next_y, next_x = get_next_position(y, x, direction)
    if not is_legal_position(next_y, next_x):
        return None, None
    if grid[next_y][next_x]:
        return next_y, next_x
    # Empty cell: keep walking recursively (at most 4 steps on this board).
    return get_next_nonzero_cell(grid, next_y, next_x, direction)
def merge_cells(grid, (write_y, write_x), (read_y, read_x), direction, virtual, winning=False):
    # Recursively compacts and merges one row/column, scanning along
    # *direction* with a write cursor and a read cursor. When *virtual* is
    # true it only answers "would anything move?" without touching the
    # grid; otherwise it mutates the grid and returns whether a tile above
    # 1024 (i.e. 2048) was ever produced.
    # NOTE(review): Python-2-only tuple parameters; left byte-identical.
    if (write_y, write_x) == (read_y, read_x):
        # Read cursor may not sit on the write cell: advance it first.
        read_y, read_x = get_next_nonzero_cell(grid, read_y, read_x, direction)
    if not is_legal_position(write_y, write_x) or not is_legal_position(read_y, read_x):
        return winning if not virtual else False
    if grid[write_y][write_x]:
        if grid[read_y][read_x] == grid[write_y][write_x]:
            if virtual:
                return True
            # Equal tiles: merge the read tile into the write cell.
            grid[write_y][write_x] *= 2
            grid[read_y][read_x] = 0
            return merge_cells(grid, get_next_position(write_y, write_x, direction),
                get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual,
                winning or grid[write_y][write_x] > 1024)
        else:
            # Different tiles: advance only the write cursor.
            return merge_cells(grid, get_next_position(write_y, write_x, direction),
                (read_y, read_x), direction, virtual, winning)
    else:
        if virtual:
            return True
        # Empty write cell: slide the read tile into it.
        grid[write_y][write_x] = grid[read_y][read_x]
        grid[read_y][read_x] = 0
        return merge_cells(grid, (write_y, write_x),
            get_next_nonzero_cell(grid, read_y, read_x, direction), direction, virtual, winning)
def get_movable_directions(grid):
    """List the wasd keys whose move would change the board (virtual probes)."""
    movable = []
    for key in ("a", "d", "w", "s"):
        if move(grid, key, True):
            movable.append(key)
    return movable
def move(grid, direction, virtual):
    """Perform (or, when *virtual*, merely probe) a move on the grid.

    Keys: a=left, d=right, w=up, s=down. Returns the any() of the four
    row/column merges; an unrecognised key falls through to None, exactly
    as the original if/elif chain did.
    """
    # Per key: a function mapping lane index -> start cell, plus the step
    # direction the merge scan walks in.
    lanes = {
        "a": (lambda i: (i, 0), (0, 1)),
        "d": (lambda i: (i, 3), (0, -1)),
        "w": (lambda i: (0, i), (1, 0)),
        "s": (lambda i: (3, i), (-1, 0)),
    }
    if direction not in lanes:
        return None
    start_of, step = lanes[direction]
    # The list (not a generator) is essential: every lane's merge_cells
    # must run for its side effects, without any() short-circuiting.
    return any([merge_cells(grid, start_of(i), start_of(i), step, virtual) for i in range(4)])
# Interactive 2048 game loop (Python 2: print statements, reads moves
# from stdin). Two tiles are inserted before the first turn.
grid = [[0 for x in range(4)] for y in range(4)]
insert_new_item(grid)
while True:
    insert_new_item(grid)
    print_grid(grid)
    movable_directions = get_movable_directions(grid)
    if len(movable_directions) == 0:
        # No move changes the board: game over.
        print "You lose!"
        break
    direction_name = sys.stdin.readline().strip().lower()
    while direction_name not in movable_directions:
        print "Invalid direction."
        direction_name = sys.stdin.readline().strip().lower()
    # Non-virtual move returns the "winning" flag (a tile above 1024).
    if move(grid, direction_name, False):
        print_grid(grid)
        print "You win!"
        break
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
87481f971aab378f0cea55dabcddcedecfdce3f5
|
4c704c60dcd8bba658f4e0cdc85f299c01f2058e
|
/002/for1.py
|
9727ffbd6c0fcc9cbb45013575fc2759408bb8fa
|
[] |
no_license
|
steveq1/py2016
|
acd6c80595637fb3be7f1f3378bbdca8d2dcf8cc
|
fb9b2708d49790efe03d84315442d7e93a7cc6d6
|
refs/heads/master
| 2021-01-17T13:00:25.787387
| 2016-07-18T16:28:07
| 2016-07-18T16:28:07
| 63,125,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Print x for 0..9 but stop once x reaches 3, recording in a flag whether
# the loop was left early.
#
# BUG FIX: the original assigned is_break = False only on the break path
# and then tested the flag although it was never initialized (NameError
# when read before a break), and followed the test with a `break` outside
# any loop (a SyntaxError). The flag is now initialized up front and set
# to True exactly when the loop exits early; the illegal outer break is
# gone.
is_break = False
for x in range(0, 10):
    if x >= 3:
        is_break = True
        break
    print('x={0}'.format(x))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
19a873e4e3896df4714cebbd65d8a78cd02da923
|
773aef0de494fde01ea5a444b0cfdf57deb88b10
|
/puchowebapp/urls.py
|
925ea082cf610643223dc59a8d2e26160968a8dc
|
[] |
no_license
|
gk90731/Pucho_Web
|
44c509f92950dc7f35cd5dfd6cf3e42fb6b2d720
|
041239934cd9303120e67d613b2ae90f23c17f20
|
refs/heads/master
| 2022-12-10T19:47:43.400760
| 2020-04-04T14:28:29
| 2020-04-04T14:28:29
| 253,017,818
| 0
| 0
| null | 2022-12-08T03:59:01
| 2020-04-04T14:26:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
from django.urls import path,include
from . import views
# URL routes for the public site pages; each view lives in ./views.py.
urlpatterns = [
    path('',views.index ,name="home"),
    path('what_we_do/',views.what_we_do ,name="what_we_do"),
    path('about/',views.about ,name="about"),
    # NOTE(review): 'protfolio' looks like a typo for 'portfolio', but the
    # URL, view name and route name all use it consistently — renaming
    # would break existing links and reverse() lookups.
    path('protfolio/',views.protfolio ,name="protfolio"),
    path('gallery/',views.gallery ,name="gallery"),
    path('contact/',views.contact ,name="contact"),
]
|
[
"gk90731@gmail.com"
] |
gk90731@gmail.com
|
91f683f5ae10fa0d17fac5d8d2ed8efc7e5b63a8
|
fc1c1e88a191b47f745625688d33555901fd8e9a
|
/meraki_sdk/models/universal_search_knowledge_base_search_enum.py
|
eeb90cdd62bbd16c19b2fcca21e1750437564fb5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
RaulCatalano/meraki-python-sdk
|
9161673cfd715d147e0a6ddb556d9c9913e06580
|
9894089eb013318243ae48869cc5130eb37f80c0
|
refs/heads/master
| 2022-04-02T08:36:03.907147
| 2020-02-03T19:24:04
| 2020-02-03T19:24:04
| 416,889,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UniversalSearchKnowledgeBaseSearchEnum(object):
    """Allowed values of the 'UniversalSearchKnowledgeBaseSearch' setting.

    The universal search box always visible on Dashboard presents, by
    default, results from the Meraki KB; this setting controls whether
    those KB results are returned. One of 'default or inherit', 'hide'
    or 'show'.

    Attributes:
        ENUM_DEFAULT_OR_INHERIT: keep the default / inherited behaviour.
        HIDE: suppress Meraki KB results.
        SHOW: include Meraki KB results.
    """

    ENUM_DEFAULT_OR_INHERIT = 'default or inherit'
    HIDE = 'hide'
    SHOW = 'show'
|
[
"api-pm@meraki.com"
] |
api-pm@meraki.com
|
d0b8df90a505c6ce70739548052cf57d31f3c545
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/422/usersdata/328/89006/submittedfiles/lecker.py
|
646555fee493b246ee37fbf271bb339645a2e877
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Read four integers from stdin and print 'S' or 'N' depending on their
# relative ordering. NOTE(review): the branch conditions look ad hoc
# (student exercise submission); documented as-is without inferring a
# cleaner intended rule.
n1=int(input('n1:'))
n2=int(input('n2:'))
n3=int(input('n3:'))
n4=int(input('n4:'))
if n1 >n2 and n4<n3:
    print('S')
elif n2 >n1> n3 and n4<n3 :
    print('S')
elif n3>n4>n2 and n1<n2:
    print('S')
elif n4>n3 :
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5f5b4e4172a9aafe394060657cf1b1bd9a055427
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631572862566400_0/Python/ugo/c.py
|
fc210345694d8b61a3644358a93468fbce72a716
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
def get_candidates(bffs):
    """Return all mutual-BFF pairs (i, j) with i < j, i.e. bffs[i] == j and bffs[j] == i."""
    pairs = []
    count = len(bffs)
    for i in range(count):
        for j in range(i + 1, count):
            if bffs[i] == j and bffs[j] == i:
                pairs.append((i, j))
    return pairs
def longest(n, dontgo, edges):
    # Length of the longest chain leading into node n through *edges*
    # (reverse-BFF adjacency lists), never stepping onto node *dontgo*.
    # Python 2 debug print statement below.
    print 'longest', n, dontgo
    ret = 1
    for nb in edges[n]:
        if nb != dontgo:
            ret = max(ret, longest(nb, dontgo, edges) + 1)
    return ret
# def dfs(n, starting, visited, edges):
#     next = edges[n]
#     if starting in visited
# Contest driver (Python 2: print statements; reads c.small.in, writes
# c.out). For each case the answer is max of two arrangements.
f = open('c.small.in')
fout = open('c.out', 'w')
numCases = int(f.readline().strip())
for numCase in range(numCases):
    print 'CASE: {}'.format(numCase+1)
    N = int(f.readline().strip())
    bffs = [None] * N
    reverse_bffs = []
    for i in range(N):
        reverse_bffs.append([])
    ss = f.readline().split()
    for i in range(N):
        # Input is 1-based; store 0-based plus the reverse adjacency.
        bffs[i] = int(ss[i]) - 1
        reverse_bffs[int(ss[i]) - 1].append(i)
    # print bffs
    # print reverse_bffs
    #case 1
    # Case 1: chains ending in a mutual pair. Each mutual pair contributes
    # its two longest incoming chains, and all such pairs can be laid out
    # together (hence the 2 * (len_candidates - 1) term).
    case1max = 0
    candidates = get_candidates(bffs)
    len_candidates = len(candidates)
    for (c_x, c_y) in candidates:
        # print c_x, c_y
        print c_x
        d1 = longest(c_x, c_y, reverse_bffs)
        print c_y
        d2 = longest(c_y, c_x, reverse_bffs)
        case1max = max(case1max, d1+d2 + 2 * (len_candidates-1) )
        print c_x, d1
        print c_y, d2
        print case1max
    # Case 2: the largest simple BFF cycle; walk forward from each node n
    # and count the cycle length when the walk returns to n.
    case2max = 0
    for n in range(0, N):
        if len(reverse_bffs[n]) == 0:
            continue
        cnt = 1
        cur = n
        visited = set()
        visited.add(cur)
        while True:
            next = bffs[cur]
            if next == n:
                break
            if next in visited:
                # Walk fell into a cycle that does not contain n: discard.
                cnt = 0
                break
            visited.add(next)
            cur = next
            cnt += 1
        print 'cycle starting n:', n, cnt
        case2max = max(case2max, cnt)
        # visited = set()
        # visited.add(n)
        # d = dfs(n, n, visited, bffs)
        # print n, d
        # case2max = max(case2max, d)
    #case 2
    # for node in range(1, N+1):
    # print ' '.join(result)
    print 'case1max', case1max, 'case2max', case2max
    fout.write('Case #{}: {}\n'.format(numCase+1, max(case1max, case2max)))
fout.close()
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
f59015df0fd96a8dc9105e2b9aec3f31d216ca8f
|
df7b40e95718ac0f6071a0ba571b42efc81cf6de
|
/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py
|
5305689d09b944f6e37aa85567ce3f29fc6974a7
|
[
"Apache-2.0"
] |
permissive
|
shinianzhihou/ChangeDetection
|
87fa2c498248e6124aeefb8f0ee8154bda36deee
|
354e71234bef38b6e142b6ba02f23db958582844
|
refs/heads/master
| 2023-01-23T20:42:31.017006
| 2023-01-09T11:37:24
| 2023-01-09T11:37:24
| 218,001,748
| 162
| 29
|
Apache-2.0
| 2022-11-03T04:11:00
| 2019-10-28T08:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
# mmsegmentation config: DNL head with a ResNet-50-D8 backbone, 512x512
# crops, 160k-iteration schedule, on ADE20K (150 semantic classes).
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# Override both heads' class counts to match ADE20K.
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
|
[
"1178396201@qq.com"
] |
1178396201@qq.com
|
08e64277223c06c607a305f6816878e91c7112f9
|
3b6b76aae93eb8a2c738a1364e923d3bad20e0a6
|
/articles/wsgi-intro/twill-wsgi-example.py
|
c8d9450fb8f6a4c7610666f7a9687e7e5a2e8ccb
|
[] |
no_license
|
ctb/ivory.idyll.org
|
24e4a0f67fbbde399118aff3c27a03bac304aa8f
|
88df5f33361e6e13eda248ee55f1e4e460b998d9
|
refs/heads/master
| 2020-04-10T10:42:00.111811
| 2018-12-08T19:54:05
| 2018-12-08T19:54:05
| 160,973,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#! /usr/bin/env python
import twill
def simple_app(environ, start_response):
    """Minimal WSGI application: always 200 OK with a plain-text greeting."""
    headers = [('Content-type', 'text/plain')]
    start_response('200 OK', headers)
    return ['Hello world!\n']
if __name__ == '__main__':
    # Python 2 print statement. Installs an in-process WSGI intercept so
    # twill requests to localhost:80 are served by simple_app without
    # opening a real socket, then drops into the twill shell.
    print '*** installing WSGI intercept hook ***\n'
    twill.add_wsgi_intercept('localhost', 80, lambda: simple_app)
    twill.shell.main()
|
[
"titus@idyll.org"
] |
titus@idyll.org
|
bd7f88508e67dbfcf5ecffbf0562f7a05eb1619b
|
e49a07ad215172e9c82cb418b10371bf0ce1c0f7
|
/第1章 python基础/Python基础08/10-异常传递.py
|
a53af9d709038f15ce320e9490696f4377f4e232
|
[] |
no_license
|
taogangshow/python_Code
|
829c25a7e32ead388c8b3ffa763cb9cf587bfd7b
|
4b3d6992ec407d6069f3187ca7e402a14d863fff
|
refs/heads/master
| 2022-12-16T01:26:17.569230
| 2018-11-16T10:07:59
| 2018-11-16T10:07:59
| 157,832,985
| 0
| 1
| null | 2022-11-25T09:55:32
| 2018-11-16T08:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
def test1():
    # Deliberately reads the undefined global `num` to raise a NameError —
    # this script demonstrates how exceptions propagate up the call chain.
    print("---test1-1---")
    print(num)
    print("---test1-2---")  # never reached
def test2():
    # Calls test1() WITHOUT try/except: the NameError propagates through
    # here up to module level.
    print("---test2-1---")
    test1()
    print("---test2-2---")
def test3():
    # Calls test1() inside try/except: the NameError is caught locally.
    try:
        print("---test3-1---")
        test1()
        print("---test3-2---")
    except Exception as result:
        # Message (Chinese): "caught the exception, the info is: %s".
        print("捕获到了异常,信息是:%s"%result)
test3()
print("---华丽的分割线---")
test2()  # the uncaught NameError from here terminates the program
|
[
"cdtaogang@163.com"
] |
cdtaogang@163.com
|
f4c50198426a22de4657d97af5065df4920d777b
|
4f111dfacab0acc93900e7746538f85e0b3d8d78
|
/day3/01关系运算符.py
|
7daa883448340f101de2cd7477971865e50ce034
|
[] |
no_license
|
ljxproject/basecode
|
5541f25cfe90d5fad26eac0b6e72802aa1fad1f4
|
485e4b41593839bfc61e67261247fb88dc80cc1d
|
refs/heads/master
| 2020-03-26T16:16:26.422617
| 2018-08-17T08:05:11
| 2018-08-17T08:05:11
| 145,091,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
'''
Relational operators and relational expressions

Relational operators:
    >  <  ==  !=  >=  <=
Form:   expression1  operator  expression2
Effect: evaluates both expressions and compares the results.
Value:  True when the relation holds, otherwise False.
'''
num1 = 2
num2 = 5
# FIX: the original stored the result under the misspelled local name
# 'mum3'; renamed to num3 to match num1/num2. Output is unchanged.
num3 = num1 != num2
print(num3)
print(num1 != num2)
|
[
"403496369@qq.com"
] |
403496369@qq.com
|
8e4afde0ad3d7cdf9500900a9d52568869e8ccec
|
b9d7194bb50a01e7e56d19ba2f3c048084af54b5
|
/_OLD_/bottle.py
|
8171ee3221df8251f6911cd57ccc179a1fc2edcf
|
[] |
no_license
|
BernardoGO/TCC---2017
|
099e72d788974446b58fe5f409a2df25e3613cc5
|
75025e095956624470c22d8f3118441d5c28bdd7
|
refs/heads/master
| 2018-12-04T10:53:07.809161
| 2018-09-06T04:59:30
| 2018-09-06T04:59:30
| 64,803,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,571
|
py
|
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.models import save_model, load_model
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 150000
nb_validation_samples = 24000
epochs = 50
batch_size = 16
def save_bottlebeck_features():
datagen = ImageDataGenerator(rescale=1. / 255)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_train = model.predict_generator(
generator, nb_train_samples // batch_size)
np.save(open('bottleneck_features_train.npy', 'wb'),
bottleneck_features_train)
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_validation = model.predict_generator(
generator, nb_validation_samples // batch_size)
np.save(open('bottleneck_features_validation.npy', 'wb'),
bottleneck_features_validation)
def train_top_model():
train_data = np.load(open('bottleneck_features_train.npy', "rb"))
train_labels = np.array(
[0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
validation_data = np.load(open('bottleneck_features_validation.npy', "rb"))
validation_labels = np.array(
[0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
#model.load_weights(top_model_weights_path)
#save_model(model, "model1111.h5")
save_bottlebeck_features()
train_top_model()
|
[
"bernardo.godinho.oliveira@gmail.com"
] |
bernardo.godinho.oliveira@gmail.com
|
e7e3f19d55f167659b9939895e3c7c8b47ad52da
|
c6818c06aacb1eca1fffa8bbc51b6f3aac25c177
|
/acre/asgi.py
|
7a5ee240ac0ce6bd5657ed8a2e6ac3c7e5f609cc
|
[] |
no_license
|
Acon94/ACRE
|
2d0769780c9f81eba05085ffd8b0af225666d6de
|
73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1
|
refs/heads/master
| 2022-08-02T02:07:53.004308
| 2020-05-29T15:25:50
| 2020-05-29T15:25:50
| 267,840,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
ASGI config for acre project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'acre.settings')
application = get_asgi_application()
|
[
"andrew@Andrews-MacBook-Pro.local"
] |
andrew@Andrews-MacBook-Pro.local
|
def00a2abdbb12ba52b231da7124685b93516b93
|
23ef81cb94356fd321c07f06dab2877e04131b4d
|
/yiyuanduobao_shop/migrations/0058_item_proxy_sale_qr_code.py
|
da3d99780c61df4a84d1c939d53fdc4bb41fd205
|
[] |
no_license
|
dongshaohui/one_dolor
|
0c688787d8cee42957bec087b74b5ea353cc80fc
|
13dea458568152a3913c6f70ecd9a7e1f6e9514e
|
refs/heads/master
| 2020-07-03T03:12:22.409542
| 2016-11-21T08:15:06
| 2016-11-21T08:15:06
| 74,202,604
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('yiyuanduobao_shop', '0057_item_winner_customer'),
]
operations = [
migrations.AddField(
model_name='item',
name='proxy_sale_qr_code',
field=models.CharField(default=b'', max_length=500, verbose_name='\u672c\u671f\u4ee3\u5356\u4e8c\u7ef4\u7801'),
preserve_default=True,
),
]
|
[
"405989455@qq.com"
] |
405989455@qq.com
|
0b231fcc73526c6bd8bb5185239f91dd9e68d7cf
|
13edd8f1bc3b86fd881f85fbeafe94811392d7fc
|
/fourth_module/多线程多进程/new/多线程/01 开启线程的两种方式.py
|
cbdb541d1193f3e8f003cc5d85896cfbaa111812
|
[] |
no_license
|
ryan-yang-2049/oldboy_python_study
|
f4c90c9d8aac499e1d810a797ab368217f664bb1
|
6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f
|
refs/heads/master
| 2022-07-22T23:49:28.520668
| 2019-06-11T13:26:25
| 2019-06-11T13:26:25
| 129,877,980
| 0
| 1
| null | 2022-07-18T17:12:54
| 2018-04-17T09:12:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
# -*- coding: utf-8 -*-
"""
__title__ = '01 开启线程的两种方式.py'
__author__ = 'yangyang'
__mtime__ = '2018.02.07'
"""
from threading import Thread
import os,time
# def task(name):
# print("%s is running,PID: %s" % (name,os.getpid()))
#
# if __name__ == '__main__':
# p = Thread(target=task,args=('ryan',))
# p.start()
# print("主线程,PID:%s"%os.getpid())
class MyThread(Thread):
def __init__(self,name):
super().__init__()
self.name = name
def run(self):
print("%s is running,PID: %s"%(self.name,os.getpid()))
if __name__ == '__main__':
obj = MyThread('ryan')
obj.start()
print("主线程,PID: %s"%os.getpid())
|
[
"11066986@qq.com"
] |
11066986@qq.com
|
00d288a2b6044bd45e41cb8a04842120a28cf531
|
90047daeb462598a924d76ddf4288e832e86417c
|
/chromecast/browser/DEPS
|
c273dc2c7d0751e9b9e547fd0285090933fa1b4b
|
[
"BSD-3-Clause"
] |
permissive
|
massbrowser/android
|
99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080
|
a9c4371682c9443d6e1d66005d4db61a24a9617c
|
refs/heads/master
| 2022-11-04T21:15:50.656802
| 2017-06-08T12:31:39
| 2017-06-08T12:31:39
| 93,747,579
| 2
| 2
|
BSD-3-Clause
| 2022-10-31T10:34:25
| 2017-06-08T12:36:07
| null |
UTF-8
|
Python
| false
| false
| 990
|
include_rules = [
"+cc/base/switches.h",
"+chromecast/common",
"+chromecast/graphics",
"+chromecast/app/grit/chromecast_settings.h",
"+chromecast/app/resources/grit/shell_resources.h",
"+chromecast/media",
"+chromecast/net",
"+chromecast/service",
"+components/cdm/browser",
"+components/crash",
"+components/network_hints/browser",
"+components/prefs",
"+components/proxy_config",
"+content/public/android",
"+content/public/browser",
"+content/public/common",
"+content/public/test",
"+device/geolocation",
"+gin/v8_initializer.h",
"+gpu/command_buffer/service/gpu_switches.h",
"+media/audio",
"+media/base",
"+media/mojo",
"+mojo/public",
"+net",
"+services/service_manager/public",
"+ui/aura",
"+ui/base",
"+ui/compositor",
"+ui/events",
"+ui/gfx",
"+ui/gl",
"+ui/display",
"+ui/ozone/platform/cast/overlay_manager_cast.h",
# TODO(sanfin): Remove this by fixing the crash handler on android.
"!chromecast/app",
]
|
[
"xElvis89x@gmail.com"
] |
xElvis89x@gmail.com
|
|
26e94e33c7d3dda0924333d6df8e6e72572d6ac1
|
a842f224d1b0c2e74b2043e8d03f49e3298086df
|
/grep_scales.py
|
2b83cfe14e04d138314104c9309a15a7056c7411
|
[] |
no_license
|
ronsengupta/grep-scales
|
68f8037171cdfd3f43c02d3d77f4f633e4196856
|
5740902b4694ae8d1cdee04e213f41c3d99bc428
|
refs/heads/master
| 2020-06-12T23:00:48.071262
| 2016-04-10T08:48:04
| 2016-04-10T08:48:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,232
|
py
|
from shutit_module import ShutItModule
class grep_scales(ShutItModule):
def build(self, shutit):
afile = r'''THIS LINE IS THE 1ST UPPER CASE LINE IN THIS FILE.
this line is the 1st lower case line in this file.
This Line Has All Its First Character Of The Word With Upper Case.
Two lines above this line is empty.
And this is the last line.
'''
shutit.send_file('afile',afile)
shutit.send('alias grep=grep')
afile_message = '''I have created a file called 'afile' that looks like this:
BEGINS
''' + afile + '''
ENDS
'''
follow_on_context={'check_command':'ls','context':'docker'}
#shutit.challenge('move file afile to filename: 1',challenge_type='golf',expect='1',follow_on_context=follow_on_context)
shutit.challenge(afile_message + '''
For your first task, grep out the last line, ie the one that reads: 'And this is the last line.'.''','And this is the last line.',hints=['last','grep last afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case sensitive)','1',hints=['-c','ask again to get answer','grep -c UPPER afile'])
shutit.golf(afile_message + 'Return a count of the number of lines with "UPPER" in it (case insensitive)','2',hints=['-c','-i','ask again to get answer','grep -c -i UPPER afile'])
shutit.golf(afile_message + 'Return lines that have the word "in" in it (case insensitive)','264200b0557e7c2e75cffc57778311f4',expect_type='md5sum',hints=['-w','-i','ask again to get answer','grep -w -i in afile'])
shutit.golf(afile_message + '''Return lines that DON'T have the word 'case' (case insensitive) in it.''','ca75d0d8558569109e342ac5e09c4d01',expect_type='md5sum',hints=['-v','-i','ask again to get answer','grep -v case afile'])
shutit.golf(afile_message + '''Return line with "UPPER" in it, along with the line number.''','cc9246de53156c4259be5bf05dacadf6',expect_type='md5sum',hints=['-n','ask again to get answer','grep -n UPPER afile'])
shutit.golf(afile_message + 'Print the line after the empty line.','63b6f5fd46648742a6f7aacff644dd92',expect_type='md5sum',hints=['-A','-A1','ask again to get answer','grep -A1 ^$ afile'])
shutit.golf(afile_message + 'Print the two lines that come before the first line with nothing in it.','444cc6679be200fc6579678b6afe19e9',expect_type='md5sum',hints=['-B','-B2','^$ to match the empty line','ask again to get answer','grep -B2 ^$ afile'])
shutit.golf(afile_message + 'Print the line before the empty line, the empty line, and the line after.','7ba4233c4599e0aefd11e93a66c4bf17',expect_type='md5sum',hints=['-C','-C1','ask again to get answer','grep -C1 ^$ afile'],congratulations='Well done, all done!')
#-o, --only-matching Print only the matched (non-empty) parts of a matching line, with each such part on a separate output line.
#-l, --files-with-matches Suppress normal output; instead print the name of each input file from which output would normally have been printed. The scanning will stop on the first match.
#-r
#-e
return True
def module():
return grep_scales(
'tk.shutit.grep_scales.grep_scales', 1845506479.0001,
description='Practice your grep scales!',
maintainer='ian.miell@gmail.com',
delivery_methods=['docker'],
depends=['shutit.tk.setup']
)
|
[
"ian.miell@gmail.com"
] |
ian.miell@gmail.com
|
f76d667d0fdea002d1fd512d3a7f98728174a0a4
|
2ece848b37f7fa6f13ce0e94ddfd0fbd46c72b8f
|
/backend/utils/crawl_mode.py
|
c0224bad7279761898d0187465fdb7edceb18649
|
[
"Apache-2.0"
] |
permissive
|
rockeyzhu/eastmoney
|
1a2d2db18bd658abe8e65875bf863f1cfcefd545
|
c8aa33a69ebee54c64f22a8edbcf30ed0f29b293
|
refs/heads/master
| 2023-03-06T12:20:03.896607
| 2021-02-20T07:20:53
| 2021-02-20T07:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import configparser
def get_crawl_mode():
config = configparser.ConfigParser()
config.sections()
config.read("config.ini")
return config['CRAWL_MODE']['crawl_mode']
|
[
"1397991131@qq.com"
] |
1397991131@qq.com
|
bc770a4a78f1a3e117c15af7a3ea4c7b4937bf1e
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ABC/ABC200~ABC299/ABC291/c.py
|
468e2709c60f01b71d7144cca09a88563e9ae6c3
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468
| 2023-08-12T09:53:07
| 2023-08-12T09:53:07
| 254,373,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from collections import defaultdict
N = int(input())
S = input()
d = defaultdict(lambda: False)
d[(0, 0)] = True
nx, ny = 0, 0
ans = 'No'
for i in range(N):
s = S[i]
if s == 'R':
nx += 1
elif s == 'L':
nx -= 1
elif s == 'U':
ny += 1
else:
ny -= 1
if d[(nx, ny)]:
ans = 'Yes'
d[(nx, ny)] = True
print(ans)
|
[
"ymdysk911@gmail.com"
] |
ymdysk911@gmail.com
|
7fc78a96811a0f46faa2e7fdc489c6ccfdf5de20
|
b7f1b4df5d350e0edf55521172091c81f02f639e
|
/components/arc/video_accelerator/DEPS
|
be1c9c99ce26a0e5b89f2611421f734fc2f70e77
|
[
"BSD-3-Clause"
] |
permissive
|
blusno1/chromium-1
|
f13b84547474da4d2702341228167328d8cd3083
|
9dd22fe142b48f14765a36f69344ed4dbc289eb3
|
refs/heads/master
| 2023-05-17T23:50:16.605396
| 2018-01-12T19:39:49
| 2018-01-12T19:39:49
| 117,339,342
| 4
| 2
|
NOASSERTION
| 2020-07-17T07:35:37
| 2018-01-13T11:48:57
| null |
UTF-8
|
Python
| false
| false
| 296
|
include_rules = [
"+components/arc/common",
"+gpu/command_buffer/service/gpu_preferences.h",
"+media/video",
"+media/base/video_frame.h",
"+media/base/video_types.h",
"+media/gpu",
"+mojo/edk/embedder",
"+services/service_manager/public/cpp",
"+ui/gfx",
"+ui/ozone/public",
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
00728e4101b62fa2bf7ba2c3784d4576344c6cc3
|
d5b3de6729e165bddcc17b8c3c285df808cd9fd0
|
/application/modules/fonction/views_fct.py
|
209fd03dd4976dbac54b11d2915ca69f51eb9231
|
[] |
no_license
|
wilrona/Gesacom
|
907848d44d9fa1a285b5c7a452c647fc6cbbc2fa
|
31ec26c78994030844f750039a89a43a66d61abf
|
refs/heads/master
| 2020-04-06T15:00:36.522832
| 2016-09-08T13:19:06
| 2016-09-08T13:19:06
| 49,956,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
__author__ = 'Ronald'
from ...modules import *
from models_fct import Fonction
from forms_fct import FormFonction
# Flask-Cache (configured to use App Engine Memcache API)
cache = Cache(app)
prefix = Blueprint('fonction', __name__)
@prefix.route('/fonction')
@login_required
@roles_required([('super_admin', 'fonction')])
def index():
menu = 'societe'
submenu = 'entreprise'
context = 'fonction'
title_page = 'Parametre - Fonctions'
search = False
q = request.args.get('q')
if q:
search = True
try:
page = int(request.args.get('page', 1))
except ValueError:
page = 1
datas = Fonction.query()
pagination = Pagination(css_framework='bootstrap3', page=page, total=datas.count(), search=search, record_name='fonctions')
if datas.count() > 10:
if page == 1:
offset = 0
else:
page -= 1
offset = page * 10
datas = datas.fetch(limit=10, offset=offset)
return render_template('fonction/index.html', **locals())
@prefix.route('/fonction/edit', methods=['GET', 'POST'])
@prefix.route('/fonction/edit/<int:fonction_id>', methods=['GET', 'POST'])
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def edit(fonction_id=None):
if fonction_id:
grades = Fonction.get_by_id(fonction_id)
form = FormFonction(obj=grades)
else:
grades = Fonction()
form = FormFonction()
success = False
if form.validate_on_submit():
grades.libelle = form.libelle.data
grades.put()
flash('Enregistement effectue avec succes', 'success')
success = True
return render_template('fonction/edit.html', **locals())
@prefix.route('/fonction/delete/<int:fonction_id>')
@login_required
@roles_required([('super_admin', 'fonction')], ['edit'])
def delete(fonction_id):
fonctions = Fonction.get_by_id(fonction_id)
if not fonctions.count():
fonctions.key.delete()
flash('Suppression reussie', 'success')
else:
flash('Impossible de supprimer', 'danger')
return redirect(url_for('fonction.index'))
|
[
"wilrona@gmail.com"
] |
wilrona@gmail.com
|
da0c2a1cf4183a389e9a794b268a35920914e270
|
226be49a7396e7c6004ba4de567f6c22b5b245c0
|
/packaging/fremantle/.py2deb_build_folder/gread/src/opt/GRead/views/basic/utils/toolbar.py
|
ce31b63c1a91f4abdca09d651a501e4d2d0b2425
|
[] |
no_license
|
twidi/GRead
|
0e315c0c924fa169cb5d16e927c6b54e79e25bd9
|
51429189762b706fbe8ca1b927d89071a556d51e
|
refs/heads/master
| 2021-01-10T19:54:43.098022
| 2010-11-23T00:41:17
| 2010-11-23T00:41:17
| 1,146,572
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,891
|
py
|
# -*- coding: utf-8 -*-
"""
Lib to manage toolbars which appear on mousedown(maemo) or mousemove(not maem0)
and stay visible a few seconds
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import time
class ToolbarOwnerEventFilter(QObject):
def __init__(self, *args, **kwargs):
super(ToolbarOwnerEventFilter, self).__init__(*args, **kwargs)
def eventFilter(self, obj, e):
if e.type() == QEvent.Resize:
self.parent().replace_toolbars()
return False
class ToolbarManager(QObject):
def __init__(self, toolbars, event_target, *args, **kwargs):
super(ToolbarManager, self).__init__(*args, **kwargs)
parent = self.parent()
self.event_target = event_target
self.toolbars = toolbars
self.mode_opacity = False # don't know how to change opacity !
self.timer = QTimer()
self.delay = 0
self.max_delay = 1000.0 # ms (don't forget ".0")
parent.installEventFilter(self)
parent.installEventFilter(ToolbarOwnerEventFilter(parent=self))
QObject.connect(self.timer, SIGNAL("timeout()"), self.hide)
def add_toolbar(self, toolbar):
if toolbar not in self.toolbars:
self.toolbars.append(toolbar)
toolbar.action.triggered.connect(self.display)
def replace_toolbars(self):
for toolbar in self.toolbars:
toolbar.replace()
def display(self):
for toolbar in self.toolbars:
if self.mode_opacity:
toolbar.setStyleSheet("opacity:1")
toolbar.show()
self.timer.stop()
self.delay = self.max_delay
self.timer.start(self.max_delay)
def hide(self):
if not self.delay:
return
if self.mode_opacity:
self.delay = int(self.delay/20)*10
else:
self.delay = 0
if self.delay == 0:
self.timer.stop()
for toolbar in self.toolbars:
toolbar.hide()
else:
opacity = 255*self.delay/self.max_delay
for toolbar in self.toolbars:
toolbar.setStyleSheet("opacity:%f" % opacity)
self.timer.setInterval(self.delay)
def eventFilter(self, obj, e):
if e.type() == QEvent.HoverMove:
if (not self.delay) or self.delay < 500:
self.display()
return False
class Toolbar(QObject):
def __init__(self, text, tooltip, callback, x, y, *args, **kwargs):
super(Toolbar, self).__init__(*args, **kwargs)
parent = self.parent()
self.enabled = False
self.x = x
self.y = y
self.toolbar = QToolBar(parent)
self.toolbar.setAllowedAreas(Qt.NoToolBarArea)
parent.addToolBar(Qt.NoToolBarArea, self.toolbar)
self.action = QAction(text, parent)
self.action.setToolTip(tooltip)
self.toolbar.addAction(self.action)
self.button = self.toolbar.children()[-1]
self.toolbar.setContentsMargins(0, 0, 0, 0)
font = self.button.font()
font.setPointSizeF(font.pointSizeF() * 3)
self.button.setFont(font)
palette = self.toolbar.palette()
self.button.setStyleSheet(
"""
QToolButton {
border : none;
border-radius : %(border_radius)s;
background: transparent;
color: %(background_hover)s;
}
QToolButton:hover {
background: %(background_hover)s;
color: %(foreground_hover)s;
}
""" %
{
'border_radius': int(self.button.height()/2),
'background_hover': palette.color(palette.Highlight).name(),
'foreground_hover': palette.color(palette.HighlightedText).name(),
}
)
self.toolbar.setStyleSheet("border:none;background:transparent")
self.toolbar.resize(self.button.sizeHint())
self.move(x, y)
self.toolbar.setMovable(False)
self.toolbar.hide()
if callback:
self.action.triggered.connect(callback)
def set_tooltip(self, tooltip):
self.action.setToolTip(tooltip)
def replace(self):
self.move(self.x, self.y)
def move(self, x, y):
"""
Move the toolbar to coordinates x,y
If a coordinate is 0 < ? <= 1, it's a percent
of the width or height
"""
w_width = self.parent().width()
t_width = self.toolbar.width()
if not x or x < 0:
_x = 0
elif x > 1:
_x = x
else:
_x = int(x * (w_width - t_width))
if _x < 2:
_x = 2
elif _x > (w_width - t_width -2):
_x = (w_width - t_width -2)
w_height = self.parent().height()
t_height = self.toolbar.height()
if not y or y < 0:
_y = 0
elif y > 1:
_y = y
else:
_y = int(y * (w_height - t_height))
if _y < 2:
_y = 2
elif _y > (w_height - t_height -2):
_y = (w_height - t_height -2)
self.toolbar.move(_x, _y)
def move_x(self, x):
self.move(x, self.toolbar.y())
def move_y(self, y):
self.move(self.toolbar.x(), y)
def disable(self):
self.enabled = False
def enable(self):
self.enabled = True
def hide(self):
self.toolbar.hide()
def show(self):
if not self.enabled:
return
#self.toolbar.setStyleSheet("opacity:1")
self.toolbar.show()
|
[
"s.angel@twidi.com"
] |
s.angel@twidi.com
|
c5a5e944bd41c1e4cfadd2c3c620774ec34c22e1
|
31e41995dea5e4a41bc9b942da7e5266cd686757
|
/learning/training/python/py2/pgms/sec4/outputparams.py
|
5894f5ae44a48540fe4caeee5abca53df43f5154
|
[] |
no_license
|
tamle022276/python
|
3b75758b8794801d202565c05d32976c146beffd
|
4fec225d1e5e2bf0adac5048f7f9f3313ac76e23
|
refs/heads/master
| 2020-04-01T21:03:01.458768
| 2017-03-13T20:47:35
| 2017-03-13T20:47:35
| 64,878,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
#!/usr/bin/env python
# outputparams.py - simulate output parameters
def assign(m, n):
m = 10
n = [3, 4]
return m, n
a = 5; b = [1, 2]
(a, b) = assign(a, b) # updates a, b
print a, b
#####################################
#
# $ outputparams.py
# 10 [3, 4]
#
|
[
"tam.le@teradata.com"
] |
tam.le@teradata.com
|
5ebdc3a4b1499d03dc0954911ba0248fd4c5dfb8
|
e254a1124bbe6be741159073a22898b0824e2a4f
|
/customuser/admin.py
|
6c225a0579ce6bb67949bffc24b32ad6df83f3a0
|
[] |
no_license
|
skiboorg/stdiplom
|
0df83b8e42e999abc43a01157cb24cffd10d0666
|
13101381c7db8a4b949048e8cbfcf9673cf7ecde
|
refs/heads/master
| 2022-11-12T23:55:21.136176
| 2020-06-29T05:57:03
| 2020-06-29T05:57:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User,Guest
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
"""Define admin model for custom User model with no email field."""
fieldsets = (
(None, {'fields': ('email', 'password', 'used_promo')}),
(_('Personal info'), {'fields': ('fio', 'phone', 'comment', 'is_allow_email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2', 'phone'),
}),
)
list_display = ('email', 'fio', 'phone')
ordering = ('email',)
search_fields = ('email', 'fio', 'phone')
admin.site.register(Guest)
|
[
"ddnnss.i1@gmail.com"
] |
ddnnss.i1@gmail.com
|
d6522db0345b146f5c997b5624fec7901716705a
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/zope.server-Zope-3.2.1/zope.server/linereceiver/linetask.py
|
b6e21554887b4b549e2db8b1c9d3414ff467116b
|
[
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Line Task
$Id: linetask.py 27442 2004-09-03 08:16:55Z shane $
"""
import socket
import time
from zope.server.interfaces import ITask
from zope.interface import implements
class LineTask(object):
"""This is a generic task that can be used with command line
protocols to handle commands in a separate thread.
"""
implements(ITask)
def __init__(self, channel, command, m_name):
self.channel = channel
self.m_name = m_name
self.args = command.args
self.close_on_finish = 0
def service(self):
"""Called to execute the task.
"""
try:
try:
self.start()
getattr(self.channel, self.m_name)(self.args)
self.finish()
except socket.error:
self.close_on_finish = 1
if self.channel.adj.log_socket_errors:
raise
except:
self.channel.exception()
finally:
if self.close_on_finish:
self.channel.close_when_done()
def cancel(self):
'See ITask'
self.channel.close_when_done()
def defer(self):
'See ITask'
pass
def start(self):
now = time.time()
self.start_time = now
def finish(self):
hit_log = self.channel.server.hit_log
if hit_log is not None:
hit_log.log(self)
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
09748ed4d962cf5b7f4a079ab8e5b4811299f4c0
|
99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6
|
/Django/PROJECT02/PROJECT02/jobs/models.py
|
5d8ee670119eeaf75fc29f8879c7f9b7d6106061
|
[] |
no_license
|
HSx3/TIL
|
92acc90758015c2e31660617bd927f7f100f5f64
|
981c9aaaf09c930d980205f68a28f2fc8006efcb
|
refs/heads/master
| 2020-04-11T21:13:36.239246
| 2019-05-08T08:18:03
| 2019-05-08T08:18:03
| 162,099,042
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from django.db import models
# Create your models here.
class Job(models.Model):
name = models.CharField(max_length=20)
pastjob = models.CharField(max_length=30)
def __str__(self):
return self.name
|
[
"hs.ssafy@gmail.com"
] |
hs.ssafy@gmail.com
|
b1b17de27b17b10b04b0d215f31b42d2845350ab
|
c31ee8136a57a96649196081e1cfde0676c2a481
|
/larcv/app/arxiv/arxiv/LArOpenCVHandle/cfg/mac/arxiv/analyze.py
|
1ff1c17fba6bd79428e15f9dc424c3ee27064942
|
[
"MIT"
] |
permissive
|
DeepLearnPhysics/larcv2
|
b12b46168e5c6795c70461c9495e29b427cd88b5
|
31863c9b094a09db2a0286cfbb63ccd2f161e14d
|
refs/heads/develop
| 2023-06-11T03:15:51.679864
| 2023-05-30T17:51:19
| 2023-05-30T17:51:19
| 107,551,725
| 16
| 19
|
MIT
| 2023-04-10T10:15:13
| 2017-10-19T13:42:39
|
C++
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
from larcv import larcv
import ROOT, sys
from ROOT import std
if len(sys.argv) < 2:
print 'Usage: python',sys.argv[0],'CONFIG_FILE [LARCV_FILE1 LARCV_FILE2 ...]'
sys.exit(1)
proc = larcv.ProcessDriver('ProcessDriver')
print "Loading config... ",sys.argv[1]
proc.configure(sys.argv[1])
print "Loaded"
print sys.argv
if len(sys.argv) > 1:
flist=ROOT.std.vector('std::string')()
for x in xrange(len(sys.argv)-6):
print "Pushing back...",sys.argv[x+6]
flist.push_back(sys.argv[x+6])
proc.override_input_file(flist)
proc.override_ana_file(sys.argv[2] + ".root")
proc.override_output_file(sys.argv[3] + ".root")
ana_id = proc.process_id("LArbysImageAna")
ext_id = proc.process_id("LArbysImageExtract")
out_id = proc.process_id("LArbysImageOut")
ana_proc = proc.process_ptr(ana_id)
ext_proc = proc.process_ptr(ext_id)
out_proc = proc.process_ptr(out_id)
out_proc.SetLArbysImageAna(ana_proc)
out_proc.SetLArbysImageExtract(ext_proc)
ana_proc.SetInputLArbysMCFile(sys.argv[4]);
ana_proc.SetInputLArbysRecoFile(sys.argv[5])
proc.initialize()
proc.batch_process()
proc.finalize()
|
[
"kazuhiro@nevis.columbia.edu"
] |
kazuhiro@nevis.columbia.edu
|
c5c4b6f0b936cd29d654915642a877ac48a21b78
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03806/s696918602.py
|
35f161589d7dd759d0031fd48f8415a6aae2215a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
def main():
INF = 100 * 40 + 1
MX = 4000
N, Ma, Mb = map(int, input().split())
dp = [[INF] * (MX * 2 + 1) for _ in range(2)]
i, j = 0, 1
for _ in range(N):
ai, bi, ci = map(int, input().split())
x = Ma * bi - Mb * ai # Σai:Σbi=Ma:Mb<->Ma*Σbi-Mb*Σai=0
for k in range(-MX, MX + 1):
dp[j][k] = dp[i][k]
dp[j][x] = min(dp[j][x], ci)
for k in range(-MX + x, MX + 1):
dp[j][k] = min(
dp[j][k],
dp[i][k - x] + ci
)
i, j = j, i
res = dp[i][0]
print(-1 if res == INF else res)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4b076855d9faf7d4b9b52c1ba3bcabde38de220d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3477.py
|
445b282b68ddf7bc5304da572da944985b261730
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
"""
Problem B. Tidy Numbers
Problem
Tatiana likes to keep things tidy. Her toys are sorted from smallest to largest,
her pencils are sorted from shortest to longest and her computers from oldest to
newest. One day, when practicing her counting skills, she noticed that some
integers, when written in base 10 with no leading zeroes, have their digits
sorted in non-decreasing order. Some examples of this are 8, 123, 555, and
224488. She decided to call these numbers tidy. Numbers that do not have this
property, like 20, 321, 495 and 999990, are not tidy.
She just finished counting all positive integers in ascending order from 1 to N.
What was the last tidy number she counted?
Input
The first line of the input gives the number of test cases, T. T lines follow.
Each line describes a test case with a single integer N, the last number counted
by Tatiana.
Output
For each test case, output one line containing Case #x: y, where x is the test
case number (starting from 1) and y is the last tidy number counted by Tatiana.
Limits
1 ≤ T ≤ 100.
Small dataset
1 ≤ N ≤ 1000.
Large dataset
1 ≤ N ≤ 1018.
Sample
Input
Output
4
132
1000
7
111111111111111110
Case #1: 129
Case #2: 999
Case #3: 7
Case #4: 99999999999999999
"""
def get_closest_tidy_number(n):
    """Return the largest "tidy" number that is <= n.

    A tidy number has its decimal digits in non-decreasing order
    (e.g. 8, 123, 224488).  Every single-digit number is tidy.
    """
    if n < 10:
        return n
    digits = list(str(n))
    # Sweep right-to-left: whenever a digit exceeds its right neighbour,
    # decrement it and remember the leftmost position whose suffix must
    # later be filled with 9s.  Lexicographic comparison of single
    # characters equals numeric comparison for '0'..'9'.
    first_nine = len(digits)
    for i in range(len(digits) - 1, 0, -1):
        if digits[i - 1] > digits[i]:
            digits[i - 1] = str(int(digits[i - 1]) - 1)
            first_nine = i
    for j in range(first_nine, len(digits)):
        digits[j] = '9'
    # int() drops any leading zero produced by decrementing the first digit.
    return int(''.join(digits))
# Driver: read T test cases from stdin; for each, read N and print the
# largest tidy number <= N in Code Jam's "Case #i: answer" format.
test_cases = int(input())
for i in range(1, test_cases + 1):
    # The value is an integer, not a string (the previous name `input_str`
    # was misleading).
    n = int(input())
    tidy_number = get_closest_tidy_number(n)
    print("Case #{}: {}".format(i, tidy_number))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
68e4482c14a3dab16659aa7b39e7e1d5f4c639ed
|
edd1adb88112045d16d3e6417117d45ceed4a634
|
/classical/tidybot-opt14-strips/api.py
|
fb141663dc9a4e046bd1d3dc18576e2df06bd7ef
|
[] |
no_license
|
AI-Planning/classical-domains
|
26de25bf23622f95c877960c1d52f444922d8737
|
4bd0b42d89ea02bd38af6f93cf20a0ab0cbda9d9
|
refs/heads/main
| 2023-04-27T07:55:55.832869
| 2023-03-29T01:46:11
| 2023-03-29T01:46:11
| 253,298,999
| 24
| 12
| null | 2023-04-18T01:45:39
| 2020-04-05T18:02:53
|
PDDL
|
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
# Registry consumed by the planning-domains API: the IPC-2014 Tidybot
# domain, one (domain.pddl, problem.pddl) pair per instance p01-p20.
domains = [
    {'description': 'The Tidybot domain models a household cleaning task, in which one or more robots must pick up a set of objects and put them into goal locations. The world is structured as a 2d grid, divided into navigable locations and surfaces on which objects may lie. Robots have a gripper, which moves relative to the robot, up to some maximum radius. Existing objects block the gripper, so that it may be necessary to move one object out of the way to put another one down. Robots can carry one object at a time in the gripper, but may also make use of a cart, that can hold multiple objects. The instance generator creates worlds that contain rectangular surfaces ("tables"), as well as U-shaped enclosures ("cupboards"), which are the goal locations of objects.',
     'ipc': '2014',
     'name': 'tidybot',
     'problems': [('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p01.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p02.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p03.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p04.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p05.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p06.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p07.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p08.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p09.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p10.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p11.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p12.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p13.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p14.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p15.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p16.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p17.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p18.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p19.pddl'),
                  ('tidybot-opt14-strips/domain.pddl',
                   'tidybot-opt14-strips/p20.pddl')]}
]
|
[
"christian.muise@gmail.com"
] |
christian.muise@gmail.com
|
9704f2152ae475830dc15c917f3fe61bda494b73
|
55ceefc747e19cdf853e329dba06723a44a42623
|
/_CodeTopics/LeetCode/201-400/000387/000387.py
|
f9281d3ccb22db12c9f847e92d1c3e8f262be557
|
[] |
no_license
|
BIAOXYZ/variousCodes
|
6c04f3e257dbf87cbe73c98c72aaa384fc033690
|
ee59b82125f100970c842d5e1245287c484d6649
|
refs/heads/master
| 2023-09-04T10:01:31.998311
| 2023-08-26T19:44:39
| 2023-08-26T19:44:39
| 152,967,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
class Solution(object):
    def firstUniqChar(self, s):
        """Return the index of the first non-repeating character in s.

        :type s: str
        :rtype: int -- index of the first character occurring exactly once,
                or -1 if every character repeats (or s is empty).
        """
        # collections.Counter replaces the hand-rolled frequency dict:
        # one O(n) counting pass, then one O(n) scan.
        from collections import Counter
        counts = Counter(s)
        # enumerate instead of range(len(s)) for idiomatic index+char access.
        for i, ch in enumerate(s):
            if counts[ch] == 1:
                return i
        return -1
"""
https://leetcode-cn.com/submissions/detail/133018753/
104 / 104 个通过测试用例
状态:通过
执行用时: 108 ms
内存消耗: 13.8 MB
执行用时:108 ms, 在所有 Python 提交中击败了79.54%的用户
内存消耗:13.8 MB, 在所有 Python 提交中击败了21.14%的用户
"""
|
[
"noreply@github.com"
] |
BIAOXYZ.noreply@github.com
|
eb6724585a47c16c7058930111a03405d5276fc7
|
69439004c494c2d56018468d3fec8c9e56036fc8
|
/tests/zeus/utils/test_auth.py
|
5c2197a339a137df799456193c58afdd897db536
|
[
"Apache-2.0"
] |
permissive
|
buchiputaobutuputaopi/zeus-1
|
6dbc54e65c925040b1c1e01683625cea49299b4e
|
8a606642d9ef8f239df2e8d7079ea4d130d78cb3
|
refs/heads/master
| 2021-06-25T07:26:52.278251
| 2017-09-06T03:53:04
| 2017-09-06T03:53:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from zeus import auth
def test_login_user(client, default_user):
    """Logging a user in must store their id and an expiry in the session."""
    expected_uid = default_user.id
    with client.session_transaction() as sess:
        auth.login_user(expected_uid, session=sess)
        # The session now carries the authenticated user id plus an expiry.
        assert sess['uid'] == expected_uid
        assert sess['expire']
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
05a469cc480e500bf829d0d976976b2b96478216
|
2d4af29250dca8c72b74e190e74d92f1467120a0
|
/TaobaoSdk/Response/SimbaAdgroupOnlineitemsGetResponse.py
|
f660d7d21eb7fabf204fd071e5a8506e12f10f55
|
[] |
no_license
|
maimiaolmc/TaobaoOpenPythonSDK
|
2c671be93c40cf487c0d7d644479ba7e1043004c
|
d349aa8ed6229ce6d76a09f279a0896a0f8075b3
|
refs/heads/master
| 2020-04-06T03:52:46.585927
| 2014-06-09T08:58:27
| 2014-06-09T08:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,175
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 获取用户上架在线销售的全部宝贝
# @author wuliang@maimiaotech.com
# @date 2013-03-07 19:54:48
# @version: 0.0.0
from datetime import datetime
import os
import sys
import time
# Pick the first available JSON encoder, preferring demjson, then
# simplejson, then the stdlib json module; _jsonEnode ends up bound to
# whichever encode/dumps callable was found, or an Exception is raised
# if none can be imported.
_jsonEnode = None
try:
    import demjson
    _jsonEnode = demjson.encode
except Exception:
    try:
        import simplejson
    except Exception:
        try:
            import json
        except Exception:
            raise Exception("Can not import any json library")
        else:
            _jsonEnode = json.dumps
    else:
        _jsonEnode = simplejson.dumps

def __getCurrentPath():
    # Directory containing this file.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))

# Make the SDK package root importable so the "Domain.*" modules used by
# the response classes below can be resolved.
__parentPath = os.path.normpath(os.path.join(__getCurrentPath(), os.path.pardir))
if __parentPath not in sys.path:
    sys.path.insert(0, __parentPath)
from Domain.SimbaItemPartition import SimbaItemPartition
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Response: 获取用户上架在线销售的全部宝贝</SPAN>
# <UL>
# </UL>
class SimbaAdgroupOnlineitemsGetResponse(object):
    """Response wrapper for taobao.simba.adgroup.onlineitems.get.

    Holds the raw response status/body, the decoded ``page_item`` payload
    (a paginated ``SimbaItemPartition``) and the Taobao error fields
    (``code``/``msg``/``sub_code``/``sub_msg``).

    NOTE(review): this module targets Python 2 (``basestring`` and
    ``dict.keys()[0]`` below); kept as-is apart from targeted fixes.
    """

    def __init__(self, kargs=None):
        # Avoid the mutable default argument; None means "empty response".
        if kargs is None:
            kargs = {}
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(SimbaAdgroupOnlineitemsGetResponse, self).__init__()
        # Raw request status (dict) and raw response body (str).
        self.responseStatus = None
        self.responseBody = None
        # Taobao API error fields; all None on success.
        self.code = None
        self.msg = None
        self.sub_code = None
        self.sub_msg = None
        # Paginated items (SimbaItemPartition) decoded from the response.
        self.page_item = None
        self.__init(kargs)

    def isSuccess(self):
        """A call succeeded iff neither error code was set."""
        return self.code is None and self.sub_code is None

    def _newInstance(self, name, value):
        """Convert the raw JSON value for property `name` into its declared
        Python type (scalar or list, per _getPropertyType)."""
        types = self._getPropertyType(name)
        propertyType = types[0]
        isArray = types[1]
        if propertyType == bool:
            if isArray:
                if not value:
                    return []
                return [x for x in value[value.keys()[0]]]
            else:
                return value
        elif propertyType == datetime:
            # Taobao timestamps use this fixed format.
            fmt = "%Y-%m-%d %H:%M:%S"
            if isArray:
                if not value:
                    return []
                return [datetime.strptime(x, fmt) for x in value[value.keys()[0]]]
            else:
                return datetime.strptime(value, fmt)
        elif propertyType == str:
            if isArray:
                if not value:
                    return []
                return [x for x in value[value.keys()[0]]]
            else:
                # Some APIs (e.g. taobao.simba.rpt.adgroupbase.get) declare a
                # string field that the JSON layer already decoded into a
                # container; re-encode it back to a JSON string in that case.
                if not isinstance(value, basestring):
                    return _jsonEnode(value)
                return value
        else:
            if isArray:
                if not value:
                    return []
                return [propertyType(x) for x in value[value.keys()[0]]]
            else:
                return propertyType(value)

    def _getPropertyType(self, name):
        """Return (python_type, is_array) for the named response property."""
        properties = {
            "page_item": "SimbaItemPartition",
        }
        levels = {
            "page_item": "Object",
        }
        nameType = properties[name]
        pythonType = None
        if nameType == "Number":
            pythonType = int
        elif nameType == "String":
            pythonType = str
        elif nameType == 'Boolean':
            pythonType = bool
        elif nameType == "Date":
            pythonType = datetime
        elif nameType == 'Field List':
            # BUG FIX: this branch previously read "pythonType == str",
            # a no-op comparison that left pythonType as None.
            pythonType = str
        elif nameType == 'Price':
            pythonType = float
        elif nameType == 'byte[]':
            pythonType = str
        else:
            # Domain object types are resolved from the Domain package.
            pythonType = getattr(sys.modules["Domain.%s" % nameType], nameType)
        # Scalar vs list-of-values, driven by the declared level.
        level = levels[name]
        if "Array" in level:
            return (pythonType, True)
        else:
            return (pythonType, False)

    def __init(self, kargs):
        # "key in dict" replaces the deprecated dict.has_key(): identical
        # semantics on Python 2 and also valid on Python 3.
        if "page_item" in kargs:
            self.page_item = self._newInstance("page_item", kargs["page_item"])
        if "code" in kargs:
            self.code = kargs["code"]
        if "msg" in kargs:
            self.msg = kargs["msg"]
        if "sub_code" in kargs:
            self.sub_code = kargs["sub_code"]
        if "sub_msg" in kargs:
            self.sub_msg = kargs["sub_msg"]
|
[
"liyangmin@maimiaotech.com"
] |
liyangmin@maimiaotech.com
|
7b0209b5129a33a20957245a3ed25f1bda5ed1ce
|
e6d556d97081576da6469cf1e8c1dd14565db2da
|
/code/tkinter/icons.py
|
32bdcc5c4a086dae60cb06cb946bb8bd9480cc34
|
[] |
no_license
|
Scotth72/codePractice
|
0b7c795050d08a34dff2b99507b20094d233739a
|
475482fab0d69f93d936dc1ba8c2511174089b7c
|
refs/heads/master
| 2023-01-19T02:58:40.977634
| 2020-11-26T15:55:47
| 2020-11-26T15:55:47
| 313,863,106
| 0
| 0
| null | 2020-11-26T15:55:48
| 2020-11-18T08:00:39
|
Python
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
from tkinter import *
from PIL import ImageTk, Image
# Minimal Tkinter window demonstrating how to set a window icon.
root = Tk()
root.title("Learn to use Icons")
# NOTE(review): iconbitmap() expects a .ico/.xbm bitmap on most platforms;
# passing a .png may raise TclError -- confirm, or use iconphoto() instead.
root.iconbitmap('../icons/mando.png')
# A single button that exits the main loop.
btn_quit = Button(root, text="Exit Program", command=root.quit)
btn_quit.pack()
# Hand control to Tk's event loop.
root.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
96b9713d9cbdcfaf580b86638d5ce9c0f08f5285
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_4/run_cfg.py
|
d8c7fb4def9e957dabac9d51c5ec12ae8fa44d92
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
# Make the job directory importable so base_cfg (the shared cmsRun
# configuration this job specialises) can be loaded.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source with this job's slice of cmgTuple files.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_23_1_Yum.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_24_1_892.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_25_1_9AW.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_26_1_347.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_27_1_dAe.root')
    )
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
1b2603db33a6d30fc510ef9b6fd941c16bf4721d
|
c4750ec6eeda0092e3a5515d4878cfe42e117e90
|
/test/test_inference/test_compiled.py
|
d863528604a3ebdc39f003c9c320c12eab01a952
|
[
"MIT"
] |
permissive
|
phgn0/jedi
|
6e5e83778fe699d9735ab52a46ee94dec2a8be99
|
eb9af151ea0f447ab9d5d00d14e8fee542bc09d1
|
refs/heads/master
| 2020-09-02T23:38:36.442447
| 2019-11-10T14:03:49
| 2019-11-10T14:03:49
| 219,332,443
| 1
| 0
|
NOASSERTION
| 2019-11-03T16:42:27
| 2019-11-03T16:42:26
| null |
UTF-8
|
Python
| false
| false
| 5,139
|
py
|
from textwrap import dedent
import math
import sys
from collections import Counter
from datetime import datetime
import pytest
from jedi.inference import compiled
from jedi.inference.compiled.access import DirectObjectAccess
from jedi.inference.gradual.conversion import _stub_to_python_value_set
def test_simple(inference_state, environment):
    # Inferring u'_str_'.upper must yield exactly one value: str (unicode
    # on Python 2).
    obj = compiled.create_simple_object(inference_state, u'_str_')
    upper, = obj.py__getattribute__(u'upper')
    objs = list(upper.execute_with_values())
    assert len(objs) == 1
    if environment.version_info.major == 2:
        expected = 'unicode'
    else:
        expected = 'str'
    assert objs[0].name.string_name == expected

def test_builtin_loading(inference_state):
    # str.__init__ comes from a typeshed stub: it has a tree node but an
    # empty docstring.
    string, = inference_state.builtins_module.py__getattribute__(u'str')
    from_name, = string.py__getattribute__(u'__init__')
    assert from_name.tree_node
    assert not from_name.py__doc__()  # It's a stub

def test_next_docstr(inference_state):
    next_ = compiled.builtin_from_name(inference_state, u'next')
    assert next_.tree_node is not None
    assert next_.py__doc__() == ''  # It's a stub
    # The non-stub counterpart must expose the real builtin docstring.
    for non_stub in _stub_to_python_value_set(next_):
        assert non_stub.py__doc__() == next.__doc__

def test_parse_function_doc_illegal_docstr():
    # A docstring signature with an unbalanced bracket must be rejected
    # gracefully (empty params/return).
    docstr = """
    test_func(o
    doesn't have a closing bracket.
    """
    assert ('', '') == compiled.value._parse_function_doc(docstr)
def test_doc(inference_state):
    """
    Even CompiledObject docs always return empty docstrings - not None, that's
    just a Jedi API definition.
    """
    str_ = compiled.create_simple_object(inference_state, u'')
    # Equals `''.__getnewargs__`
    obj, = str_.py__getattribute__(u'__getnewargs__')
    assert obj.py__doc__() == ''

def test_string_literals(Script, environment):
    # Literal prefixes must map to the right builtin type per Python version.
    def typ(string):
        d = Script("a = %s; a" % string).goto_definitions()[0]
        return d.name
    assert typ('""') == 'str'
    assert typ('r""') == 'str'
    if environment.version_info.major > 2:
        assert typ('br""') == 'bytes'
        assert typ('b""') == 'bytes'
        assert typ('u""') == 'str'
    else:
        assert typ('b""') == 'str'
        assert typ('u""') == 'unicode'

def test_method_completion(Script, environment):
    # Bound-method attributes such as __func__ should be completed.
    code = dedent('''
    class Foo:
        def bar(self):
            pass
    foo = Foo()
    foo.bar.__func__''')
    assert [c.name for c in Script(code).completions()] == ['__func__']

def test_time_docstring(Script):
    import time
    comp, = Script('import time\ntime.sleep').completions()
    # Raw docstring equals the builtin's; the full one is prefixed with the
    # stub signature.
    assert comp.docstring(raw=True) == time.sleep.__doc__
    expected = 'sleep(secs: float) -> None\n\n' + time.sleep.__doc__
    assert comp.docstring() == expected
def test_dict_values(Script, environment):
    if environment.version_info.major == 2:
        # It looks like typeshed for Python 2 returns Any.
        pytest.skip()
    # Indexing sys.modules with an unknown key should still infer a module.
    assert Script('import sys\nsys.modules["alshdb;lasdhf"]').goto_definitions()

def test_getitem_on_none(Script):
    # Subscripting None yields no definitions and exactly one analysis issue.
    script = Script('None[1j]')
    assert not script.goto_definitions()
    issue, = script._inference_state.analysis
    assert issue.name == 'type-error-not-subscriptable'
def _return_int():
return 1
@pytest.mark.parametrize(
    'attribute, expected_name, expected_parent', [
        ('x', 'int', 'builtins'),
        ('y', 'int', 'builtins'),
        ('z', 'bool', 'builtins'),
        ('cos', 'cos', 'math'),
        ('dec', 'Decimal', 'decimal'),
        ('dt', 'datetime', 'datetime'),
        ('ret_int', '_return_int', 'test.test_inference.test_compiled'),
    ]
)
def test_parent_context(same_process_inference_state, attribute, expected_name, expected_parent):
    # Each attribute of C must resolve to its defining module as parent
    # context when accessed through a CompiledObject.
    import decimal

    class C:
        x = 1
        y = int
        z = True
        cos = math.cos
        dec = decimal.Decimal(1)
        dt = datetime(2000, 1, 1)
        ret_int = _return_int

    o = compiled.CompiledObject(
        same_process_inference_state,
        DirectObjectAccess(same_process_inference_state, C)
    )
    x, = o.py__getattribute__(attribute)
    assert x.py__name__() == expected_name
    module_name = x.parent_context.py__name__()
    if module_name == '__builtin__':
        module_name = 'builtins'  # Python 2
    assert module_name == expected_parent
    # Compiled modules sit at the top of the context hierarchy.
    assert x.parent_context.parent_context is None
@pytest.mark.skipif(sys.version_info[0] == 2, reason="Ignore Python 2, because EOL")
@pytest.mark.parametrize(
    'obj, expected_names', [
        ('', ['str']),
        (str, ['str']),
        (''.upper, ['str', 'upper']),
        (str.upper, ['str', 'upper']),
        (math.cos, ['cos']),
        (Counter, ['Counter']),
        (Counter(""), ['Counter']),
        (Counter.most_common, ['Counter', 'most_common']),
        (Counter("").most_common, ['Counter', 'most_common']),
    ]
)
def test_qualified_names(same_process_inference_state, obj, expected_names):
    # Qualified names must include the defining class for methods and the
    # bare name for plain callables/instances.
    o = compiled.CompiledObject(
        same_process_inference_state,
        DirectObjectAccess(same_process_inference_state, obj)
    )
    assert o.get_qualified_names() == tuple(expected_names)
|
[
"davidhalter88@gmail.com"
] |
davidhalter88@gmail.com
|
6429ff3a5cdd451090741ad95d4eb7c834662443
|
7ae0f100b49763f79b276260bbc0e87bd904da3e
|
/src/wdf/management/commands/prepare_dump.py
|
e65ea353701bb3108f1a5dec39c80cfd359756f9
|
[] |
no_license
|
wondersell/wildsearch-indexer
|
d88a5b3bce17acc1cb61d365f55ab5d9f63f61ae
|
67d5f29f6d405c055cfa211ddf0b70521382a671
|
refs/heads/master
| 2023-07-19T00:33:34.371231
| 2020-12-31T11:20:00
| 2020-12-31T11:20:00
| 285,488,583
| 2
| 0
| null | 2021-07-19T06:26:44
| 2020-08-06T06:09:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
import logging
from django.core.management.base import BaseCommand
from wdf.exceptions import DumpStateError
from wdf.indexer import Indexer
from wdf.tasks import prepare_dump
class Command(BaseCommand):
    """Management command that queues (or runs inline) dump preparation
    for a scraping job."""

    help = 'Prepares job for importing'  # noqa: VNE003

    def add_arguments(self, parser):
        # job_id is positional; chunk_size tunes Indexer batch reads;
        # background=yes (default) defers the work to a Celery task.
        parser.add_argument('job_id', type=str)
        parser.add_argument('--chunk_size', type=int, default=5000, required=False)
        parser.add_argument('--background', choices=['yes', 'no'], default='yes')

    def handle(self, *args, **options):
        # Mirror log output to the console for interactive runs.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter('[%(levelname)s] %(name)s: %(message)s'))
        logger = logging.getLogger('')
        logger.addHandler(console)
        job_id = options['job_id']
        if options['background'] == 'yes':
            # Fire-and-forget via the task queue.
            prepare_dump.delay(job_id=job_id)
            self.stdout.write(self.style.SUCCESS(f'Job #{job_id} added to process queue for preparing'))
        else:
            # Synchronous path: run the indexer here and report failures.
            try:
                indexer = Indexer(get_chunk_size=options['chunk_size'])
                indexer.prepare_dump(job_id=options['job_id'])
            except DumpStateError as error:
                self.stdout.write(self.style.ERROR(f'Job #{job_id} processing failed: {error}'))
|
[
"artem.kiselev@gmail.com"
] |
artem.kiselev@gmail.com
|
c4e8dbc6684184e78245deb69b8a5f098817f5d9
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/CrackingTheCodingInterview_6/01_08_zero-matrix-lcci.py
|
064080aa330c0cdb7f50aa9177f2c29ebc6ce08e
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-12 00:02:47
# @Last Modified : 2020-07-12 00:02:47
# @Mail : lostlorder@gmail.com
# @Version : 1.0.0
"""
# 编写一种算法,若M × N矩阵中某个元素为0,则将其所在的行与列清零。
#
#
#
# 示例 1:
#
# 输入:
# [
# [1,1,1],
# [1,0,1],
# [1,1,1]
# ]
# 输出:
# [
# [1,0,1],
# [0,0,0],
# [1,0,1]
# ]
#
#
# 示例 2:
#
# 输入:
# [
# [0,1,2,0],
# [3,4,5,2],
# [1,3,1,5]
# ]
# 输出:
# [
# [0,0,0,0],
# [0,4,5,0],
# [0,3,1,0]
# ]
#
# Related Topics 数组
# 👍 10 👎 0
"""
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """Zero out every row and column that contains a 0, in place.

        Two passes: first collect the coordinates of all zero cells, then
        blank the recorded rows and columns.  O(m*n) time, O(m+n) extra
        space.  Returns None -- `matrix` is mutated.
        """
        if not matrix:
            return
        zero_rows, zero_cols = set(), set()
        for r, row in enumerate(matrix):
            for c, cell in enumerate(row):
                if cell == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        for r, row in enumerate(matrix):
            for c in range(len(row)):
                if r in zero_rows or c in zero_cols:
                    row[c] = 0
# leetcode submit region end(Prohibit modification and deletion)
@pytest.mark.parametrize("args,expected", [
(
[
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[0, 0, 0],
[1, 0, 1]
]),
pytest.param(
[
[0, 1, 2, 0],
[3, 4, 5, 2],
[1, 3, 1, 5]
],
[
[0, 0, 0, 0],
[0, 4, 5, 0],
[0, 3, 1, 0]
]),
])
def test_solutions(args, expected):
Solution().setZeroes(args)
assert args == expected
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=tee-sys", __file__])
|
[
"wzy-511@163.com"
] |
wzy-511@163.com
|
0c9f915ad0956041421ba3152c8f1d36b03896a0
|
b0a64cf2d36c7da2c81f920cab6f67e8a8e5b2d4
|
/models/VGG_mini_BN_PReLU.py
|
c0390f9195d03450ae354830944220579419c08a
|
[] |
no_license
|
OminiaVincit/chainer-cifar10
|
69407a114e35b9100af56142092ee9e14577a423
|
449c55f205ea5fd59313598af0f27feb51b18da4
|
refs/heads/master
| 2021-01-19T06:31:02.379472
| 2015-07-15T20:29:14
| 2015-07-15T20:29:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from chainer import Variable, FunctionSet
import chainer.functions as F
class VGG_mini_BN_PReLU(FunctionSet):
    """
    VGGnet for CIFAR-10

    Reduced VGG-style network: two batch-normalised 3x3 conv blocks, a
    deeper conv block without batch norm, then three fully connected
    layers.  Every activation is a learned PReLU.
    """
    def __init__(self):
        super(VGG_mini_BN_PReLU, self).__init__(
            # Block 1: 3 -> 64 channels (two 3x3 convs, BN, PReLU).
            conv1_1=F.Convolution2D(3, 64, 3, stride=1, pad=1),
            bn1_1=F.BatchNormalization(64, decay=0.9, eps=1e-5),
            prelu1_1=F.PReLU(64),
            conv1_2=F.Convolution2D(64, 64, 3, stride=1, pad=1),
            bn1_2=F.BatchNormalization(64, decay=0.9, eps=1e-5),
            prelu1_2=F.PReLU(64),
            # Block 2: 64 -> 128 channels.
            conv2_1=F.Convolution2D(64, 128, 3, stride=1, pad=1),
            bn2_1=F.BatchNormalization(128, decay=0.9, eps=1e-5),
            prelu2_1=F.PReLU(128),
            conv2_2=F.Convolution2D(128, 128, 3, stride=1, pad=1),
            bn2_2=F.BatchNormalization(128, decay=0.9, eps=1e-5),
            prelu2_2=F.PReLU(128),
            # Block 3: 128 -> 256 channels (four convs, no batch norm).
            conv3_1=F.Convolution2D(128, 256, 3, stride=1, pad=1),
            prelu3_1=F.PReLU(256),
            conv3_2=F.Convolution2D(256, 256, 3, stride=1, pad=1),
            prelu3_2=F.PReLU(256),
            conv3_3=F.Convolution2D(256, 256, 3, stride=1, pad=1),
            prelu3_3=F.PReLU(256),
            conv3_4=F.Convolution2D(256, 256, 3, stride=1, pad=1),
            prelu3_4=F.PReLU(256),
            # Classifier: 4096 in-features (presumably 256 ch x 4x4 spatial
            # after three 2x2 pools on 32x32 CIFAR input -- TODO confirm).
            fc4=F.Linear(4096, 1024),
            prelu4=F.PReLU(),
            fc5=F.Linear(1024, 1024),
            prelu5=F.PReLU(),
            fc6=F.Linear(1024, 10)
        )

    def forward(self, x_data, y_data, train=True):
        """Run the network.

        Returns (loss, accuracy) when train=True, and
        (loss, accuracy, logits) otherwise.
        """
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)
        # Block 1 -> 2x2 max-pool -> dropout.
        h = self.prelu1_1(self.bn1_1(self.conv1_1(x)))
        h = self.prelu1_2(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        # Block 2 -> 2x2 max-pool -> dropout.
        h = self.prelu2_1(self.bn2_1(self.conv2_1(h)))
        h = self.prelu2_2(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        # Block 3 -> 2x2 max-pool -> dropout.
        h = self.prelu3_1(self.conv3_1(h))
        h = self.prelu3_2(self.conv3_2(h))
        h = self.prelu3_3(self.conv3_3(h))
        h = self.prelu3_4(self.conv3_4(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        # Fully connected classifier with heavier dropout.
        h = F.dropout(self.prelu4(self.fc4(h)), train=train, ratio=0.5)
        h = F.dropout(self.prelu5(self.fc5(h)), train=train, ratio=0.5)
        h = self.fc6(h)
        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
|
[
"shunta.saito@gmail.com"
] |
shunta.saito@gmail.com
|
d1e8d70b961b1be945693a91169e369f2646ef5b
|
ac216a2cc36f91625e440247986ead2cd8cce350
|
/appengine/findit/pipelines/test/send_notification_for_culprit_pipeline_test.py
|
511524ebf3afcb0224df7cc05d4923d14340ae07
|
[
"BSD-3-Clause"
] |
permissive
|
xinghun61/infra
|
b77cdc566d9a63c5d97f9e30e8d589982b1678ab
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
refs/heads/master
| 2023-01-12T21:36:49.360274
| 2019-10-01T18:09:22
| 2019-10-01T18:09:22
| 212,168,656
| 2
| 1
|
BSD-3-Clause
| 2023-01-07T10:18:03
| 2019-10-01T18:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from common.waterfall import failure_type
from pipelines.send_notification_for_culprit_pipeline import (
SendNotificationForCulpritPipeline)
from services import constants
from services import culprit_action
from services.parameters import SendNotificationForCulpritParameters
from waterfall.test import wf_testcase
class SendNotificationForCulpritPipelineTest(wf_testcase.WaterfallTestCase):
  """The pipeline should forward its input to
  culprit_action.SendNotificationForCulprit and report its result."""

  @mock.patch.object(
      culprit_action, 'SendNotificationForCulprit', return_value=True)
  def testSendNotification(self, _):
    # With the service call mocked to succeed, run() must return True.
    pipeline_input = SendNotificationForCulpritParameters(
        cl_key='mockurlsafekey',
        force_notify=True,
        revert_status=constants.CREATED_BY_SHERIFF,
        failure_type=failure_type.COMPILE)
    pipeline = SendNotificationForCulpritPipeline(pipeline_input)
    self.assertTrue(pipeline.run(pipeline_input))
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
ee28104231e39d74f9252de0420dfa501e38557a
|
6efacaed48c9c2015b20baae5b1e7812cf2614a0
|
/Po/test/Abackground_mouse_one.py
|
533335d6d971031ab7fe5f3398b20fcedabe8681
|
[] |
no_license
|
Felixshao/play
|
53e12b7b592634a3e5515addde978e1b2a2a4591
|
4364cb91141bbbca835688d19bddb87aa0beb6b4
|
refs/heads/master
| 2021-05-23T19:49:56.095083
| 2020-04-07T06:09:10
| 2020-04-07T06:09:10
| 253,441,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
import pygame, os
from pygame.locals import *
from sys import exit
from config.GetProjectPath import get_project_path
path = get_project_path()
background_img_filepath = os.path.join(path, 'img', 'sushiplate.jpg')
mouse_img_filepath = os.path.join(path, 'img', 'fugu.png')
# Initialise pygame and prepare the hardware.
pygame.init()
# Create the window; args: resolution, flags (0 = no special features), colour depth.
screen = pygame.display.set_mode((1920, 1080), 0, 32)
# Set the window title.
pygame.display.set_caption('Abcakground_mouse_one')
# Load and convert images: convert() turns the data into a display-format
# Surface, convert_alpha() keeps per-pixel transparency.
background = pygame.image.load(background_img_filepath).convert()
mouse = pygame.image.load(mouse_img_filepath).convert_alpha()
# Main game loop.
while True:
    for event in pygame.event.get():
        # Exit the game when a QUIT event arrives.
        if event.type == QUIT:
            exit()
    # Draw the background; blit() args: Surface, top-left coordinates.
    screen.blit(background, (0, 0))
    # Current mouse position.
    x, y = pygame.mouse.get_pos()
    # Centre the cursor sprite on the pointer.
    x -= mouse.get_width() / 2
    y -= mouse.get_height() / 2
    # Draw the cursor sprite.
    screen.blit(mouse, (x, y))
    # Refresh the display.
    pygame.display.update()
|
[
"shaoyufei1234@163.com"
] |
shaoyufei1234@163.com
|
b5ddb5c8af232999eb8ae226c25d305066c76157
|
fddc2ed5301b00f668bcb772518e0348db459538
|
/convokit/communityEmbedder.py
|
143d36c2f5b9fefe78dddccf919d797401191a38
|
[
"MIT"
] |
permissive
|
deepthimhegde/Cornell-Conversational-Analysis-Toolkit
|
289fd22a81d9d06c7aeb5270c11acc4ec40424bf
|
eb9a103f8d5e34d378b0b6d6bda3fa43587363a1
|
refs/heads/master
| 2020-05-29T11:16:01.765154
| 2019-05-17T18:29:27
| 2019-05-17T18:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,889
|
py
|
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from collections import defaultdict
from .transformer import Transformer
class CommunityEmbedder(Transformer):
    """
    Must be run after threadEmbedder.fit_transform()

    Groups threads together into communities in this space for
    visualization or other such purposes: each thread embedding is
    labelled with its community, optionally reduced in dimensionality,
    normalised and averaged per community.

    :param community_key: Key in "meta" dictionary of each utterance
        whose corresponding value we'll use as the community label for that
        utterance (see threadEmbedder)
    :param n_components: Number of dimensions to embed communities into
    :param method: Embedding method; "svd", "tsne" or "none"
    """

    def __init__(self, community_key=None, n_components=2, method="none"):
        self.community_key = community_key
        self.n_components = n_components
        self.method = method

    def transform(self, corpus):
        """
        Same as fit_transform()
        """
        return self.fit_transform(corpus)

    def fit_transform(self, corpus):
        """
        :param corpus: the Corpus to use
        :return: a Corpus with new meta key: "communityEmbedder",
            value: Dict, containing "pts": an array with rows corresponding
            to embedded communities, and "labels": an array whose ith entry is
            the community of the ith row of X.
        """
        if self.community_key is None:
            raise RuntimeError("Must specify community_key to retrieve label information from utterance")

        corpus_meta = corpus.get_meta()
        if "threadEmbedder" not in corpus_meta:
            raise RuntimeError("Missing threadEmbedder metadata: "
                               "threadEmbedder.fit_transform() must be run on the Corpus first")

        thread_embed_data = corpus_meta["threadEmbedder"]
        thread_matrix = thread_embed_data["X"]
        roots = thread_embed_data["roots"]

        # Select the dimensionality-reduction class (None = identity).
        method_name = self.method.lower()
        if method_name == "svd":
            reducer_cls = TruncatedSVD
        elif method_name == "tsne":
            reducer_cls = TSNE
        elif method_name == "none":
            reducer_cls = None
        else:
            raise Exception("Invalid embed_communities embedding method")

        if reducer_cls is None:
            embedded = thread_matrix
        else:
            embedded = reducer_cls(n_components=self.n_components).fit_transform(thread_matrix)

        # Community label for each thread, read from its root utterance.
        thread_labels = [corpus.get_utterance(root).get("meta")[self.community_key]
                         for root in roots]

        # Group the unit-normalised thread vectors by community label.
        grouped = defaultdict(list)
        for vec, label in zip(embedded, thread_labels):
            grouped[label].append(vec / np.linalg.norm(vec))

        labels, groups = zip(*grouped.items())
        # One centroid (mean vector) per community.
        pts = [np.mean(group, axis=0) for group in groups]

        corpus.add_meta("communityEmbedder", {"pts": pts, "labels": labels})
        return corpus
|
[
"calebchiam@gmail.com"
] |
calebchiam@gmail.com
|
765478bbc01b00211d961da6d0bd4bdab237f828
|
208baab269ddffab1a93e7dc70b052d07bf50560
|
/hood/migrations/0002_auto_20200120_1140.py
|
a6e6fe56c70f1ee382edb53a3eebe174b83a3671
|
[] |
no_license
|
marysinaida/Neighborhood
|
a1035f09515ae9a24bed74ddf1263e06db134c94
|
a285df5528bb99d6cb69f9ab41e320682422fe9d
|
refs/heads/master
| 2020-12-13T23:29:18.148498
| 2020-01-21T15:04:53
| 2020-01-21T15:04:53
| 234,562,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,691
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-01-20 08:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11).

    Adds the Business, Neighborhood, Post and Profile models, sets default
    ordering on the (pre-existing) Editor model, and wires Business to its
    Neighborhood/user foreign keys. Generated code: do not hand-edit logic.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('hood', '0001_initial'),
    ]
    operations = [
        # Business: directory entry, ordered alphabetically by name.
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bName', models.CharField(max_length=100)),
                ('bEmail', models.EmailField(max_length=100)),
            ],
            options={
                'ordering': ['bName'],
            },
        ),
        # Neighborhood: the hood itself; 'admin' is the managing user.
        migrations.CreateModel(
            name='Neighborhood',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=50)),
                ('occupants', models.PositiveIntegerField()),
                ('health_contact', models.PositiveIntegerField()),
                ('police_contact', models.PositiveIntegerField()),
                ('hood_pic', models.ImageField(blank=True, upload_to='images/')),
                ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Post: user content scoped to a neighborhood, newest first.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('content', models.TextField()),
                ('image', models.ImageField(blank=True, upload_to='posts/')),
                ('date_posted', models.DateTimeField(auto_now_add=True)),
                ('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Profile: one-to-one extension of the auth user; hood is optional.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True)),
                ('email', models.EmailField(blank=True, max_length=100)),
                ('name', models.CharField(blank=True, max_length=50)),
                ('profile_pic', models.ImageField(blank=True, upload_to='images/')),
                ('hood', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # NOTE(review): 'editor' is declared in 0001_initial (not visible here).
        migrations.AlterModelOptions(
            name='editor',
            options={'ordering': ['first_name']},
        ),
        migrations.AddField(
            model_name='business',
            name='hood',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood'),
        ),
        migrations.AddField(
            model_name='business',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"marydorcassinaida54@gmail.com"
] |
marydorcassinaida54@gmail.com
|
d33535490a49ccc63731773d42cd5a17f661d234
|
a2948d87a8f1901c6faf922f7b8cfba825f84d9b
|
/resources.py
|
c5484e2f0cc861b20e66986f69bf1105fbfacb38
|
[] |
no_license
|
sourcery-ai-bot/4x2d
|
03360fdcd5cfb135acbe0dfbdf571fb1e4d98a5a
|
68a5daf2410ae6ffe1220bb7ce85b95647097157
|
refs/heads/main
| 2023-03-11T10:38:01.353467
| 2021-02-28T06:57:06
| 2021-02-28T06:57:06
| 344,102,678
| 0
| 0
| null | 2021-03-03T11:25:28
| 2021-03-03T11:25:27
| null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
import os
import sys
def resource_path(relative_path):  # needed for bundling
    """Resolve *relative_path* against the application's base directory.

    Inside a PyInstaller one-file bundle the base is the temporary
    extraction directory exposed as ``sys._MEIPASS``; during normal
    development it is the directory containing this module.
    """
    if hasattr(sys, '_MEIPASS'):
        base_dir = sys._MEIPASS
    else:
        base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, relative_path)
|
[
"morganquirk@gmail.com"
] |
morganquirk@gmail.com
|
48d6e9a8f1cd30cb302f9c81eb5ca4370302e805
|
c190538d85c00e03bf655af83629a5bddfd6d797
|
/src/dcos_e2e_cli/dcos_vagrant/commands/install_dcos.py
|
111fd161ceac49b9b4021c8e8b78de2ff50e1e44
|
[
"Apache-2.0"
] |
permissive
|
yankcrime/dcos-e2e
|
e8d52aa10ecfba029c28b269354fea9fe0f85f7b
|
449ca9ebc98399efc00e424d9131d2634de0471c
|
refs/heads/master
| 2020-05-30T00:00:07.725954
| 2019-05-30T15:57:37
| 2019-05-30T15:57:37
| 189,449,013
| 0
| 0
|
Apache-2.0
| 2019-05-30T16:42:28
| 2019-05-30T16:42:28
| null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
"""
Install DC/OS on a provisioned Vagrant cluster.
"""
from pathlib import Path
from typing import Any, Dict, Optional
import click
from dcos_e2e.backends import Vagrant
from dcos_e2e_cli.common.arguments import installer_argument
from dcos_e2e_cli.common.create import get_config
from dcos_e2e_cli.common.doctor import get_doctor_message
from dcos_e2e_cli.common.install import (
install_dcos_from_path,
run_post_install_steps,
)
from dcos_e2e_cli.common.options import (
cluster_id_option,
extra_config_option,
genconf_dir_option,
license_key_option,
security_mode_option,
variant_option,
verbosity_option,
)
from dcos_e2e_cli.common.utils import command_path
from dcos_e2e_cli.common.variants import get_install_variant
from dcos_e2e_cli.common.workspaces import workspace_dir_option
from ._common import ClusterVMs
from ._wait_for_dcos import wait_for_dcos_option
from .doctor import doctor
from .wait import wait
@click.command('install')
@installer_argument
@extra_config_option
@workspace_dir_option
@variant_option
@license_key_option
@genconf_dir_option
@security_mode_option
@cluster_id_option
@verbosity_option
@wait_for_dcos_option
@click.pass_context
def install_dcos(
    ctx: click.core.Context,
    installer: Path,
    extra_config: Dict[str, Any],
    variant: str,
    workspace_dir: Path,
    license_key: Optional[Path],
    security_mode: Optional[str],
    cluster_id: str,
    genconf_dir: Optional[Path],
    wait_for_dcos: bool,
) -> None:
    """
    Install DC/OS on a provisioned Vagrant cluster.

    Flow: resolve the DC/OS variant from the installer artifact, build the
    cluster config from the existing VMs plus CLI options, run the installer
    against the cluster, then run post-install checks (optionally waiting
    for DC/OS to come up). Error messages point users at the sibling
    ``doctor``/``wait`` commands.
    """
    # Full command paths for the sibling commands, used in user-facing hints.
    doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
    wait_command_name = command_path(sibling_ctx=ctx, command=wait)
    doctor_message = get_doctor_message(
        doctor_command_name=doctor_command_name,
    )
    # OSS vs enterprise is inferred from the installer when not given.
    dcos_variant = get_install_variant(
        given_variant=variant,
        installer_path=installer,
        workspace_dir=workspace_dir,
        doctor_message=doctor_message,
    )
    cluster_backend = Vagrant()
    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    dcos_config = get_config(
        cluster_representation=cluster_vms,
        extra_config=extra_config,
        dcos_variant=dcos_variant,
        security_mode=security_mode,
        license_key=license_key,
    )
    cluster = cluster_vms.cluster
    install_dcos_from_path(
        cluster=cluster,
        cluster_representation=cluster_vms,
        dcos_config=dcos_config,
        ip_detect_path=cluster_backend.ip_detect_path,
        doctor_message=doctor_message,
        dcos_installer=installer,
        local_genconf_dir=genconf_dir,
    )
    run_post_install_steps(
        cluster=cluster,
        cluster_id=cluster_id,
        dcos_config=dcos_config,
        doctor_command_name=doctor_command_name,
        http_checks=True,
        wait_command_name=wait_command_name,
        wait_for_dcos=wait_for_dcos,
    )
|
[
"adamdangoor@gmail.com"
] |
adamdangoor@gmail.com
|
52425699c2b0f3f4f3701d850f4388930fbaf38d
|
62babb33b9bede95aac217db04636956279bb2e2
|
/sort/topological sort/1385E Directing Edges.py
|
269591b4f10d040c69bde6a7be642cc5a8b56613
|
[] |
no_license
|
tycyd/codeforces
|
0322e31daf18544944c769fd2a50c6d006015e34
|
e0773f069c6c5793f9d9a07b61878a589e375a5f
|
refs/heads/master
| 2023-08-12T05:00:39.467404
| 2021-09-30T16:39:21
| 2021-09-30T16:39:21
| 266,847,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
from sys import stdin, stdout
if __name__ == '__main__':
    # Codeforces 1385E "Directing Edges": orient the undirected edges of a
    # mixed graph so the whole graph is acyclic, or report it is impossible.
    def directing_edges(n, m, ind, dic, seq):
        # Kahn's topological sort over the directed edges only.
        # ind: in-degree per node; dic: directed adjacency; seq: all edges
        # (directed and undirected) in input order.
        q = []
        res = []
        for i in range(n):
            if ind[i] == 0:
                q.append(i)
        while q:
            cur = q.pop()
            res.append(cur)
            if cur in dic:
                for next in dic[cur]:
                    ind[next] -= 1
                    if ind[next] == 0:
                        q.append(next)
        if len(res) < n:
            # A directed cycle exists among the fixed edges — impossible.
            stdout.write("NO\n")
        else:
            stdout.write("YES\n")
            # pos[v] = topological rank of v; orient every edge from the
            # earlier-ranked endpoint to the later-ranked one.
            pos = [0]*n
            for i in range(n):
                pos[res[i]] = i
            for sq in seq:
                if pos[sq[0]] < pos[sq[1]]:
                    print(sq[0]+1, sq[1]+1)
                else:
                    print(sq[1] + 1, sq[0] + 1)
    t = int(stdin.readline())
    for i in range(t):
        n, m = map(int, stdin.readline().split())
        dic = {}
        ind = [0] * n
        seq = []
        for j in range(m):
            # NOTE(review): this inner 't' (edge type) shadows the outer
            # test-case count; harmless here because the outer loop bound
            # was captured by range(), but worth renaming.
            t, x, y = map(int, stdin.readline().split())
            x -= 1
            y -= 1
            seq.append([x, y])
            if t == 1:
                # t == 1 marks an already-directed edge x -> y.
                if x not in dic:
                    dic[x] = []
                dic[x].append(y)
                ind[y] += 1
        directing_edges(n, m, ind, dic, seq)
|
[
"tycyd@hotmail.com"
] |
tycyd@hotmail.com
|
51488b6af889fd61bcc3bde0f432eebce76ef284
|
fb84e82ab80f2af43d3cdcf9a6c0351228d0f682
|
/validate.py
|
e93c4b1bb4adf2936a69d41ba81724c3c0b0e580
|
[] |
no_license
|
doctorwk007/semseg
|
bf1ea79e8e5f9a0084de98e0bd588a2c46af30b0
|
39f7e642014a1e8e21a84d0ff1e0057469b5d8e4
|
refs/heads/master
| 2020-04-12T01:10:35.164155
| 2018-12-15T03:03:27
| 2018-12-15T03:03:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,095
|
py
|
# -*- coding: utf-8 -*-
import torch
import os
import argparse
import cv2
import time
import numpy as np
import visdom
from torch.autograd import Variable
from scipy import misc
from semseg.dataloader.camvid_loader import camvidLoader
from semseg.dataloader.cityscapes_loader import cityscapesLoader
from semseg.dataloader.freespace_loader import freespaceLoader
from semseg.loss import cross_entropy2d
from semseg.metrics import scores
from semseg.modelloader.EDANet import EDANet
from semseg.modelloader.bisenet import BiSeNet
from semseg.modelloader.deeplabv3 import Res_Deeplab_101, Res_Deeplab_50
from semseg.modelloader.drn import drn_d_22, DRNSeg, drn_a_asymmetric_18, drn_a_asymmetric_ibn_a_18, drnseg_a_50, drnseg_a_18, drnseg_a_34, drnseg_e_22, drnseg_a_asymmetric_18, drnseg_a_asymmetric_ibn_a_18, drnseg_d_22, drnseg_d_38
from semseg.modelloader.drn_a_irb import drnsegirb_a_18
from semseg.modelloader.drn_a_refine import drnsegrefine_a_18
from semseg.modelloader.duc_hdc import ResNetDUC, ResNetDUCHDC
from semseg.modelloader.enet import ENet
from semseg.modelloader.enetv2 import ENetV2
from semseg.modelloader.erfnet import erfnet
from semseg.modelloader.fc_densenet import fcdensenet103, fcdensenet56, fcdensenet_tiny
from semseg.modelloader.fcn import fcn, fcn_32s, fcn_16s, fcn_8s
from semseg.modelloader.fcn_mobilenet import fcn_MobileNet, fcn_MobileNet_32s, fcn_MobileNet_16s, fcn_MobileNet_8s
from semseg.modelloader.fcn_resnet import fcn_resnet18, fcn_resnet34, fcn_resnet18_32s, fcn_resnet18_16s, \
fcn_resnet18_8s, fcn_resnet34_32s, fcn_resnet34_16s, fcn_resnet34_8s, fcn_resnet50_32s, fcn_resnet50_16s, fcn_resnet50_8s
from semseg.modelloader.lrn import lrn_vgg16
from semseg.modelloader.segnet import segnet, segnet_squeeze, segnet_alignres, segnet_vgg19
from semseg.modelloader.segnet_unet import segnet_unet
from semseg.modelloader.sqnet import sqnet
def validate(args):
    """Run semantic-segmentation validation over a whole dataset split.

    Loads the dataset named by ``args.dataset``, restores a model (either a
    pickled whole model via --validate_model or a structure name plus state
    dict), runs inference image-by-image, optionally dumps colourised
    ground-truth/prediction images to /tmp/<timestamp>/, and finally prints
    overall metrics and per-class IoU.
    """
    init_time = str(int(time.time()))
    if args.vis:
        vis = visdom.Visdom()
    if args.dataset_path == '':
        HOME_PATH = os.path.expanduser('~')
        local_path = os.path.join(HOME_PATH, 'Data/CamVid')
    else:
        # NOTE(review): the first assignment is immediately overwritten by
        # the expanduser() form below — the first line is dead code.
        local_path = args.dataset_path
        local_path = os.path.expanduser(args.dataset_path)
    if args.dataset == 'CamVid':
        dst = camvidLoader(local_path, is_transform=True, split=args.dataset_type)
    elif args.dataset == 'CityScapes':
        dst = cityscapesLoader(local_path, is_transform=True, split=args.dataset_type)
    elif args.dataset == 'FreeSpace':
        dst = freespaceLoader(local_path, is_transform=True, split=args.dataset_type)
    else:
        # Unknown dataset name: 'dst' stays unbound and the DataLoader line
        # below would raise NameError.
        pass
    val_loader = torch.utils.data.DataLoader(dst, batch_size=1, shuffle=False)
    if args.validate_model != '':
        # Whole pickled model takes precedence over structure + state dict.
        model = torch.load(args.validate_model)
    else:
        try:
            # HACK: eval() turns the --structure string into a constructor.
            # Only safe because args come from our own argparse CLI.
            model = eval(args.structure)(n_classes=args.n_classes, pretrained=args.init_vgg16)
        except:
            print('missing structure or not support')
            exit(0)
        if args.validate_model_state_dict != '':
            try:
                model.load_state_dict(torch.load(args.validate_model_state_dict, map_location='cpu'))
            except KeyError:
                print('missing key')
    if args.cuda:
        model.cuda()
    # Some models score differently in eval vs train mode (batch-norm /
    # dropout); eval() is the correct mode for validation.
    model.eval()
    gts, preds, errors, imgs_name = [], [], [], []
    for i, (imgs, labels) in enumerate(val_loader):
        print(i)
        img_path = dst.files[args.dataset_type][i]
        # Basename of the image, used for the dumped result filenames.
        img_name = img_path[img_path.rfind('/')+1:]
        imgs_name.append(img_name)
        # Wrap the numpy batches as autograd Variables (volatile=True:
        # legacy pre-0.4 API for inference-only, no grad tracking).
        imgs = Variable(imgs, volatile=True)
        labels = Variable(labels, volatile=True)
        if args.cuda:
            imgs = imgs.cuda()
            labels = labels.cuda()
        outputs = model(imgs)
        loss = cross_entropy2d(outputs, labels)
        loss_np = loss.cpu().data.numpy()
        loss_np_float = float(loss_np)
        errors.append(loss_np_float)
        # outputs is batch_size*n_classes*height*width; max(1) returns
        # (values, indices) along the class axis — the indices tensor is
        # the predicted label map.
        pred = outputs.cpu().data.max(1)[1].numpy()
        gt = labels.cpu().data.numpy()
        if args.save_result:
            # Dump colourised GT and prediction under /tmp/<run timestamp>/.
            if not os.path.exists('/tmp/'+init_time):
                os.mkdir('/tmp/'+init_time)
            pred_labels = outputs.cpu().data.max(1)[1].numpy()
            label_color = dst.decode_segmap(labels.cpu().data.numpy()[0]).transpose(2, 0, 1)
            pred_label_color = dst.decode_segmap(pred_labels[0]).transpose(2, 0, 1)
            label_color_cv2 = label_color.transpose(1, 2, 0)
            # decode_segmap yields RGB; OpenCV writes BGR.
            label_color_cv2 = cv2.cvtColor(label_color_cv2, cv2.COLOR_RGB2BGR)
            cv2.imwrite('/tmp/'+init_time+'/gt_{}'.format(img_name), label_color_cv2)
            pred_label_color_cv2 = pred_label_color.transpose(1, 2, 0)
            pred_label_color_cv2 = cv2.cvtColor(pred_label_color_cv2, cv2.COLOR_RGB2BGR)
            cv2.imwrite('/tmp/'+init_time+'/pred_{}'.format(img_name), pred_label_color_cv2)
        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
    # Per-image loss ranking (currently only computed, not reported).
    errors_indices = np.argsort(errors).tolist()
    score, class_iou = scores(gts, preds, n_class=dst.n_classes)
    for k, v in score.items():
        print(k, v)
    class_iou_list = []
    for i in range(dst.n_classes):
        class_iou_list.append(round(class_iou[i], 2))
    print('classes:', range(dst.n_classes))
    print('class_iou_list:', class_iou_list)
# Example invocation:
#   python validate.py --structure fcn32s --validate_model_state_dict fcn32s_camvid_9.pt
if __name__=='__main__':
    # CLI entry point: parse validation options and run validate().
    parser = argparse.ArgumentParser(description='training parameter setting')
    parser.add_argument('--structure', type=str, default='fcn32s', help='use the net structure to segment [ fcn32s ResNetDUC segnet ENet drn_d_22 ]')
    parser.add_argument('--validate_model', type=str, default='', help='validate model path [ fcn32s_camvid_9.pkl ]')
    parser.add_argument('--validate_model_state_dict', type=str, default='', help='validate model state dict path [ fcn32s_camvid_9.pt ]')
    parser.add_argument('--init_vgg16', type=bool, default=False, help='init model using vgg16 weights [ False ]')
    parser.add_argument('--dataset', type=str, default='CamVid', help='train dataset [ CamVid CityScapes FreeSpace ]')
    parser.add_argument('--dataset_path', type=str, default='~/Data/CamVid', help='train dataset path [ ~/Data/CamVid ~/Data/cityscapes ~/Data/FreeSpaceDataset ]')
    parser.add_argument('--dataset_type', type=str, default='val', help='dataset type [ train val test ]')
    parser.add_argument('--n_classes', type=int, default=12, help='train class num [ 12 ]')
    parser.add_argument('--vis', type=bool, default=False, help='visualize the training results [ False ]')
    # NOTE(review): argparse type=bool is a known footgun — any non-empty
    # string (including "False") parses as True; action='store_true' would
    # be the conventional fix.
    parser.add_argument('--cuda', type=bool, default=False, help='use cuda [ False ]')
    parser.add_argument('--save_result', type=bool, default=False, help='save the val dataset prediction result [ False True ]')
    args = parser.parse_args()
    print(args)
    validate(args)
|
[
"guanfuchen@zju.edu.cn"
] |
guanfuchen@zju.edu.cn
|
68ac0eeb5d55a38888952d35a6cd32b67c9bde23
|
d7b4e2e391e1f15fd7cb4fbf4d9aee598131b007
|
/AE_Datasets/R_A/datasets/CWRUCWT.py
|
66ff726731086772786eee97b0378a32b4c39b8e
|
[
"MIT"
] |
permissive
|
wuyou33/DL-based-Intelligent-Diagnosis-Benchmark
|
eba2ce6f948b5abe68069e749f64501a32e1d7ca
|
e534f925cf454d07352f7ef82d75a8d6dac5355c
|
refs/heads/master
| 2021-01-02T15:06:29.041349
| 2019-12-28T21:47:21
| 2019-12-28T21:47:21
| 239,673,952
| 1
| 0
|
MIT
| 2020-02-11T04:15:21
| 2020-02-11T04:15:20
| null |
UTF-8
|
Python
| false
| false
| 5,887
|
py
|
import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from datasets.MatrixDatasets import dataset
from datasets.matrix_aug import *
from tqdm import tqdm
import pickle
import pywt
signal_size=100
datasetname = ["12k Drive End Bearing Fault Data", "12k Fan End Bearing Fault Data", "48k Drive End Bearing Fault Data",
"Normal Baseline Data"]
normalname = ["97.mat", "98.mat", "99.mat", "100.mat"]
# For 12k Drive End Bearing Fault Data
dataname1 = ["105.mat", "118.mat", "130.mat", "169.mat", "185.mat", "197.mat", "209.mat", "222.mat",
"234.mat"] # 1797rpm
dataname2 = ["106.mat", "119.mat", "131.mat", "170.mat", "186.mat", "198.mat", "210.mat", "223.mat",
"235.mat"] # 1772rpm
dataname3 = ["107.mat", "120.mat", "132.mat", "171.mat", "187.mat", "199.mat", "211.mat", "224.mat",
"236.mat"] # 1750rpm
dataname4 = ["108.mat", "121.mat", "133.mat", "172.mat", "188.mat", "200.mat", "212.mat", "225.mat",
"237.mat"] # 1730rpm
# For 12k Fan End Bearing Fault Data
dataname5 = ["278.mat", "282.mat", "294.mat", "274.mat", "286.mat", "310.mat", "270.mat", "290.mat",
"315.mat"] # 1797rpm
dataname6 = ["279.mat", "283.mat", "295.mat", "275.mat", "287.mat", "309.mat", "271.mat", "291.mat",
"316.mat"] # 1772rpm
dataname7 = ["280.mat", "284.mat", "296.mat", "276.mat", "288.mat", "311.mat", "272.mat", "292.mat",
"317.mat"] # 1750rpm
dataname8 = ["281.mat", "285.mat", "297.mat", "277.mat", "289.mat", "312.mat", "273.mat", "293.mat",
"318.mat"] # 1730rpm
# For 48k Drive End Bearing Fault Data
dataname9 = ["109.mat", "122.mat", "135.mat", "174.mat", "189.mat", "201.mat", "213.mat", "250.mat",
"262.mat"] # 1797rpm
dataname10 = ["110.mat", "123.mat", "136.mat", "175.mat", "190.mat", "202.mat", "214.mat", "251.mat",
"263.mat"] # 1772rpm
dataname11 = ["111.mat", "124.mat", "137.mat", "176.mat", "191.mat", "203.mat", "215.mat", "252.mat",
"264.mat"] # 1750rpm
dataname12 = ["112.mat", "125.mat", "138.mat", "177.mat", "192.mat", "204.mat", "217.mat", "253.mat",
"265.mat"] # 1730rpm
# label
label = [1, 2, 3, 4, 5, 6, 7, 8, 9] # The failure data is labeled 1-9
axis = ["_DE_time", "_FE_time", "_BA_time"]
def CWT(lenth, data):
    """Continuous wavelet transform of a 1-D signal.

    Uses the Mexican-hat mother wavelet over integer scales 1..lenth-1 and
    returns the resulting coefficient matrix (one row per scale).
    """
    scales = np.arange(1, lenth)
    coefficients, _frequencies = pywt.cwt(data, scales, 'mexh')
    return coefficients
# generate Training Dataset and Testing Dataset
def get_files(root, test=False):
    '''
    Build the raw (data, label) lists for the CWRU bearing dataset.

    root: location of the data set (joined under /tmp with the module-level
          datasetname folder names).
    Uses module-level normalname (normal baseline files, label 0) and
    dataname1 (12k drive-end fault files at 1797rpm, labels 1-9).
    Returns [data, lab]: parallel lists of CWT images and integer labels.
    NOTE(review): the 'test' parameter is accepted but never used here.
    '''
    data_root1 = os.path.join('/tmp', root, datasetname[3])
    data_root2 = os.path.join('/tmp', root, datasetname[0])
    path1 = os.path.join('/tmp', data_root1, normalname[0])  # 0->1797rpm ;1->1772rpm;2->1750rpm;3->1730rpm
    data, lab = data_load(path1, axisname=normalname[0],label=0)  # the label for normal data is 0
    for i in tqdm(range(len(dataname1))):
        path2 = os.path.join('/tmp', data_root2, dataname1[i])
        data1, lab1 = data_load(path2, dataname1[i], label=label[i])
        data += data1
        lab += lab1
    return [data, lab]
def data_load(filename, axisname, label):
    '''
    Slice one CWRU .mat recording into fixed-size windows and CWT images.

    filename: path to the .mat file.
    axisname: the file's basename (e.g. "105.mat"); its numeric part selects
              the in-file variable name, combined with axis[0] ("_DE_time",
              i.e. the drive-end channel).
    label:    integer class label attached to every window.
    Returns (data, lab): lists of CWT coefficient matrices and labels.
    '''
    datanumber = axisname.split(".")
    # HACK: eval() is only used to parse the numeric file stem; int() would
    # be the safe equivalent.
    if eval(datanumber[0]) < 100:
        realaxis = "X0" + datanumber[0] + axis[0]
    else:
        realaxis = "X" + datanumber[0] + axis[0]
    fl = loadmat(filename)[realaxis]
    fl = fl.reshape(-1,)
    data = []
    lab = []
    start, end = 0, signal_size
    # Only the first tenth of the recording is windowed (non-overlapping
    # windows of signal_size samples).
    while end <= fl.shape[0]/10:
        x = fl[start:end]
        imgs = CWT(signal_size + 1, x)
        data.append(imgs)
        lab.append(label)
        start += signal_size
        end += signal_size
    return data, lab,
def data_transforms(dataset_type="train", normlize_type="-1-1"):
    """Return the matrix-augmentation pipeline for the given split.

    Training adds random scale/crop augmentation on top of the shared
    resize/reshape/normalize steps; validation applies only the
    deterministic steps. Transform classes come from datasets.matrix_aug.
    """
    transforms = {
        'train': Compose([
            ReSize(size=0.32),
            Reshape(),
            Normalize(normlize_type),
            RandomScale(),
            RandomCrop(),
            Retype(),
        ]),
        'val': Compose([
            ReSize(size=0.32),
            Reshape(),
            Normalize(normlize_type),
            Retype(),
        ])
    }
    return transforms[dataset_type]
class CWRUCWT(object):
    """CWRU bearing dataset wrapper using CWT images as model input."""

    # 0 = normal baseline, 1-9 = fault classes.
    num_classes = 10
    # CWT images are single-channel.
    inputchannel = 1
    def __init__(self, data_dir, normlizetype):
        # data_dir: dataset root OR a path to a previously pickled file list.
        self.data_dir = data_dir
        self.normlizetype = normlizetype
    def data_preprare(self, test=False):
        """Build train/val (or test) dataset objects.

        If data_dir points at a file (name contains one '.'), load the
        cached pickle; otherwise parse the raw .mat files and cache the
        result as CWRUCWT.pkl inside data_dir.
        """
        if len(os.path.basename(self.data_dir).split('.')) == 2:
            with open(self.data_dir, 'rb') as fo:
                list_data = pickle.load(fo, encoding='bytes')
        else:
            list_data = get_files(self.data_dir, test)
            with open(os.path.join(self.data_dir, "CWRUCWT.pkl"), 'wb') as fo:
                pickle.dump(list_data, fo)
        if test:
            test_dataset = dataset(list_data=list_data, test=True, transform=None)
            return test_dataset
        else:
            # Stratified 80/20 split so the class balance is preserved.
            data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
            train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
            train_dataset = dataset(list_data=train_pd, transform=data_transforms('train',self.normlizetype))
            val_dataset = dataset(list_data=val_pd, transform=data_transforms('val',self.normlizetype))
            return train_dataset, val_dataset
|
[
"646032073@qq.com"
] |
646032073@qq.com
|
669a113c17fd1fe1e8f0256f0d625bbbc78a9be4
|
46404c77e04907225475e9d8be6e0fd33227c0b1
|
/wildcard pattern matching.py
|
0ed16783c406fd5ec5eaf2858e1c35ca373e0e95
|
[] |
no_license
|
govardhananprabhu/DS-task-
|
84b46e275406fde2d56c301fd1b425b256b29064
|
bf54f3d527f52f61fefc241f955072f5ed9a6558
|
refs/heads/master
| 2023-01-16T07:41:27.064836
| 2020-11-27T11:52:50
| 2020-11-27T11:52:50
| 272,928,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
"""
Given two strings 'str' and a wildcard pattern 'pattern' of length N and M respectively, You have to print '1' if the wildcard pattern is matched with str else print '0' .
The wildcard pattern can include the characters ‘?’ and ‘*’
‘?’ – matches any single character
‘*’ – Matches any sequence of characters (including the empty sequence)
Note: The matching should cover the entire str (not partial str).
Constraints:
1 <= length of(str,pat) <= 200
H 7
T 2300
Tag yahoo string
In des
First line contain string s.
Second line contain string,denotes the pattern.
Ot des
Print 1 if the string matches the wildcard pattern, else print 0.
baaabab
ba*a?
1
baaabab
*****ba*****ab
1
baaabab
a*ab
0
water
*r
1
master
m*e
0
Exp
From sample:replace '*' with "aab" and '?' with 'b'.
Hint
Each occurrence of ‘?’ character in wildcard pattern can be replaced with any other character and each occurrence of ‘*’ with a sequence of characters such that the wildcard pattern becomes identical to the input string after replacement.
"""
def strrmatch(strr, pattern, n, m):
    """Return True iff *pattern* matches *strr* entirely.

    '?' matches any single character; '*' matches any (possibly empty)
    sequence. Classic O(n*m) dynamic programme where dp[i][j] says whether
    strr[:i] matches pattern[:j].
    """
    if m == 0:
        return n == 0
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True  # empty pattern matches empty string
    # A leading run of '*' can still match the empty string.
    for j in range(1, m + 1):
        if pattern[j - 1] == '*':
            dp[0][j] = dp[0][j - 1]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            pc = pattern[j - 1]
            if pc == '*':
                # '*' consumes nothing (left) or one more character (up).
                dp[i][j] = dp[i][j - 1] or dp[i - 1][j]
            elif pc == '?' or strr[i - 1] == pc:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = False
    return dp[n][m]
# Read the string and the wildcard pattern from stdin, then report whether
# the pattern matches the whole string ("1") or not ("0").
strr = input()
pattern = input()
if (strrmatch(strr, pattern, len(strr),len(pattern))):
    print("1")
else:
    print("0")
|
[
"noreply@github.com"
] |
govardhananprabhu.noreply@github.com
|
2ea59d15a88cd4a3cfba74fb74162da032c006d3
|
d613fecbe4845ed4a0f1d667439640ed10c8922a
|
/app1/views/ajax.py
|
e9581d351b9923bc2a953751021d2bda01cc0396
|
[] |
no_license
|
AnyiYim/DjangoTeacherManagerDemo
|
e18bdb312237e39da00f62006e9e7a98d817d08c
|
eecfaac3bd5badfb3ac1aed5b2e3f034e505e26e
|
refs/heads/master
| 2021-04-27T00:23:38.853148
| 2018-03-04T16:08:46
| 2018-03-04T16:08:46
| 123,805,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from app1 import models
def ajax1(request):
    """Render the page hosting the AJAX demo form."""
    return render(request, 'ajax1.html')
def ajax2(request):
    """AJAX login demo endpoint: always returns a fixed success message."""
    # Credentials are read from the query string but (at least here) never
    # checked — presumably a demo stub; verify before relying on it.
    u = request.GET.get('username')
    p = request.GET.get('password')
    # Response text means "I am willing" (demo message).
    return HttpResponse('我愿意')
def ajax4(request):
    """Delete the student with id 'nid' (query string); return a status string."""
    nid=request.GET.get('nid')
    # Default response text means "success"; on failure the exception text
    # is returned to the client instead.
    msg='成功'
    try:
        models.Students.objects.get(id=nid).delete()
    except Exception as e:
        # Broad catch is deliberate: any failure (missing id, DB error) is
        # reported back as the message body rather than a 500.
        msg = str(e)
    return HttpResponse(msg)
|
[
"759502117@qq.com"
] |
759502117@qq.com
|
3f84b9dcb1f883353278b6f06f472d8d32a06e47
|
1521332438d4e711b6fa4af825047a3466925511
|
/WorkshopWeek8/problem5.py
|
1925e67c31009097d9b36fdcb1b950cb256b497e
|
[] |
no_license
|
JakeAttard/Python-2807ICT-NoteBook
|
df0907bdca9ff10f347498233260c97f41ea783b
|
9a38035d467e569b3fb97f5ab114753efc32cecc
|
refs/heads/master
| 2020-04-26T17:33:18.184447
| 2019-11-05T13:04:56
| 2019-11-05T13:04:56
| 173,717,675
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
def function(list, diff):
    """Count matches between even- and odd-position items of *list*.

    Starting from 1, scans each even-position item 'a' against the
    odd-position items 'b' in order, incrementing the counter while
    |int(b) - int(a)| equals |diff| and stopping that scan at the first
    mismatch.
    """
    counter = 1
    evens = list[::2]   # items at even indices
    odds = list[1::2]   # items at odd indices
    for a in evens:
        for b in odds:
            delta = int(b) - int(a)
            if delta == diff or delta == -diff:
                counter += 1
            else:
                break  # first mismatch ends the scan for this 'a'
    return counter
def testString(a):
    """Parse one whitespace-separated number list and print the pair count."""
    list1 = a.split()
    # NOTE(review): these checks use len(a) — the length of the raw input
    # string, not the token count — so e.g. a single multi-digit number
    # falls through to the else branch; confirm that is intended.
    if len(a) == 1:
        print(1)
    elif len(a) == 0:
        exit()
    else:
        # diff between the first two numbers defines the target difference.
        difference = int(list1[1]) - int(list1[0])
        print(function(list1, difference))
# Interactive driver: keep prompting for lists until an empty line is
# entered (testString calls exit() on empty input as well).
a = input("List: ")
testString(a)
while len(a) != 0:
    a = input("List: ")
    testString(a)
|
[
"jakeattard18@gmail.com"
] |
jakeattard18@gmail.com
|
a7f24ef184928de29cb7077c5a33eb6c01eae3b5
|
d8422247ecbe450c75df45dcf2c92fb4438b65af
|
/horizon/openstack_dashboard/dashboards/admin/instances/forms.py
|
9d2bf6d665256ffd420ae81e10ff16ed18c8cfd8
|
[
"Apache-2.0"
] |
permissive
|
yianjiajia/openstack_horizon
|
deb9beca534b494b587ae401904c84ddbed64c4a
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
refs/heads/master
| 2016-09-12T21:34:25.718377
| 2016-04-28T05:29:56
| 2016-04-28T05:29:56
| 57,273,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
    """Horizon admin form to live-migrate an instance to another host."""

    # Read-only display of where the instance currently runs.
    current_host = forms.CharField(label=_("Current Host"),
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'readonly': 'readonly'}))
    host = forms.ChoiceField(label=_("New Host"),
                             help_text=_("Choose a Host to migrate to."))
    disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
                                          initial=False, required=False)
    block_migration = forms.BooleanField(label=_("Block Migration"),
                                         initial=False, required=False)
    def __init__(self, request, *args, **kwargs):
        """Stash the instance id in a hidden field and fill host choices."""
        super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
        initial = kwargs.get('initial', {})
        instance_id = initial.get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=instance_id)
        self.fields['host'].choices = self.populate_host_choices(request,
                                                                 initial)
    def populate_host_choices(self, request, initial):
        """Build the sorted list of candidate compute hosts.

        Excludes the instance's current host; prepends a placeholder entry
        depending on whether any target host is available.
        """
        hosts = initial.get('hosts')
        current_host = initial.get('current_host')
        host_list = [(host.host_name,
                      host.host_name)
                     for host in hosts
                     if (host.service.startswith('compute') and
                         host.host_name != current_host)]
        if host_list:
            host_list.insert(0, ("", _("Select a new host")))
        else:
            host_list.insert(0, ("", _("No other hosts available.")))
        return sorted(host_list)
    def handle(self, request, data):
        """Trigger the live migration via nova and record an operation log."""
        try:
            block_migration = data['block_migration']
            disk_over_commit = data['disk_over_commit']
            api.nova.server_live_migrate(request,
                                         data['instance_id'],
                                         data['host'],
                                         block_migration=block_migration,
                                         disk_over_commit=disk_over_commit)
            msg = _('The instance is preparing the live migration '
                    'to host "%s".') % data['host']
            messages.success(request, msg)
            # operation log
            config = '\n'.join(['Host ID: '+ data['host'], 'Instance ID: '+ data['instance_id']])
            api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
                                              resource_name='Instance', config=config,
                                              status='Success')
            return True
        except Exception:
            # Any nova failure: surface the error and redirect to the index.
            msg = _('Failed to live migrate instance to '
                    'host "%s".') % data['host']
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, msg, redirect=redirect)
            # operation log
            api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
                                              resource_name='Instance', config='Failed to live migrate instance',
                                              status='Error')
|
[
"yanjj@syscloud.cn"
] |
yanjj@syscloud.cn
|
d893d6bda716d9a47904627e4d218b88be59669f
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/pytorch_pytorch/pytorch-master/test/test_sparse.py
|
11b51eaf3f1f94a07eaf3d721684547d9a17be77
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,617
|
py
|
import torch
from torch import sparse
import itertools
import random
import unittest
from common import TestCase, run_tests
from numbers import Number
SparseTensor = sparse.DoubleTensor
class TestSparse(TestCase):
    @staticmethod
    def _gen_sparse(d, nnz, with_size):
        """Build a random d-dimensional sparse tensor with nnz entries.

        with_size may be a single int (same extent on every dim) or a
        per-dimension size list. Returns (sparse tensor, index tensor,
        value tensor); indices are uniform random, so duplicates can occur.
        """
        v = torch.randn(nnz)
        if isinstance(with_size, Number):
            i = (torch.rand(d, nnz) * with_size).type(torch.LongTensor)
            x = SparseTensor(i, v)
        else:
            i = torch.rand(d, nnz) * \
                torch.Tensor(with_size).repeat(nnz, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = SparseTensor(i, v, torch.Size(with_size))
        return x, i, v
    def test_basic(self):
        """Indices/values round-trip, sizes, nnz, and the empty tensor."""
        x, i, v = self._gen_sparse(3, 10, 100)
        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())
        x, i, v = self._gen_sparse(3, 10, [100, 100, 100])
        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())
        self.assertEqual(x.ndimension(), 3)
        self.assertEqual(x.nnz(), 10)
        for i in range(3):
            self.assertEqual(x.size(i), 100)
        # Make sure we can access empty indices / values
        x = SparseTensor()
        self.assertEqual(x.indices().numel(), 0)
        self.assertEqual(x.values().numel(), 0)
    def test_to_dense(self):
        """to_dense() scatters (index, value) pairs into the right cells."""
        i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        v = torch.Tensor([2, 1, 3, 4])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        res = torch.Tensor([
            [[2, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 3, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 4]],
        ])
        # Repeated calls guard against memory corruption from to_dense().
        x.to_dense()  # Tests double to_dense for memory corruption
        x.to_dense()
        x.to_dense()
        self.assertEqual(res, x.to_dense())
def test_contig(self):
i = torch.LongTensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
])
v = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x = SparseTensor(i, v, torch.Size([100, 100]))
exp_i = torch.LongTensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
])
exp_v = torch.Tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
i = torch.LongTensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
])
v = torch.Tensor([3, 2, 4, 1])
x = SparseTensor(i, v, torch.Size([3, 4, 5]))
exp_i = torch.LongTensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
])
exp_v = torch.Tensor([2, 1, 3, 4])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
# Duplicate indices
i = torch.LongTensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
])
v = torch.Tensor([3, 2, 4, 1])
x = SparseTensor(i, v, torch.Size([3, 4, 5]))
exp_i = torch.LongTensor([
[0, 2],
[0, 3],
[0, 4],
])
exp_v = torch.Tensor([6, 4])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
def test_transpose(self):
x = self._gen_sparse(4, 20, 5)[0]
y = x.to_dense()
for i, j in itertools.combinations(range(4), 2):
x = x.transpose_(i, j)
y = y.transpose(i, j)
self.assertEqual(x.to_dense(), y)
x = x.transpose(i, j)
y = y.transpose(i, j)
self.assertEqual(x.to_dense(), y)
def test_mm(self):
def test_shape(di, dj, dk):
x, _, _ = self._gen_sparse(2, 20, [di, dj])
t = torch.randn(di, dk)
y = torch.randn(dj, dk)
alpha = random.random()
beta = random.random()
expected = torch.addmm(alpha, t, beta, x.to_dense(), y)
res = torch.addmm(alpha, t, beta, x, y)
self.assertEqual(res, expected)
expected = torch.addmm(t, x.to_dense(), y)
res = torch.addmm(t, x, y)
self.assertEqual(res, expected)
expected = torch.mm(x.to_dense(), y)
res = torch.mm(x, y)
self.assertEqual(res, expected)
test_shape(10, 100, 100)
test_shape(100, 1000, 200)
test_shape(64, 10000, 300)
def test_saddmm(self):
def test_shape(di, dj, dk):
x = self._gen_sparse(2, 20, [di, dj])[0]
t = self._gen_sparse(2, 20, [di, dk])[0]
y = torch.randn(dj, dk)
alpha = random.random()
beta = random.random()
expected = torch.addmm(alpha, t.to_dense(), beta, x.to_dense(), y)
res = torch.saddmm(alpha, t, beta, x, y)
self.assertEqual(res.to_dense(), expected)
expected = torch.addmm(t.to_dense(), x.to_dense(), y)
res = torch.saddmm(t, x, y)
self.assertEqual(res.to_dense(), expected)
expected = torch.mm(x.to_dense(), y)
res = torch.smm(x, y)
self.assertEqual(res.to_dense(), expected)
test_shape(7, 5, 3)
test_shape(1000, 100, 100)
test_shape(3000, 64, 300)
def test_spadd(self):
def test_shape(*shape):
x, _, _ = self._gen_sparse(len(shape), 10, shape)
y = torch.randn(*shape)
r = random.random()
expected = y + r * x.to_dense()
res = torch.add(y, r, x)
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s).transpose_(0, len(s) - 1)
r = random.random()
expected = y + r * x.to_dense()
res = torch.add(y, r, x)
self.assertEqual(res, expected)
test_shape(5, 6)
test_shape(10, 10, 10)
test_shape(50, 30, 20)
test_shape(5, 5, 5, 5, 5, 5)
if __name__ == '__main__':
run_tests()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
d7ef8890a6ce56916383b518e78a04c723e683ff
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBZinc_M_950_TuneZ2star_8TeV-madgraph_cff.py
|
425c01667e5ad92ae0b9a16636c284b2b8579120
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,054
|
py
|
import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(7,1) = 950.0D0 ! bprime quarks mass',
'PMAS(7,2) = 9.50D0',
'PMAS(7,3) = 95.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4',
'MDME(57,1)=0 ! gamma b4',
'KFDP(58,2)=5 ! defines Z0 b',
'MDME(58,1)=1 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'KFDP(63,2)=5 ! defines H0 b',
'MDME(63,1)=1 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 0.5D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 0.5D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(210,1)=1 !Higgs decay into dd',
'MDME(211,1)=1 !Higgs decay into uu',
'MDME(212,1)=1 !Higgs decay into ss',
'MDME(213,1)=1 !Higgs decay into cc',
'MDME(214,1)=1 !Higgs decay into bb',
'MDME(215,1)=1 !Higgs decay into tt',
'MDME(216,1)=1 !Higgs decay into',
'MDME(217,1)=1 !Higgs decay into Higgs decay',
'MDME(218,1)=1 !Higgs decay into e nu e',
'MDME(219,1)=1 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=1 !Higgs decay into Higgs decay',
'MDME(222,1)=1 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=1 !Higgs decay into gam Z',
'MDME(225,1)=1 !Higgs decay into Z Z',
'MDME(226,1)=1 !Higgs decay into W W',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] |
sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch
|
035f2485d9238b11a68df3adc4d304e7add9874d
|
2687412dd10032667e50e74d9d3f832133bc2536
|
/code/disasters/reload_landslide_data.py
|
9963d89459014edca49ca7efbc21837e02e92c30
|
[
"MIT"
] |
permissive
|
wfp-ose/sparc2-pipeline
|
644e040c27517889c84598c34397c06f3d82ca96
|
fdd3bd29426d9231956f449cb5e78afd33446a8a
|
refs/heads/master
| 2021-01-17T18:07:58.641768
| 2016-12-02T12:40:54
| 2016-12-02T12:40:54
| 57,199,382
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
from geodash.enumerations import MONTHS_SHORT3
from geodash.data import GeoDashDatabaseConnection
print "Inserting Landslide Data..."
print "..."
print ""
prob_classes = [
{'input': 'low', 'output_text': 'low', "output_int": 1},
{'input': 'medium', 'output_text': 'medium', "output_int": 2},
{'input': 'high', 'output_text': 'high', "output_int": 3},
{'input': 'very_h', 'output_text': 'very_high', "output_int": 4}
]
tpl = None
with open('insert_landslide_data.tpl.sql', 'r') as f:
tpl = f.read()
with GeoDashDatabaseConnection() as geodash_conn:
try:
geodash_conn.exec_update("DELETE FROM landslide.admin2_popatrisk;")
except:
pass
for month in MONTHS_SHORT3:
for prob_class in prob_classes:
# Population at Risk Data
sql = tpl.format(** {
'month': month,
'prob_class_input': prob_class['input'],
'prob_class_output_text': prob_class['output_text'],
'prob_class_output_int': str(prob_class['output_int'])
})
geodash_conn.exec_update(sql)
print "Done Inserting Landslide Data"
|
[
"pjdufour.dev@gmail.com"
] |
pjdufour.dev@gmail.com
|
19d2071c90dfbf39c31669b82ef26d4c0d376a89
|
4edd89e807ac9a70d4fb4a258015e6889b01ff27
|
/md5decoder.py
|
f0610781b1f9b91c3f091c3120739488857dd15c
|
[] |
no_license
|
karimmakynch/PYTHON
|
ca68576fb3079fdd56559959edb3b4e1ba8ccf04
|
4842269368d49a3954c39ce4e8f2a0bc03b2e99c
|
refs/heads/main
| 2023-03-15T21:16:38.610893
| 2021-02-26T05:42:26
| 2021-02-26T05:42:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
# -*- coding: utf-8 -*-
import hashlib
import sys
#variables
count = 0
tour = 0
tourclone = 0
tourx = 0
creds = ''
part = 1
inputfilelines = 0
try:
try:
inputfile = sys.argv[1]
dicfile = sys.argv[2]
outputfile = sys.argv[3]
fout = open(outputfile,'w')
fouttx = '[+] inputfile: '+str(inputfile)+' DictionaryFile: '+str(dicfile)+' Outputfile: '+str(outputfile)+'\n'
fout.write(fouttx)
except:
print 'err: Ex: python md5decoder.py inputfile(hashes).txt dic.txt outputfile(result).txt'
sys.exit()
print 'Text Content:'
print '1)Hashes:Email'
print '2)Email :Hashes'
hashpos = input('input: ')
if hashpos == 1:
hashes = 0
emails = 1
if hashpos == 2:
hashes = 1
emails = 0
if str(hashpos) not in '12':
print '[-] err 1)Hashes:Email !!'
print '[-] err 2)Email :Hashes !!'
sys.exit()
inputfilelineslen = len(open(inputfile,'r').readlines())
for i in range(0,inputfilelineslen):
if len(open(inputfile,'r').readlines()[i].split()) == 2:
inputfilelines +=1
dicfilelines = len(open(dicfile,'r').readlines())
print '\n'
for i in open(inputfile,'r'):
if len(i.split()) == 2:
for ii in open(dicfile,'r'):
hashtext = hashlib.md5(ii.split()[0]).hexdigest()
prog1 = int(float(tour)/dicfilelines*100)
if tourclone > inputfilelines:
tourclone = 0
prog2 = int(float(tourclone)/inputfilelines*100)
sym1 = 10*tour/dicfilelines
p1 = '▓'*sym1+'░'*(10-sym1)
sym2 = 10*tourclone/inputfilelines
p2 = '▓'*sym2+'░'*(10-sym2)
prog3 = int(float(tourx)/inputfilelines*100)
sym3 = 10*tourx/inputfilelines
p3 = '▓'*sym3+'░'*(10-sym3)
sys.stdout.write('\r '+str(prog3)+'% ['+p3+'] '+str(prog1)+'% ['+p1+'] '+str(prog2)+'% ['+p2+'] count : '+str(count)+' tested: '+str(part)+'/'+str(inputfilelines)+' ')
sys.stdout.flush()
if i.split()[hashes] == hashtext:
count += 1
creds = str(i.split()[emails])+':'+str(ii.split()[0])
fout = open(outputfile,'a')
fout.write(creds)
tourclone +=1
tour += 1
if tour > dicfilelines:
tour = 0
part +=1
tourx +=1
print '\n'
except:
pass
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
dfa52f8f4a5c08260ca6f9c4014300383b6ab5f7
|
dd9571236f35807e130bb987b4f1f5f0b2676efb
|
/users/admin_user_api.py
|
41fce13a4ea094ff16f8ec70ab22cde148d74c67
|
[] |
no_license
|
sekhorroy/bysterdjango
|
58337e6ac1191ae945fcbd2ec1c47229e598a570
|
fd016bcc3414875cd874a3c69733722815a84e05
|
refs/heads/master
| 2022-12-13T12:49:04.802319
| 2020-09-06T06:28:50
| 2020-09-06T06:28:50
| 292,861,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
from rest_framework.exceptions import ValidationError
from rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from users.admin_serializer import AdminUserSerializer, AdminLoginSerializer
from users.models import MtAdminUser as Admin
class UserPagination(LimitOffsetPagination):
default_limit = 10
max_limit = 100
class CreateAdminUser(CreateAPIView):
# Allow authenticate users to hit this endpoint
permission_classes = (IsAuthenticated, )
serializer_class = AdminUserSerializer
def post(self, request):
#restore those native datatypes into a dictionary of validated data.
serializers = self.serializer_class(data=request.data)
#checks if the data is as per serializer fields otherwise throws an exception.
serializers.is_valid(raise_exception=True)
serializers.save()
status_code = status.HTTP_201_CREATED
response = {
'success' : 'True',
'statuc code' : status_code,
'message' : 'User registered successfully'
}
return Response(response, status=status_code)
class AdminLogin(RetrieveUpdateDestroyAPIView):
permission_classes = (AllowAny, )
serializer_class = AdminLoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success' : 'True',
'status_code' : status.HTTP_200_OK,
'firstname' : serializer.data['first_name'],
'lastname' : serializer.data['last_name'],
'email' : serializer.data['email'],
'token' : serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserListView(ListAPIView):
permission_classes=(IsAuthenticated, )
queryset = Admin.objects.all()
serializer_class = AdminUserSerializer
|
[
"apple@Apples-MacBook-Pro.local"
] |
apple@Apples-MacBook-Pro.local
|
1a7ee7ad25d703905a1b326105e18c566f03cf65
|
d7cd51a7aaa9bd5a7c39409a39d1be1944ecb9c4
|
/Assignments/Python_Stack/Django/Django_ORM/users_template/users_template/wsgi.py
|
5725974a941c17bdca19fd76e2fc66d918edd371
|
[] |
no_license
|
Geneveroth/Coding_Dojo_Assignments
|
ae525e6d95e0f3fcf10b44a6734e8996b53ec7e1
|
9643845e237d5029de03dfe1ae2d43a49350ba22
|
refs/heads/master
| 2022-12-23T18:46:08.971696
| 2020-07-21T20:44:17
| 2020-07-21T20:44:17
| 251,153,510
| 0
| 0
| null | 2021-01-06T03:08:14
| 2020-03-29T23:10:09
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for users_template project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'users_template.settings')
application = get_wsgi_application()
|
[
"black.samlh@gmail.com"
] |
black.samlh@gmail.com
|
f5ba807cf4377fe11e6a9eac40676eed893527a6
|
fe1349a9bd25586f830f2a44618a4012ea20184a
|
/stanford_tf_research/01_plot_histogram_random.py
|
838a63c687196773d418188816a03661ad3095dc
|
[] |
no_license
|
EmbraceLife/LIE
|
cdca29b8308f2cd7740743cea379a72d7bde51db
|
8c30b6aabc5842092c18dd97a0c20aa19f62000f
|
refs/heads/master
| 2022-12-04T05:56:37.393552
| 2017-08-16T04:54:55
| 2017-08-16T04:54:55
| 87,597,172
| 4
| 3
| null | 2022-11-26T15:26:45
| 2017-04-08T00:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
"""
=========================================================
Demo of the histogram (hist) function with a few features
=========================================================
In addition to the basic histogram, this demo shows a few optional
features:
* Setting the number of data bins
* The ``normed`` flag, which normalizes bin heights so that the
integral of the histogram is 1. The resulting histogram is an
approximation of the probability density function.
* Setting the face color of the bars
* Setting the opacity (alpha value).
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
np.random.seed(0)
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(437)
num_bins = 50
fig, ax = plt.subplots()
# the histogram of the data, is add on figure
n, bins, patches = ax.hist(x, num_bins, normed=1)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
ax.plot(bins, y, '--')
# set labels, title
ax.set_xlabel('Smarts')
ax.set_ylabel('Probability density')
ax.set_title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
|
[
"1227561934@qq.com"
] |
1227561934@qq.com
|
a2afcdbb25e5d5358991ecaf4ea9ef99624a88ba
|
912021bc754e9b6f62efaf0d69e4179dda376d62
|
/splatify/spopulate.py
|
5d26f2fcf6d4c130bc7636eab1a4cff76fea7336
|
[] |
no_license
|
micnem/splatify
|
5439cfb21ada1b194cea3f17661b9e02dd60d403
|
792e3be4bd9bcc2c34ace6dd0aea3acf512b8829
|
refs/heads/master
| 2023-07-22T02:39:34.123446
| 2023-02-18T21:55:37
| 2023-02-18T21:55:37
| 123,298,090
| 2
| 1
| null | 2023-07-15T00:54:42
| 2018-02-28T14:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,334
|
py
|
from django.shortcuts import render, redirect
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from requests import Request, post
from .models import Artist, TopArtist, RelatedArtist, Profile
from django.utils import timezone
from datetime import timedelta
import requests as r
import json
import base64
from splatify2.settings import CLIENT_ID, CLIENT_SECRET
BASE_URL = "https://api.spotify.com/v1/"
def execute_spotify_api_request(access_token, endpoint, post_=False, put_=False):
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + access_token}
if post_:
r.post(BASE_URL + endpoint, headers=headers)
if put_:
r.put(BASE_URL + endpoint, headers=headers)
response = r.get(BASE_URL + endpoint, {}, headers=headers)
try:
return response.json()
except:
return {'Error': 'Issue with request'}
def create_artist(items):
artist_list = []
for item in items:
spotify_id = item.get('id')
# image = item.get('images')[0].get('url')
name = item.get('name')
popularity = item.get('popularity')
uri = item.get('uri')
artist = {
'spotify_id': spotify_id,
'name': name,
# 'image': image,
'popularity': popularity,
'uri': uri
}
artist_list.append(artist)
return artist_list
def get_top_artists(profile):
access_token = refresh_tokens(profile)
endpoint = "me/top/artists?time_range=long_term&limit=20"
response = execute_spotify_api_request(access_token, endpoint)
if response == None:
endpoint = "me/top/artists?time_range=short_term&limit=20"
response = execute_spotify_api_request(access_token, endpoint)
items = response.get('items')
artist_list = create_artist(items)
for num, artist in enumerate(artist_list[::-1]):
current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['spotify_id'], popularity = artist['popularity'], uri = artist['uri'])
endpoint = f"artists/{current_artist.spotify_id}/related-artists"
response = execute_spotify_api_request(access_token, endpoint)
items = response.get('artists')
rel_artist_list = create_artist(items)
for number, rel_artist in enumerate(rel_artist_list[::-1]):
related_artist, created = Artist.objects.get_or_create(name = rel_artist['name'], spotify_id = rel_artist['spotify_id'], popularity = rel_artist['popularity'], uri = rel_artist['uri'])
RelatedArtist.objects.get_or_create(root_artist=current_artist, artist2=related_artist, affinity=number + 1)
ta, created = TopArtist.objects.get_or_create(artist=current_artist, profile=profile, affinity=num+1)
profile.populated = True
profile.save()
def match(user_list):
master_artist_list = []
for num, user in enumerate(user_list):
top_artists = user.profile.fave_artists.all()
related_artists = RelatedArtist.objects.filter(root_artist__in = top_artists).distinct().values_list("artist2", flat=True)
artist_list = (Artist.objects.filter(id__in = related_artists)|top_artists).distinct()
if num == 0:
master_artist_list = artist_list
else:
master_artist_list = master_artist_list.intersection(artist_list)
return master_artist_list
def create_playlist(profile, user2):
access_token = refresh_tokens(profile)
user_id = profile.account.social_auth.first().uid
endpoint = f"users/{user_id}/playlists"
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token}
body = json.dumps({
"name": f"SplatList for {profile.account.first_name} and {user2.first_name}",
"description": "A playlist generated for you, by Splatify, with love.",
"public": False
})
response = r.post(BASE_URL + endpoint, body, headers=headers)
playlist_id = response.json()
return playlist_id['id']
def add_to_playlist(profile, track_uri_list, playlist_id):
access_token = refresh_tokens(profile)
track_urls = '%2c'.join(track_uri_list)
endpoint = f"playlists/{playlist_id}/tracks?uris=" + track_urls
response = execute_spotify_api_request(access_token, endpoint, post_=True)
return response
def get_artist_top_songs(artist, profile):
access_token = refresh_tokens(profile)
artist_id = artist.spotify_id
endpoint = f"artists/{artist_id}/top-tracks?country=IL"
response = execute_spotify_api_request(access_token, endpoint)
tracks = response['tracks']
track_uri_list = []
while len(track_uri_list)<3:
for track in tracks:
track_uri_list.append(track['uri'])
return track_uri_list
def main(master_artist_list, profile, user2):
master_artist_list = master_artist_list[0:20]
playlist_id = create_playlist(profile, user2)
if len(master_artist_list) > 5:
for artist in master_artist_list:
add_to_playlist(profile, get_artist_top_songs(artist, profile), playlist_id)
else:
track_uri_list = seeder(master_artist_list, profile)
add_to_playlist(profile, track_uri_list, playlist_id)
def refresh_tokens(profile):
endpoint = "https://accounts.spotify.com/api/token"
refresh_token = profile.account.social_auth.first().extra_data['refresh_token']
auth_str = '{}:{}'.format(CLIENT_ID, CLIENT_SECRET)
b64_auth_str = base64.urlsafe_b64encode(auth_str.encode()).decode()
headers = {'Authorization': f'Basic {b64_auth_str}'}
body = {
'grant_type': 'refresh_token',
'refresh_token':refresh_token,
}
response = r.post(endpoint, body, headers=headers)
return response.json()['access_token']
def seeder(artist_list, profile):
seed_artists = []
for artist in artist_list:
seed_artists.append(artist.spotify_id)
seed_artists = seed_artists[:5]
artists = '%2c'.join(seed_artists)
endpoint = f"recommendations?seed_artists=" + artists
access_token = refresh_tokens(profile)
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + access_token}
response = r.get(BASE_URL + endpoint, headers = headers)
track_uri_list = []
if response.json()['error']['status'] == 400:
track_uri_list.append('spotify:track:4uLU6hMCjMI75M1A2tKUQC')
else:
rec_tracks = response.json()['tracks']
for track in rec_tracks:
track_uri_list.append(track['uri'])
return track_uri_list
def artist_search(query, profile):
access_token = refresh_tokens(profile)
endpoint = f"https://api.spotify.com/v1/search?q={query}&type=artist"
headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + access_token}
response = r.get(endpoint, headers = headers)
artist = response.json()['artists']['items'][0]
current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['id'], popularity = artist['popularity'], uri = artist['uri'])
TopArtist.objects.get_or_create(profile=profile, artist=current_artist, affinity=30)
return current_artist
|
[
"michael.nemni@gmail.com"
] |
michael.nemni@gmail.com
|
04d6541daf0a5a782f444e495432b9f0bc9d80a1
|
fcaa0395a7c6aa74cbc47c40f35fdc312e44b9c5
|
/aok/comparisons/_basics.py
|
30b87c970c9d3869bf7cb89261e8ca2a4506b453
|
[] |
no_license
|
rocketboosters/a-ok
|
b6f1a70d262123c2df5e4969a687cbcfdfbafc8c
|
06f31404a4ce34d561253ba74b533ce3fb73c60c
|
refs/heads/main
| 2023-09-02T19:18:18.158296
| 2021-11-03T01:54:36
| 2021-11-03T01:54:36
| 388,142,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,951
|
py
|
import typing
import yaml
from aok import _definitions
from aok import _operations
class Equals(_definitions.Comparator):
"""Compares two values as an equality."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Make an equals comparison."""
return _operations.cast_compatible(self.value, observed) == observed
class Unequals(_definitions.Comparator):
"""Compares two values as an inequality."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Make an inequality comparison."""
return _operations.cast_compatible(self.value, observed) != observed
class Anything(_definitions.Comparator):
"""Allows anything for the given value."""
def __init__(self):
"""Create an Anything comparison operation."""
super(Anything, self).__init__(None)
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Anything will always be true."""
return True
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Anything":
return cls()
class Less(_definitions.Comparator):
"""Allows anything less than the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Less than will be true."""
return _operations.cast_compatible(self.value, observed) > observed
class LessOrEqual(_definitions.Comparator):
"""Allows anything less than or equal the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Less than or equal will be true."""
return _operations.cast_compatible(self.value, observed) >= observed
class Greater(_definitions.Comparator):
"""Allows anything greater than the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than will be true."""
return _operations.cast_compatible(self.value, observed) < observed
class GreaterOrEqual(_definitions.Comparator):
"""Allows anything greater than or equal to the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than or equal will be true."""
return _operations.cast_compatible(self.value, observed) <= observed
class Between(_definitions.Comparator):
"""Allows between the given values."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than or equal will be true."""
casted_min = _operations.cast_compatible(self.value["min"], observed)
casted_max = _operations.cast_compatible(self.value["max"], observed)
return casted_min <= observed <= casted_max
@classmethod
def construct(cls, minimum: typing.Any, maximum: typing.Any) -> "Between":
"""Create a Between comparison operator with the specified options."""
return cls({"min": minimum, "max": maximum})
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Between":
if isinstance(node, yaml.SequenceNode):
loaded = loader.construct_sequence(node, deep=True)
value = {"min": loaded[0], "max": loaded[1]}
else:
value = loader.construct_mapping(node, deep=True)
return cls(value)
class OneOf(_definitions.Comparator):
"""Allows a matching comparison between any of the listed values."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Succeeds if at least one of the options are equal."""
failures: typing.Dict[str, _definitions.Comparison] = {}
for index, option in enumerate(self.value["options"]):
if isinstance(option, _definitions.Comparator):
comparator = option
else:
comparator = Equals(option)
result = comparator.compare(observed, subset=subset)
if getattr(result, "success", result):
return result
if isinstance(result, _definitions.Comparison):
failures[str(index)] = result
else:
failures[str(index)] = _definitions.Comparison(
operation=comparator.operation_name(),
success=False,
expected=comparator.value,
observed=observed,
)
return _definitions.Comparison(
operation="one_of",
success=False,
expected=", ".join([f"({i}) {f.expected}" for i, f in failures.items()]),
observed=observed,
)
@classmethod
def construct(cls, options: typing.List[typing.Any]) -> "OneOf":
"""Create a OneOf comparison operator with the specified options."""
return cls({"options": options})
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "OneOf":
options = loader.construct_sequence(node, deep=True)
return cls({"options": options})
class NoneOf(_definitions.Comparator):
    """Allows a mismatching comparison between none of the listed values."""

    def _compare(
        self,
        observed: typing.Any,
        subset: bool = False,
    ) -> typing.Union[_definitions.Comparison, bool]:
        """Succeed only when every listed option fails to match."""
        for option in self.value["options"]:
            # Raw (non-comparator) options are wrapped in an equality check.
            if isinstance(option, _definitions.Comparator):
                matcher = option
            else:
                matcher = Equals(option)
            outcome = matcher.compare(observed, subset=subset)
            if getattr(outcome, "success", False):
                # An option matched: report it as an inverted failure.
                return _definitions.Comparison(
                    operation=f"not {outcome.operation}",
                    success=False,
                    expected=outcome.expected,
                    observed=outcome.observed,
                    children=outcome.children,
                )
        return _definitions.Comparison(
            operation="none_of",
            success=True,
            expected=self.value,
            observed=observed,
        )

    @classmethod
    def construct(cls, options: typing.List[typing.Any]) -> "NoneOf":
        """Create a NoneOf comparison operator with the specified options."""
        return cls({"options": options})

    @classmethod
    def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "NoneOf":
        """Construct a NoneOf from a YAML sequence node."""
        return cls({"options": loader.construct_sequence(node, deep=True)})
# Register every comparator class (presumably hooking it into the YAML
# machinery its _from_yaml method serves -- confirm in Comparator.register)
# and expose a lower-case module-level alias for each. When register()
# attaches a `constructor` attribute the alias points at it; otherwise the
# alias is the class itself.
Anything.register()
anything = getattr(Anything, "constructor", Anything)
Between.register()
between = getattr(Between, "constructor", Between)
Equals.register()
equals = getattr(Equals, "constructor", Equals)
Unequals.register()
unequals = getattr(Unequals, "constructor", Unequals)
Greater.register()
greater = getattr(Greater, "constructor", Greater)
GreaterOrEqual.register()
greater_or_equal = getattr(GreaterOrEqual, "constructor", GreaterOrEqual)
Less.register()
less = getattr(Less, "constructor", Less)
LessOrEqual.register()
less_or_equal = getattr(LessOrEqual, "constructor", LessOrEqual)
NoneOf.register()
none_of = getattr(NoneOf, "constructor", NoneOf)
OneOf.register()
one_of = getattr(OneOf, "constructor", OneOf)
|
[
"swernst@gmail.com"
] |
swernst@gmail.com
|
648e5ca36c4d9b01db5a8637ad045c23b07bf7f6
|
80aabbd44790ec4feee93624f61c29e87d691d6a
|
/drawBot/ui/drawView.py
|
24fac94c74d4a3c9c44d2a34358e011c780327b5
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
asaumierdemers/drawbot
|
546961ead63f71859725a87f190f7ebbd45995f2
|
9ba1ef902bdd5c8e291d5d6835e09f05bfa00261
|
refs/heads/master
| 2020-12-25T19:59:00.391766
| 2016-08-05T10:04:57
| 2016-08-05T10:04:57
| 29,844,501
| 0
| 0
| null | 2015-01-26T04:12:30
| 2015-01-26T04:12:30
| null |
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
from AppKit import *
from Quartz import PDFView, PDFThumbnailView, PDFDocument
from vanilla import Group
epsPasteBoardType = "CorePasteboardFlavorType 0x41494342"
class DrawBotPDFThumbnailView(PDFThumbnailView):
    # Refuse all drag-and-drop operations onto the thumbnail strip.
    def draggingUpdated_(self, draggingInfo):
        return NSDragOperationNone
class ThumbnailView(Group):
    """Vanilla group wrapping a PDF page-thumbnail strip."""

    nsViewClass = DrawBotPDFThumbnailView

    def setDrawView(self, view):
        """Attach the PDFView whose pages this strip should mirror."""
        self.getNSView().setPDFView_(view.getNSView())

    def getSelection(self):
        """Return the index of the first selected page, or -1 if none.

        -1 is also returned when PDFKit raises while querying the
        selection (a known intermittent failure).
        """
        try:
            # sometimes this goes weirdly wrong...
            selection = self.getNSView().selectedPages()
        except Exception:  # narrowed from a bare except: keep ^C working
            return -1
        if selection:
            for page in selection:
                document = page.document()
                index = document.indexForPage_(page)
                return index
        return -1
class DrawBotPDFView(PDFView):
    def performKeyEquivalent_(self, event):
        """Forward key equivalents, swallowing a known PDFKit crash.

        cmd + ` raises inside PDFView:
        -[__NSCFConstantString characterAtIndex:]: Range or index out of bounds
        """
        try:
            return super(DrawBotPDFView, self).performKeyEquivalent_(event)
        except Exception:  # narrowed from a bare except
            return False
class DrawView(Group):
    """Vanilla group wrapping a PDFView that displays drawing output."""

    nsViewClass = DrawBotPDFView

    def __init__(self, posSize):
        super(DrawView, self).__init__(posSize)
        pdfView = self.getNSView()
        pdfView.setAutoScales_(True)
        view = pdfView.documentView()
        scrollview = view.enclosingScrollView()
        scrollview.setBorderType_(NSBezelBorder)

    def get(self):
        """Return the current document as PDF data, or None when empty."""
        pdf = self.getNSView().document()
        if pdf is None:
            return None
        return pdf.dataRepresentation()

    def set(self, pdfData):
        """Replace the displayed document with the given PDF data."""
        pdf = PDFDocument.alloc().initWithData_(pdfData)
        self.setPDFDocument(pdf)

    def setPath(self, path):
        """Load and display the PDF file at `path`."""
        url = NSURL.fileURLWithPath_(path)
        document = PDFDocument.alloc().initWithURL_(url)
        self.setPDFDocument(document)

    def setPDFDocument(self, document):
        # Fall back to an empty document so the view never holds None.
        if document is None:
            document = PDFDocument.alloc().init()
        self.getNSView().setDocument_(document)

    def getPDFDocument(self):
        return self.getNSView().document()

    def setScale(self, scale):
        self.getNSView().setScaleFactor_(scale)

    def scale(self):
        return self.getNSView().scaleFactor()

    def scrollDown(self):
        """Scroll the document view back to its origin."""
        document = self.getNSView().documentView()
        document.scrollPoint_((0, 0))

    def scrollToPageIndex(self, index):
        """Scroll to page `index`; fall back to the top when out of range."""
        pdf = self.getPDFDocument()
        if pdf is None:
            self.scrollDown()
        elif 0 <= index < pdf.pageCount():
            try:
                # sometimes this goes weirdly wrong...
                # (narrowed from a bare except around pageAtIndex_/goToPage_)
                page = pdf.pageAtIndex_(index)
                self.getNSView().goToPage_(page)
            except Exception:
                self.scrollDown()
        else:
            self.scrollDown()
|
[
"frederik@typemytype.com"
] |
frederik@typemytype.com
|
4ed7b0073e5f3f21e7883ee46de2d41af70f1429
|
b00840e56173dc2a196442bd354b9e3cc13b17df
|
/code_util/createJobScript.py
|
c360a93fc09e90dace29b76e6b66c43797d94224
|
[] |
no_license
|
Sportsfan77777/vortex
|
56c28fb760f6c98de4a7c8fdcf1168d78b4e57af
|
780ec14937d1b79e91a367d58f75adc905b8eef2
|
refs/heads/master
| 2023-08-31T02:50:09.454230
| 2023-08-24T10:55:05
| 2023-08-24T10:55:05
| 41,785,163
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
"""
makes a new job script
"""
import argparse
def new_argument_parser(description="Make a new job script."):
    """Build the argument parser for the LSF job-script generator.

    The single positional argument is the job file name (".sh" is
    appended later); the optional flags control LSF resources, the
    environment modules to load, and the job command itself.

    Returns the configured argparse.ArgumentParser.
    """
    # Bug fix: `description` was accepted but never passed to ArgumentParser.
    parser = argparse.ArgumentParser(description=description)

    # File
    parser.add_argument("fn",
                        help='job file name (.sh appended to the end) that must be included, error otherwise')

    # Basic Parameters
    parser.add_argument('-c', dest="num_cores", type=int, default=1,
                        help='number of cores (default: 1)')
    parser.add_argument('-p', dest="ptile", type=int, default=None,
                        help='number of cores needed on each computer (default: num_cores)')
    # Bug fix: argparse %-formats help strings, so a literal "%I" must be
    # escaped as "%%I" or --help raises ValueError. The *defaults* keep the
    # single "%I" LSF placeholder unchanged.
    parser.add_argument('--err', dest="err_name", default="err_%I",
                        help='job error file name (default: err_%%I)')
    parser.add_argument('--out', dest="out_name", default="out_%I",
                        help='job output file name (default: out_%%I)')
    parser.add_argument('-q', dest="queue", default="medium",
                        help='queue (default: medium)')
    # Bug fix: help text said "queue" (copy-paste); this flag sets the job name.
    parser.add_argument('--name', dest="name", default=None,
                        help='job name (default: fn)')
    parser.add_argument('--gpu', dest="gpu", action='store_true', default=False,
                        help='request gpu resource (default: no gpus)')

    # Modules
    parser.add_argument('--python_off', dest="python", action='store_false', default=True,
                        help='include python module (default: include)')
    parser.add_argument('--fftw_off', dest="fftw", action='store_false', default=True,
                        help='include fftw module (default: include)')
    parser.add_argument('--openmpi_off', dest="openmpi", action='store_false', default=True,
                        help='include openmpi module (default: include)')

    # Job
    parser.add_argument('--mpi', dest="mpirun", action='store_true', default=False,
                        help='use mpirun (default: do not use mpirun)')
    parser.add_argument('-j', dest="job", default="",
                        help='job command (default: empty string)')
    parser.add_argument('-o', dest="output", default=None,
                        help='output file (.out appended to the end) (default: name)')

    return parser
###############################################################################
### Parse Arguments ###
# Parse the command line, then normalise derived names/paths before
# writing the LSF batch script.
args = new_argument_parser().parse_args()

# Names
if args.name is None:
    args.name = args.fn
if args.output is None:
    args.output = args.name
args.fn = "%s.sh" % args.fn
args.output = "%s.out" % args.output

# Cores
# ptile (cores requested per host) can never exceed the total core count.
if (args.ptile is None) or (args.ptile > args.num_cores):
    args.ptile = args.num_cores

###############################################################################

### Write File ###

with open(args.fn, 'w') as f:
    f.write("#!/bin/bash\n")

    ### Basic Parameters ###
    f.write("#BSUB -n %d\n" % args.num_cores)
    f.write("#BSUB -e %s\n" % args.err_name)
    f.write("#BSUB -o %s\n" % args.out_name)
    f.write('#BSUB -q "%s"\n' % args.queue)
    f.write("#BSUB -u mhammer\n")
    f.write("#BSUB -J %s\n" % args.name)
    if args.gpu:
        f.write("#BSUB -R gpu\n")
    f.write('#BSUB -R "span[ptile=%d]"\n' % args.ptile)

    # Line Break #
    f.write("\n")

    ### Modules ###
    if args.python:
        f.write("module load python/2.7.3\n")
    if args.fftw:
        f.write("module load fftw/2.1.5\n")
    if args.openmpi:
        f.write("module load openmpi\n")

    # Line Break
    f.write("\n")

    ### Job ###
    # The job line is: [mpirun -np N] <command> > <output file>
    if args.mpirun:
        f.write("mpirun -np %d " % args.num_cores)
    f.write("%s " % args.job)
    f.write("> %s\n" % args.output)

    # Line Break
    f.write("\n")
|
[
"mhammer44444@gmail.com"
] |
mhammer44444@gmail.com
|
d3be12214002bf0e8ed2b4e329795a1e62b70612
|
b2f755bdb8c5a73cf28679b14de1a7100cd48b35
|
/Interview/4/31.py
|
398dfb0ecf5c8643733ea6c6524bdb8f8ed60db3
|
[] |
no_license
|
Futureword123456/Interview
|
cc50e1a3e4e85e4ac570469fc8a839029cdc6c50
|
5cb36dc5f2459abd889e1b29f469d5149139dc5f
|
refs/heads/master
| 2023-03-25T15:24:23.939871
| 2021-03-13T08:15:54
| 2021-03-13T08:15:54
| 345,374,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/3/8 0008
# @Author : yang
# @Email : 2635681517@qq.com
# @File : 31.py
"""Python 获取昨天日期"""
import datetime
def getyesterday():
    """Return yesterday's date as a datetime.date."""
    one_day = datetime.timedelta(days=1)
    return datetime.date.today() - one_day


if __name__ == "__main__":
    print(getyesterday())
    print(datetime.datetime.now())
|
[
"2635681517@qq.com"
] |
2635681517@qq.com
|
4238d3e59229db3f82e82deeaea7ce90768f81e6
|
036a41c913b3a4e7ae265e22a672dd89302d3200
|
/未完成题目/LCP/LCP25/LCP25_Python_1.py
|
dafd8c2c8eabcccd19a5f5df0444b87409140e43
|
[] |
no_license
|
ChangxingJiang/LeetCode
|
e76f96ebda68d7ade53575354479cfc33ad4f627
|
a2209206cdd7229dd33e416f611e71a984a8dd9e
|
refs/heads/master
| 2023-04-13T15:23:35.174390
| 2021-04-24T05:54:14
| 2021-04-24T05:54:14
| 272,088,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
class Solution:
    def keyboard(self, k: int, n: int) -> int:
        """LCP 25: count the distinct strings of length exactly n over the
        26 lowercase letters in which each letter is used at most k times,
        modulo 1e9+7. (The original stub was unimplemented.)
        """
        import math

        MOD = 10 ** 9 + 7
        # ways[j] = number of distinct strings of length j buildable from
        # the letters processed so far.
        ways = [1] + [0] * n
        for _ in range(26):
            nxt = [0] * (n + 1)
            for j in range(n + 1):
                if not ways[j]:
                    continue
                # Use the current letter t times; its positions can be any
                # t of the j + t slots of the extended string.
                for t in range(min(k, n - j) + 1):
                    nxt[j + t] = (nxt[j + t] + ways[j] * math.comb(j + t, t)) % MOD
            ways = nxt
        return ways[n]


if __name__ == "__main__":
    print(Solution().keyboard(1, 1))  # 26
    print(Solution().keyboard(1, 2))  # 650
|
[
"1278729001@qq.com"
] |
1278729001@qq.com
|
aac36e5e97effc021d51bddce00836cf86108ad9
|
e1fe1ed4f2ba8ab0146ce7c08d65bc7947150fc8
|
/credit11315/pipelines.py
|
6e80a0ff0684dd2011f6c21e58ced8a6f581ef7f
|
[] |
no_license
|
yidun55/credit11315
|
0d88ceef314efa444de58eb5da8939c1acff3abe
|
b048ec9db036a382287d5faacb9490ccbf50735c
|
refs/heads/master
| 2021-01-20T01:03:30.617914
| 2015-07-31T09:58:24
| 2015-07-31T09:58:24
| 38,853,611
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import log
import os
os.chdir("/home/dyh/data/credit11315/infoDetail")
class Credit11315Pipeline(object):
    """Scrapy pipeline (Python 2) appending scraped fields to flat files
    under /home/dyh/data/credit11315/infoDetail.
    """

    def process_item(self, item, spider):
        # Single-key items carry the raw page content.
        if len(item.keys()) == 1: # the stored value is the page content
            try:
                os.chdir("/home/dyh/data/credit11315/infoDetail")
                with open(spider.writeInFile,"a") as f:
                    f.write(item["content"])
            except Exception,e:
                log.msg("content pipeline error_info=%s"%e, level=log.ERROR)
        else:
            # Multi-key items: append each field to its own per-key file.
            for key in item.iterkeys():
                try:
                    os.chdir("/home/dyh/data/credit11315/infoDetail")
                    with open('detailInfoScrapy_'+key,"a") as f:
                        f.write(item[key]+"\n")
                except Exception,e:
                    log.msg("DetailInformation(Item) pipeline error_info=%s"%e, level=log.ERROR)
|
[
"heshang1203@sina.com"
] |
heshang1203@sina.com
|
7f7be7515b49d2339d45739a3d6096151dc8de80
|
9381c0a73251768441dc45c7e181548742b9bdbc
|
/src/educative/fibonacci_numbers/house_thief_memo.py
|
dfe266791fa02380306c6208bd07804a7c2fbd97
|
[] |
no_license
|
Flaeros/leetcode
|
45cc510ec513bfb26dbb762aa1bd98f3b42dce18
|
1dcea81a21bd39fee3e3f245a1418526bd0a5e8f
|
refs/heads/master
| 2022-06-02T14:15:31.539238
| 2022-04-18T14:44:18
| 2022-04-18T14:49:05
| 250,183,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
def find_max_steal(wealth):
    """Entry point: maximum loot when no two adjacent houses may be robbed."""
    cache = [-1] * len(wealth)
    return find_max_steal_rec(wealth, cache, 0)


def find_max_steal_rec(wealth, memo, index):
    """Memoized recursion: best loot obtainable from house `index` onward."""
    if index >= len(wealth):
        return 0
    if memo[index] == -1:
        # Either rob this house and skip the next, or move on to the next.
        take = wealth[index] + find_max_steal_rec(wealth, memo, index + 2)
        skip = find_max_steal_rec(wealth, memo, index + 1)
        memo[index] = max(take, skip)
    return memo[index]
def main():
    # Demo runs; expected outputs are 15 and 18 respectively.
    print(find_max_steal([2, 5, 1, 3, 6, 2, 4]))
    print(find_max_steal([2, 10, 14, 8, 1]))


if __name__ == '__main__':
    main()
|
[
"flaeross@yandex-team.ru"
] |
flaeross@yandex-team.ru
|
9ad86092e385a8f8238bb7bb27ac3740c79a39f7
|
1ecb282756c95d9ae19035761c6e4bb480fdaf26
|
/python/lsst/ctrl/stats/records/generic.py
|
a07b96fbfc651a578c7b2e48c3f7924b5d26cf16
|
[] |
no_license
|
provingground-moe/ctrl_stats
|
58cba09f95a30007fc5df10d6d8992719b0f5368
|
14790770765b3a167d0d9f318b40e12bbb5df0bb
|
refs/heads/master
| 2020-06-10T20:42:34.260304
| 2017-08-24T21:26:34
| 2017-08-24T21:26:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
#
# LSST Data Management System
# Copyright 2008-2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from __future__ import absolute_import
from .record import Record
class Generic(Record):
    """Generic log event

    Listed in documentation as not used, but here for completeness.

    Parameters
    ----------
    year: `str`
        the year to tag the job with
    lines: list
        the strings making up this record
    """

    def __init__(self, year, lines):
        Record.__init__(self, year, lines)


# Module-level registry hooks: the class to instantiate for this record
# type and its event code. NOTE(review): presumably consumed by a
# record-class lookup elsewhere in ctrl_stats -- confirm before renaming.
eventClass = Generic
eventCode = "008"
|
[
"srp@ncsa.illinois.edu"
] |
srp@ncsa.illinois.edu
|
0ad73be05ea4a42a3b2118023282236427d3145d
|
6a95112805b64322953429270a305d01fef3faea
|
/dist/weewx-4.3.0/examples/stats.py
|
86a1e5c5b193afe5fb375e4eef30098d3dbc84b2
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
tomdotorg/docker-weewx
|
c6d59dc492a9e53f3bc898f7b9f593717092d72c
|
7085654f455d39b06acc688738fde27e1f78ad1e
|
refs/heads/main
| 2023-06-08T17:57:44.184399
| 2023-01-30T11:21:23
| 2023-01-30T11:21:23
| 54,113,384
| 21
| 16
|
Apache-2.0
| 2022-10-19T23:46:26
| 2016-03-17T11:39:29
|
Dockerfile
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
# See the file LICENSE.txt for your rights.
"""Example of how to extend the search list used by the Cheetah generator.
*******************************************************************************
This search list extension offers two extra tags:
'alltime': All time statistics.
For example, "what is the all time high temperature?"
'seven_day': Statistics for the last seven days.
That is, since midnight seven days ago.
*******************************************************************************
To use this search list extension:
1) Copy this file to the user directory. See https://bit.ly/33YHsqX for where your user
directory is located.
2) Modify the option search_list in the skin.conf configuration file, adding
the name of this extension. When you're done, it will look something like
this:
[CheetahGenerator]
search_list_extensions = user.stats.MyStats
You can then use tags such as $alltime.outTemp.max for the all-time max
temperature, or $seven_day.rain.sum for the total rainfall in the last
seven days.
*******************************************************************************
"""
import datetime
import time
from weewx.cheetahgenerator import SearchList
from weewx.tags import TimespanBinder
from weeutil.weeutil import TimeSpan
class MyStats(SearchList):
    """Cheetah search-list extension adding $alltime and $seven_day tags."""

    def __init__(self, generator):
        SearchList.__init__(self, generator)

    def get_extension_list(self, timespan, db_lookup):
        """Return the extra tag bindings.

        timespan  -- weeutil.weeutil.TimeSpan bounding all valid times.
        db_lookup -- function mapping a data binding to a database manager.
        """
        # All-time statistics simply reuse the report's full timespan.
        alltime_binder = TimespanBinder(
            timespan,
            db_lookup,
            context='year',
            formatter=self.generator.formatter,
            converter=self.generator.converter,
            skin_dict=self.generator.skin_dict)

        # Midnight seven days before the end of the report period:
        # datetime.date truncates the timestamp to midnight, and one week
        # is subtracted before converting back to unix epoch time.
        week_start_date = datetime.date.fromtimestamp(timespan.stop) \
            - datetime.timedelta(weeks=1)
        week_start_ts = time.mktime(week_start_date.timetuple())

        seven_day_binder = TimespanBinder(
            TimeSpan(week_start_ts, timespan.stop),
            db_lookup,
            context='week',
            formatter=self.generator.formatter,
            converter=self.generator.converter,
            skin_dict=self.generator.skin_dict)

        # A single dict mapping tag names to their binders.
        return [{'alltime': alltime_binder,
                 'seven_day': seven_day_binder}]
|
[
"tom@tom.org"
] |
tom@tom.org
|
8dd6db002b7cfee421083e2f1a14012671d69f19
|
3941f6b431ccb00ab75f19c52e40e5dad2e98b9b
|
/Dasymetric/dasym_tables.py
|
41bc20d47b5ee4b9da2c2f6b66632d0c1d6ba20e
|
[
"Apache-2.0"
] |
permissive
|
scw/global-threats-model
|
70c375c1633e8578f1e41f278b443f1501ceb0ec
|
11caa662373c5dbfbb08bb0947f3dd5eedc0b4e0
|
refs/heads/master
| 2016-09-05T11:25:13.056352
| 2013-08-22T22:10:13
| 2013-08-22T22:10:13
| 3,566,652
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,033
|
py
|
# ---------------------------------------------------------------------------
# dasym_tables.py
# Created on: Wed Jan 11 2006
# Written by: Matthew Perry
# Usage: See the "script arguments" section
# ---------------------------------------------------------------------------
#================================================================#
# Prepare Environment
# Import system modules
import sys, string, os, win32com.client
# Create the Geoprocessor object
gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")
# Set the necessary product code
gp.SetProduct("ArcInfo")
# Check out any necessary licenses
gp.CheckOutExtension("spatial")
# Load required toolboxes...
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Conversion Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Data Management Tools.tbx")
#----------------------------------------#
# Script Arguments
# Scratch directory for intermediate rasters and tables.
Temp_Workspace = "C:\\WorkSpace\\temp"

# Read the eleven script arguments; fall back to the hard-coded default
# paths when they are not supplied on the command line.
try:
    #INPUTS
    Spatial_Units_Raster = sys.argv[1] # raster containing country code
    Attribute_Lookup_Table = sys.argv[2] # dbf containing countries and all attributes of interest
    Attribute_Lookupt_Table_Join_Item = sys.argv[3] # country code
    Attribute_Lookup_Table_Value_Item = sys.argv[4] # the variable of interest
    Aux_Raster = sys.argv[5] # landcover
    Weighting_Table = sys.argv[6] # Table relating land cover classes to relative weights
    Weighting_Table_Join_Field = sys.argv[7] # column with landcover codes
    Weighting_Table_Weight_Field = sys.argv[8] # column with relative wieghts
    #OUTPUTS
    Combined_Raster = sys.argv[9] # output of aml, input to gp script
    Combined_Raster_Table = sys.argv[10] # output of aml, input to gp script
    Output_Raster = sys.argv[11] # the dasymetric map
except:
    # NOTE(review): this bare except masks any error above, not just a
    # missing argument; kept byte-identical (Python 2 source).
    #INPUTS
    Spatial_Units_Raster = "C:\\WorkSpace\\FAO\\dasym\\units\\units_as"
    Attribute_Lookup_Table = "C:\\WorkSpace\\FAO\\dasym\\lookups\\faocia.dbf"
    Attribute_Lookupt_Table_Join_Item = "CODE"
    Attribute_Lookup_Table_Value_Item = "FERT"
    Aux_Raster = "C:\\WorkSpace\\clipped_rusle_inputs\\as_igbp"
    Weighting_Table = "C:\\WorkSpace\\FAO\\dasym\\weights\\C.dbf"
    Weighting_Table_Join_Field = "LANDCOVER"
    Weighting_Table_Weight_Field = "WEIGHT"
    #OUTPUTS
    Combined_Raster = Temp_Workspace + "\\ctpc"
    Combined_Raster_Table = Temp_Workspace + "\\ctpc.dbf"
    Output_Raster = "C:\\WorkSpace\\FAO\\dasym\\outputs\\as_fertC"
#--------------------------------#
# Constants
Joined_Output_Table_Name = "combine_weight_join"
Joined_Output_Table = Temp_Workspace + "\\" + Joined_Output_Table_Name + ".dbf"
Combine_Reclass = Temp_Workspace + "\\combine2_rcl"
Temp_Raster = Temp_Workspace + "\\temp_dasy"
Combined_Raster_Table_Variable_Field = "VOI" # Should be constant
#================================================================#
# Main
#---------------------------------#
# Call the AML as the first step
# b/c ArcGIS can't handle raster attribute tables
amlPath = os.path.dirname(sys.argv[0]) + "\\"
sCommandLine = "arc.exe \"&run\" \"" + amlPath + "dasym_combine.aml \" "
sCommandLine += Spatial_Units_Raster + " " + Attribute_Lookup_Table + " "
sCommandLine += Attribute_Lookupt_Table_Join_Item + " " + Attribute_Lookup_Table_Value_Item + " "
sCommandLine += Aux_Raster + " "
sCommandLine += Combined_Raster + " " + Combined_Raster_Table + " " + Temp_Workspace + "'"
os.system(sCommandLine)
# gp.AddMessage(" ****** Combined Layers")
print " ****** Combined Layers"
#------------------------------------------------#
# Determine the column names based on user input
base = os.path.basename(Combined_Raster_Table)
split = base.split(".")
combinedPrefix = split[0]
base = os.path.basename(Weighting_Table)
split = base.split(".")
weightedPrefix = split[0]
base = os.path.basename(Aux_Raster)
split = base.split(".")
auxprefix = split[0]
auxprefix = auxprefix[:10]
Variable_Field = combinedPrefix + "_VOI" # "ctfc_VOI" # Combined_Raster_Table _ VOI
Variable_Field = Variable_Field[:10]
Weight_Field = weightedPrefix + "_" + Weighting_Table_Weight_Field # "TFC_WEIGHT"
Weight_Field = Weight_Field[:10]
Count_Field = combinedPrefix + "_COUNT" # Combined_Raster_Table _ COUNT
Count_Field = Count_Field[:10]
Value_Field = combinedPrefix + "_VALUE" # Combined_Raster_Table _ VALU
Value_Field = Value_Field[:10]
Combined_Raster_Table_Join_Field = auxprefix.upper() # "LANDCOVER2" # Name of aux raster truncated and caps
try:
#------------------------------------------------#
# Join Tables and create new output table
gp.MakeTableView_management(Combined_Raster_Table, "ctable")
gp.AddJoin_management("ctable", Combined_Raster_Table_Join_Field, Weighting_Table, Weighting_Table_Join_Field, "KEEP_ALL")
gp.TableToTable_conversion("ctable", Temp_Workspace, Joined_Output_Table_Name)
print " ****** Created joined table"
#------------------------------------------------#
# Add fields
gp.AddField_management(Joined_Output_Table, "totalpc", "DOUBLE", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
gp.AddField_management(Joined_Output_Table, "valuepp", "LONG", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
gp.MakeTableView_management(Joined_Output_Table, "jtable")
print " ****** Added Fields and reloaded table view"
#------------------------------------------------#
# Calculate Total of Variable Per Auxillary Data Class
gp.CalculateField_management("jtable", "totalpc", "[" + Variable_Field + "] * [" + Weight_Field + "]")
# Calculate Value of variable per pixel
gp.CalculateField_management("jtable", "valuepp", "int( [totalpc] * 10000.0 / [" + Count_Field + "]) ")
print " ****** Calculated New Fields"
#------------------------------------------------#
# Reclass by Table...
gp.ReclassByTable_sa(Combined_Raster, "jtable", Value_Field , Value_Field, "valuepp", Temp_Raster , "DATA")
print " ****** Reclassed Raster"
#------------------------------------------------#
# Scale Raster to original units
Map_Algebra_expression = Temp_Raster + " / 10000.0"
gp.SingleOutputMapAlgebra_sa(Map_Algebra_expression, Output_Raster)
print " ****** Scaled raster"
except:
print gp.GetMessages()
sys.exit(1)
|
[
"perrygeo@gmail.com"
] |
perrygeo@gmail.com
|
5aae57fc607a70052c54ad09b04cbd25840d0f28
|
9fc6604ae98e1ae91c490e8201364fdee1b4222a
|
/eg_msg_base/models/msg_status.py
|
4501bdc8b00a217bae754eaa0a5b5c32b395123c
|
[] |
no_license
|
nabiforks/baytonia
|
b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4
|
58cb304d105bb7332f0a6ab685015f070988ba56
|
refs/heads/main
| 2023-03-23T21:02:57.862331
| 2021-01-04T03:40:58
| 2021-01-04T03:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from odoo import models, fields
class MsgStatus(models.Model):
    """Odoo model holding one delivery-status entry for an SMS message."""
    _name = "msg.status"

    # Human-readable status label (read-only; presumably set by the
    # sending side -- confirm against the writer of these records).
    name = fields.Char(string="Status", readonly=True)
    # Flags the terminal status in the message life-cycle.
    is_last_status = fields.Boolean(string="Is Last Status")
    # SMS instance this status belongs to.
    sms_instance_id = fields.Many2one(comodel_name="sms.instance", string="Sms Instance", readonly=True)
|
[
"ash@odoxsofthub.com"
] |
ash@odoxsofthub.com
|
3707942092f8a2717e1e159fd36fc8769e28c5ee
|
5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e
|
/py/topcoder/TCCC 2003 Semifinals 2/TicSolver.py
|
a7805ff035c64217729de5ff4c0bd9d4ebc789e0
|
[
"MIT"
] |
permissive
|
shhuan/algorithms
|
36d70f1ab23dab881bf1a15573fbca7b2a3f4235
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
refs/heads/master
| 2021-05-07T14:21:15.362588
| 2017-11-07T08:20:16
| 2017-11-07T08:20:16
| 109,799,698
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,968
|
py
|
# -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class TicSolver:
    def whoWins(self, board):
        # TODO: unimplemented contest stub -- always returns the empty
        # string, so every sample case in the harness below fails.
        return ""
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
    """Compare a received answer against the expected one.

    `received` is first coerced to the expected value's type; lists and
    tuples are compared element-wise (recursively), floats with a relative
    tolerance of 1e-9, everything else with ==. Any coercion or comparison
    error means "not equal".
    """
    try:
        _t = type(expected)
        received = _t(received)
        if _t == list or _t == tuple:
            if len(expected) != len(received):
                return False
            return all(tc_equal(e, r) for (e, r) in zip(expected, received))
        elif _t == float:
            eps = 1e-9
            d = abs(received - expected)
            return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
        else:
            return expected == received
    except Exception:  # narrowed from a bare except: keep ^C/SystemExit working
        return False
def pretty_str(x):
    """Render a value for test output: strings quoted, tuples parenthesised."""
    kind = type(x)
    if kind == str:
        return '"%s"' % x
    if kind == tuple:
        inner = ','.join(pretty_str(item) for item in x)
        return '(%s)' % inner
    return str(x)
def do_test(board, __expected):
    """Run one sample case through TicSolver.whoWins and report the result.

    Prints PASSED/FAILED/RUNTIME ERROR to stdout; returns 1 on a pass,
    0 otherwise.
    """
    startTime = time.time()
    instance = TicSolver()
    exception = None
    try:
        __result = instance.whoWins(board);
    except:
        # Capture the traceback so it can be printed with the report.
        import traceback
        exception = traceback.format_exc()
    elapsed = time.time() - startTime # in sec
    if exception is not None:
        sys.stdout.write("RUNTIME ERROR: \n")
        sys.stdout.write(exception + "\n")
        return 0
    if tc_equal(__expected, __result):
        sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
        return 1
    else:
        sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
        sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
        sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
        return 0
def run_tests():
sys.stdout.write("TicSolver (500 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("TicSolver.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
board = []
for i in range(0, int(f.readline())):
board.append(f.readline().rstrip())
board = tuple(board)
f.readline()
__answer = f.readline().rstrip()
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(board, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1430750694
PT, TT = (T / 60.0, 75.0)
points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
|
[
"shuangquanhuang@gmail.com"
] |
shuangquanhuang@gmail.com
|
f095c17c392697ec5fb7da951dd4309508663a2f
|
c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8
|
/Hacker Rank/Day3_04_02_20.py
|
c7cd53f8193dae0cfdc503af27bf0d8b26745ef5
|
[] |
no_license
|
Silentsoul04/FTSP_2020
|
db0dae6cd9c371f3daa9219f86520dfa66348236
|
7e603af918da2bcfe4949a4cf5a33107c837894f
|
refs/heads/master
| 2022-12-21T20:44:32.031640
| 2020-09-20T12:29:58
| 2020-09-20T12:29:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 22:00:31 2020
@author: Rajesh
"""
def swap_case(s):
    """Return `s` with the case of every cased character inverted."""
    return s.swapcase()
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
#########################
a = "this is a string"
b = a.split(" ") # a is converted to a list of strings.
print(b)
c= "-".join(b)
print(c)
#####################
def b(a):
    """Join the whitespace-separated words of `a` with hyphens."""
    return "-".join(a.split())
if __name__ == '__main__':
line = input()
result = b(line)
print(result)
######################
def print_full_name(a, b):
    """Print the greeting expected by the HackerRank exercise."""
    print(f"Hello {a} {b}! You just delved into python.")
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
##############################
def mutate_string(string, position, character):
    """Return `string` with the character at `position` replaced by `character`."""
    chars = list(string)
    chars[position] = character
    return ''.join(chars)
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
|
[
"sharma90126@gmail.com"
] |
sharma90126@gmail.com
|
175b341a56c39c15bc473eabefdea8436aba734f
|
09d79c3509252cfccac35bb28de9a0379094823a
|
/alx/movies/migrations/0002_auto_20201123_1045.py
|
1ac4f1ab4103dc7788ff628ea113fe1d93025510
|
[] |
no_license
|
marianwitkowski/python2311
|
73ad491016cd6d0010d0203db43aca2c6debe0ad
|
9bbeca3fb6d8658a1321ab099ff2102cd7de76e0
|
refs/heads/master
| 2023-01-22T13:13:56.695680
| 2020-12-02T14:58:15
| 2020-12-02T14:58:15
| 315,350,865
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Generated by Django 3.1.3 on 2020-11-23 09:45
from django.db import migrations
class Migration(migrations.Migration):
    """Set Polish verbose names ('Film'/'Filmy') on the movie model.

    Metadata-only change: AlterModelOptions updates Django state and
    touches no database schema.
    """

    dependencies = [
        ('movies', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='movie',
            options={'verbose_name': 'Film', 'verbose_name_plural': 'Filmy'},
        ),
    ]
|
[
"marian.witkowski@gmail.com"
] |
marian.witkowski@gmail.com
|
9252178bd560c85b23332610a4299b0ec0f71f57
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/q4bBcq5NET4CH5Rcb_16.py
|
5f42fed2a73573979ea8acc56462e2f23301b0ed
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
def jay_and_bob(txt):
    """Map a slang quantity name to its weight in grams.

    Raises KeyError for an unknown quantity, like the original lookup.
    """
    weights = {
        "half": "14 grams",
        "quarter": "7 grams",
        "eighth": "3.5 grams",
        "sixteenth": "1.75 grams",
    }
    return weights[txt]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f0f7d898a452de3ce1b9a7940f8dcd61c38c6500
|
18f8abb90efece37949f5b5758c7752b1602fb12
|
/py/django_tools/django-haystack/tests/simple_tests/tests/simple_backend.py
|
d9b5120d942eb0f05a4fcbd1769c58de0da181cd
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
marceltoben/evandrix.github.com
|
caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516
|
abc3fbfb34f791f84e9a9d4dc522966421778ab2
|
refs/heads/master
| 2021-08-02T06:18:12.953567
| 2011-08-23T16:49:33
| 2011-08-23T16:49:33
| 2,267,457
| 3
| 5
| null | 2021-07-28T11:39:25
| 2011-08-25T11:18:56
|
C
|
UTF-8
|
Python
| false
| false
| 5,799
|
py
|
from datetime import date
from django.conf import settings
from django.test import TestCase
from haystack import connections, connection_router
from haystack import indexes
from haystack.query import SearchQuerySet
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel
from core.tests.mocks import MockSearchResult
class SimpleMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over MockModel for the simple-backend tests."""

    # Primary document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Model attribute 'author' is exposed as 'name', with faceting enabled.
    name = indexes.CharField(model_attr='author', faceted=True)
    pub_date = indexes.DateField(model_attr='pub_date')

    def get_model(self):
        return MockModel
class SimpleSearchBackendTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
    super(SimpleSearchBackendTestCase, self).setUp()
    # Backend and unified index for the default connection, plus the
    # fixture rows the individual tests operate on.
    self.backend = connections['default'].get_backend()
    self.index = connections['default'].get_unified_index().get_index(MockModel)
    self.sample_objs = MockModel.objects.all()

def test_update(self):
    # Smoke test: indexing the fixtures must not raise.
    self.backend.update(self.index, self.sample_objs)

def test_remove(self):
    # Smoke test: removing a single object must not raise.
    self.backend.remove(self.sample_objs[0])

def test_clear(self):
    # Smoke test: clearing the whole index must not raise.
    self.backend.clear()
def test_search(self):
# No query string should always yield zero results.
self.assertEqual(self.backend.search(u''), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'*')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'*')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'daniel')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'daniel')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'should be a string')['hits'], 1)
self.assertEqual([result.pk for result in self.backend.search(u'should be a string')['results']], [8])
# Ensure the results are ``SearchResult`` instances...
self.assertEqual(self.backend.search(u'should be a string')['results'][0].score, 0)
self.assertEqual(self.backend.search(u'index document')['hits'], 6)
self.assertEqual([result.pk for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
# Regression-ville
self.assertEqual([result.object.id for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
self.assertEqual(self.backend.search(u'index document')['results'][0].model, MockModel)
# No support for spelling suggestions
self.assertEqual(self.backend.search(u'Indx')['hits'], 0)
self.assertFalse(self.backend.search(u'Indx').get('spelling_suggestion'))
# No support for facets
self.assertEqual(self.backend.search(u'', facets=['name']), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', facets=['name'])['hits'], 23)
self.assertEqual(self.backend.search(u'', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})['hits'], 23)
self.assertEqual(self.backend.search(u'', query_facets={'name': '[* TO e]'}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', query_facets={'name': '[* TO e]'})['hits'], 23)
self.assertFalse(self.backend.search(u'').get('facets'))
self.assertFalse(self.backend.search(u'daniel').get('facets'))
# Note that only textual-fields are supported.
self.assertEqual(self.backend.search(u'2009-06-18')['hits'], 0)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.backend.search(u'index document', result_class=MockSearchResult)['results'][0], MockSearchResult))
def test_more_like_this(self):
self.backend.update(self.index, self.sample_objs)
self.assertEqual(self.backend.search(u'*')['hits'], 23)
# Unsupported by 'simple'. Should see empty results.
self.assertEqual(self.backend.more_like_this(self.sample_objs[0])['hits'], 0)
class LiveSimpleSearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveSimpleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SimpleMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sample_objs = MockModel.objects.all()
self.sqs = SearchQuerySet()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
settings.DEBUG = self.old_debug
super(LiveSimpleSearchQuerySetTestCase, self).tearDown()
def test_general_queries(self):
# For now, just make sure these don't throw an exception.
# They won't work until the simple backend is improved.
self.assertTrue(len(self.sqs.auto_query('daniel')) > 0)
self.assertTrue(len(self.sqs.filter(text='index')) > 0)
self.assertTrue(len(self.sqs.exclude(name='daniel')) > 0)
self.assertTrue(len(self.sqs.order_by('-pub_date')) > 0)
|
[
"evandrix@gmail.com"
] |
evandrix@gmail.com
|
1031decef22a5f8e9fa6d0446887620f1a17bbd6
|
cb95b3a2714f003e76c5e1db1d3e4726f87f14d8
|
/pstests/launch_schevers.py
|
50881808378a6bad2b948300e21df85af51ae09c
|
[
"Apache-2.0"
] |
permissive
|
DMALab/Het
|
5aaa9fda1b8c77c0db24a477fe1eccd9665a9fe0
|
81b7e9f0f593108db969fc46a1af3df74b825230
|
refs/heads/main
| 2023-03-30T13:22:03.085283
| 2021-04-04T05:31:43
| 2021-04-04T05:31:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
from athena import gpu_ops as ad
import os
import sys
import yaml
import multiprocessing
import signal
def main():
def start_scheduler(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "scheduler"
print('Scheduler starts...')
ad.scheduler_init()
ad.scheduler_finish()
def start_server(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "server"
print('Server starts...')
ad.server_init()
ad.server_finish()
def signal_handler(sig, frame):
print("SIGINT signal caught, stop Training")
for proc in server_procs:
proc.kill()
sched_proc.kill()
exit(0)
if len(sys.argv) == 1:
settings = yaml.load(open('./settings/dist_s1.yml').read(), Loader=yaml.FullLoader)
else:
file_path = sys.argv[1]
suffix = file_path.split('.')[-1]
if suffix == 'yml':
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
else:
assert False, 'File type not supported.'
print('Scheduler and servers settings:')
print(settings)
server_procs = []
for key, value in settings.items():
if key == 'shared':
continue
elif key == 'sched':
sched_proc = multiprocessing.Process(target=start_scheduler, args=(value,))
sched_proc.start()
else:
server_procs.append(multiprocessing.Process(target=start_server, args=(value,)))
server_procs[-1].start()
signal.signal(signal.SIGINT, signal_handler)
for proc in server_procs:
proc.join()
sched_proc.join()
if __name__ == '__main__':
main()
|
[
"swordonline@foxmail.com"
] |
swordonline@foxmail.com
|
e538aa28b1bd9e8f0574539f2c5075b7eea00ec2
|
ba962c2441572ba45ff97a97bb713eb8a603a269
|
/lunchmap/models.py
|
2c9b100ab61729344379c83188a3554f131dd623
|
[] |
no_license
|
melonpan777/my-first-blog
|
8158104ba2b3c97a8e6350ac57aac77edf85be26
|
9ff5eee69523d8fbbbd004e566090ea715b043d5
|
refs/heads/master
| 2020-06-04T13:58:16.704685
| 2019-06-15T11:14:00
| 2019-06-15T11:14:00
| 192,051,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
from django.db import models
from django.urls import reverse
class Category(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Shop(models.Model):
name = models.CharField(max_length=255)
address = models.CharField(max_length=255)
memo = models.CharField(max_length=255, null=True)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
category = models.ForeignKey(
Category,
on_delete=models.PROTECT,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('lunchmap:detail', kwargs={'pk': self.pk})
|
[
"you@example.com"
] |
you@example.com
|
f6b131bbddadded5e915501ce5a719b1e74ce352
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/cloudformation/checks/resource/aws/APIGatewayXray.py
|
79b7ec85c6b5ac40b0aaa6c2c422267e4a656db6
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class APIGatewayXray(BaseResourceValueCheck):
    """CloudFormation check CKV_AWS_73: API Gateway stages must enable X-Ray tracing."""
    def __init__(self):
        # Human-readable title and stable check ID reported with each finding.
        name = "Ensure API Gateway has X-Ray Tracing enabled"
        id = "CKV_AWS_73"
        # Applies both to plain API Gateway stages and to SAM-defined APIs.
        supported_resources = ['AWS::ApiGateway::Stage', "AWS::Serverless::Api"]
        categories = [CheckCategories.LOGGING]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
    def get_inspected_key(self):
        """Return the path within the resource template of the property to inspect."""
        return 'Properties/TracingEnabled'
    def get_expected_value(self):
        """Return the value the inspected key must hold for the check to pass."""
        return True
check = APIGatewayXray()
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
fd9e1af03b971a1db1d6893bbd1eb4399fbcb3d6
|
b6c09a1b87074d6e58884211ce24df8ec354da5c
|
/1720. 解码异或后的数组.py
|
62dc31fa20f8ded1e4528d692e236b11be60047e
|
[] |
no_license
|
fengxiaolong886/leetcode
|
a0ee12d67c4a10fb12d6ca4369762ab5b090cab1
|
4c0897bc06a297fa9225a0c46d8ec9217d876db8
|
refs/heads/master
| 2023-03-18T22:16:29.212016
| 2021-03-07T03:48:16
| 2021-03-07T03:48:16
| 339,604,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
"""
未知 整数数组 arr 由 n 个非负整数组成。
经编码后变为长度为 n - 1 的另一个整数数组 encoded ,其中 encoded[i] = arr[i] XOR arr[i + 1] 。例如,arr = [1,0,2,1] 经编码后得到 encoded = [1,2,3] 。
给你编码后的数组 encoded 和原数组 arr 的第一个元素 first(arr[0])。
请解码返回原数组 arr 。可以证明答案存在并且是唯一的。
示例 1:
输入:encoded = [1,2,3], first = 1
输出:[1,0,2,1]
解释:若 arr = [1,0,2,1] ,那么 first = 1 且 encoded = [1 XOR 0, 0 XOR 2, 2 XOR 1] = [1,2,3]
示例 2:
输入:encoded = [6,2,7,3], first = 4
输出:[4,2,0,7,4]
"""
def decode(encoded, first):
    """Recover the original array from its XOR-encoded form.

    Given encoded[i] == arr[i] ^ arr[i + 1] and ``first`` == arr[0],
    each original element is the running XOR of ``first`` with a prefix
    of ``encoded``.
    """
    arr = [first]
    acc = first
    for value in encoded:
        acc ^= value
        arr.append(acc)
    return arr
print(decode(encoded = [1,2,3], first = 1))
print(decode(encoded = [6,2,7,3], first = 4))
|
[
"xlfeng886@163.com"
] |
xlfeng886@163.com
|
47c3d8019181b00a4cc6f1e528455517694034d1
|
1662507ec7104531e4e54209fc32bfdf397b60cd
|
/backend/wallet/models.py
|
c0d4a9fbfaf096d0cda2c061ebe3a3c6041ebd63
|
[] |
no_license
|
crowdbotics-apps/home-trend-24478
|
4b2397fbefc9469e2d8f00240dff0b3fc3eaa368
|
850309d0bb282cf824f8b8d42ef8c6ab3c43bc1c
|
refs/heads/master
| 2023-03-07T18:34:15.590576
| 2021-02-20T00:34:25
| 2021-02-20T00:34:25
| 338,431,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
from django.conf import settings
from django.db import models
class PaymentMethod(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.CustomerWallet",
on_delete=models.CASCADE,
related_name="paymentmethod_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
class PaymentTransaction(models.Model):
"Generated Model"
price = models.FloatField()
tip = models.FloatField()
tracking_id = models.CharField(
max_length=50,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
tasker = models.ForeignKey(
"task_profile.TaskerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_tasker",
)
customer = models.ForeignKey(
"task_profile.CustomerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_customer",
)
transaction = models.ForeignKey(
"task.TaskTransaction",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_transaction",
)
payment_method = models.ForeignKey(
"wallet.PaymentMethod",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_payment_method",
)
class TaskerWallet(models.Model):
"Generated Model"
tasker = models.OneToOneField(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="taskerwallet_tasker",
)
balance = models.FloatField(
max_length=254,
)
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class CustomerWallet(models.Model):
"Generated Model"
customer = models.OneToOneField(
"task_profile.CustomerProfile",
on_delete=models.CASCADE,
related_name="customerwallet_customer",
)
balance = models.FloatField()
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class TaskerPaymentAccount(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.TaskerWallet",
on_delete=models.CASCADE,
related_name="taskerpaymentaccount_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
# Create your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bd40e87cf094c91dcb5d4c15d6fec0e2daf3068f
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/spaCy/2016/4/test_flag_features.py
|
880704e28905500ee8aa5b21c6e60fc6e73fdc58
|
[
"MIT"
] |
permissive
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
from __future__ import unicode_literals
import pytest
from spacy.orth import is_alpha
from spacy.orth import is_digit
from spacy.orth import is_punct
from spacy.orth import is_space
from spacy.orth import is_ascii
from spacy.orth import is_upper
from spacy.orth import is_lower
from spacy.orth import is_title
@pytest.fixture
def words():
return ["1997", "19.97", "hello9", "Hello", "HELLO", "Hello9", "\n", "!",
"!d", "\nd"]
def test_is_alpha(words):
assert not is_alpha(words[0])
assert not is_alpha(words[1])
assert not is_alpha(words[2])
assert is_alpha(words[3])
assert is_alpha(words[4])
assert not is_alpha(words[5])
assert not is_alpha(words[6])
assert not is_alpha(words[7])
assert not is_alpha(words[8])
assert not is_alpha(words[9])
def test_is_digit(words):
assert is_digit(words[0])
assert not is_digit(words[1])
assert not is_digit(words[2])
assert not is_digit(words[3])
assert not is_digit(words[4])
assert not is_digit(words[5])
assert not is_digit(words[6])
assert not is_digit(words[7])
assert not is_digit(words[8])
assert not is_digit(words[9])
def test_is_quote(words):
pass
def test_is_bracket(words):
pass
def test_is_left_bracket(words):
pass
def test_is_right_bracket(words):
pass
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
24875a336f66ccd4c114ada3a3e42c2d603c2639
|
e81d274d6a1bcabbe7771612edd43b42c0d48197
|
/数据库/03_Redis/day48(主从服务器)/demo/02_python操作redis/01.py
|
9cef9a735360a75455cde6d390c9cebd36992a94
|
[
"MIT"
] |
permissive
|
ChWeiking/PythonTutorial
|
1259dc04c843382f2323d69f6678b9431d0b56fd
|
1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61
|
refs/heads/master
| 2020-05-15T00:50:10.583105
| 2016-07-30T16:03:45
| 2016-07-30T16:03:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
# Recommended article:
# http://python.jobbole.com/87305/
import redis
# Get a connection object. When we create a connection with Redis or StrictRedis,
# no connection is actually opened internally; the connection we get is provided
# and managed by a connection pool, so we never need to close or release it
# ourselves. The pool has its own interface for closing connections (calling it
# closes them all), and the redis module manages the pool automatically.
conn = redis.StrictRedis('127.0.0.1',6379,password=123456)
# For multiple writes, use a pipeline object: commands are buffered in the
# pipeline and only persisted to the database when execute() is called.
pip = conn.pipeline()
pip.set('a',1)
pip.set('b',2)
pip.set('c',3)
# Commit the buffered commands.
pip.execute()
# For reads, either the pipeline object or the conn object can be used.
print(conn.get('a'))
print('哦了')
|
[
"1025212779@qq.com"
] |
1025212779@qq.com
|
794e2904caebb85aa81ccb41eaed66721843747f
|
09301c71638abf45230192e62503f79a52e0bd80
|
/besco_erp/besco_warehouse/general_stock_fifo/__openerp__.py
|
7aa0010772448e6c5236add7f97c1eec77d47520
|
[] |
no_license
|
westlyou/NEDCOFFEE
|
24ef8c46f74a129059622f126401366497ba72a6
|
4079ab7312428c0eb12015e543605eac0bd3976f
|
refs/heads/master
| 2020-05-27T06:01:15.188827
| 2017-11-14T15:35:22
| 2017-11-14T15:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
##############################################################################
{
"name" : "General Stock FIFO",
"version" : "9.0",
"author" : "Le Truong Thanh <thanh.lt1689@gmail.com>",
'category': 'General 90',
"depends" : ["general_stock",
"general_account",
],
"init_xml" : [],
"demo_xml" : [],
"description": """
""",
'data': [
# 'security/ir.model.access.csv',
# 'security/security.xml',
'cron.xml',
'stock_fifo_view.xml',
'menu.xml',
],
'test': [
],
'installable': True,
'auto_install': False,
'certificate': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"son.huynh@nedcoffee.vn"
] |
son.huynh@nedcoffee.vn
|
1a4a84046bb067d8317cba7a3dfb51fef729d588
|
abf44e8ac8325e1c95b0d0569baee19b8f725b0a
|
/1_slide_window/7.py
|
79fadc05e2ce3816ac627747f460e59868bd8734
|
[] |
no_license
|
terrifyzhao/educative2
|
05994b0e7f4e0c8d4319106eddd48ba1dfe5317d
|
00e9d630da117fa9550f2efb2191709734c63c8a
|
refs/heads/master
| 2022-12-24T02:51:18.671842
| 2020-09-24T07:43:08
| 2020-09-24T07:43:08
| 276,569,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
def length_of_longest_substring(arr, k):
    """Length of the longest window of ``arr`` containing at most ``k`` zeros.

    Classic sliding window: the right edge grows one element per iteration,
    and the left edge advances by at most one step whenever the window holds
    more than ``k`` zeros, so the window length never shrinks.
    """
    left = 0
    best = 0
    ones = 0
    for right, value in enumerate(arr):
        ones += value == 1
        # Too many zeros in the window: retire the leftmost element.
        if right - left + 1 - ones > k:
            ones -= arr[left] == 1
            left += 1
        best = max(best, right - left + 1)
    return best
def main():
print(length_of_longest_substring([0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1], 2))
print(length_of_longest_substring(
[0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], 3))
main()
|
[
"zjiuzhou@gmail.com"
] |
zjiuzhou@gmail.com
|
81f5316150af9c908dd4b3ef8628cca2b90db2b0
|
8fc7635b84b42e61b7efb9eaf7215394b5b5790a
|
/aliennor-backend copy/aliennorDjangoBackend/aliennorDjangoBackend/wsgi.py
|
021b6e96cb9641200f626f50804bb038f497e40a
|
[] |
no_license
|
phamcong/aliennor-platform
|
f1e8470aab7ed634859e071f6028931f576ddf3e
|
e1d71532426ac9414d2158d50ee34c32257618f0
|
refs/heads/master
| 2021-05-14T17:08:08.629564
| 2018-02-17T23:35:07
| 2018-02-17T23:35:07
| 116,038,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
"""
WSGI config for aliennorDjangoBackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aliennorDjangoBackend.settings")
application = get_wsgi_application()
|
[
"ccuong.ph@gmail.com"
] |
ccuong.ph@gmail.com
|
40f118a930e06e6edf455277d99dddcc1d85aa9a
|
2e6c95871bd255873fb563347c0f070e6fcdde74
|
/ngram_2_model_pca.py
|
6b077e23c628515f969ffa99bba1c5e5f09cec87
|
[] |
no_license
|
MSBradshaw/BioHackathon2020
|
3203c5232bebd70d2c2a88b7f49063a09da023c4
|
31826b698a408541200b6f75bfe9c03217bf2d1a
|
refs/heads/master
| 2022-08-05T11:57:32.221444
| 2020-05-29T17:30:29
| 2020-05-29T17:30:29
| 258,961,184
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,036
|
py
|
import re
import pandas as pd
from bs4 import BeautifulSoup
import datetime
import time
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
import pickle
import os
def date_to_unix_time(date):
    """Convert a 'Month DD, YYYY' string to an integer Unix timestamp.

    Returns None for missing input (None or the empty string).
    NOTE(review): mktime interprets the parsed date in the *local* timezone,
    so the result depends on the machine's TZ setting.
    """
    if date is None or date == '':
        return None
    parsed = datetime.datetime.strptime(date, '%B %d, %Y')
    return int(time.mktime(parsed.timetuple()))
def get_n_grams(_text, _n, _gram_dict=None):
    """Count the n-grams of ``_text``, accumulating into ``_gram_dict``.

    Punctuation characters that are followed by a space are split off into
    their own tokens so they never appear inside a gram; any gram that would
    contain a punctuation token, or that would run past the end of the text,
    is skipped.

    Args:
        _text: Raw text; tokens are produced by splitting on single spaces.
        _n: Gram size (number of consecutive tokens per gram).
        _gram_dict: Optional dict of gram -> count to accumulate into.
            A fresh dict is created when omitted.  (This used to be a
            mutable default argument ``{}``, so counts silently leaked
            between calls that relied on the default.)

    Returns:
        A ``(DataFrame, dict)`` pair: a two-column frame of gram/count rows
        built from the dict, and the count dict itself.
    """
    if _gram_dict is None:
        _gram_dict = {}
    # if a special character is being used as punctuation (not in a name) add a space
    _text = re.sub('(: )', ' \\g<1>', _text)
    _text = re.sub('(- )', ' \\g<1>', _text)
    _text = re.sub('(, )', ' \\g<1>', _text)
    _text = re.sub('(\\. )', ' \\g<1>', _text)
    _text = re.sub('(- )', ' \\g<1>', _text)  # NOTE(review): duplicate of the '- ' rule above
    _text = re.sub('(\\? )', ' \\g<1>', _text)
    _text = re.sub('(; )', ' \\g<1>', _text)
    _text = re.sub('(! )', ' \\g<1>', _text)
    # remove parenthesis around a single *character* (the pattern matches one
    # non-space char, not a whole word — presumably intentional; verify)
    _text = re.sub(' \\(([^ ])\\) ', ' \\g<1> ', _text)
    # remove leading and trailing parenthesis
    _text = re.sub(' \\(', ' ', _text)
    _text = re.sub('\\) ', ' ', _text)
    _text_list = _text.split(' ')
    # create the n-grams
    _done = False
    for _i in range(len(_text_list)):
        _gram = ''
        _skip = False
        for _j in range(_n):
            if _i + _j >= len(_text_list):
                # ran off the end of the token list; every later start will too
                _done = True
                break
            # check if the current item is punctuation, if so skip this gram
            if _text_list[_i + _j] in ['.', ',', '?', ';', '!', ':', '-']:
                _skip = True
                break
            _gram += _text_list[_i + _j] + ' '
        if not _done and not _skip:
            # remove trailing space
            _gram = _gram[:-1]
            # if gram has already been made, increment count; else create entry
            if _gram in _gram_dict:
                _gram_dict[_gram] += 1
            else:
                _gram_dict[_gram] = 1
    _gram_df = pd.DataFrame({'gram': list(_gram_dict.keys()), 'count': list(_gram_dict.values())})
    return _gram_df, _gram_dict
def get_df_of_n_grams(_texts, _n):
_dic = {}
_final_df = None
for _ab in _texts:
_final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n, _dic)
_grams = list(set(_final_df['gram']))
_article_n_grams = {_x: [] for _x in _grams}
for _ab in _texts:
_final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n,{})
for _key in _grams:
if _key in _dic:
_article_n_grams[_key].append(_dic[_key])
else:
_article_n_grams[_key].append(0)
fake_df_n_grams = pd.DataFrame(_article_n_grams)
return fake_df_n_grams
train = pd.read_csv('train.csv')
pickle_cache = 'grams_2_df.pickle'
if os.path.exists(pickle_cache):
grams_2 = pickle.load(open(pickle_cache,'rb'))
else:
grams_2 = get_df_of_n_grams(list(train['abstract']),2)
pickle.dump(grams_2,open(pickle_cache,'wb'),protocol=4)
X = grams_2.to_numpy()
y = train['type'].to_numpy()
pca2 = PCA(n_components=10)
pca2.fit(grams_2.to_numpy().transpose())
# pca = pickle.load(open('real_fake_pca.pickle','rb'))
clf = svm.SVC(kernel='linear', C=1)
scores = cross_val_score(clf,pca2.components_.transpose(), y, cv=5)
#
# with open('svm-cross-val-pca.txt','w') as outfile:
# outfile.write(str(scores))
X_train, X_test, y_train, y_test = train_test_split(pca2.components_.transpose(), y, test_size=0.33, random_state=42)
clf.fit(X_train, y_train)
with open('svm-results-pca.txt','w') as outfile:
outfile.write('Cross Val scores: ' + str(scores) + '\n')
outfile.write('SVM SCore: ' + str(clf.score(X_test,y_test)) + '\n')
preds = clf.predict(X_test)
outfile.write('Predictions: ')
for p in preds:
outfile.write(',' + str(p))
|
[
"michaelscottbradshaw@gmail.com"
] |
michaelscottbradshaw@gmail.com
|
5d701f0a48dd6c81ab978a9683db47f0cf9fb515
|
587ac0749473666c2bcdfe558bdba8517cb1c0a0
|
/sp2020/j.py
|
9470133d7c7fc37f2ee060305d69a2e6d4c99a9d
|
[] |
no_license
|
katrinafyi/cpg
|
fc2f408baf19791fa7260561a55d29464a42b212
|
0631d1983ec6a45cbe1a8df63963ab8caac51440
|
refs/heads/main
| 2023-02-21T13:07:02.517306
| 2021-01-23T06:09:39
| 2021-01-23T06:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
def ints(): return [int(x.strip()) for x in input().split()]
# t is interval of measurement
# d is time considered
# p is percentile required
# r is response delay required for responsiveness
num_meas, t, d, p, r = ints()
num_attempts = ints() [0]
SUBMIT = 'S'
REPLY = 'R'
MEASURE = 'M'
timeline = [] # list of (time, response time)
submitted = [None] * num_attempts
for i in range(2 * num_attempts):
a, b, c = input().strip().split()
a = int(a)
b = int(b) - 1
if c == SUBMIT:
submitted[b] = a
else:
timeline.append((a, 'R', a - submitted[b]))
# for i in range(1, num_meas + 1):
# timeline.append((i * t, MEASURE, None))
# timeline.sort()
from collections import deque
from math import ceil, floor
considering = deque()
def measure():
if not considering: return True
l = [x[1] for x in considering]
l.sort()
# print(l)
i = (p/100 * len(l))
if i == int(i): i = int(i) - 1
else: i = floor(i)
return l[i] <= r
# print(num_meas, t, d, p, r)
# print(timeline)
num_responsive = 0
prev_measure = -1
prev_measure_time = 0
changed = True
for time, event, value in timeline:
if event == REPLY:
if time > prev_measure_time + t:
next_measure_time = floor(time / t) * t
while considering and considering[0][0] < next_measure_time - d:
considering.popleft()
m = measure()
num_responsive += m * (time - prev_measure_time + t) // t
prev_measure_time = next_measure_time
considering.append((time, value))
changed = True
print(num_responsive)
|
[
"kenton_lam@outlook.com"
] |
kenton_lam@outlook.com
|
4fe2f24ace7a19b1acc48f98e1b7555884e1392c
|
6e2e476c5764d5e75c7afe5a531ac5b890ef0c64
|
/Models_barAllExecutionTimes.py
|
6dd46654dd04bc45d58214343e8245ce54d8db3f
|
[] |
no_license
|
BrunoDatoMeneses/PythonPloting
|
d4611f62f2709465e32d3ab2dc4e0d5cef65e783
|
b5bd1c7aa5a50144d2db82f29ab754b01084f230
|
refs/heads/master
| 2023-05-07T14:08:17.225336
| 2021-06-02T09:06:13
| 2021-06-02T09:06:13
| 297,996,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,065
|
py
|
import _PLOT
from Utils import transpose
import os
import csv
# transpose.transposeFiles()
from _FIG import PLOTTING
from _PARAMS import PARAMETERS
figEndName = "-AllNCS"
#xlabel = 'Learning Cycles (#)'
ylabel = 'Times (ms)'
yStringLong ="ExecuyionTimes"
# figVaryingParamString = "learningCycles"
# varyingParamStringValues = ["500","1000","1500","2000"]
# varyingParamStrings = []
# paramlabelString = " Learning Cycles"
# PARAMETERS.learningCycles= "("
# for value in varyingParamStringValues:
# # precisionRange+= str(int(100*float(label))) + "_"
# # labelStrings.append(labelString + str(int(100*float(label))) + " %")
# PARAMETERS.learningCycles += value + "_"
# varyingParamStrings.append(value + paramlabelString)
#
# PARAMETERS.learningCycles += ")"
PARAMETERS.figSize = (4.5, 3.75)
yStrings = ["perceptsTimeExecution","contextsTimeExecution","headTimeExecution",
"NCSTimeExecution",
"NCS_UselessnessTimeExecution","NCS_IncompetendHeadTimeExecution","NCS_ConcurrenceAndConflictTimeExecution",
"NCS_Create_New_ContextTimeExecution","NCS_OvermappingTimeExecution","NCS_ChildContextTimeExecution","NCS_PotentialRequestTimeExecution"]
yStringsAvg = []
yStringsDev = []
yStringsMin = []
yStringsMax = []
for string in yStrings:
yStringsAvg.append(string+"_Average")
yStringsDev.append(string+"_Deviation")
yStringsMin.append(string+"_Min")
yStringsMax.append(string+"_Max")
xLabelStrings = ["Pcts","Ctxt","Head",
"NCSs",
"NCS Useless.","NCS Unprod.","NCS Conf. and Conc.",
"NCS Ctxt Creation","NCS Redun.","NCS Model","NCS Endo."]
logXScale = False
logYScale = False
# for label in labelStrings:
# yStringLong += label + "_"
XYDevMinMax = []
for y,yDev,min,max,yString in zip(yStringsAvg, yStringsDev, yStringsMin, yStringsMax,yStrings):
if(yString == "endoRequests"):
XYDevMinMax.append([y, yDev, min, max,0.1])
else:
XYDevMinMax.append([y, yDev, min, max, 1])
figName = "ToFill_" + yStringLong + "-" + PARAMETERS.getFigName() + figEndName
print(figName)
PARAMETERS.isActiveLearning = "false"
PARAMETERS.isSelfLearning = "true"
PARAMETERS.isLearnFromNeighbors = "true"
PARAMETERS.isActiveExploitation = "true"
PARAMETERS.activeExploitationCycles = "4000"
PARAMETERS.learningCycles = "500"
varyingParamStrings=[""]
constrains = []
constrains.append(PARAMETERS.getConstainsLabelsAreYStrings(xLabelStrings, XYDevMinMax))
PLOTTING.ROTATION = 45
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
figName, ylabel, False, False,
constrains, 1, 1, PARAMETERS.figSize)
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
figName, ylabel, False, True,
constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, False, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviationWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, True, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, True, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviation(labels, colors, markers, figName, xlabel, ylabel, logXScale, logYScale, xString, yString, deviationString, constrains, 1, 1)
|
[
"bruno.dato.meneses@gmail.com"
] |
bruno.dato.meneses@gmail.com
|
419bee1b9fe65c8d11a7d4b70693ec15423d958f
|
cc578cec7c485e2c1060fd075ccc08eb18124345
|
/cs15211/24Game.py
|
ea18464f94d06f61725723f26fa46ca83987f4e3
|
[
"Apache-2.0"
] |
permissive
|
JulyKikuAkita/PythonPrac
|
18e36bfad934a6112f727b4906a5e4b784182354
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
refs/heads/master
| 2021-01-21T16:49:01.482561
| 2019-02-07T06:15:29
| 2019-02-07T06:15:29
| 91,907,704
| 1
| 1
|
Apache-2.0
| 2019-02-07T06:15:30
| 2017-05-20T18:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,885
|
py
|
__source__ = 'https://leetcode.com/problems/24-game/description/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 679. 24 Game
#
# You have 4 cards each containing a number from 1 to 9.
# You need to judge whether they could operated through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator.
#
# For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together.
# For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
#
# Companies
# Google
# Related Topics
# Depth-first Search
#
#868ms 6.09%
import unittest
import itertools
from operator import truediv, mul, add, sub
from fractions import Fraction
class Solution(object):
    """Decide whether four cards (1-9) can make 24 using +, -, *, / and parens."""

    def judgePoint24(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        ops = (truediv, mul, add, sub)

        def combine(lhs, rhs):
            # Every value reachable by applying one operator, in either
            # operand order, to one value from each set; division by a
            # zero operand is simply skipped.
            results = set()
            for a, b, op in itertools.product(lhs, rhs, ops):
                if op is not truediv or b:
                    results.add(op(a, b))
                if op is not truediv or a:
                    results.add(op(b, a))
            return results

        # Exact rational arithmetic avoids float rounding issues around 24.
        singles = [{value} for value in map(Fraction, nums)]
        for i, j in itertools.combinations(range(4), 2):
            pair = combine(singles[i], singles[j])
            k, l = {0, 1, 2, 3} - {i, j}
            # Fold in the remaining two cards under every association order.
            if 24 in combine(combine(pair, singles[k]), singles[l]):
                return True
            if 24 in combine(combine(pair, singles[l]), singles[k]):
                return True
            if 24 in combine(pair, combine(singles[k], singles[l])):
                return True
        return False
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/24-game/solution/
Given: (a, b, c, d) - (A tuple of 4)
Generate:
((a+b),c,d) ((a-b),c,d) ((b-a),c,d) ((a*b),c,d) ((a/b),c,d) ((b/a),c,d)
((a+c),b,d) ................................................................. ((c/a),b,d)
((a+d),b,c) ................................................................. ((d/a),b,c)
(a,(b+c),d) ................................................................. (a,(c/b),d)
(a,(b+d),d) ................................................................. (a,(d/b),d)
(a,b,(c+d)) ................................................................. (a,b,(d/c))
There are 36 (6*6) such tuples. Of these, + & - are not order dependent. That is 2+3 = 3+2.
But / & - are order dependent. i.e. 2/3 != 3/2. These look like (e,f,g) i.e. a tuple of 3 now.
Carrying out similar reductions gives 18 (6*3) tuples for each of the above-generated tuples.
These now look like (h, i) i.e. a tuple of 2 now.
Similiar, the final reduction now yields 6 answers (a+b, a-b, a*b, a/b, b-a, b/a)
for each of the above-generated tuple.
Thus in total 36x18x6 final values can be generated using the 4 operators and 4 initial values.
Algo: Generate all such answers using dfs method and stop when it's 24.
Catches:
Use double instead of int
Be careful about the classical divide by zero error
#18ms 56.03%
class Solution {
public boolean judgePoint24(int[] nums) {
ArrayList A = new ArrayList<Double>();
for (int v: nums) A.add((double) v);
return solve(A);
}
private boolean solve(ArrayList<Double> nums) {
if (nums.size() == 0) return false;
if (nums.size() == 1) return Math.abs(nums.get(0) - 24) < 1e-6;
for (int i = 0; i < nums.size(); i++) {
for (int j = 0; j < nums.size(); j++) {
if (i != j) {
ArrayList<Double> nums2 = new ArrayList<Double>();
for (int k = 0; k < nums.size(); k++) if (k != i && k != j) {
nums2.add(nums.get(k));
}
for (int k = 0; k < 4; k++) {
if (k < 2 && j > i) continue;
if (k == 0) nums2.add(nums.get(i) + nums.get(j));
if (k == 1) nums2.add(nums.get(i) * nums.get(j));
if (k == 2) nums2.add(nums.get(i) - nums.get(j));
if (k == 3) {
if (nums.get(j) != 0) {
nums2.add(nums.get(i) / nums.get(j));
} else {
continue;
}
}
if (solve(nums2)) return true;
nums2.remove(nums2.size() - 1);
}
}
}
}
return false;
}
}
'''
|
[
"b92701105@gmail.com"
] |
b92701105@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.