Column schema for the records listed below (⌀ marks a nullable column):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | 3–288 chars |
| content_id | string | length 40 |
| detected_licenses | list | 0–112 items |
| license_type | string | 2 classes |
| repo_name | string | 5–115 chars |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, ⌀ |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | 128–8.19k chars |
| authors | list | 1 item |
| author_id | string | 1–132 chars |
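The raw records follow, one per row, with fields separated by `|` in the column order above; each record's `content` field holds a complete Python source file. A minimal sketch of loading and filtering such an export with pandas (the Parquet file name is a placeholder, not part of the dataset):

```python
import pandas as pd

# Placeholder path: point this at the actual Parquet/JSONL export of the table above.
df = pd.read_parquet("python_files.parquet")

# Keep only permissively licensed, non-vendored, non-generated files.
subset = df[(df["license_type"] == "permissive") & ~df["is_vendor"] & ~df["is_generated"]]
print(subset[["repo_name", "path", "length_bytes"]].head())
```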
fb53eb588a4faefc13994b172e7b3d6a0943885b
|
97e9e4c2ab8efd940946f5d28a3bab0da647ca0b
|
/huxley/accounts/tests/models.py
|
f6a0bb209703e66d80a1776990a19f76587a4435
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
tdowds/huxley
|
d9df0068d89ffd3f6c855be6c9ae7c3f15bedc3e
|
6705ef2d64408389cf55388abbad70beb7025158
|
refs/heads/master
| 2021-01-20T06:33:01.898577
| 2013-10-18T16:07:58
| 2013-10-18T16:07:58
| 13,706,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,952
|
py
|
# Copyright (c) 2011-2013 Kunal Mehta. All rights reserved.
# Use of this source code is governed by a BSD License found in README.md.
from django.test import TestCase
from huxley.accounts.constants import *
from huxley.accounts.models import *
class HuxleyUserTest(TestCase):
def test_authenticate(self):
""" Tests that the function correctly authenticates and returns a
user, or returns an error message. """
kunal = HuxleyUser.objects.create(username='kunal', email='kunal@lol.lol')
kunal.set_password('kunalmehta')
kunal.save()
user, error = HuxleyUser.authenticate('kunal', '')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.MISSING_FIELDS)
user, error = HuxleyUser.authenticate('', 'kunalmehta')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.MISSING_FIELDS)
user, error = HuxleyUser.authenticate('roflrofl', 'roflrofl')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.INVALID_LOGIN)
user, error = HuxleyUser.authenticate('kunal', 'kunalmehta')
self.assertEqual(user, kunal)
self.assertIsNone(error)
def test_change_password(self):
""" Tests that the function correctly changes a user's password, or
returns an error message. """
user = HuxleyUser.objects.create(username='adavis', email='lol@lol.lol')
user.set_password('mr_davis')
success, error = user.change_password('', 'lololol', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', '', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', 'lololol', '')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', 'lololol', 'roflrofl')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISMATCHED_PASSWORDS, error)
success, error = user.change_password('mr_davis', 'lol', 'lol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.PASSWORD_TOO_SHORT, error)
success, error = user.change_password('mr_davis', 'lololol<', 'lololol<')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.INVALID_CHARACTERS, error)
success, error = user.change_password('roflrofl', 'lololol', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.INCORRECT_PASSWORD, error)
success, error = user.change_password('mr_davis', 'lololol', 'lololol')
self.assertTrue(success)
self.assertTrue(user.check_password('lololol'))
|
[
"k.mehta@berkeley.edu"
] |
k.mehta@berkeley.edu
|
6aac625e1e0f348f3464e7dd1517a655dc5aab7e
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/QWAqDyd9RXqyrNyo3_15.py
|
b3247adb587b049bdda014c190a3be66d5086b08
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
def abbreviate(txt,n=4) :
output = ""
crap = txt.split(" ")
for each in crap :
if len(each) >= n :
output += each[0].upper()
return output
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
3e0f87cbc1d9d3d745df80682fabac642e330b46
|
5279b3fd3b0cba9e1ca5552229bf6d32c3c0bb44
|
/ch08_Tensorflow/ex09_Functional_API_2_GoogLeNet.py
|
71df88cd007238767bd256db09a478a6f32ed4d2
|
[] |
no_license
|
handaeho/lab_dl
|
16afe29b1b6db005cdae8ef9cc1f5f675ea34cf8
|
99ddcadec0c93bd42113f6b5fbecd9171c030f8b
|
refs/heads/master
| 2020-11-30T08:42:39.506059
| 2020-02-11T05:22:32
| 2020-02-11T05:22:32
| 230,358,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
"""
>> GoogLeNet - (교재 P.271 그림 8-10)
= layer를 하나씩 쌓아 올려 순차적으로 통과 하지 않고,
layer가 수평적으로 놓여 있고, 이 여러 layer를 하나의 데이터에 한꺼번에 적용해서 그 결과를 결합 후, 다음 구조로 보낸다.
이 구조를 'Inception'이라 하며, 'Inception'구조를 하나의 구성 요소로 여러개를 결합하여 사용하는 것이 GoogLeNet의 특징이다.
단, 같은 층에 수평적으로 놓인 layer는 서로 연결 되지 않는다.
================================================================================================================
>> ResNet(Residual Network) - (교재 P.272 그림 8-12)
= 학습할 때 층이 지나치게 깊으면 학습이 잘 되지 않고, 오히려 성능이 떨어진다.
ResNet에서는 이 문제를 해결하기 위해 '스킵 연결'을 도입한다. 층의 깊이에 비례해 성능 향상을 기대할 수 있다.
# 스킵 연결 ~> 입력 데이터를 Convolution_layer를 건너 뛰고 바로 Output에 더하는 구조.
입력 x를 연속한 두 Conv_layer를 건너 뛰고 출력에 바로 연결한다.
이 단축 경로가 없다면 Output은 'F(x)'가 되지만, 스킵연결로 인해 Output은 'F(x) + x'가 된다.
스킵 연결은 층이 깊어져도 학습을 효율적으로 할수 있게 해주는데,
이는 Back Propagation 때 스킵 연결이 신호 감쇠를 막아주기 때문이다.
스킵 연결은 입력 데이터를 '그대로'흘리는 것으로, 역전파 때도 상류의 기울기를 그래도 하류로 보낸다.
핵심은 상류의 기울기에 아무런 수정도 하지 않고 '그대로' 흘리는 것이다.
스킵 연결로 기울기가 작아지거나 지나치게 커질 걱정 없이 앞 층에서의 '의미있는 기울기'를 하류로 전할수 있다.
ResNet은 Conv_layer를 2개 층마다 건너뛰면서 층을 깊게 한다.
# 전이 학습 ~> 학습된 가중치(또는 그 일부)를 다른 신경망에 복사한 후, 그대로 재학습을 수행한다.
예를 들어, VGG와 구성이 같은 신경망을 준비하고 미리 학습된 가중치를 초기값으로 설정한 후,
새로운 데이터 셋으로 재학습을 수행한다.
전이 학습은 '보유한 데이터 셋이 적을 때 유용'한 방법이다.
여기서는 'GoogLeNet', 'ResNet'의 구조 그대로를 구현 하지는 않고, 'ex08_Functional_API'에 이어 같은 구조를 구현해 보자.
1) GoogLeNet
Input_tensor(784, ) -> Dense(64) -> ReLU -> Dense(32) -> ReLU -> Dense(10) -> Output_tensor(10, ) -> Softmax
-> Dense(64) -> ReLU ->
2) ResNet
Input_tensor(784, ) -> Dense(32) -> ReLU -> [건너뛰고 Dense(32) -> ReLU -> Dense(32) -> ReLU]
-> F(x) + x -> Output_tensor(10, ) -> Softmax
"""
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Add, concatenate
# GoogLeNet ===========================================
# Create the input tensor
input_tensor = Input(shape=(784,))
# The input tensor passes through two hidden (Dense) layers placed side by side in the same stage
x1 = Dense(units=64, activation='relu')(input_tensor)
x2 = Dense(units=64, activation='relu')(input_tensor)
# Concatenate the two results coming out of the two hidden (Dense) layers
concat = concatenate([x1, x2])
# Pass the concatenated result to the next hidden layer
x = Dense(32, activation='relu')(concat)
# Output_tensor
output_tensor = Dense(10, activation='softmax')(x)
# Build the model
model = Model(input_tensor, output_tensor)
# Print the model summary
model.summary()
# ResNet ===========================================
# Create the input tensor
input_tensor = Input(shape=(784, ))
# hidden_layer(Dense)
fx = Dense(units=32, activation='relu')(input_tensor)
x = Dense(units=32, activation='relu')(fx)
x = Dense(units=32, activation='relu')(x)
# Add the input data x to the output (F(x) + x)
x = Add()([x, fx])
# Pass F(x) + x on to the output tensor
output_tensor = Dense(10, activation='softmax')(x)
# Build the model
model = Model(input_tensor, output_tensor)
# Print the model summary
model.summary()
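# --- Added sketch (not part of the original file): the docstring above also explains
# transfer learning, which the code does not demonstrate. A minimal, hedged example
# using the public tf.keras VGG16 ImageNet weights; the input shape and the size of
# the new classification head are illustrative assumptions.
from tensorflow.keras import Sequential
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten

base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base.trainable = False  # freeze the pretrained convolutional base
transfer_model = Sequential([
    base,
    Flatten(),
    Dense(64, activation='relu'),      # small head to be trained on the new dataset
    Dense(10, activation='softmax'),
])
transfer_model.compile(optimizer='adam', loss='categorical_crossentropy')
transfer_model.summary()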
|
[
"mrdh94@naver.com"
] |
mrdh94@naver.com
|
2960d22b7267e18644fa6fd6755edc830f88df53
|
bc41457e2550489ebb3795f58b243da74a1c27ae
|
/python/plot_daily_solar_energy.py
|
dcf30c21d78a663dfaca75668a7998236822581c
|
[] |
no_license
|
SEL-Columbia/ss_sql_views
|
28a901d95fe779b278d2a51aec84d6bf51245c02
|
d146fd96849a4d165f3dc3f197aadda804a2f60a
|
refs/heads/master
| 2021-01-01T19:35:18.999147
| 2012-05-10T18:43:36
| 2012-05-10T18:43:36
| 3,020,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
'''
plot_daily_solar_energy.py
==============================
uses the midnight values of the reported cumulative kWh to generate
a daily report of energy delivered to the battery
only reports on days where there are two consecutive readings
right now uses numpy to do differentiation, should use postgres lag instead
'''
import datetime as dt
date_start = dt.datetime(2011, 11, 01)
date_end = dt.datetime(2012, 12, 31)
meter_name = 'ml05'
def plot_solar_power_generation():
import sqlalchemy as sa
# create metadata object
metadata = sa.MetaData('postgres://postgres:postgres@localhost:5432/gateway')
# define table objects from database
vspm = sa.Table('view_solar_midnight', metadata, autoload=True)
query = sa.select([vspm.c.meter_timestamp, vspm.c.solar_kwh],
distinct=True,
order_by=vspm.c.meter_timestamp,
whereclause=sa.and_(vspm.c.meter_name==meter_name,
vspm.c.meter_timestamp>date_start,
vspm.c.meter_timestamp<date_end)
)
print query
# get list of meter names in solar logs
meter_timestamp = []
solar_kwh = []
result = query.execute()
for r in result:
print r
meter_timestamp.append(r.meter_timestamp)
solar_kwh.append(r.solar_kwh)
import numpy as np
meter_timestamp = np.array(meter_timestamp)
solar_kwh = np.array(solar_kwh)
solar_kwh_per_day = np.diff(solar_kwh)
td = np.diff(meter_timestamp)
meter_timestamp= meter_timestamp[td==dt.timedelta(days=1)]
solar_kwh = solar_kwh_per_day[td==dt.timedelta(days=1)]
import matplotlib.pyplot as plt
f, ax = plt.subplots(1,1)
ax.plot_date(meter_timestamp, solar_kwh)
plt.show()
if __name__ == '__main__':
plot_solar_power_generation()
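# --- Added sketch (not part of the original script): the docstring notes that the
# numpy differencing could be replaced by Postgres LAG(); a hedged version of that
# query, using only the view/column names already referenced above.
LAG_QUERY = """
SELECT meter_timestamp,
       solar_kwh - LAG(solar_kwh) OVER (ORDER BY meter_timestamp) AS solar_kwh_per_day
FROM view_solar_midnight
WHERE meter_name = 'ml05'
  AND meter_timestamp > '2011-11-01'
  AND meter_timestamp < '2012-12-31'
ORDER BY meter_timestamp;
"""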
|
[
"danielrsoto@gmail.com"
] |
danielrsoto@gmail.com
|
6c1b68b9d40da4fc90059b3ddcde709deba188a0
|
c0f86b926fc82baa633862896096c149dd9913cf
|
/Python/Numpy/Concatenate/Pypy3/solution.py
|
de28d2f999a0276902fe41444343428ac5f95099
|
[] |
no_license
|
qxzsilver1/HackerRank
|
8df74dd0cd4a9dedd778cdecea395f4234eda767
|
bcb1b74711a625d8ad329a3f9fdd9f49b1bebc54
|
refs/heads/master
| 2021-09-09T15:45:35.681284
| 2021-09-07T00:11:16
| 2021-09-07T00:11:16
| 75,671,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
def matrix_printer(arr,n,m):
for i in range(n):
if i == 0:
print('[[' + ' '.join(map(str, arr[0][0:m])) + ']')
elif i == n-1:
print(' [' + ' '.join(map(str, arr[n-1][0:m])) + ']]')
else:
print(' [' + ' '.join(map(str, arr[i][0:m])) + ']')
n,m,p = map(int, input().split())
lst=[]
for _ in range(n+m):
lst.append(list(map(int,input().split())))
matrix_printer(lst,(n+m),p)
|
[
"noreply@github.com"
] |
qxzsilver1.noreply@github.com
|
e841be16c600c39b5a87a129e088eaac21922d22
|
5cc204e2ecb9a756127e7c71633a1edcdb3e989b
|
/pylmp/InKim/BGF_generateRigidScan.py
|
4582dd6a832f4c8a5fea3760c6e6fcd038c80431
|
[] |
no_license
|
hopefulp/sandbox
|
1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8
|
4d26767f287be6abc88dc74374003b04d509bebf
|
refs/heads/master
| 2023-06-27T17:50:16.637851
| 2023-06-15T03:53:39
| 2023-06-15T03:53:39
| 218,209,112
| 1
| 0
| null | 2022-09-13T13:22:34
| 2019-10-29T05:14:02
|
C++
|
UTF-8
|
Python
| false
| false
| 4,346
|
py
|
#!/home/noische/python
import sys
import getopt
import copy
import numpy as np
import tqdm
import bgf
import bgftools
import nutils as nu
usage = """
Generates BGF file with changed distances between two molecules.
This is useful to generate input structures for Rigid Coordinate Scan.
Usage: %s -b bgf_file -1 atom1 -2 atom2 -o out_file -d "distances"
-b bgf_file: self-descriptive.
-1 atom1: atom number to fix.
-2 atom2: atom number to change distance. should be in another atom to atom1.
-o out_file: self-descriptive.
-d distances: self-descriptive. optional.
"-1.0 1.0 2.0" will generate three structures with those distances changed.
defaults: "-1.0 0.0 1.0 2.0 ... 10.0"
Please report any bugs to in.kim@kaist.ac.kr
""" % sys.argv[0]
def generate_rcs(bgf_file, atom1, atom2, out_file, dist=[], silent=False):
# init
if not dist:
dist = np.arange(-1.0, 10.0, 1.0)
# open
if isinstance(bgf_file, bgf.BgfFile):
myBGF = bgf_file
else:
if not silent: print("reading " + bgf_file + " ..")
myBGF = bgf.BgfFile(bgf_file)
a1 = myBGF.a[myBGF.a2i[atom1]];
a2 = myBGF.a[myBGF.a2i[atom2]];
# check whether a1 and a2 are in a same molecule // the script cannot continue if yes
mol1 = []; mol2 = []
dummy = bgftools.getmolecule(myBGF, a1, mol1)
dummy = bgftools.getmolecule(myBGF, a2, mol2)
if mol1 == mol2:
nu.die("atom1 and atom2 are in a same molecule: cannot perform a rigid body scan within a same molecule.")
mybgf2 = copy.deepcopy(myBGF)
# translate the coordinate: atom1 to origin
v1 = (a1.x, a1.y, a1.z) # position of atom1
for atom in mybgf2.a:
atom.x -= v1[0]
atom.y -= v1[1]
atom.z -= v1[2]
# find the rotation matrix: v21 to x axis
v21 = [ (a2.x - a1.x), (a2.y - a1.y), (a2.z - a1.z) ]
u1 = v21 / np.linalg.norm(v21) ##
v2 = [u1[1], -u1[0], 0]; u2 = v2 / np.linalg.norm(v2) ##
v3 = np.cross(u1, u2); u3 = v3 / np.linalg.norm(v3) ##
U = np.array([[u1[0], u1[1], u1[2]], [u2[0], u2[1], u2[2]], [u3[0], u3[1], u3[2]]])
Uinv = np.linalg.inv(U)
for d in tqdm.tqdm(dist, ncols=120, desc="Creating RCS structures"):
mybgf2 = copy.deepcopy(myBGF)
# translate the coordinate: atom1 to origin
v1 = (a1.x, a1.y, a1.z) # position of atom1
for atom in mybgf2.a:
atom.x -= v1[0]
atom.y -= v1[1]
atom.z -= v1[2]
# rotate all atoms
for atom in mybgf2.a:
a = np.matrix([atom.x, atom.y, atom.z]).T
b = U*a
atom.x = float(b[0])
atom.y = float(b[1])
atom.z = float(b[2])
# move mol2 by d
for ano in mol2:
atom = mybgf2.getAtom(ano)
atom.x += d
# re-rotate all atoms
for atom in mybgf2.a:
a = np.matrix([atom.x, atom.y, atom.z]).T
b = Uinv*a
atom.x = float(b[0])
atom.y = float(b[1])
atom.z = float(b[2])
## save
fname = out_file.split(".bgf")[0] + "." + str("{0:.2f}".format(d)).replace(".", "p") + ".bgf"
mybgf2.saveBGF(fname)
### end of function
if __name__ == "__main__":
bgf_file = ""; trj_file = ""; atom1 = 0; atom2 = 0; out_file = ""; distance = ""
options, args = getopt.getopt(sys.argv[1:], 'hb:1:2:o:d:', ['help', 'bgf=', 'atom1=', 'atom2=', 'out=', 'distance='])
if len(sys.argv) < 2:
print(usage)
sys.exit(0)
print("Requested options: " + str(options))
for option, value in options:
if option in ('-h', '--help'):
print(usage)
sys.exit(0)
elif option in ('-b', '--bgf'):
bgf_file = value
elif option in ('-1', '--atom1'):
atom1 = int(value)
elif option in ('-2', '--atom2'):
atom2 = int(value)
elif option in ('-o', '--out'):
out_file = value
elif option in ('-d', '--distance'):
distance = str(value)
        else:
            # Unreachable in practice (getopt rejects unknown options); the original
            # compared against NULL, which is not defined in Python.
            print(usage)
            sys.exit(0)
    # scan distances (-d is optional; an empty list falls back to the default range inside generate_rcs)
    dist = [float(i) for i in distance.split()] if distance else []
# main call
generate_rcs(bgf_file, atom1, atom2, out_file, dist=dist)
|
[
"hopefulp@gmail.com"
] |
hopefulp@gmail.com
|
1121516406cec1116b90ef7fc536a6cd5afdd7c4
|
e4e4c60ffa509f257afc915d4c6cd32c0cb7098c
|
/.history/app_20200919192647.py
|
b152935c7de29fa5e6e7fe1e9d5d974fae893d03
|
[] |
no_license
|
QianyueMa/Google-Health-Search-Project
|
01dbd597780158f50eebfba2a228b505f8169726
|
6ef6b270dc7ab0826ad4f0338c9cd95d3571e19a
|
refs/heads/master
| 2022-12-19T03:55:10.328167
| 2020-10-02T12:54:27
| 2020-10-02T12:54:27
| 296,495,736
| 0
| 0
| null | 2020-09-18T02:44:12
| 2020-09-18T02:44:11
| null |
UTF-8
|
Python
| false
| false
| 4,557
|
py
|
import numpy as np
import os
import json
import requests
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
import pandas.io.sql as pdsql
from config import pg_user, pg_password, db_name
from flask import Flask, jsonify, render_template, abort, redirect
#################################################
# Database Setup
##################################################
connection_string = f"{pg_user}:{pg_password}@localhost:5432/{db_name}"
engine = create_engine(f'postgresql://{connection_string}')
# checking the table names
engine.table_names()
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
return render_template("index.html")
@app.route("/comparison")
def comparison():
return render_template("comparison.html")
@app.route('/searchbyyear')
def searchbyyear():
sqlStatement = """
SELECT year, SUM ("Cancer" + "cardiovascular" + "stroke" + "depression" + "rehab" + "vaccine" + "diarrhea" + "obesity" + "diabetes") AS Searches
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchyearandcondition')
def searchyearandcondition():
sqlStatement = """
SELECT year, SUM ("Cancer") AS Cancer,SUM ("cardiovascular") As Cardiovascular,SUM ("stroke") As Stroke,SUM ("depression") As Depression,SUM ("rehab") AS Rehab,SUM ("vaccine") AS Vaccine, SUM ("diarrhea") AS Diarrhea, SUM("obesity") AS Obesity, SUM ("diabetes") AS Diabetes
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchbystate')
def searchbystate():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude
ORDER BY location;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('location', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/bylocationandyear')
def bylocationandyear():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude,s.year, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude,s.year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/casesleadingdeath')
def casesleadingdeath():
sqlStatement = """
SELECT * FROM leading_causes_of_death;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/allsearchrecord')
def allsearchrecord():
sqlStatement = """
SELECT *
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/location')
def location():
sqlStatement = """
SELECT * FROM location;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('location', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
if __name__ == '__main__':
app.run(debug=True)
|
[
"ermiasgelaye@gmail.com"
] |
ermiasgelaye@gmail.com
|
6878a567d42d9e1757938e0bb0df53b84b7c730a
|
65b9a63e8c132f32aeb56961968f5e363bd9a087
|
/20191104_归一化层Normalization/layernorm_keras.py
|
a605fbdc0613d5284fffcc85ef6fd7daea5d15e2
|
[] |
no_license
|
346644054/examples2019
|
e70f13cfb56c3478fc6e335c730e0e70e70a6226
|
5f9777e7a887e635971156354f56ce065fa3f41e
|
refs/heads/master
| 2022-04-09T03:52:52.973414
| 2020-02-28T03:05:02
| 2020-02-28T03:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
haikou layernorm
fun:
save csv
env:
Win7 64bit;anaconda 1.7.2;python 3.5;tensorflow1.10.1;Keras2.2.4
pip3,matplotlib2.2.3,seaborn0.9.0
"""
__author__ = 'elesun'
import keras
from keras_layer_normalization import LayerNormalization
print("test layer_norm")
input_layer = keras.layers.Input(shape=(2, 3),name="in")
norm_layer = LayerNormalization(name="norm")(input_layer)
model = keras.models.Model(inputs=input_layer, outputs=norm_layer)
model.compile(optimizer='adam', loss='mse', metrics={})
model.summary()
|
[
"elesun2018@gmail.com"
] |
elesun2018@gmail.com
|
1db367d4918110b8a0232d642973173af2617b77
|
66e6360325b781ed0791868765f1fd8a6303726f
|
/TB2009/WorkDirectory/5141 Number Of Samples Over Threshold/Count_108535.py
|
b35a81682068d313d7f63d34b079e98224e2decc
|
[] |
no_license
|
alintulu/FHead2011PhysicsProject
|
c969639b212d569198d8fce2f424ce866dcfa881
|
2568633d349810574354ad61b0abab24a40e510e
|
refs/heads/master
| 2022-04-28T14:19:30.534282
| 2020-04-23T17:17:32
| 2020-04-23T17:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("EventDisplay")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108535.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(3),
mip = cms.untracked.string("MIPCalibration.txt"),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(False),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(False),
usePedestalMean = cms.untracked.bool(True),
pedestalMean = cms.untracked.string("PedestalMean_108535.txt")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100)
)
)
process.countsample = cms.EDAnalyzer("FillMaxAdcAnalyzer",
threshold = cms.untracked.double(12600),
output = cms.untracked.string('CountSample_108535.root')
)
process.p = cms.Path(process.tbunpack*process.ABCcut*process.vlsbinfo*process.countsample)
|
[
"yichen@positron01.hep.caltech.edu"
] |
yichen@positron01.hep.caltech.edu
|
63ba110f6605deb2d39ba210975b637a8084fbb7
|
90cad1df7b7d424feb8e71ff3d77e772d446afdf
|
/alembic/versions/3147aa982e03_payment_application_datetime.py
|
434fce6bc4ce03501fa4352481a996f74989e5e0
|
[] |
no_license
|
razagilani/billing
|
acb8044c22b4075250c583f599baafe3e09abc2e
|
fd2b20019eeedf0fcc781e5d81ff240be90c0b37
|
refs/heads/master
| 2021-05-01T14:46:32.138870
| 2016-03-09T18:55:09
| 2016-03-09T18:55:09
| 79,589,205
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
"""payment application datetime
Revision ID: 3147aa982e03
Revises: 4f2f8e2f7cd
Create Date: 2014-07-23 15:43:12.596865
"""
# revision identifiers, used by Alembic.
revision = '3147aa982e03'
down_revision = '4f2f8e2f7cd'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.types import DateTime, Date
def upgrade():
op.alter_column('payment', 'date_applied', type_=DateTime)
op.alter_column('reebill', 'issue_date', type_=DateTime)
def downgrade():
op.alter_column('payment', 'date_applied', type_=Date)
op.alter_column('reebill', 'issue_date', type_=Date)
|
[
"dklothe@skylineinnovations.com"
] |
dklothe@skylineinnovations.com
|
61dec1d0e6f74a2fecd66c82df9047590d148082
|
cf9158e73c3630bb6c76f5a849014df79e550471
|
/paixu.py
|
e6239d3c47c6e108dbaa197b9970e50d9d62b9ba
|
[] |
no_license
|
starmerxhcd/python1
|
dbcc589c101421a38644f22cb2b7af0378d0e534
|
f7ed4b129a4ec22d0e1eed99081d9d695b769229
|
refs/heads/master
| 2021-08-12T00:55:18.466596
| 2017-11-14T07:48:51
| 2017-11-14T07:48:51
| 110,618,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
# -*- coding: UTF-8 -*-
# Quick sort
# Pick one number from the list as the pivot -> partition step (result: smaller numbers | pivot | larger numbers) -> repeat the partition on the left and right intervals until each interval holds a single number
# s: list, l: left index, r: right index, paixu: 0 for ascending, 1 for descending
def quick_sort(s,l,r,paixu=0):
    if(l>=r):
        return 'r must be greater than l'
    if(not isinstance(s,list)):
        return 'please pass in a list'
    if(l<r):
        i=l
        j=r
        x=s[l]
        # one partition pass
        while(i<j):
            # scan from right to left first
            # descending order
            if paixu:
                while (i < j and s[j] <= x):
                    j = j - 1
            else:
                while (i < j and s[j] >= x):
                    j = j - 1
            if(i<j):
                # note: the value at the current position is assigned to the pivot's position
                s[i]=s[j]
                # the value at position i is now settled, so move i one step to the right
                i=i+1
            # scan from left to right
            if paixu:
                while (i < j and s[i] >= x):
                    i = i + 1
            else:
                while (i < j and s[i] <= x):
                    i = i + 1
            if(i<j):
                s[j]=s[i]
                j=j-1
        # when i == j, the pivot position has been found
        s[i]=x
        # recursive calls on both sides
        quick_sort(s,l,i-1,paixu)
        quick_sort(s,i+1,r,paixu)
    # finally return the list
    return s
# run a test
s=[33,43,22,66,2,54,7,39,55]
print s
print 'ascending order'
print quick_sort(s,0,s.__len__()-1)
print 'descending order'
print quick_sort(s,0,s.__len__()-1,1)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e6999cec4013893c63f3967582a644f9a6d64d51
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/get_group.py
|
1ad33fd5f9a9cc3080ea190180c967ae733fe384
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 4,723
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
]
@pulumi.output_type
class GetGroupResult:
"""
Contract details.
"""
def __init__(__self__, built_in=None, description=None, display_name=None, external_id=None, name=None, type=None):
if built_in and not isinstance(built_in, bool):
raise TypeError("Expected argument 'built_in' to be a bool")
pulumi.set(__self__, "built_in", built_in)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if external_id and not isinstance(external_id, str):
raise TypeError("Expected argument 'external_id' to be a str")
pulumi.set(__self__, "external_id", external_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="builtIn")
def built_in(self) -> bool:
"""
true if the group is one of the three system groups (Administrators, Developers, or Guests); otherwise false.
"""
return pulumi.get(self, "built_in")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Group description. Can contain HTML formatting tags.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Group name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="externalId")
def external_id(self) -> Optional[str]:
"""
For external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory `aad://<tenant>.onmicrosoft.com/groups/<group object id>`; otherwise the value is null.
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
built_in=self.built_in,
description=self.description,
display_name=self.display_name,
external_id=self.external_id,
name=self.name,
type=self.type)
def get_group(group_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
Use this data source to access information about an existing resource.
:param str group_id: Group identifier. Must be unique in the current API Management service instance.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['groupId'] = group_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20191201:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
built_in=__ret__.built_in,
description=__ret__.description,
display_name=__ret__.display_name,
external_id=__ret__.external_id,
name=__ret__.name,
type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
cf000ffd91c88cbfd2f59ee93dc3e4697700ce28
|
c1ef1f1fa94b5dbecff2ec09e94ae29a9094d82a
|
/20210701.py
|
c13acabf4bce257bd66f37de2cc21ee67c7fa201
|
[] |
no_license
|
MONKEYZ9/algorithm
|
cd6039a2232615e9bd40f63e2509fddf7edcede7
|
4ffde1ac47294af87152ed740962db600e0b9755
|
refs/heads/main
| 2023-08-14T17:01:54.792376
| 2021-10-01T06:14:55
| 2021-10-01T06:14:55
| 380,917,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# Find the divisors of n
# A divisor is a number that divides n with a remainder of 0,
# so collect every such number into a list.
def solution(n):
    num_list = []
    for i in range(1, n+1):
        if n % i == 0:
            num_list.append(i)
    return num_list[0], num_list[-1]
n = int(input())
# solution() returns (smallest divisor, largest divisor); renamed from max/min to avoid shadowing the builtins
smallest, largest = solution(n)
print(smallest, largest)
lsitasd = list(range(10, 0,-1))
print(lsitasd)
|
[
"sangmin3285@gmail.com"
] |
sangmin3285@gmail.com
|
c4c49205364f01979a89041cba0306dc7b3714a1
|
1a5e3871a403057fbbc3f0914a931fc0a6bf06c3
|
/Server/app/docs/__init__.py
|
c1df725028415f127e2c74e054d89fcb8f008df7
|
[
"MIT"
] |
permissive
|
DSM-DMS/DMS-Backend-API-v2
|
ec754b575bfa3ef831efaae242f0a8203c595dd1
|
dfb49879ce81d7a3c1c4cda47b1de150f245cd86
|
refs/heads/master
| 2021-05-04T00:03:17.912351
| 2018-04-06T13:51:25
| 2018-04-06T13:51:25
| 120,405,963
| 0
| 0
|
MIT
| 2018-02-18T14:39:13
| 2018-02-06T05:25:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
TEMPLATE = {
    'schemes': [
        'http'
    ],
    'tags': [
        {
            'name': 'Admin account',
            'description': 'Account and account-management APIs accessible with administrator privileges'
        },
        {
            'name': 'Account',
            'description': 'Account APIs accessible with student privileges'
        },
        {
            'name': 'Application management',
            'description': 'Application-management APIs accessible with administrator privileges'
        },
        {
            'name': 'Application',
            'description': 'Application APIs accessible with student privileges'
        },
        {
            'name': 'Report management',
            'description': 'Management APIs for reports (facility faults, the DMS system, etc.) accessible with administrator privileges'
        },
        {
            'name': 'Report',
            'description': 'Report APIs (facility faults, the DMS system, etc.) accessible with student privileges'
        },
        {
            'name': 'Post management',
            'description': 'Post-management APIs accessible with administrator privileges'
        },
        {
            'name': 'Post',
            'description': 'Post-viewing APIs accessible with student privileges'
        },
        {
            'name': 'Survey management',
            'description': 'Survey-management APIs accessible with administrator privileges'
        },
        {
            'name': 'Survey',
            'description': 'Survey APIs accessible with student privileges'
        },
        {
            'name': 'Merit/demerit point management',
            'description': 'Merit and demerit point management APIs accessible with administrator privileges'
        },
        {
            'name': 'School',
            'description': 'School-information APIs accessible with both student and administrator privileges'
        }
    ]
}
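# --- Added usage sketch (not part of the original module): a template dict with
# 'schemes' and 'tags' keys like this one is normally handed to a Swagger/OpenAPI
# integration; assuming this project wires it up through flasgger (an assumption,
# not stated in this file), the hookup would look like:
#
# from flasgger import Swagger
# from flask import Flask
#
# app = Flask(__name__)
# Swagger(app, template=TEMPLATE)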
|
[
"city7310@naver.com"
] |
city7310@naver.com
|
62a54329673eecda6470aab943460239d2c97a22
|
fb82fdf706863465b1f357cd1fa0447474cd8a70
|
/ServerComponent/venv/Lib/site-packages/rsrc/contrib/db/mongo/serializer.py
|
4ce65d072d3cc5cf44fb07e5acfe9000727e374a
|
[
"MIT"
] |
permissive
|
CDU55/FakeNews
|
d79e2a069b3f1392f779d5b2256cd54c696e789a
|
707bd48dd78851081d98ad21bbdadfc2720bd644
|
refs/heads/main
| 2023-02-20T06:27:18.618837
| 2021-01-17T15:14:27
| 2021-01-17T15:14:27
| 305,167,221
| 0
| 1
|
MIT
| 2020-12-07T19:51:46
| 2020-10-18T18:16:49
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rsrc import settings
from jsonsir import Serializer
from jsonsir.contrib.intencoder import IntEncoder
from jsonsir.contrib.boolencoder import BoolEncoder
from jsonsir.contrib.regexencoder import RegexEncoder
from jsonsir.contrib.objectidencoder import ObjectIdEncoder
from jsonsir.contrib.datetimeencoder import DateTimeEncoder
# instantiate `Serializer` (bound with specified encoders)
serializer = Serializer([
IntEncoder(),
BoolEncoder(),
RegexEncoder(),
ObjectIdEncoder(),
DateTimeEncoder(settings.DATE_FORMAT),
])
|
[
"48147775+BiancaChirica@users.noreply.github.com"
] |
48147775+BiancaChirica@users.noreply.github.com
|
8a1620d7c5e238a937e8c5e317cd0d841bd6c9c1
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_12364.py
|
078700cb2bbfce3105692f4e33e508d8b2f4b77d
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# backslash in a character set of a python regexp (how to specify 'not a backslash' character set)?
regexps = []  # the extracted fragment assumes this list already exists; defined here so the snippet runs standalone
regexps.append({'left':r'[^\\]%.*', 'right':r''})
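# --- Added demonstration (not part of the extracted snippet): what the
# "not a backslash" character class matches; the sample strings are made up.
import re
print(re.search(r'[^\\]%.*', 'value 100% done'))    # matches "0% done" (a digit precedes the %)
print(re.search(r'[^\\]%.*', r'escaped \% stays'))  # None: the % is preceded by a backslash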
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
fd23120ffa6c03d72252d789dea7030132ea3bd4
|
31eadb37c1748ea37c9d978ae099f56f3d01d3be
|
/core/utils.py
|
ba5db3540ba38eb07995f3dd2da24fc7852637a7
|
[] |
no_license
|
geofferyj/simple_django_chat
|
daccf0b8e47b0a70d6bd4ad10da737da88fcc0a1
|
ad9606bc42528bc24448e9786665f7fae034e51f
|
refs/heads/master
| 2023-08-12T19:13:32.992640
| 2021-10-14T04:52:00
| 2021-10-14T04:52:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
from django.contrib.auth.models import AnonymousUser
from channels.db import database_sync_to_async
from django.db import close_old_connections
from rest_framework.authtoken.models import Token
from channels.middleware import BaseMiddleware
from channels.auth import AuthMiddlewareStack
from rest_framework.authentication import TokenAuthentication
@database_sync_to_async
def get_user(token_key):
try:
token = Token.objects.get(key=token_key)
return token.user
except Token.DoesNotExist:
return AnonymousUser()
finally:
close_old_connections()
class TokenAuthMiddleware(BaseMiddleware):
def __init__(self, inner):
self.inner = inner
async def __call__(self, scope, receive, send):
token_key = scope['query_string'].decode().split('=')[-1]
scope['user'] = await get_user(token_key)
return await super().__call__(scope, receive, send)
TokenAuthMiddlewareStack = lambda inner: TokenAuthMiddleware(AuthMiddlewareStack(inner))
class QSTokenAuthentication(TokenAuthentication):
"""
Extend the TokenAuthentication class to support querystring authentication
in the form of "http://www.example.com/?auth_token=<token_key>"
"""
def authenticate(self, request):
# Check if 'token_auth' is in the request query params.
# Give precedence to 'Authorization' header.
if 'token' in request.query_params and \
'HTTP_AUTHORIZATION' not in request.META:
return self.authenticate_credentials(request.query_params.get('token'))
else:
return super().authenticate(request)
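# --- Added usage sketch (not part of the original module): wiring the middleware into
# a Channels ASGI application (normally done in the project's asgi.py/routing.py).
# The websocket_urlpatterns import is hypothetical, so the example is left commented
# out; clients would then connect with ws://host/path/?token=<token_key>.
#
# from channels.routing import ProtocolTypeRouter, URLRouter
# from myproject.routing import websocket_urlpatterns  # hypothetical module
#
# application = ProtocolTypeRouter({
#     'websocket': TokenAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
# })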
|
[
"geofferyjoseph1@gmail.com"
] |
geofferyjoseph1@gmail.com
|
7af3af409784414f72d30c21d0d452263f877838
|
b5cba88ce8c86740c8c3453134610fd5bafbb8c4
|
/AlgoExpert/Selection Sort/solution.py
|
2f1ea5a4052470bf42473e09bd03a59a4df0f506
|
[] |
no_license
|
EduardoSantos7/Algorithms4fun
|
55fcf9d515ea3b70b93298ac96a58d2ae68dee11
|
6ff182ed596b6322322b087f29e6ad98baec3f97
|
refs/heads/master
| 2023-07-23T01:38:08.216313
| 2023-07-23T01:35:58
| 2023-07-23T01:35:58
| 227,448,848
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
def selectionSort(array):
for i in range(len(array) - 1):
current_min = i
for j in range(i + 1, len(array)):
if array[j] < array[current_min]:
current_min = j
array[i], array[current_min] = array[current_min], array[i]
return array
|
[
"eduardoluissd@gmail.com"
] |
eduardoluissd@gmail.com
|
42690924946bef5ab0da79ed1f252d7fc6198fff
|
6e3d061f94468905841a918278a352d4e5df89a1
|
/hashicorp_vault_client/test/test_body92.py
|
87a8f24bd7cd68ac8fe9c87c8e3e6c826c7412f2
|
[
"Apache-2.0"
] |
permissive
|
drewmullen/HAC
|
179a4188e6e6ce3a36d480e45f238fd0901a710f
|
fb185804fd244366f8f8d01df22835b3d96e7512
|
refs/heads/master
| 2020-08-03T12:13:08.785915
| 2019-10-03T18:33:04
| 2019-10-03T18:33:04
| 211,749,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
# coding: utf-8
"""
HashiCorp Vault API
HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`. # noqa: E501
OpenAPI spec version: 1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import hashicorp_vault_client
from models.body92 import Body92 # noqa: E501
from hashicorp_vault_client.rest import ApiException
class TestBody92(unittest.TestCase):
"""Body92 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBody92(self):
"""Test Body92"""
# FIXME: construct object with mandatory attributes with example values
# model = hashicorp_vault_client.models.body92.Body92() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"drew@nebulaworks.com"
] |
drew@nebulaworks.com
|
32102d490eaf01579a5fdfd91f758f8e60ddd1d3
|
3c6b3b0a92e5a290ba69d0f73af51ac82aff3509
|
/exams/midterm2/list_set.py
|
9e0137e47de684f4ff2f0f13a542abe3f3d2f362
|
[] |
no_license
|
sarae17/2019-T-111-PROG
|
ba6c6db7075acba16bbcd23e4c0d3db6e2bb374f
|
017287b3300ec4fe809bfc81fee856ffb17b4800
|
refs/heads/master
| 2020-09-10T14:36:53.715479
| 2019-11-13T13:41:04
| 2019-11-13T13:41:04
| 221,722,173
| 1
| 0
| null | 2019-11-14T14:54:15
| 2019-11-14T14:54:14
| null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
def unique_elements(a_list):
''' Returns a new list containing the unique elements in a_list '''
result = []
for elem in a_list:
if not elem in result:
result.append(elem)
return result
def make_sorted_set(a_list):
a_set = unique_elements(a_list)
return sorted(a_set)
def intersection(set1, set2):
result_set = []
for elem in set1:
if elem in set2:
result_set.append(elem)
return result_set
def union(set1, set2):
result_list = set1 + set2
result_set = make_sorted_set(result_list)
return result_set
def get_set():
a_list = input("Enter elements of a list separated by space: ").strip().split()
a_list = [int(i) for i in a_list]
return make_sorted_set(a_list)
# Main program starts here
set1 = get_set()
set2 = get_set()
print("Set 1: {}".format(set1))
print("Set 2: {}".format(set2))
set3 = intersection(set1, set2)
print("Intersection: {}".format(set3))
set4 = union(set1, set2)
print("Union: {}".format(set4))
|
[
"hrafnl@gmail.com"
] |
hrafnl@gmail.com
|
19c07d874f40176edcbf9fe01161f45a8eab33ac
|
acf314ab0fa399018764b2ebd96e33c66362994e
|
/0x0F-python-object_relational_mapping/model_city.py
|
de7071389d2f251afb219add79ea14ca20dfe060
|
[] |
no_license
|
glyif/holbertonschool-higher_level_programming
|
98f9c2da0b71a4e9e2dd9f6fde755875e9015f34
|
14c02d79e2008db1b992b08f9faa55b20dbe0691
|
refs/heads/master
| 2021-01-20T06:53:16.179354
| 2017-09-28T18:14:12
| 2017-09-28T18:14:12
| 89,939,980
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
#!/usr/bin/python3
"""
City Base Model
"""
from sqlalchemy import Column, Integer, String, ForeignKey
from model_state import Base, State
class City(Base):
"""
City Model
tablename: cities
id: id
name: name
state_id: state_id
"""
__tablename__ = "cities"
id = Column(Integer, autoincrement=True, nullable=False, primary_key=True)
name = Column(String(128), nullable=False)
state_id = Column(Integer, ForeignKey(State.id))
|
[
"122@holbertonschool.com"
] |
122@holbertonschool.com
|
e0fade2adeb0017c44bf7145e021bf79ff8828c2
|
edfdc0d3a2fdeed95ba7aa3d0e198eb9dafe4064
|
/operator_api/ledger/migrations/0034_auto_20180921_1222.py
|
2fd8e466f164d186fb24a45d8da3bf7466401d5e
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
xiaobai900/nocust-hub
|
880e72ba4e1d324ae36adea6c03c9761a7d91621
|
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
|
refs/heads/master
| 2023-05-28T08:18:17.402228
| 2020-11-01T19:48:17
| 2020-11-01T19:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-09-21 12:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ledger', '0033_transfer_cancelled'),
]
operations = [
migrations.RenameField(
model_name='transfer',
old_name='sent',
new_name='complete',
),
]
|
[
"guillaume@felley.io"
] |
guillaume@felley.io
|
87f6a95198355d88ac93093a58294e5b1c64dcf4
|
8f64d50494507fd51c0a51010b84d34c667bd438
|
/BeautyForMe/myvenv/Lib/site-packages/win32/scripts/backupEventLog.py
|
1a72db1578c00a5a37fa7d7486d95356f6238705
|
[
"MIT"
] |
permissive
|
YooInKeun/CAU_CSE_Capstone_3
|
5a4a61a916dc13c8635d25a04d59c21279678477
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
refs/heads/master
| 2022-12-11T15:39:09.721019
| 2021-07-27T08:26:04
| 2021-07-27T08:26:04
| 207,294,862
| 6
| 1
|
MIT
| 2022-11-22T04:52:11
| 2019-09-09T11:37:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
# Generate a base file name
import time, os
import win32api
import win32evtlog
def BackupClearLog(logType):
datePrefix = time.strftime("%Y%m%d", time.localtime(time.time()))
fileExists = 1
retry = 0
while fileExists:
if retry == 0:
index = ""
else:
index = "-%d" % retry
try:
fname = os.path.join(win32api.GetTempPath(), "%s%s-%s" % (datePrefix, index, logType) + ".evt")
os.stat(fname)
except os.error:
fileExists = 0
retry = retry + 1
# OK - have unique file name.
try:
hlog = win32evtlog.OpenEventLog(None, logType)
    except win32api.error as details:  # the original caught win32evtlogutil.error, but that module is never imported; win32api.error is the same pywintypes.error raised by win32evtlog
print("Could not open the event log", details)
return
try:
if win32evtlog.GetNumberOfEventLogRecords(hlog)==0:
print("No records in event log %s - not backed up" % logType)
return
win32evtlog.ClearEventLog(hlog, fname)
print("Backed up %s log to %s" % (logType, fname))
finally:
win32evtlog.CloseEventLog(hlog)
if __name__=='__main__':
BackupClearLog("Application")
BackupClearLog("System")
BackupClearLog("Security")
|
[
"keun0390@naver.com"
] |
keun0390@naver.com
|
e4781373c7e3b7491d6ba5776799f89dbb0ac3ec
|
e724c4340ddf8a99fe7d887b5cc5453e46e7eb29
|
/src/main/python/pgshovel/streams/sequences.py
|
a2a0f8037ef84174e19d62a735d20e0797e5ace0
|
[
"Apache-2.0"
] |
permissive
|
fuziontech/pgshovel
|
1789369e2db6c82deba08bb86504840cef705d8f
|
5107a9885785f702340d421bbae9f6b82f44e98a
|
refs/heads/master
| 2021-01-17T07:07:25.383805
| 2015-08-14T08:33:54
| 2015-08-14T17:15:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
"""
Tools for validating input streams.
"""
import logging
logger = logging.getLogger(__name__)
class SequencingError(Exception):
"""
Error raised when a message contains an invalid sequence value.
"""
class InvalidPublisher(SequencingError):
"""
Error raised when a message is received from an unexpected publisher.
"""
class RepeatedSequenceError(SequencingError):
"""
Error raised when a sequence value is reused for a different message from
the same publisher.
"""
class InvalidSequenceStartError(Exception):
"""
Error raised when a message is recieved from a new publisher that starts
at an incorrect sequence value.
"""
def validate(messages):
"""
Validates a stream of Message instances, ensuring that the correct
sequencing order is maintained, all messages are present, and only a single
publisher is communicating on the stream.
Duplicate messages are dropped if they have already been yielded.
"""
# TODO: Also warn on non-monotonic timestamp advancement.
previous = None
# All of the publishers that have been previously seen during the execution
# of this validator. (Does not include the currently active publisher.)
dead = set()
for message in messages:
if message.header.publisher in dead:
raise InvalidPublisher('Received message from previously used publisher.')
if previous is not None:
if previous.header.publisher == message.header.publisher:
# If the message we just received is exactly the same as the
# previous message, we can safely ignore it. (This could happen
# if the publisher is retrying a message that was not fully
# acknowledged before being partitioned from the recipient, but
# was actually written.)
if previous.header.sequence == message.header.sequence:
if previous == message:
logger.debug('Skipping duplicate message.')
continue
else:
raise RepeatedSequenceError(previous, message)
elif previous.header.sequence + 1 != message.header.sequence:
raise SequencingError(
'Invalid sequence: {0} to {1}'.format(
previous.header.sequence,
message.header.sequence,
)
)
else:
logger.info(
'Publisher of %r has changed from %r to %r.',
messages,
previous.header.publisher,
message.header.publisher,
)
dead.add(previous.header.publisher)
previous = None
# TODO: This needs to handle starting consumption in the middle of the
# stream somehow.
if previous is None and message.header.sequence != 0:
raise InvalidSequenceStartError(
'Invalid sequence start point: {0}'.format(
message.header.sequence,
)
)
yield message
previous = message
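# --- Added usage sketch (not part of the original module): validate() only needs
# objects exposing header.publisher and header.sequence plus value equality, so the
# namedtuple stand-ins below (an assumption; the real stream carries pgshovel Message
# instances) are enough to show the happy path and a sequencing failure.
if __name__ == '__main__':
    from collections import namedtuple

    Header = namedtuple('Header', 'publisher sequence')
    FakeMessage = namedtuple('FakeMessage', 'header payload')

    good = [FakeMessage(Header('pub-a', i), 'payload-%d' % i) for i in range(3)]
    assert list(validate(good)) == good  # an in-order stream passes through unchanged

    gapped = [FakeMessage(Header('pub-a', 0), 'x'), FakeMessage(Header('pub-a', 2), 'y')]
    try:
        list(validate(gapped))  # sequence 1 is missing
    except SequencingError as error:
        print('rejected as expected: %s' % error)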
|
[
"ted@kaemming.com"
] |
ted@kaemming.com
|
783e84a8fb9c3cbbecc3557157aa5917430ac426
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_164/ch128_2020_04_01_18_11_42_129452.py
|
ab64e7a6ec6a5e0b2d75d17984dba66242fc56bb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
sim = ["sim"]
nao = ["nao"]
esta_se_movendo = input("Está se movendo?")
if esta_se_movendo in sim:
se_move = input("Deveria se mover?")
if se_move in sim:
print("Sem problemas!")
elif se_move in nao:
print("Silver Tape")
if esta_se_movendo in nao:
    nao_se_move = input("Deveria se mover?")
    if nao_se_move in sim:
        print("WD-40")
    elif nao_se_move in nao:
        print("Sem problemas!")
|
[
"you@example.com"
] |
you@example.com
|
d782c90e1f40be5b2502d577a699d8d716c9955b
|
1b4672de3ee7eb823fb2a238ea767fe0e1e11e7e
|
/shared/migrations/0001_initial.py
|
0c89461bd8ba3895f44aa18c3d23aaeb018a4b11
|
[] |
no_license
|
FriedrichK/gamecoach
|
ce6510115e58286cc5eb56ddfbd35e7457639379
|
cfa0d8c686cbc7b48cfc86e1feabd30d575f3039
|
refs/heads/master
| 2016-09-06T04:08:52.746047
| 2014-11-03T21:20:39
| 2014-11-03T21:20:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
site = self._forward_ensure_primary_site_is_set_up_correctly(orm)
self._forward_ensure_facebook_provider_exists(orm, site)
def _forward_ensure_primary_site_is_set_up_correctly(self, orm):
Site = orm['sites.Site']
site, created = Site.objects.get_or_create(id=settings.SITE_ID)
site.domain = settings.SITE_DOMAIN
site.name = settings.SITE_NAME
site.save()
return site
def _forward_ensure_facebook_provider_exists(self, orm, site):
SocialApp = orm['socialaccount.SocialApp']
provider, created = SocialApp.objects.get_or_create(provider='facebook', name='Facebook')
provider.secret = settings.FACEBOOK_APP_SECRET
provider.client_id = settings.FACEBOOK_APP_ID
provider.key = ''
provider.sites.add(site)
provider.save()
return provider
def backwards(self, orm):
pass
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['shared']
|
[
"friedrich@cartogami.com"
] |
friedrich@cartogami.com
|
a97a18124e03c1c5cba179560a3a8bcc88bc64ae
|
f3472d0f7c4c15d978dc74520ecebc7b6396da78
|
/tests/http/test_request_data_property.py
|
0d147ccbed4c4a77b5fe7fb35156fb3fa65c8468
|
[
"MIT"
] |
permissive
|
Lookyan/flex
|
a5b18e7f69ab4bc98ef810deebc22733b5346e4a
|
284ea6d5b5a5f65d7f4abbe9d36ca6f31567c0ec
|
refs/heads/master
| 2021-01-16T17:40:58.428296
| 2016-10-12T11:49:18
| 2016-10-12T11:49:18
| 67,237,824
| 0
| 1
| null | 2016-09-02T16:23:14
| 2016-09-02T16:23:13
| null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
import pytest
import json
from flex.constants import EMPTY
from tests.factories import (
RequestFactory,
)
def test_null_body_returns_null():
request = RequestFactory(body=None)
assert request.data is None
def test_empty_string_body_returns_empty_string():
request = RequestFactory(body='')
assert request.data == ''
def test_empty_body_returns_empty():
request = RequestFactory(body=EMPTY)
assert request.data is EMPTY
def test_json_content_type_with_json_body():
request = RequestFactory(
body=json.dumps({'key': 'value'}),
content_type='application/json',
)
assert request.data == {'key': 'value'}
def test_unsupported_content_type():
request = RequestFactory(
body=json.dumps({'key': 'value'}),
content_type='application/unsupported',
)
with pytest.raises(NotImplementedError):
request.data
|
[
"piper@simpleenergy.com"
] |
piper@simpleenergy.com
|
c600389a03e33cbfd54ea9b4103aba8b123f1cc5
|
aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8
|
/scripts/ingestors/nass_quickstats.py
|
2b7ee785badec8b6d8ea7c1b583e03b7621a4a51
|
[
"MIT"
] |
permissive
|
jamayfieldjr/iem
|
e0d496311d82790ad518c600c2fcffe44e834da1
|
275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a
|
refs/heads/master
| 2020-08-07T11:55:56.256857
| 2019-10-04T04:22:36
| 2019-10-04T04:22:36
| 213,439,554
| 1
| 0
|
MIT
| 2019-10-07T17:01:20
| 2019-10-07T17:01:20
| null |
UTF-8
|
Python
| false
| false
| 5,165
|
py
|
"""Dump NASS Quickstats to the IEM database"""
from __future__ import print_function
import sys
import subprocess
import datetime
import os
import pandas as pd
from pyiem.util import get_dbconn
TMP = "/mesonet/tmp"
def get_file():
"""Download and gunzip the file from the FTP site"""
os.chdir("/mesonet/tmp")
if os.path.isfile("%s/qstats.txt" % (TMP, )):
print(' skipping download as we already have the file')
return
for i in range(0, -7, -1):
now = datetime.date.today() + datetime.timedelta(days=i)
fn = "qs.crops_%s.txt.gz" % (now.strftime("%Y%m%d"),)
cmd = ("wget -q "
"ftp://ftp.nass.usda.gov/quickstats/%s") % (fn, )
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.stdout.read()
if os.path.isfile(fn):
break
cmd = "cd %s; mv %s qstats.txt.gz; gunzip qstats.txt.gz" % (TMP, fn)
# Popen is async, so we need to read from it!
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.stdout.read()
def database(pgconn, df):
"""Save df to the database!"""
cursor = pgconn.cursor()
cursor.execute("""SET TIME ZONE 'UTC'""")
df.columns = [x.lower() for x in df.columns]
df = df.where((pd.notnull(df)), None)
df['num_value'] = pd.to_numeric(df['value'], errors='coerce')
df2 = df[df['commodity_desc'].isin(['CORN', 'SOYBEANS'])]
for _, row in df2.iterrows():
try:
# If we are not in addall mode, we have to be careful!
cursor.execute("""
INSERT into nass_quickstats(source_desc, sector_desc,
group_desc,
commodity_desc,
class_desc,
prodn_practice_desc,
util_practice_desc,
statisticcat_desc,
unit_desc,
agg_level_desc,
state_alpha,
asd_code,
county_ansi,
zip_5,
watershed_code,
country_code,
year,
freq_desc,
begin_code,
end_code,
week_ending,
load_time,
value,
cv,
num_value) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""", (row['source_desc'], row['sector_desc'], row['group_desc'],
row['commodity_desc'],
row['class_desc'],
row['prodn_practice_desc'],
row['util_practice_desc'],
row['statisticcat_desc'],
row['unit_desc'],
row['agg_level_desc'],
row['state_alpha'],
row['asd_code'],
row['county_ansi'],
row['zip_5'],
row['watershed_code'],
row['country_code'],
row['year'],
row['freq_desc'],
row['begin_code'],
row['end_code'],
row['week_ending'],
row['load_time'],
row['value'],
row['cv_%'],
row['num_value']))
except Exception as exp:
print(exp)
for key in row.keys():
print("%s %s %s" % (key, row[key], len(str(row[key]))))
sys.exit()
print(' processed %6s lines from %6s candidates' % (len(df2.index),
len(df.index)))
cursor.close()
pgconn.commit()
def process(pgconn):
"""Do some work"""
# The file is way too big (11+ GB) to be reliably read into pandas, so
# we need to do some chunked processing.
cursor = pgconn.cursor()
cursor.execute("truncate nass_quickstats")
cursor.close()
pgconn.commit()
header = ""
tmpfn = None
accumlines = 0
for linenum, line in enumerate(open("%s/qstats.txt" % (TMP, ))):
if linenum == 0:
header = line
if tmpfn is None:
tmpfn = '/mesonet/tmp/tempor.txt'
fh = open(tmpfn, 'w')
fh.write(header+"\n")
if linenum > 0:
fh.write(line+"\n")
accumlines += 1
if accumlines >= 600000:
fh.close()
df = pd.read_csv(tmpfn, sep='\t', low_memory=False)
database(pgconn, df)
tmpfn = None
accumlines = 0
if accumlines > 0:
fh.close()
df = pd.read_csv(tmpfn, sep='\t', low_memory=False)
database(pgconn, df)
def cleanup():
"""Cleanup after ourselves"""
for fn in ['%s/qstats.txt' % (TMP, ), '%s/tempor.txt' % (TMP, )]:
print(' Deleted %s' % (fn, ))
os.unlink(fn)
def main(argv):
"""Go Main Go"""
pgconn = get_dbconn('coop')
print("scripts/ingestors/nass_quickstats.py")
get_file()
process(pgconn)
if len(argv) == 1:
cleanup()
print("done...")
if __name__ == '__main__':
main(sys.argv)
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
94e54f003e8bf858139be8bc41ecdd20852a0d7b
|
3e149e35fbe53c190d74da8b68a12a472e0e6552
|
/footer_project/templatetags/footer_tags.py
|
f94449355897e353ddb401a08e4359feb5aeccc5
|
[] |
no_license
|
kamral/official_student_project
|
20a5c02fc0a7769f209c33a8eeee6a3f748ea647
|
fc585a20d46b858204c643cfa7f7b6f3e3a1655f
|
refs/heads/main
| 2023-02-22T16:41:28.269882
| 2021-01-18T11:41:19
| 2021-01-18T11:41:19
| 323,295,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
from django import template
from footer_project.models import \
About_Company_Category,\
Opportunities_category,\
Ourpartners_category
register=template.Library()
@register.simple_tag()
def get_about_company_categories(name='get_about_company_categories'):
return About_Company_Category.objects.all()
@register.simple_tag()
def get_oportunities_categories():
return Opportunities_category.objects.all()
@register.simple_tag()
def get_ourpartners_category():
return Ourpartners_category.objects.all()
@register.inclusion_tag('templatetags_categories_footer/about_company_categories.html')
def show_categories_about_company():
categories=About_Company_Category.objects.all()
return {'about_company_categories':categories}
@register.inclusion_tag('templatetags_categories_footer/oportunities_category.html')
def show_categories_oportunities_category():
categories=Opportunities_category.objects.all()
return {'oportunities_category':categories}
@register.inclusion_tag('templatetags_categories_footer/ourpartners_category.html')
def show_ourpartners_category():
category=Ourpartners_category.objects.all()
return {'ourpartners_category':category}
|
[
"kamral010101@gmail.com"
] |
kamral010101@gmail.com
|
78160df90330823af6c5d5389fbe35435da6181e
|
9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c
|
/baomoicrawl/venv/Lib/site-packages/twisted/python/test/deprecatedattributes.py
|
368f231b4fba03650d3126673b5a98b6ffcf64ff
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler
|
b0fdedee2942a12d9f64dfed93f43802dc5ab340
|
87c8c07433466bbc43a24ea089f75baeb467c356
|
refs/heads/master
| 2022-11-27T21:36:33.917491
| 2020-08-10T23:24:42
| 2020-08-10T23:24:42
| 286,583,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A module that is deprecated, used by L{twisted.python.test.test_deprecate} for
testing purposes.
"""
from __future__ import division, absolute_import
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# Known module-level attributes.
DEPRECATED_ATTRIBUTE = 42
ANOTHER_ATTRIBUTE = 'hello'
version = Version('Twisted', 8, 0, 0)
message = 'Oh noes!'
deprecatedModuleAttribute(
version,
message,
__name__,
'DEPRECATED_ATTRIBUTE')
|
[
"thuy4tbn99@gmail.com"
] |
thuy4tbn99@gmail.com
|
9aa328809ba68d90740be6791e15c2849822679e
|
3cec81bf2c37485482b9ac100c45029baa8044a2
|
/pde/tools/spectral.py
|
98c12dee4c9b855f17d2228f1d8477346148dbbe
|
[
"MIT"
] |
permissive
|
jhurreaq/py-pde
|
085adfbbcdabc162d66fef28007d729d465fbadf
|
42cd3e9cc45793840ecfe244e606c39b13502658
|
refs/heads/master
| 2023-08-05T18:23:53.612466
| 2021-09-19T13:19:29
| 2021-09-19T13:19:29
| 318,832,446
| 0
| 0
|
MIT
| 2020-12-05T16:16:46
| 2020-12-05T16:16:45
| null |
UTF-8
|
Python
| false
| false
| 3,022
|
py
|
"""
Functions making use of spectral decompositions
.. autosummary::
:nosignatures:
make_colored_noise
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from typing import Callable, Tuple
import numpy as np
try:
from pyfftw.interfaces.numpy_fft import irfftn as np_irfftn
from pyfftw.interfaces.numpy_fft import rfftn as np_rfftn
except ImportError:
from numpy.fft import irfftn as np_irfftn
from numpy.fft import rfftn as np_rfftn
def make_colored_noise(
shape: Tuple[int, ...],
dx=1.0,
exponent: float = 0,
scale: float = 1,
rng: np.random.Generator = None,
) -> Callable[[], np.ndarray]:
r"""Return a function creating an array of random values that obey
.. math::
\langle c(\boldsymbol k) c(\boldsymbol k’) \rangle =
\Gamma^2 |\boldsymbol k|^\nu \delta(\boldsymbol k-\boldsymbol k’)
in spectral space on a Cartesian grid. The special case :math:`\nu = 0`
corresponds to white noise.
Args:
shape (tuple of ints):
Number of supports points in each spatial dimension. The number of
the list defines the spatial dimension.
dx (float or list of floats):
Discretization along each dimension. A uniform discretization in
each direction can be indicated by a single number.
exponent:
Exponent :math:`\nu` of the power spectrum
scale:
Scaling factor :math:`\Gamma` determining noise strength
rng (:class:`~numpy.random.Generator`):
Random number generator (default: :func:`~numpy.random.default_rng()`)
Returns:
callable: a function returning a random realization
"""
if rng is None:
rng = np.random.default_rng()
# extract some information about the grid
dim = len(shape)
dx = np.broadcast_to(dx, (dim,))
if exponent == 0:
# fast case of white noise
def noise_normal():
"""return array of colored noise"""
return scale * rng.normal(size=shape)
return noise_normal
# deal with colored noise in the following
# prepare wave vectors
k2s = np.array(0)
for i in range(dim):
if i == dim - 1:
k = np.fft.rfftfreq(shape[i], dx[i])
else:
k = np.fft.fftfreq(shape[i], dx[i])
k2s = np.add.outer(k2s, k ** 2)
# scaling of all modes with k != 0
k2s.flat[0] = 1 # type: ignore
scaling = 2 * np.pi * scale * k2s ** (exponent / 4)
scaling.flat[0] = 0 # type: ignore
# TODO: accelerate the FFT using the pyfftw package
def noise_colored() -> np.ndarray:
"""return array of colored noise"""
# random field
arr: np.ndarray = rng.normal(size=shape) # type: ignore
# forward transform
arr = np_rfftn(arr)
# scale according to frequency
arr *= scaling
# backwards transform
arr = np_irfftn(arr, shape)
return arr
return noise_colored
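# A minimal usage sketch (illustration only, not part of the original module): the grid
# shape, spacing, exponent, and scale below are arbitrary example values.
if __name__ == "__main__":
    make_noise = make_colored_noise((64, 64), dx=0.5, exponent=-2, scale=0.1)
    field = make_noise()  # one random realization with the requested power spectrum
    print(field.shape, float(field.std()))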
|
[
"david.zwicker@ds.mpg.de"
] |
david.zwicker@ds.mpg.de
|
d6e8ff8516b3f0322e3955eae9f8edc244f810f8
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-dbs/aliyunsdkdbs/request/v20190306/ModifyBackupObjectsRequest.py
|
0e3fa5590c1538ee6643ee531e162ad58600b343
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdbs.endpoint import endpoint_data
class ModifyBackupObjectsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dbs', '2019-03-06', 'ModifyBackupObjects')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_BackupPlanId(self): # String
return self.get_query_params().get('BackupPlanId')
def set_BackupPlanId(self, BackupPlanId): # String
self.add_query_param('BackupPlanId', BackupPlanId)
def get_BackupObjects(self): # String
return self.get_query_params().get('BackupObjects')
def set_BackupObjects(self, BackupObjects): # String
self.add_query_param('BackupObjects', BackupObjects)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
f45fdaae0dc927be8cad55969099030418d04029
|
633b2299ab0bb2648b94a7f8bf0288aa7a083347
|
/Project_Assignment11/app11/views.py
|
709df73b08555f9bfcf6f78709277c4db176767d
|
[] |
no_license
|
Abhirvalandge/Django-Practice-Projects-with-Naveen-Sir
|
7deb309b8af3c9e123da8525d7a925661662bde3
|
172191e22fcec3a3f80f64e624e6f9a52617486e
|
refs/heads/main
| 2023-03-23T08:34:25.196039
| 2021-03-06T06:56:17
| 2021-03-06T06:56:17
| 345,023,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from django.shortcuts import render
def showMain(request):
return render(request,"index.html")
def showIndex(request):
oroom = 1500
droom = 2500
sroom = 5000
comp = 1000
gy = 250
min = 50
gname = request.POST.get("gn")
gcont = request.POST.get("gc")
add = request.POST.get("add")
ordi = request.POST.get("ord")
delux = request.POST.get("del")
suite = request.POST.get("sui")
computer = request.POST.get("com")
gym = request.POST.get("gym")
mineral = request.POST.get("min")
if ordi == "oroom" and computer == "comp":
a1=oroom+comp
return render(request,"confirmation.html", {"data1":a1})
|
[
"abhirvalandge@gmail.com"
] |
abhirvalandge@gmail.com
|
a13cc031394394a73888e319442d50773ae1f9cf
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_Samsruti_1.py
|
fef1069471fc5682a4dbee4a96c5491f48a857dc
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 828
|
py
|
import sys
def readInt():
return int(raw_input())
def retrieveEachNumber(number):
eachNumberList = []
rem = 0
while number > 0:
rem = number % 10
number = number/10
eachNumberList.append(rem)
return eachNumberList
fileout = open('log.txt', 'w')
needList = [0,1,2,3,4,5,6,7,8,9]
trueval = []
T = readInt()
for cases in range(1,T+1):
#print "Enter N: "
N = readInt()
val = 0
i = 1
arr = []
while i<1000000:
# print "i is :",i
val = N*i
# print "val = ",val
arr += retrieveEachNumber(val)
# print arr
i = i+1
        arr = sorted(set(arr))  # deduplicate first, then sort, so the comparison with needList is order-safe
if arr == needList:
flag = 1
break
else:
flag = 0
continue
if flag == 1:
print >> fileout,"Case #"+str(cases)+": ",val
else:
print >> fileout,"Case #"+str(cases)+": INSOMNIA"
fileout.close()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
43d77b0cd16051f64708b60f8fccce68d390dffb
|
dc99d95671170444cd7bf02e37da6ecda4a5f19e
|
/apps/operation/migrations/0008_auto_20180905_1609.py
|
55dfc0e5ddf62f37178e22dbdf88e45ca8ef24f2
|
[] |
no_license
|
bbright3493/python_real_war
|
734d49ed9f7e1800d24dc754424a07b69d7d8c1f
|
6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1
|
refs/heads/master
| 2020-03-30T06:08:40.249185
| 2018-10-22T07:33:41
| 2018-10-22T07:33:41
| 150,841,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-09-05 16:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation', '0007_auto_20180831_1621'),
]
operations = [
migrations.RemoveField(
model_name='usercourse',
name='status',
),
migrations.AddField(
model_name='usercourse',
name='course_status',
field=models.SmallIntegerField(choices=[(1, '未开通'), (2, '已开通')], default=1, verbose_name='课程状态'),
),
migrations.AddField(
model_name='usercourse',
name='study_status',
field=models.SmallIntegerField(choices=[(1, '未学习'), (2, '学习中'), (3, '已学习')], default=1, verbose_name='课程的学习状态'),
),
migrations.AddField(
model_name='userpass',
name='choice_status',
field=models.SmallIntegerField(default=0, verbose_name='关卡选择题库答题状态'),
),
]
|
[
"44704708@qq.com"
] |
44704708@qq.com
|
3d8e60b68e721264a7b6abc65cdd796f0b801774
|
db9060d9a8550d6ce24fff6bdf04b4fa8974f20b
|
/utils/install_pyro.py
|
e6823af6f8006fac074ae01adce8216a8dbc3c8b
|
[] |
no_license
|
davecap/pydr
|
92fbb7ba9ecaa054a36b0409f86b58740592767d
|
fc6cbef45007b55bf78fd661cc81c6b0ef7e0d80
|
refs/heads/master
| 2016-09-06T11:11:28.685731
| 2011-08-05T16:50:22
| 2011-08-05T16:50:22
| 864,921
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
#!/usr/bin/python
import subprocess
import os
import shutil
import tempfile
import re
try:
temp_dir = tempfile.mkdtemp(prefix='pyro_install_')
print 'Installing in %s' % temp_dir
os.chdir(temp_dir)
url = 'http://www.xs4all.nl/~irmen/pyro3/download/Pyro-3.10.tar.gz'
filename = 'Pyro-3.10.tar.gz'
dir = re.match(r'(.*)\.tar.gz', filename).group(1)
return_code = subprocess.call(['wget', url])
if return_code != 0:
raise Exception('Could not download file from url: %s' % url)
else:
print 'Downloaded...'
return_code = subprocess.call(['tar', 'xzvf', filename])
if return_code != 0:
raise Exception('Could not untar file: %s' % filename)
else:
print 'Untarred...'
os.chdir(os.path.abspath(dir))
return_code = subprocess.call(['python', 'setup.py', 'install'])
if return_code != 0:
raise Exception('Could not install Pyro')
finally:
shutil.rmtree(temp_dir)
|
[
"dcaplan@gmail.com"
] |
dcaplan@gmail.com
|
9554a7b27eb0ab83d7221086697a354c5f8a3b85
|
6547c59e1041bd888c31d360f9b75e07f5b2bdd0
|
/bin/pythonicus
|
d17e06e9c343bbdefffcca29145c02e19757ae3d
|
[] |
no_license
|
diegorubin/pythonicus
|
b5069f3bc12f20715e70da990fb4476089046c53
|
ae0015d301df5cac3896455a6b64917f0b5bc1b8
|
refs/heads/master
| 2021-03-12T20:36:05.564847
| 2014-12-27T10:52:10
| 2014-12-27T10:52:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
#!/usr/bin/env python
import sys
#import cyclone.web
from twisted.python import log
from twisted.internet import reactor
try:
from pythonicus.server.base import *
except ImportError:
from os.path import join, abspath, dirname
parentpath = abspath(join(dirname(__file__), '..'))
sys.path.append(parentpath)
from pythonicus.server.base import *
log.startLogging(sys.stdout)
reactor.listenTCP(8888, Application())
reactor.run()
|
[
"rubin.diego@gmail.com"
] |
rubin.diego@gmail.com
|
|
979444d06a18703716c2ffc753dde974cb5502fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03967/s359107548.py
|
d0ee9a3ac3bd33bf4227ed9624640a7cce491654
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
S = input()
g_cnt, p_cnt = 0, 0
ans = 0
for s in S:
if p_cnt < g_cnt:
if s == "g":
p_cnt += 1
ans += 1
elif s == "p":
p_cnt += 1
else:
if s == "g":
g_cnt += 1
elif s == "p":
g_cnt += 1
ans -= 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f45ba9f87301ca0d99a049ac8d38a6398db17af0
|
e5b4ed93d6666e195e96a265d3e7cfe4243a7300
|
/python_net/day02/udp_server.py
|
8df6e4aa87d01a1cdc5cca61ad4cecade830b7b8
|
[] |
no_license
|
Spider251/python
|
934f5b8b923c2b61186a6df8445957290e5c4c74
|
8b1931f862e1d5c29fed9af624bcac94c1d25755
|
refs/heads/master
| 2020-04-05T11:58:04.558098
| 2018-11-09T12:06:06
| 2018-11-09T12:06:06
| 156,852,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# udp_server.py
from socket import *
# Create a datagram (UDP) socket
sockfd = socket(AF_INET,SOCK_DGRAM)
# Bind the server address
server_addr = ('0.0.0.0',8888)
sockfd.bind(server_addr)
# Receive and send messages
while True:
data,addr = sockfd.recvfrom(1024)
print("消息来自%s:%s"%(addr,data.decode()))
sockfd.sendto(b"Thanks for you msg",addr)
# Close the socket
sockfd.close()
|
[
"1419418693@qq.com"
] |
1419418693@qq.com
|
0cc41ccb5f1374c3d4787416da11b9c307894afd
|
16c8fdf291430475f40d578b0d64552eb64046e9
|
/colour/difference/tests/test_huang2015.py
|
9e6bd01ac869d70b6f779b19f66a0c1b05fae61b
|
[
"BSD-3-Clause"
] |
permissive
|
nodefeet/colour
|
4c1bfed87ce173ff878bdf288fd9828bb68022e3
|
319dd5b1c45aef6983eff1830f918c1e593fb530
|
refs/heads/develop
| 2022-02-19T17:39:36.657993
| 2022-02-15T08:38:26
| 2022-02-15T08:38:26
| 460,456,444
| 0
| 0
|
BSD-3-Clause
| 2022-02-17T13:53:37
| 2022-02-17T13:53:36
| null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
"""Defines the unit tests for the :mod:`colour.difference.huang2015` module."""
import numpy as np
import unittest
from colour.difference import power_function_Huang2015
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestPowerFunctionHuang2015",
]
class TestPowerFunctionHuang2015(unittest.TestCase):
"""
Define :func:`colour.difference.huang2015.power_function_Huang2015`
definition unit tests methods.
"""
def test_power_function_Huang2015(self):
"""
Test :func:`colour.difference.huang2015.power_function_Huang2015`
definition.
"""
d_E = np.array([2.0425, 2.8615, 3.4412])
np.testing.assert_almost_equal(
power_function_Huang2015(d_E),
np.array([2.35748796, 2.98505036, 3.39651062]),
decimal=7,
)
if __name__ == "__main__":
unittest.main()
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
6f91fd482b30ef5e937e14797819b36816094e2f
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_basis3.py
|
7221798fa0235553b855d476ccc0d957c7a06580
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
from xcp2k.inputsection import InputSection
class _basis3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Default_keyword = []
self._name = "BASIS"
self._repeated_default_keywords = {'Default_keyword': 'DEFAULT_KEYWORD'}
self._attributes = ['Default_keyword']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
536c4275d8ead61ff8849a450a9f33405be4a30a
|
2594650405c1424bec1ab14c1ce994906d6cc961
|
/AByteOfPython/C14_InputOutput1b.py
|
9ce11e5999a8462b2ab6eb9fb64979d493ae9a6e
|
[] |
no_license
|
aa2276016/Learning_Python
|
10dd46eeb77d5ec05b4e607c523e9e5597a2e7ee
|
f0e3b4876ea078a45493eb268992cec62ccd29d1
|
refs/heads/master
| 2021-10-19T08:34:15.694353
| 2018-03-17T02:38:49
| 2018-03-17T02:38:49
| 125,590,648
| 0
| 0
| null | 2018-03-17T02:41:26
| 2018-03-17T02:41:26
| null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Input Output 1b:
def reverse(text):
return text[::-1]
def is_palindrome(text):
return text == reverse(text)
while True:  # set up the loop first and keep input() inside it, so the user is prompted on every iteration
something = input("Enter text: ")
if something == 'quit':
break
if is_palindrome(something):
        print("Yes, it is a palindrome")
else:
print("No, it is not a palindrome")
|
[
"jxie0755@users.noreply.github.com"
] |
jxie0755@users.noreply.github.com
|
699c1ceda3c1dd63c07aae302a294b79564c477f
|
e39cd8cdcbfe60eafb45fb96f6f5ada62940c6f2
|
/setup.py
|
fc63bf42cbb532d0af0752d0f7a3a6da2041a9db
|
[
"MIT"
] |
permissive
|
DarkmatterVale/hurricane
|
5d4505ed6a994ed669e9795d047602ca773ce20c
|
94e9e56fcc6d73f5f76ad1fe9e3e4f248549fe7b
|
refs/heads/master
| 2021-05-01T00:52:22.374738
| 2017-01-13T00:15:34
| 2017-01-13T00:15:34
| 64,959,057
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
# This is the setup file for pip
from setuptools import setup, find_packages
import os, sys
from os import path
setup(
name = 'hurricane',
version = '0.0.1',
description = 'A master-slave computer communication protocol',
url = 'https://github.com/DarkmatterVale/hurricane',
author = 'Vale Tolpegin',
author_email = 'valetolpegin@gmail.com',
license = 'MIT',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
"Operating System :: OS Independent",
],
packages = find_packages(),
install_requires = ['scapy-python3'],
keywords = [],
)
|
[
"valetolpegin@gmail.com"
] |
valetolpegin@gmail.com
|
b6e84a0000cbadb35e33a397764e8a72f3b8d1a7
|
a0a41401bafc44f233d5ba82ed8905ba9d213699
|
/huntserver/migrations/0023_auto_20150717_1025.py
|
0539387ebf04335213a8d2b2b2e750a333718e53
|
[] |
no_license
|
christophsjones/puzzlehunt_server
|
266c0e6a02e09d1f7290dab6b006c0ce70b2586b
|
53390bdb8d97955c280f7a2e660150d5c56b49f2
|
refs/heads/master
| 2021-01-23T01:35:12.499539
| 2015-10-21T19:59:43
| 2015-10-21T19:59:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0022_auto_20150717_1024'),
]
operations = [
migrations.AlterField(
model_name='unlockable',
name='content_type',
field=models.CharField(default=b'TXT', max_length=3, choices=[(b'IMG', b'Image'), (b'PDF', b'PDF'), (b'TXT', b'Text'), (b'WEB', b'Link')]),
),
]
|
[
"flybye22@gmail.com"
] |
flybye22@gmail.com
|
526117433c8898f00ad8d1ef33ed32fc9051ce2d
|
b0c375bda0f25ab2408be94fd0714819af3bc1ab
|
/model/layers/blocks_module.py
|
5927e6a6f428baca4259783ad12f9d0372bcd408
|
[] |
no_license
|
nenoc/YOLOv4-pytorch
|
8f0ee6415b610d8d566396500abe133b1079ba70
|
781b07d5fbbd4faefcfd1d535f21982c436f27cd
|
refs/heads/master
| 2022-11-20T20:03:45.814946
| 2020-07-20T12:05:34
| 2020-07-20T12:05:34
| 282,566,409
| 2
| 0
| null | 2020-07-26T03:15:34
| 2020-07-26T03:15:34
| null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
import torch.nn as nn
from ..layers.conv_module import Convolutional
class Residual_block(nn.Module):
def __init__(self, filters_in, filters_out, filters_medium):
super(Residual_block, self).__init__()
self.__conv1 = Convolutional(filters_in=filters_in, filters_out=filters_medium, kernel_size=1, stride=1, pad=0,
norm="bn", activate="leaky")
self.__conv2 = Convolutional(filters_in=filters_medium, filters_out=filters_out, kernel_size=3, stride=1, pad=1,
norm="bn", activate="leaky")
def forward(self, x):
r = self.__conv1(x)
r = self.__conv2(r)
out = x + r
return out
|
[
"your email"
] |
your email
|
348ced0727e42f540f5292dbfa643f5e7e7f46b7
|
a438748ac89d53b19e7f4130529906896f059b25
|
/gen_for_collect.py
|
9fc0c0c61660397c13ff38e9779b4a1fbdb2bf23
|
[] |
no_license
|
Alexfordrop/Basics
|
90ead9294727a823eb044e5f2f69d8f29133d150
|
eda400424b2c72bd5e01a6c7cb14ad7ae29477d4
|
refs/heads/master
| 2023-06-08T16:42:26.704163
| 2021-06-27T20:46:27
| 2021-06-27T20:46:27
| 329,421,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# List comprehension
a = [i for i in range(5)]
# Generator expression
x = (i for i in range(5))
print(a)
print(x)
for i in x:
print(i, end=' ')
|
[
"mishechkin.aleksei@mail.ru"
] |
mishechkin.aleksei@mail.ru
|
d89c72f08a21bd2f42f071c1c791e17542d88c4f
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_google_maps_directions/tests/test_fn_google_maps_directions.py
|
ea98b341ce5894834d0f28a893991ead0320e9ba
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297
| 2023-06-23T16:33:58
| 2023-06-23T16:33:58
| 101,410,006
| 81
| 107
|
MIT
| 2023-03-29T20:40:31
| 2017-08-25T14:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_google_maps_directions"
FUNCTION_NAME = "fn_google_maps_directions"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_google_maps_directions_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("fn_google_maps_directions", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("fn_google_maps_directions_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnGoogleMapsDirections:
""" Tests for the fn_google_maps_directions function"""
inputs = ['IBM, Armonk, New York', 'IBM Resilient, Cambridge, Boston, MA']
outputs = [
{
"success": True,
"directions_link": "https://www.google.com/maps/dir/?api=1&origin=IBM%2C%20Armonk%2C%20New%20York&destination=IBM%20Resilient%2C%20Cambridge%2C%20Boston%2C%20MA",
"inputs": {
"google_maps_origin": inputs[0],
"google_maps_destination": inputs[1]
}
}
]
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.parametrize("google_maps_origin, google_maps_destination, expected_results", [
(inputs[0], inputs[1], outputs[0])
])
def test_success(self, circuits_app, google_maps_origin, google_maps_destination, expected_results):
""" Test calling with sample values for the parameters """
function_params = {
"google_maps_origin": google_maps_origin,
"google_maps_destination": google_maps_destination
}
results = call_fn_google_maps_directions_function(circuits_app, function_params)
assert(expected_results == results)
@pytest.mark.parametrize("google_maps_origin, google_maps_destination, expected_results", [
(inputs[0], inputs[1], outputs[0])
])
def test_result_is_dict(self, circuits_app, google_maps_origin, google_maps_destination, expected_results):
""" Test calling with sample values for the parameters and result is of type dict"""
function_params = {
"google_maps_origin": google_maps_origin,
"google_maps_destination": google_maps_destination
}
results = call_fn_google_maps_directions_function(circuits_app, function_params)
assert (isinstance(results, dict))
|
[
"shane.curtin@ie.ibm.com"
] |
shane.curtin@ie.ibm.com
|
7b23637cb362ddb83b9c8ac25dd2b2295942406e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/217/usersdata/274/113591/submittedfiles/av2_p3_m2.py
|
11d3b1df8bc4632ec02ca21815a72cd111a8a103
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
# -*- coding: utf-8 -*-
n = int(input("Dimensão do Quadrado: "))
while not n >= 3:
    n = int(input("Dimensão do Quadrado: "))
# Read the n x n matrix row by row
M = []
for i in range(0, n, 1):
    L = []
    for j in range(0, n, 1):
        L.append(int(input("Elemento da Linha: ")))
    M.append(L)
# Sum of each row
somaL = []
for i in range(0, n, 1):
    somaL.append(sum(M[i]))
# Sum of each column
somaC = []
for j in range(0, n, 1):
    C = 0
    for i in range(0, n, 1):
        C = C + M[i][j]
    somaC.append(C)
# Look for a row whose sum differs from the first one
b = [somaL[0]]
ct = 0
k = 0
VE = 0
VC = 0
for i in range(0, n, 1):
    if somaL[i] in b:
        continue
    else:
        ct = ct + 1
        k = i
if ct == 1:
    VE = somaL[k]
    VC = somaL[0]
if ct != 1:
    VE = somaL[0]
    VC = somaL[1]
    k = 0
# Look for a column whose sum differs from the first one
b1 = [somaC[0]]
cont2 = 0
k1 = 0
VE1 = 0
for i in range(0, n, 1):
    if somaC[i] in b1:
        continue
    else:
        cont2 = cont2 + 1
        k1 = i
if cont2 == 1:
    VE1 = somaC[k1]
    VC = somaC[0]
if cont2 != 1:
    VE1 = somaC[0]
    VC = somaC[1]
    k1 = 0
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
dc2c448b2f4ba78a00a1c9045e74ad0ae468900e
|
4a5056686e604e5af78d4653c7c51ac3769b6b38
|
/server/view.py
|
e7de73a06c0760f65ebc1e704baa315ae65457bc
|
[
"MIT"
] |
permissive
|
SangminOut/Short-URL
|
673e164cf0e9c471d5e509ccd9ae5fe604ddf2f5
|
6269049442e0c499764eb1061631b6399bdbc919
|
refs/heads/master
| 2020-04-11T04:01:38.015376
| 2018-12-16T12:22:53
| 2018-12-16T12:22:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
from flask import request, redirect, Response, jsonify, current_app
from model import UrlModel
def save_url():
original_url = request.json['url']
url_key = UrlModel(original_url).save_url()
if current_app.config['SERVER_NAME'] is not None:
base_url = current_app.config['SERVER_NAME'] + '/'
else:
base_url = 'localhost/'
return jsonify({'url': 'http://' + base_url + url_key}), 201
def get_url(key) -> Response:
url = UrlModel.get_url(key)
if url is None:
return Response('', 204)
return redirect(url)
|
[
"python@istruly.sexy"
] |
python@istruly.sexy
|
188c4c56d93bad2c21c59daa92790577ed1fbaaa
|
a3d32e0ff84958d194ced642441f5379c0032465
|
/tfmsarest/views/common_schema.py
|
2e068f8dc9fbea974ec8cf611c0b9e4f16d9026b
|
[] |
no_license
|
TensorMSA/tensormsa_old
|
406755511d05d4ec179c085337a05f73c0dde80a
|
ef058737f391de817c74398ef9a5d3a28f973c98
|
refs/heads/master
| 2021-06-18T11:58:29.349060
| 2017-04-20T10:17:43
| 2017-04-20T10:17:43
| 67,384,681
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
from tfmsacore import netconf
import json, unicodedata
from rest_framework.response import Response
from rest_framework.views import APIView
from tfmsacore.utils.json_conv import JsonDataConverter as jc
class CommonSchema(APIView):
"""
"""
def get(self, request, datatype, preprocess, category, subcategory ):
"""
- desc : return nn_info data
"""
try:
result = netconf.get_namespace(datatype, preprocess, category, subcategory)
return_data = {"status": "200", "result": result}
return Response(json.dumps(return_data))
except Exception as e:
return_data = {"status": "400", "result": str(e)}
return Response(json.dumps(return_data))
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
69bb43586bb37d90d4904525583972a1c1353d74
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/_0501_1000/0631_design-excel-sum-formula.py
|
f5ab87115d8d3a0a794edc0aec9f0d2c163b354e
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,890
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-24 09:36:33
# @Last Modified : 2020-07-24 09:36:33
# @Mail : lostlorder@gmail.com
# @Version : alpha-1.0
"""
# Your task is to implement Excel's sum function, with the following operations:
#
# Excel(int H, char W): the constructor; its arguments give the height and width of the
# Excel sheet. H is a positive integer from 1 to 26 and gives the height. W is a
# character from 'A' to 'Z'; the width equals the number of letters from 'A' to W. The
# Excel sheet is a height * width 2D integer array whose elements are initialized to 0.
# Row indices start at 1 and column indices start at 'A'.
#
# void Set(int row, char column, int val): set C(row, column) to val.
#
# int Get(int row, char column): return the value of C(row, column).
#
# int Sum(int row, char column, List of Strings : numbers): store the computed result in
# C(row, column); the result equals the sum of all elements represented by numbers, and
# the function also returns that result. The sum formula keeps updating its result until
# it is overwritten by another value or formula.
#
# numbers is a collection of strings, each representing either a single cell or a range.
# A single cell has the format ColRow, e.g. "F7" means position (7, F). A range has the
# format ColRow1:ColRow2, i.e. the rectangle whose top-left corner is ColRow1 and whose
# bottom-right corner is ColRow2.
#
# Example 1:
#
# Excel(3,"C");
# // Construct a 3*3 2D array, initialized to all zeros.
# // A B C
# // 1 0 0 0
# // 2 0 0 0
# // 3 0 0 0
#
# Set(1, "A", 2);
# // Set C(1,"A") to 2.
# // A B C
# // 1 2 0 0
# // 2 0 0 0
# // 3 0 0 0
#
# Sum(3, "C", ["A1", "A1:B2"]);
# // Set C(3,"C") to the sum of the single cell C(1,"A") plus all elements of the
# // rectangle whose top-left corner is C(1,"A") and bottom-right corner is C(2,"B").
# // Returns 4.
# // A B C
# // 1 2 0 0
# // 2 0 0 0
# // 3 0 0 4
#
# Set(2, "B", 2);
# // Set C(2,"B") to 2. Note that the value of C(3,"C") changes at the same time.
# // A B C
# // 1 2 0 0
# // 2 0 2 0
# // 3 0 0 6
#
# Notes:
#
# You may assume there are no circular sum definitions, for example A1 = sum(B1) and
# B1 = sum(A1).
# In the test data, letters are written with double quotes.
# Remember to reset the variables of the Excel class, because static and class variables
# persist across test cases. See the link for details.
#
# Related Topics: Design
# 👍 16 👎 0
"""
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Excel:
def __init__(self, H: int, W: str):
self.m = {}
self.mat = [[0] * (self.col_idx(W) + 1) for _ in range(H)]
def col_idx(self, char):
return ord(char) - ord("A")
def set(self, r: int, c: str, v: int) -> None:
if (r, c) in self.m:
self.m.pop((r, c))
self.mat[r - 1][self.col_idx(c)] = v
def get(self, r: int, c: str) -> int:
if (r, c) in self.m:
return self.sum(r, c, self.m[(r, c)])
return self.mat[r - 1][self.col_idx(c)]
def sum(self, r: int, c: str, strs: List[str]) -> int:
res = 0
for s in strs:
if ":" not in s:
y = s[0]
x = int(s[1:])
res += self.get(x, y)
else:
f, t = s.split(":")
for i in range(int(f[1:]), int(t[1:]) + 1):
for j in range(ord(f[0]), ord(t[0]) + 1):
res += self.get(i, chr(j))
self.m[r, c] = strs
# print(self.m)
return res
# Your Excel object will be instantiated and called as such:
# obj = Excel(H, W)
# obj.set(r,c,v)
# param_2 = obj.get(r,c)
# param_3 = obj.sum(r,c,strs)
# leetcode submit region end(Prohibit modification and deletion)
def test_solution():
ex = Excel(3, "C")
ex.set(1, "A", 2)
assert ex.sum(3, "C", ["A1", "A1:B2"]) == 4
ex.set(2, "B", 2)
assert ex.get(3, "C") == 6
def test1():
ops = ["Excel", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set",
"set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "sum",
"sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum",
"sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum"]
args = [[26, "Z"], [1, "A", 0], [1, "B", 1], [1, "C", 2], [1, "D", 3], [1, "E", 4], [1, "F", 5], [1, "G", 6],
[1, "H", 7], [1, "I", 8], [1, "J", 9], [1, "K", 10], [1, "L", 11], [1, "M", 12], [1, "N", 13],
[1, "O", 14], [1, "P", 15], [1, "Q", 16], [1, "R", 17], [1, "S", 18], [1, "T", 19],
[1, "U", 20], [1, "V", 21], [1, "W", 22],
[1, "X", 23], [1, "Y", 24], [1, "Z", 25], [2, "A", ["A1:A1"]], [2, "B", ["A1:B1"]], [2, "C", ["A1:C1"]],
[2, "D", ["A1:D1"]], [2, "E", ["A1:E1"]], [2, "F", ["A1:F1"]], [2, "G", ["A1:G1"]], [2, "H", ["A1:H1"]],
[2, "I", ["A1:I1"]], [2, "J", ["A1:J1"]], [2, "K", ["A1:K1"]], [2, "L", ["A1:L1"]], [2, "M", ["A1:M1"]],
[2, "N", ["A1:N1"]], [2, "O", ["A1:O1"]], [2, "P", ["A1:P1"]], [2, "Q", ["A1:Q1"]], [2, "R", ["A1:R1"]],
[2, "S", ["A1:S1"]], [2, "T", ["A1:T1"]], [2, "U", ["A1:U1"]], [2, "V", ["A1:V1"]], [2, "W", ["A1:W1"]],
[2, "X", ["A1:X1"]], [2, "Y", ["A1:Y1"]], [2, "Z", ["A1:Z1"]]]
ex = Excel(26, "Z")
for op, arg in zip(ops[1:], args[1:]):
x = getattr(ex, op)(*arg)
if op != "set":
print(x)
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=no", __file__])
|
[
"rock@get.com.mm"
] |
rock@get.com.mm
|
28e9cadbf5eb0e4047f49800c21759742600c50c
|
c76b54198bcfbf0eb3427db86270053b05c34531
|
/main.py
|
7bc62005369f9fa349970a9e699ccc71fa56957f
|
[] |
no_license
|
tang1323/ArticleSpider
|
fd2f97c4b6cb77adf3439e993c641f345e9fe07e
|
7eb99c98a24fc5b6c2f2e54c6322c07b15bb7eed
|
refs/heads/master
| 2023-04-04T02:03:29.289456
| 2021-04-15T01:43:18
| 2021-04-15T01:43:18
| 358,093,260
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from scrapy.cmdline import execute
import sys
import os
# print(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(["scrapy", "crawl", "cnblogs"])
# execute(["scrapy", "crawl", "zhihu_sel"])
# execute(["scrapy", "crawl", "lagou"])
execute(["scrapy", "crawl", "lagou_cooike_pool"])
|
[
"1171242903@qq.com"
] |
1171242903@qq.com
|
ea2cc4a918ce7233d5fd7fc4ced1465a7724a886
|
5376007035bf5aebb57c4d4f788098c9706ebe44
|
/api/serializers/store.py
|
bf4b7d5c0a1d2081c3fe855a0077f072ecdf34a1
|
[] |
no_license
|
oyeolamilekan/monoapp
|
6c0f49cc12a167bc0343648ae63f4e9c864bb130
|
939de5a1bb65e9bc48e48662f2ccffef280ffe10
|
refs/heads/master
| 2022-12-12T04:26:49.515305
| 2019-09-07T11:28:51
| 2019-09-07T11:28:51
| 192,071,474
| 0
| 0
| null | 2022-12-08T03:03:11
| 2019-06-15T11:35:50
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
"""
This serializer handles the serialization of the store object
"""
from rest_framework.serializers import ModelSerializer
from shop.models import Shop
class ShopSerializer(ModelSerializer):
"""[the shop info]
Arguments:
{[ inherits from serializer class rest framework]} -- [description]
"""
class Meta:
model = Shop
fields = (
"id",
"user",
"slug",
"title",
"categories",
"phone_number",
"address",
"description",
"logo",
)
|
[
"johnsonoye34@gmail.com"
] |
johnsonoye34@gmail.com
|
e0a16314f9011bec7baf07a499ee6dc14a3fd84c
|
d838bed08a00114c92b73982a74d96c15166a49e
|
/docs/data/learn/Bioinformatics/input/ch7_code/src/Stepik.7.3.ExerciseBreak.ReduceMatrixAndTreeSizeForDegreeGreaterThan3.py
|
8c78a252e9bb67b680c446f51bfc4e3cd9a48b02
|
[] |
no_license
|
offbynull/offbynull.github.io
|
4911f53d77f6c59e7a453ee271b1e04e613862bc
|
754a85f43159738b89dd2bde1ad6ba0d75f34b98
|
refs/heads/master
| 2023-07-04T00:39:50.013571
| 2023-06-17T20:27:05
| 2023-06-17T23:27:00
| 308,482,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
# EXERCISE BREAK
#
# Exercise Break: We have just described how to reduce the size of the tree as well as the dimension of the distance
# matrix D if the parent node (m) has degree 3. Design a similar approach in the case that the degree of m is larger
# than 3.
# Recall that for m having a degree of 3...
#
# i * * k
# \ /
# m *-------*
# / \
# j * * l
#
# | | i | j | k | l |
# |---|---|---|---|---|
# | i | ? | ? | ? | ? |
# | j | ? | ? | ? | ? |
# | k | ? | ? | ? | ? |
# | l | ? | ? | ? | ? |
#
# ... you can remove the leaf nodes i and j, leaving you with a new leaf node of just m. This is possible because the
# distance between m and other leaf nodes may be derived just from the weights in the original matrix...
#
# dist(m,k) = (dist(i,k) + dist(j,k) - dist(i,j)) / 2
# dist(m,l) = (dist(i,l) + dist(j,l) - dist(i,j)) / 2
#
# Once these distances have been calculated, you can remove i and j from the tree and replace them in the distance
# matrix with just m...
#
# * k
# /
# m *-------*
# \
# * l
#
# | | m | k | l |
# |---|---|---|---|
# | m | ? | ? | ? |
# | k | ? | ? | ? |
# | l | ? | ? | ? |
#
# ANSWER
#
# How would this work if m had a degree of 4?
#
# * a
# /
# *
# i * / \
# \ / * b
# m *
# / \ * c
# j * \ /
# *
# \
# * d
#
# | | i | j | a | b | c | d |
# |---|---|---|---|---|---|---|
# | i | ? | ? | ? | ? | ? | ? |
# | j | ? | ? | ? | ? | ? | ? |
# | a | ? | ? | ? | ? | ? | ? |
# | b | ? | ? | ? | ? | ? | ? |
# | c | ? | ? | ? | ? | ? | ? |
# | d | ? | ? | ? | ? | ? | ? |
#
# Do the same thing as before: Calculate the distance from m to every other leaf node just as you did for degree = 3...
#
# dist(m,a) = (dist(i,a) + dist(j,a) - dist(i,j)) / 2
# dist(m,b) = (dist(i,b) + dist(j,b) - dist(i,j)) / 2
# dist(m,c) = (dist(i,c) + dist(j,c) - dist(i,j)) / 2
# dist(m,d) = (dist(i,d) + dist(j,d) - dist(i,j)) / 2
#
# ... which ends up resulting in...
#
# * a
# /
# *
# / \
# / * b
# m *
# \ * c
# \ /
# *
# \
# * d
#
# | | m | a | b | c | d |
# |---|---|---|---|---|---|
# | m | ? | ? | ? | ? | ? |
# | a | ? | ? | ? | ? | ? |
# | b | ? | ? | ? | ? | ? |
# | c | ? | ? | ? | ? | ? |
# | d | ? | ? | ? | ? | ? |
#
# I'm fairly certain this is right, but I haven't tested it out.
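# Below is a small sketch of the reduction described above, added for illustration only;
# the dict-of-dicts distance matrix and the example values are made up and are not part
# of the original exercise text.
def reduce_leaves(dist, i, j, m):
    # Replace sibling leaves i and j (children of the same internal node) with a new
    # leaf m, where dist(m,k) = (dist(i,k) + dist(j,k) - dist(i,j)) / 2. The formula
    # only ever involves i, j, and one other leaf k at a time, so it works no matter
    # what the degree of the parent node is.
    others = [k for k in dist if k not in (i, j)]
    dist[m] = {}
    for k in others:
        d = (dist[i][k] + dist[j][k] - dist[i][j]) / 2
        dist[m][k] = d
        dist[k][m] = d
    for gone in (i, j):
        del dist[gone]
    for row in dist.values():
        row.pop(i, None)
        row.pop(j, None)
    return dist
if __name__ == '__main__':
    D = {
        'i': {'j': 13, 'k': 21, 'l': 22},
        'j': {'i': 13, 'k': 12, 'l': 13},
        'k': {'i': 21, 'j': 12, 'l': 13},
        'l': {'i': 22, 'j': 13, 'k': 13},
    }
    print(reduce_leaves(D, 'i', 'j', 'm'))  # m replaces i and j in the matrix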
|
[
"offbynull@gmail.com"
] |
offbynull@gmail.com
|
81dc0357d63777f89ea036e852b3efe56feaf9ab
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/205/usersdata/273/87079/submittedfiles/questao2_av1.py
|
bf918e1ddadf59e5328db54d5ee0e1da3c292262
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
a=int(input('Digite o numero escolhido: '))
b=int(input('Digite o numero escolhido: '))
c=int(input('Digite o numero escolhido: '))
d=int(input('Digite o numero escolhido: '))
e=int(input('Digite o numero escolhido: '))
f=int(input('Digite o numero escolhido: '))
asort=int(input('Digite o numero sorteado: '))
bsort=int(input('Digite o numero sorteado: '))
csort=int(input('Digite o numero sorteado: '))
dsort=int(input('Digite o numero sorteado: '))
esort=int(input('Digite o numero sorteado: '))
fsort=int(input('Digite o numero sorteado: '))
cont = 0
# Count how many of the six chosen numbers appear among the six drawn numbers
for escolhido in (a, b, c, d, e, f):
    if escolhido in (asort, bsort, csort, dsort, esort, fsort):
        cont = cont + 1
if cont==3:
print('Terno')
elif cont==4:
print('Quadra')
elif cont==5:
print('Quina')
elif cont==6:
print('Sena')
else:
print('Azar')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
edd67f99568d16c9b74838ae193917ba2ac2bd67
|
b8a803694c283a5acd13ab6760a36710884ab24f
|
/llvm/tests/test_operands.py
|
ef68b9eebfa0080d14e29767806e6305f85e337b
|
[
"NCSA",
"BSD-3-Clause"
] |
permissive
|
llvmpy/llvmpy
|
8a4c31e731364ead802231b97e058b8f8c444f96
|
13130fe35f1fb03a7051ad46c36146002391a6fa
|
refs/heads/master
| 2016-09-05T16:48:54.694686
| 2015-04-28T16:21:34
| 2015-04-28T16:21:34
| 3,375,197
| 155
| 13
| null | 2015-05-27T18:36:45
| 2012-02-07T07:09:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from llvm.core import Module
from .support import TestCase, tests
class TestOperands(TestCase):
# implement a test function
test_module = """
define i32 @prod(i32, i32) {
entry:
%2 = mul i32 %0, %1
ret i32 %2
}
define i32 @test_func(i32, i32, i32) {
entry:
%tmp1 = call i32 @prod(i32 %0, i32 %1)
%tmp2 = add i32 %tmp1, %2
%tmp3 = add i32 %tmp2, 1
%tmp4 = add i32 %tmp3, -1
%tmp5 = add i64 -81985529216486895, 12297829382473034410
ret i32 %tmp4
}
"""
def test_operands(self):
m = Module.from_assembly(StringIO(self.test_module))
test_func = m.get_function_named("test_func")
prod = m.get_function_named("prod")
# test operands
i1 = test_func.basic_blocks[0].instructions[0]
i2 = test_func.basic_blocks[0].instructions[1]
i3 = test_func.basic_blocks[0].instructions[2]
i4 = test_func.basic_blocks[0].instructions[3]
i5 = test_func.basic_blocks[0].instructions[4]
self.assertEqual(i1.operand_count, 3)
self.assertEqual(i2.operand_count, 2)
self.assertEqual(i3.operands[1].z_ext_value, 1)
self.assertEqual(i3.operands[1].s_ext_value, 1)
self.assertEqual(i4.operands[1].z_ext_value, 0xffffffff)
self.assertEqual(i4.operands[1].s_ext_value, -1)
self.assertEqual(i5.operands[0].s_ext_value, -81985529216486895)
self.assertEqual(i5.operands[1].z_ext_value, 12297829382473034410)
self.assert_(i1.operands[-1] is prod)
self.assert_(i1.operands[0] is test_func.args[0])
self.assert_(i1.operands[1] is test_func.args[1])
self.assert_(i2.operands[0] is i1)
self.assert_(i2.operands[1] is test_func.args[2])
self.assertEqual(len(i1.operands), 3)
self.assertEqual(len(i2.operands), 2)
self.assert_(i1.called_function is prod)
tests.append(TestOperands)
if __name__ == '__main__':
unittest.main()
|
[
"michael.lam.sk@gmail.com"
] |
michael.lam.sk@gmail.com
|
ef79cfd8757f6d078443dd0489deeb6ba381380c
|
ad6ad38511b214d1d6cc183535d0d4ff463eadf9
|
/vulscan_Project/pageUtil.py
|
83b026b6a28da1424c2632eaf0628ffc050d2d5e
|
[] |
no_license
|
0ps/VulScanner
|
abf8417f00dec8f7485246fc208a157d96207180
|
de2519655c214ebfbe56c0278e6230afaae72559
|
refs/heads/master
| 2023-08-30T18:05:44.620650
| 2021-10-22T07:25:18
| 2021-10-22T07:25:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
import re
def get_pages(page, last_page):
page_list = []
if last_page < 9:
for i in range(1, last_page + 1):
page_list.append(i)
return page_list
if page < 7:
for i in range(1, 9):
page_list.append(i)
page_list.extend(["...", last_page - 1, last_page])
elif page < last_page - 4:
page_list.extend([1, 2, "..."])
for i in range(-3, 3):
page_list.append(page + i)
page_list.extend(["...", last_page - 1, last_page])
else:
page_list.extend([1, 2, "..."])
for i in range(last_page - 7, last_page + 1):
page_list.append(i)
return page_list
def get_lastpage(count, each_num):
if count == 0:
return 1
if count % each_num == 0:
return int(count / each_num)
else:
return int(count / each_num) + 1
def get_ctx(ctx, list_name, all_list, page, last_page, query, base_path):
    ctx[list_name] = all_list  # full list displayed on this page
ctx["count"] = len(all_list)
    ctx["page"] = page  # current page number
ctx['notfirst'] = 0 if page == 1 else -1
ctx['notlast'] = 0 if page == last_page else 1
ctx['pages'] = get_pages(page, last_page)
ctx["last_page"] = last_page
ctx["query"] = query
base_path = re.sub(r"((\?)?(&)?page=\w*)", "", base_path)
ctx["page_url"] = base_path + ("&page=" if "?" in base_path else "?page=")
return ctx
|
[
"2998430232@qq.com"
] |
2998430232@qq.com
|
661644011cb7fd52e289e875561f343ed8320b55
|
7b74696ff2ab729396cba6c203984fce5cd0ff83
|
/analysis/migrations/0020_auto_20200524_0933.py
|
80926a6c092f0a2ecf1c00f9e2138a97d0a500d0
|
[
"MIT"
] |
permissive
|
webclinic017/investtrack
|
e9e9a7a8caeecaceebcd79111c32b334c4e1c1d0
|
4aa204b608e99dfec3dd575e72b64a6002def3be
|
refs/heads/master
| 2023-06-18T12:57:32.417414
| 2021-07-10T14:26:53
| 2021-07-10T14:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
# Generated by Django 3.0.2 on 2020-05-24 01:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analysis', '0019_auto_20200524_0859'),
]
operations = [
migrations.AlterField(
model_name='stockstrategytestlog',
name='event_type',
field=models.CharField(choices=[('MARK_CP', '标记临界点'), ('DOWNLOAD', '下载历史交易'), ('MARK_EXP_PCT', '标记预期涨幅'), ('UPD_CP', '更新临界点'), ('UPD_DOWNLOAD', '更新下载历史交易'), ('UPD_EXP_PCT', '更新预期涨幅'), ('MARK_LH_PCT', '标记高低点涨幅'), ('UPD_LH_PCT', '更新高低点涨幅')], max_length=50, verbose_name='日志类型'),
),
migrations.AlterField(
model_name='tradestrategystat',
name='applied_period',
field=models.CharField(blank=True, choices=[('mm', '月线'), ('30', '30分钟'), ('dd', '日线'), ('60', '60分钟'), ('wk', '周线'), ('15', '15分钟')], default='60', max_length=2, verbose_name='应用周期'),
),
]
|
[
"jie.han@outlook.com"
] |
jie.han@outlook.com
|
3e7d5739d2701988a82fde01995009a2af4221b8
|
d152aa407b78640648cdafc005c8c2f9ee722dd6
|
/lib/project_data.py
|
b8ffd2ff7bb359fb198b434c953edde43acf4ce7
|
[] |
no_license
|
bsmith89/sc-validate-haplotypes
|
7bc4033d853f5529b9159d5157484deb85832252
|
1e9325db95330b5cf3c37f9270382680876afcc1
|
refs/heads/main
| 2023-09-03T13:22:17.657279
| 2021-11-09T17:20:42
| 2021-11-09T17:20:42
| 401,805,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
def metagenotype_db_to_xarray(df):
"""Convert from project database schema to a StrainFacts metagenotype.
"""
return (
df.rename_axis(columns="allele")
.rename(columns=dict(alternative_tally="alt", reference_tally="ref"))
.rename_axis(index=dict(lib_id="sample", species_position="position"))
.stack()
.to_xarray()
.fillna(0)
.sortby("allele")
)
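# A small illustrative usage sketch (not part of the original module): the library and
# position values below are made up; only the column and index names are taken from the
# function above. Assumes pandas and xarray are installed.
if __name__ == "__main__":
    import pandas as pd
    toy = pd.DataFrame(
        {"alternative_tally": [1, 0, 3], "reference_tally": [9, 10, 7]},
        index=pd.MultiIndex.from_tuples(
            [("libA", 100), ("libA", 101), ("libB", 100)],
            names=["lib_id", "species_position"],
        ),
    )
    mgt = metagenotype_db_to_xarray(toy)
    print(mgt.sel(allele="alt"))  # alternative-allele tallies per sample/position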
|
[
"me@byronjsmith.com"
] |
me@byronjsmith.com
|
4d8b302264ff7450e02482b29129980a08e00304
|
997551673e3f08d83b966e35bb55c192f35c44c6
|
/tests/test_schemas.py
|
1dbcc7fbf49e7c01cc2445106c6ad0bd1451aece
|
[
"MIT"
] |
permissive
|
lockefox/pyRobinhood
|
6069f533f8733e8199bd22eaf9fce8da5b345aac
|
8cfd9adf384d2da9d61287f483b0038195e2f476
|
refs/heads/master
| 2021-09-06T22:42:04.548824
| 2018-02-12T20:09:28
| 2018-02-12T20:09:28
| 116,594,721
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,771
|
py
|
"""test_schemas.py: validate responses from live Robinhood.com endpoints"""
import pytest
import jsonschema
import requests
import helpers
def test_api_root():
"""validate / has all expected routes"""
endpoint_list = {
'mfa': 'https://api.robinhood.com/mfa/',
'margin_interest_charges': 'https://api.robinhood.com/cash_journal/margin_interest_charges/',
'margin_upgrades': 'https://api.robinhood.com/margin/upgrades/',
'instruments': 'https://api.robinhood.com/instruments/',
'quotes': 'https://api.robinhood.com/quotes/',
'accounts': 'https://api.robinhood.com/accounts/',
'orders': 'https://api.robinhood.com/orders/',
'subscription_fees': 'https://api.robinhood.com/subscription/subscription_fees/',
'id_documents': 'https://api.robinhood.com/upload/photo_ids/',
'portfolios': 'https://api.robinhood.com/portfolios/',
'markets': 'https://api.robinhood.com/markets/',
'wire_relationships': 'https://api.robinhood.com/wire/relationships/',
'ach_queued_deposit': 'https://api.robinhood.com/ach/queued_deposit/',
'subscriptions': 'https://api.robinhood.com/subscription/subscriptions/',
'wire_transfers': 'https://api.robinhood.com/wire/transfers/',
'dividends': 'https://api.robinhood.com/dividends/',
'notification_settings': 'https://api.robinhood.com/settings/notifications/',
'applications': 'https://api.robinhood.com/applications/',
'user': 'https://api.robinhood.com/user/',
'ach_relationships': 'https://api.robinhood.com/ach/relationships/',
'ach_deposit_schedules': 'https://api.robinhood.com/ach/deposit_schedules/',
'ach_iav_auth': 'https://api.robinhood.com/ach/iav/auth/',
'notifications': 'https://api.robinhood.com/notifications/',
'ach_transfers': 'https://api.robinhood.com/ach/transfers/',
'positions': 'https://api.robinhood.com/positions/',
'watchlists': 'https://api.robinhood.com/watchlists/',
'document_requests': 'https://api.robinhood.com/upload/document_requests/',
'edocuments': 'https://api.robinhood.com/documents/',
'password_reset': 'https://api.robinhood.com/password_reset/request/',
'password_change': 'https://api.robinhood.com/password_change/',
}
req = requests.get('https://api.robinhood.com/')
req.raise_for_status()
data = req.json()
assert data == endpoint_list
@pytest.mark.auth
def test_accounts_schema():
"""validate /accounts endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='accounts',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('accounts.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_applications_schema():
"""validate /applications endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='applications',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('applications.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_dividends_schema():
"""validate /dividends endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='dividends',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('dividends.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_documents_schema():
"""validate /documents endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='edocuments',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('documents.schema')
jsonschema.validate(result, schema)
def test_fundamentals_schema():
"""validate /fundamentals endpoint"""
result = helpers.raw_request_get(
endpoint_url='https://api.robinhood.com/fundamentals/',
params={'symbols': helpers.CONFIG.get('tests', 'good_stock_list')},
)
schema = helpers.load_schema('fundamentals.schema')
jsonschema.validate(result, schema)
def test_instruments_schema():
"""validate /instruments endpoint"""
# TODO: instruments from API ROOT
result = helpers.raw_request_get(
endpoint='instruments'
)
schema = helpers.load_schema('instruments.schema')
jsonschema.validate(result, schema)
def test_markets_schema():
"""validate /markets endpoint"""
result = helpers.raw_request_get(
endpoint='markets'
)
schema = helpers.load_schema('markets.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_mfa_schema():
"""validate /mfa endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='mfa',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('mfa.schema')
jsonschema.validate(result, schema)
def test_news_schema():
"""validate /midlands/news/ endpoint"""
# TODO: not on API ROOT?
result = helpers.raw_request_get(
endpoint_url='https://api.robinhood.com/midlands/news/',
params={'symbol': helpers.CONFIG.get('tests', 'good_stock')}
)
schema = helpers.load_schema('news.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_orders_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='orders',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('orders.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_portfolios_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='portfolios',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('portfolios.schema')
jsonschema.validate(result, schema)
def test_quotes_schema():
"""validate /quotes endpoint"""
# TODO: not on API ROOT?
result = helpers.raw_request_get(
endpoint='quotes',
params={'symbols': helpers.CONFIG.get('tests', 'good_stock_list')}
)
schema = helpers.load_schema('quotes.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_user_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='user',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('user.schema')
jsonschema.validate(result, schema)
|
[
"locke.renard@gmail.com"
] |
locke.renard@gmail.com
|
ba1050f4f449533e84541451c37cbe5468b1e375
|
be84495751737bbf0a8b7d8db2fb737cbd9c297c
|
/tests2/materials/test_ndir_btdf.py
|
68d82a3eb938def30d40fded9d7f0910a8055d81
|
[] |
no_license
|
mario007/renmas
|
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
|
bfb4e1defc88eb514e58bdff7082d722fc885e64
|
refs/heads/master
| 2021-01-10T21:29:35.019792
| 2014-08-17T19:11:51
| 2014-08-17T19:11:51
| 1,688,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
import unittest
from random import random
from tdasm import Runtime
import renmas2
class TransmissionSampling(unittest.TestCase):
def setUp(self):
pass
def asm_code1(self, ren):
code = """
#DATA
"""
code += ren.structures.structs(("hitpoint",)) + """
uint32 next_dir_ptr
hitpoint hp
#CODE
; call next direction of material
mov eax, hp
call dword [next_dir_ptr]
#END
"""
return code
def test_transmission_sampling(self):
factory = renmas2.Factory()
ren = renmas2.Renderer()
runtime = Runtime()
mat = renmas2.core.material.Material(ren.converter.zero_spectrum())
eta_in = 1.3
eta_out = 1.0
sampling = renmas2.materials.PerfectTransmissionSampling(eta_in, eta_out)
mat.add(sampling)
eta_in = ren.converter.zero_spectrum().set(1.3)
eta_out = ren.converter.zero_spectrum().set(1.0)
fresnel = renmas2.materials.FresnelDielectric(eta_in, eta_out)
spec = ren.converter.create_spectrum((0.5, 0.5, 0.5))
perf_spec = renmas2.materials.PerfectTransmission(spec, fresnel, 1.0)
mat.add(perf_spec)
normal = factory.vector(2, 4.5, 5)
normal.normalize()
hit_point = factory.vector(3, 5, 6)
wo = factory.vector(-2, 1, 0)
wo.normalize()
hp = renmas2.shapes.HitPoint(1.5, hit_point, normal, 0)
hp.wo = wo
hp.fliped = False
ren.macro_call.set_runtimes([runtime])
mat.next_direction_btdf_asm([runtime], ren.structures, ren.assembler)
mc = ren.assembler.assemble(self.asm_code1(ren))
ds = runtime.load("test", mc)
ds["next_dir_ptr"] = runtime.address_module(mat.nd_asm_name)
ds["hp.normal"] = (normal.x, normal.y, normal.z, 0.0)
ds["hp.t"] = 1.5
ds["hp.hit"] = (hit_point.x, hit_point.y, hit_point.z, 0.0)
ds["hp.wo"] = (wo.x, wo.y, wo.z, 0.0)
ds["hp.fliped"] = 0
runtime.run("test")
mat.next_direction_btdf(hp)
print ("Python")
print (hp.wi)
print (hp.ndotwi)
print (hp.specular)
print (hp.f_spectrum)
print ("ASM")
print (ds["hp.wi"])
print (ds["hp.ndotwi"])
print (ds["hp.specular"])
print (ds["hp.f_spectrum.values"])
if __name__ == "__main__":
unittest.main()
|
[
"mvidov@yahoo.com"
] |
mvidov@yahoo.com
|
f0a94d9d2d473018865e32f6034970de6d891486
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc080/B/3769232.py
|
ebaf3d3a32aa808a1603111549af9d0b2c17c8ed
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
import sys,collections
def solve():
H,W = map(int,input().split())
N = int(input())
L = list(map(int,input().split()))
ans = [["" for _ in range(W)] for _ in range(H)]
h,w = 0,0
for i,v in enumerate(L):
while v != 0:
if h % 2 == 0:
ans[h][w] = str(i+1)
v -= 1
if w == W-1:
h += 1
else:
w += 1
else:
ans[h][w] = str(i+1)
v -= 1
if w == 0:
h += 1
else:
w -= 1
for v in ans:
print(" ".join(v))
solve()
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
0550a909ac90ff9e96eb663552ef66bb57941547
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Questran_LOC_pow_f_oral_susp_SmPC.py
|
972f617ceafb8aa344c064ee17a6c67a8eae2cac
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
{'_data': [['Unknown',
[['Investigations',
u' f\xf6rkortad protrombin tid, f\xf6rl\xe4ngd protrombin tid, vikt\xf6kning, viktminskning.'],
['Blood',
u' Bl\xf6dningsben\xe4genhet, hypoprotrombinemi, anemi, lymfadenopati, ekkymos.'],
['Nervous system',
u' huvudv\xe4rk, yrsel, synkop\xe9, s\xf6mnighet, neuralgi, parestesier, dysgeusi.'],
['Eye', u' nattblindhet (med vitamin A brist), uveit.'],
['Ear', u' tinnitus, yrsel.'],
['Respiratory', u' astma, v\xe4sning, dyspn\xe9, hicka.'],
['GI',
u' f\xf6rstoppning, pankreatit, abdominal obehagsk\xe4nsla, flatulens, illam\xe5ende, kr\xe4kningar, diarr\xe9, dyspepsi, steatorr\xe9, glossit, anorektalt besv\xe4r, gastrointestinal bl\xf6dning, rektal bl\xf6dning, missf\xe4rgning av faeces, hemorroidal bl\xf6dning, duodenal ulcer bl\xf6dning, dysfagi, ulcus, proktalgi, rapning, akut buk, karies, bl\xf6dningar i munnen och intestinal obstruktion (inklusive 2 d\xf6dsfall i pediatriska patienter), divertikulit.'],
['Renal', u' hematuri, dysuri, onormal urinod\xf6r, polyuri.'],
['Skin', u' rodnad, hudirritation, n\xe4sselutslag.'],
['Musculoskeletal', u' osteoporos, ryggont, myalgi, artralgi, artrit.'],
['Metabolism',
u' vitamin A brist, vitamin K brist, vitamin D brist, acidos, hyperkloremi (hos barn) anorexi.'],
['General', u' tr\xf6tthet, \xf6dem. '],
['Hepato',
u' kolelitiasis, kalcifiering av gallbl\xe5san, gallkolik, onormala leverfunktionsv\xe4rden'],
['Psychiatric', u' \xf6kat libido, \xe5ngest.']]]],
'_pages': [3, 4],
u'_rank': 14,
u'_type': u'LSFU2'}
|
[
"urudaro@gmail.com"
] |
urudaro@gmail.com
|
38d538d07d33b4c8a2d6259cc1bca71f2d0d91b8
|
d7f45fac46598da9825a404d7511df7474237e4a
|
/ex.099.py
|
d39572b9a7a98a885df5df8b8c4d938d841ec59a
|
[] |
no_license
|
MarceloBCS/Exercicios_Curso_em_video
|
b4a8cbc8573e1303065c0cf1baad25c47d5a2fd8
|
a90fd67d83cf154f3554f962815fb791d3508d0c
|
refs/heads/master
| 2022-12-29T19:15:50.007022
| 2020-10-13T05:09:28
| 2020-10-13T05:09:28
| 303,592,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
from random import sample
from datetime import date
from time import sleep
cor = {'red':'\033[1;31m', 'limp':'\033[m', 'az':'\033[1;34m'}
def maior(k):
c = len('Analisando os valores passados...')
print('=' * c)
print('Analisando os valores passados...')
if k == 0:
n = []
if k > 0:
n = sample(range(0, 10), k)
for x in range(0, len(n)):
sleep(.2)
print(cor['red'], f'{n[x]}', cor['limp'], end='|')
print(f'\nForam informados {cor["az"]}{len(n)} valores{cor["limp"]} ao todo')
print('O {}maior{} valor gerado foi'.format(cor['az'], cor['limp']),
f'{cor["az"]}{max(n)}{cor["limp"]}' if k > 0 else f'{cor["az"]}0{cor["limp"]}')
print()
maior(6)
maior(3)
maior(2)
maior(1)
maior(0)
print(f'\nProcessado em {date.today()}')
|
[
"63213758+MarceloBCS@users.noreply.github.com"
] |
63213758+MarceloBCS@users.noreply.github.com
|
8f6a56a0cb2adb1b054194442eca236cf9d057df
|
48e4aedd813ab55fefd137ef22b2af3242012c19
|
/lib/readConfig.py
|
2b86849f4c882e1d7726174e98f56ffaede9d2e2
|
[
"MIT"
] |
permissive
|
philip-shen/MongoDB_TSEOTC_Crawler
|
0b307917846606833ee804ea2f7061c4f0cb55df
|
87d8dded2557eaca541499ccce6c8942476d3741
|
refs/heads/master
| 2020-04-17T16:47:53.638885
| 2019-01-29T13:30:26
| 2019-01-29T13:30:26
| 166,756,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
import os
import codecs
import configparser
class ReadConfig:
def __init__(self,configPath):
self.configPath=configPath
#fd = open(self.configPath)
fd = open(self.configPath, encoding='utf-8')
data = fd.read()
        # remove BOM (data was decoded as UTF-8 text, so compare with the BOM character, not bytes)
        if data[:1] == '\ufeff':
            data = data[1:]
file = codecs.open(configPath, "w")
file.write(data)
file.close()
fd.close()
self.cf = configparser.ConfigParser()
#self.cf.read(self.configPath)
self.cf.read(self.configPath,encoding='utf-8')
def get_MongoDB(self, name):
value = self.cf.get("MONGODB", name)
return value
def get_SeymourExcel(self,name):
value = self.cf.get("SeymourExcel", name)
return value
|
[
"amyfanpti@gmail.com"
] |
amyfanpti@gmail.com
|
1de34dbf8faa35f48aada6bff23b5847c55c0a3b
|
72b18602ac21c9a1f1474a52c99b45ce9b825288
|
/apptools/apptools-android-tests/apptools/versionCode.py
|
0f202eff480f6e949bb09c7a5db82b022f5fabfd
|
[
"BSD-3-Clause"
] |
permissive
|
anawhj/crosswalk-test-suite
|
3f3b4cf2de2b3dfdfd15a7a2bbf45d0827f062f7
|
9fa4c96578f6bc95ae884ee943845c8d9a62fc17
|
refs/heads/master
| 2021-01-16T17:44:21.266562
| 2015-08-04T09:38:28
| 2015-08-04T09:38:28
| 40,181,956
| 0
| 0
| null | 2015-08-04T11:57:27
| 2015-08-04T11:57:27
| null |
UTF-8
|
Python
| false
| false
| 7,418
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_versionCode_normal(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_onedot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.1")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_twodot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.0.1")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_threedot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.0.0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
return_code = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.0.0.1")
self.assertNotEquals(return_code, 0)
def test_update_app_version_out_of_range(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "1000"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
return_code = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "1000")
self.assertNotEquals(return_code, 0)
if __name__ == '__main__':
unittest.main()
|
[
"yunx.liu@intel.com"
] |
yunx.liu@intel.com
|
4fef22836d27d35cc4f8d07acc6629da007ba6d3
|
e29f8c29a993156b7de7b0451d63ad8cca51c9a6
|
/zajecia10/zadanie_domowe/zadanie_domowe6.py
|
a968a7b4bbf1f9418fa17abbe7a86929bd3704a6
|
[] |
no_license
|
remekwilk/python_basic
|
d898ad26aba809eb14ebed9d94bd93db69154ffa
|
af145a9711dabca232dc5f5be8fe4c407a5fda54
|
refs/heads/master
| 2020-05-03T14:42:19.523070
| 2020-04-11T20:42:03
| 2020-04-11T20:42:03
| 176,701,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
from loteria_funkcje import chybil_trafil, dodaj_zaklad, czy_jest_zwyciezca
wszystkie_zaklady = []
moj_zaklad = [1, 2, 3, 4]
dodaj_zaklad(moj_zaklad, wszystkie_zaklady)
for i in range(100):
losowy_zaklad = chybil_trafil()
dodaj_zaklad(losowy_zaklad, wszystkie_zaklady)
print(wszystkie_zaklady)
zwycieskie_liczby = chybil_trafil()
print('Zwycięskie liczby to dziś:', zwycieskie_liczby)
czy_jest_zwyciezca(zwycieskie_liczby, wszystkie_zaklady)
if moj_zaklad == zwycieskie_liczby:
print("HURRA! Moje liczby padły!")
|
[
"remekwilk@gmail.com"
] |
remekwilk@gmail.com
|
d7a32db31d3c7a9da78a58a494c7e6c264a6e10c
|
22671693f8bf7e11b6ec2571f187eaba94810a35
|
/day-01/day-01.py
|
66dcca8960eec6cfb05bb09e60f16a0315304f4f
|
[] |
no_license
|
timbook/advent-of-code-2018
|
9d4679d345ba9e108286b85134904ba8613e3327
|
f9b62c3528cae66b4e9e60735d83dda43ea9cc05
|
refs/heads/master
| 2022-09-29T09:05:39.191564
| 2022-08-25T02:19:16
| 2022-08-25T02:19:16
| 159,948,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import sys
import os
with open("../data/01-input.txt", 'r') as f:
lines = f.readlines()
items = [int(n.strip()) for n in lines]
print("::: PART A")
print(f"FREQUENCY: {sum(items)}\n")
record = set()
freq = 0
done = False
while True:
for i in items:
freq += i
if freq in record:
print("::: PART B")
print(f"FREQUENCY: {freq}")
sys.exit(0)
else:
record.add(freq)
|
[
"timothykbook@gmail.com"
] |
timothykbook@gmail.com
|
0cfdb3d99d88dfe1c04b0ad5e0a4aa09ee652000
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/common/Lib/distutils/unixccompiler.py
|
24a787da6e3d7bee12deb2c92f6079e414adaabd
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 8,204
|
py
|
# 2017.02.03 21:58:03 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/distutils/unixccompiler.py
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = '$Id$'
import os, sys, re
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
executables = {'preprocessor': None,
'compiler': ['cc'],
'compiler_so': ['cc'],
'compiler_cxx': ['cc'],
'linker_so': ['cc', '-shared'],
'linker_exe': ['cc'],
'archiver': ['ar', '-cr'],
'ranlib': None}
if sys.platform[:6] == 'darwin':
executables['ranlib'] = ['ranlib']
src_extensions = ['.c',
'.C',
'.cc',
'.cxx',
'.cpp',
'.m']
obj_extension = '.o'
static_lib_extension = '.a'
shared_lib_extension = '.so'
dylib_lib_extension = '.dylib'
static_lib_format = shared_lib_format = dylib_lib_format = 'lib%s%s'
if sys.platform == 'cygwin':
exe_extension = '.exe'
def preprocess(self, source, output_file = None, macros = None, include_dirs = None, extra_preargs = None, extra_postargs = None):
ignore, macros, include_dirs = self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
raise CompileError, msg
return
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so, cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
except DistutilsExecError as msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname, output_dir = None, debug = 0, target_lang = None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver + [output_filename] + objects + self.objects)
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError as msg:
raise LibError, msg
else:
log.debug('skipping %s (up-to-date)', output_filename)
def link(self, target_desc, objects, output_filename, output_dir = None, libraries = None, library_dirs = None, runtime_library_dirs = None, export_symbols = None, debug = 0, extra_preargs = None, extra_postargs = None, build_temp = None, target_lang = None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = objects + self.objects + lib_opts + ['-o', output_filename]
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == 'c++' and self.compiler_cxx:
i = 0
if os.path.basename(linker[0]) == 'env':
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError as msg:
raise LinkError, msg
else:
log.debug('skipping %s (up-to-date)', output_filename)
return
def library_dir_option(self, dir):
return '-L' + dir
def _is_gcc(self, compiler_name):
return 'gcc' in compiler_name or 'g++' in compiler_name
def runtime_library_dir_option(self, dir):
compiler = os.path.basename(sysconfig.get_config_var('CC'))
if sys.platform[:6] == 'darwin':
return '-L' + dir
elif sys.platform[:5] == 'hp-ux':
if self._is_gcc(compiler):
return ['-Wl,+s', '-L' + dir]
return ['+s', '-L' + dir]
elif sys.platform[:7] == 'irix646' or sys.platform[:6] == 'osf1V5':
return ['-rpath', dir]
elif self._is_gcc(compiler):
return '-Wl,-R' + dir
else:
return '-R' + dir
def library_option(self, lib):
return '-l' + lib
def find_library_file(self, dirs, lib, debug = 0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search('-isysroot\\s+(\\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
if sys.platform == 'darwin' and (dir.startswith('/System/') or dir.startswith('/usr/') and not dir.startswith('/usr/local/')):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
if os.path.exists(dylib):
return dylib
if os.path.exists(shared):
return shared
if os.path.exists(static):
return static
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\Lib\distutils\unixccompiler.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:58:03 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
fbfc5a0933a68da21f882740ee160c58c51967f9
|
2a63d9e571323351868f72caee8a215d1a5a3617
|
/cracking-the-coding-interview/ctci-bubble-sort.py
|
f95afbbc30efbe9aad7bbcb125a59d1ed0e049af
|
[] |
no_license
|
JavierCabezas/hackerrank
|
eb66b16904d452e69f84fc6c67beb8fa60544e88
|
2c32093e7545d621c9158fd0422e91d5955b16d9
|
refs/heads/master
| 2021-01-14T07:46:35.179276
| 2020-03-02T00:07:08
| 2020-03-02T00:07:08
| 81,884,845
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
number_swaps = 0
for i in range(n):
for j in range(n - 1):
if a[j+1] < a[j]:
a[j + 1], a[j] = a[j], a[j + 1]
number_swaps += 1
print("Array is sorted in {0} swaps.".format(number_swaps))
print("First Element: {0}".format(a[0]))
print("Last Element: {0}".format(a[-1]))
|
[
"jcleyton@gmail.com"
] |
jcleyton@gmail.com
|
cec26d6da00096d0b6d4ca3b722c3252ef390eb5
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/157_v5/test_accents.py
|
00e47c47c698345f81005a2762a0d7f4f640cf19
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
# _______ p__
#
# ____ ? _______ ?
#
# # texts taken from:
# # https://losviajesdedomi.com/las-15-ciudades-mas-bonitas-de-espana/
# # and:
# # https://www2.rocketlanguages.com/french/lessons/french-accents/
# texts
# ("Denominada en Euskera como Donostia, está "
# "situada en el Golfo de Vizcaya en la provincia "
# "de Guipúzcoa. San Sebastián no es solo conocida "
# "por su afamado festival de cine, sino también "
# "por la belleza de sus calles, las cuales tienen "
# "un corte francés y aburguesado que atraen cada "
# "año a centenares de turistas."),
# ("La capital de Cataluña, es la ciudad más visitada "
# "de España y la segunda más poblada. Barcelona es "
# "también una de las ciudades europeas más "
# "cosmopolitas y todo un símbolo cultural, "
# "financiero, comercial y turístico. Para muchos "
# "Barcelona es la ciudad más atractiva de España y "
# "una de las más bonitas."),
# ("Sevilla es la capital de Andalucía, y para muchos, "
# "la ciudad más bonita de España. Pasear por sus calles, "
# "contemplar la Giralda, la Catedral o la Torre del Oro "
# "es una auténtica gozada. En primavera el olor a azahar "
# "lo envuelve todo. Al igual que Granada, toda la ciudad "
# "es una auténtica delicia. Su clima hace propensa la "
# "visita en casi cualquier época del año."),
# ("The 5 French accents;"
# "The cédille (cedilla) Ç ..."
# "The accent aigu (acute accent) é ..."
# "The accent circonflexe (circumflex) â, ê, î, ô, û ..."
# "The accent grave (grave accent) à, è, ù ..."
# "The accent tréma (dieresis/umlaut) ë, ï, ü"),
#
# e..
# 'á', 'é', 'ñ', 'ú' ,
# 'á', 'é', 'í', 'ñ' ,
# 'á', 'é', 'í', 'ñ' ,
# 'à', 'â', 'ç', 'è', 'é', 'ê', 'ë', 'î', 'ï', 'ô', 'ù', 'û', 'ü' ,
#
#
#
# ?p__.m__.p. "text, expected", z.. ? e..
# ___ test_filter_accents text e..
# # get rid of duplicates and sort results
# result ? ?
# a.. s.. l.. s.. ?
# ... a.. __ e..
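# Editor's sketch of what this redacted template appears to ask for (the function name
# and exact behaviour are assumptions inferred from the comments above): collect the
# accented characters of a text, de-duplicated and sorted.
import unicodedata
def filter_accents(text):
    # keep alphabetic characters whose Unicode decomposition contains a combining mark,
    # i.e. accented letters such as 'é', 'ñ' or 'ü'
    return sorted({ch for ch in text.lower()
                   if ch.isalpha() and unicodedata.decomposition(ch)})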
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
a9dbe3d85b795366c42a2dca4450d111ad924806
|
f26d67e3e9f8b90e5d6243279a1c2ce87fa41d46
|
/src/prodstats/db/__init__.py
|
ccce13712ed04ac410ff119837cc10ccc21f8acd
|
[
"MIT"
] |
permissive
|
OCB-DS/prodstats
|
cf554e3abee651463e9f81606d4b633f464658a7
|
4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824
|
refs/heads/master
| 2022-11-25T15:30:06.988683
| 2020-08-02T16:08:05
| 2020-08-02T16:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
# flake8: noqa
import logging
import gino
from config import DATABASE_CONFIG, DATABASE_POOL_SIZE_MAX, DATABASE_POOL_SIZE_MIN
logger = logging.getLogger(__name__)
db: gino.Gino = gino.Gino(
naming_convention={ # passed to sqlalchemy.MetaData
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
)
async def startup(
pool_min_size: int = DATABASE_POOL_SIZE_MIN,
pool_max_size: int = DATABASE_POOL_SIZE_MAX,
): # nocover (implicitly tested with test client)
if not db.is_bound():
await db.set_bind(db.url, min_size=pool_min_size, max_size=pool_max_size)
logger.debug(f"Connected to {db.url.__to_string__(hide_password=True)}")
async def shutdown(): # nocover (implicitly tested with test client)
await db.pop_bind().close()
logger.debug(f"Disconnected from {db.url.__to_string__(hide_password=True)}")
async def create_engine() -> gino.GinoEngine:
    engine = await gino.create_engine(db.url)
    logger.debug(f"Created engine for {db.url.__to_string__(hide_password=True)}")
    return engine
def qsize():
""" Get current number of connections """
return db.bind.raw_pool._queue.qsize()
# set some properties for convenience
db.qsize, db.startup, db.shutdown, db.create_engine, db.url = (
qsize,
startup,
shutdown,
create_engine,
DATABASE_CONFIG.url,
)
|
[
"brocklfriedrich@gmail.com"
] |
brocklfriedrich@gmail.com
|
be5f84bdd762a47bd2e8d8f8e6560fa22e3d7252
|
0b20f4ce14b9ff77c84cedbecbaa29831335920d
|
/tests/common/goget/test_goget_base.py
|
6fceebcf63ce6748fcc30c3fc5158fd279e5fee7
|
[
"Apache-2.0"
] |
permissive
|
sergesec488/checkov
|
219c1b3864ab4f70b39a4cd79b041e98f3145364
|
56008e1c531b3626f14716067731be6e673040bc
|
refs/heads/master
| 2023-04-10T12:26:49.749864
| 2021-02-26T18:36:52
| 2021-02-26T18:40:58
| 342,883,133
| 0
| 1
|
Apache-2.0
| 2023-03-30T13:31:25
| 2021-02-27T15:01:08
| null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
import os
import unittest
from tests.common.goget.local_getter import LocalGetter
class TestBaseGetter(unittest.TestCase):
def test_directory_creation(self):
current_dir = os.getcwd()
getter = LocalGetter(current_dir)
result_dir = getter.get()
print(current_dir)
print(result_dir)
self.assertTrue(current_dir in result_dir)
# Cleanup
os.rmdir(getter.temp_dir)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
sergesec488.noreply@github.com
|
0b8c50f09cfd7e557aa018d9e64bb975e34ee8b2
|
acbe6bd6cefaf8b12070d7258dab30e4f7fcebed
|
/tests/introspect/testInitNonFunction.py
|
5ee1859253d354f127fef821f61f730071e1c884
|
[
"MIT"
] |
permissive
|
RogueScholar/debreate
|
02c98c5a78d33041798410f0e3b99e80fda65d00
|
dfe9bcac7333a53082b3a2ae169806cf604d59f6
|
refs/heads/master
| 2023-06-07T11:49:03.821969
| 2023-04-28T02:14:25
| 2023-04-28T02:14:25
| 253,707,766
| 0
| 0
|
MIT
| 2023-05-28T15:24:17
| 2020-04-07T06:34:47
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
# ****************************************************
# * Copyright (C) 2023 - Jordan Irwin (AntumDeluge) *
# ****************************************************
# * This software is licensed under the MIT license. *
# * See: docs/LICENSE.txt for details. *
# ****************************************************
from libdbr.logger import Logger
logger = Logger()
logger.info("'init' is string ...")
init = ""
|
[
"antumdeluge@gmail.com"
] |
antumdeluge@gmail.com
|
e8be4f8a2eb4e59320a4740f22abbba70ac66c02
|
75c4f5e6f840a14fed3e5d3e57012abf6d0e77db
|
/Pacote Dawload/Projeto progamas Python/ex1007 Diferença.py
|
5234571cda69706fd7630b149bc7a6be7226d9e4
|
[
"MIT"
] |
permissive
|
wagnersistemalima/Exercicios-Python-URI-Online-Judge-Problems---Contests
|
fc378abca0264ceb7fa5feebc57df17d1372953a
|
d839a344b899c08f4199ff1ae22dd6ee931df6a2
|
refs/heads/master
| 2022-07-18T03:52:44.941510
| 2020-05-20T01:01:38
| 2020-05-20T01:01:38
| 264,508,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
#Ex1007 Diferença 09/04/2020
valor_a = int(input())
valor_b = int(input())
valor_c = int(input())
valor_d = int(input())
diferenca = valor_a * valor_b - valor_c * valor_d
print('DIFERENCA = {}'.format(diferenca))
|
[
"wagner.sistemalima@gmail.com"
] |
wagner.sistemalima@gmail.com
|
69848d8d6b615309e741c0e5995c99da100e36b3
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/kubernetes/checks/resource/k8s/KubeControllerManagerServiceAccountCredentials.py
|
2000075d5c1086b60c6802f3d5acd29a8ffa723f
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
from typing import Any, Dict
from checkov.common.models.enums import CheckResult
from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
class KubeControllerManagerServiceAccountCredentials(BaseK8sContainerCheck):
def __init__(self) -> None:
id = "CKV_K8S_108"
name = "Ensure that the --use-service-account-credentials argument is set to true"
super().__init__(name=name, id=id)
def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
self.evaluated_container_keys = ["command"]
if conf.get("command"):
if "kube-controller-manager" in conf["command"]:
for command in conf["command"]:
if command.startswith("--use-service-account-credentials"):
value = command.split("=")[1]
if value == "true":
return CheckResult.PASSED
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeControllerManagerServiceAccountCredentials()
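# Editor's illustration (hypothetical data, not part of checkov): a kube-controller-manager
# container whose "command" list includes "--use-service-account-credentials=true" is
# reported as PASSED by scan_container_conf above, while the same flag set to any other
# value is reported as FAILED.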
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
b7495338c27e7909104e693f683854f1eacbb5ce
|
7cb3e5e16fd93e6f8a1c07c211cee16dc248ef5d
|
/venv/lib/python3.6/site-packages/django/core/mail/backends/filebased.py
|
1037f4b3d03f833ad4f62d0f4ab687ba30d55d27
|
[] |
no_license
|
JustynaJBroniszewska/Blog
|
d74a8cb19fa037b834f5218522ff1397eb60d370
|
cfd8efbcce3e23c7ebeea82b2e732de63c663ac8
|
refs/heads/master
| 2022-11-03T22:01:07.165652
| 2020-06-05T14:25:01
| 2020-06-05T14:25:01
| 266,791,768
| 0
| 0
| null | 2020-06-05T14:25:02
| 2020-05-25T13:52:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
"""Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, file_path=None, **kwargs):
self._fname = None
if file_path is not None:
self.file_path = file_path
else:
self.file_path = getattr(settings, "EMAIL_FILE_PATH", None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, str):
raise ImproperlyConfigured(
"Path for saving emails is invalid: %r" % self.file_path
)
self.file_path = os.path.abspath(self.file_path)
# Make sure that self.file_path is a directory if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured(
"Path for saving email messages exists, but is not a directory: %s"
% self.file_path
)
# Try to create it, if it not exists.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError as err:
raise ImproperlyConfigured(
"Could not create directory for saving email messages: %s (%s)"
% (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured(
"Could not write to directory: %s" % self.file_path
)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs["stream"] = None
super().__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b"\n")
self.stream.write(b"-" * 79)
self.stream.write(b"\n")
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), "ab")
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
|
[
"jj.broniszewska@gmail.com"
] |
jj.broniszewska@gmail.com
|
10e27cc0f1cdf158c62d57f1fcd0b60c721571eb
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/aadiam/v20200701preview/__init__.py
|
efff6040c30972e572a699b04d5d0c8ba770037e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from .azure_ad_metric import *
from .get_azure_ad_metric import *
from . import outputs
|
[
"github@mikhail.io"
] |
github@mikhail.io
|
3925dc3b8bf5f9bfef206788ab3039aeb80b6a9c
|
6bf4e54f8ae95582b73bb969ba44069c64e87651
|
/kdhi/main_site/migrations/0016_rok_individual_name_true.py
|
87b886ada5621d6df781553e0e2727c809ce4525
|
[] |
no_license
|
speedycowenator/kdhi_migration
|
4bc983c4656a2a87cb056461bfb4219e38da1a85
|
422b2e3f142a30c81f428fb8eaa813e4a71d56fc
|
refs/heads/master
| 2022-11-14T13:27:51.520697
| 2020-07-02T19:31:12
| 2020-07-02T19:31:12
| 246,138,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# Generated by Django 2.2.5 on 2020-03-16 03:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_site', '0015_auto_20200315_2328'),
]
operations = [
migrations.AddField(
model_name='rok_individual',
name='name_true',
field=models.CharField(default='BLANK', max_length=200),
preserve_default=False,
),
]
|
[
"54556114+speedycowenator@users.noreply.github.com"
] |
54556114+speedycowenator@users.noreply.github.com
|
6fff7a33ec4993a71006924c414459b7b912ac5e
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/goodweather/testcase/firstcases/testcase3_029.py
|
1615709d896778e9e889f0338b549707f4674cfa
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,725
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.asdtm.goodweather',
'appActivity' : 'org.asdtm.goodweather.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.asdtm.goodweather/org.asdtm.goodweather.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase029
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_detect_location\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_search_city\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/search_close_btn\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"NL\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_search_city\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_029\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.asdtm.goodweather'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
aa91781b96d7642e15834414e4aa606fa0aac250
|
a372a816373d63ad626a9947077e137eac2e6daf
|
/test/leetcode/test_SnapshotArray.py
|
f0c14e8d86207ff54939ccda9f60c2ea37835f38
|
[] |
no_license
|
DmitryPukhov/pyquiz
|
07d33854a0e04cf750b925d2c399dac8a1b35363
|
8ae84f276cd07ffdb9b742569a5e32809ecc6b29
|
refs/heads/master
| 2021-06-13T14:28:51.255385
| 2021-06-13T08:19:36
| 2021-06-13T08:19:36
| 199,842,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,469
|
py
|
from unittest import TestCase
from pyquiz.leetcode.SnapshotArray import SnapshotArray
class TestSnapshotArray(TestCase):
def test_get_closest_snap_id__123_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 2, 3], 1))
def test_get_closest_snap_id__123_2(self):
sa = SnapshotArray(1)
self.assertEqual(2, sa.get_closest_snap_id([1, 2, 3], 2))
def test_get_closest_snap_id__123_3(self):
sa = SnapshotArray(1)
self.assertEqual(3, sa.get_closest_snap_id([1, 2, 3], 3))
def test_get_closest_snap_id__1_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1], 1))
def test_get_closest_snap_id__13_2(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 3], 2))
def test_get_closest_snap_id__13_4(self):
sa = SnapshotArray(1)
self.assertEqual(3, sa.get_closest_snap_id([1, 3], 4))
def test_get_closest_snap_id__23_1(self):
sa = SnapshotArray(1)
self.assertEqual(None, sa.get_closest_snap_id([2, 3], 1))
def test_get_closest_snap_id__12_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 2], 1))
def test_get_closest_snap_id__12_2(self):
sa = SnapshotArray(1)
self.assertEqual(2, sa.get_closest_snap_id([1, 2], 2))
def test_example1(self):
"""
Example 1:
Input: ["SnapshotArray","set","snap","set","get"]
[[3],[0,5],[],[0,6],[0,0]]
Output: [null,null,0,null,5]
Explanation:
SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
snapshotArr.set(0,5); // Set array[0] = 5
snapshotArr.snap(); // Take a snapshot, return snap_id = 0
snapshotArr.set(0,6);
snapshotArr.get(0,0); // Get the value of array[0] with snap_id = 0, return 5
:return:
"""
# SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
sa = SnapshotArray(3)
# Set array[0] = 5
sa.set(0, 5)
# Take a snapshot, return snap_id = 0
snapid = sa.snap()
self.assertEqual(0, snapid)
sa.set(0, 6)
# Get the value of array[0] with snap_id = 0, return 5
out = sa.get(0, 0)
self.assertEqual(5, out)
def test_case2(self):
# SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
sa = SnapshotArray(3)
out = sa.get(2, 0)
self.assertEqual(0, out)
# Set array[0] = 5
sa.set(0, 5)
# Take a snapshot, return snap_id = 0
snapid = sa.snap()
self.assertEqual(0, snapid)
sa.set(0, 6)
snapid = sa.snap()
self.assertEqual(1, snapid)
out = sa.get(0, 0)
self.assertEqual(5, out)
out = sa.get(0, 1)
self.assertEqual(6, out)
def test_case3(self):
"""
["SnapshotArray","set","snap","snap","snap","get","snap","snap","get"]
[[1],[0,15],[],[],[],[0,2],[],[],[0,0]]
"""
sa = SnapshotArray(1)
sa.set(0, 15)
sa.snap()
sa.snap()
sa.snap()
out = sa.get(0, 1)
out = sa.get(0, 2)
self.assertEqual(15, out)
sa.snap()
sa.snap()
out = sa.get(0, 0)
self.assertEqual(out, 15)
def test_case4(self):
"""
["SnapshotArray","snap","get","get","set","get","set","get","set"]
[[2],[],[1,0],[0,0],[1,8],[1,0],[0,20],[0,0],[0,7]]
[null,0,0,0,null,8,null,20,null]
Expected
[null,0,0,0,null,0,null,0,null]
"""
sa = SnapshotArray(2)
self.assertEqual(0, sa.snap())
self.assertEqual(0, sa.get(1, 0))
self.assertEqual(0, sa.get(0, 0))
sa.set(1, 8)
self.assertEqual(0, sa.get(1, 0))
sa.set(0, 20)
self.assertEqual(0, sa.get(0, 0))
sa.set(0, 7)
def test_case5(self):
"""
["SnapshotArray","set","set","snap","get","set","snap","set","set","get","get"]
[[3],[1,18],[1,4],[],[0,0],[0,20],[],[0,2],[1,1],[1,1],[1,0]]
bad: [null,null,null,0,0,null,1,null,null,0,4]
Expected: [null,null,null,0,0,null,1,null,null,4,4]
"""
sa = SnapshotArray(3)
# ["SnapshotArray","set","set","snap","get", "set","snap","set","set","get","get"]
# [[3],[1,18],[1,4],[],[0,0], [0,20],[], [0,2],[1,1], [1,1],[1,0]]
sa.set(1, 18)
sa.set(1, 4)
self.assertEqual(0, sa.snap())
sa.set(0, 20)
sa.snap()
sa.set(0, 2)
sa.set(1, 1)
self.assertEqual(4, sa.get(1, 1))
self.assertEqual(4, sa.get(1, 0))
def test_case6(self):
"""
["SnapshotArray","set","snap","set","get","snap", "get","get","set","set", "snap","get","set","snap","snap","get","snap","get"]
[[3],[1,5],[],[1,6],[0,0],[], [0,0],[0,0],[0,11],[1,16], [],[0,1],[2,12],[],[],[0,4],[],[1,1]]
bad Output [null,null,0,null,0,1,0,0,null,null, 2,0,null,3,4,11,5,0]
Expected [null,null,0,null,0,1,0,0,null,null, 2,0,null,3,4,11,5,6]
"""
sa = SnapshotArray(3)
sa.set(1, 5)
sa.snap()
sa.set(1, 6)
self.assertEqual(0, sa.get(0, 0))
sa.snap()
self.assertEqual(0, sa.get(0, 0))
self.assertEqual(0, sa.get(0, 0))
sa.set(0, 11)
sa.set(1, 16)
def test_case7(self):
"""
["SnapshotArray","snap","snap","set","snap","get","set","get","snap","get"]
[[1],[],[],[0,4],[],[0,1],[0,12],[0,1],[],[0,3]]
Output
[null,0,1,null,2,0,null,0,3,0]
Expected
[null,0,1,null,2,0,null,0,3,12]
"""
sa = SnapshotArray(1)
sa.snap()
sa.snap()
sa.set(0, 4)
sa.snap()
self.assertEqual(0, sa.get(0, 1))
sa.snap()
self.assertEqual(0, sa.get(0, 0))
self.assertEqual(0, sa.get(0, 0))
sa.set(0, 11)
sa.set(1, 16)
    def test_case8(self):
sa = SnapshotArray(1)
sa.snap()
sa.snap()
sa.set(0,4)
sa.snap()
sa.get(0,1)
sa.get(0,12)
sa.set(0,1)
sa.snap()
sa.get(0,3)
#
#
# ["SnapshotArray","snap","snap","set","snap","get","set","get","snap","get"]
# [[1],[],[],[0,4],[],[0,1],[0,12],[0,1],[],[0,3]]
|
[
"dmitry.pukhov@gmail.com"
] |
dmitry.pukhov@gmail.com
|
b0bc199d141c65c18eb061d68f55044ec9df1e0d
|
a606893da1e354c7c617d0c9247b23118be2813a
|
/二叉树/1530.py
|
c667d4d4f62c56d93cb9d7f26420def687e14e1a
|
[] |
no_license
|
lindo-zy/leetcode
|
4ce6cb9ded7eeea0a6953b6d8152b5a9657965da
|
f4277c11e620ddd748c2a2f3d9f5f05ee58e5716
|
refs/heads/master
| 2023-07-22T06:19:00.589026
| 2023-07-16T12:35:14
| 2023-07-16T12:35:14
| 229,958,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def countPairs(self, root: TreeNode) -> int:
res = []
def dfs(root, path, counter):
if root:
if counter <= 0:
res.append(path)
return
if not root.left and not root.right:
res.append(path)
return
else:
dfs(root.left, path + [root.val], counter - root.val)
dfs(root.right, path + [root.val], counter - root.val)
dfs(root, [], 4)
return res
if __name__ == '__main__':
s = Solution()
# root = [1, None, 2, None, 3, None, 4, None, 5,]
root = [1, 2, 3, 4, None, None, 5]
from gen_tree import generate_tree
tree = generate_tree(root)
print(s.countPairs(tree))
|
[
"492201845@qq.com"
] |
492201845@qq.com
|
38829a9e4b5166f3f94813441cc6e86222184a9a
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/densitymapbox/_lon.py
|
01f0a45fe1ed12a0960dfc365a231700a0e093d3
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import _plotly_utils.basevalidators
class LonValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="lon", parent_name="densitymapbox", **kwargs):
super(LonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
654f89d213946c60e1ac0472df7ebdbd69014fe6
|
f337c975d2446cf4c2ac4fb23a3f2c540a41d23d
|
/testing/date/test_datetimezone_klass.py
|
3a3c84938a57e2077131e8f1edda2e89e62f3d8d
|
[
"MIT"
] |
permissive
|
shendel/hippyvm
|
9fca2bb479da8273a3036350f3f4e06c2d44afc9
|
26cc6675612e4ddc4d1b425d2731d34c97355c45
|
refs/heads/master
| 2021-01-15T15:31:04.689510
| 2014-04-14T08:54:33
| 2014-04-14T08:54:33
| 18,753,589
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,295
|
py
|
import pytest
from testing.test_interpreter import BaseTestInterpreter
class TestDateTimeZone(BaseTestInterpreter):
def test_constructor(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo get_class($tz);
''')
assert self.space.str_w(output[0]) == 'DateTimeZone'
def test_get_name(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo $tz->getName();
''')
assert self.space.str_w(output[0]) == 'Pacific/Nauru'
def test_get_offset(self):
output = self.run('''
$dateTimeZoneTaipei = new DateTimeZone("Asia/Taipei");
$dateTimeZoneJapan = new DateTimeZone("Asia/Tokyo");
$dateTimeTaipei = new DateTime("now", $dateTimeZoneTaipei);
$dateTimeJapan = new DateTime("now", $dateTimeZoneJapan);
echo $dateTimeZoneJapan->getOffset($dateTimeTaipei);
echo $dateTimeZoneJapan->getOffset($dateTimeJapan);
''')
assert self.space.int_w(output.pop(0)) == 32400
assert self.space.int_w(output.pop(0)) == 32400
def test_list_abbreviations(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listAbbreviations());
''')
assert self.space.int_w(output[0]) == 373
def test_list_identifiers(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listIdentifiers(128));
echo count($tz->listIdentifiers(DateTimeZone::EUROPE));
''')
assert self.space.int_w(output.pop(0)) == 56
assert self.space.int_w(output.pop(0)) == 56
def test_consts(self):
output = self.run('''
echo DateTimeZone::ASIA;
''')
assert self.space.int_w(output[0]) == 16
    def test_list_identifiers_constants(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listIdentifiers(DateTimeZone::PER_COUNTRY, 'PL'));
echo count($tz->listIdentifiers(DateTimeZone::PER_COUNTRY, 'RU'));
''')
assert self.space.int_w(output[0]) == 1
assert self.space.int_w(output[1]) == 18
def test_get_transition_1(self):
pytest.xfail("broken implementation")
output = self.run('''
$timezone = new DateTimeZone("Europe/London");
echo $timezone->getTransitions();
echo $timezone->getTransitions(2120015000);
echo $timezone->getTransitions(0);
echo $timezone->getTransitions(0, 2140045200);
echo $timezone->getTransitions(0, 2140045300);
echo $timezone->getTransitions(2140045200);
echo $timezone->getTransitions(2121901200);
''')
assert len(output.pop(0).as_pair_list(self.space)) == 243
assert len(output.pop(0).as_pair_list(self.space)) == 3
assert len(output.pop(0).as_pair_list(self.space)) == 135
assert len(output.pop(0).as_pair_list(self.space)) == 134
assert len(output.pop(0).as_pair_list(self.space)) == 135
assert len(output.pop(0).as_pair_list(self.space)) == 1
assert len(output.pop(0).as_pair_list(self.space)) == 2
def test_get_transition_2(self):
pytest.xfail("broken implementation")
output = self.run('''
$timezone = new DateTimeZone("Europe/Prague");
echo $timezone->getTransitions();
''')
first = output[0].as_list_w()[0].as_dict()
last = output[0].as_list_w()[-1].as_dict()
assert self.space.int_w(first['ts']) == -9223372036854775808
assert self.space.str_w(first['time']) == '-292277022657-01-27T08:29:52+0000'
assert self.space.int_w(first['offset']) == 7200
assert first['isdst'] == self.space.w_True
assert self.space.str_w(first['abbr']) == 'CEST'
assert self.space.int_w(last['ts']) == 2140045200
assert self.space.str_w(last['time']) == '2037-10-25T01:00:00+0000'
assert self.space.int_w(first['offset']) == 7200
assert last['isdst'] == self.space.w_False
assert self.space.str_w(last['abbr']) == 'CET'
|
[
"fijall@gmail.com"
] |
fijall@gmail.com
|
a68582856ced763fc141c32c0220df5f01620038
|
b627da650f75bdcf7e0dc0ef5c4419cf53a1d690
|
/src/zqh_devices/zqh_bootrom/zqh_bootrom_parameters.py
|
6517029efbcb015d18f624aad5b278aefbb369d4
|
[] |
no_license
|
Jusan-zyh/zqh_riscv
|
4aa8a4c51e19fb786ba0c2a120722f1382994a52
|
bccde2f81b42ac258b92c21bb450ec6ff848387a
|
refs/heads/main
| 2023-08-06T12:56:52.420302
| 2021-09-21T01:25:41
| 2021-09-21T01:25:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import sys
import os
from phgl_imp import *
from zqh_tilelink.zqh_tilelink_node_module_parameters import zqh_tilelink_node_module_parameter
from zqh_tilelink.zqh_tilelink_parameters import zqh_tl_bundle_all_channel_parameter
class zqh_bootrom_parameter(zqh_tilelink_node_module_parameter):
def set_par(self):
super(zqh_bootrom_parameter, self).set_par()
self.par('bootrom_file', '../tests/zqh_riscv_sw/bootrom/bootrom.hex.fix')
def check_par(self):
super(zqh_bootrom_parameter, self).check_par()
def address(self):
return self.extern_slaves[0].address[0]
|
[
"zhouqinghua888@163.com"
] |
zhouqinghua888@163.com
|
a1cef2729bb056ca6f889a0eb5e4cf66ed69ab4d
|
39e91ca0b536166b0a1e8ffb21b75440aa00466e
|
/dbplot/__init__.py
|
f2ea98df08eb65965acf6140b7fac972806aaf78
|
[
"Apache-2.0"
] |
permissive
|
danielfrg/dbplot
|
bd26f5560945c08324e5346519a51581e31aa830
|
fa0902c61f79e05dac9b71934b9a233658cabeba
|
refs/heads/master
| 2020-04-17T20:12:21.897004
| 2019-02-04T21:33:30
| 2019-02-04T21:33:30
| 166,895,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
"""
DBPLOT
"""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from dbplot.mpl import *
|
[
"df.rodriguez143@gmail.com"
] |
df.rodriguez143@gmail.com
|
87a7111cc408f1244c5c1e4ea5a047a52f48ac18
|
31b3ac7cc2f0cf43a4979e53d43002a9c5fb2038
|
/detect pattern of length M repeated in array1.py
|
1d723775c7a449082c18cd25734c9ebfaad3ab87
|
[] |
no_license
|
shreyansh-tyagi/leetcode-problem
|
ed31ada9608a1526efce6178b4fe3ee18da98902
|
f8679a7b639f874a52cf9081b84e7c7abff1d100
|
refs/heads/master
| 2023-08-26T13:50:27.769753
| 2021-10-29T17:39:41
| 2021-10-29T17:39:41
| 378,711,844
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
'''
Given an array of positive integers arr, find a pattern of length m that is repeated k or more times.
A pattern is a subarray (consecutive sub-sequence) that consists of one or more values, repeated multiple times consecutively without overlapping. A pattern is defined by its length and the number of repetitions.
Return true if there exists a pattern of length m that is repeated k or more times, otherwise return false.
Example 1:
Input: arr = [1,2,4,4,4,4], m = 1, k = 3
Output: true
Explanation: The pattern (4) of length 1 is repeated 4 consecutive times. Notice that pattern can be repeated k or more times but not less.
Example 2:
Input: arr = [1,2,1,2,1,1,1,3], m = 2, k = 2
Output: true
Explanation: The pattern (1,2) of length 2 is repeated 2 consecutive times. Another valid pattern (2,1) is also repeated 2 times.
Example 3:
Input: arr = [1,2,1,2,1,3], m = 2, k = 3
Output: false
Explanation: The pattern (1,2) is of length 2 but is repeated only 2 times. There is no pattern of length 2 that is repeated 3 or more times.
Example 4:
Input: arr = [1,2,3,1,2], m = 2, k = 2
Output: false
Explanation: Notice that the pattern (1,2) exists twice but not consecutively, so it doesn't count.
Example 5:
Input: arr = [2,2,2,2], m = 2, k = 3
Output: false
Explanation: The only pattern of length 2 is (2,2) however it's repeated only twice. Notice that we do not count overlapping repetitions.
Constraints:
2 <= arr.length <= 100
1 <= arr[i] <= 100
1 <= m <= 100
2 <= k <= 100
'''
class Solution:
def containsPattern(self, arr: List[int], m: int, k: int) -> bool:
a,b=list(set(arr)),[]
arr.append(0)
for i in range(len(arr)-1):
if arr[i]==arr[i+1]:
b.append(arr[i])
b=list(set(b))
for j in range(len(b)):
if arr.count(b[j])>=k and len(b)==m:
return True
else:
return False
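# Editor's note (a sketch, not part of the original solution): the method above only
# records values that repeat on adjacent positions, so it cannot describe a general
# pattern of length m (for instance arr=[1,2,1,2,1,1,1,3], m=2, k=2 from Example 2).
# A direct sliding-window check of the statement, as a hypothetical standalone helper:
def contains_pattern_bruteforce(arr, m, k):
    # try every start index that leaves room for k consecutive copies of length m
    for start in range(len(arr) - m * k + 1):
        pattern = arr[start:start + m]
        # windows [start, start+m), [start+m, start+2m), ... must all equal the pattern
        if all(arr[start + i * m:start + (i + 1) * m] == pattern for i in range(k)):
            return True
    return False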
|
[
"sunnytyagi886@gmail.com"
] |
sunnytyagi886@gmail.com
|
e78dbe71cec3d1aa3f760ae589ea001a8b971dac
|
a1080c28573e1a59ec418ad3b0b0bf18e035dc41
|
/Common Algos/Dynamic Programming/knapsack.py
|
d151e39b6b328cbf9cf0f327eb06c777b8b86c0f
|
[] |
no_license
|
AumkarG/Algorithms-and-Data-Structures
|
8c6fc21218897d2361fed1512dc6bb13eabd8842
|
03603ad579564ef213c58edd57cb8753cf8f86ba
|
refs/heads/master
| 2023-03-04T09:48:25.167519
| 2021-02-14T17:16:18
| 2021-02-14T17:16:18
| 330,424,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
n=int(input())  # number of items
m=int(input())  # knapsack capacity
w=[int(i) for i in input().split()]  # item weights
p=[int(i) for i in input().split()]  # item profits
table=[]
for i in range(n+1):
table.append([0]*(m+1))
for k in range(w[0],m+1):
table[1][k]=p[0]
for i in range(1,n):
weight=w[i]
profit=p[i]
for j in range(1,m+1):
if j<weight:
table[i+1][j]=table[i][j]
else:
table[i+1][j]=max(table[i][j],table[i][j-weight]+profit)
for i in table:
print(i)
print(table[n][m])
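# A hedged usage sketch (invocation assumed): with 4 items, capacity 7,
# weights 1 3 4 5 and profits 1 4 5 7, the script prints the DP table row by
# row and finally 9, the best profit (take the items of weight 3 and 4).
#
#   $ printf '4\n7\n1 3 4 5\n1 4 5 7\n' | python knapsack.py
#   ...
#   9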
|
[
"aumkaar,g@gmail.com"
] |
aumkaar,g@gmail.com
|
507c104bb82cec4979e1d42788699aa47f9a1f71
|
014d996ab984699bf9fccc9f92b6925fca65fea9
|
/lista/src/ex11.py
|
8e49def459b8c2aaecc146889fb7e7891ed37f0e
|
[] |
no_license
|
fernandooliveirapimenta/python
|
e04fb7dbb002761da64cdff3eac987e91e2cb43f
|
f174b0f35cd3918e5c17a96deab59e4ae7e191ab
|
refs/heads/master
| 2022-10-10T09:02:39.382974
| 2020-10-27T18:00:48
| 2020-10-27T18:00:48
| 208,432,260
| 0
| 0
| null | 2022-09-20T22:15:58
| 2019-09-14T11:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 283
|
py
|
larg = float(input('Largura da parede: '))
alt = float(input('Altura da parede: '))
area = larg * alt
print('Sua parede tem a dimensao de {}x{} e sua area é de {}m2'.format(larg, alt, area))
print('Para pintar essa parede, você vai precisar de {} litros de tinta'.format(area / 2))
|
[
"fernando.pimenta107@gmail.com"
] |
fernando.pimenta107@gmail.com
|
0bfff896c1fb061d612f9c67d83aeafadf8d1a8d
|
7b91550bb272385d74f9868b8d18bbb5757c0c7c
|
/workertier/backends/cache/memcluster/dns.py
|
bf23625a603cbf2377caa3e46fedc669300ab671
|
[
"Apache-2.0"
] |
permissive
|
krallin/tutorial-workertier
|
c0becd4566bf951df60d5d28f569ecb684433a8b
|
ab579cbe9d68adee26209a8b6b092ef9895fda65
|
refs/heads/master
| 2023-08-31T10:45:06.326063
| 2013-09-03T19:15:48
| 2013-09-03T19:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
#coding:utf-8
import logging
import socket  # needed by _get_servers_list below
from gevent import dns
from workertier.backends.cache.memcluster import BaseMemcachedClusterCache
logger = logging.getLogger(__name__)
class DNSMemcachedClusterCache(BaseMemcachedClusterCache):
def __init__(self, domain, port, timeout, refresh_signal):
super(DNSMemcachedClusterCache, self).__init__(port, timeout, refresh_signal)
self.domain = domain
def _get_servers_list(self):
ttl, ips = dns.resolve_ipv4(self.domain)
# noinspection PyUnresolvedReferences
return [socket.inet_ntoa(ip) for ip in sorted(ips)]
|
[
"thomas@orozco.fr"
] |
thomas@orozco.fr
|
16358668e72c2d4470391eb9b565ac07a520392e
|
f891828ffe9c8501d276560c8c52d319f284056f
|
/285_bst_inorder_successor_m/main.py
|
39f43c7a1696d08865e214ac086d1e2452a87734
|
[] |
no_license
|
chao-shi/lclc
|
1b852ab61fef4072039c61f68e951ab2072708bf
|
2722c0deafcd094ce64140a9a837b4027d29ed6f
|
refs/heads/master
| 2021-06-14T22:07:54.120375
| 2019-09-02T23:13:59
| 2019-09-02T23:13:59
| 110,387,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inorderSuccessor(self, root, p):
"""
:type root: TreeNode
:type p: TreeNode
:rtype: TreeNode
"""
left_tree = []
q = root
while q != p:
if q.val < p.val:
q = q.right
else:
left_tree.append(q)
q = q.left
q = p.right
if q:
while q.left != None:
q = q.left
return q
elif left_tree:
return left_tree[-1]
else:
return None
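# A minimal usage sketch, assuming the TreeNode definition commented out above.
# It builds the BST 20 -> (8 -> (4, 12), 22) and checks two successors: the
# successor of 8 is 12 (leftmost node of its right subtree) and the successor
# of 12 is 20 (the last ancestor where the search turned left).
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
if __name__ == '__main__':
    root = TreeNode(20)
    root.left, root.right = TreeNode(8), TreeNode(22)
    root.left.left, root.left.right = TreeNode(4), TreeNode(12)
    assert Solution().inorderSuccessor(root, root.left).val == 12
    assert Solution().inorderSuccessor(root, root.left.right).val == 20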
|
[
"chris19891128@gmail.com"
] |
chris19891128@gmail.com
|
19afb8673972e5cc6b22b16a764caa23317a77aa
|
7c688104f1fd816be603257116fb27040c620db3
|
/RemoveDupSortedListTwo.py
|
65ca336e74a6f2c02c62ee337edd742dcf97b492
|
[] |
no_license
|
xyzhangaa/ltsolution
|
5e22c6a12e1ba9f6dea18be69d45c6ac14bc4fe5
|
9445ba22d5f0c12fd12b17e6791d1b387f1c4082
|
refs/heads/master
| 2020-05-21T11:37:58.781437
| 2015-10-14T14:45:04
| 2015-10-14T14:45:04
| 32,817,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
###Given a sorted linked list, delete all nodes that have duplicate numbers,
###leaving only distinct numbers from the original list.
###For example,
###Given 1->2->3->3->4->4->5, return 1->2->5.
###Given 1->1->1->2->3, return 2->3.
#O(n), O(1)
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
    def RemoveDupSortedLinkedListTwo(self, head):
        if head is None or head.next is None:
            return head
        dummy = ListNode(0)
        dummy.next = head
        p = dummy
        temp = dummy.next
        while p.next:
            while temp.next and temp.next.val == p.next.val:
                temp = temp.next
            if temp == p.next:
                p = p.next
            else:
                p.next = temp.next
        return dummy.next
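# A minimal usage sketch, assuming the method stays on ListNode as written above:
# build 1->2->3->3->4->4->5, deduplicate, and expect the values [1, 2, 5].
if __name__ == '__main__':
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(3)
    head.next.next.next.next = ListNode(4)
    head.next.next.next.next.next = ListNode(4)
    head.next.next.next.next.next.next = ListNode(5)
    node = ListNode(0).RemoveDupSortedLinkedListTwo(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [1, 2, 5]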
|
[
"xyzpku06@gmail.com"
] |
xyzpku06@gmail.com
|
198a751c7ef0698faac7959a9358d589b7b908f2
|
0a65d42f4f0e491cb2aada408401b94909f821c2
|
/management/management_main/migrations/0001_initial.py
|
1b30ce4bcc4d93d51f9cdb030eb6dbbf7f77a6db
|
[] |
no_license
|
jmadlansacay/_Office
|
3acde7655784e91c7dcecfc853d4f36cdfeef028
|
7f46449b9f7e8e892e2e0025ba493259197fa592
|
refs/heads/main
| 2023-07-28T10:23:54.680822
| 2021-09-11T02:28:07
| 2021-09-11T02:28:07
| 379,155,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,415
|
py
|
# Generated by Django 2.2.5 on 2021-01-01 02:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('management_accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employees',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('employee_idno', models.CharField(max_length=4)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('middle_name', models.CharField(blank=True, max_length=50, null=True)),
('nickname', models.CharField(blank=True, max_length=50, null=True)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='management_accounts.Projects')),
],
options={
'verbose_name': 'Employee List',
},
),
migrations.CreateModel(
name='ProjectHours',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('jan_hrs', models.FloatField(default=0)),
('feb_hrs', models.FloatField(default=0)),
('mar_hrs', models.FloatField(default=0)),
('apr_hrs', models.FloatField(default=0)),
('may_hrs', models.FloatField(default=0)),
('jun_hrs', models.FloatField(default=0)),
('jul_hrs', models.FloatField(default=0)),
('aug_hrs', models.FloatField(default=0)),
('sep_hrs', models.FloatField(default=0)),
('oct_hrs', models.FloatField(default=0)),
('nov_hrs', models.FloatField(default=0)),
('dec_hrs', models.FloatField(default=0)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='management_accounts.Projects')),
],
options={
'verbose_name': 'No of Project Hour',
'unique_together': {('project', 'year')},
},
),
]
|
[
"Q034800@mhi.co.jp"
] |
Q034800@mhi.co.jp
|
f71dfa7c6489547a83673da08aed8c223511a034
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part3/updated_part2_batch/jm/parser_errors_2/101032623.py
|
d0b164169b518ca6bdd2b447b5d28c4a453d6b87
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 101032623
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 3, 2, 3)
assert board is not None
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 2) == 0
board800795293 = gamma_board(board)
assert board800795293 is not None
assert board800795293 == ("1.\n" ".1\n" "22\n")
del board800795293
board800795293 = None
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_golden_possible(board, 2) == 1
board868257597 = gamma_board(board)
assert board868257597 is not None
assert board868257597 == ("1.\n" ".1\n" "22\n")
del board868257597
board868257597 = None
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 0, 0) == 0
board272709497 = gamma_board(board)
assert board272709497 is not None
assert board272709497 == ("1.\n" ".1\n" "22\n")
del board272709497
board272709497 = None
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_free_fields(board, 1) == 1
board769041695 = gamma_board(board)
assert board769041695 is not None
assert board769041695 == ("11\n" ".1\n" "22\n")
del board769041695
board769041695 = None
gamma_delete(board)
|
[
"noreply@github.com"
] |
kozakusek.noreply@github.com
|
0e157e83d1cba0ce1ad766f371ea6de09d9138db
|
26a73e4df854313aa213882c5d3db16269a3254b
|
/hist/ks_tests.py
|
8955ce6d77eb8499cb5ac19bb688c03dcfba6f23
|
[] |
no_license
|
howonlee/vr-timeseries-analysis
|
69b18c35589d84e12ace02671a45d97443dbf333
|
bec34c7088e897fb17c424cfcd0da687223e44df
|
refs/heads/master
| 2020-04-11T10:59:04.375920
| 2015-06-11T00:13:26
| 2015-06-11T00:13:26
| 33,697,737
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
import scipy.stats as sci_stats
import numpy as np
import matplotlib.pyplot as plt
with open("total_correlations") as correlations_file:
corrs = map(lambda x: float(x.strip()), list(correlations_file))
#corrs_sum = float(sum(corrs))
#norm_corrs = [corr / corrs_sum for corr in corrs]
with open("total_gammas") as gamma_file:
gammas = map(lambda x: float(x.strip()), list(gamma_file))
#gammas_sum = float(sum(gammas))
#norm_gammas = [gamma / gammas_sum for gamma in gammas]
with open("total_cmis") as cmi_file:
cmis = map(lambda x: float(x.strip()), list(cmi_file))
#cmis_sum = float(sum(cmis))
#norm_cmis = [cmi / cmis_sum for cmi in cmis]
print sci_stats.ks_2samp(corrs, cmis)
print sci_stats.ks_2samp(corrs, gammas)
print sci_stats.ks_2samp(cmis, gammas)
|
[
"howon@howonlee.com"
] |
howon@howonlee.com
|
8000a18e2e630764b2ef77d3854c06b4ca2e8ac0
|
d10190ccc03c89032cc44738d275010eb62b46f3
|
/urlybirdlite/urls.py
|
708427a25564d74d0ed4ec804ecc5e5601ef32a7
|
[] |
no_license
|
bekkblando/urlybirdlite-fixed
|
659aa236110c088ff606c57ed23e5d0f1216ea09
|
84dec1c712210fd04628592a187154446e60ad43
|
refs/heads/master
| 2021-01-15T13:00:03.854463
| 2015-08-18T14:15:39
| 2015-08-18T14:15:39
| 38,802,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
"""urlybirdlite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import CreateView
from urlshortner.views import home, CreateBookMark, BookmarkUpdate, BookmarkDelete, profile, wtd, wtupdate
urlpatterns = [
url('^register/', CreateView.as_view(
template_name='registration/create_user.html',
form_class=UserCreationForm,
success_url='/'), name="regis"),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', 'django.contrib.auth.views.login',name="login"),
url(r'^logout', 'django.contrib.auth.views.logout', name="logout"),
url('^bookmark/', CreateBookMark.as_view(
template_name='createbookmark.html',
success_url='/profile/'), name="createbookmark"),
url(r'^wtd/', wtd, name="wtd"),
url(r'^uwtupdate/', wtupdate, name="wtupdate"),
url(r'^profile/', profile, name="profile"),
url('^delrate(?P<pk>\w+)', BookmarkDelete.as_view(
template_name='deletebookmark.html',
success_url='/profile/'), name="delbookmark"),
url('^update(?P<pk>\w+)', BookmarkUpdate.as_view(
template_name='bookmark_update.html',
success_url='/profile/'), name="updatebookmark"),
url(r'^', home, name="home"),
]
|
[
"bekkblando@gmail.com"
] |
bekkblando@gmail.com
|
9e10a26d7e2bcaab2aa9a47e0250f1b7413dd2cc
|
9bed711e5b7c4fbeb556c2cea918adcf1de94bdc
|
/app_reports/templatetags/app_reports_tags.py
|
90752faa82b4f5f8ca96d36211a37cec1d85c665
|
[] |
no_license
|
imagilex/django_main_apps
|
bc6e900db4cab2a0603f14c844e769faf93be3a3
|
fa52958a384fdb06121b17b64967a2e0c8e468cd
|
refs/heads/env_dev
| 2022-11-24T09:05:17.523336
| 2021-03-03T04:29:44
| 2021-03-03T04:29:44
| 232,409,309
| 0
| 1
| null | 2022-11-22T05:47:32
| 2020-01-07T20:17:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
from django import template
from app_reports.models import Esfera
register = template.Library()
@register.inclusion_tag('app_reports/esfera/card.html')
def esfera_card(user, context):
"""
    Inclusion tag: {% esfera_card user context %}
"""
esferas = []
for esfera in Esfera.objects.all():
if esfera.accesible_by(user):
esferas.append(esfera)
return {'esferas': esferas, 'context': context}
@register.filter
def esfera_accesible_by(esfera, user):
"""
    Filter: {% if esfera|esfera_accesible_by:user %}
    Returns True if the user has permission to access the esfera
    Parameters
    ----------
    esfera : Esfera object
    user : User object
    Returns
    -------
    boolean
        True if the user can access the esfera, False otherwise
"""
return esfera.accesible_by(user)
@register.filter
def dimension_accesible_by(dimension, user):
"""
    Filter: {% if dimension|dimension_accesible_by:user %}
    Returns True if the user has permission to access the
    report dimension
    Parameters
    ----------
    dimension : DimensionReporte object
    user : User object
    Returns
    -------
    boolean
        True if the user can access the report dimension,
        False otherwise
"""
return dimension.accesible_by(user)
@register.filter
def reporte_accesible_by(reporte, user):
"""
    Filter: {% if reporte|reporte_accesible_by:user %}
    Returns True if the user has permission to access the report
    Parameters
    ----------
    reporte : Reporte object
    user : User object
    Returns
    -------
    boolean
        True if the user can access the report, False otherwise
"""
return reporte.accesible_by(user)
@register.inclusion_tag('app_reports/esfera/menu_opc.html')
def dimension_as_menu(esfera, dimension, user, nivel=0):
"""
Inclusion tag: {% dimension_as_menu esfera dimension user nivel %}
"""
nivel = int(nivel) + 1
return {
'esfera': esfera,
'dimension': dimension,
'user': user,
'nivel': nivel}
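# A hedged template-side sketch (template variable names are assumptions): the
# library is loaded by its module name under templatetags/, then the inclusion
# tag and filters are used as documented above.
#
#   {% load app_reports_tags %}
#   {% esfera_card user context %}
#   {% if esfera|esfera_accesible_by:user %} ... {% endif %}
#   {% dimension_as_menu esfera dimension user 0 %}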
|
[
"rramirez@junkyard.mx"
] |
rramirez@junkyard.mx
|
400887601c2793e3af1e6dc4a7b58058d3bfc0b5
|
d8546aa0fc53775300b0f9d86cb705b0e5157890
|
/arguments3.py
|
774bd879eab7653b57e5e9a9eaa21b5a18a1a8fd
|
[] |
no_license
|
larago/click
|
ecf02e7462ada4c41e685480dde239ece2f1749d
|
89bedb148398965a9046777b18d3e02e10c1630b
|
refs/heads/master
| 2021-04-30T16:26:20.722104
| 2017-01-26T12:41:21
| 2017-01-26T12:41:21
| 80,103,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
#encoding=utf8
import click
@click.command()
@click.argument('src', nargs=-1)
@click.argument('dst', nargs=1)
def move(src, dst):
click.echo('move %s to %s' % (src, dst))
move()
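# A hedged usage sketch (file name and arguments are assumptions):
#   $ python arguments3.py a.txt b.txt backup/
#   move ('a.txt', 'b.txt') to backup/
# 'src' collects every argument except the last into a tuple (nargs=-1) and
# 'dst' takes the final one; the exact tuple formatting differs slightly
# between Python 2 and Python 3.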
|
[
"bingeye@foxmail.com"
] |
bingeye@foxmail.com
|
e4a65033a44b01d85d2d3eaec791905f28567ada
|
a25223f5be818b549323e8232c31a606fcca275a
|
/work/lib_learn/fuzzy_learn.py
|
dbb281d2d370d084e8bde9994599c3168320dde1
|
[] |
no_license
|
Jsonming/workspace
|
4ef1119606b3c138ff9594b3e0cf16de8077e28d
|
2ac1b07d85deeb611600f5e64083c4eb0688fdb4
|
refs/heads/master
| 2020-06-13T04:23:15.492080
| 2020-03-31T10:34:30
| 2020-03-31T10:34:30
| 194,531,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/30 10:48
# @Author : yangmingming
# @Site :
# @File : fuzzy_learn.py
# @Software: PyCharm
|
[
"ymmbjcz@126.com"
] |
ymmbjcz@126.com
|
6753a86ca1666946b2512abe25045282bb3863fc
|
a110805b0e0cf26d1da8e6276ec6883ed4297752
|
/SOLUCIONES/SOLUCIONES/intermedio I/ejemplos/fecha_hora_sistema.py
|
5668b7246f9d8e1eddcdf6fb495eda480aa0f7b3
|
[] |
no_license
|
dayes/curso_Python
|
a1e77725bd8ab4c287589f15e36849817bcb39e8
|
352b0505a5e3d6f3310893b5c87d1eab31a2a66d
|
refs/heads/master
| 2020-03-22T07:37:54.487944
| 2018-07-13T10:59:40
| 2018-07-13T10:59:40
| 139,713,481
| 0
| 0
| null | 2018-07-04T11:40:03
| 2018-07-04T11:34:55
| null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# system date and time:
import time
t = time.strftime("%H:%M:%S")
d = time.strftime("%d/%m/%y")
d2 = time.strftime("%d/%m/%Y")
dt = time.strftime("%d/%m/%Y %H:%M:%S")
print ("time: ", t)
print ("date: ", d)
print ("date: ", d2)
print ("datetime: ", dt)
|
[
"david@MacBook-Air-de-David.local"
] |
david@MacBook-Air-de-David.local
|