blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7729b8232868cb134a2e295ce3058f8047fe5360
|
f24050f9be7f089ebe97857b4d6dc67a7dda17a8
|
/poetry/pozzi/python/lite_call_runtime_top.py
|
ce7c56220fe7521b3d4e5f2d6867c1d30028cf4b
|
[] |
no_license
|
ntsourakis/regulus-python
|
63bffdfbf5ba5c09e60e3d729d310edfd961d79b
|
91830264e0476ccaaf7ccec83e8bb8ca32a9a4fe
|
refs/heads/master
| 2020-04-24T10:33:49.220740
| 2019-04-29T17:11:33
| 2019-04-29T17:11:33
| 171,897,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
#!/usr/bin/python
import pozzi.python.lite_call_runtime as call_main
import os
dir_path = os.path.dirname(os.path.realpath(__file__))  # absolute directory containing this module
# Use this to load table file from canonical place in zipfile (speech interface)
def init():
    """Load the call and matching tables from the zipfile root (speech interface)."""
    return call_main.init_state('call_tables.data.gz',
                                'robust_matching_tables.data.gz')
# Use this to load table file from canonical place in zipfile (web-server interface)
def init_basic():
    """Load the tables from this module's own directory (web-server interface)."""
    table_path = dir_path + '/call_tables.data.gz'
    matching_path = dir_path + '/robust_matching_tables.data.gz'
    return call_main.init_state_basic(table_path, matching_path)
# Top-level call for Alexa version: string to string
def string_and_state_to_action(String, State):
    """Delegate to call_main.string_and_state_to_action_main (Alexa interface)."""
    return call_main.string_and_state_to_action_main(String, State)

# Top-level call for web-server version: json to json
def message_and_state_to_message(Message, State):
    """Delegate to call_main.process_call_message (web-server interface)."""
    return call_main.process_call_message(Message, State)

# Top-level call for doing robust matching (either version)
def robust_match(String, State, N):
    """Delegate to call_main.robust_match_string; N presumably bounds the matches returned — confirm in call_main."""
    return call_main.robust_match_string(String, State, N)
# Convenient for testing on local machine (Alexa apps)
def init_local(Dir0, CompiledDir='c:/cygwin64/home/speech/reguluslitecontent-svn/trunk/litecontent/alexa_content/compiled/'):
    """Load tables for the content package Dir0 from a local compiled tree.

    CompiledDir generalizes the previously hard-coded machine-specific path;
    its default preserves the old behavior, so existing callers are unaffected.
    """
    content_dir = CompiledDir + Dir0 + '/'
    table_file = content_dir + 'call_tables.data.gz'
    matching_file = content_dir + 'robust_matching_tables.data.gz'
    return call_main.init_state(table_file, matching_file)
# Possible values:
# 'quel_animal'
# 'zahlenspiel'
# 'welches_tier'
# 'number_game'
# 'which_language'
# 'which_movie'
# 'jeu_de_chiffres'
# 'quelle_langue'
# Convenient for testing on local machine (web-server apps)
def init_dante():
    """Load the Dante content tables from the local compiled directory (web-server testing)."""
    dante_dir = 'c:/cygwin64/home/speech/reguluslitecontent-svn/trunk/litecontent/alexa_content/compiled/dante/'
    return call_main.init_state_basic(dante_dir + 'call_tables.data.gz')
# import lite_call_runtime_top as call
# (State, Init, Bad) = call.init_local('quelle_langue')
# call.string_and_state_to_action('aide', State)
# call.robust_match('vassili', State, 2)
# State = call.init_dante()
# call.message_and_state_to_message(['get_available_lessons'], State)
# call.message_and_state_to_message(['set_lesson_by_name', 'Inferno I 1-30'], State)
# call.message_and_state_to_message(['help_file'], State)
# call.message_and_state_to_message(['spoken_help'], State)
# call.message_and_state_to_message(['match', 'mi ritrovai per una selva oscura'], State)
|
[
"Nikolaos.Tsourakis@unige.ch"
] |
Nikolaos.Tsourakis@unige.ch
|
5101714fc9c01a1ff9534e8afcec0f66f825348c
|
742956eb16ebc9ec802929a3ffde7377bbdd461f
|
/hackbright.py
|
1ab453681028f9eded0400da2822ef13a84ab553
|
[] |
no_license
|
mashikro/hb-project-tracker-flask
|
bb7a964d1d1be90f2f6a608f84766ad4783ad87e
|
7a3fc23324d9ab2de3b3c8fc72164f32e8a6494f
|
refs/heads/master
| 2021-06-24T14:39:31.201648
| 2019-10-24T00:53:45
| 2019-10-24T00:53:45
| 217,180,943
| 0
| 0
| null | 2021-03-20T02:01:22
| 2019-10-24T00:54:35
|
Python
|
UTF-8
|
Python
| false
| false
| 5,311
|
py
|
"""Hackbright Project Tracker.
A front-end for a database that allows users to work with students, class
projects, and the grades students receive in class projects.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)  # Flask application wired up in the __main__ block below

db = SQLAlchemy()  # shared SQLAlchemy handle; bound to the app in connect_to_db()
def connect_to_db(app):
    """Connect the database to our Flask app."""
    settings = {
        'SQLALCHEMY_DATABASE_URI': 'postgresql:///hackbright',
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
    }
    app.config.update(settings)
    db.app = app
    db.init_app(app)
def get_student_by_github(github):
    """Print and return (first_name, last_name, github) for the matching student."""
    sql = """
        SELECT first_name, last_name, github
        FROM students
        WHERE github = :github
        """
    cursor = db.session.execute(sql, {'github': github})
    row = cursor.fetchone()
    first, last, account = row
    print(f"Student: {first} {last}\nGitHub account: {account}")
    return row  # row of (first name, last name, github)
def make_new_student(first_name, last_name, github):
    """Add a new student and print confirmation.

    Given a first name, last name, and GitHub account, add student to the
    database and print a confirmation message.
    """
    sql = """
        INSERT INTO students (first_name, last_name, github)
        VALUES (:first_name, :last_name, :github)
        """
    values = {
        'first_name': first_name,
        'last_name': last_name,
        'github': github,
    }
    db.session.execute(sql, values)
    db.session.commit()
    print(f"Successfully added student: {first_name} {last_name}")
def get_project_by_title(title):
    """Print and return (title, description, max_grade) for the named project."""
    sql = """
        SELECT title, description, max_grade
        FROM projects
        WHERE title = :title
        """
    cursor = db.session.execute(sql, {'title': title})
    row = cursor.fetchone()
    name, description, max_grade = row
    print(f"Title: {name}\nDescription: {description}\nMax Grade: {max_grade}")
    return row
def get_grade_by_github_title(github, title):
    """Print and return the grade a student received for one project."""
    sql = """
        SELECT grade
        FROM grades
        WHERE student_github = :github
          AND project_title = :title
        """
    cursor = db.session.execute(sql, {'github': github, 'title': title})
    row = cursor.fetchone()
    print(f"Student {github} in project {title} received grade of {row[0]}")
    return row
def assign_grade(github, title, grade):
    """Assign a student a grade on an assignment and print a confirmation."""
    QUERY = """
        INSERT INTO grades (student_github, project_title, grade)
        VALUES (:github, :title, :grade)
        """
    # An INSERT yields no rows, so the result proxy is discarded (the original
    # bound it to an unused local variable).
    db.session.execute(QUERY, {'github': github,
                               'title': title,
                               'grade': grade})
    db.session.commit()
    print(f"Successfully assigned grade of {grade} for {github} in {title}")
def get_grades_by_github(github):
    """Print and return all (project_title, grade) rows for one student."""
    sql = """
        SELECT project_title, grade
        FROM grades
        WHERE student_github = :github
        """
    cursor = db.session.execute(sql, {'github': github})
    grade_rows = cursor.fetchall()
    for project_title, grade in grade_rows:
        print(f"Student {github} received grade of {grade} for {project_title}")
    return grade_rows
def get_grades_by_title(title):
    """Print and return all (student_github, grade) rows for one project."""
    sql = """
        SELECT student_github, grade
        FROM grades
        WHERE project_title = :title
        """
    cursor = db.session.execute(sql, {'title': title})
    grade_rows = cursor.fetchall()
    for student_github, grade in grade_rows:
        print(f"Student {student_github} received grade of {grade} for {title}")
    return grade_rows
def handle_input():
    """Main loop.

    Repeatedly prompt for commands, performing them, until 'quit' is received
    as a command.
    """
    command = None

    while command != "quit":
        input_string = input("HBA Database> ")
        tokens = input_string.split()
        if not tokens:
            # A blank line used to crash on tokens[0]; just re-prompt instead.
            continue
        command = tokens[0]
        args = tokens[1:]

        if command == "student":
            github = args[0]
            get_student_by_github(github)

        elif command == "new_student":
            first_name, last_name, github = args  # unpack!
            make_new_student(first_name, last_name, github)

        elif command == "project":
            title = args[0]
            get_project_by_title(title)

        elif command == "grade":
            github, title = args
            get_grade_by_github_title(github, title)

        elif command == "assign_grade":
            github, title, grade = args
            assign_grade(github, title, grade)

        elif command == "student_grades":
            github = args[0]
            get_grades_by_github(github)

        elif command == "project_grades":
            title = args[0]
            get_grades_by_title(title)
if __name__ == "__main__":
    # Wire the app to the database, then run the interactive command loop.
    connect_to_db(app)
    handle_input()

    # To be tidy, we'll close our database connection -- though, since this
    # is where our program ends, we'd quit anyway.
    db.session.close()
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
b8db2d856d22439d7469fcfed29803ac47f6a361
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/Others/past/past202004-open/n.py
|
f5270ea558cfda85c4314921ba5f9252f01ad907
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
# -*- coding: utf-8 -*-
def main():
    """Solution template entry point: prepare fast stdin reading (no I/O yet)."""
    from sys import stdin
    input = stdin.readline  # placeholder; overwritten per problem


if __name__ == '__main__':
    main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
d84017fd7fe042e521b48cd24401a9e9513723e5
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/12165092.py
|
401f428ce7c24c5fb7a3cddf1f6b4d44312d9daa
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12165092.py generated: Wed, 25 Jan 2017 15:25:34
#
# Event Type: 12165092
#
# ASCII decay Descriptor: [B+ -> (D+ => K- pi+ pi+) K+ pi- ]cc
#
from Configurables import Generation

# Event type being generated (matches the decay file referenced below).
Generation().EventType = 12165092
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# User decay file for [B+ -> (D+ => K- pi+ pi+) K+ pi-]cc (square-Dalitz model).
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_D+K+pi-,Kpipi=sqDalitz.dec"
Generation().SignalRepeatedHadronization.CutTool = ""
# Signal PID list: B+ / B- (PDG code +-521).
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]

# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = ""

from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )

# Sample the B momentum from a (pt, eta) spectrum histogram file.
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"

# Smear the production vertex with the beam-spot profile.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12165092
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
a43676dc807faaf5ff96bb4b2e5d3f8aee04c123
|
9cc3135d5fcd781c0542a905c61dc19b0ceeffef
|
/file_reader_line.py
|
66d355dba2173774df41579cbc1cc0eb3dafa21e
|
[] |
no_license
|
bkalcho/python-crash-course
|
411d8af223fb6974d4f890c0f82c9e56b062359c
|
8425649a2ecd5abeeb438e816400f270d937758e
|
refs/heads/master
| 2022-09-11T13:47:56.837256
| 2022-08-23T10:04:35
| 2022-08-23T10:04:35
| 69,810,386
| 14
| 8
| null | 2022-08-23T10:04:36
| 2016-10-02T17:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 250
|
py
|
# Author: Bojan G. Kalicanin
# Date: 05-Oct-2016
# Program that reads file line by line and prints line by line
# on the stdout
filename = 'pi_digits.txt'

# Stream the file and echo each line with its trailing newline stripped.
with open(filename) as digits_file:
    for digit_line in digits_file:
        print(digit_line.rstrip())
|
[
"bojan.g.kalicanin@gmail.com"
] |
bojan.g.kalicanin@gmail.com
|
0316e6973b49d6d7e409eef2969a5e273989a715
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03437/s534128723.py
|
39f835745095b832d314d35d1a94be11cc79a200
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from sys import stdin
import fractions
def lcm(x, y):
    """Return the least common multiple of positive integers x and y."""
    # fractions.gcd (used originally) was removed in Python 3.9; math.gcd is
    # the supported equivalent and agrees for the positive inputs used here.
    from math import gcd
    return (x * y) // gcd(x, y)
# Read n and m from the first line of stdin.
n,m = [int(x) for x in stdin.readline().rstrip().split()]
# lcm(n, m) == n exactly when m divides n: then every multiple of n is also a
# multiple of m and no answer exists (-1); otherwise n itself is an answer.
if lcm(n,m) == n:
    print(-1)
else:
    print(n)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
473f81fd11b029ce5acfd36114b5f5d320f145cd
|
6c5daf5133656a33574dc2f5b62b9f1a1bdf1390
|
/Elec Power Chongqing/2021/old/dataForecasting.py
|
fdbd4edc3a5ba2ca8971ae189ad93166be7ca73e
|
[] |
no_license
|
RobinChen121/Python-Practices
|
6c10b721dce3a8d2b76e190959d0940c52f0d1cc
|
85bd9ad30c245dd62dc7ea837f964eaecbe24ed9
|
refs/heads/master
| 2023-08-31T10:08:01.613828
| 2023-08-27T14:51:46
| 2023-08-27T14:51:46
| 142,564,793
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,716
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 14:41:47 2021
@author: zhen chen
MIT Licence.
Python version: 3.8
Description: forecast based on the last 12 months
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf  # pacf: partial autocorrelation

sns.set()
# Use the SimHei font so Chinese plot labels render; keep the minus sign readable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Load the cleaned consumption data and aggregate the 'sum' column per day.
ini_data = pd.read_excel(r'sumCleanData.xlsx')
ini_data['日期'] = pd.to_datetime(ini_data['日期']).dt.strftime('%y-%m-%d')
group_data = ini_data[['日期', 'sum']].groupby('日期').sum()
group_data.plot(kind = 'line', title = '所有用户用电量之和')

# ADF stationarity test: if the series is non-stationary, difference it until it is.
result = adfuller(group_data.values)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])

# The differencing order can also be read off the ACF of the differenced series.
# Chosen here: differencing order d = 1, moving-average order q = 0.

# Original Series
fig, axes = plt.subplots(3, 2)
axes[0, 0].plot(group_data.values);
axes[0, 0].set_title('原始数据')
plot_acf(group_data.values, ax=axes[0, 1], title = '自相关系数')

# 1st Differencing
axes[1, 0].plot(group_data.diff().dropna().values);
axes[1, 0].set_title('一阶差分')
plot_acf(group_data.diff().dropna().values, ax=axes[1, 1], title = '自相关系数')

# 2nd Differencing
axes[2, 0].plot(group_data.diff(2).dropna().values);
axes[2, 0].set_title('二阶差分')
plot_acf(group_data.diff(2).dropna().values, ax=axes[2, 1], title = '自相关系数')

plt.setp(plt.gcf().get_axes(), xticks=[]); # gcf: get current figure
plt.show()

# print()
# result = adfuller(group_data.diff().dropna().values)
# print('ADF Statistic: %f' % result[0])
# print('p-value: %f' % result[1])

# Determine the autoregressive order from the PACF; chosen p = 1.
fig, axes = plt.subplots(1, 2)
axes[0].plot(group_data.diff().dropna().values); axes[0].set_title('1st Differencing')
plot_pacf(group_data.diff().dropna().values, ax=axes[1])
plt.show()

# build ARIMA Model
model = sm.tsa.ARIMA(group_data.values, order=(1,1,1))
model_fit = model.fit()
print(model_fit.summary())

# # Plot residual errors
# residuals = pd.DataFrame(model_fit.resid)
# fig, ax = plt.subplots(1,2)
# residuals.plot(title="Residuals", ax=ax[0])
# residuals.plot(kind='kde', title='Density', ax=ax[1])  # density plot; KDE: Kernel Density Estimate
# plt.show()

# Actual vs Fitted
plt.figure()
predict = model_fit.predict(start=1, end = 380)
plt.plot(range(366), group_data['sum'], 'm', label = '原始数据')
plt.plot(range(380), predict, 'r:', label = '预测数据')
plt.legend()
plt.show()
# NOTE(review): savefig is called after plt.show(); with interactive backends the
# figure may already be closed, so the saved PNG can come out blank — confirm intent.
plt.savefig('forecast.png', dpi=1000)
# df1 = ini_data[ini_data.户号对应 == 1]
# df1.plot(x = '日期', y = 'sum', kind = 'line', title = '用户1的历史用电量')
# # df2 = ini_data[ini_data.户号对应 == 2]
# # df2.plot(x = '日期', y = 'sum', kind = 'line', title = '用户2的历史用电量')
# df3 = ini_data[ini_data.户号对应 == 3]
# df3.plot(x = '日期', y = 'sum', kind = 'line', title = '用户3的历史用电量')
# df4 = ini_data[ini_data.户号对应 == 4]
# df4.plot(x = '日期', y = 'sum', kind = 'line', title = '用户4的历史用电量')
# df5 = ini_data[ini_data.户号对应 == 5]
# df5.plot(x = '日期', y = 'sum', kind = 'line', title = '用户5的历史用电量')
# df6 = ini_data[ini_data.户号对应 == 6]
# df6.plot(x = '日期', y = 'sum', kind = 'line', title = '用户6的历史用电量')
# df8 = ini_data[ini_data.户号对应 == 8]
# df8.plot(x = '日期', y = 'sum', kind = 'line', title = '用户8的历史用电量')
|
[
"40953071+RobinChen121@users.noreply.github.com"
] |
40953071+RobinChen121@users.noreply.github.com
|
67f5b410a9c362544f83edcb25f34d9f24d4fc1f
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/rest_api/urls.py
|
43be019b07eb85024ce8607bfd3fde3f44544c10
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179
| 2021-10-02T03:51:12
| 2021-10-02T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
from django.conf.urls import include, url
from .api_views import (
APIRoot, APIVersionRoot, BrowseableObtainAuthToken,
ProjectInformationAPIView, schema_view
)
from .literals import API_VERSION
# URLs served under a specific API version prefix (v{API_VERSION}/...).
api_version_urls = [
    url(regex=r'^$', name='api_version_root', view=APIVersionRoot.as_view()),
    url(
        regex=r'^auth/token/obtain/$', name='auth_token_obtain',
        view=BrowseableObtainAuthToken.as_view()
    ),
    url(
        regex=r'^project/$', name='project_information',
        view=ProjectInformationAPIView.as_view()
    )
]

# Version-independent API URLs: raw schema download plus the versioned subtree.
api_urls = [
    url(
        regex=r'^swagger(?P<format>.json|.yaml)$', name='schema-json',
        view=schema_view.without_ui(cache_timeout=None),
    ),
    url(regex=r'^v{}/'.format(API_VERSION), view=include(api_version_urls)),
    url(regex=r'^$', name='api_root', view=APIRoot.as_view()),
]

# Top-level patterns: documentation UIs first, then everything else.
# Order matters: the catch-all r'^' include must stay last.
urlpatterns = [
    url(
        regex=r'^swagger/ui/$', name='schema-swagger-ui',
        view=schema_view.with_ui('swagger', cache_timeout=None)
    ),
    url(
        regex=r'^redoc/ui/$', name='schema-redoc',
        view=schema_view.with_ui('redoc', cache_timeout=None)
    ),
    url(regex=r'^', view=include(api_urls)),
]
|
[
"79801878+Meng87@users.noreply.github.com"
] |
79801878+Meng87@users.noreply.github.com
|
0a2f3cfff69d681b3500ecf3a9d62ad75e684c68
|
431a1f738b1edfba7dad8d10a6b7520d51d917cb
|
/Samples/UserSamples/2018/VBFConfig.py
|
0f6c4d5699d172ffbfdbde1760a8050c5fd41cbf
|
[] |
no_license
|
aloeliger/DatacardCreator
|
5ce702e46fbb77e843b44d8fe088c2645a4a8f66
|
5c7e890276a5be079ed3b677a471c1dcadcba52d
|
refs/heads/master
| 2022-02-26T19:52:30.563747
| 2022-02-16T20:24:48
| 2022-02-16T20:24:48
| 215,602,523
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.Signal_JES_18 import JES18Uncertainty
from Samples.Uncertainties.UserUncertainties.JER import JERUncertainty
from Samples.Uncertainties.UserUncertainties.MetRecoil import MetRecoilUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
#from Samples.Uncertainties.UserUncertainties.Prefiring import PrefiringUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.Uncertainties.UserUncertainties.qqHTheory import qqHTheoryUncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.qqH_QCD_AcceptanceUncertainties.qqH_scale_Inclusive_Uncertainty import qqH_scale_Inclusive_Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
# Definition of the 2018 VBF (qqH, H -> tau tau, 125 GeV) signal sample.
VBFSample = Sample()
VBFSample.name = 'qqH_htt125'
VBFSample.path = '/data/aloeliger/SMHTT_Selected_2018_Deep/'
VBFSample.files = ['VBF.root']
VBFSample.definition = ''  # empty selection string — presumably no extra cut; confirm in Sample

# Systematic uncertainties attached to this sample. Prefiring is commented out
# upstream as well — keep in sync with the other 2018 sample configs.
VBFSample.uncertainties = [
    TESUncertainty(),
    JES18Uncertainty(),
    JERUncertainty(),
    MetRecoilUncertainty(),
    MuonESUncertainty(),
    # PrefiringUncertainty(),
    TauIDUncertainty(),
    Trigger1718Uncertainty(),
    qqHTheoryUncertainty(),
    qqH_scale_Inclusive_Uncertainty(),
]
VBFSample.eventDictionaryInstance = MuTauEventDictionary
# Use the standard per-event weight implementation provided by Sample.
VBFSample.CreateEventWeight = VBFSample.CreateEventWeight_Standard
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
4e1002d9ce5286e189a43928b766b6ff72a4dbff
|
01926621374435f7daf622f1ef04a51f94e3e883
|
/litex/build/quicklogic/platform.py
|
fbd200cb2efd6636f27feeb7075a6e6e6f0658c1
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
betrusted-io/litex
|
d717513e41ff6aba54ac172e886c21479aa41752
|
8109a8e91ca8321483ccc2f58bd4fed5379bbd18
|
refs/heads/master
| 2022-11-23T07:11:35.297128
| 2022-02-22T11:55:00
| 2022-02-22T11:55:00
| 231,203,917
| 3
| 0
|
NOASSERTION
| 2020-01-01T10:48:06
| 2020-01-01T10:48:05
| null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
#
# This file is part of LiteX.
#
# Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
from litex.build.generic_platform import GenericPlatform
from litex.build.quicklogic import common, symbiflow
# QuickLogicPlatform -------------------------------------------------------------------------------
class QuickLogicPlatform(GenericPlatform):
    """LiteX platform for QuickLogic devices, built on the Symbiflow toolchain."""
    bitstream_ext = ".bit"

    def __init__(self, device, *args, toolchain="symbiflow", **kwargs):
        GenericPlatform.__init__(self, device, *args, **kwargs)
        # Symbiflow is currently the only supported toolchain.
        if toolchain != "symbiflow":
            raise ValueError(f"Unknown toolchain {toolchain}")
        self.toolchain = symbiflow.SymbiflowToolchain()

    def get_verilog(self, *args, special_overrides=dict(), **kwargs):
        """Generate Verilog, layering caller overrides on the QuickLogic defaults."""
        merged_overrides = dict(common.quicklogic_special_overrides)
        merged_overrides.update(special_overrides)
        return GenericPlatform.get_verilog(self, *args,
            special_overrides = merged_overrides,
            attr_translate    = self.toolchain.attr_translate,
            **kwargs)

    def build(self, *args, **kwargs):
        """Delegate the bitstream build to the selected toolchain."""
        return self.toolchain.build(self, *args, **kwargs)
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
c480f46b0c551272158063ee08ae7ef47fb91801
|
6b5c67590979627a97b7d8f0d9fc131b63fa817d
|
/cgettext.py
|
11081dd45c063fcc7fa697958c11031a104e4612
|
[
"MIT"
] |
permissive
|
eevee/cgettext
|
303357e28349a6cdd906a3e5ffb2fc6889041f37
|
9efa06369c19c0631dbebbc2f45f787b4cd01eb5
|
refs/heads/master
| 2016-09-05T09:01:11.343350
| 2014-06-27T20:03:10
| 2014-06-27T20:03:10
| 19,359,054
| 1
| 0
| null | 2014-05-31T01:02:46
| 2014-05-01T21:56:14
|
Python
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
# Try to use the optional C accelerator for catalog parsing; when it is not
# built, transparently re-export the stdlib class so callers are unaffected.
try:
    from _cgettext import c_parse
except ImportError:
    # No C module available; just re-export the builtin
    from gettext import GNUTranslations
else:
    import gettext

    class GNUTranslations(gettext.GNUTranslations):
        # Same public API as gettext.GNUTranslations; only _parse is replaced.
        def _parse(self, fp):
            # c_parse reads the whole catalog from fp in C and hands back the
            # attributes the pure-Python parser would otherwise populate.
            charset, metadata, catalog, plural = c_parse(fp)
            self._charset = charset
            self._info = metadata
            self._catalog = catalog
            self.plural = plural

__all__ = ['GNUTranslations']
|
[
"eevee.git@veekun.com"
] |
eevee.git@veekun.com
|
0467a469bfb2a1b833b93af0761a056efbc02d40
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/TRACE/FPythonCode/FTradeSheetColumnCustom.py
|
7368f700f8f3f51a6c7c5f45f72afb9cdeed7e09
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,351
|
py
|
import acm
def get_TradeReportTransType_string_from_value(val):
    '''
    Map a FIX TradeReportTransType value to the display string used in the
    tradesheet column; unknown values are passed through unchanged.
    '''
    labels = {
        "0": "New",
        "1": "Cancel",
        "2": "Replace",
        "3": "Release",
        "4": "Reverse",
    }
    return labels.get(val, val)
def get_TradeReportType_string_from_value(val):
    '''
    Map a FIX TradeReportType value to the display string used in the
    tradesheet column; unknown values are passed through unchanged.
    '''
    labels = {
        "0": "Submit",
        "1": "Alleged",
        "2": "Accept",
        "3": "Decline",
        "4": "Addendum",
        "5": "No/Was",
        "6": "Trade Report Cancel",
        "7": "Locked In Trade Break",
    }
    return labels.get(val, val)
def get_PartyRole_string_from_value(val):
    '''
    Map a FIX PartyRole value to the display string used in the tradesheet
    column; unknown values are passed through unchanged.
    '''
    labels = {
        "1": "Executing Firm",
        "7": "Entering Firm",
        "14": "Giveup Firm",
        "17": "Contra Firm",
        "83": "Clearing Account",
    }
    return labels.get(val, val)
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
c12a2731c0266326e4342197497bdbe4b3103bbe
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/KoubeiCateringOrderPayDisburseModel.py
|
d7684ebecee96e879c01d6568cb06e1e665fd1cf
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,200
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PosOrderKey import PosOrderKey
class KoubeiCateringOrderPayDisburseModel(object):
    """Payload model for the Koubei catering order pay/disburse API call."""

    # All serializable fields, in the order to_alipay_dict() emits them.
    _FIELD_NAMES = ('auth_code', 'member_flag', 'out_pay_no', 'pos_order_key',
                    'timeout', 'total_amount', 'undiscountable')

    def __init__(self):
        self._auth_code = None
        self._member_flag = None
        self._out_pay_no = None
        self._pos_order_key = None
        self._timeout = None
        self._total_amount = None
        self._undiscountable = None

    @property
    def auth_code(self):
        return self._auth_code

    @auth_code.setter
    def auth_code(self, value):
        self._auth_code = value

    @property
    def member_flag(self):
        return self._member_flag

    @member_flag.setter
    def member_flag(self, value):
        self._member_flag = value

    @property
    def out_pay_no(self):
        return self._out_pay_no

    @out_pay_no.setter
    def out_pay_no(self, value):
        self._out_pay_no = value

    @property
    def pos_order_key(self):
        return self._pos_order_key

    @pos_order_key.setter
    def pos_order_key(self, value):
        # Accept either a ready-made PosOrderKey or its dict representation.
        if isinstance(value, PosOrderKey):
            self._pos_order_key = value
        else:
            self._pos_order_key = PosOrderKey.from_alipay_dict(value)

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = value

    @property
    def total_amount(self):
        return self._total_amount

    @total_amount.setter
    def total_amount(self, value):
        self._total_amount = value

    @property
    def undiscountable(self):
        return self._undiscountable

    @undiscountable.setter
    def undiscountable(self, value):
        self._undiscountable = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict, recursing into any
        value that itself provides to_alipay_dict()."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue  # falsy fields are omitted, matching the SDK convention
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for an empty dict."""
        if not d:
            return None
        o = KoubeiCateringOrderPayDisburseModel()
        for name in KoubeiCateringOrderPayDisburseModel._FIELD_NAMES:
            if name in d:
                # Assignment goes through the property setters, so pos_order_key
                # is converted to a PosOrderKey exactly as before.
                setattr(o, name, d[name])
        return o
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
8edff0421ebc56d61abee4a4cef9d6eef91672f0
|
f6290b7b8ffb263b7f0d252a67e2c6320a4c1143
|
/Binary Tree/height_of_special_binary_tree.py
|
180231ea70bd5a270e62130aca6e3fd2873838a8
|
[] |
no_license
|
datAnir/GeekForGeeks-Problems
|
b45b0ae80053da8a1b47a2af06e688081574ef80
|
c71f11d0349ed3850dfaa9c7a078ee70f67e46a1
|
refs/heads/master
| 2023-05-29T15:21:59.680793
| 2020-12-15T04:55:01
| 2020-12-15T04:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
'''
https://practice.geeksforgeeks.org/problems/height-of-spiral-tree/1
Given a special Binary Tree whose leaf nodes are connected to form a circular doubly linked list. Find the height of this special Binary Tree.
Example 1:
Input:
1
/ \
2 3
/ \
4 5
/
6
Output: 4
Explanation:
In the above binary tree, 6, 5 and 3 are leaf nodes and they form a circular doubly linked list. Here, the left pointer of leaf node will act as a previous pointer of circular doubly linked list and its right pointer will act as next pointer of circular doubly linked list.
'''
# method - 1 => create visited array, and using BFS find total level
from collections import deque, defaultdict
def findTreeHeight(root):
    """Count levels with a BFS; the visited set keeps the circular leaf links
    from re-enqueueing nodes."""
    queue = deque([root])
    seen = {root}
    height = 0
    while queue:
        # Drain exactly one level per outer iteration.
        for _ in range(len(queue)):
            node = queue.popleft()
            for child in (node.left, node.right):
                if child and child not in seen:
                    seen.add(child)
                    queue.append(child)
        height += 1
    return height
# method - 2 => check circular conditions extra
# if current node is equal to next node's previous or next
def findTreeHeight(root):
    """Recursive height: a node whose child links point back at it via the
    circular doubly linked list of leaves is itself a leaf (height 1)."""
    if root == None:
        return 0
    # Leaf test: the DLL makes leaf.left.right (or leaf.right.left) loop back.
    is_dll_leaf = ((root.left != None and root == root.left.right) or
                   (root.right != None and root == root.right.left))
    if is_dll_leaf:
        return 1
    return 1 + max(findTreeHeight(root.left), findTreeHeight(root.right))
|
[
"komalbansal97@gmail.com"
] |
komalbansal97@gmail.com
|
91469a33a999bfea9fed7f0776edd52783522402
|
b05fee086482565ef48785f2a9c57cfe2c169f68
|
/part_one/6-builder_pattern/builder/director.py
|
30ec18b0022359e51962b212e0ff4a289f939982
|
[] |
no_license
|
diegogcc/py-design_patterns
|
76db926878d5baf9aea1f3d2f6a09f4866c3ce1e
|
2b49b981f2d3514bbd02796fe9a8ec083df6bb38
|
refs/heads/master
| 2023-04-01T08:28:53.211024
| 2021-04-05T11:48:19
| 2021-04-05T11:48:19
| 304,145,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
class Director:
    """Drives a computer builder through the assembly steps in order."""

    def __init__(self, builder):
        # Concrete builder that knows how to produce each part.
        self._builder = builder

    def get_computer(self):
        """Return the finished computer from the builder."""
        return self._builder.get_computer()

    def build_computer(self):
        """Run the full assembly sequence on the builder."""
        steps = (
            self._builder.new_computer,
            self._builder.get_case,
            self._builder.build_mainboard,
            self._builder.install_mainboard,
            self._builder.install_hard_drive,
            self._builder.install_video_card,
        )
        for step in steps:
            step()
|
[
"diegoc906@gmail.com"
] |
diegoc906@gmail.com
|
a1bab5f325d133df17fbae75ee780f703da474c6
|
482467f7875513440ccc9fb5ee5755214137e8df
|
/homeassistant/components/stiebel_eltron/__init__.py
|
52dc2d848918bf88b821b56a49c0cb0a36338a48
|
[
"Apache-2.0"
] |
permissive
|
Watemlifts/home-assistant
|
fbf16d91489f9ab472b1fda928fc472f99d2b057
|
6e414983738d9495eb9e4f858e3e98e9e38869db
|
refs/heads/dev
| 2023-07-21T06:38:40.212969
| 2023-07-15T09:33:07
| 2023-07-15T09:33:07
| 195,134,511
| 4
| 0
|
Apache-2.0
| 2023-07-15T09:33:08
| 2019-07-03T22:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
"""The component for STIEBEL ELTRON heat pumps with ISGWeb Modbus module."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.modbus import (
CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN)
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
DOMAIN = 'stiebel_eltron'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
    """Set up the STIEBEL ELTRON unit.

    Will automatically load climate platform.
    """
    conf = config[DOMAIN]
    name = conf[CONF_NAME]
    modbus_client = hass.data[MODBUS_DOMAIN][conf[CONF_HUB]]
    hass.data[DOMAIN] = {
        'name': name,
        'ste_data': StiebelEltronData(name, modbus_client),
    }
    discovery.load_platform(hass, 'climate', DOMAIN, {}, config)
    return True
class StiebelEltronData:
    """Get the latest data and update the states."""

    def __init__(self, name, modbus_client):
        """Init the STIEBEL ELTRON data object."""
        # Lazy import so the dependency is only loaded when the
        # integration is actually set up. `name` is accepted but unused
        # here — presumably kept for signature symmetry with the caller.
        from pystiebeleltron import pystiebeleltron
        self.api = pystiebeleltron.StiebelEltronAPI(modbus_client, 1)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update unit data."""
        # Throttle limits Modbus polling to once per MIN_TIME_BETWEEN_UPDATES.
        if not self.api.update():
            _LOGGER.warning("Modbus read failed")
        else:
            _LOGGER.debug("Data updated successfully")
|
[
"marhje52@kth.se"
] |
marhje52@kth.se
|
3333bd7d1d54e4a76c2974fe2941e952ca4dd14a
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/log-20190927/132.230.102.123-10.21.11.11/1569575419.py
|
5356af5e985e01c5e1c1f9be6046d34f1addbda1
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
import functools
import typing
import string
import random
import pytest
def leap(year: int) -> bool:
    """Return True if `year` is a Gregorian leap year.

    Years before 1583 (pre-Gregorian calendar) are never reported as
    leap years.

    Args:
        year: an integer

    Returns:
        a boolean expression
    """
    if year < 1583:
        return False
    # Divisible by 4, except century years not divisible by 400.
    # (Removed a leftover debug print and commented-out condition.)
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
print(leap(1582))
print(leap(1644))
######################################################################
## hidden code
def mk_coverage():
    """Build a decorator that tracks branch coverage of a leap() function.

    The returned callable decorates a function of one `year` argument and
    records which of the four leap-year branches each call exercised.
    Calling it with the strings "achieved", "required" or "count" returns
    the corresponding statistic instead of wrapping.
    """
    covered = set()
    target = set(range(4))
    count = 0

    def coverage(func):
        nonlocal covered, target, count
        # Query mode: a string argument asks for a statistic.
        if func == "achieved":
            return len(covered)
        if func == "required":
            return len(target)
        if func == "count":
            return count

        def wrapper(year):
            nonlocal covered, count
            # Classify which leap-year branch this call exercises.
            if year % 4 != 0:
                branch = 0
            elif year % 100 != 0:
                branch = 1
            elif year % 400 != 0:
                branch = 2
            else:
                branch = 3
            covered.add(branch)
            result = func(year)
            count += 1
            return result

        functools.update_wrapper(wrapper, func)
        return wrapper

    return coverage
coverage = mk_coverage ()
try:
leap = coverage(leap)
except:
pass
## Lösung Teil 2 (Tests)
def test_leap():
    """Spot-check leap() against known Gregorian years."""
    assert leap(1582) == False  # pre-Gregorian: never a leap year
    assert leap(1583) == False  # not divisible by 4
    assert leap(1600) == True   # century divisible by 400
    # BUG FIX: 1644 is divisible by 4 and is not a century year, so it IS
    # a leap year (the original asserted False, which fails).
    assert leap(1644) == True
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
    """Checks that the required function exists with the expected signature."""

    def test_leap (self):
        # The function must exist and take a parameter named `year`.
        assert leap
        assert 'year' in getfullargspec(leap).args
class TestGrades:
    """Hidden grading tests: docstring, typing, branch coverage, correctness."""

    def test_docstring_present(self):
        assert leap.__doc__ is not None

    def test_typing_present(self):
        # BUG FIX: functions have no `__hints__` attribute (AttributeError);
        # compare the real type hints of the student function with the oracle's.
        assert typing.get_type_hints(leap) == typing.get_type_hints(self.leap_oracle)

    def test_coverage(self):
        assert coverage("achieved") == coverage("required")

    def leap_oracle(self, year: int) -> bool:
        """Reference implementation of the Gregorian leap-year rule."""
        if year % 4 != 0:
            return False
        elif year % 100 != 0:
            return True
        elif year % 400 == 0:
            return True
        else:
            return False

    def check_leap(self, year):
        # The student function must agree with the oracle.
        assert leap(year) == self.leap_oracle(year)

    def test_correctness(self):
        # Random years, plus century years specifically (the tricky branch).
        for i in range(100):
            year = random.randrange(1582, 2500)
            self.check_leap(year)
        for i in range(100):
            year = random.randrange(1600, 3000, 100)
            self.check_leap(year)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
92eb574a98ab18224e6f678efd484bebf0f75fbd
|
f1961c86e6da14f35c21d7235f4fc8a89fabdcad
|
/DailyProgrammer/DP20140226B.py
|
723789a5c53f06aeb779421827eca6c15247d7f0
|
[
"MIT"
] |
permissive
|
DayGitH/Python-Challenges
|
d4930bdd85cd1a977d8f6192775ca956a375fcde
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
refs/heads/master
| 2021-01-17T13:01:03.784523
| 2018-06-29T23:49:04
| 2018-06-29T23:49:04
| 58,497,683
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,841
|
py
|
"""
[02/26/14] Challenge #150 [Intermediate] Re-emvoweler 1
https://www.reddit.com/r/dailyprogrammer/comments/1yzlde/022614_challenge_150_intermediate_reemvoweler_1/
# _(Intermediate)_: Re-emvoweler 1
In [this week's Easy
challenge](http://www.reddit.com/r/dailyprogrammer/comments/1ystvb/022414_challenge_149_easy_disemvoweler/), series of
words were disemvoweled into vowels, and non-vowel letters. Spaces were also removed. Your task today is, given the two
strings produced via disemvowelment, output _one possibility_ for the original string.
1. Your output must be such that if you put it through the solution to this week's Easy challenge, you'll recover
exactly the input you were given.
2. You don't need to output the same string as the one that was originally disemvoweled, just _some_ string that
disemvowels to your input.
3. Use [the Enable word list](http://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt), or some
other reasonable English word list. Every word in your output must appear in your word list.
4. For the sample inputs, all words in originally disemvoweled strings appear in Enable. In particular, I'm not using
any words with punctuation, and I'm not using the word "a".
5. As before, ignore punctuation and capitalization.
# Formal Inputs & Outputs
## Input description
Two strings, one containing only non-vowel letters, and one containing only vowels.
## Output description
A space-separated series of words that could be disemvoweled into the input, each word of which must appear in your
word list.
# Sample Inputs & Outputs
## Sample Input 1
wwllfndffthstrds
eieoeaeoi
## Sample Output 1
There are, in general, many correct outputs. Any of these is valid output for the sample input (using the Enable word
list to verify words):
we wile lo fen daff et host rids
we wile lo fend aff eths tor ids
we wile lo fen daff the sot rids
we will fend off eths tare do si
we will fend off the asteroids
## Sample Input 2
bbsrshpdlkftbllsndhvmrbndblbnsthndlts
aieaeaeieooaaaeoeeaeoeaau
## Sample Outputs 2
ab bise ars he ae pi ed look fa tab all sned hove me ar bend blob ens than adults
ai be base rash pe die look fat bal la sned hove me ar bend blob ens than adults
babies ae rash pe die loo ka fat balls end ho vee mar bend blob ens than adults
babies rash pedal kef tie bolls nod aah ave omer bendable bones than adults
babies are shaped like footballs and have more bendable bones than adults
## Sample Input 3
llfyrbsshvtsmpntbncnfrmdbyncdt
aoouiaeaeaoeoieeoieaeoe
# Notes
Thanks to /u/abecedarius for inspiring this challenge on /r/dailyprogrammer_ideas!
Think you can do a better job of re-emvoweling? Check out this week's Hard challenge!
"""
def main():
    # Placeholder: the re-emvoweler described in the module docstring is
    # not implemented yet.
    pass


if __name__ == "__main__":
    main()
|
[
"akber91@gmail.com"
] |
akber91@gmail.com
|
c1a3f8a768bbde06f5bc0e63a67e80424aa23eeb
|
a7ded5d3d19a98e61a44189cffe3703f7938e0db
|
/xero_python/accounting/models/country_code.py
|
20c8ef17a04040149b701b78f318d6a723c86732
|
[
"MIT"
] |
permissive
|
liseekeralbert/xero-python
|
dfd1076344f763d74f81f701e32600cf88bcc7b2
|
d27ab1894ecd84d2a9af0ca91583593756b21ab3
|
refs/heads/master
| 2022-12-16T07:41:14.331308
| 2020-09-18T17:12:35
| 2020-09-18T17:12:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
# coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.3.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from enum import Enum
class CountryCode(Enum):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    allowed enum values
    """

    # Two-letter country codes — appears to be ISO 3166-1 alpha-2 plus a
    # few legacy/user-assigned codes (e.g. AN, XK); TODO confirm against
    # the Xero API spec before relying on exact set membership.
    AD = "AD"
    AE = "AE"
    AF = "AF"
    AG = "AG"
    AI = "AI"
    AL = "AL"
    AM = "AM"
    AN = "AN"
    AO = "AO"
    AQ = "AQ"
    AR = "AR"
    AS = "AS"
    AT = "AT"
    AU = "AU"
    AW = "AW"
    AZ = "AZ"
    BA = "BA"
    BB = "BB"
    BD = "BD"
    BE = "BE"
    BF = "BF"
    BG = "BG"
    BH = "BH"
    BI = "BI"
    BJ = "BJ"
    BL = "BL"
    BM = "BM"
    BN = "BN"
    BO = "BO"
    BR = "BR"
    BS = "BS"
    BT = "BT"
    BW = "BW"
    BY = "BY"
    BZ = "BZ"
    CA = "CA"
    CC = "CC"
    CD = "CD"
    CF = "CF"
    CG = "CG"
    CH = "CH"
    CI = "CI"
    CK = "CK"
    CL = "CL"
    CM = "CM"
    CN = "CN"
    CO = "CO"
    CR = "CR"
    CU = "CU"
    CV = "CV"
    CW = "CW"
    CX = "CX"
    CY = "CY"
    CZ = "CZ"
    DE = "DE"
    DJ = "DJ"
    DK = "DK"
    DM = "DM"
    DO = "DO"
    DZ = "DZ"
    EC = "EC"
    EE = "EE"
    EG = "EG"
    EH = "EH"
    ER = "ER"
    ES = "ES"
    ET = "ET"
    FI = "FI"
    FJ = "FJ"
    FK = "FK"
    FM = "FM"
    FO = "FO"
    FR = "FR"
    GA = "GA"
    GB = "GB"
    GD = "GD"
    GE = "GE"
    GG = "GG"
    GH = "GH"
    GI = "GI"
    GL = "GL"
    GM = "GM"
    GN = "GN"
    GQ = "GQ"
    GR = "GR"
    GT = "GT"
    GU = "GU"
    GW = "GW"
    GY = "GY"
    HK = "HK"
    HN = "HN"
    HR = "HR"
    HT = "HT"
    HU = "HU"
    ID = "ID"
    IE = "IE"
    IL = "IL"
    IM = "IM"
    IN = "IN"
    IO = "IO"
    IQ = "IQ"
    IR = "IR"
    IS = "IS"
    IT = "IT"
    JE = "JE"
    JM = "JM"
    JO = "JO"
    JP = "JP"
    KE = "KE"
    KG = "KG"
    KH = "KH"
    KI = "KI"
    KM = "KM"
    KN = "KN"
    KP = "KP"
    KR = "KR"
    KW = "KW"
    KY = "KY"
    KZ = "KZ"
    LA = "LA"
    LB = "LB"
    LC = "LC"
    LI = "LI"
    LK = "LK"
    LR = "LR"
    LS = "LS"
    LT = "LT"
    LU = "LU"
    LV = "LV"
    LY = "LY"
    MA = "MA"
    MC = "MC"
    MD = "MD"
    ME = "ME"
    MF = "MF"
    MG = "MG"
    MH = "MH"
    MK = "MK"
    ML = "ML"
    MM = "MM"
    MN = "MN"
    MO = "MO"
    MP = "MP"
    MR = "MR"
    MS = "MS"
    MT = "MT"
    MU = "MU"
    MV = "MV"
    MW = "MW"
    MX = "MX"
    MY = "MY"
    MZ = "MZ"
    NA = "NA"
    NC = "NC"
    NE = "NE"
    NG = "NG"
    NI = "NI"
    NL = "NL"
    NO = "NO"
    NP = "NP"
    NR = "NR"
    NU = "NU"
    NZ = "NZ"
    OM = "OM"
    PA = "PA"
    PE = "PE"
    PF = "PF"
    PG = "PG"
    PH = "PH"
    PK = "PK"
    PL = "PL"
    PM = "PM"
    PN = "PN"
    PR = "PR"
    PS = "PS"
    PT = "PT"
    PW = "PW"
    PY = "PY"
    QA = "QA"
    RE = "RE"
    RO = "RO"
    RS = "RS"
    RU = "RU"
    RW = "RW"
    SA = "SA"
    SB = "SB"
    SC = "SC"
    SD = "SD"
    SE = "SE"
    SG = "SG"
    SH = "SH"
    SI = "SI"
    SJ = "SJ"
    SK = "SK"
    SL = "SL"
    SM = "SM"
    SN = "SN"
    SO = "SO"
    SR = "SR"
    SS = "SS"
    ST = "ST"
    SV = "SV"
    SX = "SX"
    SY = "SY"
    SZ = "SZ"
    TC = "TC"
    TD = "TD"
    TG = "TG"
    TH = "TH"
    TJ = "TJ"
    TK = "TK"
    TL = "TL"
    TM = "TM"
    TN = "TN"
    TO = "TO"
    TR = "TR"
    TT = "TT"
    TV = "TV"
    TW = "TW"
    TZ = "TZ"
    UA = "UA"
    UG = "UG"
    US = "US"
    UY = "UY"
    UZ = "UZ"
    VA = "VA"
    VC = "VC"
    VE = "VE"
    VG = "VG"
    VI = "VI"
    VN = "VN"
    VU = "VU"
    WF = "WF"
    WS = "WS"
    XK = "XK"
    YE = "YE"
    YT = "YT"
    ZA = "ZA"
    ZM = "ZM"
    ZW = "ZW"
|
[
"sid.maestre@gmail.com"
] |
sid.maestre@gmail.com
|
ca71ccf61df6a0176341a1941ea6e6315f5fcdf1
|
e3040a2e23a856e319e02037dc6baf3882c796b9
|
/samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/paths/response_body_post_oneof_response_body_for_content_types/post.pyi
|
ef5fdf6e1c02d23db716ba98c8ca2592e6274dbc
|
[
"Apache-2.0"
] |
permissive
|
mishin/openapi-generator
|
2ed2e0739c0cc2a627c25191d5898071d9294036
|
3ed650307513d552404f3d76487f3b4844acae41
|
refs/heads/master
| 2023-06-10T03:01:09.612130
| 2022-10-14T08:29:15
| 2022-10-14T08:29:15
| 271,080,285
| 0
| 0
|
Apache-2.0
| 2023-05-30T02:01:25
| 2020-06-09T18:29:41
|
Java
|
UTF-8
|
Python
| false
| false
| 7,519
|
pyi
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.oneof import Oneof
# Response schema alias plus the 200-response mapping for this endpoint
# (generated code — see the header comment at the top of this file).
SchemaFor200ResponseBodyApplicationJson = Oneof


@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    # Raw HTTP response plus the (optionally) deserialized body.
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset


_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationJson),
    },
)
_all_accept_content_types = (
    'application/json',
)
class BaseApi(api_client.Api):
    # NOTE(review): auto-generated endpoint implementation (OpenAPI
    # Generator); regenerate from the spec rather than hand-editing.

    @typing.overload
    def _post_oneof_response_body_for_content_types_oapg(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def _post_oneof_response_body_for_content_types_oapg(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def _post_oneof_response_body_for_content_types_oapg(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def _post_oneof_response_body_for_content_types_oapg(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        # NOTE(review): `path` and `_status_code_to_response` are module-level
        # names defined elsewhere in the generated module — not visible in
        # this chunk; TODO confirm before refactoring.
        used_path = path.value

        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)

        response = self.api_client.call_api(
            resource_path=used_path,
            method='post'.upper(),
            headers=_headers,
            stream=stream,
            timeout=timeout,
        )

        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Look up the declared response for this HTTP status; fall back
            # to the raw (undeserialized) response when none is declared.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)

        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)

        return api_response
class PostOneofResponseBodyForContentTypes(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names

    @typing.overload
    def post_oneof_response_body_for_content_types(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post_oneof_response_body_for_content_types(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def post_oneof_response_body_for_content_types(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def post_oneof_response_body_for_content_types(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        # Thin generated wrapper delegating to the shared implementation.
        return self._post_oneof_response_body_for_content_types_oapg(
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForpost(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names

    @typing.overload
    def post(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...

    @typing.overload
    def post(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...

    @typing.overload
    def post(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...

    def post(
        self,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        # Thin generated wrapper delegating to the shared implementation.
        return self._post_oneof_response_body_for_content_types_oapg(
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
|
[
"noreply@github.com"
] |
mishin.noreply@github.com
|
f9a778bd121f5471a7545a51299c85b1ed6fe37d
|
7b74696ff2ab729396cba6c203984fce5cd0ff83
|
/stockmarket/migrations/0018_auto_20210310_0713.py
|
a92ec4772b8fef3cfc51038084147fdbfb09d35c
|
[
"MIT"
] |
permissive
|
webclinic017/investtrack
|
e9e9a7a8caeecaceebcd79111c32b334c4e1c1d0
|
4aa204b608e99dfec3dd575e72b64a6002def3be
|
refs/heads/master
| 2023-06-18T12:57:32.417414
| 2021-07-10T14:26:53
| 2021-07-10T14:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
# Generated by Django 3.0.7 on 2021-03-09 23:13
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds pb/ps/ps_ttm columns to
    # CompanyDailyBasic and creates the IndexDailyBasic model.
    # Do not hand-edit applied migrations.

    dependencies = [
        ('stockmarket', '0017_auto_20210307_1803'),
    ]

    operations = [
        migrations.AddField(
            model_name='companydailybasic',
            name='pb',
            field=models.FloatField(blank=True, null=True, verbose_name='市净率'),
        ),
        migrations.AddField(
            model_name='companydailybasic',
            name='ps',
            field=models.FloatField(blank=True, null=True, verbose_name='市销率'),
        ),
        migrations.AddField(
            model_name='companydailybasic',
            name='ps_ttm',
            field=models.FloatField(blank=True, null=True, verbose_name='市销率TTM'),
        ),
        migrations.CreateModel(
            name='IndexDailyBasic',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
                ('last_mod_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='最后更新时间')),
                ('ts_code', models.CharField(blank=True, max_length=50, unique=True, verbose_name='TS代码')),
                ('trade_date', models.DateField(blank=True, null=True, verbose_name='交易日期')),
                ('turnover_rate', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率')),
                ('turnover_rate_f', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率(自由流通)')),
                ('pe', models.FloatField(blank=True, null=True, verbose_name='市盈率')),
                ('pe_ttm', models.FloatField(blank=True, null=True, verbose_name='市盈率TTM')),
                ('pb', models.FloatField(blank=True, null=True, verbose_name='市净率')),
                ('total_share', models.FloatField(blank=True, null=True, verbose_name='总股本')),
                ('float_share', models.FloatField(blank=True, null=True, verbose_name='流通股本')),
                ('free_share', models.FloatField(blank=True, null=True, verbose_name='自由流通股本')),
                ('total_mv', models.FloatField(blank=True, null=True, verbose_name='总市值')),
                ('float_mv', models.FloatField(blank=True, null=True, verbose_name='流通市值')),
                ('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stockmarket.StockNameCodeMap')),
            ],
            options={
                'verbose_name': '指数每日基本',
                'verbose_name_plural': '指数每日基本',
                'ordering': ['-last_mod_time'],
                'get_latest_by': 'id',
            },
        ),
    ]
|
[
"jie.han@outlook.com"
] |
jie.han@outlook.com
|
01e304b264f3b7116f5df2b1b3345739a66a4d8f
|
5775513b81096d77b11bfe99949e4cbd80af20d4
|
/jumpingintodjango/questionsandanswers/migrations/0005_auto__add_field_question_cmpnyvisit.py
|
1fa82c8c942e0a57ee34a2c78567711f91edea9b
|
[] |
no_license
|
gzpgg3x/BrowsingOR
|
55234ba7b785675ea6b1d6a99c083aa0885fba74
|
15d467c6cc70beece93c699f2e9728509c3ce9f3
|
refs/heads/master
| 2016-09-06T11:00:20.737008
| 2013-04-29T16:19:53
| 2013-04-29T16:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South migration: adds the `cmpnyvisit` integer column
    # to Question. Do not hand-edit applied migrations.

    def forwards(self, orm):
        # Adding field 'Question.cmpnyvisit'
        db.add_column(u'questionsandanswers_question', 'cmpnyvisit',
                      self.gf('django.db.models.fields.IntegerField')(default=100),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Question.cmpnyvisit'
        db.delete_column(u'questionsandanswers_question', 'cmpnyvisit')

    models = {
        u'questionsandanswers.answer': {
            'Meta': {'object_name': 'Answer'},
            'best_answer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionsandanswers.Question']"})
        },
        u'questionsandanswers.question': {
            'Meta': {'object_name': 'Question'},
            'cmpnyvisit': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'visit': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['questionsandanswers']
|
[
"gzpgg3x@yahoo.com"
] |
gzpgg3x@yahoo.com
|
a460b11b44d5739c0f1a1c59783c8dcabe4843f2
|
02bfa3b84a5c811c3fd4c293b14b0846bd6ab3b5
|
/SortingAlrorithms/QuickSort.py
|
46b3cf3f67d0d478023a3d3c18746ddb48f32b9b
|
[] |
no_license
|
Ronak912/Programming_Fun
|
9dde0842245b62748b479924921383de07b24d16
|
2a504d0ef230d09007b8a268c356055ced5ca6c0
|
refs/heads/master
| 2020-04-06T03:40:52.046819
| 2020-04-03T22:25:36
| 2020-04-03T22:25:36
| 42,281,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
def quickSort(alist):
    """Sort `alist` in place using quicksort (first element as pivot)."""
    quickSortHelper(alist, 0, len(alist)-1)
def quickSortHelper(alist, first, last):
    """Recursively quicksort alist[first..last] in place."""
    if first < last:
        split = partition(alist, first, last)
        # Sort the two halves around the pivot's final position.
        quickSortHelper(alist, first, split - 1)
        quickSortHelper(alist, split + 1, last)
def partition(alist, first, last):
    """Partition alist[first..last] around the pivot alist[first].

    Moves elements <= pivot to the pivot's left and elements >= pivot to
    its right, then returns the pivot's final index.

    Fixes: the right-scan loop now checks the bounds condition BEFORE
    reading alist[rightmark] (the original read the element first), and
    swaps use the idiomatic tuple form.
    """
    pivotvalue = alist[first]
    leftmark = first + 1
    rightmark = last
    while True:
        # Advance past elements already on the correct side.
        while leftmark <= rightmark and alist[leftmark] <= pivotvalue:
            leftmark += 1
        while rightmark >= leftmark and alist[rightmark] >= pivotvalue:
            rightmark -= 1
        if rightmark < leftmark:
            break
        # Out-of-place pair found: swap it.
        alist[leftmark], alist[rightmark] = alist[rightmark], alist[leftmark]
    # Place the pivot at its final sorted position.
    alist[first], alist[rightmark] = alist[rightmark], alist[first]
    return rightmark
# Demo: sort a sample list in place.
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
quickSort(alist)
#print(alist)
|
[
"ronak.928@gmail.com"
] |
ronak.928@gmail.com
|
14c5bba12db00e778ec048d589648f8d833c1e3e
|
0b76e4db1f08f2d6d7b9379a884c2075f6e258c3
|
/w9/G4/4.py
|
8d5116ba23d233d71f26eeda927160fa5d04798e
|
[] |
no_license
|
bobur554396/WD2020Spring
|
244ec8b491f297646d1d37f1feeb3767b68b9180
|
2b833c9043701ebaa4d122f717c8465af8fd5677
|
refs/heads/master
| 2020-12-26T19:01:46.605344
| 2020-04-18T05:33:42
| 2020-04-18T05:33:42
| 237,606,624
| 1
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# Functions
def hello(request):
    """Print a greeting; the `request` argument is accepted but unused."""
    print('hi')
def sum(a, b):
    """Return a + b.

    NOTE: shadows the builtin ``sum``; kept as-is for caller compatibility.
    """
    return a + b
# c = sum(2, 3)
# print(c)
def mult(a, b=3, c=10):
    """Return the product a * b * c (b defaults to 3, c to 10)."""
    product = a * b
    return product * c


print(mult(2))
|
[
"bobur.muhsimbaev@gmail.com"
] |
bobur.muhsimbaev@gmail.com
|
60a087f8c198509615937f7b2d47732ab42cbb42
|
c3a6e39441d70cd632adff3ade7d7d331f702bbf
|
/DocxTest.py
|
60456d561aed8d9ef55091e23e4315a838a4c804
|
[] |
no_license
|
Yzp109062/programming
|
ef81f5588b28da130d6a4c608578cbf622c50e16
|
b84aee684fe39623185749e5250ffb454a302176
|
refs/heads/master
| 2022-11-23T01:36:18.344437
| 2020-07-28T21:37:39
| 2020-07-28T21:37:39
| 284,621,508
| 1
| 0
| null | 2020-08-03T06:28:43
| 2020-08-03T06:28:42
| null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from docx import Document
from docx.shared import Pt

# Open an existing lexicon document and normalize its base ("Normal") style.
document = Document("/home/wesley/Desktop/Construction/Conlanging/Daellic/Daool Lexicon Working Version Python Test.docx")
style = document.styles["Normal"]
font = style.font
font.name = "Charis SIL"
font.size = Pt(12)

# Append a paragraph mixing plain, bold and italic runs, then a second paragraph.
p = document.add_paragraph("Test ")
p.add_run("bold").bold = True
p.add_run(" and ")
p.add_run("italic").italic = True
p = document.add_paragraph("New paragraph")
# Write the modified document to a separate output file.
document.save("/home/wesley/programming/DocxTestOutput.docx")
|
[
"jazzzman627@yahoo.com"
] |
jazzzman627@yahoo.com
|
c59e659d909312483f20f07ec25ed071a0ab1d64
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/histogram_20200204122135.py
|
5d0a4eeb8d476dabb9528ff8aa4b80539dcd1d90
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,137
|
py
|
from clean_text import clean
from benchmark import bench
def tuple_hist(source):
    ''' Fastest - tuples are immutable. List of tuples: [('hello', 3), ('what', 4)]
    Takes text. Stores each item in text, compares each item to the rest of the words in
    text and keeps a running total. Used list account for no repeats.

    Fix: removed leftover debug prints ("hello" per word, and the final
    USED dump); the returned histogram is unchanged.
    '''
    histo = []
    used = []
    text = clean(source)
    for word in text:
        # skip words that have already been counted
        if word in used:
            continue
        used.append(word)
        counter = 0
        for word2 in text:
            if word == word2:
                counter += 1
        histo.append((word, counter))
    return histo
def list_hist(source):
    ''' List of lists histogram. [['hello', 1], ['you', 3], ['sir', 4]]
    Takes text (passed through clean) and returns [[word, count], ...]
    in first-occurrence order.

    Improvement: counts in a single O(n) dict pass instead of the original
    quadratic scan-per-word; output order and contents are unchanged
    (dicts preserve insertion order on Python 3.7+).
    '''
    counts = {}
    text = clean(source)
    for word in text:
        counts[word] = counts.get(word, 0) + 1
    return [[word, count] for word, count in counts.items()]
def dict_hist(source):
    ''' Dictionary key value pairs {'hello':1, 'sir':2, 'how':5}
    Takes text. Stores each item in text, compares each item to the rest of the words in
    text and keeps a running total. Used list account for no repeats.'''
    text = clean(source)
    histo_dict = {}
    for word in text:
        # get() folds the first-occurrence and increment cases together
        histo_dict[word] = histo_dict.get(word, 0) + 1
    return histo_dict
def counts_list(source):
    """Build an inverted histogram: [(count, [words with that count]), ...].

    First counts every distinct word (quadratic scan, like tuple_hist),
    then groups words by their frequency; each distinct frequency appears
    once, paired with all words sharing it.
    """
    histo = []
    instances = []
    used = []
    text = clean(source)
    # print(text)
    for word in text:
        # check if the word has already been accounted
        if word in used:
            continue
        counter = 0
        used.append(word)
        # for each word in the text if it matches a word in the same text,
        # we have an instance of that word - so increase counter by 1
        for word2 in text:
            if word == word2:
                counter += 1
        # we know the word and we have the occurances stored in counter.
        # create a list instance object with the word and its occurances
        # and append it to the list of word instances.
        instance = [word, counter]
        instances.append(instance)
    used_nums = []
    for item in instances:
        # check if the word frequency has been accounted for before
        if item[1] in used_nums:
            continue
        used_nums.append(item[1])
        membs = []
        new_instance = (item[1], membs) # this is what an instance of our histogram looks like
        # for one item in our instances we check if the frequency matches
        # any other frequencies in the instances list. if it does we add those to members list
        for item2 in instances:
            if item[1] == item2[1]:
                # print(item2[0])
                membs.append(item2[0])
        histo.append(new_instance)
    # print(histo)
    return histo
def unique_words(histo):
    ''' takes a histogram and returns the number of unique words in it.

    Handles both entry shapes: (word, count) / [word, count] pairs, and
    inverted (count, [words...]) entries, detected by whether the first
    element of an entry is an int.
    '''
    total = 0
    for entry in histo:
        if type(entry[0]) == int:
            # inverted entry: every member word counts once
            total += len(entry[1])
        else:
            total += 1
    return total
def frequency(word, histo):
    ''' takes a word and histo, returns the frequency of that word in the histo

    Supports both entry shapes:
      * (word, count) / [word, count]  -> returns the count (entry[1])
      * (count, [words...]) inverted   -> returns the count (entry[0])

    Returns None when the word is absent (matching the original behaviour).

    Fix: the original tested ``word in item`` even for inverted entries,
    where the word sits inside item[1], so counts_list histograms always
    yielded None; membership is now checked in the member list.
    '''
    for entry in histo:
        if type(entry[0]) == int:
            if word in entry[1]:
                return entry[0]
        elif word in entry:
            return entry[1]
    return None
# Demo / benchmark harness: exercises each histogram builder and helper.
if __name__ == '__main__':
    source = 'one fish two fish red fish blue fish'
    listo_histo = list_hist("source.txt")
    # print(listo_histo)
    tuple_histo = tuple_hist(source)
    print(tuple_histo)
    # print(dict_hist('source.txt'))
    # print(counts_list('source.txt'))
    print('')
    print(unique_words(list_hist("source.txt")))
    print(unique_words(counts_list('source.txt')))
    print('freq of fish: ', frequency('fish', list_hist("source.txt")))
    print('freq of tax: ', frequency('tax', list_hist("source.txt")))
    print('freq of i: ', frequency('i', list_hist("source.txt")))
    print('benchmark for list hist: ', bench(listo_histo))
    print('benchmark for dict hist: ', bench(dict_hist('source.txt')))
    print('benchmark for tuple hist: ', bench(tuple_histo))
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
ed7692ac1e2630e87218877cf5032e76083e7c98
|
e5c3b3a044e826425dd0f783d5e38e5bfeb82626
|
/diplomacy_research/proto/diplomacy_tensorflow/core/protobuf/transport_options_pb2.py
|
e079da40929cc5f94bf114d387992431fb51a4c9
|
[
"MIT"
] |
permissive
|
JACKHAHA363/research
|
04f67f98dcd238092941725d531517ae2a4ab47f
|
e752f02f34936bbae904815708904cabda554b57
|
refs/heads/master
| 2020-09-14T23:42:32.337085
| 2019-11-22T03:36:35
| 2019-11-22T03:36:35
| 223,296,172
| 0
| 0
| null | 2019-11-22T01:15:52
| 2019-11-22T01:15:51
| null |
UTF-8
|
Python
| false
| true
| 2,218
|
py
|
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: diplomacy_tensorflow/core/protobuf/transport_options.proto
# NOTE(review): descriptor layout is machine-generated; regenerate with
# protoc from the .proto source rather than editing this module.

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='diplomacy_tensorflow/core/protobuf/transport_options.proto',
  package='diplomacy.tensorflow',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n:diplomacy_tensorflow/core/protobuf/transport_options.proto\x12\x14\x64iplomacy.tensorflow\"*\n\x10RecvBufRespExtra\x12\x16\n\x0etensor_content\x18\x01 \x03(\x0c\x62\x06proto3')
)


_RECVBUFRESPEXTRA = _descriptor.Descriptor(
  name='RecvBufRespExtra',
  full_name='diplomacy.tensorflow.RecvBufRespExtra',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tensor_content', full_name='diplomacy.tensorflow.RecvBufRespExtra.tensor_content', index=0,
      number=1, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=84,
  serialized_end=126,
)

DESCRIPTOR.message_types_by_name['RecvBufRespExtra'] = _RECVBUFRESPEXTRA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

RecvBufRespExtra = _reflection.GeneratedProtocolMessageType('RecvBufRespExtra', (_message.Message,), dict(
  DESCRIPTOR = _RECVBUFRESPEXTRA,
  __module__ = 'diplomacy_tensorflow.core.protobuf.transport_options_pb2'
  # @@protoc_insertion_point(class_scope:diplomacy.tensorflow.RecvBufRespExtra)
  ))
_sym_db.RegisterMessage(RecvBufRespExtra)


# @@protoc_insertion_point(module_scope)
|
[
"pcpaquette@gmail.com"
] |
pcpaquette@gmail.com
|
c8da8eefa8c5d426331ac8e5cd31e22fc656e14e
|
7825f583fc11716f0a7b6a90799170bdfa6cc969
|
/apps/trade/models.py
|
fc00ed8ebcd5ba1c8bfafaa8ea5ef7538e3b19c9
|
[] |
no_license
|
dingmf/TTSX1
|
ca49dc76a59e1f0e5357a40ac708487e0a32afa2
|
b1bc1403114961a0830e273f14413f7421647005
|
refs/heads/master
| 2020-03-28T16:43:43.526808
| 2018-09-10T01:38:31
| 2018-09-10T01:38:31
| 148,722,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from goods.models import Goods
User = get_user_model()
# Create your models here.
class ShoppingCart(models.Model):
    """Shopping cart: one row per (user, goods) pair with a quantity."""

    user = models.ForeignKey(User, verbose_name=u"用户",on_delete=models.DO_NOTHING)
    goods = models.ForeignKey(Goods, verbose_name=u"商品",on_delete=models.DO_NOTHING)
    nums = models.IntegerField(default=0, verbose_name="购买数量")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = '购物车'
        verbose_name_plural = verbose_name
        # One cart row per (user, goods) combination.
        unique_together = ("user", "goods")

    def __str__(self):
        # BUG FIX: the original was '"%s(%d)".format(...)' — a %-style
        # template passed to str.format(), which has no {} fields and thus
        # always returned the literal string "%s(%d)".
        return "{}({})".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
    """Order header: one row per checkout, identified by ``order_sn``."""

    # NOTE(review): the upper-case states match Alipay trade-notification
    # values; "paying" is the local initial (awaiting-payment) state —
    # confirm against the payment-callback handler.
    ORDER_STATUS = (
        ("TRADE_SUCCESS", "成功"),
        ("TRADE_CLOSED", "超时关闭"),
        ("WAIT_BUYER_PAY", "交易创建"),
        ("TRADE_FINISHED", "交易结束"),
        ("paying", "待支付"),
    )

    user = models.ForeignKey(User, verbose_name="用户",on_delete=models.DO_NOTHING)
    # Locally generated order serial number (nullable until assigned).
    order_sn = models.CharField(max_length=30, null=True, blank=True, unique=True, verbose_name="订单号")
    # Transaction id returned by the payment provider.
    trade_no = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name=u"交易号")
    pay_status = models.CharField(choices=ORDER_STATUS, default="paying", max_length=30, verbose_name="订单状态")
    post_script = models.CharField(max_length=200, verbose_name="订单留言")
    order_mount = models.FloatField(default=0.0, verbose_name="订单金额")
    pay_time = models.DateTimeField(null=True, blank=True, verbose_name="支付时间")

    # Recipient / shipping information
    address = models.CharField(max_length=100, default="", verbose_name="收货地址")
    signer_name = models.CharField(max_length=20, default="", verbose_name="签收人")
    singer_mobile = models.CharField(max_length=11, verbose_name="联系电话")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")

    class Meta:
        verbose_name = u"订单"
        verbose_name_plural = verbose_name

    def __str__(self):
        return str(self.order_sn)
class OrderGoods(models.Model):
    """Order line item: links an :class:`OrderInfo` to a Goods with a quantity."""

    # related_name="goods" lets an order enumerate its lines as order.goods.
    order = models.ForeignKey(OrderInfo, verbose_name="订单信息", related_name="goods",on_delete=models.DO_NOTHING)
    goods = models.ForeignKey(Goods, verbose_name="商品",on_delete=models.DO_NOTHING)
    goods_num = models.IntegerField(default=0, verbose_name="商品数量")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")

    class Meta:
        verbose_name = "订单商品"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Display the owning order's serial number.
        return str(self.order.order_sn)
|
[
"1175674559@qq.com"
] |
1175674559@qq.com
|
8d952a384d61bfa8c0d257aa9b30e1060fa69354
|
f7ff9607822bb8f347598c10d185941cf1956852
|
/aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateOTAStaticUpgradeJobRequest.py
|
7e7ee501d6c20d1a94d5c9e4dc901b71e523ad7c
|
[
"Apache-2.0"
] |
permissive
|
djzqbx001/aliyun-openapi-python-sdk
|
5ca32201c578528f4b4228c7636b36c3f60a7c60
|
7d2e3c854c4d70ed341f036f5f7be0310216c303
|
refs/heads/master
| 2023-09-06T10:17:55.489439
| 2021-11-19T04:26:37
| 2021-11-19T04:26:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,658
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTAStaticUpgradeJobRequest(RpcRequest):
	"""RPC request for IoT ``CreateOTAStaticUpgradeJob`` (API version 2018-01-20).

	NOTE(review): this follows the auto-generated aliyun-python-sdk pattern —
	one trivial getter/setter per query parameter; list-valued parameters are
	flattened into ``Name.1``, ``Name.2``, ... query keys. Prefer regenerating
	over hand-editing.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTAStaticUpgradeJob','iot')
		self.set_method('POST')
		# Endpoint resolution data is attached only when the base class
		# declares the corresponding attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_RetryCount(self):
		return self.get_query_params().get('RetryCount')

	def set_RetryCount(self,RetryCount):
		self.add_query_param('RetryCount',RetryCount)

	def get_TimeoutInMinutes(self):
		return self.get_query_params().get('TimeoutInMinutes')

	def set_TimeoutInMinutes(self,TimeoutInMinutes):
		self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)

	def get_NeedConfirm(self):
		return self.get_query_params().get('NeedConfirm')

	def set_NeedConfirm(self,NeedConfirm):
		self.add_query_param('NeedConfirm',NeedConfirm)

	def get_GroupType(self):
		return self.get_query_params().get('GroupType')

	def set_GroupType(self,GroupType):
		self.add_query_param('GroupType',GroupType)

	def get_NeedPush(self):
		return self.get_query_params().get('NeedPush')

	def set_NeedPush(self,NeedPush):
		self.add_query_param('NeedPush',NeedPush)

	def get_IotInstanceId(self):
		return self.get_query_params().get('IotInstanceId')

	def set_IotInstanceId(self,IotInstanceId):
		self.add_query_param('IotInstanceId',IotInstanceId)

	def get_DownloadProtocol(self):
		return self.get_query_params().get('DownloadProtocol')

	def set_DownloadProtocol(self,DownloadProtocol):
		self.add_query_param('DownloadProtocol',DownloadProtocol)

	def get_TargetSelection(self):
		return self.get_query_params().get('TargetSelection')

	def set_TargetSelection(self,TargetSelection):
		self.add_query_param('TargetSelection',TargetSelection)

	def get_ScheduleFinishTime(self):
		return self.get_query_params().get('ScheduleFinishTime')

	def set_ScheduleFinishTime(self,ScheduleFinishTime):
		self.add_query_param('ScheduleFinishTime',ScheduleFinishTime)

	def get_Tags(self):
		return self.get_query_params().get('Tag')

	def set_Tags(self, Tags):
		# List of {'Key':..., 'Value':...} dicts flattened to
		# 'Tag.N.Key' / 'Tag.N.Value' (1-based) query parameters.
		for depth1 in range(len(Tags)):
			if Tags[depth1].get('Value') is not None:
				self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
			if Tags[depth1].get('Key') is not None:
				self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))

	def get_GrayPercent(self):
		return self.get_query_params().get('GrayPercent')

	def set_GrayPercent(self,GrayPercent):
		self.add_query_param('GrayPercent',GrayPercent)

	def get_DnListFileUrl(self):
		return self.get_query_params().get('DnListFileUrl')

	def set_DnListFileUrl(self,DnListFileUrl):
		self.add_query_param('DnListFileUrl',DnListFileUrl)

	def get_GroupId(self):
		return self.get_query_params().get('GroupId')

	def set_GroupId(self,GroupId):
		self.add_query_param('GroupId',GroupId)

	def get_FirmwareId(self):
		return self.get_query_params().get('FirmwareId')

	def set_FirmwareId(self,FirmwareId):
		self.add_query_param('FirmwareId',FirmwareId)

	def get_ProductKey(self):
		return self.get_query_params().get('ProductKey')

	def set_ProductKey(self,ProductKey):
		self.add_query_param('ProductKey',ProductKey)

	def get_RetryInterval(self):
		return self.get_query_params().get('RetryInterval')

	def set_RetryInterval(self,RetryInterval):
		self.add_query_param('RetryInterval',RetryInterval)

	def get_SrcVersions(self):
		return self.get_query_params().get('SrcVersion')

	def set_SrcVersions(self, SrcVersions):
		# Plain list flattened to 'SrcVersion.N' (1-based) query parameters.
		for depth1 in range(len(SrcVersions)):
			if SrcVersions[depth1] is not None:
				self.add_query_param('SrcVersion.' + str(depth1 + 1) , SrcVersions[depth1])

	def get_ScheduleTime(self):
		return self.get_query_params().get('ScheduleTime')

	def set_ScheduleTime(self,ScheduleTime):
		self.add_query_param('ScheduleTime',ScheduleTime)

	def get_OverwriteMode(self):
		return self.get_query_params().get('OverwriteMode')

	def set_OverwriteMode(self,OverwriteMode):
		self.add_query_param('OverwriteMode',OverwriteMode)

	def get_MaximumPerMinute(self):
		return self.get_query_params().get('MaximumPerMinute')

	def set_MaximumPerMinute(self,MaximumPerMinute):
		self.add_query_param('MaximumPerMinute',MaximumPerMinute)

	def get_TargetDeviceNames(self):
		return self.get_query_params().get('TargetDeviceName')

	def set_TargetDeviceNames(self, TargetDeviceNames):
		# Plain list flattened to 'TargetDeviceName.N' (1-based) query parameters.
		for depth1 in range(len(TargetDeviceNames)):
			if TargetDeviceNames[depth1] is not None:
				self.add_query_param('TargetDeviceName.' + str(depth1 + 1) , TargetDeviceNames[depth1])
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
5414549b6e05db7f00d8fa5cd8e78438012ca3b3
|
d1db2d004f989c89d7d7b599a79be73485d15154
|
/backend/home/migrations/0001_load_initial_data.py
|
1d7f53afab7b8c07ccd47a313d03c168330d2153
|
[] |
no_license
|
crowdbotics-apps/dry-glitter-29203
|
79161318f4bc536b1b69e07dfc592f19f4056ce5
|
ee14380afe72369a0e7306b5954885f675493020
|
refs/heads/master
| 2023-06-25T06:39:47.076869
| 2021-07-26T19:47:29
| 2021-07-26T19:47:29
| 389,750,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    """Seed (or refresh) the default ``django.contrib.sites`` Site row.

    Used as a RunPython callable; *apps* is the historical app registry,
    *schema_editor* is unused.
    """
    site_model = apps.get_model("sites", "Site")
    domain = "dry-glitter-29203.botics.co"
    defaults = {"name": "Dry Glitter"}
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: seed the default Site row once the sites app exists."""

    # Must run after the sites app's schema is in place.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    # No reverse operation: re-running forward is idempotent via
    # update_or_create in create_site.
    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b2417e1d6d1bbde80e013a3d3c5c891f70809f47
|
38422c3edeb269926502fed31a0761aff8dd3d3b
|
/Si_and_InGaAs_detectors/Calibration_2Mar_2015/IGA22030TC_caldata_for_python/run_DSS-IGA22030TC_cal_data.py
|
77487933e19dc4dedfd57eaf2f4694e28cecc1ca
|
[] |
no_license
|
vfurtula/Alle-projekter
|
2dab3ccbf7ddb6be3ee09f9f5e87085f354dd84a
|
da3d7c9611088043e2aea5d844f1ae6056215e04
|
refs/heads/master
| 2022-06-07T05:17:35.327228
| 2020-04-30T10:28:48
| 2020-04-30T10:28:48
| 260,180,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,815
|
py
|
## Import libraries
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial import polynomial as P
from scipy import interpolate
## For Matplotlib fonts
from matplotlib import rc
## for LaTeX style use:
rc("text", usetex=True)
rc("font", family="serif")
######################################################
# Create folder structure for intput and output data #
######################################################
class Get_DSS_IGA22030TC_new:
    """Reader for the JV (Justervesenet) calibration files, new format.

    The file has 7 header lines followed by one number per line; the
    numbers repeat in groups of five:
    wavelength, responsivity @ -30C, its error [%],
    responsivity @ +23C, its error [%].
    """

    _HEADER_LINES = 7   # lines to skip before the data stream
    _GROUP = 5          # values per record

    def __init__(self, my_string):
        """Read *my_string* and collect the first column of every data line.

        Args:
            my_string: path to the calibration text file.
        """
        self.all_data = []
        # 'with' guarantees the handle is closed even when float() raises;
        # the original leaked the open file on a parse error.
        with open(my_string, 'r') as f2:
            for _ in range(self._HEADER_LINES):
                f2.readline()
            for line in f2:
                columns = line.strip().split()
                self.all_data.append(float(columns[0]))
        # Per-series results, filled by getdata().
        # (The *_indx bookkeeping lists of the original are gone — records
        # are now sliced directly out of all_data.)
        self.wl = []
        self.res_m30C = []
        self.err_m30C = []
        self.res_p23C = []
        self.err_p23C = []

    def getdata(self):
        """Split the flat value stream into its five per-record series.

        Returns:
            Tuple of parallel lists
            ``(wl, res_m30C, err_m30C, res_p23C, err_p23C)``.

        An incomplete trailing group is ignored (BUG FIX: the original's
        index lists went out of sync on truncated files and getdata()
        raised IndexError).
        """
        for base in range(0, len(self.all_data) - self._GROUP + 1, self._GROUP):
            wl_v, rm_v, em_v, rp_v, ep_v = self.all_data[base:base + self._GROUP]
            self.wl.append(wl_v)
            self.res_m30C.append(rm_v)
            self.err_m30C.append(em_v)
            self.res_p23C.append(rp_v)
            self.err_p23C.append(ep_v)
        return self.wl, self.res_m30C, self.err_m30C, self.res_p23C, self.err_p23C
class Get_DSS_IGA22030TC_old:
    """Reader for the HORIBA calibration files, old 3-column format.

    The file has 3 header lines followed by rows of
    ``wavelength  responsivity@+23C  responsivity@-30C``.
    """

    def __init__(self, my_string):
        """Parse *my_string* into the three parallel series.

        Args:
            my_string: path to the calibration text file.
        """
        self.wl = []
        self.res_m30C = []
        self.res_p23C = []
        # 'with' guarantees the handle is closed even when float() raises;
        # the original leaked the open file on a parse error.
        with open(my_string, 'r') as f2:
            for _ in range(3):   # skip the header
                f2.readline()
            for line in f2:
                columns = line.strip().split()
                self.wl.append(float(columns[0]))
                # File column order is: wl, +23C, -30C.
                self.res_m30C.append(float(columns[2]))
                self.res_p23C.append(float(columns[1]))

    def getdata(self):
        """Return ``(wl, res_m30C, res_p23C)`` as parallel lists."""
        return self.wl, self.res_m30C, self.res_p23C
if __name__ == "__main__":
out_data1=Get_DSS_IGA22030TC_new('Kalibrering_DSS-IGA22030TC.txt').getdata()
out_data2=Get_DSS_IGA22030TC_old('Kalibrering_DSS-IGA22030TC_HORIBA.txt').getdata()
out_data3=Get_DSS_IGA22030TC_new('Kalibrering_DSS-IGA22030TC_crosscheck.txt').getdata()
'''
coef_first = P.polyfit(true_val,Ard_baud_23040,1)
#print "polyfit coef = ", coef
a1=coef_first[1]
b1=coef_first[0]
val_first = [a1*i+b1 for i in true_val]
coef_second = P.polyfit(true_val,Ard_baud_23040,2)
#print "polyfit coef = ", coef
a2=coef_second[2]
b2=coef_second[1]
c2=coef_second[0]
val_second = [a2*i**2+b2*i+c2 for i in true_val]
delta1=[]
delta2=[]
for i in range(len(true_val)):
if i==0:
delta1.extend([ 1 ])
delta2.extend([ 1 ])
else:
delta1.extend([ Ard_baud_23040[i]/true_val[i] ])
delta2.extend([ Ard_baud_230400[i]/true_val[i] ])
f2.close()
'''
# Plot the results
plt.figure(1, figsize=(18,12))
plt.plot(out_data1[0],out_data1[1],'b-',label="-30C, JV, 5 nm step (supply 041004-1)")
up_err_m30=[out_data1[1][i]*(1+out_data1[2][i]/100) for i in range(len(out_data1[1]))]
do_err_m30=[out_data1[1][i]*(1-out_data1[2][i]/100) for i in range(len(out_data1[1]))]
plt.fill_between(out_data1[0], up_err_m30, do_err_m30, facecolor='blue', alpha=0.3)
plt.plot(out_data1[0],out_data1[3],'r-',label="+23C, JV, 5 nm step (supply 041004-1)")
up_err_p23=[out_data1[3][i]*(1+out_data1[4][i]/100) for i in range(len(out_data1[3]))]
do_err_p23=[out_data1[3][i]*(1-out_data1[4][i]/100) for i in range(len(out_data1[3]))]
plt.fill_between(out_data1[0], up_err_p23, do_err_p23, facecolor='red', alpha=0.3)
plt.plot(out_data2[0],out_data2[1],'b--',label="-30C, HORIBA, Oct 2010 (supply unknown)")
plt.plot(out_data2[0],out_data2[2],'r--',label="+23C, HORIBA, Oct 2010 (supply unknown)")
###
plt.plot(out_data3[0],out_data3[1],'bx-',label="-30C, JV, 100 nm step (supply 031113-2)")
#up_err_m30=[out_data3[1][i]*(1+out_data3[2][i]/100) for i in range(len(out_data3[1]))]
#do_err_m30=[out_data3[1][i]*(1-out_data3[2][i]/100) for i in range(len(out_data3[1]))]
#plt.fill_between(out_data3[0], up_err_m30, do_err_m30, facecolor='yellow', alpha=0.3)
plt.plot(out_data3[0],out_data3[3],'rx-',label="+23C, JV, 100 nm step (supply 031113-2)")
#up_err_p23=[out_data3[3][i]*(1+out_data3[4][i]/100) for i in range(len(out_data3[3]))]
#do_err_p23=[out_data3[3][i]*(1-out_data3[4][i]/100) for i in range(len(out_data3[3]))]
#plt.fill_between(out_data3[0], up_err_p23, do_err_p23, facecolor='green', alpha=0.3)
plt.xlabel("Wavelength [nm]", fontsize=20)
plt.ylabel("Responsivity [A/W]", fontsize=20)
plt.tick_params(axis="both", labelsize=20)
plt.title('Calibration of DSS-IGA22030TC (Serial No. 021147) at Justervesenet (JV), Jan 2015')
#plt.yticks( np.linspace(0,1,11) )
#plt.xticks( np.linspace(0,11000,12) )
#plt.ylim([0,1])
#plt.xlim([0,11000])
l=plt.legend(loc=2, fontsize=15)
l.draw_frame(False)
plt.savefig('DSS-IGA22030TC_calplots.pdf')
plt.show()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
bab675eb57306cc67946459ee039be109cb91a15
|
810ce1c1ac47743e253171ec7541c0e431d952c2
|
/standard_library/Concurrency/Subprocess/subprocess_popen.py
|
d43d2ee87874e36321a9f663df2059047f4a48f9
|
[] |
no_license
|
hjlarry/practise-py
|
91052c25dc7ab706c6234f6d657db76667a27124
|
871e06b9652d356f55e3888f1f7ea180ac2b1954
|
refs/heads/master
| 2022-09-11T17:47:48.557194
| 2022-08-10T02:07:24
| 2022-08-10T02:07:24
| 136,263,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# Demo script: the main subprocess.Popen communication patterns.
# NOTE(review): relies on Unix tools (echo/cat/grep/cut) and on the files
# subprocess.ipynb and repeater.py existing next to this script.
import subprocess
import io

# 1. One-way communication with a process.
print("一、 与进程单向通信")
print("read:")
proc = subprocess.Popen(["echo", "to stdout"], stdout=subprocess.PIPE)
# proc = subprocess.Popen(["ls", "-l"], stdout=subprocess.PIPE)
# communicate() returns (stdout_bytes, stderr_bytes).
value = proc.communicate()
print(value)
stdout_value = value[0].decode("utf-8")
print(stdout_value)

print("write:")
proc = subprocess.Popen(["cat", "-"], stdin=subprocess.PIPE)
proc.communicate("stdin:sth".encode("utf-8"))
print()
print()

# 2. Two-way communication: capture stdout and stderr separately.
print("二、 与进程双向通信:")
proc = subprocess.Popen(
    'cat -; echo "to stderr" 1>&2',
    shell=True,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
msg = "through stdin to stdout".encode("utf-8")
stdout_value, stderr_value = proc.communicate(msg)
print(stdout_value)
print(stderr_value)
print()

# 3. Chaining pipes between processes.
print("三、 管道连接:")
# Equivalent to: $ cat signal.ipynb | grep "def" | cut -b -30
cat = subprocess.Popen(["cat", "subprocess.ipynb"], stdout=subprocess.PIPE)
grep = subprocess.Popen(["grep", "def"], stdin=cat.stdout, stdout=subprocess.PIPE)
cut = subprocess.Popen(["cut", "-b", "-30"], stdin=grep.stdout, stdout=subprocess.PIPE)
for line in cut.stdout:
    print(line)
print()

# 4. Interacting with another interpreter process.
print("四、 与另一个命令行去交互:")
print("one line at a time:")
proc = subprocess.Popen(
    "python3 repeater.py", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
# line_buffering=True flushes after each write so the child sees each line.
stdin = io.TextIOWrapper(proc.stdin, encoding="utf-8", line_buffering=True)
stdout = io.TextIOWrapper(proc.stdout, encoding="utf-8")
for i in range(5):
    line = f"{i} \n"
    stdin.write(line)
    output = stdout.readline()
    print(output)
remainder = proc.communicate()[0].decode("utf-8")
print(remainder)
print()

print("All line at a time:")
proc = subprocess.Popen(
    "python3 repeater.py", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
stdin = io.TextIOWrapper(proc.stdin, encoding="utf-8")
stdout = io.TextIOWrapper(proc.stdout, encoding="utf-8")
for i in range(5):
    line = f"{i} \n"
    stdin.write(line)
# Flush everything at once; output is drained by communicate() below.
stdin.flush()
remainder = proc.communicate()[0].decode("utf-8")
print(remainder)
|
[
"hjlarry@163.com"
] |
hjlarry@163.com
|
3e2a1a8413800f85f2a56ec57d1eb41f78af3a63
|
926b3c52070f6e309567c8598248fd5c57095be9
|
/src/mmdeploy/mmdeploy/codebase/mmcls/models/heads/multi_label_head.py
|
7a5d63375aa42db93fccdde1904c7945f465bc96
|
[
"Apache-2.0"
] |
permissive
|
fengbingchun/PyTorch_Test
|
410f7cd2303707b0141d433fb9d144a961e1f4c8
|
df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348
|
refs/heads/master
| 2023-05-23T16:42:29.711338
| 2023-03-25T11:31:43
| 2023-03-25T11:31:43
| 167,339,907
| 15
| 4
| null | 2023-03-25T11:31:45
| 2019-01-24T09:24:59
|
C++
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.core import FUNCTION_REWRITER


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmcls.models.heads.MultiLabelClsHead.post_process')
def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):
    """Rewrite `post_process` of MultiLabelClsHead for default backend.

    Rewrite this function to directly return pred.

    Args:
        ctx (ContextCaller): The context with additional information.
        self: The instance of the original class.
        pred (Tensor): Predict result of model.

    Returns:
        pred (Tensor): Result of MultiLabelClsHead. The tensor
            shape (batch_size,num_classes).
    """
    # Deliberate no-op: during deployment the head's Python-side
    # post-processing is skipped and the raw tensor is exported as-is.
    return pred
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
090ec55ee38d15f5b51f50a928495db00fce01bc
|
048c6b84e679a3e81bf7b4980ad2b4a99781b9b7
|
/quantarhei/core/implementations.py
|
c2fb6b38c5bfa8bd44a0d92d5b3f9187fecc4b07
|
[] |
no_license
|
saayeh/quantarhei
|
9b7a7c60e1325ef783bdbc9ac4b6f33a13301802
|
b77a41272b7df0ccbcde2710bf04bf412c126a6f
|
refs/heads/master
| 2020-12-07T06:29:27.954470
| 2017-09-01T21:09:45
| 2017-09-01T21:09:45
| 66,932,421
| 0
| 0
| null | 2016-08-30T10:52:11
| 2016-08-30T10:52:11
| null |
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
# -*- coding: utf-8 -*-
from functools import wraps
import os
from importlib import import_module
from .managers import Manager
def implementation(package="",
taskname="",
at_runtime=False,
fallback_local=False,
always_local=False):
"""Decorator to select numerical implememtation
"""
m = Manager()
def decorate_at_runtime(func):
"""Decoration at run time
The wrapper decides which function to return at runtime.
"""
@wraps(func)
def wrapper(*arg,**kwargs):
fc = get_function(func,package,taskname,
default_local=fallback_local,
always_local=always_local)
return fc(*arg,**kwargs)
return wrapper
def decorate_at_loadtime(func):
"""Decoration at load time
The wrapper decides which function to return when the Manager module
is loaded, i.e. at the start of the application.
"""
fc = get_function(func,package,taskname,
default_local=fallback_local,
always_local=always_local)
@wraps(func)
def wrapper(*arg,**kwargs):
return fc(*arg,**kwargs)
return wrapper
if (at_runtime and m.change_implementation_at_runtime):
return decorate_at_runtime
else:
return decorate_at_loadtime
#
# Auxiliary function
#
def load_function(lib, fce):
    """Import module *lib* and return its attribute *fce*.

    Raises:
        Exception: when the imported module does not define *fce*.
    """
    module = import_module(lib)
    if not hasattr(module, fce):
        raise Exception("Cannot reach implementation of %s " % fce)
    return getattr(module, fce)
def get_function(func, package, taskname, default_local, always_local):
    """Decide which implementation of *taskname* to use.

    Resolution order:
      1. *func* itself when ``always_local`` is set;
      2. the implementation configured by the Manager;
      3. the default pure-Python implementation package (or *func* when
         ``default_local`` is set);
      4. *func* as the last resort.

    Args:
        func: the decorated (local Python) function.
        package: implementation sub-package name.
        taskname: function name to look up inside the package.
        default_local: fall back on *func* instead of the default package.
        always_local: skip all lookup and return *func*.

    Returns:
        The callable to execute.
    """
    if always_local:
        return func

    m = Manager()

    # default implementation package
    default_imp_prefix = "quantarhei.implementations.python"

    # decide which implementation prefix is configured
    imp_prefix = m.get_implementation_prefix(package=package,
                                             taskname=taskname)

    # Try the configured implementation first.
    # BUG FIX: the original used bare 'except:' clauses, which also
    # swallowed SystemExit/KeyboardInterrupt; catch Exception instead.
    try:
        return load_function(imp_prefix + "." + package, taskname)
    except Exception:
        pass

    try:
        # fall back on pure Python implementation
        if default_local:
            fc = func
        else:
            fc = load_function(default_imp_prefix + "." + package, taskname)
        # FIXME: issue a warning
        print("WARNING: import failed, falling back on pure Python")
        return fc
    except Exception:
        # do not provide implementation, call the decorated function itself
        # FIXME: issue a warning (this is an unwanted result)
        return func
|
[
"tmancal74@gmail.com"
] |
tmancal74@gmail.com
|
68bacba70b10cde713891d28ded05e5009dbe565
|
1e013dc5f0de0f61e27f2867557803a01c01f4da
|
/Language/python/module/pybluez/rfcomm-client.py
|
87fe97bf58fe5eb0164caa0cd0eaa19a542ffacb
|
[] |
no_license
|
chengyi818/kata
|
a2941ce8675c6e7a47169a0eae4c757d3f6f5bf9
|
a7cb7ad499037bcc168aaa0eaba857b33c04ef14
|
refs/heads/master
| 2023-04-10T18:39:09.518433
| 2023-01-08T15:22:12
| 2023-01-08T15:22:12
| 53,040,540
| 1
| 0
| null | 2023-03-25T00:46:51
| 2016-03-03T10:06:58
|
C++
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
import bluetooth

# RFCOMM client demo: connect to a fixed peer device and send one message.
# NOTE(review): the Bluetooth MAC address and channel are hard-coded —
# adjust them to the target server before running.
server_address = "00:1A:7D:DA:71:11"
port = 1

sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((server_address, port))
sock.send("hello world!")
sock.close()
|
[
"chengyi818@foxmail.com"
] |
chengyi818@foxmail.com
|
8cc11edbf4514684f0ccebeb30a0086a8925dce2
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_21649.py
|
40532dcb7c23f6e6d1294bf9a3247202883f3fe7
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# How to use regular expressions to only capture a word by itself rather than in another word?
import re
# Python 2 syntax ('print' statement). re.subn returns (new_string, count);
# anchoring with 'Co$' removes "Co" only at the end of the string, leaving
# the "Co" inside "Company" untouched.
print re.subn('Co$','',"Company & Co")
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
02274a6e349253d379c133717b79435475122281
|
006ff11fd8cfd5406c6f4318f1bafa1542095f2a
|
/SimG4CMS/Calo/test/python/runWithGun_cfg.py
|
bd29197ff9ce3cb18bf543be313d6be973a0ff76
|
[] |
permissive
|
amkalsi/cmssw
|
8ac5f481c7d7263741b5015381473811c59ac3b1
|
ad0f69098dfbe449ca0570fbcf6fcebd6acc1154
|
refs/heads/CMSSW_7_4_X
| 2021-01-19T16:18:22.857382
| 2016-08-09T16:40:50
| 2016-08-09T16:40:50
| 262,608,661
| 0
| 0
|
Apache-2.0
| 2020-05-09T16:10:07
| 2020-05-09T16:10:07
| null |
UTF-8
|
Python
| false
| false
| 6,352
|
py
|
# CMSSW configuration: single-particle-gun simulation with Geant4 hit
# studies (SimG4CMS/Calo). Executed by cmsRun, not as a plain script.
import FWCore.ParameterSet.Config as cms

process = cms.Process("PROD")
# Geometry, magnetic field and the Geant4 simulation sequence.
process.load("SimG4CMS.Calo.pythiapdt_cfi")
#process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryAPD1XML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.load("SimG4CMS.Calo.CaloSimHitStudy_cfi")

# Per-category message verbosity: most Geant4/sim categories silenced,
# a few kept unlimited for debugging.
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cout'),
    categories = cms.untracked.vstring('CaloSim', 'EcalGeom', 'EcalSim',
                                       'HCalGeom', 'HcalSim', 'HFShower',
                                       'SimG4CoreApplication', 'HitStudy',
                                       'G4cout', 'G4cerr', 'SimTrackManager'),
#    debugModules = cms.untracked.vstring('*'),
    cout = cms.untracked.PSet(
#        threshold = cms.untracked.string('DEBUG'),
        INFO = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        DEBUG = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        G4cerr = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        G4cout = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        SimTrackManager = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        SimG4CoreApplication = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HitStudy = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        CaloSim = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        EcalGeom = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        EcalSim = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        HCalGeom = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HFShower = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        HcalSim = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        )
    )
)

# Fixed random seeds so runs are reproducible.
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2)
)

process.source = cms.Source("EmptySource",
    firstRun = cms.untracked.uint32(1),
    firstEvent = cms.untracked.uint32(1)
)

# Particle gun: single pi+ (PDG 211), flat in eta/phi, fixed pT = 100 GeV.
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(211),
        MinEta = cms.double(-3.0),
        MaxEta = cms.double(3.0),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinPt = cms.double(100.),
        MaxPt = cms.double(100.)
    ),
    Verbosity = cms.untracked.int32(0),
    AddAntiParticle = cms.bool(False)
)

process.o1 = cms.OutputModule("PoolOutputModule",
    process.FEVTSIMEventContent,
    fileName = cms.untracked.string('simevent_QGSP_FTFP_BERT_EML.root')
)

# Bookkeeping services: timing, memory profiling, module tracing.
process.Timing = cms.Service("Timing")

process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
    oncePerEventMode = cms.untracked.bool(True),
    showMallocInfo = cms.untracked.bool(True),
    dump = cms.untracked.bool(True),
    ignoreTotal = cms.untracked.int32(1)
)

process.Tracer = cms.Service("Tracer")

process.TFileService = cms.Service("TFileService",
    fileName = cms.string('runWithGun_QGSP_FTFP_BERT_EML.root')
)

# Shared track-time cut parameters, reused by Stacking/Stepping actions.
process.common_maximum_timex = cms.PSet(
    MaxTrackTime = cms.double(1000.0),
    MaxTimeNames = cms.vstring(),
    MaxTrackTimes = cms.vdouble()
)

process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits*process.caloSimHitStudy)
process.outpath = cms.EndPath(process.o1)

process.caloSimHitStudy.MaxEnergy = 1000.0
#process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.MonopoleCharge = 1
process.g4SimHits.Physics.Verbosity = 0
process.g4SimHits.CaloSD.UseResponseTables = [1,1,0,1]
process.g4SimHits.CaloSD.EminHits[0] = 0
process.g4SimHits.ECalSD.StoreSecondary = True
process.g4SimHits.CaloTrkProcessing.PutHistory = True
process.g4SimHits.CaloResponse.UseResponseTable = True
process.g4SimHits.CaloResponse.ResponseScale = 1.0
process.g4SimHits.CaloResponse.ResponseFile = 'SimG4CMS/Calo/data/responsTBpim50.dat'
process.g4SimHits.G4Commands = ['/run/verbose 2']
# NOTE(review): process.common_heavy_suppression is not defined in this
# file — presumably provided by one of the loaded cfi/cff fragments; verify.
process.g4SimHits.StackingAction = cms.PSet(
    process.common_heavy_suppression,
    process.common_maximum_timex,
    KillDeltaRay = cms.bool(True),
    TrackNeutrino = cms.bool(False),
    KillHeavy = cms.bool(False),
    SaveFirstLevelSecondary = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInTracker = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInCalo = cms.untracked.bool(True),
    SavePrimaryDecayProductsAndConversionsInMuon = cms.untracked.bool(True)
)
process.g4SimHits.SteppingAction = cms.PSet(
    process.common_maximum_timex,
    KillBeamPipe = cms.bool(False),
    CriticalEnergyForVacuum = cms.double(0.0),
    CriticalDensity = cms.double(1e-15),
    EkinNames = cms.vstring(),
    EkinThresholds = cms.vdouble(),
    EkinParticles = cms.vstring(),
    Verbosity = cms.untracked.int32(2)
)
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
    CheckForHighEtPhotons = cms.untracked.bool(False),
    TrackMin = cms.untracked.int32(0),
    TrackMax = cms.untracked.int32(0),
    TrackStep = cms.untracked.int32(1),
    EventMin = cms.untracked.int32(0),
    EventMax = cms.untracked.int32(0),
    EventStep = cms.untracked.int32(1),
    PDGids = cms.untracked.vint32(),
    VerboseLevel = cms.untracked.int32(0),
    G4Verbose = cms.untracked.bool(True),
    DEBUG = cms.untracked.bool(False),
    type = cms.string('TrackingVerboseAction')
))
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
f1e2287dae490a131bbd72f576a927f9b633b777
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_25/models/file_system_response.py
|
c255a82c3b1ae1924c1debaa8c376982be89c983
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,913
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class FileSystemResponse(object):
    """
    Swagger-generated response wrapper holding a list of `FileSystem` items.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    swagger_types = {
        'items': 'list[FileSystem]'
    }

    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.FileSystem]
    ):
        """
        Keyword args:
            items (list[FileSystem]): Displays a list of all items after filtering. If applicable, the values are displayed for each name.
        """
        # Only set the attribute when a value was supplied; unset attributes
        # later read as "missing" via __getattribute__ below.
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject writes to anything that is not a declared model attribute.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Property placeholders mark attributes that were never populated;
        # surface them as AttributeError so hasattr() reports them absent.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        # Dict-style deletion, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists of models, and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(FileSystemResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FileSystemResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
d4f9618477330f0db7a60c5a90a8a20f134850ae
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_360/ch27_2020_03_30_19_57_47_608648.py
|
c67301045edffce312884d90b151fd1e85029789
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# Keep asking until the user answers 'não' (no).
tem_duvida = True
while tem_duvida:
    perg = input("Tem dúvidas?")
    if perg != 'não':
        print("Pratique mais")
    else:
        print('Até a próxima')
        # Fix: the original assigned to a misspelled variable (`tem_duvidas`),
        # so the loop condition never became False and the loop never ended.
        tem_duvida = False
|
[
"you@example.com"
] |
you@example.com
|
57a81a6f705289723249fb0b09e8a065b08ab8cf
|
5fbf2adec8d7647b9aeefa51695aa3f13ee57810
|
/server/util/ah_handlers.py
|
455a1dd878527b50e58dde3861598691f56b2737
|
[] |
no_license
|
angelacantfly/dancedeets-monorepo
|
8bb6579f6f5d30e88c8d4c0e239c6c8fed678094
|
6b7a48d91d0737010acd9e08a89d99c2c982205a
|
refs/heads/master
| 2021-01-20T09:14:22.613044
| 2017-08-26T21:48:14
| 2017-08-26T21:48:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
import webapp2
import app
@app.route('/_ah/start')
class StartHandler(webapp2.RequestHandler):
    # App Engine lifecycle hook: GET /_ah/start fires when an instance starts.
    # Nothing to warm up, so reply with an empty 200.
    def get(self):
        pass
@app.route('/_ah/stop')
class StopHandler(webapp2.RequestHandler):
    # App Engine lifecycle hook: GET /_ah/stop fires before an instance is
    # shut down. No cleanup needed, so reply with an empty 200.
    def get(self):
        pass
|
[
"mlambert@gmail.com"
] |
mlambert@gmail.com
|
ffc1de01b564f7729799b45337e5d8ae9fbb92ee
|
03330fc41b226e3b597676944b335a77f1979965
|
/examples/using_xref.py
|
05042801a9b20fdce5800a420dcd161fb80fed47
|
[
"MIT"
] |
permissive
|
ols3er/ezdxf
|
b00076742022b21118d3645685205fbdae419b38
|
a01ed68ea45f25a231e470d239aefed73ab285d5
|
refs/heads/master
| 2020-05-29T16:57:18.235926
| 2019-02-24T03:41:09
| 2019-02-24T03:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Copyright (c) 2018 Manfred Moitzi
# License: MIT License
import ezdxf
# AutoCAD 2010 can not resolve XREFS in DXF R12 Format :-(,
# Referenced drawing: a circle with its insertion base point at the center.
ref_dwg = ezdxf.new('R2000')
ref_dwg.modelspace().add_circle(center=(5, 5), radius=2.5)
ref_dwg.header['$INSBASE'] = (5, 5, 0) # set insertion point
ref_dwg.saveas("xref_drawing.dxf")
# XREF definition
# Host drawing: register the external reference and place it at the origin
# via a block reference with the XREF's name.
host_dwg = ezdxf.new('R2000')
host_dwg.add_xref_def(filename='xref_drawing.dxf', name='my_xref')
host_dwg.modelspace().add_blockref(name='my_xref', insert=(0, 0))
host_dwg.saveas("using_xref.dxf")
|
[
"mozman@gmx.at"
] |
mozman@gmx.at
|
3119fae3fe1aadaa71c5cae9f1576b38a7c3afc3
|
f68eda51246c95597def569224f3b56d4c3700e7
|
/top/api/rest/PromotionLimitdiscountGetRequest.py
|
4b9ebc258b2a7047be00899b998a81697c8c960a
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
stoensin/taobao-openapi
|
47de8fb29ae2d8ce47d4fce07c0ccaeaee1ef91f
|
202a9df2085229838541713bd24433a90d07c7fc
|
refs/heads/main
| 2023-07-17T02:17:51.527455
| 2021-08-25T15:08:49
| 2021-08-25T15:08:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
'''
Created by auto_sdk on 2018.11.10
'''
from top.api.base import RestApi
class PromotionLimitdiscountGetRequest(RestApi):
    """Request object for the taobao.promotion.limitdiscount.get TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Query filters; all optional and unset (None) by default.
        for field in ('end_time', 'limit_discount_id', 'page_number',
                      'start_time', 'status'):
            setattr(self, field, None)

    def getapiname(self):
        # API method name used by the TOP gateway to dispatch this request.
        return 'taobao.promotion.limitdiscount.get'
|
[
"samzong.lu@gmail.com"
] |
samzong.lu@gmail.com
|
c61c33ed11cd6124e71d682034e6e67551e279fc
|
7d84000f2977def7118b4c93a47b9d71c4ee38f8
|
/app/src/dbi.py
|
f5871ef0d1e50dfc9ad71010786307b81ee1d8cb
|
[] |
no_license
|
tensorci/core
|
d405d17099987163dfc589711345ce414ace406e
|
50d18bb43f73b1d5d47fefad543c2554e87a6520
|
refs/heads/master
| 2021-03-19T13:27:26.219591
| 2020-12-03T01:14:57
| 2020-12-03T01:14:57
| 110,917,313
| 0
| 0
| null | 2020-12-03T01:15:26
| 2017-11-16T03:20:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,899
|
py
|
"""
Postgres Database Interface providing the following helper methods:
find_one
find_all
update
create
destroy
undestroy
delete
* Destroy-ing is the same as "soft" deleting a record...it will simply set the is_destroyed column to True
for a record. The helper methods used for querying the DB are automatically scoped to include is_destroyed=False
for a given query. One can simply pass in unscoped=True to these query helper methods to find ALL records for a model,
regardless of is_destroyed status. NOTE: If a table does NOT have an is_destroyed column on it, calling destroy
is the same as calling delete, and the record will be completely removed from the database.
Usage Examples:
user = dbi.create(User, {'email': 'my_email@email.com'})
dbi.update(user, {'email': 'my_updated_email@email.com'})
dbi.destroy(user)
"""
from src import db
# Column used for soft-deleting models
IS_DESTROYED = 'is_destroyed'
def find_one(model, params={}, unscoped=False):
  """
  Find the first record of a database model per specified query params
  :param model: (required) model class to query (check models.py)
  :param params: (optional) dict of params to query model with
  :param unscoped: (optional) whether to gather ALL query results, regardless of model's is_destroyed status
  :return: first model instance returned from DB query
  """
  # Fix: copy before injecting the soft-delete filter. The original wrote
  # is_destroyed=False directly into `params`, mutating both the caller's
  # dict and the shared mutable default argument.
  params = dict(params)
  if hasattr(model, IS_DESTROYED) and not params.get(IS_DESTROYED) and not unscoped:
    params[IS_DESTROYED] = False

  return db.session.query(model).filter_by(**params).first()
def find_all(model, params={}, unscoped=False):
  """
  Find ALL records of a database model per specified query params
  :param model: (required) model class to query (check models.py)
  :param params: (optional) dict of params to query model with
  :param unscoped: (optional) whether to gather ALL query results, regardless of model's is_destroyed status
  :return: list of model instances
  """
  # Partition params: sequence values become IN-clauses, everything else
  # is an exact equality filter.
  exact_params = {}
  list_params = {}
  for field, value in params.items():
    if type(value).__name__ in ('list', 'tuple'):
      list_params[field] = tuple(value)
    else:
      exact_params[field] = value

  # Scope out soft-deleted rows unless the caller explicitly opted out.
  if hasattr(model, IS_DESTROYED) and not exact_params.get(IS_DESTROYED) and not unscoped:
    exact_params[IS_DESTROYED] = False

  query = db.session.query(model).filter_by(**exact_params)
  for field, values in list_params.items():
    query = query.filter(getattr(model, field).in_(values))

  return query.all()
def update(model_instance, params={}):
  """
  Update a model instance with new params
  :param model_instance: (required) model instance to update
  :param params: (optional) dict of params to update model with
  :return: the updated model instance
  """
  # Fix: plain loop instead of a throwaway list comprehension that was
  # used only for its setattr side effects.
  for field, value in params.items():
    setattr(model_instance, field, value)

  db.session.commit()
  return model_instance
def create(model, params={}):
  """
  Create a model and save a new record for specified model class and params
  :param model: (required) model class to create new record for
  :param params: (model-dependent) dict of params to create model with
  :return: the created model instance
  """
  # Instantiate, stage, and persist in one commit.
  instance = model(**params)
  db.session.add(instance)
  db.session.commit()
  return instance
def upsert(model, params={}, unscoped=False):
  """
  Update model if already exists. Create new one if not.
  :param model: (required) model class to upsert new record for
  :param params: (model-dependent) dict of params to upsert model with
  :return: tuple --> (model_instance, is_new)
  """
  # Query with a copy so find_one cannot touch the caller's dict.
  existing = find_one(model, dict(params), unscoped=unscoped)

  if existing:
    return existing, False

  return create(model, params), True
def destroy(model_instance):
  """
  "Soft" delete a model instance (if allowed); otherwise, hard delete it.
  :param model_instance: (required) model instance to soft delete
  :return: (boolean) whether the model instance was successfully soft deleted
  """
  if hasattr(model_instance, IS_DESTROYED):
    # Soft delete: flip the flag and persist.
    setattr(model_instance, IS_DESTROYED, True)
    db.session.commit()
    return True

  # Model has no soft-delete column -- remove the row outright.
  return delete(model_instance)
def undestroy(model_instance):
  """
  Undestroy a model instance
  :param model: (required) model instance to undestroy
  :return: (boolean) whether the model instance was successfully undestroyed
  """
  if hasattr(model_instance, IS_DESTROYED):
    # Clear the soft-delete flag and persist.
    setattr(model_instance, IS_DESTROYED, False)
    db.session.commit()
    return True

  # Not a soft-deletable model; nothing to undo.
  return False
def delete(model_instance):
  """
  Hard delete a model instance
  :param model_instance: (required) model instance to hard delete
  :return: (boolean) whether the model instance was successfully hard deleted
  """
  # Remove the row and flush the transaction.
  session = db.session
  session.delete(model_instance)
  session.commit()
  return True
|
[
"benwhittle31@gmail.com"
] |
benwhittle31@gmail.com
|
17539ecb89461a97e039d325bef834b78d08259b
|
f415dd840e150a0ada86bc8b7c54f8d1c301e314
|
/tests/helpers.py
|
694db0ecd5e61ceb7f8490a25316267d22ec46a9
|
[
"WTFPL"
] |
permissive
|
Feuermurmel/venv_cli
|
5c3680150f8c54fbbb4e5c36b3d609695b1b1104
|
87b5185d11ab4d6f66b8dd76533ab405f820ad97
|
refs/heads/master
| 2021-01-10T17:19:09.017138
| 2016-02-25T22:09:52
| 2016-02-25T22:09:52
| 51,231,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,678
|
py
|
import os, subprocess, sys, contextlib, pkgutil, tempfile, pytest
class RunResult:
    """Outcome of a single shell invocation: exit status plus captured streams."""

    def __init__(self, returncode: int, stdout: str, stderr: str):
        # Plain value object; attributes mirror the constructor arguments.
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
class Workspace:
    """
    Allows executing commands and checking conditions in a temporary directory.

    Commands run through a non-interactive bash with HOME pointed at a
    scratch directory so the user's real shell config cannot leak in.
    """
    def __init__(self, dir):
        # Separate working directory and fake HOME, both under `dir`.
        self.cwd = os.path.join(dir, 'cwd')
        self.home = os.path.join(dir, 'home')
        os.mkdir(self.cwd)
        os.mkdir(self.home)
    def _run_commands(self, lines):
        # Run `lines` as a bash script; returns a RunResult with exit
        # status and both captured streams (also echoed to our own stdio).
        environ = dict(os.environ)
        environ['HOME'] = os.path.abspath(self.home)
        process = subprocess.Popen(
            ['bash'],
            cwd = self.cwd,
            stdin = subprocess.PIPE,
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            env = environ)
        input = ''.join(i + '\n' for i in lines).encode()
        out, err = process.communicate(input)
        sys.stdout.buffer.write(out)
        sys.stderr.buffer.write(err)
        # We expect all output to be valid UTF-8, mainly because all output should be ASCII.
        return RunResult(process.returncode, out.decode(), err.decode())
    def run(self, *lines, expect_error = False, expect_stdout_contains = '', expect_stderr_contains = ''):
        """
        Runs the specified commands by piping them into a non-interactive bash process.

        Asserts on the exit status (inverted when expect_error is set) and
        on substring presence in stdout/stderr.
        """
        def iter_lines():
            yield 'set -e'
            for i in lines:
                yield i
                # Enable errexit whenever a new shell session might have been started.
                if i.split()[0] == 'venv':
                    yield 'set -e'
        result = self._run_commands(list(iter_lines()))
        if expect_error:
            assert result.returncode
        else:
            assert not result.returncode
        assert expect_stdout_contains in result.stdout
        assert expect_stderr_contains in result.stderr
        return result
    def check_venv(self, path = 'venv', *, exists = True):
        # A venv "exists" if sourcing its activate script sets $VIRTUAL_ENV.
        if exists:
            self.run(
                '. {}/bin/activate'.format(path),
                '[ "$VIRTUAL_ENV" ]')
        else:
            self.run(
                '! [ -e venv ]')
    def create_file(self, path, content : str = ''):
        # Write `content` to a file inside the workspace's cwd.
        with open(os.path.join(self.cwd, path), 'w', encoding = 'utf-8') as file:
            file.write(content)
    def create_dir(self, path):
        # Create a (possibly nested) directory inside the workspace's cwd.
        os.makedirs(os.path.join(self.cwd, path), exist_ok = True)
    def check_file(self, path, content = None, *, exists = True):
        # Assert a file's existence and, optionally, its exact content.
        file_path = os.path.join(self.cwd, path)
        if exists:
            assert os.path.isfile(file_path)
            if content is not None:
                with open(file_path, 'r', encoding = 'utf-8') as file:
                    assert file.read() == content
        else:
            if content is not None:
                raise ValueError('content must be None if exists is set to False.')
            assert not os.path.exists(file_path)
    def check_dir(self, dirs = [], files = [], *, path = '.', exclude_hidden = True):
        """
        Check that a set of directories exists and that only those directories exist.

        Pass None for `dirs` or `files` to skip that half of the check.
        """
        found_dirs = set()
        found_files = set()
        for i in os.listdir(os.path.join(self.cwd, path)):
            if not (i.startswith('.') and exclude_hidden):
                item_path = os.path.join(self.cwd, path, i)
                if os.path.isdir(item_path):
                    found_dirs.add(i)
                elif os.path.isfile(item_path):
                    found_files.add(i)
        if dirs is not None:
            assert found_dirs == set(dirs)
        if files is not None:
            assert found_files == set(files)
@contextlib.contextmanager
def workspace(*, virtualenvs = [], dummy_project = False):
    # Yield a Workspace rooted in a temporary directory that is removed on
    # exit. Optionally copies a dummy setup.py project into it and
    # pre-creates (non-activated) virtualenvs with the given names.
    with tempfile.TemporaryDirectory() as temp_dir:
        ws = Workspace(temp_dir)
        if dummy_project:
            for i in 'setup.py', 'venv_cli_dummy.py':
                data = pkgutil.get_data(__name__, os.path.join('example_project', i)).decode()
                ws.create_file(i, data)
        for i in virtualenvs:
            ws.run('venv --no-activate {}'.format(i))
        yield ws
|
[
"michi.schwarz@gmail.com"
] |
michi.schwarz@gmail.com
|
21db26de3198d180a5e39a545b3d434cfcfb9b71
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_117/ch84_2019_06_07_02_09_59_822281.py
|
c7e3553b0116c2d5fe47dde12a8fa2c6debf32c7
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
def inverte_dicionario (dic):
    """Invert a dict: map each value to the list of keys that held it.

    Fixes the original, which iterated the (empty) result dict instead of
    `dic` and overwrote the result with a list, so it always returned {}.
    """
    dic_invert = {}
    for k, v in dic.items():
        # First time we see this value, start its key list.
        if v not in dic_invert:
            dic_invert[v] = []
        dic_invert[v].append(k)
    return dic_invert
|
[
"you@example.com"
] |
you@example.com
|
3dd56adae1191d1dbd4cb5db6911e9f04756571f
|
4e693506b1b69b28ae2bcf0f5eb0d30e71a5e63d
|
/keras_models_factory/utils.py
|
61d28442735ec316350b013d4e5cab50e1268d3f
|
[
"MIT"
] |
permissive
|
shadiakiki1986/keras-models-factory
|
62fabc7e786bc2e7ad85f00bf41abff85df57b35
|
ee4f776eea0ec2e20347105d31cf192877f386bd
|
refs/heads/master
| 2021-01-23T16:57:34.653001
| 2017-09-19T09:22:00
| 2017-09-19T09:22:00
| 102,754,603
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
# https://gist.github.com/shadiakiki1986/2c293e364563492c65bffdb6122b4e92
from sklearn.preprocessing import MinMaxScaler # normalize,
min_max_scaler = MinMaxScaler()
# def myNorm3(X): return normalize(X, norm='l2', axis=0)
# Column-wise min-max scaling to [0, 1] using the module-level scaler.
# NOTE(review): fit_transform re-fits the shared scaler on every call.
def myNorm3(X): return min_max_scaler.fit_transform(X)
##########################################
import numpy as np
from matplotlib import pyplot as plt
def myPlot(X, space:int=5):
    """Plot the first 200 rows of X, offsetting each column vertically by `space` so the traces don't overlap."""
    X_plt = X+space*np.arange(X.shape[1])
    N_PLOT=200
    plt.plot(X_plt[0:N_PLOT,:])
    plt.show()
from sklearn.model_selection import train_test_split
def ae_fit_encode_plot_mse(X_in, autoencoder, encoder, N_epochs, verbose=1, callbacks:list=[]):
    """Train `autoencoder` on X_in, optionally plot the encoding, and return the reconstruction.

    NOTE(review): `callbacks` uses a mutable default argument -- safe only
    because it is never mutated here.
    """
    # split
    X_train, X_test = train_test_split(X_in, train_size=0.8, random_state=8888)
    # train autoencoder
    autoencoder.fit(
        X_train,
        X_train,
        epochs=N_epochs,
        batch_size=256,
        shuffle=True,
        validation_data=(
            X_test,
            X_test,
        ),
        verbose = verbose,
        callbacks=callbacks
    )
    # if not easy to visualize
    if X_in.shape[1]<50:
        # print("encoder predict")
        X_enc = encoder.predict(X_in)
        # print("encoded",X_enc)
        # # X_enc_dec = decoder.predict(X_enc)
        # # print("enc-dec",X_enc_dec)
        # X_rec = autoencoder.predict(X_pca)
        # print("recoded",X_rec)
        # plot
        # from matplotlib import pyplot as plt
        myPlot(X_enc)
    X_rec = autoencoder.predict(X_in)
    #result = mse(X_in, X_rec)
    #print("AE mse = ", result)
    #return result
    return X_rec
#####################
# functions for t1e_pca_ae_nonlinear-2
# copied from https://stats.stackexchange.com/questions/190148/autoencoder-pca-tensorflow?rq=1
def mse(x, x_est):
    """Relative l2 reconstruction error: ||x - x_est|| / ||x||.

    NOTE(review): despite the name this is a relative norm error, not a
    mean squared error.
    """
    diff_norm = np.linalg.norm(x - x_est)
    ref_norm = np.linalg.norm(x)
    return diff_norm / ref_norm
from sklearn.linear_model import LinearRegression
def pca_err(X, x_pca):
    """Reconstruction error of a PCA projection: fit a linear map from
    `x_pca` back to `X`, then report the relative l2 error (via mse)."""
    #from sklearn.decomposition import PCA
    #pca = PCA(n_components=2).fit(X)
    #x_pca = pca.transform(X)
    lr = LinearRegression().fit(x_pca, X)
    x_est = lr.predict(x_pca)
    result = mse(X, x_est)
    print('err pca = ', result)
    return result
|
[
"shadiakiki1986@gmail.com"
] |
shadiakiki1986@gmail.com
|
bda67dea8cdb17417a447b603190fdbc5a7850d8
|
6351221d588668804e2df01936732eede4d96ed0
|
/leetcode-cn/Python/232.用栈实现队列.py
|
7ae75d5f682f19c9bda3328e8f390ed0abeb0c49
|
[] |
no_license
|
LogicJake/code-for-interview
|
8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f
|
5990b09866696c2f3e845047c755fa72553dd421
|
refs/heads/master
| 2021-09-20T20:19:17.118333
| 2021-09-14T13:46:30
| 2021-09-14T13:46:30
| 102,202,212
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#
# @lc app=leetcode.cn id=232 lang=python3
#
# [232] 用栈实现队列
#
# @lc code=start
class MyQueue:
    """FIFO queue backed by a single Python list used as a stack (LC 232).

    Invariant: elements are stored in reverse arrival order, so the front
    of the queue is always the LAST list element. pop/peek are O(1);
    push pays the O(n) cost.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # Front of the queue lives at the tail of this list.
        self.stack = []

    def push(self, x: int) -> None:
        """
        Push element x to the back of queue.
        """
        # Inserting at index 0 keeps the reverse-arrival invariant --
        # equivalent to the rotate-the-whole-list approach.
        self.stack.insert(0, x)

    def pop(self) -> int:
        """
        Removes the element from in front of queue and returns that element.
        """
        return self.stack.pop(-1)

    def peek(self) -> int:
        """
        Get the front element.
        """
        return self.stack[-1]

    def empty(self) -> bool:
        """
        Returns whether the queue is empty.
        """
        return not self.stack
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
# @lc code=end
|
[
"835410808@qq.com"
] |
835410808@qq.com
|
cb0d026ba9bbf7fb071cfc018eaf8538a0285a2d
|
9a343c495459e79dc408a102730bcaeac7fa8886
|
/blog/app01/admin.py
|
d1211d0bccc7fd7e1a91cb51ce105a8e53f5ca8c
|
[
"MIT"
] |
permissive
|
MMingLeung/Python_Study
|
62d3ae92bf6760de0804aa5792f53fb3799486a2
|
4ff1d02d2b6dd54e96f7179fa000548936b691e7
|
refs/heads/master
| 2022-12-27T12:53:05.186800
| 2018-03-07T04:34:36
| 2018-03-07T04:34:36
| 92,124,981
| 3
| 1
|
MIT
| 2021-06-10T18:35:33
| 2017-05-23T03:28:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
from django.contrib import admin
from app01 import models
# Register your models here.
admin.site.register(models.UserInfo)
admin.site.register(models.Article)
admin.site.register(models.ArticleDetail)
admin.site.register(models.Article2Tag)
admin.site.register(models.Tag)
admin.site.register(models.Category)
admin.site.register(models.UserFans)
admin.site.register(models.Blog)
admin.site.register(models.UpDown)
admin.site.register(models.Comment)
|
[
"mingmingleung1991@gmail.com"
] |
mingmingleung1991@gmail.com
|
00149d0616ecf21778b8fc9f4226f2e31c0455cf
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/container/node_pools/delete.py
|
85c05b6a7ef85f44a9e6eb9c9c58a6ee068f7c38
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,208
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete node pool command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
DETAILED_HELP = {
'DESCRIPTION':
"""\
*{command}* deletes a node pool from a Google Kubernetes Engine (GKE)
cluster. When you delete a node pool, GKE drains all the nodes in the
node pool. The draining process involves GKE evicting Pods on each node
in the node pool. Each node in a node pool is drained by evicting Pods
with an allotted graceful termination period of `MAX_POD`. `MAX_POD` is
the maximum `terminationGracePeriodSeconds` set on the Pods scheduled to
the node with a cap of one hour.
""",
'EXAMPLES':
"""\
To delete the "node-pool-1" node pool from the cluster
"sample-cluster", run:
$ {command} node-pool-1 --cluster=sample-cluster
""",
}
class Delete(base.DeleteCommand):
  """Delete an existing node pool in a running cluster."""

  @staticmethod
  def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
    # TODO(b/28639250): Support remote completion when the SDK supports it.
    flags.AddNodePoolNameArg(parser, 'The name of the node pool to delete.')
    parser.add_argument(
        '--timeout',
        type=int,
        default=1800,
        hidden=True,
        help='THIS ARGUMENT NEEDS HELP TEXT.')
    flags.AddAsyncFlag(parser)
    flags.AddNodePoolClusterFlag(
        parser, 'The cluster from which to delete the node pool.')

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)
    pool_ref = adapter.ParseNodePool(args.name, location)
    # Destructive operation: always confirm before deleting the pool.
    console_io.PromptContinue(
        message=('The following node pool will be deleted.\n'
                 '[{name}] in cluster [{clusterId}] in [{zone}]').format(
                     name=pool_ref.nodePoolId,
                     clusterId=pool_ref.clusterId,
                     zone=adapter.Zone(pool_ref)),
        throw_if_unattended=True,
        cancel_on_no=True)
    try:
      # Make sure it exists (will raise appropriate error if not)
      adapter.GetNodePool(pool_ref)

      op_ref = adapter.DeleteNodePool(pool_ref)
      if args.async_:
        # Async mode: return the pending operation without waiting on it.
        op = adapter.GetOperation(op_ref)
        if not args.IsSpecified('format'):
          args.format = util.OPERATIONS_FORMAT
        return op
      adapter.WaitForOperation(
          op_ref,
          'Deleting node pool {0}'.format(pool_ref.nodePoolId),
          timeout_s=args.timeout)
    except apitools_exceptions.HttpError as error:
      # Convert raw HTTP errors to user-facing gcloud exceptions.
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

    log.DeletedResource(pool_ref)
    return op_ref


Delete.detailed_help = DETAILED_HELP
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
1b255b3ddd1df3b1b17cabceab2a798b41728384
|
164e0f43ef3ad4cb7f6b28dfdd2bfbaa66d38ce2
|
/Remove_Invalid_Parentheses/Remove_Invalid_Parentheses.py
|
034e14e6cc19233f7b8b6abc301cc84c82bcdc96
|
[] |
no_license
|
maoxx241/code
|
b217f2d10065d90f52cfa38788c99e238565b892
|
16e97ec5ee7ae9ffa69da2e001d15a86d73d2040
|
refs/heads/master
| 2021-07-11T14:25:35.098241
| 2020-11-25T14:01:56
| 2020-11-25T14:01:56
| 222,544,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
class Solution:
    def removeInvalidParentheses(self, s: str) -> List[str]:
        """Return all longest valid strings obtainable by removing the
        minimum number of misplaced parentheses from s (LeetCode 301).

        Backtracking with pruning: precompute how many '(' and ')' must be
        removed, then explore keep/discard choices, discarding at most
        that many of each.
        """
        left = 0
        right = 0
        # First, we find out the number of misplaced left and right parentheses.
        for char in s:
            # Simply record the left one.
            if char == '(':
                left += 1
            elif char == ')':
                # If we don't have a matching left, then this is a misplaced right, record it.
                right = right + 1 if left == 0 else right
                # Decrement count of left parentheses because we have found a right
                # which CAN be a matching one for a left.
                left = left - 1 if left > 0 else left
        # Dict used as an ordered set of valid results (dedupes duplicates).
        result = {}
        def recurse(s, index, left_count, right_count, left_rem, right_rem, expr):
            # If we reached the end of the string, just check if the resulting expression is
            # valid or not and also if we have removed the total number of left and right
            # parentheses that we should have removed.
            if index == len(s):
                if left_rem == 0 and right_rem == 0:
                    ans = "".join(expr)
                    result[ans] = 1
            else:
                # The discard case. Note that here we have our pruning condition.
                # We don't recurse if the remaining count for that parenthesis is == 0.
                if (s[index] == '(' and left_rem > 0) or (s[index] == ')' and right_rem > 0):
                    recurse(s, index + 1,
                            left_count,
                            right_count,
                            left_rem - (s[index] == '('),
                            right_rem - (s[index] == ')'), expr)
                expr.append(s[index])
                # Simply recurse one step further if the current character is not a parenthesis.
                if s[index] != '(' and s[index] != ')':
                    recurse(s, index + 1,
                            left_count,
                            right_count,
                            left_rem,
                            right_rem, expr)
                elif s[index] == '(':
                    # Consider an opening bracket.
                    recurse(s, index + 1,
                            left_count + 1,
                            right_count,
                            left_rem,
                            right_rem, expr)
                elif s[index] == ')' and left_count > right_count:
                    # Consider a closing bracket.
                    recurse(s, index + 1,
                            left_count,
                            right_count + 1,
                            left_rem,
                            right_rem, expr)
                # Pop for backtracking.
                expr.pop()
        # Now, the left and right variables tell us the number of misplaced left and
        # right parentheses and that greatly helps pruning the recursion.
        recurse(s, 0, 0, 0, left, right, [])
        return list(result.keys())
|
[
"maomaoyu870@gmail.com"
] |
maomaoyu870@gmail.com
|
69e237230ee8790bc12d09eeeae22d58d793a7de
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binary_20200525114701.py
|
e8cd6e394a55474ea53bc0c4231c7e1d52b17737
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
def solution(N):
    """Return the length of the longest binary gap of N.

    A binary gap is a maximal run of zeros bounded by ones on BOTH sides
    in the binary representation of N (Codility "BinaryGap"). Returns 0
    when no such gap exists (e.g. trailing zeros do not count).

    Rewritten: the original draft read `totalCount` before assigning it
    (NameError on some inputs), mixed up its counters, and always fell
    through to `return 0`.
    """
    longest = 0
    current = None  # None until the first '1' has been seen
    for bit in format(N, 'b'):
        if bit == '1':
            # A '1' closes any gap in progress; keep the best so far.
            if current is not None:
                longest = max(longest, current)
            current = 0
        elif current is not None:
            current += 1
    return longest


solution(10001)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
80c7f0807075a35cdcec4e616e655da777916a79
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/Stack/225_ImplementStackusingQueues.py
|
605f337ca4287019202fd4d6bbc9e42ac90b852a
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: xuezaigds@gmail.com
from collections import deque
class Stack(object):
    """LIFO stack implemented on top of a single FIFO queue (LC 225).

    Invariant: the newest element is kept at the LEFT end of the deque,
    so top/pop are O(1) and push is O(n) (one rotation).
    """

    def __init__(self):
        self._queue = deque()

    def push(self, x):
        # Append, then rotate once so the new element moves from the
        # right end to the front -- same effect as the original
        # pop/append loop, but the loop used Python-2-only `xrange`,
        # which raises NameError on Python 3.
        self._queue.append(x)
        self._queue.rotate(1)

    def pop(self):
        # Fix: return the removed element (the original discarded it).
        return self._queue.popleft()

    def top(self):
        return self._queue[0]

    def empty(self):
        return not len(self._queue)
"""Test
if __name__ == '__main__':
s = Stack()
s.push(1)
s.push(2)
print s.top()
s.pop()
print s.empty()
print s.top()
s.pop()
print s.empty()
"""
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
128cca6fe2e6e5e784f55a61facb0487d837a808
|
127fa3dd454434b4c7526afe161177af2e10226e
|
/learn/python_base/io.py
|
60348faa4adbf54d5d67904473a557cd1779d9e7
|
[] |
no_license
|
lunar-r/sword-to-offer-python
|
966c46a8ddcff8ce5c95697638c988d83da3beab
|
fab4c341486e872fb2926d1b6d50499d55e76a4a
|
refs/heads/master
| 2023-04-18T18:57:12.126441
| 2020-11-29T09:51:23
| 2020-11-29T09:51:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
# -*- coding: utf-8 -*-
"""
   File Name:   io
   Description: Cheat sheet of basic Python file I/O.  Not meant to run
                top-to-bottom: ``fp`` stands for an already-open file
                object and most lines only sketch call signatures.
   Author :     simon
   date:        19-3-28
"""
# -- Basic file operations
# NOTE: `input` below shadows the input() builtin — tolerable in a
# reference sheet, avoid in real code.
output = open(r'C:\spam', 'w') # open an output file for writing
input = open('data', 'r') # open an input file for reading; mode may be 'w', 'r', 'a', 'wb', 'rb', 'ab', etc.
fp.read([size]) # size = number of bytes to read (whole file if omitted)
fp.readline([size]) # read one line; with size set, possibly only part of a line
fp.readlines([size]) # return the file as a list of lines (loops readline() internally); size caps the total amount read
fp.readable() # whether the file is readable
fp.write(str) # write str to the file; write() does NOT append a newline
fp.writelines(seq) # write every string in seq (several lines written in one call)
fp.writeable() # whether the file is writable (NOTE(review): the real method is fp.writable())
fp.close() # close the file
fp.flush() # flush buffered data to disk
fp.fileno() # return the underlying file descriptor (an integer)
fp.isatty() # whether the file is a terminal device (Unix)
fp.tell() # current file-pointer position, measured from the start of the file
fp.next() # return the next line and advance; `for line in file` iterates via next() (Python 2 API)
fp.seek(offset[, whence]) # move the file pointer to offset; whence: 0 = from start, 1 = from current position, 2 = from end
fp.seekable() # whether seek() is supported
fp.truncate([size]) # shrink the file to size bytes (default: the current position)
for line in open('data'):
    print(line) # for-statement iteration — preferable for large files
with open('data') as file:
    print(file.readline()) # the with-statement guarantees the file is closed
with open('data') as file:
    lines = file.readlines() # read all lines at once; file closed on exit
open('f.txt', encoding='latin-1') # Python 3.x Unicode text file
open('f.bin', 'rb') # Python 3.x binary bytes file
# File objects also expose attributes: buffer, closed, encoding, errors, line_buffering, name, newlines, etc.
|
[
"2711772037@qq.com"
] |
2711772037@qq.com
|
404b3dd9bf9118947a73b4b22ab44cac0e5361bd
|
d5e94042ac2b248b7701117a6ea941bcc862067a
|
/upvote/gae/modules/bit9_api/constants.py
|
f473708c778b50b28f70a3411e000b6fa473e0de
|
[
"Apache-2.0"
] |
permissive
|
codegrande/upvote
|
f373105203a0595f76c29e138a18a95dc24a63df
|
e05d477bb13e470127b109eb8905a66a06eed5ac
|
refs/heads/master
| 2020-03-07T19:40:47.185833
| 2019-06-20T14:35:20
| 2019-06-20T14:35:20
| 127,677,753
| 0
| 0
| null | 2018-04-01T22:49:28
| 2018-04-01T22:49:27
| null |
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in communication with Bit9."""
from upvote.shared import constants
# RFC 3339/ISO 8601 datetime format.
DATETIME_CONVERSION_STRING = '%Y-%m-%dT%H:%M:%SZ'
# Same format with fractional seconds (microseconds).
DATETIME_CONVERSION_STRING_USEC = '%Y-%m-%dT%H:%M:%S.%fZ'
# Legacy space-separated format without a timezone designator.
OLD_DATETIME_CONVERSION_STRING = '%Y-%m-%d %H:%M:%S'
# A subtype is the classification of the kind of event.
# Numeric values are Bit9's event-subtype wire codes.
SUBTYPE = constants.Namespace(tuples=[
    # A file was blocked because it was unapproved.
    ('UNAPPROVED', 801),
    # A file was blocked because it was banned.
    ('BANNED', 802),
    # A file was blocked because of a user response to a prompt.
    ('PROMPTED_BLOCKED', 837),
    # A file was approved because of a user response to a prompt.
    ('PROMPTED_APPROVED', 838),
    # A file was blocked because of a timeout waiting for user response.
    ('PROMPTED_TIMED_OUT', 839)])
# Bit9 numeric approval states, plus a map back to their string names.
APPROVAL_STATE = constants.Namespace(
    tuples=[('UNAPPROVED', 1), ('APPROVED', 2), ('BANNED', 3)])
APPROVAL_STATE.DefineMap('TO_STR', {
    APPROVAL_STATE.UNAPPROVED: 'UNAPPROVED',
    APPROVAL_STATE.APPROVED: 'APPROVED',
    APPROVAL_STATE.BANNED: 'BANNED'})
# Bit9 hash-type codes, mapped onto Upvote's ID types.
SHA256_TYPE = constants.Namespace(tuples=[('REGULAR', 5), ('FUZZY', 6)])
SHA256_TYPE.DefineMap('TO_ID_TYPE', {
    SHA256_TYPE.REGULAR: constants.ID_TYPE.SHA256,
    SHA256_TYPE.FUZZY: constants.ID_TYPE.FUZZY_SHA256})
class FileFlags(object):
    """File flags for a Bit9 file catalog.

    Values are distinct powers of two, so they appear to be bitmask flags
    (combine with |, test with &) — confirm against the Bit9 API docs.
    """
    MARKED_INSTALLER = 0x00004
    DETECTED_INSTALLER = 0x00010
    MARKED_NOT_INSTALLER = 0x10000
class UpvoteHostHealthProperties(object):
    """Host health properties.

    String constants naming host-health fields; each value mirrors its
    attribute name in snake_case (presumably used as serialization keys —
    verify against the code that consumes them).
    """
    AGENT_CACHE_SIZE = 'agent_cache_size'
    AGENT_VERSION = 'agent_version'
    CONNECTED = 'connected'
    HAS_HEALTH_CHECK_ERRORS = 'has_health_check_errors'
    IS_INITIALIZING = 'is_initializing'
    LAST_REGISTER_DATE = 'last_register_date'
    NAME = 'name'
    POLICY_NAME = 'policy_name'
    POLICY_STATUS = 'policy_status'
    POLICY_STATUS_DETAILS = 'policy_status_details'
    UPGRADE_STATUS = 'upgrade_status'
|
[
"msuozzo@google.com"
] |
msuozzo@google.com
|
62dbc06cc71f3f8a7e37df306f12fd1e96d86336
|
284f2bfaabf91899211e56063026857c496965cf
|
/users/mixins.py
|
9da8e98128b9d6e173ab42e6c559d7402fc769a5
|
[] |
no_license
|
vanessa/building-tuirer
|
7b56bb9791659fcd04942d2c84a393c3c226f8c4
|
61d85df7d120387700b2e449a6fde5fb9ca7cfaa
|
refs/heads/master
| 2022-12-11T07:25:14.174448
| 2018-08-07T05:18:29
| 2018-08-07T05:18:29
| 142,210,249
| 18
| 0
| null | 2022-12-08T02:19:48
| 2018-07-24T20:35:34
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from users.models import User
from django.shortcuts import redirect
from django.contrib import messages
class ProfileAccessMixin(LoginRequiredMixin):
    """Restrict a profile view so users can only act on their own profile.

    Builds on LoginRequiredMixin: anonymous users are redirected as usual,
    while authenticated users hitting someone else's profile (URL kwarg
    ``pk``) get a flash error and are sent back to the index page.
    """

    def handle_no_permission(self):
        # Flash an error so the user knows why they were redirected.
        messages.error(
            self.request,
            'Você não pode editar um perfil que não é seu!'
        )
        return redirect('index')

    def dispatch(self, request, *args, **kwargs):
        """Allow the request only when the target profile is request.user's."""
        user_pk = kwargs.get('pk')
        # NOTE(review): raises User.DoesNotExist for an unknown pk; a 404
        # (get_object_or_404) may be preferable — confirm desired behavior.
        user = User.objects.get(pk=user_pk)
        if user != request.user:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
|
[
"vanessa@vinta.com.br"
] |
vanessa@vinta.com.br
|
5463c902b0d00d5e90378a570e33e19db4e6b638
|
31a0b0749c30ff37c3a72592387f9d8195de4bd6
|
/release/ray_release/scripts/run_release_test.py
|
6729c6a6630ae109dcf0bf0513abdb49074b30e0
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
longshotsyndicate/ray
|
15100bad514b602a3fa39bfe205288e7bec75d90
|
3341fae573868338b665bcea8a1c4ee86b702751
|
refs/heads/master
| 2023-01-28T15:16:00.401509
| 2022-02-18T05:35:47
| 2022-02-18T05:35:47
| 163,961,795
| 1
| 1
|
Apache-2.0
| 2023-01-14T08:01:02
| 2019-01-03T11:03:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
import os
import sys
from typing import Optional
import click
from ray_release.aws import maybe_fetch_api_token
from ray_release.config import (
read_and_validate_release_test_collection,
find_test,
as_smoke_test,
DEFAULT_WHEEL_WAIT_TIMEOUT,
)
from ray_release.exception import ReleaseTestCLIError, ReleaseTestError
from ray_release.glue import run_release_test
from ray_release.logger import logger
from ray_release.reporter.legacy_rds import LegacyRDSReporter
from ray_release.reporter.log import LogReporter
from ray_release.result import Result
from ray_release.wheels import find_and_wait_for_ray_wheels_url
@click.command()
@click.argument("test_name", required=True, type=str)
@click.option(
    "--test-collection-file",
    default=None,
    type=str,
    help="File containing test configurations",
)
@click.option(
    "--smoke-test",
    default=False,
    type=bool,
    is_flag=True,
    help="Finish quickly for testing",
)
@click.option(
    "--report",
    default=False,
    type=bool,
    is_flag=True,
    help="Report results to database",
)
@click.option(
    "--ray-wheels",
    default=None,
    type=str,
    help=(
        "Commit hash or URL to Ray wheels to be used for testing. "
        "If empty, defaults to the BUILDKITE_COMMIT env variable. "
        "Can be e.g. `master` to fetch latest wheels from the "
        "Ray master branch. Can also be `<repo_url>:<branch>` or "
        "`<repo_url>:<commit>` to specify a different repository to "
        "fetch wheels from, if available."
    ),
)
@click.option(
    "--cluster-id",
    default=None,
    type=str,
    help="Cluster ID of existing cluster to be re-used.",
)
@click.option(
    "--cluster-env-id",
    default=None,
    type=str,
    help="Cluster env ID of existing cluster env to be re-used.",
)
@click.option(
    "--no-terminate",
    default=False,
    type=bool,
    is_flag=True,
    help="Do not terminate cluster after test.",
)
def main(
    test_name: str,
    test_collection_file: Optional[str] = None,
    smoke_test: bool = False,
    report: bool = False,
    ray_wheels: Optional[str] = None,
    cluster_id: Optional[str] = None,
    cluster_env_id: Optional[str] = None,
    no_terminate: bool = False,
):
    """CLI entry point: run one named release test and exit with its result.

    Looks up ``test_name`` in the release-test collection, resolves the Ray
    wheels to test against, runs the test through ``run_release_test``, and
    terminates the process with the test's return code.
    """
    # Default collection: release_tests.yaml two directories above this file.
    test_collection_file = test_collection_file or os.path.join(
        os.path.dirname(__file__), "..", "..", "release_tests.yaml"
    )
    test_collection = read_and_validate_release_test_collection(test_collection_file)
    test = find_test(test_collection, test_name)
    if not test:
        raise ReleaseTestCLIError(
            f"Test `{test_name}` not found in collection file: "
            f"{test_collection_file}"
        )
    if smoke_test:
        # Swap in the reduced smoke-test variant of the configuration.
        test = as_smoke_test(test)
    # Blocks until wheels for the requested commit/branch are available.
    ray_wheels_url = find_and_wait_for_ray_wheels_url(
        ray_wheels, timeout=DEFAULT_WHEEL_WAIT_TIMEOUT
    )
    anyscale_project = os.environ.get("ANYSCALE_PROJECT", None)
    if not anyscale_project:
        raise ReleaseTestCLIError(
            "You have to set the ANYSCALE_PROJECT environment variable!"
        )
    maybe_fetch_api_token()
    result = Result()
    reporters = [LogReporter()]
    if report:
        # Legacy RDS results database only when --report is given.
        reporters.append(LegacyRDSReporter())
    try:
        result = run_release_test(
            test,
            anyscale_project=anyscale_project,
            result=result,
            ray_wheels_url=ray_wheels_url,
            reporters=reporters,
            cluster_id=cluster_id,
            cluster_env_id=cluster_env_id,
            no_terminate=no_terminate,
        )
    except ReleaseTestError as e:
        # Log and fall through: result.return_code still carries the failure.
        logger.exception(e)
    logger.info(
        f"Release test pipeline for test {test['name']} completed. "
        f"Returning with exit code = {result.return_code}"
    )
    sys.exit(result.return_code)


if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
longshotsyndicate.noreply@github.com
|
7d59995c77d2bfd70c4e9e24e6d9add01ba90bfb
|
705ca924bc63e8b324b847b21263f823219280e1
|
/apps/its_login_register/migrations/0006_job.py
|
6e32a697bb0da3c4dffe8fc695f96d8bda5c8dfd
|
[] |
no_license
|
Komaldhall/Helping-Hand
|
46a28f70045029794b0feb502db1ce3c8ba721e3
|
a544b3812d3eb968233cfd28464c321f3bc997af
|
refs/heads/master
| 2020-04-16T09:29:15.308558
| 2019-01-13T08:44:26
| 2019-01-13T08:44:26
| 165,465,986
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
# Generated by Django 2.0.7 on 2018-07-20 18:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.7): create the Job model.

    Job rows carry name/desc/location plus auto timestamps and a FK to the
    app's User model (reverse accessor ``post``; cascades on user delete).
    Generated code — keep in sync with the migration graph rather than
    hand-editing.
    """
    dependencies = [
        ('its_login_register', '0005_auto_20180720_1103'),
    ]
    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('desc', models.TextField()),
                ('location', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post', to='its_login_register.User')),
            ],
        ),
    ]
|
[
"komal.dhall7@gmail.com"
] |
komal.dhall7@gmail.com
|
782369aa9e5911c9a60e033e2124834fa92cff51
|
87849e7794e223214b3e40896c708d4ea17f2a12
|
/tests/test_autogen_computed.py
|
1144560dca5c23996719909df7610844ed7d95cb
|
[
"MIT"
] |
permissive
|
novafacing/alembic
|
0b6d9bfa9a66bd4883e863a6ce70a7094c9bb85b
|
29ff74c2678ab73f6c5a646477c840f5cdded234
|
refs/heads/master
| 2021-01-14T15:21:53.344810
| 2020-02-24T06:22:46
| 2020-02-24T06:22:46
| 242,660,622
| 0
| 0
|
MIT
| 2020-02-24T06:13:49
| 2020-02-24T06:13:49
| null |
UTF-8
|
Python
| false
| false
| 4,577
|
py
|
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from alembic import testing
from alembic.testing import config
from alembic.testing import eq_
from alembic.testing import exclusions
from alembic.testing import is_
from alembic.testing import is_true
from alembic.testing import TestBase
from ._autogen_fixtures import AutogenFixtureTest
class AutogenerateComputedTest(AutogenFixtureTest, TestBase):
    """Autogenerate-diff tests for SQLAlchemy ``Computed`` (generated) columns."""
    __requires__ = ("computed_columns",)
    __backend__ = True
    def test_add_computed_column(self):
        """A new computed column diffs as add_column with .computed populated."""
        m1 = MetaData()
        m2 = MetaData()
        Table("user", m1, Column("id", Integer, primary_key=True))
        Table(
            "user",
            m2,
            Column("id", Integer, primary_key=True),
            Column("foo", Integer, sa.Computed("5")),
        )
        diffs = self._fixture(m1, m2)
        eq_(diffs[0][0], "add_column")
        eq_(diffs[0][2], "user")
        eq_(diffs[0][3].name, "foo")
        c = diffs[0][3].computed
        is_true(isinstance(c, sa.Computed))
        # persisted was not specified, so it reflects as None (backend default).
        is_(c.persisted, None)
        eq_(str(c.sqltext), "5")
    def test_remove_computed_column(self):
        """Dropping a computed column diffs as remove_column; the reflected
        column may carry the expression as a server default on some backends."""
        m1 = MetaData()
        m2 = MetaData()
        Table(
            "user",
            m1,
            Column("id", Integer, primary_key=True),
            Column("foo", Integer, sa.Computed("5")),
        )
        Table("user", m2, Column("id", Integer, primary_key=True))
        diffs = self._fixture(m1, m2)
        eq_(diffs[0][0], "remove_column")
        eq_(diffs[0][2], "user")
        c = diffs[0][3]
        eq_(c.name, "foo")
        is_(c.computed, None)
        if config.requirements.computed_reflects_as_server_default.enabled:
            is_true(isinstance(c.server_default, sa.DefaultClause))
            eq_(str(c.server_default.arg.text), "5")
        else:
            is_(c.server_default, None)
    @testing.combinations(
        lambda: (sa.Computed("5"), sa.Computed("5")),
        lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")),
        lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")),
        lambda: (
            sa.Computed("bar*5"),
            sa.Computed("bar * 42", persisted=True),
        ),
        lambda: (None, sa.Computed("bar*5")),
        (
            lambda: (sa.Computed("bar*5"), None),
            config.requirements.computed_doesnt_reflect_as_server_default,
        ),
    )
    def test_computed_unchanged(self, test_case):
        """Computed-expression changes alone must NOT produce a diff."""
        arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
        m1 = MetaData()
        m2 = MetaData()
        # None means "no Computed argument at all" for that side.
        arg_before = [] if arg_before is None else [arg_before]
        arg_after = [] if arg_after is None else [arg_after]
        Table(
            "user",
            m1,
            Column("id", Integer, primary_key=True),
            Column("bar", Integer),
            Column("foo", Integer, *arg_before),
        )
        Table(
            "user",
            m2,
            Column("id", Integer, primary_key=True),
            Column("bar", Integer),
            Column("foo", Integer, *arg_after),
        )
        diffs = self._fixture(m1, m2)
        eq_(len(diffs), 0)
    @config.requirements.computed_reflects_as_server_default
    def test_remove_computed_default_on_computed(self):
        """Asserts the current behavior which is that on PG and Oracle,
        the GENERATED ALWAYS AS is reflected as a server default which we can't
        tell is actually "computed", so these come out as a modification to
        the server default.
        """
        m1 = MetaData()
        m2 = MetaData()
        Table(
            "user",
            m1,
            Column("id", Integer, primary_key=True),
            Column("bar", Integer),
            Column("foo", Integer, sa.Computed("bar + 42")),
        )
        Table(
            "user",
            m2,
            Column("id", Integer, primary_key=True),
            Column("bar", Integer),
            Column("foo", Integer),
        )
        diffs = self._fixture(m1, m2)
        eq_(diffs[0][0][0], "modify_default")
        eq_(diffs[0][0][2], "user")
        eq_(diffs[0][0][3], "foo")
        old = diffs[0][0][-2]
        new = diffs[0][0][-1]
        is_(new, None)
        is_true(isinstance(old, sa.DefaultClause))
        # Each backend renders the reflected expression differently.
        if exclusions.against(config, "postgresql"):
            eq_(str(old.arg.text), "(bar + 42)")
        elif exclusions.against(config, "oracle"):
            eq_(str(old.arg.text), '"BAR"+42')
|
[
"mike_mp@zzzcomputing.com"
] |
mike_mp@zzzcomputing.com
|
a94cf976e9587529566a28af7ecc54d87fa2a67e
|
733b5c3974dd135c390aedbb75ce863abfac0759
|
/portal/forms.py
|
92d9c7bf4f754d5a879255c286ec998952d941e0
|
[] |
no_license
|
stepin-s/electroportal
|
eb3ade027d548969761a9482aaddbcfb81666d0d
|
d8228ff77805d405f56d18537fa17dcc945cf8a6
|
refs/heads/master
| 2022-12-02T12:33:29.163301
| 2020-08-17T07:05:24
| 2020-08-17T07:05:24
| 284,604,768
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
from django import forms
from .models import News
from .models import Videos
class NewsForm(forms.ModelForm):
    """ModelForm for creating/editing a News item (title and text only)."""
    class Meta:
        model = News
        fields = ('title', 'text',)
class VideosForm(forms.ModelForm):
    """ModelForm for creating/editing a Videos item (title and text only)."""
    class Meta:
        model = Videos
        fields = ('title', 'text',)
|
[
"stpn.s@yandex.ru"
] |
stpn.s@yandex.ru
|
27c70bdc66179c2000f823081a3d97b2140bc3e8
|
cf945fb7c961376bfcff37c80fe50312d4f32290
|
/Books/NetworkScraping_Py3/C2_NetworkHrefScraping/E2_HrefScarpingEntry.py
|
30aa204aa2cc770573683076ad0a29dac704befa
|
[] |
no_license
|
lizhenQAZ/code_manage
|
faa1e805326cc8da8463e0f8820c9d092a04dddb
|
f98977d58a9febb8212652846314418bba37bfc7
|
refs/heads/master
| 2020-12-03T00:00:52.205238
| 2018-12-19T16:00:48
| 2018-12-19T16:00:48
| 95,968,266
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
url = 'https://en.wikipedia.org/wiki/Kevin_Bacon'
# Internal wiki links: start with /wiki/ and contain no colon
# (excludes namespaced pages such as File:, Talk:, Category:).
rex = re.compile('^(/wiki/)((?!:).)*$')


def gettitle(url_info):
    """Fetch `url_info` and return its internal /wiki/ links.

    Returns the list of <a> tags inside div#bodyContent whose href matches
    ``rex``, or None when the HTTP request fails or the page lacks the
    expected layout (find() returning None raises AttributeError).
    """
    try:
        html = urlopen(url_info)
    except HTTPError:
        return None
    try:
        bsobj = BeautifulSoup(html.read())
        return bsobj.find('div', {'id': 'bodyContent'}).find_all('a', {'href': rex})
    except AttributeError:
        return None


# Print the href of every matched wiki-entry link.
title_list = gettitle(url)
# Guard: gettitle may return None (the original crashed iterating None here).
for title_info in title_list or []:
    if 'href' in title_info.attrs:
        print(title_info.attrs['href'])
|
[
"www.516960831@qq.com"
] |
www.516960831@qq.com
|
4693784784bb42b021025f1ca712c9ce4534686e
|
50957651c54cfb3cba809eb84cf56c0cb2e2621d
|
/tests/cpydiff/modules_sys_stdassign.py
|
096af430e4f571587577ec543a3dfb426aa26dbd
|
[
"MIT"
] |
permissive
|
whyengineer/micropython-esp32
|
94d11e1f5171ea526ac5f97de60e34560b656435
|
ab95d9cb19fc8cda42bf3fdecd76625ff9929c4e
|
refs/heads/esp32
| 2020-12-02T18:20:19.929696
| 2017-07-07T10:21:24
| 2017-07-07T10:21:24
| 96,515,880
| 4
| 0
| null | 2017-07-07T08:14:40
| 2017-07-07T08:14:39
| null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
"""
categories: Modules,sys
description: Override sys.stdin, sys.stdout and sys.stderr. Impossible as they are stored in read-only memory.
cause: Unknown
workaround: Unknown
"""
import sys
sys.stdin = None
print(sys.stdin)
|
[
"damien.p.george@gmail.com"
] |
damien.p.george@gmail.com
|
20cc519f37b4cc8a0dbe3cb2c7440dd9e4437f7b
|
22d6db28f14ea809fffb3afb187a1b484474713f
|
/azext_keyvault/mgmt/keyvault/models/__init__.py
|
e561ff7983d0c7bb82557cb61856036e4c53e434
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-keyvault-cli-extension
|
631322637f2311b6833bc9664ef92fd77e1eade6
|
a9b4a1f8a1f8e2433f83a81efe6068e3bf4537ef
|
refs/heads/master
| 2023-06-09T18:56:46.388527
| 2023-06-02T16:18:23
| 2023-06-02T16:18:23
| 130,276,163
| 3
| 8
|
MIT
| 2023-06-02T16:18:24
| 2018-04-19T21:49:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,353
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .sku_py3 import Sku
from .permissions_py3 import Permissions
from .access_policy_entry_py3 import AccessPolicyEntry
from .ip_rule_py3 import IPRule
from .virtual_network_rule_py3 import VirtualNetworkRule
from .network_rule_set_py3 import NetworkRuleSet
from .vault_properties_py3 import VaultProperties
from .vault_patch_properties_py3 import VaultPatchProperties
from .vault_access_policy_properties_py3 import VaultAccessPolicyProperties
from .deleted_vault_properties_py3 import DeletedVaultProperties
from .vault_create_or_update_parameters_py3 import VaultCreateOrUpdateParameters
from .vault_patch_parameters_py3 import VaultPatchParameters
from .vault_access_policy_parameters_py3 import VaultAccessPolicyParameters
from .vault_py3 import Vault
from .deleted_vault_py3 import DeletedVault
from .resource_py3 import Resource
from .vault_check_name_availability_parameters_py3 import VaultCheckNameAvailabilityParameters
from .check_name_availability_result_py3 import CheckNameAvailabilityResult
from .operation_display_py3 import OperationDisplay
from .log_specification_py3 import LogSpecification
from .service_specification_py3 import ServiceSpecification
from .operation_py3 import Operation
except (SyntaxError, ImportError):
from .sku import Sku
from .permissions import Permissions
from .access_policy_entry import AccessPolicyEntry
from .ip_rule import IPRule
from .virtual_network_rule import VirtualNetworkRule
from .network_rule_set import NetworkRuleSet
from .vault_properties import VaultProperties
from .vault_patch_properties import VaultPatchProperties
from .vault_access_policy_properties import VaultAccessPolicyProperties
from .deleted_vault_properties import DeletedVaultProperties
from .vault_create_or_update_parameters import VaultCreateOrUpdateParameters
from .vault_patch_parameters import VaultPatchParameters
from .vault_access_policy_parameters import VaultAccessPolicyParameters
from .vault import Vault
from .deleted_vault import DeletedVault
from .resource import Resource
from .vault_check_name_availability_parameters import VaultCheckNameAvailabilityParameters
from .check_name_availability_result import CheckNameAvailabilityResult
from .operation_display import OperationDisplay
from .log_specification import LogSpecification
from .service_specification import ServiceSpecification
from .operation import Operation
from .vault_paged import VaultPaged
from .deleted_vault_paged import DeletedVaultPaged
from .resource_paged import ResourcePaged
from .operation_paged import OperationPaged
from .key_vault_management_client_enums import (
SkuName,
KeyPermissions,
SecretPermissions,
CertificatePermissions,
StoragePermissions,
CreateMode,
NetworkRuleBypassOptions,
NetworkRuleAction,
Reason,
AccessPolicyUpdateKind,
)
__all__ = [
'Sku',
'Permissions',
'AccessPolicyEntry',
'IPRule',
'VirtualNetworkRule',
'NetworkRuleSet',
'VaultProperties',
'VaultPatchProperties',
'VaultAccessPolicyProperties',
'DeletedVaultProperties',
'VaultCreateOrUpdateParameters',
'VaultPatchParameters',
'VaultAccessPolicyParameters',
'Vault',
'DeletedVault',
'Resource',
'VaultCheckNameAvailabilityParameters',
'CheckNameAvailabilityResult',
'OperationDisplay',
'LogSpecification',
'ServiceSpecification',
'Operation',
'VaultPaged',
'DeletedVaultPaged',
'ResourcePaged',
'OperationPaged',
'SkuName',
'KeyPermissions',
'SecretPermissions',
'CertificatePermissions',
'StoragePermissions',
'CreateMode',
'NetworkRuleBypassOptions',
'NetworkRuleAction',
'Reason',
'AccessPolicyUpdateKind',
]
|
[
"sschaab@microsoft.com"
] |
sschaab@microsoft.com
|
2c0dc72ca231da4f98c7a53bddff61f3cebb751f
|
c1ef1f1fa94b5dbecff2ec09e94ae29a9094d82a
|
/study/backjoon/backjoon_2231.py
|
0561b5dae7d63b7ff43bdca2e4d945e638375f74
|
[] |
no_license
|
MONKEYZ9/algorithm
|
cd6039a2232615e9bd40f63e2509fddf7edcede7
|
4ffde1ac47294af87152ed740962db600e0b9755
|
refs/heads/main
| 2023-08-14T17:01:54.792376
| 2021-10-01T06:14:55
| 2021-10-01T06:14:55
| 380,917,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Boj 2231: find the smallest "generator" of N — an m whose value plus the
# sum of its digits equals N. Prints 0 when no generator exists.
raw = input()
target = int(raw)
candidates = []
# The digit sum of a number with len(raw) digits is at most 9 * len(raw),
# so the generator can differ from N by at most that much.
for offset in range(1, 9 * len(raw) + 1):
    candidate = target - offset
    if candidate <= 0:
        continue
    if candidate >= 10:
        # Multi-digit candidate: compare its digit sum against the offset.
        if sum(int(digit) for digit in str(candidate)) == offset:
            candidates.append(candidate)
    elif candidate * 2 == target:
        # Single-digit candidate: digit sum equals the number itself.
        candidates.append(candidate)
print(min(candidates) if candidates else 0)
|
[
"sangmin3285@gmail.com"
] |
sangmin3285@gmail.com
|
5a7e2ba68068192502f574cba81b2619a076de0a
|
258f6619c909be6295078d34639f4ffa171257b3
|
/src/edb/model/experiment/__init__.py
|
6bf85c797e89e61f8a4009925a8e3d711c405148
|
[] |
no_license
|
aidanheerdegen/experimentdb
|
a12a168c50517c72028ab7ba231a27bda88fc05d
|
8a5e77b2b489c4cba8766c8071c238586c11c0a3
|
refs/heads/main
| 2023-07-30T00:15:44.168950
| 2021-09-10T06:35:32
| 2021-09-10T06:35:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
from .base import Experiment
from ...utils import all_subclasses
# Import the subclasses so they're loaded
from . import accesscm, um, generic, payu
def experiment_factory(type: str, path: str) -> Experiment:
    """
    Try to create a 'type' Experiment at 'path'

    Raises KeyError when no (non-abstract) Experiment subclass declares
    this `type`.
    """
    # Each Experiment subclass has the type it's associated with as the parameter
    # 'type'. If it's an abstract class the type should be None
    types = {e.type: e for e in all_subclasses(Experiment) if e.type is not None}
    return types[type](path)
|
[
"scott.wales@unimelb.edu.au"
] |
scott.wales@unimelb.edu.au
|
4bded312dca334a10d59f07a72b4fc7556ae4dc3
|
83316f8e2be55b19d81ccee935c9cfa09ac7b0b3
|
/deepaudio/speaker/models/clovaai_resnetse34l/configurations.py
|
48705f3a4bafd38013ba99080910d6b4c6daef84
|
[] |
no_license
|
TrendingTechnology/deepaudio-speaker
|
5769b3ed851c721a57fcc4983c5905401d50f85e
|
46f4edef5957e0211b5fe82146e5ce48b1744e15
|
refs/heads/main
| 2023-07-05T03:39:55.224562
| 2021-08-08T08:02:12
| 2021-08-08T08:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
from dataclasses import dataclass, field
from deepaudio.speaker.dataclass.configurations import DeepMMDataclass
@dataclass
class ClovaaiResnetse34lConfigs(DeepMMDataclass):
    """Hyper-parameter dataclass for the Clova AI ResNetSE34L speaker model."""
    # NOTE(review): default "clovaai_ecapa" looks copy-pasted from the ECAPA
    # config — expected something like "clovaai_resnetse34l"? Confirm against
    # the model registry before changing.
    name: str = field(
        default="clovaai_ecapa", metadata={"help": "Model name"}
    )
    embed_dim: int = field(
        default=256, metadata={"help": "Dimension of embedding."}
    )
    encoder_type: str = field(
        default="SAP", metadata={"help": "Encoder type."}
    )
    optimizer: str = field(
        default="adam", metadata={"help": "Optimizer for training."}
    )
    # Frame-count bounds for training chunks.
    min_num_frames: int = field(
        default=200, metadata={"help": "Min num frames."}
    )
    max_num_frames: int = field(
        default=400, metadata={"help": "Max num frames."}
    )
|
[
"yinruiqing110@gmail.com"
] |
yinruiqing110@gmail.com
|
1897130bdb9a24f6cada979c2535f4bc3279dedf
|
d066f7fe739fb78f74ec2de8ccbfefdd4270f60f
|
/appimagebuilder/modules/generate/package_managers/apt/__init__.py
|
7ec1d298ae9776e5b17bc702c58ca69a36ebfb00
|
[
"MIT"
] |
permissive
|
AppImageCrafters/appimage-builder
|
666e75363a74f615cdb3673b3ca9d51a6d292a49
|
f38699ef3644fa5409a5a262b7b6d99d6fb85db9
|
refs/heads/main
| 2023-08-17T06:34:54.029664
| 2023-06-03T17:51:04
| 2023-06-03T17:51:04
| 218,847,680
| 270
| 54
|
MIT
| 2023-09-06T17:04:18
| 2019-10-31T19:44:17
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
# Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from .file_package_resolver import FilePackageResolver
from .package_repository_resolver import PackageRepositoryResolver
|
[
"contact@azubieta.net"
] |
contact@azubieta.net
|
8a0a9d68892f0dbac3b8e55eb69e82f1788cc05e
|
ba2d449486c58578581b8de7b2b6f21074be6274
|
/02 Linked Lists/2-4-Partition.py
|
382b61d69de5bcd3a2a3495d4d3dfa4e66b26e1c
|
[] |
no_license
|
theoliao1998/Cracking-the-Coding-Interview
|
4e0abef8659a0abf33e09ee78ce2f445f8b5d591
|
814b9163f68795238d17aad5b91327fbceadf49e
|
refs/heads/master
| 2020-12-09T12:46:10.845579
| 2020-07-25T05:39:19
| 2020-07-25T05:39:19
| 233,306,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
# Partition: Write code to partition a linked list around a value x, such that all nodes less than x come
# before all nodes greater than or equal to x. If x is contained within the list, the values of x only need
# to be after the elements less than x (see below). The partition element x can appear anywhere in the
# "right partition"; it does not need to appear between the left and right partitions.
# EXAMPLE
# Input:
# Output:
# 3 -> 5 -> 8 -> 5 -> 10 -> 2 -> 1 [partition= 5]
# 3 -> 1 -> 2 -> 10 -> 5 -> 5 -> 8
class ListNode(object):
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def append(self, x):
        """Walk to the tail and attach a fresh node carrying x."""
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = ListNode(x)
def partition1(n, x):
    """Recursively partition the list headed by `n` around pivot `x`.

    Nodes with val < x end up before nodes with val >= x (relative order
    is NOT preserved). Returns (head, tail) of the rebuilt list.
    O(n) time, O(n) recursion depth; `n` must be a non-empty list.

    Bug fix: the recursive call must target partition1 itself — the
    original called an undefined name `partition` (NameError at runtime).
    """
    if not n.next:
        return n, n
    first, end = partition1(n.next, x)
    if n.val < x:
        # Small node: prepend to the partitioned remainder.
        n.next = first
        first = n
    else:
        # Large node: append after the current tail and become the new tail.
        end.next = n
        n.next = None
        end = n
    return first, end
def partition2(n, x):
    """Stably partition the list headed by `n` around pivot `x`.

    Builds a "smaller than x" list and a ">= x" list in encounter order,
    then stitches them. Returns (head, tail_of_big_list); the second item
    is None when no node is >= x.

    Bug fix: the original dereferenced `small.next` / `big.next`
    unconditionally, crashing with AttributeError whenever every node
    fell on one side of the pivot.
    """
    small_head = small_tail = None
    big_head = big_tail = None
    while n:
        nxt = n.next
        n.next = None  # detach so each sublist stays properly terminated
        if n.val < x:
            if small_tail:
                small_tail.next = n
            else:
                small_head = n
            small_tail = n
        else:
            if big_tail:
                big_tail.next = n
            else:
                big_head = n
            big_tail = n
        n = nxt
    # Stitch: smalls first (if any), then the big list.
    if small_tail:
        small_tail.next = big_head
        head = small_head
    else:
        head = big_head
    return head, big_tail
# n = ListNode(3)
# n.append(5)
# n.append(8)
# n.append(5)
# n.append(10)
# n.append(2)
# n.append(1)
# n,_ = partition2(n,5)
# while n:
# print(n.val)
# n = n.next
|
[
"theoliao1998@gmail.com"
] |
theoliao1998@gmail.com
|
92a0f711757a1bedc1524c74b2a79606503bc2e9
|
b77cc1448ae2c68589c5ee24e1a0b1e53499e606
|
/env/Lib/site-packages/celery/signals.py
|
a9d74096a187bdc77fcb044d3bc0c5991ad6c1e0
|
[] |
no_license
|
PregTech-c/Hrp_system
|
a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7
|
11d8dd3221497c536dd7df9028b9991632055b21
|
refs/heads/master
| 2022-10-09T07:54:49.538270
| 2018-08-21T11:12:04
| 2018-08-21T11:12:04
| 145,424,954
| 1
| 1
| null | 2022-10-01T09:48:53
| 2018-08-20T13:58:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,252
|
py
|
# -*- coding: utf-8 -*-
"""Celery Signals.
This module defines the signals (Observer pattern) sent by
both workers and clients.
Functions can be connected to these signals, and connected
functions are called whenever a signal is called.
.. seealso::
    :ref:`signals` for more information.
"""
from __future__ import absolute_import, unicode_literals
from .utils.dispatch import Signal
# BUG FIX: `task_rejected`, `task_unknown` and `worker_process_shutdown`
# are public signals defined below but were missing from `__all__`, so
# `from celery.signals import *` silently dropped them (upstream celery
# exports all three).
__all__ = [
    'before_task_publish', 'after_task_publish',
    'task_prerun', 'task_postrun', 'task_success',
    'task_retry', 'task_failure', 'task_revoked',
    'task_rejected', 'task_unknown', 'celeryd_init',
    'celeryd_after_setup', 'worker_init', 'worker_process_init',
    'worker_process_shutdown',
    'worker_ready', 'worker_shutdown', 'setup_logging',
    'after_setup_logger', 'after_setup_task_logger',
    'beat_init', 'beat_embedded_init', 'heartbeat_sent',
    'eventlet_pool_started', 'eventlet_pool_preshutdown',
    'eventlet_pool_postshutdown', 'eventlet_pool_apply',
]
# - Task
before_task_publish = Signal(
    name='before_task_publish',
    providing_args={
        'body', 'exchange', 'routing_key', 'headers',
        'properties', 'declare', 'retry_policy',
    },
)
after_task_publish = Signal(
    name='after_task_publish',
    providing_args={'body', 'exchange', 'routing_key'},
)
task_prerun = Signal(
    name='task_prerun',
    providing_args={'task_id', 'task', 'args', 'kwargs'},
)
task_postrun = Signal(
    name='task_postrun',
    providing_args={'task_id', 'task', 'args', 'kwargs', 'retval'},
)
task_success = Signal(
    name='task_success',
    providing_args={'result'},
)
task_retry = Signal(
    name='task_retry',
    providing_args={'request', 'reason', 'einfo'},
)
task_failure = Signal(
    name='task_failure',
    providing_args={
        'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo',
    },
)
task_revoked = Signal(
    name='task_revoked',
    providing_args={
        'request', 'terminated', 'signum', 'expired',
    },
)
task_rejected = Signal(
    name='task_rejected',
    providing_args={'message', 'exc'},
)
task_unknown = Signal(
    name='task_unknown',
    providing_args={'message', 'exc', 'name', 'id'},
)
#: Deprecated, use after_task_publish instead.
task_sent = Signal(
    name='task_sent',
    providing_args={
        'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset',
    },
)
# - Program: `celery worker`
celeryd_init = Signal(
    name='celeryd_init',
    providing_args={'instance', 'conf', 'options'},
    use_caching=False,
)
celeryd_after_setup = Signal(
    name='celeryd_after_setup',
    providing_args={'instance', 'conf'},
    use_caching=False,
)
# - Worker
import_modules = Signal(name='import_modules')
worker_init = Signal(name='worker_init', use_caching=False)
# use_caching must be false when sender is None.
worker_process_init = Signal(
    name='worker_process_init',
    use_caching=False,
)
worker_process_shutdown = Signal(
    name='worker_process_shutdown',
    use_caching=False,
)
worker_ready = Signal(name='worker_ready', use_caching=False)
worker_shutdown = Signal(name='worker_shutdown', use_caching=False)
heartbeat_sent = Signal(name='heartbeat_sent')
# - Logging
setup_logging = Signal(
    name='setup_logging',
    providing_args={
        'loglevel', 'logfile', 'format', 'colorize',
    },
    use_caching=False,
)
after_setup_logger = Signal(
    name='after_setup_logger',
    providing_args={
        'logger', 'loglevel', 'logfile', 'format', 'colorize',
    },
    use_caching=False,
)
after_setup_task_logger = Signal(
    name='after_setup_task_logger',
    providing_args={
        'logger', 'loglevel', 'logfile', 'format', 'colorize',
    },
    use_caching=False,
)
# - Beat
beat_init = Signal(name='beat_init', use_caching=False)
beat_embedded_init = Signal(name='beat_embedded_init', use_caching=False)
# - Eventlet
eventlet_pool_started = Signal(name='eventlet_pool_started')
eventlet_pool_preshutdown = Signal(name='eventlet_pool_preshutdown')
eventlet_pool_postshutdown = Signal(name='eventlet_pool_postshutdown')
eventlet_pool_apply = Signal(
    name='eventlet_pool_apply',
    providing_args={'target', 'args', 'kwargs'},
)
# - Programs
user_preload_options = Signal(
    name='user_preload_options',
    providing_args={'app', 'options'},
    use_caching=False,
)
|
[
"imugabi64@yahoo.com"
] |
imugabi64@yahoo.com
|
1770f233133bfac4134d3c943a64c6377601bf89
|
5d0e76e3c741adc120ce753bacda1e723550f7ac
|
/804. Unique Morse Code Words.py
|
32ec17fb4276bfed3e16e772c733fbe1447b419f
|
[] |
no_license
|
GoldF15h/LeetCode
|
d8d9d5dedca3cce59f068b94e2edf986424efdbf
|
56fcbede20e12473eaf09c9d170c86fdfefe7f87
|
refs/heads/main
| 2023-08-25T12:31:08.436640
| 2021-10-20T04:36:23
| 2021-10-20T04:36:23
| 392,336,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
class Solution:
    def uniqueMorseRepresentations(self, words: List[str]) -> int:
        """Count how many distinct Morse-code transliterations *words* produce."""
        morse = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
        base = ord('a')
        # Each word maps to the concatenation of its letters' codes; a set
        # keeps only the distinct transliterations.
        codes = {''.join(morse[ord(ch) - base] for ch in word) for word in words}
        return len(codes)
|
[
"todsapon.singsunjit@gmail.com"
] |
todsapon.singsunjit@gmail.com
|
f99716fda4cb4563b8a60f98be2ac6d07ada0747
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/reaching-points/412272224.py
|
f83c584abb244333b9df14020e18ff100cbbc336
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
# title: reaching-points
# detail: https://leetcode.com/submissions/detail/412272224/
# datetime: Fri Oct 23 22:45:10 2020
# runtime: 20 ms
# memory: 14 MB
class Solution:
    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        """Return True if (tx, ty) is reachable from (sx, sy).

        A move turns (x, y) into (x, x + y) or (x + y, y), so we walk
        backwards from the target, undoing runs of additions with modulo.
        """
        if (sx, sy) == (tx, ty):
            return True
        while tx >= sx and ty >= sy:
            if tx == ty:
                # A predecessor would need a zero coordinate -- unreachable.
                return False
            if tx > ty:
                tx %= ty
                # Once ty matches sy, sx must equal tx modulo ty.
                if ty == sy and sx % sy == tx:
                    return True
            else:
                ty %= tx
                if tx == sx and sy % sx == ty:
                    return True
        return False
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
111b23c3006355c235c2d5856e279a4634f63d1d
|
761e133170e1c34a2360d488ddca43fa40107b96
|
/src/tools/MaxMa.py
|
c04d37033046bb1d752839b484a050412fa19f2c
|
[] |
no_license
|
bjzz/StockParser
|
c85b7180eea7ac5fa79b320fe1ad8934513c0482
|
a2dc1d2de06b78055786b956de940548bca75054
|
refs/heads/master
| 2023-03-18T04:54:31.487171
| 2019-09-27T09:48:28
| 2019-09-27T09:48:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
#coding:utf-8
#!/usr/bin/env python
import os
import re
import requests,time
import shutil
import sys
import threading
import time
import datetime
reload(sys)
sys.setdefaultencoding('utf-8')
rootPath = sys.path[0][0:sys.path[0].index('StockParser')]+'/StockParser'
sys.path.append(rootPath+'/src')
from common import Tools
from parsers import BaseParser
'''
用“明日涨停价”算最大MA5
'''
def getParams():
    """Read (code, parseDay) from the command line.

    Falls back to ``False`` for the stock code and to today's date
    (YYYY-MM-DD) for the parse day when an argument is missing.
    """
    argv = sys.argv
    if len(argv) > 1:
        code = argv[1]
    else:
        code = False
    if len(argv) > 2:
        parseDay = argv[2]
    else:
        parseDay = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    return (code, parseDay)
def getRes(code, parseDay):
    """Return the raw contents of the price file for stock ``code``.

    Args:
        code: stock code; used as the file name under the price directory.
        parseDay: parse-day string (not used to filter the contents here).
    """
    # NOTE(review): this parser instance is never used; kept in case the
    # constructor has side effects -- confirm and drop if it has none.
    parser = BaseParser.BaseParser(parseDay)
    priceFile = Tools.getPriceDirPath() + '/' + str(code)
    # BUG FIX: the original opened the file without ever closing it; a
    # context manager releases the handle deterministically.
    with open(priceFile, 'r') as f:
        return f.read()
def compute(code,parseDay):
    """Print the maximum MA3/MA5 stock `code` could reach tomorrow,
    assuming tomorrow closes at the 10% limit-up over the latest close
    (see the module note: uses "tomorrow's limit-up price").
    """
    res = getRes(code,parseDay)
    parser = BaseParser.BaseParser(parseDay)
    # Last four trading days up to parseDay.
    dayList = parser.getPastTradingDayList(parseDay,4)
    print dayList
    e1 = parser.getEndPriceOfDay(res,dayList[0])
    e2 = parser.getEndPriceOfDay(res,dayList[1])
    e3 = parser.getEndPriceOfDay(res,dayList[2])
    e4 = parser.getEndPriceOfDay(res,dayList[3])
    # Assumed close for tomorrow: 10% limit-up on e4.
    # NOTE(review): assumes dayList is chronological (oldest first) so e4
    # is the most recent close -- confirm against getPastTradingDayList.
    e5 = e4 * 1.1
    print e1,e2,e3,e4,e5
    if 0 == e1*e2*e3*e4:
        # A zero close means at least one day's price was missing/unparsed.
        print 'End Price Error !'
    else:
        ma3 = (e3+e4+e5)/3.0
        ma5 = (e1+e2+e3+e4+e5)/5.0
        print 'MA3 = ' + str(ma3)+',MA5 = ' + str(ma5)
if __name__ == '__main__':
    # CLI entry point: MaxMa.py [stock-code] [parse-day YYYY-MM-DD]
    (code,parseDay) = getParams()
    print code,parseDay
    compute(code,parseDay)
|
[
"you@example.com"
] |
you@example.com
|
1e581d0645442ece0090ccefed1d44c58c5b6f27
|
1f98ccf9ef52d3adab704676480c85fe22c9542d
|
/simpledb/index/planner/IndexUpdatePlanner.py
|
08656dd3f154a4650f863d9049ab5e54285bdf67
|
[] |
no_license
|
61515/simpleDB_Python
|
234c671cbbf57f3e8fc5489ec4c292365085b7a8
|
b6846da4a78369838f5b3c7a704de704e18f7be7
|
refs/heads/master
| 2023-02-22T14:07:52.660633
| 2021-01-24T02:25:40
| 2021-01-24T02:25:40
| 332,343,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,129
|
py
|
#
# * A modification of the basic update planner.
# * It dispatches each update statement to the corresponding
# * index planner.
# * @author Edward Sciore
#
from simpledb.plan.SelectPlan import SelectPlan
from simpledb.plan.TablePlan import TablePlan
from simpledb.plan.UpdatePlanner import UpdatePlanner
class IndexUpdatePlanner(UpdatePlanner):
    """Update planner that keeps indexes in sync with table modifications.

    Each insert/delete/modify is executed through a table scan and, in the
    same pass, mirrored into every index recorded for the affected fields.
    """

    def __init__(self, mdm):
        # mdm: metadata manager used to look up table schemas and index info.
        super(IndexUpdatePlanner, self).__init__()
        self.mdm = mdm

    def executeInsert(self, data, tx):
        """Insert one record, adding an index entry per indexed field.

        Returns the number of affected records (always 1).
        """
        tblname = data.tableName()
        p = TablePlan(tx, tblname, self.mdm)
        # first, insert the record
        s = p.open()
        s.insert()
        rid = s.getRid()
        # then modify each field, inserting an index record if appropriate
        indexes = self.mdm.getIndexInfo(tblname, tx)
        valIter = data.vals().__iter__()
        # Fields and values are parallel sequences from the parsed statement.
        for fldname in data.fields():
            val = valIter.__next__()
            s.setVal(fldname, val)
            ii = indexes.get(fldname)
            if ii is not None:
                idx = ii.open()
                idx.insert(val, rid)
                idx.close()
        s.close()
        return 1

    def executeDelete(self, data, tx):
        """Delete every record matching the predicate.

        Index entries are removed before the record itself, while the
        record's values are still readable. Returns the delete count.
        """
        tblname = data.tableName()
        p = TablePlan(tx, tblname, self.mdm)
        p = SelectPlan(p, data.pred())
        indexes = self.mdm.getIndexInfo(tblname, tx)
        s = p.open()
        count = 0
        while s.next():
            # first, delete the record's RID from every index
            rid = s.getRid()
            for fldname in indexes.keys():
                val = s.getVal(fldname)
                idx = indexes.get(fldname).open()
                idx.delete(val, rid)
                idx.close()
            # then delete the record
            s.delete()
            count += 1
        s.close()
        return count

    def executeModify(self, data, tx):
        """Update the target field of all matching records.

        Only the modified field's index (if any) needs maintenance: the old
        entry is deleted and the new one inserted. Returns the update count.
        """
        tblname = data.tableName()
        fldname = data.targetField()
        p = TablePlan(tx, tblname, self.mdm)
        p = SelectPlan(p, data.pred())
        ii = self.mdm.getIndexInfo(tblname, tx).get(fldname)
        # The index handle is opened once and reused across all records.
        idx = None if (ii is None) else ii.open()
        s = p.open()
        count = 0
        while s.next():
            # first, update the record
            newval = data.newValue().evaluate(s)
            oldval = s.getVal(fldname)
            s.setVal(data.targetField(), newval)
            # then update the appropriate index, if it exists
            if idx is not None:
                rid = s.getRid()
                idx.delete(oldval, rid)
                idx.insert(newval, rid)
            count += 1
        if idx is not None:
            idx.close()
        s.close()
        return count

    def executeCreateTable(self, data, tx):
        """Create a table from the parsed statement; returns 0 (no rows)."""
        self.mdm.createTable(data.tableName(), data.newSchema(), tx)
        return 0

    def executeCreateView(self, data, tx):
        """Create a view from the parsed statement; returns 0 (no rows)."""
        self.mdm.createView(data.viewName(), data.viewDef(), tx)
        return 0

    def executeCreateIndex(self, data, tx):
        """Create an index from the parsed statement; returns 0 (no rows)."""
        self.mdm.createIndex(data.indexName(), data.tableName(), data.fieldName(), tx)
        return 0
|
[
"1632039752@qq.com"
] |
1632039752@qq.com
|
3da2984b78ac4b9d85c60ceb621e0e1f35020a67
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/paddleLT/donotuse/debug/jit_export_CSPResNet.py
|
223881fbcff5703f345d459931528af5ae9a9e8b
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 785
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
ocr rec_srn_head
"""
import copy
import numpy as np
import paddle
import ppdet
paddle.seed(33)
np.random.seed(33)
def randtool(dtype, low, high, shape):
    """Draw a random ndarray of the given shape.

    dtype "int"   -> integers in [low, high) via np.random.randint
    dtype "float" -> uniform floats scaled into [low, high)
    Any other dtype falls through and returns None.
    """
    if dtype == "float":
        span = high - low
        return low + span * np.random.random(shape)
    if dtype == "int":
        return np.random.randint(low, high, shape)
def main():
    """Build a CSPResNet backbone and run a single forward pass on random input."""
    image = randtool("float", -1, 1, shape=[4, 3, 224, 224]).astype("float32")
    feed = {"image": paddle.to_tensor(image)}
    net = ppdet.modeling.backbones.cspresnet.CSPResNet()
    # net = paddle.jit.to_static(net)
    print(net.out_shape)
    net(inputs=feed)
    # paddle.jit.save(net, path='CSPResNet')


main()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
ba7a076e824d53bf5d8d6cbbcd4e609c30366bbc
|
650e1bea7cec90b3c88ad84a80f5134434920c68
|
/larflow/Reco/test/vis_clusters.py
|
adebb2eeb34637224859b0f832b8b388f6ac7d34
|
[] |
no_license
|
NuTufts/larflow
|
7698329f50ec7d0db2f0a715e5a9f6dc09998f55
|
1ba2b426f191704a141bb72d7675d9746538eed4
|
refs/heads/master
| 2023-08-31T04:35:10.251625
| 2020-09-01T01:49:33
| 2020-09-01T01:49:33
| 136,974,430
| 1
| 1
| null | 2020-09-03T03:00:40
| 2018-06-11T19:56:04
|
C++
|
UTF-8
|
Python
| false
| false
| 6,996
|
py
|
from __future__ import print_function
import os,sys,argparse,json
parser = argparse.ArgumentParser("Plot Reco Clusters for Inspection")
parser.add_argument("-ll","--input-larlite",required=True,type=str,help="kpsrecomanager larlite output file")
args = parser.parse_args()
import numpy as np
import ROOT as rt
from larlite import larlite
from larcv import larcv
from larflow import larflow
larcv.SetPyUtil()
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import lardly
color_by_options = ["larmatch","keypoint"]
colorscale = "Viridis"
option_dict = []
for opt in color_by_options:
option_dict.append( {"label":opt,"value":opt} )
# OPEN LARLITE FILE
# Read-only larlite storage manager over the user-supplied reco file.
io = larlite.storage_manager( larlite.storage_manager.kREAD )
io.add_in_filename( args.input_larlite )
io.open()
nentries = io.get_entries()
# Last event drawn; the plot callback uses it to reset the cluster dropdown
# to "all" the first time a new entry is requested.
CURRENT_EVENT = None
print("NENTRIES: ",nentries)
def make_figures(entry,clustername):
    """
    Build the plotly traces and the cluster dropdown options for one event.

    If clustername == "all", every cluster is drawn in a random colour;
    otherwise clustername is "<producer>:<index>" and only that cluster is
    highlighted while the rest are dimmed to dark grey.
    (The original note said `None`, but the sentinel actually used is "all".)
    """
    from larcv import larcv
    larcv.load_pyutil()
    detdata = lardly.DetectorOutline()
    from larflow import larflow
    larcv.SetPyUtil()
    print("making figures for entry={} cluster={}".format(entry,clustername))
    global io
    global kpsanatree
    io.go_to(entry)
    traces_v = []
    cluster_list = []
    plot_producer = None
    plot_index = None
    if clustername != "all":
        # "producer:index" selects a single cluster to highlight.
        plot_producer = clustername.split(":")[0]
        plot_index = int(clustername.split(":")[1])
    # PLOT TRACK PCA-CLUSTERS: FULL/COSMIC
    # (label, larlite producer, highlight colour, opacity, draw flag)
    clusters = [("cosmic","trackprojsplit_full","rgb(150,150,150)",0.15,False),
                ("wctrack","trackprojsplit_wcfilter","rgb(125,200,125)",1.0,True),
                ("wcshower","showergoodhit","rgb(200,125,125)",0.5,False)]
    for (name,producer,rgbcolor,opa,drawme) in clusters:
        if not drawme:
            continue
        ev_trackcluster = io.get_data(larlite.data.kLArFlowCluster, producer )
        ev_pcacluster = io.get_data(larlite.data.kPCAxis, producer )
        for icluster in range(ev_trackcluster.size()):
            lfcluster = ev_trackcluster.at( icluster )
            cluster_trace = lardly.data.visualize_larlite_larflowhits( lfcluster, name="%s[%d]"%(name,icluster) )
            clabel = "%s:%d (%d hits)"%(producer,icluster,lfcluster.size())
            cvalue = "%s:%d"%(producer,icluster)
            cluster_list.append( {"label":clabel,"value":cvalue} )
            if clustername!="all":
                # Non-selected clusters are dimmed; the selected one is
                # re-coloured below once identified.
                cluster_trace["marker"]["color"] = "rgb(50,50,50)"
            else:
                r3 = np.random.randint(255,size=3)
                rand_color = "rgb(%d,%d,%d)"%( r3[0], r3[1], r3[2] )
                cluster_trace["marker"]["color"] = rand_color
            cluster_trace["marker"]["opacity"] = opa
            cluster_trace["marker"]["width"] = 5.0
            # Overlay the cluster's principal-component axis as a black line.
            pcaxis = ev_pcacluster.at( icluster )
            pcatrace = lardly.data.visualize_pcaxis( pcaxis )
            pcatrace["name"] = "%s-pca[%d]"%(name,icluster)
            pcatrace["line"]["color"] = "rgb(0,0,0)"
            pcatrace["line"]["width"] = 1
            pcatrace["line"]["opacity"] = 1.0
            if plot_producer is not None and plot_producer==producer and plot_index==icluster:
                # The selected cluster gets its producer's highlight colour.
                cluster_trace["marker"]["color"] = rgbcolor
            traces_v.append(cluster_trace)
            traces_v.append( pcatrace )
    # add detector outline
    traces_v += detdata.getlines(color=(10,10,10))
    print("Number of clusters in event: ",len(cluster_list))
    return traces_v,cluster_list
def test():
    """Placeholder hook; intentionally does nothing."""
    return None
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
server = app.server
# 3D PLOT WINDOW
axis_template = {
"showbackground": True,
#"backgroundcolor": "#141414", # black
#"gridcolor": "rgba(255, 255, 255)",
#"zerolinecolor": "rgba(255, 255, 255)",
"backgroundcolor": "rgba(100, 100, 100,0.5)",
"gridcolor": "rgb(50, 50, 50)",
"zerolinecolor": "rgb(0, 0, 0)",
}
plot_layout = {
"title": "",
"height":800,
"margin": {"t": 0, "b": 0, "l": 0, "r": 0},
"font": {"size": 12, "color": "black"},
"showlegend": False,
#"plot_bgcolor": "#141414",
#"paper_bgcolor": "#141414",
"plot_bgcolor": "#ffffff",
"paper_bgcolor": "#ffffff",
"scene": {
"xaxis": axis_template,
"yaxis": axis_template,
"zaxis": axis_template,
"aspectratio": {"x": 1, "y": 1, "z": 3},
"camera": {"eye": {"x": 1, "y": 1, "z": 1},
"up":dict(x=0, y=1, z=0)},
"annotations": [],
},
}
# INPUT FORM: EVENT NUM
eventinput = dcc.Input(
id="input_event",
type="number",
placeholder="Input Event")
# INPUT FORM: CLUSTER LIST
plotcluster = dcc.Dropdown(
options=[
{'label':'all','value':'all'},
],
value='all',
id='plotcluster',
)
# PAGE LAYOUT
app.layout = html.Div( [
html.Div( [ eventinput,
plotcluster,
html.Button("Plot",id="plot")
] ),
html.Hr(),
html.Div( [
dcc.Graph(
id="det3d",
figure={
"data": [],
"layout": plot_layout,
},
config={"editable": True, "scrollZoom": False},
)],
className="graph__container"),
html.Div(id="out")
] )
@app.callback(
[Output("det3d","figure"),
Output("plotcluster","options"),
Output("plotcluster","value"),
Output("out","children")],
[Input("plot","n_clicks")],
[State("input_event","value"),
State("plotcluster","value"),
State("det3d","figure")],
)
def cb_render(*vals):
    """
    runs when plot button is clicked

    Per the @app.callback declaration above: vals[0] is the button's
    n_clicks, vals[1] the requested event number, vals[2] the selected
    cluster value, vals[-1] the current figure state.
    """
    global EVENT_DATA
    global UNMATCHED_CLUSTERS
    global io
    global CURRENT_EVENT
    if vals[1] is None:
        print("Input event is none")
        raise PreventUpdate
    if vals[1]>=nentries or vals[1]<0:
        print("Input event is out of range")
        raise PreventUpdate
    clustername = vals[2]
    entry = int(vals[1])
    if entry!=CURRENT_EVENT:
        # first time we access an entry, we default to the "all" view of the vertices
        CURRENT_EVENT = entry
        clustername = "all"
    cluster_traces_v,cluster_options = make_figures(int(vals[1]),clustername)
    # "all" must always be offered in the dropdown alongside real clusters.
    cluster_options.append( {'label':"all",'value':"all"} )
    # update the figure's traces
    vals[-1]["data"] = cluster_traces_v
    return vals[-1],cluster_options,clustername,"event requested: {}; cluster: {}".format(vals[1],vals[2])
if __name__ == "__main__":
    # Launch the Dash development server (debug mode, default host/port).
    app.run_server(debug=True)
|
[
"taritree.wongjirad@gmail.com"
] |
taritree.wongjirad@gmail.com
|
1115dc0f1240a03cd10c4c47d711092e5ac14e36
|
64764cbae8641d051c2e26c0c2283e8e626d88fb
|
/ecf/tbl/GLBCNO.py
|
86a5dd5680a44379898071b551f23ffb7ba4d13b
|
[] |
no_license
|
jazlee/csp-accounting
|
eb801ce902170337121a6dbe2b1382be4089ecca
|
85f50f9d8defbf52e6c85f5c0fc0464101a01d03
|
refs/heads/master
| 2021-01-25T14:11:18.700456
| 2018-03-03T06:34:57
| 2018-03-03T06:34:57
| 123,666,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
from elixir import *
#
# G/L Batch Numbering Option
#
class GLBCNO(Entity):
    """G/L batch numbering option: tracks the min/max/last-used batch number
    per option id and hands out the next number on request."""
    GLBCNOID = Field(String(3), primary_key=True)  # option id
    GLBCNONM = Field(String(32))                   # option name
    GLBCMINO = Field(Integer)                      # minimum batch number
    GLBCMXNO = Field(Integer)                      # maximum batch number
    GLBCLSNO = Field(Integer)                      # last issued batch number
    # NOTE(review): AUDT/AUTM/AUUS look like audit date/time/user stamps
    # (date as an 8-digit number, time as 6 digits) -- confirm.
    GLBCAUDT = Field(Numeric(8, 0))
    GLBCAUTM = Field(Numeric(6, 0))
    GLBCAUUS = Field(String(24))

    def getLSNO(cls, noid):
        """Issue and persist the next batch number for option `noid`.

        Raises Exception when `noid` is empty, unknown, or when the
        configured maximum batch number has been exhausted.
        """
        if noid in (None, ''):
            raise Exception('Default batch option has not been setup properly')
        q = GLBCNO.query
        q = q.filter_by(GLBCNOID = noid)
        obj = q.first()
        if not obj:
            raise Exception('Batch option %s does not exist' % noid)
        ret = None
        if (obj.GLBCMINO > obj.GLBCLSNO):
            # Nothing issued yet (last-used below the floor): start at the minimum.
            ret = obj.GLBCMINO
        else:
            ret = obj.GLBCLSNO + 1
        if ret > obj.GLBCMXNO:
            raise Exception('Maximum number batch has been reached')
        obj.GLBCLSNO = ret
        session.update(obj)
        return ret
    # Old-style classmethod registration (pre-decorator idiom kept for file style).
    getLSNO = classmethod(getLSNO)
|
[
"jaimy@usg.co.id"
] |
jaimy@usg.co.id
|
46fe70a671b7a3d75410988284962ba930d7a7ae
|
9f59d55bd8466f6f50c5bbec4725c8a073b964bd
|
/base/urls.py
|
2533140c3f85ca26346f0e0d8f03a059d330063f
|
[] |
no_license
|
youngsoul/django-todo-list
|
9a88e14ba2bf305844058d6db94ffc8e11b36e5f
|
f1d687b72fd066a44b29d8974e6e49a094572a6d
|
refs/heads/master
| 2023-04-11T15:08:54.400185
| 2021-04-26T00:57:14
| 2021-04-26T00:57:14
| 361,575,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
from django.urls import path
from .views import TaskList, TaskDetail, TaskCreate, TaskUpdate, TaskDelete, CustomLoginView, RegisterPage
from django.contrib.auth.views import LogoutView
# URL routes for the todo app: auth pages plus task CRUD views.
urlpatterns = [
    path('login/', CustomLoginView.as_view(), name='login'),
    # After logout, send the user straight back to the login page.
    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),
    path('register/', RegisterPage.as_view(), name='register'),
    # Site root lists the user's tasks.
    path('', TaskList.as_view(), name='tasks'),
    path('task/<int:pk>', TaskDetail.as_view(), name='task'),
    path('task-create/', TaskCreate.as_view(), name='task-create'),
    path('task-update/<int:pk>', TaskUpdate.as_view(), name='task-update'),
    path('task-delete/<int:pk>', TaskDelete.as_view(), name='task-delete'),
]
|
[
"theyoungsoul@gmail.com"
] |
theyoungsoul@gmail.com
|
e472a596c694aca6cb4500d419d1493f0e53bcfa
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/mode_selector/mode_selector_wt_widget_model.py
|
2378eac9b0d2e89a56f1349cd4960f6589d3ef65
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/mode_selector/mode_selector_wt_widget_model.py
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_base_widget_model import ModeSelectorBaseWidgetModel
class ModeSelectorWtWidgetModel(ModeSelectorBaseWidgetModel):
    """View model for the WT mode-selector widget.

    Exposes three numeric properties -- current progress, total count and
    ticket count -- stored in the base model's numbered property slots 1-3.
    """
    __slots__ = ()

    def __init__(self, properties=4, commands=0):
        # properties=4 while only slots 1-3 are registered here; presumably
        # slot 0 belongs to the base widget model -- confirm against it.
        super(ModeSelectorWtWidgetModel, self).__init__(properties=properties, commands=commands)

    def getCurrentProgress(self):
        # Slot 1: current progress value.
        return self._getNumber(1)

    def setCurrentProgress(self, value):
        self._setNumber(1, value)

    def getTotalCount(self):
        # Slot 2: total count.
        return self._getNumber(2)

    def setTotalCount(self, value):
        self._setNumber(2, value)

    def getTicketCount(self):
        # Slot 3: ticket count.
        return self._getNumber(3)

    def setTicketCount(self, value):
        self._setNumber(3, value)

    def _initialize(self):
        # Register the numbered properties (all defaulting to 0) in the
        # same slot order the accessors above rely on.
        super(ModeSelectorWtWidgetModel, self)._initialize()
        self._addNumberProperty('currentProgress', 0)
        self._addNumberProperty('totalCount', 0)
        self._addNumberProperty('ticketCount', 0)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
f4021e8727f0afecf7a0bdc8479df954272d1dde
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/131/usersdata/172/37508/submittedfiles/al10.py
|
8b72b71b334d840e9babcac59072316f61d2bb92
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# -*- coding: utf-8 -*-
#NÃO APAGUE A LINHA ACIMA. COMECE ABAIXO DESTA LINHA
# Approximate pi with n factors of the Wallis-style product
#   pi = 4 * prod_k (i/d) * ((i+2)/d),  i = 2, 4, 6, ...  d = 3, 5, 7, ...
n = int(input('digite um valor'))
i = 2
d = 3
produto = 1.0
contador = 0
# BUG FIX: the original `while 0<n` was missing its colon (SyntaxError),
# never advanced a counter (infinite loop), and overwrote the partial
# result each pass instead of accumulating the product.
while contador < n:
    produto = produto * (i / d) * ((i + 2) / d)
    i = i + 2
    d = d + 2
    contador = contador + 1
soma = 4 * produto
# BUG FIX: '%.5d' is an integer format; the float result needs '%.5f'.
print('%.5f' % soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
40960e8b4d96b9073c43ba86fde89699ce912374
|
993c6595f2d7cb2c4efae2c5264fb09008b9c7d4
|
/pychallenge/pychallenge/users/migrations/0003_auto_20190123_1757.py
|
b1783f138089146c10d8deef5cf7e3a8b036e0c6
|
[] |
no_license
|
triump0870/pychallenge
|
985af46268a0a83cb3c8a891d3ff0faf01570ef5
|
c6c117b41bf981efc0acce814a5b17eec49903c6
|
refs/heads/master
| 2022-12-15T11:58:39.045942
| 2019-01-23T19:17:33
| 2019-01-23T19:17:33
| 167,192,119
| 1
| 0
| null | 2022-12-08T01:36:26
| 2019-01-23T13:57:47
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
# Generated by Django 2.1.5 on 2019-01-23 17:57
from django.db import migrations, models
import django.utils.timezone
import markdownx.models
class Migration(migrations.Migration):
    """Add a publishing workflow (edited flag, draft/published status,
    creation timestamp) to the About model and switch its content field
    to Markdown."""

    dependencies = [
        ('users', '0002_auto_20190123_1746'),
    ]

    operations = [
        migrations.AddField(
            model_name='about',
            name='edited',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='about',
            name='status',
            field=models.CharField(choices=[('D', 'Draft'), ('P', 'Published')], default='D', max_length=1),
        ),
        migrations.AddField(
            model_name='about',
            name='timestamp',
            # auto_now_add needs a one-off default to back-fill existing
            # rows; preserve_default=False drops it from the final schema.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='about',
            name='content',
            field=markdownx.models.MarkdownxField(),
        ),
    ]
|
[
"b4you0870@gmail.com"
] |
b4you0870@gmail.com
|
3068573ee1705acd83f26145b16387a3fb624f9f
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_table.py
|
64286297b4600561a077e8c45d8e6733baddfdb9
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986
| 2020-09-23T08:36:59
| 2020-09-23T08:36:59
| 297,947,978
| 2
| 0
|
Apache-2.0
| 2020-09-23T11:28:54
| 2020-09-23T11:28:54
| null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_table():
    """
    Python API test: h2o.frame.H2OFrame.table(data2=None, dense=True)
    Copied from pyunit_table.py
    """
    df = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_cat.csv"))
    # Cross-tabulate two categorical columns into a contingency table.
    tableFrame = df[['DPROS','RACE']].table(data2=None, dense=True)
    assert_is_type(tableFrame, H2OFrame)
    # Every input row must be counted exactly once across all table cells.
    assert tableFrame.sum(axis=0).sum(axis=1).flatten()==df.nrow, \
        "h2o.H2OFrame.table() command is not working."
if __name__ == "__main__":
    # BUG FIX: standalone_test expects the test *function*; the original
    # passed the result of calling it, which ran the test once outside the
    # harness and then handed `None` to standalone_test.
    pyunit_utils.standalone_test(h2o_H2OFrame_table)
else:
    h2o_H2OFrame_table()
|
[
"noreply@github.com"
] |
Winfredemalx54.noreply@github.com
|
d3897c6338e630a1d5c705c0bc9eafc08f859249
|
0e383ccac5fdf21dc5059502b9aae26412fd6a88
|
/sheaths.icmes/src/extract.py
|
d3de533033d78081270a6c72c86f666f948d5acf
|
[
"MIT"
] |
permissive
|
jimsrc/seatos
|
63c8ad99f2b5d4ae5f203cdc8f8e061948f257f4
|
e775dba1a2a96ff44b837cf8d85101ccfef302b1
|
refs/heads/master
| 2021-01-02T08:38:51.349670
| 2017-09-01T01:59:35
| 2017-09-01T01:59:35
| 99,040,968
| 0
| 1
| null | 2017-09-01T01:59:36
| 2017-08-01T20:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,572
|
py
|
#!/usr/bin/env ipython
from pylab import *
from numpy import *
from scipy.io.netcdf import netcdf_file
from datetime import datetime, time, timedelta
#------------ shared libraries:
"""
--- antes de modificar cosas, tener en cuenta los bugs en:
'../../shared_lib/COMENTARIOS.txt'
"""
import sys
sys.path.append('../../shared_lib')
from shared_funcs import * #c_funcs import *
#------------------------------
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
from ShiftTimes import *
import numpy as np
from z_expansion_gulisano import z as z_exp
import console_colors as ccl
import read_NewTable as tb
class boundaries:
    """Mutable namespace for structure boundary times.

    Callers attach the actual boundary attributes after construction
    (e.g. ``bounds.tini`` / ``bounds.tend`` further down this script).
    """
    def __init__(self):
        # BUG FIX: the original assigned to a *local* variable `name`,
        # which was silently discarded; store the default on the instance.
        self.name = 'name'
HOME = os.environ['HOME']
PAO = os.environ['PAO']
gral = general()
day = 86400.
#---- cosas input
gral.fnames = fnames = {}
fnames['ACE'] = '%s/data_ace/64sec_mag-swepam/ace.1998-2014.nc' % HOME
fnames['McMurdo'] = '%s/actividad_solar/neutron_monitors/mcmurdo/mcmurdo_utc_correg.dat' % HOME
fnames['table_richardson'] = '%s/ASOC_ICME-FD/icmes_richardson/data/rich_events_ace.nc' % HOME
fnames['Auger'] = '%s/data_auger/estudios_AoP/data/unir_con_presion/data_final_2006-2013.h5' % PAO
#---- directorios de salida
gral.dirs = dirs = {}
dirs['dir_plots'] = '../plots'
dirs['dir_ascii'] = '../ascii'
dirs['suffix'] = '_auger.data_' # sufijo para el directorio donde guardare
# estas figuras
#-------------------------------------------------------------
#------- seleccionamos MCs con label-de-catalogo (lepping=2, etc)
MCwant = {'flags': ('0', '1', '2', '2H'),
'alias': '0.1.2.2H'} # para "flagear" el nombre/ruta de las figuras
#MCwant = {'flags': ('1', '2', '2H'),
# 'alias': '1.2.2H'} # para "flagear" el nombre/ruta de las figuras
#MCwant = {'flags': ('2', '2H'),
# 'alias': '2.2H'} # para "flagear" el nombre/ruta de las figuras
#MCwant = {'flags': ('2',),
# 'alias': '2'} # para "flagear" el nombre/ruta de las figuras
FILTER = {}
FILTER['Mcmultiple'] = False # True para incluir eventos multi-MC
FILTER['CorrShift'] = False
FILTER['wang'] = False #False #True
FILTER['vsw_filter'] = False
FILTER['z_filter_on'] = False
FILTER['MCwant'] = MCwant
FILTER['B_filter'] = False
FILTER['filter_dR.icme'] = False #True
FILTER['choose_1998-2006'] = False # False:no se restringe al periodo 1998-2006
CUTS = {}
CUTS['ThetaThres'] = 90.0 # all events with theta>ThetaThres
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65
nBin = {}
nBin['before'] = 2
nBin['after'] = 4
nBin['bins_per_utime'] = 50 # bins por unidad de tiempo
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2
#--- bordes de estructura
bounds = boundaries()
bounds.tini = tb.tshck #tb.tini_mc #tb.tshck
bounds.tend = tb.tini_icme #tb.tend_mc #tb.tini_mc
#+++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'McMurdo' #'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
#emgr.run_all()
#+++++++++++++++++++++++++++++++++++++++++++++++++++
emgr.data_name = 'Auger' #'McMurdo'
emgr.FILTER['vsw_filter'] = False
#emgr.run_all()
emgr.filter_events()
emgr.load_data_and_timeshift()
emgr.collect_data()
#+++++++++++++++++++++++++++++++++++++++++++++++++++
"""
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'Auger' #'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
#emgr.run_all()
#++++ limites
LOW, MID1, MID2, TOP = 100.0, 450.0, 550.0, 3000.0
emgr.FILTER['vsw_filter'] = True
emgr.CUTS['v_lo'], emgr.CUTS['v_hi'] = MID2, TOP #MID1, MID2 #LOW, MID1 #
#emgr.run_all()
emgr.filter_events()
emgr.load_data_and_timeshift()
emgr.collect_data()
"""
# save to file
#---- dest directory
dir_dst = '../ascii/MCflag%s' % FILTER['MCwant']['alias']
if FILTER['CorrShift']:
dir_dst += '/wShiftCorr/events_data'
else:
dir_dst += '/woShiftCorr/events_data'
if not(os.path.isdir(dir_dst)):
print "\n ### ERROR ### --> does NOT exist: " + dir_dst
raise SystemExit
#-------------------
events = emgr.out['events_data'].keys()
n_evnts = len(events)
evd = emgr.out['events_data']
"""
for id, i in zip(events, range(n_evnts)):
t = emgr.out['events_data'][id]['t_days']
ndata = len(t)
data_out = np.nan*np.ones((ndata, 3))
data_out[:,0] = t
B = emgr.out['events_data'][id]['B'] # B-data from 'id' event
rmsB = emgr.out['events_data'][id]['rmsB'] # data from 'id' event
data_out[:,1] = B
data_out[:,2] = rmsB
fname_out = '%s/event.data_vlo.%04d_vhi.%04d_id.%s.txt' % (dir_dst, emgr.CUTS['v_lo'], emgr.CUTS['v_hi'], id[3:])
np.savetxt(fname_out, data_out, fmt='%g')
# append a legend
f = open(fname_out, 'a') # append to file
dtsh = emgr.dt_sh[int(id[3:])] # [days] sheath duration
dtmc = emgr.dt_mc[int(id[3:])] # [days] MC duration
COMMS = '# dt_sheath [days]: %g' % dtsh
COMMS += '\n# dt_MC [days]: %g' % dtmc
f.write(COMMS)
f.close()
"""
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##
|
[
"jimmy.ilws@gmail.com"
] |
jimmy.ilws@gmail.com
|
df49c2e8770f6ff76b848c7878d8f60d0083ec8f
|
84566b23a26a3eeadc3d99e6ada39296759da3e6
|
/ptero_auth/implementation/models/scopes.py
|
3ca8e29c1c981954ec1c7c2c872f8a68d35e51f6
|
[] |
no_license
|
iferguson90/ptero-auth
|
2cfd28c11add633c78ef768fede2ff04e2fe064b
|
97047466387df71a8cb8ae29d955f6471540ebfe
|
refs/heads/master
| 2021-01-22T14:39:45.335063
| 2014-08-29T20:56:25
| 2014-08-29T20:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
from .base import Base
from sqlalchemy import Column, ForeignKey, Integer, Text
from sqlalchemy import Table, PrimaryKeyConstraint
from sqlalchemy.orm import relationship
__all__ = ['Scope']
class Scope(Base):
    """A single OAuth scope string, shared by reference across clients."""
    __tablename__ = 'scope'
    # Surrogate primary key.
    scope_pk = Column(Integer, primary_key=True)
    # The scope string itself; uniqueness is enforced at the database level.
    value = Column(Text, index=True, unique=True, nullable=False)
# Many-to-many bridge tables linking clients/grants to scopes. These are
# plain association tables (no mapped class); each uses a composite primary
# key over both foreign keys so a link can appear at most once.

# Scopes a client is permitted to request.
allowed_scope_table = Table('allowed_scope_bridge', Base.metadata,
        Column('client_pk', Integer, ForeignKey('client.client_pk')),
        Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
        PrimaryKeyConstraint('client_pk', 'scope_pk')
)
# Scopes applied when a client makes a request without an explicit scope.
default_scope_table = Table('default_scope_bridge', Base.metadata,
        Column('client_pk', Integer, ForeignKey('client.client_pk')),
        Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
        PrimaryKeyConstraint('client_pk', 'scope_pk')
)
# Links scopes to their audience clients.
scope_audience_table = Table('scope_audience_bridge', Base.metadata,
        Column('client_pk', Integer, ForeignKey('client.client_pk')),
        Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
        PrimaryKeyConstraint('client_pk', 'scope_pk')
)
# Scopes attached to an issued grant (note: keyed by grant_pk, not client_pk).
grant_scope_table = Table('grant_scope_bridge', Base.metadata,
        Column('grant_pk', Integer, ForeignKey('grant.grant_pk')),
        Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
        PrimaryKeyConstraint('grant_pk', 'scope_pk')
)
|
[
"mark.m.burnett@gmail.com"
] |
mark.m.burnett@gmail.com
|
7adedccffebabc3cee9a05501b6ab85fe7b4b3e1
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/FXJSMM/YW_FXJSMM_SZSJ_292.py
|
06e684a9bd632dff4ab6408ce3316867e466936b
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,070
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FXJSMM_SZSJ_292(xtp_test_case):
    # Case YW_FXJSMM_SZSJ_292: a Shenzhen A-share "forward best" sell order
    # with an invalid (negative) quantity must be rejected as a dead order.
    def test_YW_FXJSMM_SZSJ_292(self):
        title = '深圳A股股票交易日本方最优卖——错误的数量(数量<0)'
        # Expected outcome for this test case.
        # Possible order states (Chinese keys below): initial, unfilled,
        # partially filled, fully filled, partial-cancel reported, partially
        # cancelled, cancel pending, cancelled, rejected, cancel-rejected,
        # internal cancel. The expected state here is "rejected" ('废单').
        # xtp_ID and cancel_xtpID default to 0 and need no changes.
        case_goal = {
            '期望状态': '废单',
            'errorID': 10210301,
            'errorMSG': queryOrderErrorMsg(10210301),
            '是否生成报单': '否',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------
        # Arguments: ticker, market, security type, security status, trading
        # status, side ('B' buy / 'S' sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '2', '0', '0', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                # Deliberately invalid: quantity < 0 triggers the expected
                # rejection (errorID 10210301).
                'quantity': -100,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
# Standard unittest entry point: runs this single XTP regression case.
if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
4c6342b8a903410e1a5d3185e85c44b88365c921
|
b1e52f926116286c138890ca0d86bf74433e8ee4
|
/lib/SpriteLoader.py
|
a1063b8adea2fc0d10c23c115df455528f5701c7
|
[] |
no_license
|
topherCantrell/pixel-sign
|
22f35b84bbaaf98fb143229f2df6afe0911e1bb0
|
b8f1c1723f81259fc3dc3e91b275aea0215802df
|
refs/heads/master
| 2021-07-11T16:51:31.799934
| 2020-07-22T17:36:14
| 2020-07-22T17:36:14
| 160,055,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
class SpriteLoader:
    """Load named ASCII sprites from a text file and provide simple
    pixel-level transforms (colorizing, 2x scaling, flipping).

    File format: a line beginning with '-' starts a new sprite and names it
    (the text after the '-'); subsequent non-empty lines are the sprite's
    rows. Blank lines are ignored.
    """

    def __init__(self, filename="Sprites.txt"):
        """Parse *filename* into the ``sprites`` dict (name -> list of rows).

        The filename parameter generalizes the previously hard-coded
        "Sprites.txt"; the default keeps existing callers working.
        """
        self.sprites = {}
        with open(filename) as f:
            lines = f.readlines()
        currentName = None
        currentSprite = []
        for line in lines:
            line = line.strip()
            if len(line) == 0:
                continue
            if line[0] == '-':
                # New sprite header: flush the one being accumulated first.
                if currentName is not None:
                    self.sprites[currentName] = currentSprite
                currentName = line[1:]
                currentSprite = []
                continue
            currentSprite.append(line)
        # Flush the final sprite (no trailing header follows it).
        if currentName is not None:
            self.sprites[currentName] = currentSprite

    def colorSprite(self, name, colorMap):
        """Return sprite *name* as a 2D list of color values.

        *colorMap* is a flat list of alternating (character, value) pairs.
        '.' and ' ' always map to 0; characters missing from the map also
        become 0.
        """
        sprite = self.sprites[name]
        ret = [[0] * len(sprite[0]) for _ in range(len(sprite))]
        for y, row in enumerate(sprite):
            for x, c in enumerate(row):
                v = 0
                if c != '.' and c != ' ':
                    for z in range(0, len(colorMap), 2):
                        if colorMap[z] == c:
                            v = colorMap[z + 1]
                            break
                ret[y][x] = v
        return ret

    def doubler(self, colorSprite):
        """Return the sprite scaled 2x in both width and height."""
        ret = []
        for row in colorSprite:
            drow = []
            for v in row:
                drow.append(v)
                drow.append(v)
            # Matches the original: the same row object is appended twice.
            ret.append(drow)
            ret.append(drow)
        return ret

    def flipLeftRight(self, sprite):
        """Return the sprite mirrored horizontally (each row reversed)."""
        return [list(reversed(row)) for row in sprite]

    def flipUpDown(self, colorSprite):
        """Return the sprite mirrored vertically (row order reversed).

        BUGFIX: the original method body contained only a Java sketch in a
        docstring and implicitly returned None; this implements it.
        """
        return colorSprite[::-1]
# Manual smoke test: constructing the loader parses "Sprites.txt" from the
# current working directory.
if __name__ == '__main__':
    sp = SpriteLoader()
|
[
"topherCantrell@gmail.com"
] |
topherCantrell@gmail.com
|
12ea1bd995b23ab1c185e7562c2fbb73ddf63694
|
f8f8651ab604acc4937f8725caadaca1fb97a5e8
|
/src/pytorch_lightning/plugins/precision/native_amp.py
|
4df1b166ca8dd31d4fbea638d32dc81110659018
|
[
"Apache-2.0"
] |
permissive
|
neptune-ai/pytorch-lightning
|
ac59e746a486e07e21abae426b28e5d72812ac98
|
702014418e2ec0437e67d8bf97809edef686a02c
|
refs/heads/master
| 2022-09-28T09:34:07.653729
| 2022-09-12T11:13:48
| 2022-09-12T11:13:48
| 229,063,811
| 1
| 1
|
Apache-2.0
| 2022-09-26T03:29:49
| 2019-12-19T13:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 5,106
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, Optional, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import LBFGS, Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins.precision.mixed import MixedPrecisionPlugin
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10, AMPType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
# torch.autocast (device-agnostic) only exists on torch >= 1.10; earlier
# versions provide the CUDA-only torch.cuda.amp.autocast. Exactly one of
# the two aliases is bound, selected by the version flag.
if _TORCH_GREATER_EQUAL_1_10:
    from torch import autocast as new_autocast
else:
    from torch.cuda.amp import autocast as old_autocast
class NativeMixedPrecisionPlugin(MixedPrecisionPlugin):
    """Plugin for Native Mixed Precision (AMP) training with ``torch.autocast``.
    Args:
        precision: Whether to use ``torch.float16`` (``16``) or ``torch.bfloat16`` (``'bf16'``).
        device: The device for ``torch.autocast``.
        scaler: An optional :class:`torch.cuda.amp.GradScaler` to use.
    """
    backend = AMPType.NATIVE
    def __init__(
        self, precision: Union[str, int], device: str, scaler: Optional[torch.cuda.amp.GradScaler] = None
    ) -> None:
        super().__init__()
        if precision == "bf16" and not _TORCH_GREATER_EQUAL_1_10:
            raise MisconfigurationException(
                "To use bfloat16 with native amp you must install torch greater or equal to 1.10."
            )
        # A GradScaler is created only for fp16; bf16 never uses one (and
        # passing one explicitly for bf16 is rejected below).
        if scaler is None and precision == 16:
            scaler = torch.cuda.amp.GradScaler()
        if scaler is not None and precision == "bf16":
            raise MisconfigurationException(f"`precision='bf16'` does not use a scaler, found {scaler}.")
        self.precision = precision
        self.device = device
        self.scaler = scaler
    def pre_backward(self, model: "pl.LightningModule", closure_loss: Tensor) -> Tensor:
        """Scale the loss before backward (automatic-optimization path)."""
        if self.scaler is not None:
            closure_loss = self.scaler.scale(closure_loss)
        return super().pre_backward(model, closure_loss)
    def _run_backward(self, tensor: Tensor, model: Optional[Module], *args: Any, **kwargs: Any) -> None:
        """Scale the tensor before backward (manual-optimization path)."""
        if self.scaler is not None:
            tensor = self.scaler.scale(tensor)
        super()._run_backward(tensor, model, *args, **kwargs)
    def optimizer_step(
        self,
        model: Optional[Union["pl.LightningModule", Module]],
        optimizer: Optimizer,
        optimizer_idx: int,
        closure: Callable[[], Any],
        **kwargs: Any,
    ) -> Any:
        """Run the closure, then step the optimizer through the GradScaler."""
        if self.scaler is None:
            # skip scaler logic, as bfloat16 does not require scaler
            return super().optimizer_step(model, optimizer, optimizer_idx, closure, **kwargs)
        if isinstance(optimizer, LBFGS):
            raise MisconfigurationException(
                f"Native AMP and the LBFGS optimizer are not compatible (optimizer {optimizer_idx})."
            )
        closure_result = closure()
        # `unscale` after the closure is executed but before the `on_before_optimizer_step` hook.
        self.scaler.unscale_(optimizer)
        self._after_closure(model, optimizer, optimizer_idx)
        skipped_backward = closure_result is None
        # in manual optimization, the closure does not return a value
        if not isinstance(model, pl.LightningModule) or not model.automatic_optimization or not skipped_backward:
            # note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
            step_output = self.scaler.step(optimizer, **kwargs)
            self.scaler.update()
            return step_output
        return closure_result
    def autocast_context_manager(self) -> Union["old_autocast", "new_autocast"]:
        """Return the autocast context appropriate for the torch version."""
        if _TORCH_GREATER_EQUAL_1_10:
            # the dtype could be automatically inferred but we need to manually set it due to a bug upstream
            # https://github.com/pytorch/pytorch/issues/67233
            return new_autocast(self.device, dtype=torch.bfloat16 if self.precision == "bf16" else torch.half)
        return old_autocast()
    @contextmanager
    def forward_context(self) -> Generator[None, None, None]:
        """Enable autocast context."""
        with self.autocast_context_manager():
            yield
    def state_dict(self) -> Dict[str, Any]:
        # Only the GradScaler carries checkpointable state; bf16 has none.
        if self.scaler is not None:
            return self.scaler.state_dict()
        return {}
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        # No-op when there is no scaler (bf16).
        if self.scaler is not None:
            self.scaler.load_state_dict(state_dict)
|
[
"noreply@github.com"
] |
neptune-ai.noreply@github.com
|
a7ecf43316deac0104330b55d0d997358e4b3b58
|
19bb4caf8a06868498c5b7c35c46e5e1da188548
|
/simpleorm.py
|
2349a3ba2db1ac9933b2255e4e709ce279b1f82a
|
[] |
no_license
|
MUIC-CS/summer2017-week2-tabkeeper
|
3585924534972d354c04007845e18a9b6868a48f
|
68b1b989f3d8a8100b599b9a8cdb5251639aa14d
|
refs/heads/master
| 2021-01-15T12:37:37.513496
| 2017-08-09T19:12:16
| 2017-08-09T19:12:16
| 99,653,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,478
|
py
|
from db import get_cursor
class SimpleBean:
    """Lightweight row object whose attributes are defined by ``columns``.

    ``columns`` is an ordered tuple of column names; by convention the first
    entry is the id / primary-key column.
    """
    columns = ()

    def __init__(self, *args, **kwds):
        # Positional args map onto columns in order; keyword args override.
        tmp = {k: v for k, v in zip(self.columns, args)}
        tmp.update(kwds)
        self.__dict__ = self.filter_params(tmp)

    @classmethod
    def filter_params(cls, d):
        """Return a dict with exactly the keys of ``columns``.

        Missing columns are filled with None; unknown keys are dropped.
        """
        return {col: d.get(col) for col in cls.columns}

    @classmethod
    def from_dict(cls, d):
        """Build an instance from a dict, ignoring unknown keys.

        BUGFIX: the original computed an intermediate filtered dict and then
        discarded it; ``filter_params`` already drops unknown keys, so the
        dead code is removed.
        """
        return cls(**cls.filter_params(d))

    @classmethod
    def id_col(cls):
        """Name of the id column (first entry of ``columns``)."""
        return cls.columns[0]

    def tuple_values(self, with_id=False):
        """Column values as a tuple, excluding the id unless *with_id*."""
        start = 0 if with_id else 1
        return tuple(self.__dict__[col] for col in self.columns[start:])

    def __repr__(self):
        values = ['{c}={v}'.format(c=col, v=self.__dict__[col])
                  for col in self.columns]
        vals = ', '.join(values)
        classname = self.__class__.__name__
        return '<{classname} {vals}>'.format(classname=classname, vals=vals)
class SimpleRepo:
    """Minimal table gateway over a SimpleBean subclass.

    Subclasses configure ``table_name``, ``bean_class`` (a SimpleBean
    subclass) and ``create_query`` (which may reference ``{table_name}``).
    """
    table_name = ''
    bean_class = None
    create_query = ''

    @classmethod
    def create_table(cls, drop=False):
        """Create the table; drop it first when *drop* is true."""
        if drop:
            cls.drop_table()
        with get_cursor() as cur:
            cur.execute(cls.create_query.format(table_name=cls.table_name))
            cur.connection.commit()

    @classmethod
    def find_by_col(cls, col, value):
        """Return the first row where *col* equals *value*, as a bean.

        BUGFIX: the original returned ``bean_class.from_dict(rs)`` using the
        bare name ``bean_class`` — a NameError at runtime; it must go
        through ``cls``. NOTE: *col* is interpolated into the SQL text, so
        it must come from code, never from user input.
        """
        with get_cursor() as cur:
            cur.execute(
                """
                SELECT * from {table_name} where {col}=%s
                """.format(table_name=cls.table_name, col=col),
                (value,)
            )
            rs = cur.fetchone()
            return cls.bean_class.from_dict(rs)

    @classmethod
    def find_all(cls):
        """Return every row of the table as a list of beans."""
        with get_cursor() as cur:
            cur.execute(
                """
                SELECT * from {table_name}
                """.format(table_name=cls.table_name)
            )
            return [cls.bean_class.from_dict(d) for d in cur.fetchall()]

    @classmethod
    def find_by_id(cls, value):
        """Return the row whose id column equals *value*.

        BUGFIX: was ``bean_class.id_col()`` (bare name, NameError).
        """
        return cls.find_by_col(cls.bean_class.id_col(), value)

    @classmethod
    def delete_by_id(cls, id):
        """Delete the row whose id column equals *id*."""
        with get_cursor() as cur:
            cur.execute(
                """
                DELETE FROM {table_name} where {id_col}=%s
                """.format(
                    table_name=cls.table_name,
                    # BUGFIX: was the bare name ``bean_class`` (NameError).
                    id_col=cls.bean_class.id_col()),
                (id,)
            )
            cur.connection.commit()

    @classmethod
    def add(cls, obj):
        """Insert *obj*; the DB assigns the id, which is set on *obj*."""
        col_tuple = ', '.join(cls.bean_class.columns[1:])
        ph = ', '.join(['%s'] * (len(cls.bean_class.columns) - 1))
        id_col = cls.bean_class.id_col()
        # BUGFIX: Python 2 print statement converted to a function call.
        print(obj.tuple_values())
        with get_cursor() as cur:
            cur.execute(
                """
                INSERT INTO {table_name}({col_tuple})
                VALUES ({ph}) RETURNING {id_col}
                """.format(table_name=cls.table_name,
                           col_tuple=col_tuple,
                           ph=ph,
                           id_col=id_col),
                obj.tuple_values()
            )
            id = cur.fetchone()[id_col]
            cur.connection.commit()
        # NOTE(review): assumes the id column is literally named "id" on the
        # bean; confirm against bean_class.columns of each subclass.
        obj.id = id
        return obj

    @classmethod
    def add_all(cls, objs):
        """Insert each bean in *objs*; returns them with ids set."""
        return [cls.add(obj) for obj in objs]

    @classmethod
    def drop_table(cls):
        """Drop the table if it exists."""
        with get_cursor() as cur:
            cur.execute(
                """
                DROP TABLE IF EXISTS {table_name}
                """.format(table_name=cls.table_name)
            )
            cur.connection.commit()

    @classmethod
    def delete_table(cls):
        """Delete all rows (the table itself is kept)."""
        with get_cursor() as cur:
            cur.execute(
                """
                DELETE FROM {table_name}
                """.format(table_name=cls.table_name)
            )
            cur.connection.commit()

    @classmethod
    def fetch_by_condition(cls, cond, args):
        """Return rows matching SQL condition *cond* (with %s placeholders
        filled from *args*) as beans. *cond* must come from code, not users."""
        with get_cursor() as cur:
            cur.execute(
                """SELECT *
                FROM {table_name}
                WHERE {cond}
                """.format(cond=cond, table_name=cls.table_name),
                args
            )
            rs = cur.fetchall()
            return [cls.bean_class.from_dict(row) for row in rs]
|
[
"piti118@gmail.com"
] |
piti118@gmail.com
|
970ed11ee3587e2517db5c3285c8aa9aff4724f4
|
f36d2e601b9aa0498c68951e8a081b6ce2036116
|
/modules/ip_commands.py
|
dfea39dfb7b8e1745dfedf9f473a43782d70fc07
|
[] |
no_license
|
jaconsta/rpi_notify
|
16de79acfb916646cb0ebd4cb8bbb3a7def87c31
|
e0aac512dc96eb66fb61ac13560a59e4b1929b89
|
refs/heads/master
| 2021-01-11T20:48:57.761601
| 2017-01-17T05:05:21
| 2017-01-17T05:05:21
| 79,190,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
"""
Obtain current local IP address.
"""
import platform
import socket
import subprocess
def run_command(cmd):
    """
    Execute this OS command and return the formatted response.

    Returns the command's stdout decoded as UTF-8, or — if the command
    wrote anything to stderr — the stderr text instead.
    """
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # BUGFIX: the original returned *err* as raw bytes while the success path
    # returned decoded text; decode both so callers always receive str.
    return err.decode('UTF-8') if err else out.decode('UTF-8')
def get_ip():
    """
    Get the local IP.

    On Linux/Windows this returns the raw output of the routing/ipconfig
    command (the caller parses it); on other platforms it discovers the
    outbound interface address via a connected UDP socket — no packet is
    actually sent. Falls back to 127.0.0.1 when no route is available.
    """
    if platform.system() == 'Linux':
        ip = run_command(['ip', 'route'])
    elif platform.system() == 'Windows':
        ip = run_command(['ipconfig'])
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # doesn't even have to be reachable
            s.connect(('10.255.255.255', 0))
            ip = s.getsockname()[0]
        except OSError:
            # BUGFIX: narrowed from a bare ``except``, which also swallowed
            # KeyboardInterrupt and SystemExit.
            ip = '127.0.0.1'
        finally:
            s.close()
    return ip
# Hostname of the machine, resolved once at import time.
whoami = socket.gethostname()
|
[
"constainopia@gmail.com"
] |
constainopia@gmail.com
|
dfc79ab2926282ff85f93564e287333570fd498b
|
b8ef1a5cd3856a8e9134c3313a4e23522f199df7
|
/Programmers/84325_직업군 추천하기/84325_직업군 추천하기.py
|
2d65ae9cecc92be4f0db236416f553f114f185dc
|
[] |
no_license
|
scl2589/Algorithm_problem_solving
|
910623d9675ae0219320abfd1fefc7d576027544
|
80db697cdd0180a7d4dbcfae4944d4a54191bddf
|
refs/heads/master
| 2023-07-29T10:56:38.225206
| 2021-09-11T13:50:46
| 2021-09-11T13:50:46
| 235,363,353
| 0
| 0
| null | 2021-03-04T15:39:41
| 2020-01-21T14:36:41
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
def solution(table, languages, preference):
    """Recommend a job category (Programmers #84325).

    Each entry of *table* is "<job> <lang1> ... <lang5>", lang1 being the
    job's most preferred language (score 5) down to lang5 (score 1). A job's
    total is the sum over the user's *languages* of language-score times the
    matching *preference* weight; ties break alphabetically by job name.

    Removes the unused ``answer`` accumulator from the original.
    """
    scored = []
    for row in table:
        parts = row.split(' ')
        # Reverse the five languages so that list index == score (1..5);
        # index 0 holds the job name and is never matched.
        ranked = [parts[0]] + parts[5:0:-1]
        total = 0
        for lang, weight in zip(languages, preference):
            if lang in ranked:
                total += ranked.index(lang) * weight
        scored.append((total, parts[0]))
    # Highest total wins; equal totals go to the lexicographically smaller name.
    return min(scored, key=lambda s: (-s[0], s[1]))[1]
|
[
"chaelinshin96@gmail.com"
] |
chaelinshin96@gmail.com
|
7c83619bbc76cacaee8fce5c49099d93ca880d70
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631572862566400_0/Python/icedingo/bff.py
|
6f6a5da5564dfe239f4a6b0dd80a606d45e35cb2
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
# Number of test cases, read from stdin (Code Jam input format).
T = int(input())
class Pair(object):
    """A mutual best-friend pair (a 2-cycle) together with the reverse-edge
    lists of its two members (children whose best friend is a / b)."""
    def __init__(self, a, b, ap, bp):
        # ap: children pointing at a; bp: children pointing at b.
        self.a = a
        self.b = b
        self.ap = ap
        self.bp = bp
    def get_longest(self, pairs, rev, seen):
        """Return the longest arrangement length that uses this pair.

        BFS over reverse edges finds the longest admirer chain ending at
        each member (excluding the partner), then the remaining pairs are
        combined recursively. *seen* holds pairs on the current recursion
        path so none is used twice.
        """
        #print('Getting longest for', self)
        # Longest chain feeding into a (partner b excluded as a start).
        alen = 0
        for p in self.ap:
            if p == self.b:
                continue
            seena = set()
            nex = [p]
            _alen = 0
            while nex:
                # Each BFS level adds one to the chain length.
                _alen += 1
                _nex = []
                for n in nex:
                    seena.add(n)
                    for c in rev[n]:
                        if c not in seena:
                            _nex.append(c)
                nex = _nex
            if _alen > alen:
                alen = _alen
        # Longest chain feeding into b (partner a excluded as a start).
        blen = 0
        for p in self.bp:
            if p == self.a:
                continue
            seenb = set()
            nex = [p]
            _blen = 0
            while nex:
                _blen += 1
                _nex = []
                for n in nex:
                    seenb.add(n)
                    for c in rev[n]:
                        if c not in seenb:
                            _nex.append(c)
                nex = _nex
            if _blen > blen:
                blen = _blen
        #print(' A chain', alen)
        #print(' B chain', blen)
        seen.add(self)
        # Best total over arrangements of the not-yet-used pairs.
        submax = 0
        for p in pairs:
            #print(' Checking', p)
            if p in seen:
                #print(' -- NAH')
                continue
            _submax = p.get_longest(pairs, rev, seen)
            if _submax > submax:
                submax = _submax
        seen.remove(self)
        #print('ret!')
        # NOTE(review): when other pairs remain on the path (seen non-empty)
        # only one of {sub-arrangement, a-chain, b-chain} is counted beyond
        # the pair itself; at the top level two contributions may combine —
        # presumably because an interior pair can extend in one direction
        # only. Confirm against the BFF problem analysis.
        if seen:
            return 2 + max(submax, alen, blen)
        else:
            return 2 + max(submax + alen, submax + blen, alen + blen)
    def __str__(self):
        return 'Pair<{}, {}>'.format(self.a, self.b)
# One "BFF" case per iteration: N children in a circle, child i must sit
# next to bffs[i]. The answer is the larger of the longest bff-cycle and
# the best combination of mutual pairs extended by admirer chains.
for t in range(1, T+1):
    N = int(input())
    bffs = [int(i) - 1 for i in input().split()]
    # rev[b] lists every child whose best friend is b (reverse edges).
    rev = [[] for i in range(N)]
    for c, b in enumerate(bffs):
        rev[b].append(c)
    pairs = set()
    pairtuples = set()
    max_len = 0
    #print(bffs)
    for n in range(N):
        # Follow best-friend links from n until a node repeats; `seen` then
        # contains the terminal cycle (plus the approach path, if any).
        current = bffs[n]
        seen = set()
        while current not in seen:
            seen.add(current)
            #print(current, 'bff of', end = ' ')
            current = bffs[current]
        #print(current)
        # Only process n when it lies on the cycle itself.
        if n not in seen:
            #print(n, 'not in cycle :(')
            continue
        lseen = len(seen)
        if lseen == 2:
            # Mutual pairs are special: chains can attach to both members
            # and several pairs can be combined. Deduplicate by sorted ids.
            #print(seen, 'are a pair!')
            ptuple = tuple(sorted(seen))
            if ptuple not in pairtuples:
                a = seen.pop()
                b = seen.pop()
                pairs.add(Pair(a, b, rev[a], rev[b]))
                pairtuples.add(ptuple)
        if lseen > max_len:
            # Longer cycles seat only their own members.
            #print('new circle!', seen)
            max_len = lseen
    # Compare pure cycles with the best pair-plus-chain arrangements.
    for p in pairs:
        plen = p.get_longest(pairs, rev, set())
        if plen > max_len:
            max_len = plen
    print('Case #{}: {}'.format(t, max_len))
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
32a13a3c4ef90bca8e60735e77b5dcd0de843596
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/homeassistant/components/plex/services.py
|
0847583635de398594b64c383690af167bf32786
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 6,634
|
py
|
"""Services for the Plex integration."""
import json
import logging
from plexapi.exceptions import NotFound
import voluptuous as vol
from yarl import URL
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
DOMAIN,
PLEX_UPDATE_PLATFORMS_SIGNAL,
PLEX_URI_SCHEME,
SERVERS,
SERVICE_REFRESH_LIBRARY,
SERVICE_SCAN_CLIENTS,
)
from .errors import MediaNotFound
from .models import PlexMediaSearchResult
# Service schema: the server name is optional (needed only when several
# Plex servers are configured); the library name is required.
REFRESH_LIBRARY_SCHEMA = vol.Schema(
    {vol.Optional("server_name"): str, vol.Required("library_name"): str}
)
# NOTE: the logger is namespaced to the integration package, not this module.
_LOGGER = logging.getLogger(__package__)
async def async_setup_services(hass):
    """Set up services for the Plex component."""
    async def async_refresh_library_service(service_call: ServiceCall) -> None:
        # The library refresh talks to the Plex API synchronously, so it is
        # pushed onto the executor instead of blocking the event loop.
        await hass.async_add_executor_job(refresh_library, hass, service_call)
    async def async_scan_clients_service(_: ServiceCall) -> None:
        _LOGGER.warning(
            "This service is deprecated in favor of the scan_clients button entity. "
            "Service calls will still work for now but the service will be removed in a future release"
        )
        # Signal every configured server to update its platforms/clients.
        for server_id in hass.data[DOMAIN][SERVERS]:
            async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id))
    hass.services.async_register(
        DOMAIN,
        SERVICE_REFRESH_LIBRARY,
        async_refresh_library_service,
        schema=REFRESH_LIBRARY_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SCAN_CLIENTS, async_scan_clients_service
    )
    return True
def refresh_library(hass: HomeAssistant, service_call: ServiceCall) -> None:
    """Scan a Plex library for new and updated media."""
    library_name = service_call.data["library_name"]
    server_name = service_call.data.get("server_name")
    plex_server = get_plex_server(hass, server_name)
    try:
        target_section = plex_server.library.section(title=library_name)
    except NotFound:
        # Report which libraries DO exist so the user can fix the call.
        _LOGGER.error(
            "Library with name '%s' not found in %s",
            library_name,
            [x.title for x in plex_server.library.sections()],
        )
        return
    _LOGGER.debug("Scanning %s for new and updated media", library_name)
    target_section.update()
def get_plex_server(hass, plex_server_name=None, plex_server_id=None):
    """Retrieve a configured Plex server by name."""
    if DOMAIN not in hass.data:
        raise HomeAssistantError("Plex integration not configured")
    servers_by_id = hass.data[DOMAIN][SERVERS]
    if not servers_by_id:
        raise HomeAssistantError("No Plex servers available")
    # An explicit server id wins over a name lookup.
    if plex_server_id:
        return servers_by_id[plex_server_id]
    all_servers = list(servers_by_id.values())
    if plex_server_name:
        for candidate in all_servers:
            if candidate.friendly_name == plex_server_name:
                return candidate
        friendly_names = [candidate.friendly_name for candidate in all_servers]
        raise HomeAssistantError(
            f"Requested Plex server '{plex_server_name}' not found in {friendly_names}"
        )
    # No selector given: unambiguous only when exactly one server exists.
    if len(all_servers) == 1:
        return all_servers[0]
    friendly_names = [candidate.friendly_name for candidate in all_servers]
    raise HomeAssistantError(
        f"Multiple Plex servers configured, choose with 'plex_server' key: {friendly_names}"
    )
def process_plex_payload(
    hass, content_type, content_id, default_plex_server=None, supports_playqueues=True
) -> PlexMediaSearchResult:
    """Look up Plex media using media_player.play_media service payloads.

    Accepts three payload shapes: 'plex://{<json>}', a 'plex://' media
    browser URL (modern form with the server id in the host position, or
    the legacy form without it), or a bare JSON string.
    """
    plex_server = default_plex_server
    extra_params = {}
    if content_id.startswith(PLEX_URI_SCHEME + "{"):
        # Handle the special payload of 'plex://{<json>}'
        content_id = content_id[len(PLEX_URI_SCHEME) :]
        content = json.loads(content_id)
    elif content_id.startswith(PLEX_URI_SCHEME):
        # Handle standard media_browser payloads
        plex_url = URL(content_id)
        if plex_url.name:
            if len(plex_url.parts) == 2:
                if plex_url.name == "search":
                    content = {}
                else:
                    # A numeric rating key identifying a specific item.
                    content = int(plex_url.name)
            else:
                # For "special" items like radio stations
                content = plex_url.path
            server_id = plex_url.host
            plex_server = get_plex_server(hass, plex_server_id=server_id)
        else:
            # Handle legacy payloads without server_id in URL host position
            if plex_url.host == "search":
                content = {}
            else:
                content = int(plex_url.host)  # type: ignore[arg-type]
        extra_params = dict(plex_url.query)
    else:
        content = json.loads(content_id)
    if isinstance(content, dict):
        # The payload may name the target server explicitly.
        if plex_server_name := content.pop("plex_server", None):
            plex_server = get_plex_server(hass, plex_server_name)
    if not plex_server:
        plex_server = get_plex_server(hass)
    if content_type == "station":
        if not supports_playqueues:
            raise HomeAssistantError("Plex stations are not supported on this device")
        playqueue = plex_server.create_station_playqueue(content)
        return PlexMediaSearchResult(playqueue)
    if isinstance(content, int):
        # Normalize a bare rating key to the dict form.
        content = {"plex_key": content}
        content_type = DOMAIN
    content.update(extra_params)
    if playqueue_id := content.pop("playqueue_id", None):
        if not supports_playqueues:
            raise HomeAssistantError("Plex playqueues are not supported on this device")
        try:
            playqueue = plex_server.get_playqueue(playqueue_id)
        except NotFound as err:
            raise MediaNotFound(
                f"PlayQueue '{playqueue_id}' could not be found"
            ) from err
        return PlexMediaSearchResult(playqueue, content)
    search_query = content.copy()
    shuffle = search_query.pop("shuffle", 0)
    # Remove internal kwargs before passing copy to plexapi
    for internal_key in ("resume", "offset"):
        search_query.pop(internal_key, None)
    media = plex_server.lookup_media(content_type, **search_query)
    # Multiple results or shuffle requests require wrapping in a playqueue.
    if supports_playqueues and (isinstance(media, list) or shuffle):
        playqueue = plex_server.create_playqueue(
            media, includeRelated=0, shuffle=shuffle
        )
        return PlexMediaSearchResult(playqueue, content)
    return PlexMediaSearchResult(media, content)
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
061696e1fd6d7402e0460b8c2bd8cc4d09085cb6
|
cce6364dd85b62782671cd8048873eede2045137
|
/high/2_mergeKLists.py
|
152a70aed8a7ea6300262b0c85c3284a60f19832
|
[] |
no_license
|
gmt710/leetcode_python
|
ed647958440f66583b8717dae7bca49c516984da
|
441623afee3713506b702c5fd462c7ba84b48442
|
refs/heads/master
| 2020-03-28T05:11:02.851792
| 2019-04-17T09:14:51
| 2019-04-17T09:14:51
| 147,761,046
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# heapq使用说明
# a为普通列表
# - heapq.heapify(a) 调整a,使得其满足最小堆
# - heapq.heappop(a) 从最小堆中弹出最小的元素
# - heapq.heappush(a,b) 向最小堆中压入新的元素
class Solution(object):
    def mergeKLists(self, lists):
        """
        :type lists: List[ListNode]
        :rtype: ListNode

        Merge k sorted singly linked lists into one sorted list using a
        size-k min-heap of the lists' current heads.

        BUGFIX: heap entries now carry a monotonically increasing sequence
        number as a tie-breaker. The original pushed (val, node) pairs,
        which raises TypeError under Python 3 whenever two values are equal,
        because ListNode objects are not orderable.
        """
        import heapq
        import itertools
        counter = itertools.count()  # tie-breaker so nodes are never compared
        heap = []
        for ln in lists:
            # Seed the heap with each non-empty list's head (its minimum).
            if ln:
                heap.append((ln.val, next(counter), ln))
        heapq.heapify(heap)
        dummy = ListNode(0)
        cur = dummy
        while heap:
            # Pop the globally smallest remaining node and append it.
            _val, _seq, node = heapq.heappop(heap)
            cur.next = node
            cur = cur.next
            # Push the popped node's successor, keeping at most k entries.
            if node.next:
                heapq.heappush(heap, (node.next.val, next(counter), node.next))
        return dummy.next
|
[
"noreply@github.com"
] |
gmt710.noreply@github.com
|
aa712c04989960f93bbe5288b6a2119889c460a7
|
86e904c75d0140eea3e4169d216955e1c34801b3
|
/python_test2/cloud/DB_update.py
|
ef46c6c42d6fecb4b51755d7aa89d1ae39675299
|
[] |
no_license
|
reharmony/cloudpython
|
d62f61749e5b5862d3b81e449d5154e188a14d21
|
98e033e537d763ba86d162f58d0fe8f64249a291
|
refs/heads/master
| 2020-04-29T16:58:55.281917
| 2019-05-15T12:11:43
| 2019-05-15T12:11:43
| 176,281,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
'''
Created on 2019. 4. 30.
@author: user
'''
import pymysql
from cloud.UI_Main import *
# 수강정보 DB에서 수정
def db_process_update(id, price):
    """Update the price of the course_info row identified by *id*.

    BUGFIX: the original built the SQL statement by string concatenation of
    *id*, a SQL-injection vector that also breaks on quotes; it now uses a
    parameterized query. The connection is closed even when an error occurs.
    """
    # 1. authenticate and connect to the DB
    con = pymysql.connect(host ='localhost', user='root',password='1234', db = 'course')
    print("1. db인증 -> 연결 성공...")
    print(con)
    try:
        # 2. open a cursor on the connection
        cur = con.cursor()
        print()
        print("2. 연결정보 -> 통로 만들기 성공...")
        # 3. build and send the SQL (parameterized; pymysql quotes safely)
        sql = "update course_info set price=%s where id=%s"
        cur.execute(sql, (int(price), id))
        con.commit()
        print()
        print("3. sql문 만들어서 -> 전송 성공...")
    finally:
        # 4. release the DB connection
        con.close()
        print()
        print("4. db 연결해제 성공...")
        print("===============================")
        print()
|
[
"noreply@github.com"
] |
reharmony.noreply@github.com
|
c59d517b4583b3b3e62c9432003fb472aceb46b2
|
b1a69cd1d3ad792e8c50f2266493b586b0633168
|
/repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py
|
e91345f2d56e2ee79676d33b485203afb6ec5cec
|
[
"Apache-2.0"
] |
permissive
|
bmarzins/leapp-repository
|
aaf4c0394fd7d23ea639bd2aa8299e815ff9ba40
|
e4f733297937847522ecf4b306182c2bcb293676
|
refs/heads/master
| 2022-05-26T06:51:34.428355
| 2022-05-04T11:39:41
| 2022-05-04T15:18:35
| 242,839,597
| 0
| 0
|
Apache-2.0
| 2020-02-24T20:45:26
| 2020-02-24T20:45:25
| null |
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
import pytest
from leapp import reporting
from leapp.libraries.actor.mariadbcheck import report_installed_packages
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM, RPM
def _generate_rpm_with_name(name):
    """
    Generate new RPM model item with given name.
    Parameters:
        name (str): rpm name
    Returns:
        rpm (RPM): new RPM object with name parameter set
    """
    # Every field other than the name is a fixed dummy value — the actor
    # under test only inspects package names.
    return RPM(name=name,
               version='0.1',
               release='1.sm01',
               epoch='1',
               pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51',
               packager='Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>',
               arch='noarch')
@pytest.mark.parametrize('has_server', [
    (True),  # with server
    (False),  # without server
])
def test_actor_execution(monkeypatch, has_server):
    """
    Parametrized helper function for test_actor_* functions.
    First generate list of RPM models based on set arguments. Then, run
    the actor feeded with our RPM list. Finally, assert Reports
    according to set arguments.
    Parameters:
        has_server (bool): mariadb-server installed
    """
    # Couple of random packages
    rpms = [_generate_rpm_with_name('sed'),
            _generate_rpm_with_name('htop')]
    if has_server:
        # Add mariadb-server
        rpms += [_generate_rpm_with_name('mariadb-server')]
    # Replace the real actor context and report sink with test doubles.
    curr_actor_mocked = CurrentActorMocked(msgs=[InstalledRedHatSignedRPM(items=rpms)])
    monkeypatch.setattr(api, 'current_actor', curr_actor_mocked)
    monkeypatch.setattr(reporting, "create_report", create_report_mocked())
    # Executed actor feeded with fake RPMs
    report_installed_packages(_context=api)
    if has_server:
        # Assert for mariadb-server package installed
        assert reporting.create_report.called == 1
    else:
        # Assert for no mariadb packages installed
        assert not reporting.create_report.called
|
[
"xstodu05@gmail.com"
] |
xstodu05@gmail.com
|
a075f64105d9368199759c95a39be2c79cbeb562
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/sdf/all/test_package/conanfile.py
|
f0f5021b597eea4126dc06ed8a9f8ad497bdd594
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 615
|
py
|
import os
from conans import ConanFile, CMake, tools
class TestPackageConan(ConanFile):
    """Conan test package: builds and runs a small consumer of the recipe."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"
    def requirements(self):
        # stb supplies the image loading used by the test executable.
        self.requires("stb/20200203")
    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
    def test(self):
        # Only run the binary when not cross-compiling (it must be
        # executable on the build machine).
        if not tools.cross_building(self.settings):
            bin_path = os.path.join("bin", "test_package")
            img_path = os.path.join(self.source_folder, "test.png")
            self.run("{0} {1}".format(bin_path, img_path), run_environment=True)
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
ce967c34d1d8457b8429f0af4029fc82dd5382d1
|
99e57f00fcaf4469c1c1b79f2d17176aaef9a790
|
/purchase_order_revision/tests/test_purchase_order_revision.py
|
e1e2bc27d76d6d62d40e1a89903a87294617ee1f
|
[] |
no_license
|
detian08/mcl
|
d007ffd0e869f3bd9a8c74bc8473119901f0de2a
|
32d61148326c931aca0107c3894061773f287e33
|
refs/heads/master
| 2022-03-23T19:36:29.608645
| 2019-12-11T10:15:50
| 2019-12-11T10:15:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
from odoo.tests import common
from odoo import fields
class TestPurchaseOrderRevision(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestPurchaseOrderRevision, cls).setUpClass()
cls.partner = cls.env['res.partner'].create({
'name': 'Test partner',
})
cls.product = cls.env['product.product'].create({
'name': 'Product Test',
})
cls.order = cls.env['purchase.order'].create({
'partner_id': cls.partner.id,
'date_planned': fields.Date.today(),
'order_line': [(0, 0, {
'product_id': cls.product.id,
'name': cls.product.name,
'price_unit': 79.80,
'product_qty': 15.0,
'product_uom': cls.env.ref('product.product_uom_unit').id,
'date_planned': fields.Date.today(),
})]
})
def test_new_revision(self):
# I cancel the PO and create a new revision
self.order.button_cancel()
self.assertEqual(self.order.state, 'cancel')
old_name = self.order.name
new_name = '%s-01' % old_name
self.order.new_revision()
self.assertEqual(self.order.name, new_name)
self.assertEqual(len(self.order.old_revision_ids), 1)
self.assertEqual(self.order.revision_number, 1)
old_order = self.env['purchase.order'].search([
('name', '=', old_name),
])
self.assertEqual(old_order.active, False)
|
[
"adarsh@prixgen.com"
] |
adarsh@prixgen.com
|
64c26ac7295c6f11bf94d56f120e2003ed55fb26
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/120_design_patterns/012_proxy/examples/proxy_005.py
|
b41f04843274d5f16824f10b2dd7bc621e24ab5d
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
"""Proxy pattern
Proxy is a structural design pattern. A proxy is a surrogate object which can
communicate with the real object (aka implementation). Whenever a method in the
surrogate is called, the surrogate simply calls the corresponding method in
the implementation. The real object is encapsulated in the surrogate object when
the latter is instantiated. It's NOT mandatory that the real object class and
the surrogate object class share the same common interface.
"""
from abc import ABC, abstractmethod
class CommonInterface(ABC):
"""Common interface for Implementation (real obj) and Proxy (surrogate)."""
@abstractmethod
def load(self):
pass
@abstractmethod
def do_stuff(self):
pass
class Implementation(CommonInterface):
def __init__(self, filename):
self.filename = filename
def load(self):
print("load {}".format(self.filename))
def do_stuff(self):
print("do stuff on {}".format(self.filename))
class Proxy(CommonInterface):
def __init__(self, implementation):
self.__implementation = implementation # the real object
self.__cached = False
def load(self):
self.__implementation.load()
self.__cached = True
def do_stuff(self):
if not self.__cached:
self.load()
self.__implementation.do_stuff()
def main():
p1 = Proxy(Implementation("RealObject1"))
p2 = Proxy(Implementation("RealObject2"))
p1.do_stuff() # loading necessary
p1.do_stuff() # loading unnecessary (use cached object)
p2.do_stuff() # loading necessary
p2.do_stuff() # loading unnecessary (use cached object)
p1.do_stuff() # loading unnecessary (use cached object)
if __name__ == "__main__":
main()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
880fe3920c58f152896de23d4ee6ad0c457ad28d
|
8766852cddd9cb02cdc57452d6d907a3b0ddcc2b
|
/test/goose/version.py
|
fedcbb6de8c5c54f44fe7bea9ef1247fc6013416
|
[] |
no_license
|
glqglq/Crawler
|
24f532af305e9513dad61670eacef09081c85093
|
769397e0dc723b30955382e22fdbab6aaff35387
|
refs/heads/master
| 2021-01-20T01:41:34.360112
| 2017-09-15T12:55:44
| 2017-09-15T12:55:44
| 89,318,133
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
version_info = (1, 0, 25)
__version__ = ".".join(map(str, version_info))
|
[
"546751140@qq.com"
] |
546751140@qq.com
|
0fe69463926f471c995bb00ebd8a5997679f2c6c
|
55e9f3b00fc2e488597bab5225ed321c86efbd4b
|
/sdk/test/test_credit_line.py
|
6e143a986df22cb26461a1dcdc7c013dac722df1
|
[
"MIT"
] |
permissive
|
bs-yapily/yapily-sdk-python
|
ad9d04c28f3d744830734c3444c1cef8215206fd
|
0bba45e351b674eb655425a51190f539c4e9896f
|
refs/heads/master
| 2020-08-26T17:18:53.156429
| 2019-10-22T11:01:16
| 2019-10-22T11:01:16
| 217,085,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import yapily
from yapily.models.credit_line import CreditLine # noqa: E501
from yapily.rest import ApiException
class TestCreditLine(unittest.TestCase):
"""CreditLine unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCreditLine(self):
"""Test CreditLine"""
# FIXME: construct object with mandatory attributes with example values
# model = yapily.models.credit_line.CreditLine() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"systems@yapily.com"
] |
systems@yapily.com
|
571cc5f4406ecac2c0cd3d6bc9e1c26aef47177f
|
1e64f178931f5efed25c244dce48d5014aab3a3a
|
/HackerRank-Algorithm/02. Implementation/004. Between Two Sets.py
|
99c311611b5537bf830b4752648c328ab6cd6ed5
|
[] |
no_license
|
im876/Python-Codes
|
8f672136742a447f2e8d62fe3f37b4a763787ab5
|
be06e97f2fa7fb2125a899b7ff49bbe97362c7a3
|
refs/heads/master
| 2023-08-05T07:25:20.555054
| 2021-09-20T08:17:34
| 2021-09-20T08:17:34
| 279,281,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
input();
a=list(map(int, input().split()))
b=list(map(int, input().split()))
ans=0
for i in range(1, 101):
if all(i%x==0 for x in a) and all(x%i==0 for x in b):
ans+=1
print(ans)
|
[
"noreply@github.com"
] |
im876.noreply@github.com
|
ef8501011cc5ff6b245c337330ba692b929dd21b
|
70280955a5382d73e58395eba78c119a400f4ce7
|
/aoj/itp2/6d.py
|
7780d11018469786985006bb7c193acc99bec21a
|
[] |
no_license
|
cohock13/atcoder
|
a7d0e26a10a4e58690347a2e36839c2f503a79ba
|
d268aa68fc96203eab94d021bd158cf84bdb00bc
|
refs/heads/master
| 2021-01-03T00:41:31.055553
| 2020-10-27T12:28:06
| 2020-10-27T12:28:06
| 239,839,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
import bisect
input()
a = list(map(int,input().split()))
for i in range(int(input())):
k = int(input())
print(bisect.bisect_left(a,k),bisect.bisect(a,k))
|
[
"callout2690@gmail.com"
] |
callout2690@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.