blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
458996ee7b14a8721fdcab21afb7075fdc55aa48 | 84c1e780a349c4bae2d6cf4c1da72889d5222797 | /Statistics/Day_6_The Central Limit Theorem II/the_central_limit_theorem_ii.py | dc0b8e850078bec0c972d4511669fc112d3c477a | [
"MIT"
] | permissive | brianchiang-tw/HackerRank | 18e31583b10cf2189adac97e7cb2997d46790bcd | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | refs/heads/master | 2020-09-23T23:18:08.253868 | 2020-02-13T14:16:22 | 2020-02-13T14:16:22 | 225,612,833 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from math import erf
def normal_distribution_cdf(x, miu, sigma):
    """Cumulative distribution function of Normal(miu, sigma) at x.

    Uses the identity CDF(x) = (1 + erf(z / sqrt(2))) / 2, where
    z = (x - miu) / sigma is the standard z-score.
    """
    standardized = (x - miu) / sigma
    return 0.5 * (1.0 + erf(standardized / 2 ** 0.5))
def to_percentage(ratio):
    """Convert a ratio (e.g. 0.25) to a percentage value (e.g. 25.0)."""
    percentage = ratio * 100.0
    return percentage
if __name__ == '__main__':
    # HackerRank "Day 6: The Central Limit Theorem II" inputs.
    max_ticket = 250                # tickets left on sale
    num_of_student = 100            # students who each want tickets
    avg_tck_per_student = 2.4       # mean tickets requested per student
    std_tck_per_student = 2         # std dev of tickets per student
    # CLT: the sum of n i.i.d. variables ~ Normal(n*mu, sqrt(n)*sigma).
    avg_of_sample_sum = num_of_student * avg_tck_per_student
    std_of_sample_sum = (num_of_student ** 0.5) * std_tck_per_student
    # Probability that total demand stays within the remaining tickets.
    under_limit = normal_distribution_cdf( max_ticket, avg_of_sample_sum, std_of_sample_sum)
print( round(under_limit, 4) ) | [
"brianchiang1988@icloud.com"
] | brianchiang1988@icloud.com |
f0b57eae419c94d4ae3469be8f30b508875e23b3 | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/2018_analysis_2ndpart_clinic/clustering_based_on_PCs/corrected/correction_age_sex_site/3_clusters_solution/01_controls_clusters_correlations.py | 323e2bf01fa45a50b33ae435e1229d711b1ef8d5 | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,802 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 17:02:41 2018
@author: ad247405
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd
import nibabel as nib
import json
from nilearn import plotting
from nilearn import image
from scipy.stats.stats import pearsonr
import shutil
import scipy.stats
import matplotlib.pyplot as plt
import seaborn as sns
import parsimony.utils.check_arrays as check_arrays
from sklearn import preprocessing
import statsmodels.api as sm
from statsmodels.formula.api import ols
import seaborn as sns
# Input locations (hard-coded NeuroSpin paths).
DATA_PATH = "/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/data"
INPUT_CLINIC_FILENAME = "/neurospin/abide/schizConnect/data/december_2017_clinical_score/schizconnect_NUSDAST_assessmentData_4495.csv"
# Per-subject arrays/tables from the all-studies + VIP VBM analysis.
site = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/site.npy")
pop = pd.read_csv("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/population.csv")
clinic = pd.read_csv(INPUT_CLINIC_FILENAME)
# Keep only subjects from site 3 — presumably the NUSDAST site, matching
# the clinical file above (TODO confirm against the site coding).
pop= pop[pop["site_num"]==3]
age = pop["age"].values
sex = pop["sex_num"].values
y = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/y.npy")
# Per-subject cluster assignment ("controls" or cluster index 0/1/2).
labels_cluster = np.load("/neurospin/brainomics/2016_schizConnect/\
2018_analysis_2ndpart_clinic/results/clustering/corrected_results/\
correction_age_sex_site/3_clusters_solution/with_controls/labels_all.npy")
labels_cluster = labels_cluster  # NOTE(review): no-op self-assignment, kept as-is
# Wide table: one row per subject, one column per clinical question id.
df_scores = pd.DataFrame()
df_scores["subjectid"] = pop.subjectid
for score in clinic.question_id.unique():
    df_scores[score] = np.nan
# Fill each subject/question cell with the first recorded answer.
for s in pop.subjectid:
    curr = clinic[clinic.subjectid ==s]
    for key in clinic.question_id.unique():
        if curr[curr.question_id == key].empty == False:
            df_scores.loc[df_scores["subjectid"]== s,key] = curr[curr.question_id == key].question_value.values[0]
################################################################################
# Result table: one row per clinical score with its ANOVA statistic/p-value.
df_stats = pd.DataFrame(columns=["T","p"])
df_stats.insert(0,"clinical_scores",clinic.question_id.unique())
################################################################################
output = "/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\
results/clustering/corrected_results/correction_age_sex_site/3_clusters_solution/\
with_controls/controls_clusters_clinics_p_values.csv"
for key in clinic.question_id.unique():
    try:
        # FIX: np.float was removed in NumPy 1.20; the builtin float is the
        # documented replacement and behaves identically here.
        neurospycho = df_scores[key].astype(float).values
        valid = np.array(np.isnan(neurospycho) == False)
        df = pd.DataFrame()
        df[key] = neurospycho[valid]
        df["labels"] = labels_cluster[valid]
        # One-way ANOVA across controls and the three patient clusters.
        T, p = scipy.stats.f_oneway(df[df["labels"] == "controls"][key],
                                    df[df["labels"] == 0][key],
                                    df[df["labels"] == 1][key],
                                    df[df["labels"] == 2][key])
        print(p)
        df_stats.loc[df_stats.clinical_scores == key, "T"] = T
        df_stats.loc[df_stats.clinical_scores == key, "p"] = p
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour for
        # non-numeric or missing scores.
        print("issue")
        df_stats.loc[df_stats.clinical_scores == key, "T"] = np.nan
        df_stats.loc[df_stats.clinical_scores == key, "p"] = np.nan
df_stats.to_csv(output)
################################################################################
output = "/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\
results/clustering/corrected_results/correction_age_sex_site/3_clusters_solution/with_controls/nudast"

# Display names and plotting order for the four groups.
LABELS_DICT = {"controls": "controls", 0: "cluster 1", 1: "cluster 2", 2: "cluster 3"}
GROUP_ORDER = ["controls", "cluster 1", "cluster 2", "cluster 3"]


def _anova_violin_plot(score_name):
    """Run the 4-group ANOVA (controls vs the 3 clusters) for one
    neuropsych score and save a violin plot as <output>/<score_name>.png.

    Refactor of five copy-pasted blocks, each of which also duplicated the
    `df["labels"] = ...` assignment. Fixes: `np.float` (removed in NumPy
    1.20) replaced by builtin float; plt.figure() added because the
    original drew every plot on the same axes, so later figures were
    overlaid on earlier ones.
    """
    values = df_scores[score_name].astype(float).values
    valid = ~np.isnan(values)
    df = pd.DataFrame()
    df["labels"] = labels_cluster[valid]
    df["labels_name"] = df["labels"].map(LABELS_DICT)
    df[score_name] = values[valid]
    T, p = scipy.stats.f_oneway(df[df["labels"] == "controls"][score_name],
                                df[df["labels"] == 0][score_name],
                                df[df["labels"] == 1][score_name],
                                df[df["labels"] == 2][score_name])
    plt.figure()  # one fresh figure per score
    sns.violinplot(x="labels_name", y=score_name, data=df, order=GROUP_ORDER)
    plt.title("ANOVA: t = %s, and p= %s" % (T, p))
    plt.savefig(os.path.join(output, score_name + ".png"))


for score_name in ("vocabsca", "cvlfps", "matrxraw", "dstscalc", "sstscalc"):
    _anova_violin_plot(score_name)
################################################################################
| [
"ad247405@is222241.intra.cea.fr"
] | ad247405@is222241.intra.cea.fr |
01834489d2b6648a02c08315475f21f26b0cbe27 | 4013cf1b3b65130f36fac82f23b7e6e373ca3ac3 | /appliers/models.py | f63808e62e84556a454b24d8bd42e0e58404e2cf | [] | no_license | malkoG/HI-ARC-Portal | b89fa3571a155b3a23927a96a9c16d9bcc2a3974 | a3bd3cca8c1dd9c30c840f29cb8ad10227b17a36 | refs/heads/master | 2021-11-27T00:38:51.080620 | 2018-07-29T03:30:44 | 2018-07-29T03:30:44 | 140,441,235 | 1 | 0 | null | 2018-07-10T14:05:05 | 2018-07-10T14:05:05 | null | UTF-8 | Python | false | false | 904 | py | from django.db import models
class Applier(models.Model):
    """Membership application submitted through the sign-up form."""
    created_at = models.DateTimeField(auto_now_add=True)
    # Credentials the applicant will use on the Portal once approved.
    # NOTE(review): the password is stored in a plain CharField here — it is
    # not hashed by this model; confirm it gets hashed when the real user
    # account is created.
    username = models.CharField(max_length=30, blank=False)
    password = models.CharField(max_length=100, blank=False)
    # Fields the officers review when deciding on the application.
    realname = models.CharField(max_length=30, blank=False)
    student_id = models.CharField(max_length=10, blank=False)
    major = models.CharField(max_length=20, blank=False)
    grade = models.CharField(max_length=10, blank=False)
    phone_number = models.CharField(max_length=15, blank=False)
    email = models.CharField(max_length=50, blank=False)
    motivation = models.TextField(blank=False)
    portfolio = models.TextField()
    class Meta:
        # Oldest applications first.
        ordering = ('created_at', )
"rijgndqw012@gmail.com"
] | rijgndqw012@gmail.com |
85741891712bca7d83bab0d31be89df2c4157aa8 | 50d41a527633bcc853e4c4b6a6b749a97112c5c4 | /build-files/setup.py | f35039fb17879188a55535f1001b6e047350f360 | [
"MIT"
] | permissive | deone/ussd-session | 1f5e906e955e37f329946b8b19b3e78ed1cda81d | 1d1d7adb4abc5df2672f7c13f04ad5cad332b0cc | refs/heads/master | 2021-07-05T03:15:29.372744 | 2019-06-20T12:56:50 | 2019-06-20T12:56:50 | 102,523,642 | 1 | 0 | null | 2021-06-10T21:36:37 | 2017-09-05T19:51:53 | Python | UTF-8 | Python | false | false | 1,133 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py, used to resolve the README path.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for PyPI; field semantics per the Python packaging guide.
setup(
    name='ussd-session',
    version='0.1.6',
    description='USSD session app for Django.',
    long_description=long_description,
    url='https://github.com/deone/ussd-session',
    author='Dayo Osikoya',
    author_email='alwaysdeone@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='ussd http session json response django',
    packages=find_packages(),
    # NOTE(review): the exact pin (==1.11.10) forces that single Django
    # release on every installer — confirm the pin is intentional.
    install_requires=['Django==1.11.10'],
)
| [
"alwaysdeone@gmail.com"
] | alwaysdeone@gmail.com |
125dccc70afbb6f7e7dc28dbb95cbe4a87f0b0a4 | 71a7d88907c9fc02c989db84fb9b3d452e613278 | /python/maintcal/maintcal/tests/unit/date/test_timezone.py | 21a9ef80b749611d3619dec63b8de4b287765f18 | [] | no_license | raychorn/svn_rackspace | 5ca38426775d5475ad62c06dbfa7ec2ed05f3ca9 | c87323446f2820d165edab7f8c7a7436b7bee19e | refs/heads/main | 2023-01-01T19:16:56.682787 | 2020-10-15T22:20:58 | 2020-10-15T22:20:58 | 304,458,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,493 | py | from maintcal.tests.lib import MaintcalUnitTest
from maintcal.lib.date.maintcal_datetime import MaintcalDatetime
from maintcal.lib.date.timezone import TimezoneType, get_timezone_type
from datetime import datetime, timedelta
class TestTzFile(MaintcalUnitTest):
    """Tests for get_timezone_type() offset/DST lookups.

    Every MaintcalDatetime passed to find_timezone_info_utc() below is a
    UTC instant; the expected TimezoneType describes the local zone state
    (offset, DST flag, abbreviation) at that instant.
    """

    def test_bogus_timezone_name(self):
        # Unknown zone names must raise instead of returning a default.
        self.assertRaises(ValueError, get_timezone_type, timezone_name='blah')

    def test_get_timezone_type(self):
        tzfile = get_timezone_type('America/Chicago')
        # FIX: identity comparison with None uses `is`, not `==` (PEP 8).
        if tzfile is None:
            self.fail()

    def testfind_timezone_info_utc(self):
        # Just as a reminder, all these START date times are UTC.
        tzfile = get_timezone_type('America/Chicago')
        # Jan 2nd: Chicago is on standard time (CST, UTC-6).
        maintcal_datetime = MaintcalDatetime(2009, 1, 2, 9, 4, 5)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-21600, delta=timedelta(-1, 64800), isdst=0, abbr='CST', isstd=False, isgmt=False))
        # This is in UTC, so it is actually Oct 31, 20:59:59 CDT (-5).
        maintcal_datetime = MaintcalDatetime(2009, 11, 1, 1, 59, 59)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-18000, delta=timedelta(-1, 68400), isdst=1, abbr='CDT', isstd=False, isgmt=False))

    def testfind_timezone_info_utc_nov_case(self):
        # Fall-back boundary: Chicago leaves CDT for CST at 07:00 UTC.
        tzfile = get_timezone_type('America/Chicago')
        maintcal_datetime = MaintcalDatetime(2009, 11, 1, 6, 59, 59)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-18000, delta=timedelta(-1, 68400), isdst=1, abbr='CDT', isstd=False, isgmt=False))
        maintcal_datetime = MaintcalDatetime(2009, 11, 1, 7, 0, 0)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-21600, delta=timedelta(-1, 64800), isdst=0, abbr='CST', isstd=False, isgmt=False))

    def test_find_timezone_info_utc_mar_case(self):
        # Spring-forward boundary: CST becomes CDT at 08:00 UTC.
        tzfile = get_timezone_type('America/Chicago')
        maintcal_datetime = MaintcalDatetime(2009, 3, 8, 7, 59, 59)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-21600, delta=timedelta(-1, 64800), isdst=0, abbr='CST', isstd=False, isgmt=False))
        maintcal_datetime = MaintcalDatetime(2009, 3, 8, 8, 0, 0)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-18000, delta=timedelta(-1, 68400), isdst=1, abbr='CDT', isstd=False, isgmt=False))

    def test_find_timezone_info_utc_nov_case_st_john(self):
        # Newfoundland uses a half-hour offset (NDT is UTC-2:30).
        # FIX: the original literals `01`/`02` have leading zeros, which are
        # a SyntaxError in Python 3; they are plain decimal 1 and 2 here.
        tzfile = get_timezone_type('America/St_Johns')
        maintcal_datetime = MaintcalDatetime(2009, 11, 1, 2, 30, 59)
        self.assertEqual(tzfile.find_timezone_info_utc(maintcal_datetime),
            TimezoneType(offset=-9000, delta=timedelta(-1, 77400), isdst=1, abbr='NDT', isstd=False, isgmt=False))
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
e524410846d1ac49a2d5220b3bde9e207e0073ca | d5a82c882160e73e4dce5b0dd02d7ec42c0a8c25 | /DataCamp_Natural_Language_Processing_Fundamentals/4.4.1.Improving_your_model.py | fb42edd845e81c04c2274a3cbc11cb255ee90ced | [] | no_license | ContangoRango/Machine_Learning_AI_Blockchain | d31e57682f4b70988f60bfa9d1be598421e9a9eb | 2161721493c14ed89f09a81140c4622dd31bebab | refs/heads/master | 2022-03-22T03:33:23.333076 | 2019-12-08T12:12:49 | 2019-12-08T12:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | '''Your job in this exercise is to test a few different alpha levels using the Tfidf vectors to determine if there is a better performing combination.
The training and test sets have been created, and tfidf_vectorizer, tfidf_train, and tfidf_test have been computed.'''
#TASK
# Create a list of alphas to try using np.arange(). Values should range from 0 to 1 with steps of 0.1.
# Create a function train_and_predict() that takes in one argument: alpha. The function should:
# Instantiate a MultinomialNB classifier with alpha=alpha.
# Fit it to the training data.
# Compute predictions on the test data.
# Compute and return the accuracy score.
# Using a for loop, print the alpha, score and a newline in between. Use your train_and_predict() function to compute the score. Does the score change along with the alpha? What is the best alpha?
# Create the list of alphas: alphas
alphas = ____
# Define train_and_predict()
____ ____(____):
# Instantiate the classifier: nb_classifier
nb_classifier = ____
# Fit to the training data
____
# Predict the labels: pred
pred = ____
# Compute accuracy: score
score = ____
return score
# Iterate over the alphas and print the corresponding score
for alpha in alphas:
print('Alpha: ', alpha)
print('Score: ', ____)
print()
#SOLUTION
# Create the list of alphas: alphas
# Candidate smoothing strengths 0.0, 0.1, ..., 0.9 (np.arange excludes 1.0).
alphas = np.arange(0, 1, .1)
# Define train_and_predict()
def train_and_predict(alpha: float) -> float:
    """Train a MultinomialNB with smoothing `alpha` on the tf-idf training
    vectors and return its accuracy on the held-out test split."""
    # Instantiate the classifier: nb_classifier
    nb_classifier = MultinomialNB(alpha=alpha)
    # Fit to the training data
    nb_classifier.fit(tfidf_train, y_train)
    # Predict the labels: pred
    pred = nb_classifier.predict(tfidf_test)
    # Compute accuracy: score
    score = metrics.accuracy_score(y_test, pred)
    return score
# Iterate over the alphas and print the corresponding score
for alpha in alphas:
    print('Alpha: ', alpha)
    print('Score: ', train_and_predict(alpha))
| [
"buryj666@gmail.com"
] | buryj666@gmail.com |
0d5e5bdd7d18d03796431ffffa84106d00541083 | 29f859eee50687a42e4f4ba38f85aa55a45b0b45 | /program.py | 9d8fe2fb527fb0d31faed08531ae921d7d3807ce | [] | no_license | Ratnambar/PythoTest | 7ddd010a689d3fcff3a5c9889433c3b5f138c360 | 33d9138d3f246102d6316cf4a5e8229c70d17392 | refs/heads/master | 2023-02-04T19:45:17.721093 | 2020-12-25T17:48:33 | 2020-12-25T17:48:33 | 324,407,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | # first Program
l = []  # NOTE(review): no longer used by add(); kept so the module namespace is unchanged
def add():
    """Read a count from stdin, then that many integers; return their sum.

    FIX: the original appended into the module-level list `l`, so a second
    call re-counted the inputs of the first one; a local list makes each
    call independent.
    """
    values = []
    for _ in range(int(input())):
        values.append(int(input()))
    return sum(values)
add()
#second program
Dict2 = {"1": 50, "2": 60, "3": 70}
def Max_in_Dict(Dict2):
    """Return a one-entry dict {key: value} for the largest value in Dict2.

    FIX: the original seeded the running maximum with 0, so `key` was never
    bound (NameError) for an empty dict or one holding only non-positive
    values; it also shadowed the builtin `max`. Ties keep the first key,
    exactly like the original's strict `>` comparison.
    """
    if not Dict2:
        return {}
    best_key = max(Dict2, key=Dict2.get)
    return {best_key: Dict2[best_key]}
Max_in_Dict(Dict2)
#third program
l = [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1]
def count_max_one(l):
    """Return the length of the longest run of consecutive 1s in l."""
    longest = 0
    current = 0
    for value in l:
        if value == 1:
            current += 1
            if current > longest:
                longest = current
        else:
            # A non-1 value breaks the current run.
            current = 0
    return longest
count_max_one(l)
#fourth program in sql
create table user(
-> user_id int not null auto_increment,
-> username varchar(100) not null,
-> password varchar(50) not null,
-> primary key(user_id));
table address
create table addresses( id int not null auto_increment, user_id int, street varchar(100) not null, pincode int(20) not null, country varchar(50) not null, state varchar(50) not null, phone varchar(20) not null, primary key(id), foreign key (user_id) references user(user_id) );
| [
"ratnambar123gupta@gmail.com"
] | ratnambar123gupta@gmail.com |
9910b246044b2c58c1009f3ca86c485952f4398d | 6a34130452f9ac3b6c7d8d515bc12ae5980a4695 | /CIFAR10_LeNet5/LeNet_CIFAR10.py | 5a0f3c0be17a797ad29664b766aaf4d0b3fd25e0 | [] | no_license | ximitiejiang/MyCodeForPytorch | 8b251a85d522d7857feab78d5ef673b559fbe052 | cb1f48f3a75e4fd25b544d2a31adcf72cd64f760 | refs/heads/master | 2021-08-17T04:38:42.475584 | 2018-12-27T09:56:50 | 2018-12-27T09:56:50 | 150,266,218 | 1 | 0 | null | 2018-10-21T04:18:35 | 2018-09-25T13:03:27 | Python | UTF-8 | Python | false | false | 7,342 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 09:49:10 2018
@author: suliang
参考‘深度学习框架pytorch入门与实践’ chapter-2, 正确率53%
该代码来自于pytorch官方英文教程,实现LeNet5
"""
import torch
import torchvision
from torch.autograd import Variable
from torchvision import transforms, datasets
from torchvision.transforms import ToPILImage
import time
'''
创建数据变换器
'''
# Per-channel normalization to [-1, 1] after ToTensor's [0, 1] scaling.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=[0.5,0.5,0.5],
                                                     std=[0.5,0.5,0.5])])
'''
加载数据
'''
# Download/load CIFAR-10 train and test splits with the transform above.
trainset = datasets.CIFAR10(root = '/Users/suliang/MyDatasets/CIFAR10/',
                            transform = transform,
                            train = True,
                            download = True)
testset = datasets.CIFAR10(root = '/Users/suliang/MyDatasets/CIFAR10/',
                           transform = transform,
                           train = False,
                           download = True)
# The dataset can be used directly after loading:
# trainset is indexable; each item is a (data, label) pair.
data, label = trainset[0]
# Class names in the label-index order fixed by the dataset.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
'''
查看trainset中的第102张图片内容
'''
data, label = trainset[102] # tensor, size() = 3x32x32
print(classes[label])
data = data*0.5 + 0.5 # invert the normalization back to [0, 1]
#img = ToPILImage(data) # (unclear why this call does not display)
#img.show()
import matplotlib.pyplot as plt
import numpy as np
data = np.transpose(data, (1,2,0)) # transpose CxHxW -> HxWxC, shape = 32x32x3
plt.imshow(data) # plt.imshow() only accepts numpy HxWxC layout
'''
分包数据:dataloader函数生成一个可迭代的数据结构, 提供按大小打包,随机分发的服务
'''
# Iterable loaders that shuffle and batch 4 samples at a time.
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size = 4,
                                          shuffle = True,
                                          num_workers = 2)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size = 4,
                                         shuffle = True,
                                         num_workers = 2)
'''
查看第一个随机batch的图片
'''
data, label = next(iter(trainloader)) # size = 4x3x32x32
data = data*0.5 + 0.5
# make_grid stitches a whole batch into one row, so the batch of 4 images
# becomes 3x32x128; with the default padding of 2 it is 3x(32+4)x(128+10).
data = torchvision.utils.make_grid(data) # size = 3x36x138
# data = T.resize(200)(data) # enlarge the image (more display pixels) to see if it looks clearer
data = np.transpose(data, (1,2,0))
plt.imshow(data)
'''
构建网络
- 需要继承父类网络,并定义每一层网络类型和节点数
- 需要定义前向计算
'''
import torch
import torch.nn.functional as F
class LeNet1(torch.nn.Module):
    """LeNet-5 style CNN for 3x32x32 CIFAR-10 images, producing 10 logits."""

    def __init__(self):
        super(LeNet1, self).__init__()
        # Two 5x5 convolution stages: 3 -> 6 -> 16 feature maps.
        self.conv1 = torch.nn.Conv2d(3, 6, 5)
        self.conv2 = torch.nn.Conv2d(6, 16, 5)
        # After two conv+pool stages a 32x32 input is 16 maps of 5x5.
        self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
        self.fc2 = torch.nn.Linear(120, 84)
        self.fc3 = torch.nn.Linear(84, 10)

    def forward(self, x):
        """Compute class scores for a batch shaped (N, 3, 32, 32)."""
        features = F.max_pool2d(F.relu(self.conv1(x)), 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2)
        flat = features.view(features.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
'''
新建网络对象
'''
# Initialize the network, the loss, and the optimizer.
net = LeNet1()
criteria = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr = 0.001, momentum = 0.9)
'''
打印模型参数进行查看
'''
# Dump model/optimizer parameter summaries to a text file for inspection.
# FIX: use a context manager so the file is closed even if a print fails.
with open("parmas.txt", "w") as f:
    print(net, file=f)
    print('-'*20, file=f)
    # model.parameters() yields every learnable tensor — initialized, untrained.
    params = list(net.parameters())
    # FIX: the original called .format() on a string with no {} placeholder,
    # so the length was silently dropped from the printed message.
    print('length of params: {}'.format(len(params)))
    # model.state_dict() maps parameter names to their tensors.
    print("Model's state_dict:", file=f)
    for param_tensor in net.state_dict():
        print(param_tensor, "\t", net.state_dict()[param_tensor].size(), file=f)
    print('-'*20, file=f)
    # optimizer.state_dict() holds hyper-parameters and internal state.
    print("Optimizer's state_dict:", file=f)
    for var_name in optimizer.state_dict():
        print(var_name, "\t", optimizer.state_dict()[var_name], file=f)
    print('-'*20, file=f)
'''
训练网络
'''
# Train for 2 epochs over the training set, reporting the running loss.
since = time.time()
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):  # one mini-batch per step
        inputs, labels = data
        inputs, labels = Variable(inputs), Variable(labels)
        optimizer.zero_grad()          # clear gradients from the last step
        outputs = net(inputs)
        loss = criteria(outputs, labels)
        loss.backward()
        optimizer.step()
        # FIX: loss.data[0] raises on 0-dim tensors (PyTorch >= 0.4);
        # .item() is the supported way to read a scalar loss.
        running_loss += loss.item()
        if i%2000 == 1999:             # report every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch+1, i+1, running_loss/2000))
            running_loss = 0.0

print('finished training!')
print('last time: {}'.format(time.time()-since))
print()
# Inspect model.state_dict() after training.
f = open("parmas.txt", "a+") # only modes a and a+ position the pointer at end-of-file
print("After training, Model's state_dict:", file = f)
for param_tensor in net.state_dict():
    print(param_tensor, "\t", net.state_dict()[param_tensor].size(), file=f)
print('-'*20,file=f)
# Inspect optimizer.state_dict() after training.
print("After training, Optimizer's state_dict:", file = f)
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name], file=f)
print('-'*20,file=f)
f.close()
'''
保存模型
'''
# Persist only the learned weights (state_dict), not the full module.
PATH = '/Users/suliang/MyCodeForPytorch/CIFAR10_LeNet5/saved_model'
torch.save(net.state_dict(), PATH)
'''
预测1个batch看看效果
'''
images, labels = next(iter(testloader))
# Print the ground-truth class names for the 4 images in this batch.
for i in range(4):
    print(classes[labels[i]])
newimgs = images*0.5 + 0.5
newimgs = torchvision.utils.make_grid(newimgs)
newimgs = np.transpose(newimgs,(1,2,0))
plt.imshow(newimgs)
# Each image gets one score per class; the highest score is the prediction.
outputs = net(Variable(images))
_,predicted = torch.max(outputs.data, 1) # row-wise max; 1 collapses the class axis
for i in range(4):
    print(classes[predicted[i]])
'''
预测整个数据集
'''
correct = 0 # running count of correctly classified images
total = 0 # running count of all test images
for data in testloader:
    images, labels = data
    outputs = net(Variable(images))
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()
print('correct rate: {}%'.format(100*correct/total))
# Tensor division truncates small fractions to 0 in display, so multiplying
# by 100 first shows the accuracy as a percentage.
'''
检测是否可以在GPU运行
'''
# Move the model and one batch to the GPU when CUDA is available.
if torch.cuda.is_available():
    net.cuda()
    images = images.cuda()
    labels = labels.cuda()
    # FIX: the original called lowercase `variable(...)`, an undefined name
    # that raised NameError; the imported symbol is `Variable`.
    outputs = net(Variable(images))
    loss = criteria(outputs, Variable(labels))
| [
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
a5c59876489acae90e3406792dbaa57267963669 | 6478723d180a8ef39941ba04b80c1eca9f437323 | /5. Longest Palindromic Substring.py | 65d4b6571eb7df73c29e99b510e7b4a8f0598327 | [] | no_license | NiuNiu-jupiter/Leetcode | 2a49a365898ecca393cb1eb53a47f4501b25952d | e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86 | refs/heads/master | 2022-11-22T01:05:57.417538 | 2020-07-28T23:34:39 | 2020-07-28T23:34:39 | 182,104,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | """
Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: "cbbd"
Output: "bb"
"""
def longestPalindrome(s):
    """
    :type s: str
    :rtype: str
    Expand around every possible center (odd- and even-length) and keep
    the longest palindrome found.

    FIX: this function was lifted from a class Solution but still called
    ``self.helper``; ``self`` is undefined at module level, so every call
    raised NameError. It now calls the module-level ``helper``.
    """
    res = ""
    for i in range(len(s)):
        # odd-length case, like "aba" (single-character center)
        tmp = helper(s, i, i)
        if len(tmp) > len(res):
            res = tmp
        # even-length case, like "abba" (center between i and i+1)
        tmp = helper(s, i, i + 1)
        if len(tmp) > len(res):
            res = tmp
    return res

# Expand outward from center indexes (l, r) while the characters match and
# return the widest palindromic substring around that center.
def helper(s, l, r):
    while l >= 0 and r <= len(s) - 1 and s[l] == s[r]:
        l -= 1
        r += 1
    return s[l + 1:r]
| [
"cmyumo.zhang@gmail.com"
] | cmyumo.zhang@gmail.com |
4d6f975ccfbd53b199a02aed7e90dec5ff4610d2 | 3f09e77f169780968eb4bd5dc24b6927ed87dfa2 | /src/Problems/Spiral_Matrix_II.py | 5bc244a74bc4140bf7b1987d3a88e1d09de558ba | [] | no_license | zouyuanrenren/Leetcode | ad921836256c31e31cf079cf8e671a8f865c0660 | 188b104b81e6c73792f7c803c0fa025f9413a484 | refs/heads/master | 2020-12-24T16:59:12.464615 | 2015-01-19T21:59:15 | 2015-01-19T21:59:15 | 26,719,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | '''
Created on 19 Jan 2015
@author: Yuan
'''
'''
Two possible solutions:
1. go through 1 ... n**2, fill them into the cell of the matrix;
2. go through the matrix in a spiral order, fill the numbers incrementally;
In this program we go for the first solution.
'''
class Solution:
    # @return a list of lists of integer
    def generateMatrix(self, n):
        """Fill an n x n matrix with 1..n*n in clockwise spiral order.

        Walks the grid with a direction vector, turning right whenever the
        next cell is out of bounds or already filled.
        """
        grid = [[0] * n for _ in range(n)]
        deltas = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # right, down, left, up
        direction = 0
        r = c = 0
        for value in range(1, n * n + 1):
            grid[r][c] = value
            dr, dc = deltas[direction]
            nr, nc = r + dr, c + dc
            # Turn clockwise when the straight-ahead cell is unusable.
            if not (0 <= nr < n and 0 <= nc < n) or grid[nr][nc]:
                direction = (direction + 1) % 4
                dr, dc = deltas[direction]
                nr, nc = r + dr, c + dc
            r, c = nr, nc
        return grid
print Solution().generateMatrix(4) | [
"y.ren@abdn.ac.uk"
] | y.ren@abdn.ac.uk |
0e91abe36fe8af630beaddf7c335d82b75229805 | b12487590e46aa0b244c9800bf1727020e2c1d79 | /TG/accounts/views.py | 1b42f7c468956cfc188379ec33477c3c0accbfce | [] | no_license | heejinshin/team-project-eatseasy | 70dd91d8c49197410e117a13236c70c2816a4b50 | 54f6595a97c3e465d41fe54c3eafc731d5901c09 | refs/heads/main | 2023-07-24T12:09:43.464885 | 2021-09-04T03:17:22 | 2021-09-04T03:17:22 | 392,162,002 | 0 | 0 | null | 2021-09-04T03:17:22 | 2021-08-03T02:31:13 | JavaScript | UTF-8 | Python | false | false | 954 | py | from django.shortcuts import render
from django.views import generic
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.contrib.auth.mixins import AccessMixin
from django.views.defaults import permission_denied
class UserCreateView(generic.CreateView):
    """Sign-up view: renders UserCreationForm, redirects on success."""
    template_name = 'registration/register.html'
    form_class = UserCreationForm
    success_url = reverse_lazy('accounts:register_done')
class UserCreateDoneView(generic.TemplateView):
    """Static confirmation page shown after a successful registration."""
    template_name = 'registration/register_done.html'
class OwnerOnlyMixin(AccessMixin):
    """Restrict a detail-style view to the object's owner.

    raise_exception=True makes handle_no_permission() raise
    PermissionDenied (HTTP 403) instead of redirecting to login.
    """
    raise_exception = True
    permission_denied_message = "Owner only can update/delete the object"
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()  # fetch the model instance
        # NOTE(review): assumes the model exposes an `owner` field — confirm.
        if self.request.user != self.object.owner:
            self.handle_no_permission()
        return super().get(request, *args, **kwargs)
"nugeat23@gmail.com"
] | nugeat23@gmail.com |
750f942333db9c067f0c2af7e1c688c2f1cb3f49 | 2c9ac88bdba143bacb25b527a35b934c206c06f0 | /tweet_me/tweets/models.py | 19e3c5c44eae2e0eb14706234cd9a1600bd76c30 | [] | no_license | Asingjr2/tweet_me_dj | f9d64a42dc15a24233c369c255b01034a3d8418a | 34f9f30d8fa673fab038c8521bcacb8b0e14db95 | refs/heads/master | 2020-03-22T08:48:21.900726 | 2018-07-23T03:25:17 | 2018-07-23T03:25:17 | 139,792,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.urls import reverse
import uuid
from .validators.validation_errors import clean_text
# Create your models here.
# Updating manager for message object to allow reference to self in retweet
class MessageManager(models.Manager):
    """Manager adding a `retweet` helper that copies a message's text."""

    def retweet(self, creator, parent_obj):
        """Create and save a retweet of `parent_obj` authored by `creator`.

        Retweets always point at the root message so chains stay one
        level deep.

        FIX: the original referenced the undefined names `primary_key`
        and `user`, so any call raised NameError; they are the intended
        `parent_obj` and `creator` arguments.
        """
        if parent_obj.parent:
            og_parent = parent_obj.parent
        else:
            og_parent = parent_obj
        obj = self.model(
            parent=og_parent,
            creator=creator,
            text=parent_obj.text,
        )
        obj.save()
        return obj
class Message(models.Model):
parent = models.ForeignKey("self", blank=True, null=True, on_delete = models.CASCADE)
id = models.UUIDField(primary_key=True, default= uuid.uuid4, editable=False)
text = models.CharField(max_length=100, validators=[clean_text])
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
creator = models.ForeignKey(User, on_delete = models.CASCADE, default=1)
objects = MessageManager()
class Meta:
ordering = ["-created_at",]
def __str__(self):
return "Text content is {}".format(self.text)
def get_absolute_url(self):
return reverse("detail", args=(self.id,))
| [
"asingjr2@gmail.com"
] | asingjr2@gmail.com |
71d95ba0b2cf56f580e7e9979840ab7d45a732c9 | 06bc4f76ba6099277d408ddc16edd95fabcd95d0 | /ext/sam/SCRIPTS/python/sam/make_ic_file.py | a2798c8685f5f2201ea6f1874dc547bb211b388e | [
"MIT"
] | permissive | kbren/uwnet | 31d4267b23012cda61646b94aa8a9e283017f83b | aac01e243c19686b10c214b1c56b0bb7b7e06a07 | refs/heads/master | 2020-04-06T20:31:25.849132 | 2018-11-14T22:02:52 | 2018-11-14T22:05:01 | 157,771,236 | 0 | 0 | MIT | 2018-11-15T20:52:46 | 2018-11-15T20:52:45 | null | UTF-8 | Python | false | false | 1,348 | py | #!/usr/bin/env python
import argparse
import os
import xarray as xr
def rename_var(z, coords):
rename_d = {'xc': 'x', 'yc': 'y', 'ys': 'y', 'xs': 'x'}
rename_d = {key: val for key, val in rename_d.items() if key in z.dims}
return z.rename(rename_d).assign_coords(**coords)
def parse_arguments():
parser = argparse.ArgumentParser(description='Create initial condition for'
'coarse resolution SAM')
parser.add_argument('basedir', type=str)
parser.add_argument('output', type=str)
parser.add_argument('-t', '--time', type=int, default=0)
return parser.parse_args()
args = parse_arguments()
time = args.time
stagger_path = os.path.join(args.basedir, "stagger", "3d", "all.nc")
center_path = os.path.join(args.basedir, "coarse", "3d", "all.nc")
stat_path = os.path.join(args.basedir, "stat.nc")
cent = xr.open_dataset(center_path, engine='netcdf4').isel(time=time)
time = cent.time
stag = (xr.open_dataset(stagger_path).sel(time=time)
.apply(lambda x: rename_var(x, cent.coords)))
stat = xr.open_dataset(stat_path)
ic = xr.Dataset({
'U': stag.U,
'V': stag.V,
'W': cent.W,
'QV': cent.QV,
'TABS': cent.TABS,
'QN': cent.QN,
'QP': cent.QP,
'RHO': stat.RHO.sel(time=time),
'Ps': stat.Ps.sel(time=time)
})
ic.to_netcdf(args.output)
| [
"nbren12@gmail.com"
] | nbren12@gmail.com |
99b41def6fd1d2374de9435c323b651065f847d7 | 67d76057aee86c43d32e0b74f3ac94d521ee03d8 | /tests/journal.api/firewall_sanity.py | aff2e094d2fec8304653e999f6a72f58be4fe721 | [
"BSD-3-Clause"
] | permissive | jlmaurer/pyre | 0f94b1855bf029210f07c528747221751e37687f | 6af38a83621d7d6228d147b4bb94f97fbb10f6e2 | refs/heads/master | 2023-05-25T04:33:19.907452 | 2020-06-18T14:07:54 | 2020-06-18T14:07:54 | 273,362,988 | 0 | 0 | NOASSERTION | 2021-06-10T23:42:14 | 2020-06-18T23:50:28 | null | UTF-8 | Python | false | false | 363 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2020 all rights reserved
def test():
"""
Verify the channel is accessible
"""
# access
from journal import firewall
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| [
"michael.aivazis@para-sim.com"
] | michael.aivazis@para-sim.com |
67d3cc773bc7ae6f9f07541943eac88f2ea88bff | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_1_01a/brocade_mpls_rpc/show_mpls_bypass_lsp_name_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_common_info/lsp_config_frr_admin_groups/__init__.py | 0ab4e564eb9593ee83def8ffdf9e7b42e82d81b3 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,479 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_admin_group
class lsp_config_frr_admin_groups(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-bypass-lsp-name-extensive/output/bypass-lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-common-info/lsp-config-frr-admin-groups. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group',)
_yang_name = 'lsp-config-frr-admin-groups'
_rest_name = 'lsp-config-frr-admin-groups'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_admin_group = YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-bypass-lsp-name-extensive', u'output', u'bypass-lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-common-info', u'lsp-config-frr-admin-groups']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-bypass-lsp-name-extensive', u'output', u'bypass-lsp', u'lsp-config-frr-admin-groups']
def _get_lsp_admin_group(self):
"""
Getter method for lsp_admin_group, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_lsp_name_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_common_info/lsp_config_frr_admin_groups/lsp_admin_group (container)
"""
return self.__lsp_admin_group
def _set_lsp_admin_group(self, v, load=False):
"""
Setter method for lsp_admin_group, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_lsp_name_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_common_info/lsp_config_frr_admin_groups/lsp_admin_group (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_admin_group = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group(self):
self.__lsp_admin_group = YANGDynClass(base=lsp_admin_group.lsp_admin_group, is_container='container', presence=False, yang_name="lsp-admin-group", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
lsp_admin_group = __builtin__.property(_get_lsp_admin_group, _set_lsp_admin_group)
_pyangbind_elements = {'lsp_admin_group': lsp_admin_group, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
3a67240c0fe4fc3c2611cf0b12e141adadf6adcf | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_29131.py | b2149fd624633e6da886f98bfba13e8e6f47fb0c | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | # Django / xhtml2pdf - object has no attribute 'encode'
<link href="http://fonts.googleapis.com/css?family=Lato:400,700" rel="stylesheet" type="text/css">
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
75079e74736b89ccc2a829a05d003e9830e09796 | 5da2f1f49a35b7c446abf6e56982cb0d91cf4915 | /utils/triage_team.py | effc1457b335ab43c71e79e4263cc10768721176 | [
"Apache-2.0"
] | permissive | python/core-workflow | 632992bb65b051cd7af89d0f2bbad046062e5cc8 | a0db0d98b41ef1debf443c988c450de278932ee2 | refs/heads/main | 2023-08-28T20:43:27.464184 | 2023-07-23T13:36:51 | 2023-07-23T13:36:51 | 76,080,865 | 94 | 74 | Apache-2.0 | 2023-07-23T13:36:52 | 2016-12-10T00:36:07 | Python | UTF-8 | Python | false | false | 1,505 | py | # One time script for creating the triage team, and adding the necessary repos to the team
import os
import asyncio
import aiohttp
from gidgethub.aiohttp import GitHubAPI
import cachetools
cache = cachetools.LRUCache(maxsize=500)
async def get_core_repos(gh, team_id):
"""
Return the team's public repos
"""
async for repo in gh.getiter(f"/teams/{team_id}/repos"):
if not repo["private"] and not repo["fork"]:
print(repo)
yield repo["full_name"]
async def get_team(gh, team_name):
"""
Get a team by name (slug)
"""
return await gh.getitem(f"/orgs/python/teams/{team_name}")
async def main():
"""
- Get Python core team
- Get Python core's public repos
- Create Python triage team, assign the repos
:return:
"""
async with aiohttp.ClientSession() as session:
# must have repo, and admin:org permissions
gh = GitHubAPI(session, "python", oauth_token=os.getenv("GH_AUTH"), cache=cache)
core_team = await get_team(gh, "python-core")
repo_names = [repo async for repo in get_core_repos(gh, core_team["id"])]
await gh.post(
"/orgs/python/teams",
data={
"name": "Python triage",
"description": "Triagers for core Python",
"maintainers": ["mariatta", "zware", "vstinner"],
"privacy": "closed",
"repo_names": repo_names,
},
)
asyncio.run(main())
| [
"noreply@github.com"
] | python.noreply@github.com |
c04e73246dee908a69e18c15464cd9ff14b54944 | 39689ee725bc7183d5d59fb34f7d2ffe5fd6ad36 | /ABC_C/ABC010C.py | 1cec8071d54a2dd07ac82ba752123cce6c881a01 | [] | no_license | yu5shi8/AtCoder | b6eb920a9046bdfa98012dd3fc65f75f16214ffe | f9ca69001ece8379e3a70c993c44b540f8be2217 | refs/heads/master | 2021-06-15T17:58:07.027699 | 2021-03-20T14:04:03 | 2021-03-20T14:04:03 | 177,757,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# C - 浮気調査
# https://atcoder.jp/contests/abc010/tasks/abc010_3
txa, tya, txb, tyb, T, V = map(int, input().split())
n = int(input())
time = T * V
for i in range(n):
x, y = map(int, input().split())
a = ((x-txa)**2 + (y-tya)**2) ** 0.5
b = ((x-txb)**2 + (y-tyb)**2) ** 0.5
if (a + b) <= time:
print('YES')
exit()
print('NO')
# 21:25 - 21:41(WA)- 21:42(WA)- 21:45(WA)- 21:50(WA)- 21:51(AC)
| [
"royal_unicorn411@hotmail.co.jp"
] | royal_unicorn411@hotmail.co.jp |
28d9c080f28741dc23fd084ad52fa0a2fb5b01aa | 1498d0999af02644e784a40dcaf35f3fd834a495 | /models/app_funcs.py | 12c3265aca6309bfcbafe50d4117e0e449529709 | [] | no_license | gibil5/matrix | 349f3b156b644f2a3b3a0319701ee3044482c7be | f8723780bce6ef30fe0e4adbfce2a69a127395e4 | refs/heads/master | 2020-07-07T05:28:19.887904 | 2020-01-20T02:17:47 | 2020-01-20T02:17:47 | 203,264,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | # -*- coding: utf-8 -*-
import datetime
# ----------------------------------------------------------- Getters -------------------------
def search_patient_by_id_document(self):
print()
print('Search Patient')
# Protect against On change first time calls
if self.dni_pre != False:
# Search Patient - by ID IDOC
patient = self.env['oeh.medical.patient'].search([
('x_id_doc', '=', self.dni_pre),
],
order='write_date desc',
limit=1,
)
# Search Patient - by DNI
if patient.name == False:
patient = self.env['oeh.medical.patient'].search([
('x_dni', '=', self.dni_pre),
],
order='write_date desc',
limit=1,
)
#print(patient.name)
return patient.id
else:
return False
# ----------------------------------------------- Time Funcs --------------------------------
#@api.multi
def time_delta(self, appointment_date, delta_min):
"""
Time Delta
"""
date_format = "%Y-%m-%d %H:%M:%S"
new_dt = datetime.datetime.strptime(appointment_date, date_format) + datetime.timedelta(hours=0, minutes=delta_min)
new_str = new_dt.strftime(date_format)
return new_str
| [
"jrevilla55@gmail.com"
] | jrevilla55@gmail.com |
4497b03a3490654e2f445a07d358babd068ed3e5 | 3ec84a6e34f9bc709cb203f8b3f668f2b6697e2a | /python20200322-master/class_Python기초/py14리스트/py14_ex05_유효점수.py | 0c5287859317d69d2b75b0815424bd3bc7eda728 | [] | no_license | eopr12/pythonclass | 52079bd99358ac73664beed236659b97c8b63d40 | 2526fe255969a799f6c534c9db6bff9e4eccd877 | refs/heads/master | 2022-07-10T11:17:31.692754 | 2020-05-16T08:43:00 | 2020-05-16T08:43:00 | 263,377,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py |
###################################
# 심사 위원의 점수를 입력하여 유효 점수와 평균을 출력하는 프로그램을 작성합니다.
# 유효점수는 최고점과 최저점을 제외한 점수이며 합계와 평균은 유효점수로 계산합니다.
# 유효점수 합계 구하는 부분을 함수를 이용하시오.
# 평균 구하는 부분을 함수를 이용하시오.
# 평균은 소수점 두 자리까지만 출력하시오.
#
#
# ▪ 실행결과예시
# 심사 위원 수를 입력하시오 : 5
# 심사 위원 점수 입력 : 7
# 심사 위원 점수 입력 : 9
# 심사 위원 점수 입력 : 4
# 심사 위원 점수 입력 : 8
# 심사 위원 점수 입력 : 5
# 유효점수 : 5 7 8
# 합계 : 20
# 평균 : 6.67
#
#
# . 심사 위원수를 입력 받는다.
# . 심사 위원의 점수 입력 받아서 "점수리스트"에 저장.
# 몇 번 입력 받아야 하는가? 심사 위원수 만큼
# . 리스트 정렬하기.
# . 1번방부터 마지막방 -1 까지 합계를 구하는 메서드 만들기
# . 평균을 구하는 메서드 만들기.
# . 합계를 구하고 출력한다.
# . 평균을 구하고 출력한다.
###################################
def getList():
result = None
try:
심사위원수 = input("심사 위원 수를 입력하시오 : ")
심사위원수 = int(심사위원수)
result = []
for i in range(0, 심사위원수, 1):
성적 = input("심사 위원 점수 입력 : ")
성적 = int(성적)
result.append(성적)
except Exception as ex:
print("getList 예외", ex)
pass
return result
def getSum(리스트):
result = None
try:
result = sum(리스트)
except Exception as ex:
print("getSum 예외", ex)
return result
def getAvg(리스트):
result = None
try:
합계 = sum(리스트)
result = 합계 / len(리스트)
except Exception as ex:
print("getAvg 예외", ex)
return result
def printScore(리스트):
print("유효 점수 : ", end="")
for i in 리스트:
print(i, end=", ")
print()
def main():
# 점수 입력 받기
입력점수리스트 = getList()
# 정렬 리스트 만들기
입력점수리스트.sort()
# 유효점수 리스트 만들기
입력점수리스트.sort()
유효점수리스트 = 입력점수리스트[1:-1]
# 유효점수 출력
printScore(유효점수리스트)
합계 = getSum(유효점수리스트)
print("합계 : %s " % 합계)
평균 = getAvg(유효점수리스트)
print("평균 : %.2f " % 평균)
if __name__ == "__main__":
main()
| [
"kye9565@gmail.com"
] | kye9565@gmail.com |
d2c145bef6974660ef0683dcac9cd247c4e1ef79 | 49663ea34b41c8180d7484f778f5cad2e701d220 | /tests/macsec/test_deployment.py | 3ca8ddad561c60fa4765f2af4e28d6b2b4a6a42c | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stepanblyschak/sonic-mgmt | ed08c98e7bff1615b057daa8711686aa5986073d | a1ae1e0b4e9927e6f52916f76121780d19ec3e54 | refs/heads/master | 2023-04-07T01:30:11.403900 | 2023-03-29T10:16:52 | 2023-03-29T10:16:52 | 135,678,178 | 0 | 0 | NOASSERTION | 2023-03-29T16:13:55 | 2018-06-01T06:41:49 | Python | UTF-8 | Python | false | false | 1,169 | py | from time import sleep
import pytest
import logging
import re
import scapy.all as scapy
import ptf.testutils as testutils
from collections import Counter
from tests.common.utilities import wait_until
from tests.common.devices.eos import EosHost
from tests.common import config_reload
from .macsec_helper import *
from .macsec_config_helper import *
from .macsec_platform_helper import *
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.macsec_required,
pytest.mark.topology("t0", "t2"),
]
class TestDeployment():
@pytest.mark.disable_loganalyzer
def test_config_reload(self, duthost, ctrl_links, policy, cipher_suite, send_sci, wait_mka_establish):
# Save the original config file
duthost.shell("cp /etc/sonic/config_db.json config_db.json")
# Save the current config file
duthost.shell("sonic-cfggen -d --print-data > /etc/sonic/config_db.json")
config_reload(duthost)
assert wait_until(300, 6, 12, check_appl_db, duthost, ctrl_links, policy, cipher_suite, send_sci)
# Recover the original config file
duthost.shell("sudo cp config_db.json /etc/sonic/config_db.json")
| [
"noreply@github.com"
] | stepanblyschak.noreply@github.com |
1c52c5aea4f1c9c33fe87240c6e5927fdc227ee6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2462/60708/289114.py | 79e6c35a208cdfcd2d26f8810223b0868e5a5299 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | def find(list,l,r,index):
if(l>=r):
return index
mid=(l+r)//2
if(list[mid]>list[mid-1] and list[mid]>list[mid+1]):
index=mid
find(list,l,mid-1,index)
return index
else:
index=find(list,l,mid-1,index)
index=find(list,mid+1,r,index)
return index
if __name__ == '__main__':
temp = input().split(",")
list = []
for item in temp:
list.append(int(item))
print(find(list, 0, len(list) - 1,-1)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
9f1bb560cac4870270ac3d6933b5685fa2300ca3 | 510a6545c9bd300d8477287d887ab41f3d385154 | /notifications/tests/test_notifications.py | f1f2ab52b5addf6936de773d288032eed6338918 | [
"MIT"
] | permissive | City-of-Helsinki/open-city-signups | 222a80905e46443e1dc933e033628ca1c84293c5 | 3c36d3c1cba6f6fc85deadc54fb49a4318f4b1d4 | refs/heads/master | 2021-06-08T14:13:13.921115 | 2018-10-26T10:27:29 | 2018-10-26T10:27:29 | 133,327,658 | 0 | 1 | MIT | 2021-04-20T17:36:23 | 2018-05-14T08:15:49 | Python | UTF-8 | Python | false | false | 3,546 | py | import pytest
from notifications.enums import NotificationType
from notifications.models import NotificationTemplate, NotificationTemplateException
from notifications.utils import render_notification_template
@pytest.fixture
def notification_template(settings):
settings.LANGUAGES = (('fi', 'Finnish'), ('en', 'English'))
template = NotificationTemplate.objects.language('en').create(
type=NotificationType.SIGNUP_CREATED,
subject="test subject, variable value: {{ subject_var }}!",
html_body="<b>test html body</b>, variable value: {{ html_body_var }}!",
text_body="test text body, variable value: {{ text_body_var }}!",
)
template.set_current_language('fi')
template.subject = "testiotsikko, muuttujan arvo: {{ subject_var }}!"
template.html_body = "<b>testihötömölöruumis</b>, muuttujan arvo: {{ html_body_var }}!"
template.text_body = "testitekstiruumis, muuttujan arvo: {{ text_body_var }}!"
template.save()
return template
def test_notification_template_rendering(notification_template):
context = {
'extra_var': 'foo',
'subject_var': 'bar',
'html_body_var': 'html_baz',
'text_body_var': 'text_baz',
}
rendered = render_notification_template(NotificationType.SIGNUP_CREATED, context, 'en')
assert len(rendered) == 3
assert rendered.subject == "test subject, variable value: bar!"
assert rendered.html_body == "<b>test html body</b>, variable value: html_baz!"
assert rendered.text_body == "test text body, variable value: text_baz!"
rendered = render_notification_template(NotificationType.SIGNUP_CREATED, context, 'fi')
assert len(rendered) == 3
assert rendered.subject == "testiotsikko, muuttujan arvo: bar!"
assert rendered.html_body == "<b>testihötömölöruumis</b>, muuttujan arvo: html_baz!"
assert rendered.text_body == "testitekstiruumis, muuttujan arvo: text_baz!"
def test_notification_template_rendering_no_text_body_provided(notification_template):
context = {
'extra_var': 'foo',
'subject_var': 'bar',
'html_body_var': 'html_baz',
'text_body_var': 'text_baz',
}
notification_template.set_current_language('fi')
notification_template.text_body = ''
notification_template.save()
notification_template.set_current_language('en')
notification_template.text_body = ''
notification_template.save()
rendered = render_notification_template(NotificationType.SIGNUP_CREATED, context, 'en')
assert len(rendered) == 3
assert rendered.subject == "test subject, variable value: bar!"
assert rendered.html_body == "<b>test html body</b>, variable value: html_baz!"
assert rendered.text_body == "test html body, variable value: html_baz!"
rendered = render_notification_template(NotificationType.SIGNUP_CREATED, context, 'fi')
assert len(rendered) == 3
assert rendered.subject == "testiotsikko, muuttujan arvo: bar!"
assert rendered.html_body == "<b>testihötömölöruumis</b>, muuttujan arvo: html_baz!"
assert rendered.text_body == "testihötömölöruumis, muuttujan arvo: html_baz!"
def test_undefined_rendering_context_variable(notification_template):
context = {
'extra_var': 'foo',
'subject_var': 'bar',
'text_body_var': 'baz',
}
with pytest.raises(NotificationTemplateException) as e:
render_notification_template(NotificationType.SIGNUP_CREATED, context, 'fi')
assert "'html_body_var' is undefined" in str(e)
| [
"tuomas.haapala@anders.fi"
] | tuomas.haapala@anders.fi |
319849dec7617cd17631edf37a228080d5809019 | 0ccab2965458454d6a4802b47d33310e43c10d8f | /W5D3_Arrays and Matrices/list_comp.py | d169019b665f0ba72db17e5e56480c4e4022ebd6 | [] | no_license | jazib-mahmood-attainu/Ambedkar_Batch | 11e66125647b3b348d4567862f8fc20a3457b2f0 | c99be9a401b8d00f6ca47398f48e90ead98f4898 | refs/heads/main | 2023-08-01T13:13:43.357769 | 2021-09-25T03:54:27 | 2021-09-25T03:54:27 | 390,405,238 | 16 | 10 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | """
list comprehension is a shortcut way to make a list
"""
l1 = []
n = 5
for i in range(n):
l1.append(i)
print(l1)
#Short hand operation
m = 5
l2 = [i for i in range(m)]
print(l2) | [
"jazib.prof@gmail.com"
] | jazib.prof@gmail.com |
7b8c34804f93edd534f34f4c67b52e71d00660bc | 7284e65314d263d80f5743cb70e2a26de73f6e8d | /compiler/code.py | 2533bc459ba6e6578016150aacedbc5b93b8f274 | [] | no_license | roctbb/pithon | e6833b899b0e4008936b7a472d430925e14dbe7c | 298297c5a11cfee423fd10ab603a47fe640970da | refs/heads/master | 2021-01-20T01:28:19.710475 | 2019-03-10T17:18:56 | 2019-03-10T17:18:56 | 101,291,135 | 21 | 4 | null | 2019-03-10T17:18:57 | 2017-08-24T12:11:50 | Python | UTF-8 | Python | false | false | 615 | py | import random
for i in range(10):
print(i)
print("10...Лотерея!!!")
prizes = ["А-а-а-втомобиль!", "Банка с огурцами", "Орущая кошка, покормите ее уже!",
"Чирик", "Путевка в Крым, но кажется далеко от моря", "Вьетнамские флешбэк"]
people = ["Дед Макар", "Путин", "Шмель", "Твоя собака"]
for participant in people:
prize = random.choice(prizes)
print("{0} получает лот '{1}'! Поздравляем!"
.format(participant, prize)) | [
"roctbb@gmail.com"
] | roctbb@gmail.com |
0e03b92016cf2f6d83f50b4e4f0c14a7fa172c76 | ecf9902d67a65563bf5014d3d2693fc44b610fb8 | /P1030-NextGreaterNode.py | 728eddab328e734dac825309ad725e210c299d30 | [] | no_license | hkmamike/leetcode | 18956c0ecefe9c315d45b67d9a12625ea342cc00 | cb4fa5ce108e4f4cedec4a3c28d3bb2f980e3831 | refs/heads/master | 2021-06-13T16:36:21.732382 | 2019-09-15T19:26:37 | 2019-09-15T19:26:37 | 133,134,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def nextLargerNodes(self, head: ListNode) -> List[int]:
result = []
unprocessed = []
i = 0
while head:
while len(unprocessed) > 0 and head.val > unprocessed[-1][0]:
index = unprocessed.pop()[1]
result[index] = head.val
unprocessed.append((head.val, i))
result.append(-1)
i += 1
head = head.next
while len(unprocessed) > 0:
index = unprocessed.pop()[1]
result[index] = 0
return result
| [
"mikebrianleung@gmail.com"
] | mikebrianleung@gmail.com |
424a73eea0697c89335e5751c2c818fdece50608 | a9b0f97b97967d0c4688b7df1aeaf1809eac3db6 | /tests/test_muffin.py | 0f5a0f08226f77172ba017f64355263021aae229 | [
"BSD-3-Clause",
"MIT"
] | permissive | intermezzo-fr/muffin | 6a720ef21a3e0498296fbe9d5b41bf2652a705e8 | 92f64ef58bf5ecdcfb66e8fb9f545c497e7d3b90 | refs/heads/master | 2021-01-18T04:37:57.459706 | 2015-02-03T13:24:32 | 2015-02-03T13:24:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | """ Tests for `muffin` module. """
def test_app(client):
response = client.get('/')
assert response.status_code == 200
assert "Hello world!" in response.text
response = client.get('/404', status=404)
assert response.status_code == 404
response = client.get('/json')
assert response.json
assert response.json['json'] == 'here'
response = client.get('/db-sync')
assert response.json
response = client.get('/db-async')
assert response
| [
"horneds@gmail.com"
] | horneds@gmail.com |
5fa2a321652584e5647cd2d6e47991f075ab54ab | f38ac7dfc887f71d201eca3a8adf8bb593982d30 | /src/VersionControlProvider/IssueDefault.py | 6f02950a457e23ece3f54a78b005f76a61d3caef | [
"Apache-2.0"
] | permissive | flexiooss/flexio-flow | ed442c052d599715034f1d796bdc279cacf11f7c | f350a6abb211789f4caaf9002a9c61f1be75e598 | refs/heads/master | 2023-09-01T21:37:33.276016 | 2022-09-15T07:24:53 | 2022-09-15T07:24:53 | 159,647,155 | 0 | 0 | Apache-2.0 | 2022-12-08T10:36:30 | 2018-11-29T10:17:02 | Python | UTF-8 | Python | false | false | 949 | py | from __future__ import annotations
from VersionControlProvider.Issue import Issue
from VersionControlProvider.IssueState import IssueState
class IssueDefault(Issue):
def get_ref(self) -> str:
if self.number is None:
raise ValueError('Issue should have a number')
return '{prefix!s}{number!s}'.format(prefix=self.PREFIX, number=self.number)
def __dict__(self):
issue: dict = {
'title': self.title
}
if self.body is not None:
issue['body'] = self.body
if self.milestone is not None:
issue['milestone'] = self.milestone
if self.url is not None:
issue['url'] = self.url
if self.state is not None:
issue['state'] = self.state.value
if len(self.labels):
issue['labels'] = self.labels
if len(self.assignees):
issue['assignees'] = self.assignees
return issue
| [
"thomas@flexio.fr"
] | thomas@flexio.fr |
87886b5df391638a216b771d4cb08fdb89191766 | d2153eab38e5be8401162bb4913a938fe264c124 | /tweets/migrations/0002_auto_20190529_0025.py | 91bcc4663d23be43b247831aa6a3b39e57eadf73 | [
"MIT"
] | permissive | rakibulislam01/Tweetme | 4379a6724e96db9dd8b11ba069e2a74c7274f142 | ae787b1b6c0303ba8a52a804764e9fc5853b2219 | refs/heads/master | 2021-06-17T23:41:06.043076 | 2020-01-21T07:50:17 | 2020-01-21T07:50:17 | 188,770,184 | 0 | 0 | MIT | 2021-03-19T22:30:22 | 2019-05-27T04:21:42 | Python | UTF-8 | Python | false | false | 637 | py | # Generated by Django 2.1.7 on 2019-05-28 18:25
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tweets', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='tweet',
name='updated',
field=models.DateTimeField(auto_now=True),
),
]
| [
"mrakibul70@gmail.com"
] | mrakibul70@gmail.com |
d4e45f4c40639ad0d1e9321c4c0fd0b810fff068 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2647/60769/262538.py | 2466f6b8a6a58970730c5979bc7b6fb85ac62845 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | num = int(input())
for j in range(num):
nn = int(input())
bi = str(bin(nn))[2:]
sum = 0
for i in range(len(bi)):
if bi[i] == "1":
sum += 1
print(sum) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
97406c712d00d3c0c18c7e45aa341901ed3af6c5 | 9f0e740c6486bcb12f038c443b039c886124e55c | /python-study/tools/verifycode/verify_code_2.py | b3d0462c8d289321f79dfb20a916f28991924189 | [] | no_license | zfanai/python-study | 373ff09bd1e6be9e098bde924c98f5277ad58a54 | de11a6c8018730bb27e26808f5cbc0c615b4468f | refs/heads/master | 2021-01-18T17:59:16.817832 | 2017-11-06T09:33:21 | 2017-11-06T09:33:21 | 86,831,175 | 1 | 0 | null | null | null | null | GB18030 | Python | false | false | 4,269 | py | #encoding:gbk
import random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
_letter_cases = "abcdefghjkmnpqrstuvwxy" # 小写字母,去除可能干扰的i,l,o,z
_upper_cases = _letter_cases.upper() # 大写字母
_numbers = ''.join(map(str, range(3, 10))) # 数字
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
def create_validate_code(size=(120, 30),
chars=init_chars,
img_type="GIF",
mode="RGB",
bg_color=(255, 255, 255),
fg_color=(0, 0, 255),
font_size=18,
font_type="ae_AlArabiya.ttf",
length=4,
draw_lines=True,
n_line=(1, 2),
draw_points=True,
point_chance = 2):
'''
@todo: 生成验证码图片
@param size: 图片的大小,格式(宽,高),默认为(120, 30)
@param chars: 允许的字符集合,格式字符串
@param img_type: 图片保存的格式,默认为GIF,可选的为GIF,JPEG,TIFF,PNG
@param mode: 图片模式,默认为RGB
@param bg_color: 背景颜色,默认为白色
@param fg_color: 前景色,验证码字符颜色,默认为蓝色#0000FF
@param font_size: 验证码字体大小
@param font_type: 验证码字体,默认为 ae_AlArabiya.ttf
@param length: 验证码字符个数
@param draw_lines: 是否划干扰线
@param n_lines: 干扰线的条数范围,格式元组,默认为(1, 2),只有draw_lines为True时有效
@param draw_points: 是否画干扰点
@param point_chance: 干扰点出现的概率,大小范围[0, 100]
@return: [0]: PIL Image实例
@return: [1]: 验证码图片中的字符串
'''
width, height = size # 宽, 高
img = Image.new(mode, size, bg_color) # 创建图形
draw = ImageDraw.Draw(img) # 创建画笔
def get_chars():
'''生成给定长度的字符串,返回列表格式'''
return random.sample(chars, length)
def create_lines():
'''绘制干扰线'''
line_num = random.randint(*n_line) # 干扰线条数
for i in range(line_num):
# 起始点
begin = (random.randint(0, size[0]), random.randint(0, size[1]))
#结束点
end = (random.randint(0, size[0]), random.randint(0, size[1]))
draw.line([begin, end], fill=(0, 0, 0))
def create_points():
'''绘制干扰点'''
chance = min(100, max(0, int(point_chance))) # 大小限制在[0, 100]
for w in xrange(width):
for h in xrange(height):
tmp = random.randint(0, 100)
if tmp > 100 - chance:
draw.point((w, h), fill=(0, 0, 0))
def create_strs():
'''绘制验证码字符'''
c_chars = get_chars()
strs = ' %s ' % ' '.join(c_chars) # 每个字符前后以空格隔开
font = ImageFont.truetype(font_type, font_size)
font_width, font_height = font.getsize(strs)
draw.text(((width - font_width) / 3, (height - font_height) / 3),
strs, font=font, fill=fg_color)
return ''.join(c_chars)
if draw_lines:
create_lines()
if draw_points:
create_points()
strs = create_strs()
# 图形扭曲参数
params = [1 - float(random.randint(1, 2)) / 100,
0,
0,
0,
1 - float(random.randint(1, 10)) / 100,
float(random.randint(1, 2)) / 500,
0.001,
float(random.randint(1, 2)) / 500
]
img = img.transform(size, Image.PERSPECTIVE, params) # 创建扭曲
img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # 滤镜,边界加强(阈值更大)
return img, strs
if __name__ == "__main__":
code_img,code = create_validate_code(font_type="arial.ttf")
#code_img.save("validate.gif", "GIF")
code_img.save("validate.jpg", "JPEG")
print code | [
"zf_sch@126.com"
] | zf_sch@126.com |
55ca65b0efa49bd99c0050951177f9a7bacb5efa | 81406671f82b4fb13f4936eebe20d4f4ff954177 | /ltr/ltr/settings.py | 1e9f5ee259a312fd14f78e4d748a09134de75f77 | [] | no_license | thatandromeda/ltr | a6e98832015daee26c2fb208c9d858a72ffbf8bd | 7d7790444acccf476c90f415bb88d96216948a15 | refs/heads/master | 2021-01-21T12:52:48.521458 | 2014-12-09T20:26:32 | 2014-12-09T20:26:32 | 19,824,699 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | """
Django settings for ltr project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lr72a%5hsvgves1+959cn*q=t$iuj#d!#002_2w!&+9@=ai)*%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'taggit',
'south',
'ltr',
'crispy_forms',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ltr.urls'
WSGI_APPLICATION = 'ltr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Templates
def rel(*x):
return os.path.abspath(os.path.join(BASE_DIR, *x))
TEMPLATE_DIRS = (
rel('templates'),
)
# required for taggit
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
} | [
"andromeda.yelton@gmail.com"
] | andromeda.yelton@gmail.com |
ef9505fa441399af3d3e976ecb1cfb158d2a657b | 62ffd1d0ca1a325988e0b89341e8ae5236ddd127 | /Android_Tensorflow/testTF/TF_sotmax.py | 99a21e0ba31d9a88697c43389764795aacb8f427 | [] | no_license | curryli/APP_Action | 3edc6c88590869fabdc41729eb228b29adc5024f | 06a9ecc7961caf6760c588bd7e060041cdc8b2a7 | refs/heads/master | 2021-09-09T19:22:58.794998 | 2018-03-19T07:01:28 | 2018-03-19T07:01:28 | 114,858,653 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
from sklearn.datasets import load_iris
import numpy as np
import pandas as pd
iris=load_iris()
iris_data=iris.data
iris_target=iris.target
iris_target1=pd.get_dummies(iris_target).values
print(iris_data.shape)
X=iris_data
print(X.shape)
x=tf.placeholder(dtype=tf.float32,shape=[None,4],name="input")
y=tf.placeholder(dtype=tf.float32,shape=[None,3],name="output") #三分类
w=tf.get_variable("weight",shape=[4,3],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.1))
bais=tf.get_variable("bais",shape=[3],dtype=tf.float32,initializer=tf.constant_initializer(0))
y_out=tf.nn.bias_add(tf.matmul(x,w),bais)
loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_out))
accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(y,1),tf.arg_max(y_out,1)),tf.float32))
train_step=tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(3001):
sess.run(train_step,feed_dict={x:X,y:iris_target1})
if i%500==0:
accuracy_print=sess.run(accuracy,feed_dict={x:X,y:iris_target1})
print(accuracy_print)
| [
"xurui.lee@msn.com"
] | xurui.lee@msn.com |
b3e012396df4d1b6ef5cc53c65309913c6a1fb2d | d37a19ab3bcaba6e808a18df411c653c644d27db | /Year2/CA268/Week03/suspicious.py | 81f85e716f34f9fb63756c14c9cfe66ae0744652 | [] | no_license | Andrew-Finn/DCU | 9e7009dac9a543aaade17e9e94116259dcc1de20 | 013789e8150d80d3b3ce2c0c7ba968b2c69a7ce0 | refs/heads/master | 2023-02-21T05:13:42.731828 | 2022-02-14T12:39:20 | 2022-02-14T12:39:20 | 157,438,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | import sys
with open(sys.argv[1], "right") as f:
students = f.readlines()
with open(sys.argv[2], "right") as f:
bad = f.readlines()
out = sorted(list(set(students).intersection(set(bad))))
for i in range(len(out)):
print("{}. {}".format(i + 1, out[i]), end="")
| [
"git@afinn.me"
] | git@afinn.me |
d39b2b73aac648eca88713fd542c196b8c01e4d5 | e578b27fe0f8a47931e459c162cd699c6338a862 | /payments/management/commands/send_payment_reminders.py | 8fc399ff0e2e74d9f974963bd7201b9ba5ef38ad | [
"MIT"
] | permissive | City-of-Helsinki/berth-reservations | 0ded0448cc3a9391b6c1bde61767633b34124e80 | d1983366452eff9714d425bcef2d7d78483738f8 | refs/heads/master | 2023-04-06T06:49:31.433852 | 2023-03-09T12:38:46 | 2023-03-09T12:41:29 | 153,620,377 | 5 | 2 | MIT | 2023-03-21T22:56:51 | 2018-10-18T12:30:27 | Python | UTF-8 | Python | false | false | 398 | py | from payments.models import Order
from utils.base_expiration_command import FeatureFlagCommand
class Command(FeatureFlagCommand):
help = "Send payment reminder notifications"
feature_flag_name = "PAYMENTS_REMINDER_NOTIFICATION_CRONJOB_ENABLED"
def run_operation(self, dry_run, **options) -> int:
return Order.objects.send_payment_reminders_for_unpaid_orders(dry_run=dry_run)
| [
"juha.louhiranta@anders.fi"
] | juha.louhiranta@anders.fi |
dfb05342707d079df69d72120d8cdf801bbe6daf | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/sysdebug/SysdebugEp.py | 3525c9442aa33eb9bcc4dbf25dc0ecf3287dd24d | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | """This module contains the general information for SysdebugEp ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SysdebugEpConsts():
pass
class SysdebugEp(ManagedObject):
"""This is SysdebugEp class."""
consts = SysdebugEpConsts()
naming_props = set([])
mo_meta = MoMeta("SysdebugEp", "sysdebugEp", "sysdebug", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["read-only"], [u'topSystem'], [u'sysdebugAutoCoreFileExportTarget', u'sysdebugLogControlEp', u'sysdebugLogExportPolicy'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "SysdebugEp", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
a1640304c859f34680877002f692057f623b2b28 | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2reactor/tests/unit/test_timer.py | 9b86ac90438b1ac657a18287adc6f29a57a8cc2d | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 3,837 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.constants.triggers import TIMER_TRIGGER_TYPES
from st2common.models.db.trigger import TriggerDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.trigger import TriggerType
from st2common.persistence.trigger import Trigger
from st2reactor.timer.base import St2Timer
from st2tests.base import CleanDbTestCase
class St2TimerTestCase(CleanDbTestCase):
def test_trigger_types_are_registered_on_start(self):
timer = St2Timer()
timer._scheduler = mock.Mock()
# Verify there are no TriggerType in the db when we start
self.assertItemsEqual(TriggerType.get_all(), [])
timer.start()
# Verify TriggerType objects have been created
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), len(TIMER_TRIGGER_TYPES))
timer_trigger_type_refs = TIMER_TRIGGER_TYPES.keys()
for trigger_type in trigger_type_dbs:
ref = ResourceReference(pack=trigger_type.pack, name=trigger_type.name).ref
self.assertTrue(ref in timer_trigger_type_refs)
def test_existing_rules_are_loaded_on_start(self):
# Assert that we dispatch message for every existing Trigger object
St2Timer._handle_create_trigger = mock.Mock()
timer = St2Timer()
timer._scheduler = mock.Mock()
timer._trigger_watcher.run = mock.Mock()
# Verify there are no Trigger and TriggerType in the db wh:w
self.assertItemsEqual(Trigger.get_all(), [])
self.assertItemsEqual(TriggerType.get_all(), [])
# Add a dummy timer Trigger object
type_ = TIMER_TRIGGER_TYPES.keys()[0]
parameters = {'unit': 'seconds', 'delta': 1000}
trigger_db = TriggerDB(id=bson.ObjectId(), name='test_trigger_1', pack='dummy',
type=type_, parameters=parameters)
trigger_db = Trigger.add_or_update(trigger_db)
# Verify object has been added
self.assertEqual(len(Trigger.get_all()), 1)
timer.start()
timer._trigger_watcher._load_thread.wait()
# Verify handlers are called
timer._handle_create_trigger.assert_called_with(trigger_db)
@mock.patch('st2common.transport.reactor.TriggerDispatcher.dispatch')
def test_timer_trace_tag_creation(self, dispatch_mock):
timer = St2Timer()
timer._scheduler = mock.Mock()
timer._trigger_watcher = mock.Mock()
# Add a dummy timer Trigger object
type_ = TIMER_TRIGGER_TYPES.keys()[0]
parameters = {'unit': 'seconds', 'delta': 1}
trigger_db = TriggerDB(name='test_trigger_1', pack='dummy', type=type_,
parameters=parameters)
timer.add_trigger(trigger_db)
timer._emit_trigger_instance(trigger=trigger_db.to_serializable_dict())
self.assertEqual(dispatch_mock.call_args[1]['trace_context'].trace_tag,
'%s-%s' % (TIMER_TRIGGER_TYPES[type_]['name'], trigger_db.name))
| [
"wei.ying@easystack.cn"
] | wei.ying@easystack.cn |
617e130bde2ab5ea550cd079c100cdc889688e4a | b4cf3c5caacd99d0fb0b864f4ee9f30056a52c05 | /asynch/proto/streams/compressed.py | 30d1327a05f980c8e4c0c56664a2adc3678e37c2 | [
"Apache-2.0"
] | permissive | dbrojas/asynch | 4376ca20e15897e0efe4345402d5d5af3a7c1212 | 94054ba4acb9f0d05ddedf5ae66278b5e5301fdd | refs/heads/master | 2023-03-12T03:24:42.176643 | 2021-02-24T02:18:11 | 2021-02-24T02:18:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | from clickhouse_cityhash.cityhash import CityHash128
from asynch.proto.compression import get_decompressor_cls
from asynch.proto.context import Context
from asynch.proto.io import BufferedReader, BufferedWriter
from asynch.proto.streams.native import BlockInputStream, BlockOutputStream
class CompressedBlockOutputStream(BlockOutputStream):
def __init__(
self,
reader: BufferedReader,
writer: BufferedWriter,
context: Context,
compressor_cls,
compress_block_size,
):
super().__init__(reader, writer, context)
self.compressor_cls = compressor_cls
self.compress_block_size = compress_block_size
self.compressor = self.compressor_cls(writer)
def get_compressed_hash(self, data):
return CityHash128(data)
def finalize(self):
await self.writer.flush()
compressed = self.get_compressed()
compressed_size = len(compressed)
compressed_hash = self.get_compressed_hash(compressed)
await self.writer.write_uint128(compressed_hash,)
block_size = self.compress_block_size
i = 0
while i < compressed_size:
await self.writer.write_bytes(compressed[i : i + block_size]) # noqa: E203
i += block_size
await self.writer.flush()
def get_compressed(self):
compressed = BufferedWriter()
if self.compressor.method_byte is not None:
await compressed.write_uint8(self.compressor.method_byte)
extra_header_size = 1 # method
else:
extra_header_size = 0
data = self.compressor.get_compressed_data(extra_header_size)
await compressed.write_bytes(data)
return compressed.buffer
class CompressedBlockInputStream(BlockInputStream):
def __init__(self, reader: BufferedReader, writer: BufferedWriter, context):
super().__init__(reader, writer, context)
def get_compressed_hash(self, data):
return CityHash128(data)
async def read_block(self):
compressed_hash = await self.reader.read_uint128()
method_byte = await self.reader.read_uint8()
decompressor_cls = get_decompressor_cls(method_byte)
decompressor = decompressor_cls(self.reader)
if decompressor.method_byte is not None:
extra_header_size = 1 # method
else:
extra_header_size = 0
return decompressor.get_decompressed_data(method_byte, compressed_hash, extra_header_size)
| [
"long2ice@gmail.com"
] | long2ice@gmail.com |
49540f1af5bee2a45784e914d60ebd789c1f3138 | 82c6dedfe9040b453c22c3f93f1a2c9a922c988b | /ClusterFind/cluster_dbscan_pyclustering.py | ad11d9af82948f325cf54160c74697b4323c2fbf | [] | no_license | njcuk9999/g_clustering | 8d34439fd78ef7017c0414c932d21cd19fc6551c | 20e6a6ab17c72c5652ae33125f7dabf4131aa8d5 | refs/heads/master | 2021-05-11T16:10:41.382938 | 2018-05-08T22:55:03 | 2018-05-08T22:55:03 | 117,753,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,410 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2018-01-16 at 14:13
@author: cook
Version 0.0.0
"""
import numpy as np
from astropy.table import Table
import random
import matplotlib.pyplot as plt
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.dbscan import dbscan
from pyclustering.utils import read_sample
from pyclustering.utils import timedcall
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
# =============================================================================
# Define variables
# =============================================================================
# Define paths
WORKSPACE = '/scratch/Projects/Gaia_clustering'
WRITEPATH = WORKSPACE + '/data/Sim/Simulation_simple.fits'
# -----------------------------------------------------------------------------
COLOURS = ['r', 'g', 'b', 'c', 'm', 'orange']
MARKERS = ['o', 's', '*', 'd', 'v', '<', '>', '^', 'h', 'D', 'p', '8']
SUBSET = True
SUBSETSIZE = 100000
DIMNAMES = ['X [pc]', 'Y [pc]', 'Z [pc]',
'U [mas/yr]', 'V [mas/yr]', 'W [mas/yr]']
# =============================================================================
# Define functions
# =============================================================================
def get_random_choices(array, num):
mask = random.choices(range(len(array)), k=num)
return mask
def optimal_grid(num):
# get maximum shape
shape = int(np.ceil(np.sqrt(num)))
# get number of rows and columns based on maximum shape
if shape ** 2 == num:
nrows = shape
ncols = shape
else:
nrows = int(np.ceil(num / shape))
ncols = int(np.ceil(num / nrows))
# get position of figures
pos = []
for i in range(nrows):
for j in range(ncols):
pos.append([i, j])
# return nrows, ncols and positions
return nrows, ncols, pos
def plot_selection(data, clusters, noise):
plt.scatter(data[:, 0], data[:, 1], marker='.')
clustermask = get_mask(clusters)
noisemask = np.array(noise)
plt.scatter(data[:, 0][noisemask], data[:, 1][noisemask],
color='k', marker='.', s=1)
plt.scatter(data[:, 0][clustermask], data[:, 1][clustermask],
color='r', marker='x')
plt.show()
plt.close()
def get_mask(ll):
mask = []
for l in range(len(ll)):
mask = np.append(mask, ll[l])
mask = np.array(mask, dtype=int)
return mask
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# get the data
print("Loading data...")
rawdata = Table.read(WRITEPATH)
# apply subset to data
if SUBSET:
mask = get_random_choices(rawdata, SUBSETSIZE)
else:
mask = np.ones(len(rawdata['X']), dtype=bool)
rawdata = rawdata[mask]
# construct data matrix
data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z'],
rawdata['U'], rawdata['V'], rawdata['W']]).T
# data = np.array([rawdata['X'], rawdata['Y'], rawdata['Z']]).T
datalist = []
for row in range(data.shape[0]):
datalist.append(list(data[row]))
# get the true labels and group names
labels_true = np.array(rawdata['row'])
groups = np.array(rawdata['group'])
# convert data to 32 bit
data = np.array(data, dtype=np.float32)
# ----------------------------------------------------------------------
# DBscan example from :
# scikit-learn.org/stable/modules/clustering.html#dbscan
# http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan
# .html#sphx-glr-auto-examples-cluster-plot-dbscan-py
print("Calculating clustering using 'DBSCAN (pyclustering)'...")
dbscan_instance = dbscan(data=datalist, eps=10, neighbors=10, ccore=True)
(ticks, _) = timedcall(dbscan_instance.process)
print("\t\tExecution time: ", ticks, "\n")
clusters = dbscan_instance.get_clusters()
noise = dbscan_instance.get_noise()
plot_selection(data, clusters, noise)
# =============================================================================
# End of code
# =============================================================================
| [
"neil.james.cook@gmail.com"
] | neil.james.cook@gmail.com |
c739f531d187d8de6de5e69205382a8f417ae64b | 1287bbb696e240dd0b92d56d4fdf4246370f3e14 | /_os.py | c72f9cdf864f39a229eeb2990576289335ca9bf5 | [] | no_license | omerfarukcelenk/PythonCalismalari | ed0c204084860fddcb892e6edad84fdbc1ed38ec | 28da12d7d042ec306f064fb1cc3a1a026cb57b74 | refs/heads/main | 2023-04-13T18:23:15.270020 | 2021-04-26T21:06:21 | 2021-04-26T21:06:21 | 361,893,918 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import os
import datetime
result = dir(os)
result = os.name
# os.chdir("C:\\")
# os.chdir("../..")
# result = os.getcwd()
# klosör oluşturma
# os.mkdir("newdirectory")
# os.makedirs("newdirectory/yeniklasör")
# listeleme
# result = os.listdir()
# result = os.listdir("C:\\")
# for dosya in os.listdir():
# if dosya.endswith(".py"):
# print(dosya)
# result = os.stat("date.py")
# result = result.st_size/1024
# result = datetime.datetime.fromtimestamp(result.st_ctime) # oluşturulma tarihi
# result = datetime.datetime.fromtimestamp(result.st_atime) # son erişilme tarihi
# result = datetime.datetime.fromtimestamp(result.st_mtime) # değiştirilme tarihi
# os.system("notepad.exe")
# os.rename("newdirectory","DENEMEklasörü")
# os.rmdir("silmelik")
# os.removedirs("DENEMEklasörü/yeniklasör")
# path
result = os.path.abspath("_os.py")
result = os.path.dirname("D:/Python/_os.py")
result = os.path.dirname(os.path.abspath("_os.py"))
result = os.path.exists("_os.py")
result = os.path.isdir("D:/Python/_os.py")
result = os.path.isfile("D:/Python/_os.py")
result = os.path.join("C://","deneme","deneme1")
result = os.path.split("C://deneme")
result = os.path.splitext("_os.py")
# result = result[0]
result = result[1]
print(result) | [
"omerfar0133@gmail.com"
] | omerfar0133@gmail.com |
c3c1c5c2e9bb7f825bf97947cb99b00c311d398b | 4c117ea3617a576ddd07d8ea8aaab1a925fc402f | /bin/Race/Statistic/CompareRace.py | de1aa74809d77348fa72ce283a8cc5be725edc2e | [] | no_license | 452990729/Rep-seq | 7be6058ba3284bea81282f2db7fd3bd7895173ba | e217b115791e0aba064b2426e4502a5c1b032a94 | refs/heads/master | 2021-12-11T14:27:46.912144 | 2019-06-04T03:49:40 | 2019-06-04T03:49:40 | 190,124,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | #!/usr/bin/env python
import sys
import re
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy import stats
def ReadTab(file_in):
dict_tmp = {}
label = re.split('\.', os.path.basename(file_in))[0]
with open(file_in, 'r') as in1:
for line in in1:
list_split = re.split('\s+', line.strip())
dict_tmp['|'.join(list_split[:-2])] = int(list_split[-2])
tal = sum(dict_tmp.values())
return {key:round(float(value)/tal, 4) for key,value in \
dict_tmp.items()}, label
def ScatterPlot(dict1, dict2, label1, label2, tp, outpath):
fig, ax = plt.subplots()
keys = set(dict1.keys())|set(dict2.keys())
x = np.zeros(len(keys))
y = np.zeros(len(keys))
colors = np.random.rand(len(keys))
i = 0
for key in keys:
if key in dict1:
x[i] = dict1[key]
if key in dict2:
y[i] = dict2[key]
i += 1
ax.scatter(x, y, c=colors, alpha=0.5)
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
base = (int(max(x.max(), y.max())*100)+1)*0.01
if tp != 'VDJ':
for i, txt in enumerate(keys):
if abs(x[i]-y[i]) >= 0.2*base:
ax.annotate(txt, (x[i],y[i]))
ax.annotate('$R^{2}$ = '+str(round(r_value, 3)), (0.3*base,0.9*base))
m, b = np.polyfit(x, y, 1)
ax.plot(x, m*x + b, '-')
ax.set_xlim(-0.001, base)
ax.set_ylim(-0.001, base)
ax.set_xlabel(label1)
ax.set_ylabel(label2)
ax.set_title('Comparison of {} between {} and {}'.format(tp, label1, label2))
plt.savefig(os.path.join(outpath, \
'ComparisonOf{}Between{}and{}.png'.format(tp, label1, label2)))
def main():
dict1, label1 = ReadTab(sys.argv[2])
dict2, label2 = ReadTab(sys.argv[3])
ScatterPlot(dict1, dict2, label1, label2, sys.argv[1], sys.argv[4])
if __name__ == '__main__':
main()
| [
"452990729@qq.com"
] | 452990729@qq.com |
21b5f6abaa58f11530183e3547fa53284d2b8c4d | 3b3ca3b587b4ed9181038444b03bf526d4b14346 | /axelerate/networks/yolo/backend/utils/box.py | 0c31ccf5ed52e3d6cbaccf53ad3a897ef663e481 | [
"MIT"
] | permissive | ujuo/aXeleRate | fd217cc539ed31b43635e989c2e2ce9bb12f8486 | 2ff08e314e5e56becbaaebb28b0181f90fed5f5e | refs/heads/master | 2023-07-07T18:29:39.994544 | 2021-08-12T09:12:27 | 2021-08-12T09:12:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,064 | py | import numpy as np
import cv2
class BoundBox:
def __init__(self, x, y, w, h, c = None, classes = None):
self.x = x
self.y = y
self.w = w
self.h = h
self.c = c
self.classes = classes
def get_label(self):
return np.argmax(self.classes)
def get_score(self):
return self.classes[self.get_label()]
def iou(self, bound_box):
b1 = self.as_centroid()
b2 = bound_box.as_centroid()
return centroid_box_iou(b1, b2)
def as_centroid(self):
return np.array([self.x, self.y, self.w, self.h])
def boxes_to_array(bound_boxes):
"""
# Args
boxes : list of BoundBox instances
# Returns
centroid_boxes : (N, 4)
probs : (N, nb_classes)
"""
centroid_boxes = []
probs = []
for box in bound_boxes:
centroid_boxes.append([box.x, box.y, box.w, box.h])
probs.append(box.classes)
return np.array(centroid_boxes), np.array(probs)
def nms_boxes(boxes, n_classes, nms_threshold=0.3, obj_threshold=0.3):
"""
# Args
boxes : list of BoundBox
# Returns
boxes : list of BoundBox
non maximum supressed BoundBox instances
"""
# suppress non-maximal boxes
for c in range(n_classes):
sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0:
continue
else:
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if boxes[index_i].iou(boxes[index_j]) >= nms_threshold:
boxes[index_j].classes[c] = 0
# remove the boxes which are less likely than a obj_threshold
boxes = [box for box in boxes if box.get_score() > obj_threshold]
return boxes
def draw_scaled_boxes(image, boxes, probs, labels, desired_size=400):
img_size = min(image.shape[:2])
if img_size < desired_size:
scale_factor = float(desired_size) / img_size
else:
scale_factor = 1.0
h, w = image.shape[:2]
img_scaled = cv2.resize(image, (int(w*scale_factor), int(h*scale_factor)))
if boxes != []:
boxes_scaled = boxes*scale_factor
boxes_scaled = boxes_scaled.astype(np.int)
else:
boxes_scaled = boxes
return draw_boxes(img_scaled, boxes_scaled, probs, labels)
def draw_boxes(image, boxes, probs, labels):
for box, classes in zip(boxes, probs):
x1, y1, x2, y2 = box
cv2.rectangle(image, (x1,y1), (x2,y2), (0,255,0), 3)
cv2.putText(image,
'{}: {:.2f}'.format(labels[np.argmax(classes)], classes.max()),
(x1, y1 - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,0,255), 1)
return image
def centroid_box_iou(box1, box2):
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
_, _, w1, h1 = box1.reshape(-1,)
_, _, w2, h2 = box2.reshape(-1,)
x1_min, y1_min, x1_max, y1_max = to_minmax(box1.reshape(-1,4)).reshape(-1,)
x2_min, y2_min, x2_max, y2_max = to_minmax(box2.reshape(-1,4)).reshape(-1,)
intersect_w = _interval_overlap([x1_min, x1_max], [x2_min, x2_max])
intersect_h = _interval_overlap([y1_min, y1_max], [y2_min, y2_max])
intersect = intersect_w * intersect_h
union = w1 * h1 + w2 * h2 - intersect
return float(intersect) / union
def to_centroid(minmax_boxes):
"""
minmax_boxes : (N, 4) [[100, 120, 140, 200]]
centroid_boxes: [[120. 160. 40. 80.]]
"""
#minmax_boxes = np.asarray([[100, 120, 140, 200]])
minmax_boxes = minmax_boxes.astype(np.float)
centroid_boxes = np.zeros_like(minmax_boxes)
x1 = minmax_boxes[:,0]
y1 = minmax_boxes[:,1]
x2 = minmax_boxes[:,2]
y2 = minmax_boxes[:,3]
centroid_boxes[:,0] = (x1 + x2) / 2
centroid_boxes[:,1] = (y1 + y2) / 2
centroid_boxes[:,2] = x2 - x1
centroid_boxes[:,3] = y2 - y1
return centroid_boxes
def to_minmax(centroid_boxes):
centroid_boxes = centroid_boxes.astype(np.float)
minmax_boxes = np.zeros_like(centroid_boxes)
cx = centroid_boxes[:,0]
cy = centroid_boxes[:,1]
w = centroid_boxes[:,2]
h = centroid_boxes[:,3]
minmax_boxes[:,0] = cx - w/2
minmax_boxes[:,1] = cy - h/2
minmax_boxes[:,2] = cx + w/2
minmax_boxes[:,3] = cy + h/2
return minmax_boxes
def create_anchor_boxes(anchors):
"""
# Args
anchors : list of floats
# Returns
boxes : array, shape of (len(anchors)/2, 4)
centroid-type
"""
boxes = []
n_boxes = int(len(anchors)/2)
for i in range(n_boxes):
boxes.append(np.array([0, 0, anchors[2*i], anchors[2*i+1]]))
return np.array(boxes)
def find_match_box(centroid_box, centroid_boxes):
"""Find the index of the boxes with the largest overlap among the N-boxes.
# Args
box : array, shape of (1, 4)
boxes : array, shape of (N, 4)
# Return
match_index : int
"""
match_index = -1
max_iou = -1
for i, box in enumerate(centroid_boxes):
iou = centroid_box_iou(centroid_box, box)
if max_iou < iou:
match_index = i
max_iou = iou
return match_index
| [
"dmitrywat@gmail.com"
] | dmitrywat@gmail.com |
d85c622fbd7c8ecae479ca39479dd1212ab795cc | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.ROH/Sans_16/pdf_to_json_test_Latn.ROH_Sans_16.py | e170b801401e0141c2c13650a2c4b158f8c1a120 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.ROH/Sans_16/udhr_Latn.ROH_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
2ac1d285ba6c3768c202afcc8a5a6a7e0e1c731d | 350db570521d3fc43f07df645addb9d6e648c17e | /1480_Running_Sum_of_1d_Array/solution_test.py | aa3df5123a9a3b0d950bbc9f0bba6ab00a0c8c5d | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 379 | py |
'''
1480. Running Sum of 1d Array
Level: Easy
https://leetcode.com/problems/running-sum-of-1d-array
'''
import unittest
class TestSum(unittest.TestCase):
    """Sanity checks for the built-in sum() over different sequence types."""

    def test_sum(self):
        """sum() over a list."""
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        """sum() over a tuple.

        Bug fix: the tuple used to be (1, 2, 2), which sums to 5, so this
        test failed unconditionally against the expected value 6.
        """
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")
# Allow running this test module directly: `python solution_test.py`.
if __name__ == '__main__':
unittest.main() | [
"bhuang@rms.com"
] | bhuang@rms.com |
b4ed3f8fcd2bd524cc2d83d695bf06fa722eca15 | 63782b2e7bc03274f918868cf2f3bc221a0f32a1 | /hj57.py | c672183778d1569782cb48555108fc6a396a3e97 | [] | no_license | imsilence/huawei_nowcoder | a111aea3b99e30029a5c954a1e329d88817747aa | 30ca82d05340155e918b13783a32cf1802dd3e3b | refs/heads/master | 2022-12-27T05:57:52.097157 | 2020-10-12T01:54:22 | 2020-10-12T01:54:22 | 302,632,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | #encoding: utf-8
'''
题目描述
在计算机中,由于处理器位宽限制,只能处理有限精度的十进制整数加减法,比如在32位宽处理器计算机中,
参与运算的操作数和结果必须在-2^31~2^31-1之间。如果需要进行更大范围的十进制整数加法,需要使用特殊
的方式实现,比如使用字符串保存操作数和结果,采取逐位运算的方式。如下:
9876543210 + 1234567890 = ?
让字符串 num1="9876543210",字符串 num2="1234567890",结果保存在字符串 result = "11111111100"。
-9876543210 + (-1234567890) = ?
让字符串 num1="-9876543210",字符串 num2="-1234567890",结果保存在字符串 result = "-11111111100"。
要求编程实现上述高精度的十进制加法。
要求实现方法:
public String add (String num1, String num2)
【输入】num1:字符串形式操作数1,如果操作数为负,则num1的前缀为符号位'-'
num2:字符串形式操作数2,如果操作数为负,则num2的前缀为符号位'-'
【返回】保存加法计算结果字符串,如果结果为负,则字符串的前缀为'-'
注:
(1)当输入为正数时,'+'不会出现在输入字符串中;当输入为负数时,'-'会出现在输入字符串中,且一定在输入字符串最左边位置;
(2)输入字符串所有位均代表有效数字,即不存在由'0'开始的输入字符串,比如"0012", "-0012"不会出现;
(3)要求输出字符串所有位均为有效数字,结果为正或0时'+'不出现在输出字符串,结果为负时输出字符串最左边位置为'-'。
输入描述:
输入两个字符串
输出描述:
输出给求和后的结果
示例1
输入
9876543210
1234567890
输出
11111111100
'''
def add(left, right):
    """Return the decimal sum of two non-negative integers given as strings.

    Works digit by digit from the least significant end, so the operands
    may be arbitrarily long (no native integer-width limits apply).
    """
    out = []
    carry = 0
    width = max(len(left), len(right))
    for pos in range(width):
        a = int(left[-1 - pos]) if pos < len(left) else 0
        b = int(right[-1 - pos]) if pos < len(right) else 0
        carry, digit = divmod(a + b + carry, 10)
        out.append(str(digit))
    if carry:
        out.append('1')
    out.reverse()
    return ''.join(out)
def sub(flag, left, right):
    """Return the decimal difference ``left - right`` as a string.

    Preconditions (guaranteed by solution()): both operands are
    non-negative digit strings and ``left`` >= ``right`` numerically.

    flag -- truthy when the overall result is negative; the returned
            string is then prefixed with '-'.

    Bug fix: the result is now stripped of leading zeros.  Previously
    sub(0, "100", "99") returned "001" instead of "1", violating the
    problem's requirement that all output digits be significant.
    """
    digits = []
    borrow = 0
    for pos in range(max(len(left), len(right))):
        a = int(left[-1 - pos]) if pos < len(left) else 0
        b = int(right[-1 - pos]) if pos < len(right) else 0
        value = a - b - borrow
        if value < 0:
            value += 10
            borrow = 1
        else:
            borrow = 0
        digits.append(str(value))
    # Drop leading zeros; keep a single '0' when the difference is zero.
    result = ''.join(digits[::-1]).lstrip('0') or '0'
    if flag:
        result = '-' + result
    return result
def solution():
    """Read two (possibly sign-prefixed) big integers from stdin and return
    their sum as a string, computed via digit-wise string arithmetic."""
    def parse(raw):
        # Split off an optional leading '-'; return (is_negative, digits).
        if raw[0] == '-':
            return 1, raw[1:]
        return 0, raw

    left_neg, left = parse(input().strip())
    right_neg, right = parse(input().strip())

    if left_neg == right_neg:
        # Same sign: add the magnitudes and keep the shared sign.
        total = add(left, right)
        return '-' + total if left_neg else total

    # Opposite signs: subtract the smaller magnitude from the larger; the
    # result takes the sign of the larger-magnitude operand.  Equal-length
    # digit strings compare correctly with lexicographic ordering.
    if len(left) == len(right):
        if left == right:
            return '0'
        if left > right:
            return sub(left_neg, left, right)
        return sub(right_neg, right, left)
    if len(left) > len(right):
        return sub(left_neg, left, right)
    return sub(right_neg, right, left)
if __name__ == '__main__':
    # Keep solving input pairs until stdin is exhausted; reading past EOF
    # makes input() raise, which lands in the except branch and stops.
    while True:
        try:
            print(solution())
        except Exception as e:
            # NOTE(review): printing the exception adds noise to the judge
            # output -- confirm this is intentional before changing it.
            print(e)
break | [
"imsilence@outlook.com"
] | imsilence@outlook.com |
523c535969ca9eb444fb4f8250bf8bd22bc01f0d | ba9cb3bbc46faeea1edc01ef7e18131ae2dbf923 | /problem-030.py | e03c5f5788b3724615b117587ada3195a2cd5e6a | [] | no_license | beautytiger/project-euler | fb9908d35a82cd4b912a541282842adca03b17e2 | a8de90a2e5b98660505169afd9c8c27b1b3af28e | refs/heads/master | 2021-06-19T00:40:17.331130 | 2017-05-31T11:04:05 | 2017-05-31T11:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from tools.runningTime import runTime
from tools.common import DigitsPowerSum as DPS
# below function will give you a hint for the uplimit of this problem
@runTime
def numberStep(limit=9**5*6):
    """Print the running record of fifth-power digit sums for 2..limit.

    The printed list alternates [number, DPS(number, 5), ...] for every
    number whose digit-power sum beats the previous record -- a hint for
    the practical upper bound of Project Euler problem 30.
    """
    records = [1, 1]
    for n in xrange(2, limit):
        power_sum = DPS(n, 5)
        if power_sum > records[-1]:
            records.extend([n, power_sum])
    print(records)
# below function will print numbers that its DPS bigger than 9999 and in [10000, 89999).
# output: [19999, 29999, 39999, 49999, 59999, 69999, 79999, 88999, 89899, 89989, 89998]
@runTime
def oneMoreDigit():
    """Print the 5-digit numbers whose fifth-power digit sum exceeds DPS(9999, 5)."""
    threshold = DPS(9999, 5)
    matches = [n for n in range(10000, 89999) if DPS(n, 5) > threshold]
    print(matches)
# the key is to find the bounds of this list.
@runTime
def bruteForce(exp=5):
    """Print the sum of all numbers equal to the sum of the exp-th powers of their digits."""
    # A loose upper bound is 9**5 * 6 = 354294; 9**exp * (exp - 1) = 236196
    # is tighter and therefore faster to scan.
    upper = 9 ** exp * (exp - 1)
    total = sum(n for n in xrange(11, upper) if DPS(n, exp) == n)
    print("Result: {}".format(total))
# Only the brute-force solver runs by default; the two exploratory helpers
# above stay commented out.
if __name__ == "__main__":
    # numberStep()
    # oneMoreDigit()
    bruteForce()
| [
"konmyn@163.com"
] | konmyn@163.com |
3bab6516ea13afd54cb7cbbcf9c233a58a308fd1 | dacb257a90310eba03f3128221120a7d54b894ba | /dev/unix/rf433switch.py | bdcb7ab9d8602202d7d3198ff4192c99297560d2 | [
"MIT"
] | permissive | SiChiTong/pysmartnode | 92351efa02e52aa84185a53896957c453b12540a | a0998ad6582a28fe5a0529fb15dd4f61e254d25f | refs/heads/master | 2023-01-05T10:00:14.907988 | 2020-09-01T10:07:45 | 2020-09-01T10:07:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | # Author: Kevin Köck
# Copyright Kevin Köck 2019-2020 Released under the MIT license
# Created on 2019-07-03
"""
example config:
{
package: .unix.rf433switch
component: RF433
constructor_args: {
unit_code: "10001"
unit: "1"
# expected_execution_time_on: 500 # optional, estimated execution time; allows other coroutines to run during that time
# expected_execution_time_off: 500 # optional, estimated execution time; allows other coroutines to run during that time
# iterations: 1 # optional, number of times the command will be executed
# iter_delay: 20 # optional, delay in ms between iterations
# mqtt_topic: null #optional, defaults to <mqtt_home>/<device_id>/RF433<count>/set
# friendly_name: null # optional, friendly name shown in homeassistant gui with mqtt discovery
}
}
"""
__updated__ = "2019-09-29"
__version__ = "0.4"
import gc
from pysmartnode import config
from pysmartnode import logging
from pysmartnode.utils.component import Switch, DISCOVERY_SWITCH
from .popen_base import Popen
####################
COMPONENT_NAME = "RF433Switch"
# define the type of the component according to the homeassistant specifications
_COMPONENT_TYPE = "switch"
####################
# Shared MQTT client and module-level logger.
_mqtt = config.getMQTT()
_log = logging.getLogger(COMPONENT_NAME)
gc.collect()
# Counter giving every RF433 instance a unique default-topic suffix.
_unit_index = -1
# raspberry-remote shell command templates; the two placeholders are the
# system (unit) code and the unit number.
COMMAND_ON = "~/raspberry-remote/send {!s} {!s} 1"
COMMAND_OFF = "~/raspberry-remote/send {!s} {!s} 0"
# Exact stdout expected from a successful send, used to verify execution.
EXPECTED_RETURN_ON = 'using pin 0\nsending systemCode[{!s}] unitCode[{!s}] command[1]\n'
EXPECTED_RETURN_OFF = 'using pin 0\nsending systemCode[{!s}] unitCode[{!s}] command[0]\n'
class RF433(Switch):
    """MQTT switch component driving a 433 MHz RF socket via raspberry-remote.

    Bug fix in __init__: the per-instance counter assignment referenced the
    undefined name ``_count`` (NameError on construction) and read the
    counter before it was incremented; it now increments the module-level
    ``_unit_index`` first and stores the result.
    """
    lock = config.Lock()  # only one method can have control over the RF433 device

    def __init__(self, unit_code, unit, expected_execution_time_on=500, expected_execution_time_off=500,
                 iterations=1, iter_delay=10, mqtt_topic=None, friendly_name=None):
        """unit_code/unit select the RF socket; the timing/iteration values
        are forwarded to the Popen command wrappers."""
        super().__init__()
        self._log = _log
        # This makes it possible to use multiple instances of Switch: each
        # one gets a unique suffix for its default command topic.
        global _unit_index
        _unit_index += 1
        self._count = _unit_index
        self._topic = mqtt_topic or _mqtt.getDeviceTopic("{!s}{!s}".format(COMPONENT_NAME, self._count),
                                                         is_request=True)
        self._subscribe(self._topic, self.on_message)
        self._frn = friendly_name
        gc.collect()
        # Per-instance lock: new commands are rejected while this unit is
        # still switching.
        self.unit_lock = config.Lock()
        self._c_on = Popen(COMMAND_ON.format(unit_code, unit), EXPECTED_RETURN_ON.format(unit_code, unit),
                           expected_execution_time_on, iterations, iter_delay)
        self._c_off = Popen(COMMAND_OFF.format(unit_code, unit), EXPECTED_RETURN_OFF.format(unit_code, unit),
                            expected_execution_time_off, iterations, iter_delay)

    async def on_message(self, topic, msg, retain):
        """Handle an ON/OFF request; return True on success, False otherwise."""
        if self.unit_lock.locked():
            return False
        async with self.lock:
            async with self.unit_lock:
                if msg in _mqtt.payload_on:
                    r = await self._c_on.execute()
                    if r is True:
                        await _mqtt.publish(self._topic[:-4], "ON", qos=1, retain=True)  # makes it easier to subclass
                        return True
                    else:
                        await self._log.asyncLog("warn", "Got unexpected return: {!s}".format(r))
                        return False
                elif msg in _mqtt.payload_off:
                    r = await self._c_off.execute()
                    if r is True:
                        await _mqtt.publish(self._topic[:-4], "OFF", qos=1, retain=True)
                        return True
                    else:
                        await self._log.asyncLog("warn", "Got unexpected return: {!s}".format(r))
                        return False

    async def _discovery(self):
        """Publish the homeassistant discovery message for this switch."""
        name = "{!s}{!s}".format(COMPONENT_NAME, self._count)
        await self._publishDiscovery(_COMPONENT_TYPE, self._topic[:-4], name, DISCOVERY_SWITCH, self._frn)
        # note that _publishDiscovery does expect the state topic but we have the command topic stored.
| [
"kevinkk525@users.noreply.github.com"
] | kevinkk525@users.noreply.github.com |
14ae5a589fdaf24861ddaa9f4769af9675d93a7c | fcf735d736c15cb825f2d6f64f6098a5c4b9e851 | /2016/19.py | 59635417c98280d70020d03853065238c989530a | [] | no_license | csudcy/adventofcode | de3bc7078cd07bc4a8e0c5fff065ede78dc280f1 | a68df4f53f35219351cd5d91fc945c4764112b70 | refs/heads/master | 2020-06-10T13:27:41.389206 | 2016-12-26T09:35:29 | 2016-12-26T09:35:29 | 75,956,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | NEXT_ELF_INDEX, PRESENT_COUNT = range(2)
def generate_elves(count):
    """Return the initial elf circle as a list of (next_elf_index, present_count).

    Elf i points at elf (i + 1) % count and starts with exactly one present.

    Portability fix: uses range() instead of the Python-2-only xrange();
    the comprehension materializes a list either way, so Python 2 behaviour
    is unchanged and the function now also runs under Python 3.
    """
    return [
        ((i + 1) % count, 1)
        for i in range(count)
    ]
# Inline smoke tests (note: skipped when Python runs with -O).
assert generate_elves(1) == [(0, 1)]
assert generate_elves(3) == [(1, 1), (2, 1), (0, 1)]
def move_presents_once(elves):
    """Play the present-stealing game to completion and return the winner.

    ``elves`` is the mutable list built by generate_elves(); each entry is
    (next_elf_index, present_count) and an eliminated elf is marked (-1, 0).
    Returns the 1-based index of the elf who ends up holding every present
    (every elf starts with one, so "all" means len(elves) presents).
    Presumably Advent of Code 2016 day 19 part 1, judging by the file path.
    """
    elf_count = len(elves)
    elf_index = 0
    while True:
        # Skip elves with no presents
        while elves[elf_index][PRESENT_COUNT] == 0:
            elf_index = (elf_index + 1) % elf_count
        next_elf_index, present_count = elves[elf_index]
        # Move to the next next_elf_index with presents
        while elves[next_elf_index][PRESENT_COUNT] == 0:
            next_elf_index = (next_elf_index + 1) % elf_count
        # If I am the only elf with presents left, we're done
        total_present_count = present_count + elves[next_elf_index][PRESENT_COUNT]
        if total_present_count == elf_count:
            return elf_index + 1
        # Update elf status: the current elf takes the victim's presents and
        # points past them; the victim is emptied out of the game.
        elves[elf_index] = ((next_elf_index+1) % elf_count, total_present_count)
        elves[next_elf_index] = (-1, 0)
        # Move to the next elf which might have presents
        elf_index = (next_elf_index + 1) % elf_count
# Worked example from the puzzle statement, then the real puzzle input.
assert move_presents_once(generate_elves(5)) == 3
print move_presents_once(generate_elves(3017957))
| [
"csudcy@gmail.com"
] | csudcy@gmail.com |
b1864609f51d8bc2bc7cc5aff5a1fa5821989503 | 70f564990215f47b139a777826f211477e9b44f6 | /plan2vec_experiments/baselines/vae_greedy_streetlearn.py | 2b901d932170cea3865e03aaf622f61544a12dae | [] | no_license | geyang/plan2vec | de87f2d77732c4aacdefd00067ebebacb7cd763f | aeeb50aed3d7da4c266b4ca163e96d4c0747e3c1 | refs/heads/master | 2022-11-16T03:40:42.638239 | 2022-10-28T04:01:29 | 2022-10-28T04:01:29 | 261,273,420 | 65 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | from plan2vec.plan2vec.plan2vec_streetlearn_2 import main, Args
if __name__ == "__main__":
    import jaynes
    from plan2vec_experiments import instr, config_charts
    # NOTE(review): these string values look like placeholders; the real
    # checkpoint paths are passed to instr() below -- confirm they matter.
    Args.load_local_metric = "same as before"
    Args.load_global_metric = "same as local metric"
    jaynes.config("vector-gpu")
    Args.visualization_interval = False
    # Experiment folder holding the pretrained local-metric checkpoint.
    local_metric_exp_path = "/geyang/plan2vec/2019/07-31/streetlearn/local_metric/manhattan-xl/LocalMetricConvDeep/lr-(3e-05)/00.37/13.921429"
    # NOTE(review): the dict values (epx_path) are never used in the loop
    # body -- only the env_id keys matter; confirm the paths can be dropped.
    for env_id, epx_path in {
        "manhattan-tiny":
        # "/geyang/plan2vec/2019/07-30/vae/streetlearn_vae/dim-(3)/manhattan-tiny/lr-(0.003)/23.00/08.673423"
            "/geyang/plan2vec/2019/07-30/vae/streetlearn_vae-incomplete/dim-(2)/manhattan-tiny/lr-(0.0003)/22.59/48.194736",
        "manhattan-small":
        # "/geyang/plan2vec/2019/07-30/vae/streetlearn_vae/dim-(3)/manhattan-small/lr-(0.003)/23.00/14.916093",
            "/geyang/plan2vec/2019/07-30/vae/streetlearn_vae-incomplete/dim-(2)/manhattan-small/lr-(3e-05)/22.59/51.306878",
        "manhattan-medium": "",
        # "manhattan-large": ""
    }.items():
        # lr=0 with evaluate=True: evaluation-only run that reuses the
        # pretrained local metric as the global metric.
        _ = instr(main,
                  __postfix=f"{env_id}",
                  lr=0,
                  env_id=env_id,
                  data_path=f"~/fair/streetlearn/processed-data/{env_id}",
                  plan_steps=1,
                  neighbor_r=0.9,
                  evaluate=True, term_r=1.2e-4 * float(2),
                  num_epochs=200,
                  visualization_interval=False,
                  global_metric="LocalMetricConvDeep",
                  latent_dim=32,  # use the same as local metric
                  load_local_metric=f"{local_metric_exp_path}/models/local_metric_400.pkl",
                  load_global_metric=f"{local_metric_exp_path}/models/local_metric_400.pkl")
        config_charts(path="streetlearn.charts.yml")
        jaynes.run(_)
    # NOTE(review): jaynes.listen() is called twice in a row -- looks like a
    # copy/paste duplicate; confirm whether a single call suffices.
    jaynes.listen()
    jaynes.listen()
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
5b8e1c159431a579d9e523ef91e56eea4d9be509 | 6d0ca19b8c0f986954135bca68fd3abc558e8ab8 | /PKUTreeMaker/test/Wcrab/crab3_analysisWA.py | cba1ce5ca67f58e076e88a590559cf3217ee9e62 | [] | no_license | AnYpku/Ntuple | 8e018a2980d0440bf48c918a328d75e406df9595 | 7e3a41a7da5ef0005be67e32d615752ca6f130e1 | refs/heads/master | 2020-03-22T14:24:27.658961 | 2018-04-06T12:26:08 | 2018-04-06T12:26:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the "WA-1" analysis task.
config = Configuration()
config.section_("General")
config.General.requestName = 'WA-1'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Auxiliary text files (Summer16 corrections, CHS + PUPPI variants) shipped
# to every job's sandbox.
config.JobType.inputFiles = ['Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK4PFpuppi.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFpuppi.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFpuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'analysis.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
# Input MC dataset (MINIAOD) to analyse.
config.Data.inputDataset = '/WGToLNuG_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
# One input file per job; totalUnits = -1 presumably means no limit.
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'WA-1'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
| [
"1501110102@pku.edu.cn"
] | 1501110102@pku.edu.cn |
35349035fa9deec5305c020cefe758b7875d2922 | 8defed57154f96a27fe368e7fcdc8e58d4ff8b53 | /django_celery/api/tasks.py | 2a39390d3755412eaf1cadeda9bd6d30405d6548 | [] | no_license | defnngj/test_dev4 | 227c5bb53d96adb4cd4583b33c232cfcb4d85874 | 126f11eba933bf06bea4c3ef5e03024675549633 | refs/heads/main | 2023-02-21T17:54:55.098914 | 2021-01-24T10:02:14 | 2021-01-24T10:02:14 | 305,059,110 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | import os
from time import sleep
from celery import shared_task
import requests
import unittest
import xmlrunner
from api.test_case.test_interface import RunConfig
# Project root: two directory levels above this file.
API_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Where test cases are discovered and where XML reports are written.
TEST_CASE = os.path.join(API_DIR, "api", "test_case")
TEST_REPORT = os.path.join(API_DIR, "api", "reports")
# NOTE(review): import-time debug prints -- consider logging or removal.
print(TEST_CASE)
print(TEST_REPORT)
# @shared_task
# def add(x, y):
# sleep(10)
# return x + y
#
#
# @shared_task
# def mul(x, y):
# return x * y
#
#
# @shared_task
# def xsum(numbers):
# return sum(numbers)
#
# @shared_task
# def test():
# sleep(10)
@shared_task
def running_task(taskID):
    """Celery task: run the discovered unittest suite for the given task id
    and write the results as an XML report (002.xml) under TEST_REPORT."""
    # Expose the task id to the test cases through the shared RunConfig.
    RunConfig.task = taskID
    print("task id-->", RunConfig.task)
    suite = unittest.defaultTestLoader.discover(TEST_CASE)
    report_path = os.path.join(TEST_REPORT, "002.xml")
    print("--->", report_path)
    with open(report_path, 'wb') as report_file:
        xmlrunner.XMLTestRunner(output=report_file).run(suite)
    # TODO: write the contents of the XML report into the test-result table.
| [
"defnngj@gmail.com"
] | defnngj@gmail.com |
77a963659c22bc78b97bc981518926205e92bd30 | bcb8337b488f6acb91b700a2312a5b2018855413 | /federatedml/optim/test/updater_test.py | f2d3e05532916611ca665287568914e038b999cc | [
"Apache-2.0"
] | permissive | freeeedooom/FATE | d17a4729f059cfec6bc07c3142bebcd3b470dc3c | 7bbce8ee037543f280791378681742b40a300b0a | refs/heads/master | 2020-08-19T12:15:22.517131 | 2019-10-17T07:09:22 | 2019-10-17T07:09:22 | 215,918,890 | 1 | 0 | Apache-2.0 | 2019-10-18T01:45:43 | 2019-10-18T01:45:43 | null | UTF-8 | Python | false | false | 2,113 | py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from arch.api import eggroll
from federatedml.optim import L1Updater
from federatedml.optim import L2Updater
import numpy as np
import unittest
class TestUpdater(unittest.TestCase):
    """Checks the L1/L2 regularization updaters against hand-computed values."""
    def setUp(self):
        # Shared hyper-parameters for both updaters.
        alpha = 0.5
        learning_rate = 0.1
        self.l1_updater = L1Updater(alpha, learning_rate)
        self.l2_updater = L2Updater(alpha, learning_rate)
        self.coef_ = np.array([1, -2, 3, -4, 5, -6, 7, -8, 9])
        self.gradient = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
        # l2 regular
        # Expected L2 penalty: 0.5 * alpha * sum(coef^2).
        self.l2_loss_norm = 0.5 * alpha * np.sum(np.array([i * i for i in self.coef_]))
        # Expected L2 update: gradient step plus weight decay on the coefs.
        self.l2_update_coef = self.coef_ - self.gradient - learning_rate * alpha * self.coef_
        # l1 regular
        # Expected L1 penalty (alpha * sum(|coef|) = 0.5 * 45) and the
        # expected updated coefficients, both precomputed by hand.
        self.l1_loss_norm = 22.5
        self.l1_update_coef = [0, -2.95, 1.95, -4.95, 3.95, -6.95, 5.95, -8.95, 7.95]
    def test_l2updater(self):
        # loss_norm() must reproduce the closed-form L2 penalty ...
        loss_norm = self.l2_updater.loss_norm(self.coef_)
        self.assertEqual(loss_norm, self.l2_loss_norm)
        # ... and update_coef() the expected coefficient vector.
        l2_update_coef = self.l2_updater.update_coef(self.coef_, self.gradient)
        self.assertListEqual(list(l2_update_coef), list(self.l2_update_coef))
    def test_l1updater(self):
        loss_norm = self.l1_updater.loss_norm(self.coef_)
        self.assertEqual(loss_norm, self.l1_loss_norm)
        l1_update_coef = self.l1_updater.update_coef(self.coef_, self.gradient)
        self.assertListEqual(list(l1_update_coef), list(self.l1_update_coef))
# Allow running this test module directly: `python updater_test.py`.
if __name__ == "__main__":
    unittest.main()
| [
"jicezeng@gmail.com"
] | jicezeng@gmail.com |
5904a77dcaa3f8739c2f3bf5900512b189487013 | 81f2d4aa3bfb216e04efec81c7f614603a8fd384 | /irekua/selia/urls/collections_admin.py | af1faca4aa8db97339d9dd03b7f487e21ecce3c1 | [] | no_license | CONABIO-audio/irekua | 44564020c342e8bd49a14707f206962869bc026d | 4531a6dbb8b0a0014567930a134bc4399c2c00d4 | refs/heads/master | 2022-12-10T09:43:05.866848 | 2019-10-17T16:18:21 | 2019-10-17T16:18:21 | 170,434,169 | 0 | 1 | null | 2022-12-08T01:44:04 | 2019-02-13T03:32:55 | Python | UTF-8 | Python | false | false | 350 | py | from django.urls import path
from selia.views import collections_admin
# URL routes for the collections-manager section; the views live in
# selia.views.collections_admin.
urlpatterns = [
    # Presumably the manager's landing page -- verify against the Home view.
    path(
        'manage/',
        collections_admin.Home.as_view(),
        name='collections_manager_home'),
    # Listing of collections managed by the current user (per the view name).
    path(
        'manage/managed_collections/',
        collections_admin.ManagedCollectionsView.as_view(),
        name='managed_collections'),
]
| [
"santiago.mbal@gmail.com"
] | santiago.mbal@gmail.com |
dd8d18efbc81be9cc56316bfb9bd50f9618a5a05 | b5826ee957641a233e5b100e19e0fbdc0e8a0f6e | /fundingharvest/dao.py | 7639f4eec0c7ef95771ab4c4868e043984d9b3b0 | [] | no_license | emanuil-tolev/fundingharvest | 1f610534cd7e8e7a64a65489c588d5d968559d78 | 00b49f708605d1f59eac5b5e21c55cd25e15da17 | refs/heads/master | 2016-09-11T02:09:45.387114 | 2014-04-29T15:28:35 | 2014-04-29T15:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,654 | py | import json
import uuid
import UserDict
import httplib
from datetime import datetime
import pyes
from fundingharvest.config import config
def init_db():
    """Create the backend index if it does not exist yet."""
    conn, index_name = get_conn()
    try:
        conn.create_index(index_name)
    except pyes.exceptions.IndexAlreadyExistsException:
        # Index was created on an earlier run -- nothing to do.
        pass
def get_conn():
    """Return a (connection, index_name) pair for the application's backend.

    NOTE(review): host and index name are hard-coded even though a config
    module is imported; the original commented-out lookups
    (ELASTIC_SEARCH_HOST / ELASTIC_SEARCH_DB) suggest they were meant to
    come from configuration -- confirm before deploying.
    """
    host = "127.0.0.1:9200"
    index_name = "fundfind"
    connection = pyes.ES([host])
    return connection, index_name
class DomainObject(UserDict.IterableUserDict):
    '''Base class for backend-stored domain objects.

    Subclasses set ``__type__`` to the elasticsearch document type they are
    stored under; instances behave like dictionaries of attributes.
    '''
    # set __type__ on inheriting class to determine elasticsearch object
    __type__ = None

    def __init__(self, **kwargs):
        '''Initialize a domain object with key/value pairs of attributes.'''
        # IterableUserDict expects internal dictionary to be on data attribute
        self.data = dict(kwargs)

    @property
    def id(self):
        '''Get id of this object (None when it has not been saved yet).'''
        return self.data.get('id', None)

    def save(self):
        '''Save to backend storage, refreshing the "modified" timestamp.'''
        # TODO: refresh object with result of save
        if 'modified' in self.data:
            self.data['modified'] = datetime.now().isoformat()
        return self.upsert(self.data)

    @classmethod
    def pull(cls, id_):
        '''Retrieve object by id.'''
        conn, db = get_conn()
        out = conn.get(db, cls.__type__, id_)
        return cls(**out['_source'])

    @classmethod
    def delete(cls, id_):
        '''Delete object by id.'''
        conn, db = get_conn()
        out = conn.delete(db, cls.__type__, id_)
        # NOTE(review): delete responses do not obviously carry a '_source'
        # key; confirm this return value is ever used by callers.
        return cls(out['_source']['ok'])

    @classmethod
    def upsert(cls, data):
        '''Update backend object with a dictionary of data.
        If no id is supplied an uuid id will be created before saving.'''
        conn, db = get_conn()
        if 'id' in data:
            id_ = data['id']
        else:
            id_ = uuid.uuid4().hex
            data['id'] = id_
        if 'created' not in data and 'modified' not in data:
            data['created'] = datetime.now().isoformat()
            data['modified'] = datetime.now().isoformat()
        conn.index(data, db, cls.__type__, id_)
        return cls(**data)

    @classmethod
    def delete_by_query(cls, query):
        '''Delete every document of this type matching the query string.

        Bug fix: the request path previously used the undefined name
        ``fundfind`` (NameError on every call); it now builds the same
        "/<index>/<type>" path shape that raw_query uses.
        '''
        host = "127.0.0.1:9200"
        loc = '/' + 'fundfind' + '/' + cls.__type__ + '/_query?q=' + query
        conn = httplib.HTTPConnection(host)
        conn.request('DELETE', loc)
        resp = conn.getresponse()
        return resp.read()

    @classmethod
    def query(cls, q='', terms=None, facet_fields=None, flt=False, **kwargs):
        '''Perform a query on backend.

        :param q: maps to query_string parameter.
        :param terms: dictionary of terms to filter on. values should be lists.
        :param facet_fields: list of dicts with a "key" entry (and optional
            "size", "order") describing the term facets to compute.
        :param flt: if True, run a fuzzy-like-this query instead of a
            query-string query.
        :param kwargs: any keyword args as per
        http://www.elasticsearch.org/guide/reference/api/search/uri-request.html
        '''
        conn, db = get_conn()
        if not q:
            ourq = pyes.query.MatchAllQuery()
        else:
            if flt:
                ourq = pyes.query.FuzzyLikeThisQuery(like_text=q, **kwargs)
            else:
                ourq = pyes.query.StringQuery(q, default_operator='AND')
        if terms:
            # AND together one TermQuery per (field, value) pair.
            for term in terms:
                for val in terms[term]:
                    termq = pyes.query.TermQuery(term, val)
                    ourq = pyes.query.BoolQuery(must=[ourq, termq])
        ourq = ourq.search(**kwargs)
        if facet_fields:
            for item in facet_fields:
                ourq.facet.add_term_facet(item['key'], size=item.get('size', 100), order=item.get('order', "count"))
        out = conn.search(ourq, db, cls.__type__)
        return out

    @classmethod
    def raw_query(cls, query_string):
        '''Pass a raw URI search query through and return the raw response body.'''
        if not query_string:
            msg = json.dumps({
                'error': "Query endpoint. Please provide elastic search query parameters - see http://www.elasticsearch.org/guide/reference/api/search/uri-request.html"
            })
            return msg
        host = "127.0.0.1:9200"
        db_path = "fundfind"
        fullpath = '/' + db_path + '/' + cls.__type__ + '/_search' + '?' + query_string
        c = httplib.HTTPConnection(host)
        c.request('GET', fullpath)
        result = c.getresponse()
        # pass through the result raw
        return result.read()
class Funder(DomainObject):
    # Elasticsearch document type for funding bodies.
    __type__ = 'funder'
class FundingOpp(DomainObject):
__type__ = 'funding_opportunity' | [
"emanuil.tolev@gmail.com"
] | emanuil.tolev@gmail.com |
d28c1620b4795e92a88c32d852a82433a3acf006 | 711c11d0111a40055ba110e7089a231c2ba42b8e | /toontown/ai/ServiceStart.py | 113573dab399c4070258ce5574e020998d3f0c1c | [
"Apache-2.0"
] | permissive | DeadMemez/ProjectAltis-OldAcornAcres | 03c8dc912ecccae8456d89790f6b332547b75cc3 | e8e0087389933795973e566782affcaec65a2980 | refs/heads/master | 2021-01-19T13:59:07.234192 | 2017-08-20T14:41:45 | 2017-08-20T14:41:45 | 100,869,782 | 0 | 2 | null | 2017-08-20T15:14:35 | 2017-08-20T15:14:35 | null | UTF-8 | Python | false | false | 2,621 | py | import __builtin__
# Tag this process so shared Toontown code can tell it runs as the AI server.
__builtin__.process = 'ai'
# Temporary hack patch:
# Dump every Panda3D name into builtins so legacy modules can use them
# without importing (a global "from pandac.PandaModules import *").
__builtin__.__dict__.update(__import__('pandac.PandaModules', fromlist=['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from toontown.toonbase import ToonPythonUtil as PythonUtil
import argparse
# Command-line options for the AI server; trailing positional arguments are
# PRC configuration files to load.
parser = argparse.ArgumentParser()
parser.add_argument('--base-channel', help='The base channel that the server may use.')
parser.add_argument('--max-channels', help='The number of channels the server may use.')
parser.add_argument('--stateserver', help="The control channel of this AI's designated State Server.")
parser.add_argument('--district-name', help="What this AI Server's district will be named.")
parser.add_argument('--astron-ip', help="The IP address of the Astron Message Director to connect to.")
parser.add_argument('--eventlogger-ip', help="The IP address of the Astron Event Logger to log to.")
parser.add_argument('--start-time', help="The time of day to start at")
parser.add_argument('config', nargs='*', default=['config/general.prc', 'config/release/dev.prc'], help="PRC file(s) to load.")
args = parser.parse_args()
for prc in args.config:
    loadPrcFile(prc)
# Translate the command-line overrides into PRC syntax and apply them on top
# of the configuration files loaded above.
localconfig = ''
if args.base_channel: localconfig += 'air-base-channel %s\n' % args.base_channel
if args.max_channels: localconfig += 'air-channel-allocation %s\n' % args.max_channels
if args.stateserver: localconfig += 'air-stateserver %s\n' % args.stateserver
if args.district_name: localconfig += 'district-name %s\n' % args.district_name
if args.astron_ip: localconfig += 'air-connect %s\n' % args.astron_ip
if args.eventlogger_ip: localconfig += 'eventlog-host %s\n' % args.eventlogger_ip
if args.start_time: localconfig += 'start-time %s\n' % args.start_time
loadPrcFileData('Command-line', localconfig)
from otp.ai.AIBaseGlobal import *
from toontown.ai.ToontownAIRepository import ToontownAIRepository
# Create the AI repository using the configured (or default) channels,
# district name and in-game start time.
simbase.air = ToontownAIRepository(config.GetInt('air-base-channel', 401000000),
                                   config.GetInt('air-stateserver', 4002),
                                   config.GetString('district-name', 'Devhaven'),
                                   config.GetInt('start-time', 6))
# 'air-connect' may be "host" or "host:port"; the default port is 7100.
host = config.GetString('air-connect', '127.0.0.1')
port = 7100
if ':' in host:
    host, port = host.split(':', 1)
    port = int(port)
simbase.air.connect(host, port)
try:
    run()
except SystemExit:
    raise
except Exception:
    # Record any crash in the server event log (with the offending avatar
    # and account ids, when a sender is known) before re-raising.
    info = PythonUtil.describeException()
    simbase.air.writeServerEvent('ai-exception', simbase.air.getAvatarIdFromSender(), simbase.air.getAccountIdFromSender(), info)
    raise
| [
"tewtow5@gmail.com"
] | tewtow5@gmail.com |
d1721732a4cb0290b1e1c779d98ffbd556b00350 | c24ad19b65992dd2be3d3210b889d970e43b9cdc | /class/phase2/RE/exercise.py | ea6935386b44c7c9c4ee02508bb9a92003d42f16 | [] | no_license | ivoryli/myproject | 23f39449a0bd23abcc3058c08149cebbfd787d12 | cebfa2594198060d3f8f439c971864e0639bbf7e | refs/heads/master | 2020-05-30T06:25:41.345645 | 2019-05-31T11:15:55 | 2019-05-31T11:15:55 | 189,578,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | import sys
import re
# port = sys.argv[1]
# fd = open("1.txt")
# while True:
# data = ''
# for line in fd:
# if line != '\n':
# data += line
# else:
# break
# # print(">>>s>>>>",data)
#
# #匹配字符串首个单词
# key_word = re.match(r'\S+',data).group()
# if key_word == port:
# #匹配目标内容
# pattern = r"[0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4}"
# pattern1 = r"(\d{1,3}\.){3}\d{1,3}/\d+|Unknow"
# try:
# address = re.search(pattern,data).group()
# print(address)
# address1 = re.search(pattern1,data).group()
# print(address1)
# except:
# print("No address")
# break
# if not data:
# print("Not Found the %s"%port)
# break
# myself
# cmd = sys.argv[1]
# fd = open("./RE/1.txt")
# s = ''
# while True:
# data = fd.read(1024)
# if not data:
# break
# s += data
#
# fd.close()
# L = s.split("\n")
# # print(L)
# flag = False
# for item in L:
# if not flag:
# start = re.findall("^\S+",item)
# # print(start[0])
# try:
# if start[0] == cmd:
# flag = True
# except Exception:
# continue
# if flag:
# # print(item)
# #findall里不能用(),代表第几项
# # address = re.findall(r"(\d{1,3}\.){3}\d{1,3}/\d+",item)
# address = re.search(r"(\d{1,3}\.){3}\d{1,3}/\d+",item)
# if address:
# address = address.group()
# if address:
# print(address)
# break
| [
"2712455490@qq.com"
] | 2712455490@qq.com |
1fa00ffbfd674b2cd55fb92276e5f8116a643b10 | c26332e2b12888069efe3664999a314b3bd56d7d | /backend/mobile_122_dev_5929/wsgi.py | 6b6ae74d42b50e58a4ec368459dab5d82879d108 | [] | no_license | crowdbotics-apps/mobile-122-dev-5929 | b7dee33e9c4492567601113440d541e2e101a552 | f51b63024c7c5bbfa7a4d99626c2019ac8408bf3 | refs/heads/master | 2022-10-11T09:56:45.538625 | 2020-06-12T05:25:16 | 2020-06-12T05:25:16 | 271,715,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for mobile_122_dev_5929 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the application
# object is created (no-op if the variable is already set).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_122_dev_5929.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) import.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2c0191293e8c452ed7859c3cde2961e80e81d050 | 030cea4006a4ff559f23cb3b3c31cd038ed2e332 | /week8/informatics/array_f.py | 0231e876ee6b041411b446562543e4a28a75e84b | [] | no_license | ayananygmetova/Web-Dev-2020 | f8834e0ee26f0f0f06d0e3a282c73b373954a430 | 957bca91554f015e9a3d13b4ec12e64de7ac633e | refs/heads/master | 2023-01-22T16:49:39.857983 | 2020-03-31T10:09:54 | 2020-03-31T10:09:54 | 236,937,810 | 1 | 0 | null | 2023-01-07T16:34:35 | 2020-01-29T08:41:10 | Python | UTF-8 | Python | false | false | 380 | py | <<<<<<< HEAD
# Bug fix: this file contained an unresolved git merge conflict whose two
# sides were byte-identical copies of the script; the conflict markers and
# the duplicate copy are removed here (the stray "<<<<<<< HEAD" marker on
# the preceding line should be deleted as well).
#
# Count the "peaks": elements strictly greater than both of their immediate
# neighbours (the endpoints are never counted).
n = int(input())
nums = [int(i) for i in input().split()]
cnt = 0
for i in range(1, n - 1):
    if nums[i] > nums[i - 1] and nums[i] > nums[i + 1]:
        cnt += 1
print(cnt) | [
"ayananyfmetova@gmail.com"
] | ayananyfmetova@gmail.com |
03b2c7c933d5a7132ca19ec39915b583600ab8e9 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc201-abc250/abc249/a/main.py | 865b1e51de55bf7b935c7adf0cc1a54134c2cbb1 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
def main():
    """Compare how far two walkers get in x minutes (AtCoder ABC 249 A)."""
    import sys
    read = sys.stdin.readline
    a, b, c, d, e, f, x = map(int, read().split())
    # Takahashi repeats: walk `a` minutes at speed `b`, rest `c` minutes.
    full, rem = divmod(x, a + c)
    takahashi = full * a * b + min(rem, a) * b
    # Aoki repeats: walk `d` minutes at speed `e`, rest `f` minutes.
    full, rem = divmod(x, d + f)
    aoki = full * d * e + min(rem, d) * e
    if takahashi > aoki:
        print('Takahashi')
    elif takahashi < aoki:
        print('Aoki')
    else:
        print('Draw')
# Standard competitive-programming entry point.
if __name__ == "__main__":
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
c0467c1f1792cf8edb3adc594aa6a34ac8af51cf | 0ed63027e39120c8f0ca355aa5abbeaee42104f4 | /dex_api_python/models/post_deposit_body.py | dd59b63647579ac1cbcf29c748c3ce392528a6e6 | [] | no_license | Felix-Fenix/dex-api-python | 592f4737936fbc1968e7c3725bf2ecfda607ff9a | bb5a2d86d476ab3c080383ad5197ed5f116712ed | refs/heads/master | 2022-08-24T16:22:28.795164 | 2020-05-26T07:11:34 | 2020-05-26T07:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,475 | py | # coding: utf-8
"""
CET-Lite for CoinEx Chain
A REST interface for state queries, transaction generation and broadcasting. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PostDepositBody(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its swagger-declared type name.
    swagger_types = {
        'base_req': 'BaseReq',
        'depositor': 'Address',
        'amount': 'list[Coin]'
    }
    # Maps each model attribute to its JSON key in the API definition.
    attribute_map = {
        'base_req': 'base_req',
        'depositor': 'depositor',
        'amount': 'amount'
    }
    def __init__(self, base_req=None, depositor=None, amount=None):  # noqa: E501
        """PostDepositBody - a model defined in Swagger"""  # noqa: E501
        self._base_req = None
        self._depositor = None
        self._amount = None
        self.discriminator = None
        # Only assign through the property setters when a value was provided,
        # so unset fields stay None and are distinguishable from explicit values.
        if base_req is not None:
            self.base_req = base_req
        if depositor is not None:
            self.depositor = depositor
        if amount is not None:
            self.amount = amount
    @property
    def base_req(self):
        """Gets the base_req of this PostDepositBody.  # noqa: E501
        :return: The base_req of this PostDepositBody.  # noqa: E501
        :rtype: BaseReq
        """
        return self._base_req
    @base_req.setter
    def base_req(self, base_req):
        """Sets the base_req of this PostDepositBody.
        :param base_req: The base_req of this PostDepositBody.  # noqa: E501
        :type: BaseReq
        """
        self._base_req = base_req
    @property
    def depositor(self):
        """Gets the depositor of this PostDepositBody.  # noqa: E501
        :return: The depositor of this PostDepositBody.  # noqa: E501
        :rtype: Address
        """
        return self._depositor
    @depositor.setter
    def depositor(self, depositor):
        """Sets the depositor of this PostDepositBody.
        :param depositor: The depositor of this PostDepositBody.  # noqa: E501
        :type: Address
        """
        self._depositor = depositor
    @property
    def amount(self):
        """Gets the amount of this PostDepositBody.  # noqa: E501
        :return: The amount of this PostDepositBody.  # noqa: E501
        :rtype: list[Coin]
        """
        return self._amount
    @amount.setter
    def amount(self, amount):
        """Sets the amount of this PostDepositBody.
        :param amount: The amount of this PostDepositBody.  # noqa: E501
        :type: list[Coin]
        """
        self._amount = amount
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # six.iteritems keeps this py2/py3 compatible; nested models are
        # recursively serialized via their own to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(PostDepositBody, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Attribute-wise comparison via __dict__ (compares the _-prefixed
        # backing fields plus discriminator).
        if not isinstance(other, PostDepositBody):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"huahui.gao@matrixport.com"
] | huahui.gao@matrixport.com |
fa3aa2d7d1b50914f148e89a4638fb89c4c58898 | ebbaa59b07f170e7fcff3f97c1ac17d9c8f70dbf | /cars_server/settings.py | 39000cbcfaf46dc699f4c8adf566bc0ec155a75c | [] | no_license | sudhanshuchopra/cars_server | fe22e89a7d08c8d34da3d86e18d73e5f1e7c6368 | 40fe65debb10745bbe6ba1ca9a58576983f47caa | refs/heads/master | 2021-01-12T04:19:29.460116 | 2016-12-29T10:55:19 | 2016-12-29T10:55:19 | 77,587,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | """
Django settings for cars_server project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'o7vmpy9jt8dqfy#x867=rjd&)!c3vi-hef*a0al41gqnlq8o5w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list served hostnames otherwise.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'cars'
]
# MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name (this project
# targets Django 1.9 per the URLs above).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cars_server.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cars_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"sudhanshuccp@gmail.com"
] | sudhanshuccp@gmail.com |
e531c93400227515a0cff8a32d5fd493da27819d | 3327a87cefa2275bd0ba90a500444f3494b14fdf | /bwu/array/229-majority-element-ii.py | 97f32d59b609136a9b16159f5b89aaa2dc146d20 | [] | no_license | captainhcg/leetcode-in-py-and-go | e1b56f4228e0d60feff8f36eb3d457052a0c8d61 | 88a822c48ef50187507d0f75ce65ecc39e849839 | refs/heads/master | 2021-06-09T07:27:20.358074 | 2017-01-07T00:23:10 | 2017-01-07T00:23:10 | 61,697,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if not nums:
return []
count1, count2, cand1, cand2 = 0, 0, 0, 1
for n in nums:
if n == cand1:
count1 += 1
elif n == cand2:
count2 += 1
elif count1 == 0:
cand1, count1 = n, 1
elif count2 == 0:
cand2, count2 = n, 1
else:
count1 -= 1
count2 -= 1
return [i for i in (cand1, cand2) if nums.count(i) > len(nums) // 3]
| [
"noreply@github.com"
] | captainhcg.noreply@github.com |
e2cc5bec1c205a1ac7954da2c42e2c572adc1402 | 6fc061385b59852d86a6d00a7c1ae9136d072d11 | /localpubsub/localpubsub.py | 78ee2d6c65d2f8dabba58969685173485f7053d8 | [
"MIT"
] | permissive | SimLeek/localpubsubs | d07c9d31be63761fd70ba37e6acb5ba36dfa53ed | 2e2351e4047ace6f68ce2217fe2222f1f18aec09 | refs/heads/master | 2020-04-18T01:28:13.496094 | 2019-03-10T23:59:31 | 2019-03-10T23:59:31 | 167,119,863 | 0 | 0 | MIT | 2019-03-10T23:59:32 | 2019-01-23T04:57:06 | Python | UTF-8 | Python | false | false | 2,216 | py | from threading import Lock
# Import typing names for static analysis only; the `if False` guard keeps the
# import from executing at runtime (py2-compatible precursor of
# typing.TYPE_CHECKING, matching the `# type:` comments used below).
if False:
    from typing import Dict
class NoData(object):
    """Sentinel type; VariableSub.get returns an instance when no data is available."""
def _args_for_lock(blocking, timeout):
if blocking:
args = (blocking, timeout)
else:
args = (blocking,)
return args
class VariableSub(object):
    """Subscriber handle for a VariablePub.

    The internal lock starts acquired; the publisher releases it on each
    publish, which is how the subscriber learns fresh data is available.
    """
    def __init__(self, pub):
        self.pub = pub
        self.lock = Lock()
        self.return_on_no_data = NoData()
        # Start locked: get() only proceeds once the publisher releases us.
        self.lock.acquire()
        self.name = hash(self)
    def get(self, blocking=False, timeout=0.0):
        """Return the latest published value, or ``self.return_on_no_data``
        when no publish has happened since the last get (non-blocking) or the
        wait timed out."""
        if self.lock.acquire(*_args_for_lock(blocking, timeout)):
            data = self.pub.get_data(blocking, timeout)
        else:
            data = self.return_on_no_data
        return data
    def release(self):
        """Detach from the publisher and release the wait lock (idempotent)."""
        try:
            try:
                del self.pub.subs[self.name]
            except KeyError:
                pass  # already unsubscribed
            self.lock.release()
        except RuntimeError:
            pass  # lock was not held; nothing to release
    def __enter__(self):
        # Fix: __exit__ existed without __enter__, so `with pub.make_sub() as
        # sub:` raised AttributeError instead of working as intended.
        return self
    def __del__(self):
        self.release()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
    def __aexit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): not `async def` and there is no __aenter__, so this is
        # not a functional async context manager -- confirm intended usage.
        self.release()
class VariablePub(object):
    """Publisher side of the local pub/sub: stores the latest value and a
    registry of subscribers whose wait locks are released on every publish."""
    def __init__(self):
        self.subs = {} # type: Dict[VariableSub]
        self.__data = None
        self.__write_lock = Lock()
    def make_sub(self): # type: ()->VariableSub
        # Create a subscriber and register it under its hash-based name so
        # VariableSub.release can remove itself later.
        sub = VariableSub(self)
        self.subs[sub.name] = sub
        return sub
    def publish(self, data, blocking=True, timeout=0.0, force_all_read=False):
        # NOTE(review): `force_all_read` is accepted but never used here --
        # confirm whether it was meant to block until all subscribers read.
        self.__set_data(data, blocking, timeout)
        # Wake every subscriber; RuntimeError means that subscriber's lock was
        # already released (previous value not yet consumed), safe to ignore.
        for sub in self.subs.values():
            try:
                sub.lock.release()
            except RuntimeError:
                pass
    def get_data(self, blocking=False, timeout=0.0):
        # Briefly take and drop the write lock so a concurrent __set_data
        # finishes before the value is read.
        self.__write_lock.acquire(*_args_for_lock(blocking, timeout))
        try:
            self.__write_lock.release()
        except RuntimeError:
            pass
        return self.__data
    def __set_data(self, new_data, blocking=True, timeout=0.0):
        # NOTE(review): if a non-blocking/timed acquire fails, the data is
        # still written without holding the lock -- confirm that race is
        # acceptable for callers using blocking=False.
        self.__write_lock.acquire(*_args_for_lock(blocking, timeout))
        self.__data = new_data
        try:
            self.__write_lock.release()
        except RuntimeError:
            pass
| [
"josh.miklos@gmail.com"
] | josh.miklos@gmail.com |
412d4c1d26dc9b4425b7ac7a327faca556197f81 | 61587071621eefd6dd0f549d618753d769eededc | /src/pages/alagoas/novoextra.py | e16386b7609ff56c1b43bfa185920200c19dd23d | [] | no_license | diegothuran/rss3 | df1e9e07b0552eb8bdbe6131235df3b991ddbabc | 3177cf768047ced21e38a060d2306cd028e752fb | refs/heads/master | 2022-12-07T19:53:14.609867 | 2019-02-10T20:15:13 | 2019-02-10T20:15:13 | 145,567,411 | 0 | 0 | null | 2022-11-22T02:50:19 | 2018-08-21T13:25:00 | Python | UTF-8 | Python | false | false | 1,117 | py | # coding: utf-8
import sys
sys.path.insert(0, '../../src')
from bs4 import BeautifulSoup
import requests
# Site ranking metadata for novoextra.com.br (presumably Alexa-style global
# and Brazil ranks at time of writing -- TODO confirm source of the figures).
GLOBAL_RANK = 590712
RANK_BRAZIL = 32566
NAME = 'novoextra.com.br'
def get_urls():
    """Scrape the novoextra.com.br section pages and return article URLs.

    Fetches each section listing, extracts the first link of every
    'div.item' card, and returns the absolute URLs.
    """
    try:
        urls = []
        root = 'https://novoextra.com.br'
        links = [
            'https://novoextra.com.br/so-no-site/alagoas',
            'https://novoextra.com.br/so-no-site/geral',
            'https://novoextra.com.br/so-no-site/internacional',
            'https://novoextra.com.br/so-no-site/nacional',
            'https://novoextra.com.br/so-no-site/politica'
        ]
        for link in links:
            req = requests.get(link)
            noticias = BeautifulSoup(req.text, "html.parser").find_all('div', class_='item')
            for noticia in noticias:
                href = noticia.find_all('a', href=True)[0]['href']
                full_link = root + href
                urls.append(full_link)
        return urls
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. The original error is still masked by this generic
        # message -- consider chaining the cause instead.
        raise Exception('Exception in novoextra')
| [
"victor.lorena@gmail.com"
] | victor.lorena@gmail.com |
ec5cda2e94a7432262ed358abe6b66255554e044 | 825930f372fdf8c9c42cd2f9b1f424ab9de90b38 | /accounts/models.py | 77d49558c6ab9090318df7e4d9bcc8553e09e68f | [] | no_license | Xasanjon/crm2 | 56cbfa05d910144c75a3cdfe7423ba68fd576534 | 52279925e64e4268830fbeae6af897aef14b64d0 | refs/heads/master | 2023-07-02T04:13:33.928305 | 2021-08-16T14:53:43 | 2021-08-16T14:53:43 | 395,755,429 | 0 | 0 | null | 2021-08-16T14:53:44 | 2021-08-13T18:30:32 | Python | UTF-8 | Python | false | false | 1,880 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
    """A customer, optionally linked one-to-one to an auth User."""
    user = models.OneToOneField(
        User, null=True, blank=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=200, null=True)
    phone = models.CharField(max_length=200, null=True, blank=True)
    email = models.CharField(max_length=200, null=True, blank=True)
    profile_pic = models.ImageField(
        default="profile.png", null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    def __str__(self):
        # Fix: `name` is nullable; __str__ must return a str (returning None
        # raises TypeError in admin/shell displays), so fall back to "".
        return self.name or ""
class Tag(models.Model):
    """Free-form label attachable to products."""
    name = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        # Fix: `name` is nullable; never return None from __str__.
        return self.name or ""
class Product(models.Model):
    """Catalog item with price, category, description and tags."""
    CATEGORY = (
        ('Indoor', 'Indoor'),
        ('Outdoor', 'Outdoor')
    )
    name = models.CharField(max_length=200, null=True)
    price = models.FloatField(null=True)
    category = models.CharField(max_length=200, null=True, choices=CATEGORY)
    description = models.TextField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    tag = models.ManyToManyField(Tag)
    def __str__(self):
        # Fix: `name` is nullable; never return None from __str__.
        return self.name or ""
class Order(models.Model):
    """A customer's order for a single product."""
    STATUS = (
        ('Pending', 'Pending'),
        ('Out for delivery', 'Out for delivery'),
        ('Deleivered', 'Deleivered')
    )
    # NOTE(review): the 'Deleivered' typo is preserved on purpose -- stored
    # rows reference this value; fixing it requires a data migration.
    customer = models.ForeignKey(
        Customer, null=True, on_delete=models.SET_NULL)
    product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    status = models.CharField(max_length=200, null=True, choices=STATUS)
    note = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        # Fix: `product` uses on_delete=SET_NULL so it can be None; the
        # original `self.product.name` raised AttributeError in that case.
        if self.product is None:
            return ""
        return self.product.name or ""
| [
"xasanboy99akaxon@gmail.com"
] | xasanboy99akaxon@gmail.com |
f3905abd94b637d98fa7979ff6d9141620545f55 | 0f79fd61dc47fcafe22f83151c4cf5f2f013a992 | /BOJ/11053.py | c97d0ad92ba2aa4c8f3ec542dfb9fe9e3fe02f62 | [] | no_license | sangm1n/problem-solving | 670e119f28b0f0e293dbc98fc8a1aea74ea465ab | bc03f8ea9a6a4af5d58f8c45c41e9f6923f55c62 | refs/heads/master | 2023-04-22T17:56:21.967766 | 2021-05-05T12:34:01 | 2021-05-05T12:34:01 | 282,863,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | """
author : Lee Sang Min
github : https://github.com/sangm1n
e-mail : dltkd96als@naver.com
title : 가장 긴 증가하는 부분 수열
description : Dynamic Programming
"""
# Longest (strictly) increasing subsequence, O(N^2) dynamic programming:
# best[i] = length of the longest increasing subsequence ending at index i.
count = int(input())
values = list(map(int, input().split()))
best = [1] * count
for right in range(count):
    for left in range(right):
        if values[left] < values[right] and best[left] + 1 > best[right]:
            best[right] = best[left] + 1
print(max(best))
| [
"dltkd96als@naver.com"
] | dltkd96als@naver.com |
715e9f56ac3e6ff7f765b338d1357f4f59ecd8f2 | 51a902289b357ad6cf467e51c9740fa2a07c1c1c | /first_sem/lab_5/protect5.py | 4a02342594f304c825d684e7cf4e593a92379b99 | [] | no_license | ivaaahn/bmstu-python | d3910356353b0ab3faab243a53a1d657b7d793ad | 9f2d8eb390d804ad86545c992add2a6d97148a5d | refs/heads/main | 2023-03-24T08:52:12.723117 | 2021-03-16T13:19:46 | 2021-03-16T13:19:46 | 348,353,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | x, eps = map(float, (input('Через пробел введите x, eps: ')).split(' '))
# S = 0.0
# n = 1
# elNext = x
# k = n + 1
#
# while abs(elNext) > eps:
# S += elNext
# n += 1
# elNext = -elNext * x * x / k / (k + 1)
# k = k + 2
s = 0.0
n = 2
nextEl = x
while abs(nextEl) >= eps:
s += nextEl
nextEl *= - x * x * (n-1) / (n * (n + 1) * (n + 1))
n += 2
print('S = {:.5}'.format(s))
| [
"ivahnencko01@gmail.com"
] | ivahnencko01@gmail.com |
9654adeb6cb6a9aed8c3e0304e89ee23367bafbe | 903a5afa943971c2be30007f0445264d417727e8 | /src/openprocurement/tender/openua/views/complaint_post_document.py | e7012422a55ba07117b724366dade2076a325b34 | [
"Apache-2.0"
] | permissive | pontostroy/api | 39ca2ff864daa9c6b8a96b17c49f6eec2f489398 | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | refs/heads/master | 2021-03-14T13:11:24.314136 | 2020-06-24T08:34:20 | 2020-06-24T08:34:20 | 246,768,251 | 0 | 0 | Apache-2.0 | 2020-06-24T07:18:10 | 2020-03-12T07:18:07 | Python | UTF-8 | Python | false | false | 5,210 | py | # -*- coding: utf-8 -*-
from openprocurement.api.utils import (
get_file,
upload_file,
update_file_content_type,
json_view,
context_unpack,
APIResource,
)
from openprocurement.api.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
from openprocurement.tender.core.validation import (
validate_complaint_document_update_not_by_author,
)
from openprocurement.tender.core.utils import save_tender, optendersresource, apply_patch
from openprocurement.tender.openua.validation import (
validate_complaint_post_review_date,
validate_complaint_post_complaint_status,
validate_complaint_post_document_upload_by_author,
)
@optendersresource(
    name="aboveThresholdUA:Tender Complaint Post Documents",
    collection_path="/tenders/{tender_id}/complaints/{complaint_id}/posts/{post_id}/documents",
    path="/tenders/{tender_id}/complaints/{complaint_id}/posts/{post_id}/documents/{document_id}",
    procurementMethodType="aboveThresholdUA",
    description="Tender complaint post documents",
)
class TenderComplaintPostDocumentResource(APIResource):
    # REST resource for documents attached to a complaint "post" in the
    # aboveThresholdUA procedure: list/upload on the collection route,
    # read/replace/patch on the item route.
    @json_view(permission="view_tender")
    def collection_get(self):
        """
        Tender Complaint Post Documents List
        """
        if self.request.params.get("all", ""):
            collection_data = [i.serialize("view") for i in self.context.documents]
        else:
            # Deduplicate by document id (revisions share an id): the dict
            # keeps the last revision per id, then sort by dateModified.
            collection_data = sorted(
                dict([(i.id, i.serialize("view")) for i in self.context.documents]).values(),
                key=lambda i: i["dateModified"],
            )
        return {"data": collection_data}
    @json_view(
        validators=(
            validate_file_upload,
            validate_complaint_post_document_upload_by_author,
            validate_complaint_post_complaint_status,
            validate_complaint_post_review_date,
        ),
        permission="edit_complaint",
    )
    def collection_post(self):
        """
        Tender Complaint Post Document Upload
        """
        document = upload_file(self.request)
        # Stamp the uploader's role so later updates can be restricted to
        # (or away from) the original author by the validators above.
        document.author = self.request.authenticated_role
        self.context.documents.append(document)
        if save_tender(self.request):
            self.LOGGER.info(
                "Created tender complaint post document {}".format(document.id),
                extra=context_unpack(
                    self.request, {"MESSAGE_ID": "tender_complaint_post_document_create"},
                    {"document_id": document.id}
                ),
            )
            # 201 Created with a Location header pointing at the item route.
            self.request.response.status = 201
            document_route = self.request.matched_route.name.replace("collection_", "")
            self.request.response.headers["Location"] = self.request.current_route_url(
                _route_name=document_route, document_id=document.id, _query={}
            )
            return {"data": document.serialize("view")}
    @json_view(permission="view_tender")
    def get(self):
        """
        Tender Complaint Post Document Read
        """
        if self.request.params.get("download"):
            return get_file(self.request)
        document = self.request.validated["document"]
        document_data = document.serialize("view")
        # Attach every other stored revision of this document (same id,
        # different url) as previousVersions.
        document_data["previousVersions"] = [
            i.serialize("view") for i in self.request.validated["documents"] if i.url != document.url
        ]
        return {"data": document_data}
    @json_view(
        validators=(
            validate_file_update,
            validate_complaint_document_update_not_by_author,
            validate_complaint_post_complaint_status,
            validate_complaint_post_review_date,
        ),
        permission="edit_complaint",
    )
    def put(self):
        """
        Tender Complaint Post Document Update
        """
        # PUT uploads a new revision; it is appended alongside the previous
        # ones rather than replacing them in storage.
        document = upload_file(self.request)
        document.author = self.request.authenticated_role
        self.request.validated["post"].documents.append(document)
        if save_tender(self.request):
            self.LOGGER.info(
                "Updated tender complaint document {}".format(self.request.context.id),
                extra=context_unpack(self.request, {"MESSAGE_ID": "tender_complaint_post_document_put"}),
            )
            return {"data": document.serialize("view")}
    @json_view(
        content_type="application/json",
        validators=(
            validate_patch_document_data,
            validate_complaint_document_update_not_by_author,
            validate_complaint_post_complaint_status,
            validate_complaint_post_review_date,
        ),
        permission="edit_complaint",
    )
    def patch(self):
        """
        Tender Complaint Post Document Update
        """
        # PATCH edits document metadata in place (JSON body, no file upload).
        if apply_patch(self.request, src=self.request.context.serialize()):
            update_file_content_type(self.request)
            self.LOGGER.info(
                "Updated tender complaint document {}".format(self.request.context.id),
                extra=context_unpack(self.request, {"MESSAGE_ID": "tender_complaint_post_document_patch"}),
            )
            return {"data": self.request.context.serialize("view")}
| [
"smithumble@gmail.com"
] | smithumble@gmail.com |
ccd648d652a9be83c5c2a3cf25506b3328aa808b | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/common/system/systemhost.py | 52b88d6fcc1d525a4ac3586c4a0e186a9942bd2a | [
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 2,434 | py | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import platform
import sys
from webkitpy.common.system import environment, executive, filesystem, platforminfo, user, workspace
class SystemHost(object):
    """Bundle of concrete system services (process execution, filesystem,
    user interaction, platform info, workspace) used by webkitpy tools."""

    def __init__(self):
        self.executive = executive.Executive()
        self.filesystem = filesystem.FileSystem()
        self.user = user.User()
        self.platform = platforminfo.PlatformInfo(sys, platform, self.executive)
        self.workspace = workspace.Workspace(self.filesystem, self.executive)

    def copy_current_environment(self):
        """Return an Environment wrapping a snapshot of os.environ."""
        return environment.Environment(os.environ.copy())

    def print_(self, *args, **kwargs):
        """print()-style helper: join args with `sep`, append `end`, and write
        to `file` (default stdout, or stderr when stderr=True)."""
        sep = kwargs.get('sep', ' ')
        end = kwargs.get('end', '\n')
        stderr = kwargs.get('stderr', False)
        target = kwargs.get('file', None)
        if not target:
            target = sys.stderr if stderr else sys.stdout
        target.write(sep.join(str(arg) for arg in args) + end)
| [
"karun.matharu@gmail.com"
] | karun.matharu@gmail.com |
1d215416e8655f9b2cca93de4850235b5d6baf1e | 78f0234c1f20605008a98aa9956a6071ccb7996f | /gag.py | f0fb681ef919f103cd7e3f5d6e36be0b7a257995 | [] | no_license | sashgorokhov-heaven/python-9gag-gui-old | b6ce8ee683323a7712d48a3c0f948703901245b2 | 870b803462180fcfaeb39c51869ebf279cb8a0d6 | refs/heads/master | 2021-05-27T23:08:47.641736 | 2014-05-23T19:22:33 | 2014-05-23T19:22:33 | 18,153,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | __author__ = 'Alexander'
from libs import constants, util
from libs.gorokhovlibs.vk import accesstokener
# Fail fast with a readable message when a third-party dependency is missing.
try:
    import requests
except ImportError:
    print("Module 'requests' is not found")
    exit(-1)
try:
    import PyQt4
except ImportError:
    print("Module 'PyQt4' is not found")
    exit(-1)
access_token = user_id = None
# Reuse the cached VK access token when still valid; otherwise run the
# interactive browser-based OAuth flow and cache the fresh token.
if not accesstokener.good():
    from libs.gorokhovlibs.vk.qt.auth import show_browser
    access_token, user_id, expires = show_browser(constants.application_id, constants.permissions_scope)
    if not access_token:
        exit(-1)
    accesstokener.new(access_token, user_id, expires)
else:
    access_token, user_id, expires = accesstokener.get()
if not access_token:
    util.showmessage("Didn't get secret key!")
    exit(-1)
# Launch the Qt main window with the authenticated token.
from forms.main_form import MainForm
app = PyQt4.QtGui.QApplication([])
mainform = MainForm(access_token)
mainform.show()
app.exec_()
| [
"sashgorokhov@gmail.com"
] | sashgorokhov@gmail.com |
315cdd5b8427d37fcd29c953161c986ad08c4fc7 | e9536f9dc5d0ed823f36e1f68ee10ccd01ecb250 | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeLiveStreamNumberListRequest.py | 2f11b8e61cfade747f0c2af3e0c8dcd204323d47 | [
"Apache-2.0"
] | permissive | Linyee/aliyun-openapi-python-sdk | 70aee05946d2cb0520d378f7aff6a1f7e8177d6d | 436f6251ec32fdccd58c940f2c508a95deeacd8b | refs/heads/master | 2021-01-20T03:54:40.275523 | 2017-08-25T02:15:01 | 2017-08-25T02:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeLiveStreamNumberListRequest(RpcRequest):
    """RPC request for the CDN API 'DescribeLiveStreamNumberList'
    (API version 2014-11-11). The accessors below are generated-SDK
    boilerplate delegating to the query-parameter dict on RpcRequest."""
    def __init__(self):
        RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeLiveStreamNumberList')
    def get_AppName(self):
        return self.get_query_params().get('AppName')
    def set_AppName(self,AppName):
        self.add_query_param('AppName',AppName)
    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')
    def set_SecurityToken(self,SecurityToken):
        self.add_query_param('SecurityToken',SecurityToken)
    def get_DomainName(self):
        return self.get_query_params().get('DomainName')
    def set_DomainName(self,DomainName):
        self.add_query_param('DomainName',DomainName)
    def get_EndTime(self):
        return self.get_query_params().get('EndTime')
    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)
    def get_StartTime(self):
        return self.get_query_params().get('StartTime')
    def set_StartTime(self,StartTime):
        self.add_query_param('StartTime',StartTime)
    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
79f647d28b8a1806e461603d2ce0c7ed4feb685c | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/instruments/forcast/tests/test_merge_correlation.py | bf607a0bd2a309efc56e0610b7ce6f90da3bbc00 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,672 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io import fits
import numpy as np
import sofia_redux.instruments.forcast.configuration as dripconfig
from sofia_redux.instruments.forcast.merge_correlation \
import addhist, merge_correlation
from sofia_redux.instruments.forcast.tests.resources \
import nmc_testdata, npc_testdata
class TestMergeCorrelation(object):
    """Pytest suite for merge_correlation (FORCAST chop/nod merge) and its
    addhist header helper."""
    def test_addhist(self):
        """addhist stores the message in HISTORY with a 'Merge: ' prefix."""
        header = fits.header.Header()
        addhist(header, 'test history message')
        assert 'HISTORY' in header
        assert header['HISTORY'] == 'Merge: test history message'
    def test_nmc_merge_correlation(self):
        """Merge NMC test data; check normmap, variance scaling and header keys."""
        test = nmc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        # Disable the border exclusion for this test.
        dripconfig.configuration['border'] = 0
        merged, var = merge_correlation(
            data, header, variance=variance, normmap=normmap)
        dripconfig.load()
        nm = np.nanmax(normmap)
        assert nm == 4
        assert np.nanmin(var) == varval * (nm - 1) / (nm ** 2)
        assert np.allclose(np.nanmax(data),
                           np.nanmax(merged * nm / (nm - 1)),
                           atol=0.1)
        # Shift keywords are written per dimension and per pair (0 and 1 only).
        for key in ['MRGDX', 'MRGDY']:
            for i in range(2):
                assert '%s%i' % (key, i) in header
        assert 'MRGX2' not in header
    def test_npc_merge_correlation(self):
        """Merge NPC test data; variance and peak scale differ from NMC mode."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merged, var = merge_correlation(
            data, header, variance=variance, normmap=normmap)
        dripconfig.load()
        nm = np.nanmax(normmap)
        assert nm == 4
        assert np.nanmin(var) == varval * nm / (nm ** 2)
        assert np.allclose(np.nanmax(data), np.nanmax(merged), atol=0.1)
        for key in ['MRGDX', 'MRGDY']:
            for i in range(2):
                assert '%s%i' % (key, i) in header
    def test_errors(self):
        """Invalid header/data/border return None; bad variance returns
        (data, None)."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        assert merge_correlation(data, 'a') is None
        assert merge_correlation(np.array(10), header) is None
        dripconfig.load()
        # A border as large as the image leaves nothing to merge.
        dripconfig.configuration['border'] = data.shape[0]
        merged = merge_correlation(data, header)
        assert merged is None
        dripconfig.load()
        # check bad variance
        merged = merge_correlation(data, header, variance=10)
        assert merged[0] is not None
        assert merged[1] is None
    def test_upsample(self):
        """upsample > 1 yields sub-pixel shifts; upsample=1 yields integers."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merge_correlation(data, header, upsample=100)
        dx = header['MRGDX0']
        assert not np.allclose(dx, int(dx), atol=0.01)
        merge_correlation(data, header, upsample=1)
        dx = header['MRGDX0']
        assert dx == int(dx)
        dripconfig.load()
    def test_maxregister(self):
        """maxshift=0 cancels the source; unrestricted search recovers it."""
        test = npc_testdata()
        data = test['data'].copy()
        dmax = np.nanmax(data)
        header = test['header'].copy()
        # These settings should result in 0 chop nod so shift
        # algorithm will essentially subtract out all source
        header['CHPAMP1'] = 0
        header['NODAMP'] = 0
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merged, _ = merge_correlation(data, header, maxshift=0)
        mmax = np.nanmax(merged)
        # maximum should be close to zero
        assert np.allclose(mmax, 0, atol=0.01)
        # Now allow a search over the whole image
        # Note that this solution may be incorrect as we cannot
        # guarantee which negative source correlates with which
        # positive source... That's why we need the shift from
        # the header as an initial guess.
        merged, _ = merge_correlation(data, header, maxregister=None)
        dripconfig.load()
        # should be closer to data than 0
        mmax = np.nanmax(merged)
        assert dmax - mmax < mmax
    def test_resize(self, capsys):
        """resize=True enlarges output; border setting is reported but does
        not change the output shape or values."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        msmall, vsmall = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=False)
        mlarge, vlarge = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=True)
        for s, l in zip(msmall.shape, mlarge.shape):
            assert s < l
        for s, l in zip(vsmall.shape, vlarge.shape):
            assert s < l
        # test border
        dripconfig.configuration['border'] = 10
        mborder, vborder = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=False)
        assert mborder.shape[0] == msmall.shape[0]
        assert mborder.shape[1] == msmall.shape[1]
        capt = capsys.readouterr()
        assert 'Removing 10 pixel border from consideration' in capt.out
        assert np.allclose(msmall, mborder, equal_nan=True)
| [
"melanie.j.clarke@nasa.gov"
] | melanie.j.clarke@nasa.gov |
4bc041a12e26bb826ba1c8c93b32b7804937f6ae | e22e03d9761f5c6d581b5af2e77343e8ee4b201d | /edk2/BaseTools/Tests/TestRegularExpression.py | 8b09841d5078520cc2351b9f24739b392a38c852 | [
"OpenSSL",
"BSD-2-Clause"
] | permissive | SamuelTulach/SecureFakePkg | 759975fcc84d62f05ac577da48353752e5334878 | f34080a6c0efb6ca3dd755365778d0bcdca6b991 | refs/heads/main | 2023-08-17T07:51:22.175924 | 2021-10-01T10:46:14 | 2021-10-01T10:46:14 | 410,938,306 | 94 | 14 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | ## @file
# Routines for generating Pcd Database
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
import unittest
from Common.Misc import RemoveCComments
from Workspace.BuildClassObject import ArrayIndex
class TestRe(unittest.TestCase):
def test_ccomments(self):
TestStr1 = """ {0x01,0x02} """
self.assertEquals(TestStr1, RemoveCComments(TestStr1))
TestStr2 = """ L'TestString' """
self.assertEquals(TestStr2, RemoveCComments(TestStr2))
TestStr3 = """ 'TestString' """
self.assertEquals(TestStr3, RemoveCComments(TestStr3))
TestStr4 = """
{CODE({
{0x01, {0x02, 0x03, 0x04 }},// Data comment
{0x01, {0x02, 0x03, 0x04 }},// Data comment
})
} /*
This is multiple line comments
The seconde line comment
*/
// This is a comment
"""
Expect_TestStr4 = """{CODE({
{0x01, {0x02, 0x03, 0x04 }},
{0x01, {0x02, 0x03, 0x04 }},
})
}"""
self.assertEquals(Expect_TestStr4, RemoveCComments(TestStr4).strip())
def Test_ArrayIndex(self):
TestStr1 = """[1]"""
self.assertEquals(['[1]'], ArrayIndex.findall(TestStr1))
TestStr2 = """[1][2][0x1][0x01][]"""
self.assertEquals(['[1]','[2]','[0x1]','[0x01]','[]'], ArrayIndex.findall(TestStr2))
if __name__ == '__main__':
unittest.main()
| [
"samtulach@gmail.com"
] | samtulach@gmail.com |
f3c98c3f6c5281bef2b32f3be2008492f48a8504 | cdd693705f336a225ef82e1b20f32a2192f43b93 | /check_model_layers.py | ef67e10a05efbc10e81b2b76209dbfd7252cfe52 | [] | no_license | gombru/iMaterialistFashion | 5a2d6628729d0e2e8d9eed1c146b09e9b0c284bc | 27a278ef54856abd3415bb3eb4a66cc59e9c1d08 | refs/heads/master | 2020-03-11T09:17:48.069509 | 2018-12-14T09:36:50 | 2018-12-14T09:36:50 | 129,906,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import caffe
# Run in GPU
caffe.set_device(0)
caffe.set_mode_gpu()
# load net
net = caffe.Net('evaluation/deploy.prototxt', '../../datasets/iMaterialistFashion/iMaterialistFashion_Inception_iter_95000.caffemodel', caffe.TEST)
print net.blobs
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
507c972bd2d7d0d30352b3d624b7ae44473e0264 | bf287afda71c4360f40ecd78e74049510965db82 | /rampwf/score_types/brier_score.py | 491df244acdd5639c0e22d9e85b7a94e53aac1cf | [
"BSD-3-Clause"
] | permissive | djgagne/ramp-workflow | 26b1bb7188c734c7a46c1c8298504ab522fafda6 | cf1f53e5ef5a2b7d5ca27a21ca30098a17e9fcd7 | refs/heads/master | 2021-01-23T21:48:30.554531 | 2017-09-15T14:42:45 | 2017-09-15T14:42:45 | 102,907,436 | 1 | 0 | null | 2017-09-08T21:52:03 | 2017-09-08T21:52:03 | null | UTF-8 | Python | false | false | 6,207 | py | from .base import BaseScoreType
import numpy as np
from sklearn.metrics import brier_score_loss
class BrierScore(BaseScoreType):
is_lower_the_better = True
minimum = 0.0
maximum = 1.0
def __init__(self, name='brier_score', precision=3):
self.name = name
self.precision = precision
def score_function(self, ground_truths, predictions, valid_indexes=None):
"""A hybrid score.
It tests the the predicted _probability_ of the second class
against the true _label index_ (which is 0 if the first label is the
ground truth, and 1 if it is not, in other words, it is the
tru probabilty of the second class). Thus we have to override the
`Base` function here
"""
if valid_indexes is None:
valid_indexes = slice(None, None, None)
y_proba = predictions.y_pred[valid_indexes][:, 1]
y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
self.check_y_pred_dimensions(y_true_proba, y_proba)
return self.__call__(y_true_proba, y_proba)
def __call__(self, y_true_proba, y_proba):
return brier_score_loss(y_true_proba, y_proba)
class BrierSkillScore(BaseScoreType):
is_lower_the_better = False
minimum = -1.0
maximum = 1.0
def __init__(self, name='brier_score', precision=3):
self.name = name
self.precision = precision
def score_function(self, ground_truths, predictions, valid_indexes=None):
"""A hybrid score.
It tests the the predicted _probability_ of the second class
against the true _label index_ (which is 0 if the first label is the
ground truth, and 1 if it is not, in other words, it is the
tru probabilty of the second class). Thus we have to override the
`Base` function here
"""
if valid_indexes is None:
valid_indexes = slice(None, None, None)
y_proba = predictions.y_pred[valid_indexes][:, 1]
y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
self.check_y_pred_dimensions(y_true_proba, y_proba)
return self.__call__(y_true_proba, y_proba)
def __call__(self, y_true_proba, y_proba):
climo = np.ones(y_true_proba.size) * y_true_proba.mean()
bs = brier_score_loss(y_true_proba, y_proba)
bs_c = brier_score_loss(y_true_proba, climo)
return 1 - bs / bs_c
class BrierScoreReliability(BaseScoreType):
is_lower_the_better = True
minimum = 0.0
maximum = 1.0
def __init__(self, name='brier_score', precision=3, bins=np.arange(0, 1.2, 0.1)):
self.name = name
self.precision = precision
self.bins = bins
self.bin_centers = (bins[1:] - bins[:-1]) * 0.05
self.bin_centers[self.bin_centers > 1] = 1
self.bin_centers[self.bin_centers < 0] = 0
def score_function(self, ground_truths, predictions, valid_indexes=None):
"""A hybrid score.
It tests the the predicted _probability_ of the second class
against the true _label index_ (which is 0 if the first label is the
ground truth, and 1 if it is not, in other words, it is the
tru probabilty of the second class). Thus we have to override the
`Base` function here
"""
if valid_indexes is None:
valid_indexes = slice(None, None, None)
y_proba = predictions.y_pred[valid_indexes][:, 1]
y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
self.check_y_pred_dimensions(y_true_proba, y_proba)
return self.__call__(y_true_proba, y_proba)
def __call__(self, y_true_proba, y_proba):
pos_obs_freq = np.histogram(y_proba[y_true_proba == 1], bins=self.bins)[0]
fore_freq = np.histogram(y_proba, bins=self.bins)[0]
pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
for p in range(pos_obs_rel_freq.size):
if fore_freq[p] > 0:
pos_obs_rel_freq[p] = pos_obs_freq[p] / fore_freq[p]
else:
pos_obs_rel_freq[p] = np.nan
score = 1 / float(y_proba.size) * np.nansum(fore_freq * (self.bin_centers - pos_obs_rel_freq) ** 2)
return score
class BrierScoreResolution(BaseScoreType):
is_lower_the_better = False
minimum = 0.0
maximum = 1.0
def __init__(self, name='brier_score', precision=3, bins=np.arange(0, 1.2, 0.1)):
self.name = name
self.precision = precision
self.bins = bins
self.bin_centers = (bins[1:] - bins[:-1]) * 0.05
self.bin_centers[self.bin_centers > 1] = 1
self.bin_centers[self.bin_centers < 0] = 0
def score_function(self, ground_truths, predictions, valid_indexes=None):
"""A hybrid score.
It tests the the predicted _probability_ of the second class
against the true _label index_ (which is 0 if the first label is the
ground truth, and 1 if it is not, in other words, it is the
tru probabilty of the second class). Thus we have to override the
`Base` function here
"""
if valid_indexes is None:
valid_indexes = slice(None, None, None)
y_proba = predictions.y_pred[valid_indexes][:, 1]
y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
self.check_y_pred_dimensions(y_true_proba, y_proba)
return self.__call__(y_true_proba, y_proba)
def __call__(self, y_true_proba, y_proba):
"""
See Murphy (1973) A vector partition of the probability score
"""
np.seterr(divide="ignore")
pos_obs_freq = np.histogram(y_proba[y_true_proba == 1], bins=self.bins)[0]
fore_freq = np.histogram(y_proba, bins=self.bins)[0]
climo = y_true_proba.mean()
unc = climo * (1 - climo)
pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
for p in range(pos_obs_rel_freq.size):
if fore_freq[p] > 0:
pos_obs_rel_freq[p] = pos_obs_freq[p] / fore_freq[p]
else:
pos_obs_rel_freq[p] = np.nan
score = 1 / float(y_proba.size) * np.nansum(fore_freq * (pos_obs_rel_freq - climo) ** 2)
return score / unc
| [
"djgagne@ou.edu"
] | djgagne@ou.edu |
e70211a6287250bb4f1c24e75fbc7cbe02920446 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03610/s002378580.py | 10133c2cc30824488c99f4f6846df646f0f70405 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | s = input()
s_list = list(s)
l = len(s_list)
k = []
for i in range(0,l,2):
k.append(s_list[i])
s_joined = ''.join(k)
print("{}".format(s_joined)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e225c392fed27a2f54d0b2c84bee22f46b5f8f73 | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/client_20210614202922.py | 5127563384ff453b6516c3cabfe2a42f5794beb1 | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 4,670 | py | import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = str("video "+str(i+1)).encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address)
fin_flag = raw[5] % 2
recvdata += data[s:]
if(raw[2] == ack_seq and raw[7] == 0):
if(fin_flag):
break
elif(raw[2] == ack_seq):
print("Receive ERROR packet from ", address)
ack_seq += 1
counter += 1
# --------------------------------------------
# send ACK
if(counter == 3 or fin_flag):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,"with ack seq:", ack_seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
# ----------------------
# def init_new
threads = []
# Calculation--------------------------------------
print("Demo calculation function")
init_new_calc_req("calc (5+5)-(10*10)+(30/6)+")
print("-"*60)
print("Demo DNS request function")
# for i in range(3):
# threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
# threads[-1].start()
# for i in range(2):
# threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
# threads[-1].start() | [
"tom95011@gmail.com"
] | tom95011@gmail.com |
5600afb59e1efe3b3404e4b6d43937db72335b2f | 40438f8fb232f8f5a213be6d9fd634fcad75f17f | /1_decisionTreeClassifier.py | 5d76aa69af68713d1e947f992f00fad0fc0c0528 | [] | no_license | LintangWisesa/ML_Sklearn_DecisionTree | 6ceaec0ef0c6191353751581d682ac5658eed1ee | b58779d08d4a39090554bdb725fd668e5f748b38 | refs/heads/master | 2020-04-28T14:46:36.064558 | 2019-07-05T02:27:05 | 2019-07-05T02:27:05 | 175,348,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | import pandas as pd
import numpy as np
# =================================
# load csv & create dataframe
df = pd.read_csv('0_data.csv')
# print(df)
# =================================
# convert nominal data => ordinal data
from sklearn.preprocessing import LabelEncoder
labelKantor = LabelEncoder()
df['kantorLE'] = labelKantor.fit_transform(df['kantor'])
labelJabatan = LabelEncoder()
df['jabatanLE'] = labelJabatan.fit_transform(df['jabatan'])
labelTitel = LabelEncoder()
df['titelLE'] = labelTitel.fit_transform(df['titel'])
df = df.drop(
['kantor', 'jabatan', 'titel'],
axis = 'columns'
)
# print(df)
# ===============================
# kantor : 0 Facebook, 1 Google, 2 Tesla
# jabatan : 0 GM, 1 Manager, 2 Staf
# titel : 0 S1, 1 S2
# ===============================
# decision tree algo
from sklearn import tree
model = tree.DecisionTreeClassifier()
# train
model.fit(
df[['kantorLE', 'jabatanLE', 'titelLE']],
df['gaji>50']
)
# accuracy
acc = model.score(
df[['kantorLE', 'jabatanLE', 'titelLE']],
df['gaji>50']
)
print(acc * 100, '%')
# predict kantor, jabatan, titel
print(model.predict([[0, 0, 0]]))
print(model.predict([[2, 0, 0]]))
print(model.predict([[1, 3, 0]]))
| [
"lintangwisesa@ymail.com"
] | lintangwisesa@ymail.com |
c1c88a42c6e2b5f2661230d25e9771b94beb8910 | 5524e844f2cbfeebc769397c5a489183db886612 | /3_chapter/links.py | 2f10679f79c029f4c14b210d4d4aebdc376f88b6 | [] | no_license | sirajmuneer123/anand_python_problems | 7c674fbf5097f82c1bd866704bd95866fe0286db | 3c0cb960533da6362e53612cbaf19130a2d69d30 | refs/heads/master | 2021-01-04T02:36:46.064891 | 2015-09-08T05:32:46 | 2015-09-08T05:32:46 | 40,727,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | '''
Problem 8: Write a program links.py that takes URL of a webpage as argument and prints all the URLs linked from that webpage.
'''
import urllib
import re
import sys
url=sys.argv[1]
#connect to a URL
website = urllib.urlopen(url)
#read html code
html = website.read()
#use re.findall to get all the links
links = re.findall('"((http|ftp)s?://.*?)"', html)
for i in links:
print i,"\n"
| [
"sirajmuneer4@gmail.com"
] | sirajmuneer4@gmail.com |
9e454ce7dc822bc4e590a1f283e9adfdb8a1967d | 083c174d42b658b108d35b1600c88ee2fac1b187 | /mvp/config.py | 705ed800f9462b85fc0772f9da695fbc59195c4d | [
"MIT"
] | permissive | sgodanas/mvp | 5202157ad5217e5a19e9bbfe80fa7c5838e3f3ad | 1814db4a7f9966e3c066433f1864c77b8cd0ad5d | refs/heads/master | 2023-04-18T08:31:03.142712 | 2021-04-27T15:09:45 | 2021-04-27T15:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
'''
Configure mvp presets and hooks
'''
import os
import sys
USER_PRESETS_PATH = os.path.expanduser('~/.mvp')
PRESETS_PATH = [USER_PRESETS_PATH]
# Add paths from MVP_PRESETS env var
for path in os.environ.get('MVP_PRESETS', '').split(os.pathsep):
if path:
PRESETS_PATH.insert(0, path)
for path in PRESETS_PATH:
if not os.path.exists(path):
os.makedirs(path)
sys.path.insert(1, path)
| [
"danielbradham@gmail.com"
] | danielbradham@gmail.com |
9249ad119efcabee5c40af68d32a6c718bedc9cd | 6909de83dd90ee1169d6c453c327ab2ce2687485 | /scheme/tests/11.py | 9cbe458b96614b51b34436060c73fa7461d32e63 | [] | no_license | dantefung/cs61a-2021-summer | 730cb0b9ab7327c32c619779d71882531bf328dd | 4f22f20fcfddfb5bf121081919310413209da1b2 | refs/heads/master | 2023-08-19T14:51:27.380738 | 2021-11-01T06:54:33 | 2021-11-01T06:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,528 | py | test = {
"name": "Problem 11",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (define (square x) (* x x))
square
scm> square
(lambda (x) (* x x))
scm> (square 21)
441
scm> square ; check to make sure lambda body hasn't changed
(lambda (x) (* x x))
scm> (define square (lambda (x) (* x x)))
square
scm> (square (square 21))
194481
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> ((lambda (x) (list x (list (quote quote) x))) (quote (lambda (x) (list x (list (quote quote) x))))) ; if you're failing this test case and have checked your implementation of Q11, you may want to check your Q6 solution
((lambda (x) (list x (list (quote quote) x))) (quote (lambda (x) (list x (list (quote quote) x)))))
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "scheme",
},
{
"cases": [
{
"code": r"""
>>> double = do_lambda_form(read_line("((n) (* 2 n))"), env) # make double a LambdaProcedure that doubles a number
>>> f1 = double.make_call_frame(Pair(10, nil), env)
>>> f1.lookup('n')
10
>>> env.define('n', 5)
>>> add_n = do_lambda_form(read_line("((x) (+ x n))"), env)
>>> f2 = add_n.make_call_frame(Pair(5, nil), f1) # pass in a different environment as env
>>> f2.lookup('x')
5
>>> f2.lookup('n') # Hint: make sure you're using self.env not env
5
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
>>> do_twice = do_lambda_form(read_line("((f x) (f (f x)))"), env) # make do_twice a LambdaProcedure that takes f, x, and returns f(f(x))
>>> double = do_lambda_form(read_line("((x) (* 2 x))"), env) # make double a LambdaProcedure that doubles a number
>>> call_frame = do_twice.make_call_frame(Pair(double, Pair(3, nil)), env) # Hint: make sure you're not evaluating args again in make_call_frame
>>> call_frame.lookup('x') # Check that x is properly defined
3
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": r"""
>>> from scheme import *
>>> env = create_global_frame()
""",
"teardown": "",
"type": "doctest",
},
{
"cases": [
{
"code": r"""
scm> (define (outer x y)
.... (define (inner z x)
.... (+ x (* y 2) (* z 3)))
.... (inner x 10))
71fe94b728b1cb1923a1c51c2533bcd8
# locked
scm> (outer 1 2)
5d3ec98dabcf5b4a06694ccc93722cfb
# locked
scm> (define (outer-func x y)
.... (define (inner z x)
.... (+ x (* y 2) (* z 3)))
.... inner)
0b6323ff730faa1f7ac702f64f4cbfcb
# locked
scm> ((outer-func 1 2) 1 10)
5d3ec98dabcf5b4a06694ccc93722cfb
# locked
""",
"hidden": False,
"locked": True,
},
{
"code": r"""
scm> (define square (lambda (x) (* x x)))
square
scm> (define (sum-of-squares x y) (+ (square x) (square y)))
sum-of-squares
scm> (sum-of-squares 3 4)
25
scm> (define double (lambda (x) (* 2 x)))
double
scm> (define compose (lambda (f g) (lambda (x) (f (g x)))))
compose
scm> (define apply-twice (lambda (f) (compose f f)))
apply-twice
scm> ((apply-twice double) 5)
20
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "scheme",
},
],
}
| [
"wuyudi1109@gmail.com"
] | wuyudi1109@gmail.com |
a8d7239b51a01a59c937dba8d1195588f320b114 | 007d35a3a2def8b0801ca0eab3469e8422b9f767 | /1_ Preparation/2_Triangle.py | 49dfe65bba82534c2104ba8055903f0b773eb657 | [] | no_license | yosho-18/Programming-Contest-Challenge-Book_Ant-Book | 91fedf6f608f2e0f9d410922c847d2a2046c1b95 | 94cd41d8c7be50e2f09150ac63b86821933268f8 | refs/heads/master | 2020-06-02T10:53:09.218986 | 2019-06-10T08:55:06 | 2019-06-10T08:55:06 | 191,132,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | n = int(input())
a = [int(i) for i in input().split()]
#Longest bar < sum other bar
ans = 0
for i in range(n):
for j in range(i + 1, n):
for k in range(j + 1, n):
len = a[i] + a[j] + a[k]
ma = max(a[i], a[j], a[k])
rest = len - ma
if ma < rest:
ans = max(ans, len)
print(ans) | [
"44283410+wato18@users.noreply.github.com"
] | 44283410+wato18@users.noreply.github.com |
f1bdee81fdd6d3bda77e4cfdcccb42cda93e8d97 | 9009ad47bc1d6adf8ee6d0f2f2b3125dea44c0aa | /00-MY-TEMPLATE-001.py | 84d5cda056758d652204415992f14aea2354c511 | [] | no_license | luctivud/Coding-Trash | 42e880624f39a826bcaab9b6194add2c9b3d71fc | 35422253f6169cc98e099bf83c650b1fb3acdb75 | refs/heads/master | 2022-12-12T00:20:49.630749 | 2020-09-12T17:38:30 | 2020-09-12T17:38:30 | 241,000,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # जय श्री राम
import sys; import math; from collections import *
# sys.setrecursionlimit(10**6)
def get_ints(): return map(int, input().split())
def get_list(): return list(get_ints())
def printspx(*args): return print(*args, end="")
def printsp(*args): return print(*args, end=" ")
MODPRIME = int(1e9+7); BABYMODPR = 998244353; MAXN = int(1e5)
sys.stdin = open("input.txt","r"); sys.stdout = open("output.txt","w")
# for _testcases_ in range(int(input())):
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
''' | [
"luctivud@gmail.com"
] | luctivud@gmail.com |
a41b5aa994c57a14e4dc4af3ec85cdec17b93536 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/hzy46_fast-neural-style-tensorflow/fast-neural-style-tensorflow-master/preprocessing/preprocessing_factory.py | 39ee9bf7bc24744fd430f7a725f856708945b2d0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,861 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
preprocessing_fn: A function that preprocessing a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
def unprocessing_fn(image, **kwargs):
return preprocessing_fn_map[name].unprocess_image(
image, **kwargs)
return preprocessing_fn, unprocessing_fn
| [
"659338505@qq.com"
] | 659338505@qq.com |
729b0ad28719a30dcb5d3de097cab5678f6cb208 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kS8tfJD2ggohQbWx7_9.py | 10d83a002c78aabd9196a850bec31aca4ebc4060 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py |
def last_name_lensort(names):
return [' '.join(a) for a in sorted([x.split() for x in names],key=lambda x: (len(x[1]), x[1]))]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
dc8aa8d63bfa7714751d7ffc7b3f4c2a3e2b6b2f | 237ff79d806953b55d9e6db8d2fa498333123949 | /app_verifications/text/text_verifications.py | f234ffd13a301804f934726714fd3d7cbafd388b | [
"MIT"
] | permissive | kskarbinski/threads-api | f86358f43a31271cd939695af200975d5c2825ba | c144c1cb51422095922310d278f80e4996c10ea0 | refs/heads/master | 2020-03-11T04:29:16.776817 | 2019-11-29T15:40:34 | 2019-11-29T15:40:34 | 129,777,543 | 0 | 0 | MIT | 2019-11-29T15:40:35 | 2018-04-16T17:00:27 | Python | UTF-8 | Python | false | false | 531 | py | from .text_checks import TextChecks
from app_errors.http_exceptions import HttpException
class TextVerifications(TextChecks):
"""
Every method verifies something related to text. If verification does not pass a HttpException is thrown.
"""
def verify_text_length(self, min_l, max_l):
if self.check_text_length(min_l=min_l, max_l=max_l):
return True
HttpException.throw_422(
"Text has to be between {min_l} and {max_l} characters".format(min_l=min_l, max_l=max_l)
)
| [
"kripperr@gmail.com"
] | kripperr@gmail.com |
1fd59d3de0e61312329e973df01ed1a703dd2d0c | 2db55786df5fdf009eeec5f000937f0ec3eb6527 | /tasks/files/config/polybar/scripts/translate_lyrics | 475248a2e2c07bd0ca45147da10883c36f61e07c | [] | no_license | preachermanx/environment | 08ef7a1e558a3c9c78bbfb789584b81997b9787d | cba8277663915ce173bab79f44f890a666abbbed | refs/heads/master | 2020-07-21T13:05:45.011963 | 2019-09-05T08:40:31 | 2019-09-05T08:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | #!/usr/bin/env python3
import os.path
import re
from subprocess import Popen, PIPE
def convert_line(line):
time_search = re.search(r'\[(\d+):(\d+).\d+\](.*)', line)
in_seconds = 0
if time_search:
in_seconds = int(time_search.group(1)) * 60
in_seconds += int(time_search.group(2))
return (in_seconds, time_search.group(3))
def lyric_exists(music, artist):
return os.path.exists('/tmp/{}_{}.txt'.format(music, artist))
def save_lyric(music, artist, output):
with open('/tmp/{}_{}.txt'.format(music, artist), 'w') as file:
file.write(output)
def recover_lyric(music, artist):
with open('/tmp/{}_{}.txt'.format(music, artist)) as file:
return file.read()
result = os.popen(
'/home/diegorubin/.newenv/polybar/scripts/lyrics | trans :pt')
result = result.readlines()[2]
print(result)
| [
"rubin.diego@gmail.com"
] | rubin.diego@gmail.com | |
ea780519562cd70c7534e0046b792df7a61f53bb | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/scikit-image-0.12.3-np110py27_0/lib/python2.7/site-packages/skimage/segmentation/tests/test_quickshift.py | 21dbe8eaab30c11172730b52c6e9c8cdb2b3a251 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,760 | py | import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_true
from skimage._shared.testing import assert_greater, test_parallel
from skimage.segmentation import quickshift
@test_parallel()
def test_grey():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, 10:] = 0.2
img[10:, :10] = 0.4
img[10:, 10:] = 0.6
img += 0.1 * rnd.normal(size=img.shape)
seg = quickshift(img, kernel_size=2, max_dist=3, random_seed=0,
convert2lab=False, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
# that mostly respect the 4 regions:
for i in range(4):
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
assert_greater(hist[i], 20)
def test_color():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = quickshift(img, random_seed=0, max_dist=30, kernel_size=10, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 1)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 0)
assert_array_equal(seg[10:, 10:], 3)
seg2 = quickshift(img, kernel_size=1, max_dist=2, random_seed=0,
convert2lab=False, sigma=0)
# very oversegmented:
assert_equal(len(np.unique(seg2)), 7)
# still don't cross lines
assert_true((seg2[9, :] != seg2[10, :]).all())
assert_true((seg2[:, 9] != seg2[:, 10]).all())
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
d582dbffd90140f90a648bfa5407b2d45d6eb998 | aa01560e68a07033d4b24c4770966771349e2b4f | /src/jobs/migrations/0007_jobapplication_timestamp.py | 4d5d3057c72f090b51b9eeaba9de95a481cbf144 | [] | no_license | fluffcoding/solitaireHR | a0a357e1b19b955caae8df11ca92188cad79e217 | b97a29f9accc5b45cd62986b62673a6ba802771b | refs/heads/main | 2023-04-05T11:46:41.855323 | 2021-04-26T04:57:27 | 2021-04-26T04:57:27 | 322,067,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.1.2 on 2020-12-09 15:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0006_auto_20201209_1527'),
]
operations = [
migrations.AddField(
model_name='jobapplication',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
| [
"fluffcoding@gmail.com"
] | fluffcoding@gmail.com |
3157e2ee8eb20e93eacbac32b7d5b25a09afd0f8 | aa9f8d7b48dbe3cbecca5eaa2ad3bbea262dbf24 | /preprocess_tweets.py | afa800638256beb34c242c6a4bccc1bca1796074 | [] | no_license | ssinayoko/Pyhton_Cours | 5381a98c42cba021f34b482776933accd3442a6c | 56b391aeb673b40b564c59053295ac68e2576a1c | refs/heads/master | 2020-08-30T13:32:59.662715 | 2019-10-25T12:50:25 | 2019-10-25T12:50:25 | 218,395,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py |
import re  # NOTE(review): unused in this script; kept in case other tooling imports it.


def _quote_text_column(line):
    """Return *line* with the free-text tweet column wrapped in double quotes.

    The raw export is not CSV-escaped: the tweet text can itself contain
    commas.  The ',http' URL column and the trailing ',True'/',False'
    flag are used as anchors to inject the missing quotes.  The first 20
    characters are skipped so the leading columns are never rewritten --
    assumes the text column starts after offset 20 (TODO: confirm against
    the export format).
    """
    if ",http" not in line:
        return line
    return line[:20] + (
        line[20:]
        .replace(",http", ',"http')
        .replace(",True", '",True')
        .replace(",False", '",False')
    )


# Stream the export line by line (instead of readlines() on the whole file)
# and write the repaired rows to tweets.csv.  The dead commented-out
# try/finally block of the original has been removed.
with open("tweets.csv", "w", encoding="utf-8") as result:
    with open("stockerbot-export.csv", "r", encoding="utf-8") as f:
        result.writelines(_quote_text_column(line) for line in f)
| [
"mickael.bolnet@gmail.com"
] | mickael.bolnet@gmail.com |
d43ae73fa9193326ebf2db488e5cf9323da92c95 | 1904c201d353118f319586d7ed6827b4bbefa0fe | /gcg/exceptions.py | b32edae34ed4afac8ecb74b1b3119f3fa06f2d23 | [] | no_license | byt3-m3/lab_conf_generator | aa9c023c6223869d41d5cf4d5a1d072f3cd8db3c | eb79ca8cb6f6bb45b44605660e70d0585968c598 | refs/heads/master | 2022-12-20T13:42:38.219334 | 2020-10-14T12:55:46 | 2020-10-14T12:55:46 | 225,886,211 | 0 | 1 | null | 2022-12-08T09:54:37 | 2019-12-04T14:28:38 | Python | UTF-8 | Python | false | false | 416 | py | class InvalidTypeError(BaseException):
"""
Used to represent an Invalid Type
"""
class LCGSchemaValidationError(Exception):
    """Raised when an LCG schema fails validation.

    Inherits from Exception rather than BaseException: BaseException is
    reserved for interpreter-exit signals (SystemExit, KeyboardInterrupt),
    and subclassing it would let this error bypass ordinary
    ``except Exception`` handlers.
    """
class GCGValidationError(Exception):
    """Raised when GCG schema validation fails.

    Inherits from Exception rather than BaseException so that standard
    ``except Exception`` handlers can catch it (BaseException is reserved
    for interpreter-exit signals).
    """
class GCGError(Exception):
    """Used to represent a basic GCG error.

    Inherits from Exception rather than BaseException so that standard
    ``except Exception`` handlers can catch it (BaseException is reserved
    for interpreter-exit signals).
    """
| [
"cbaxtertech@gmail.com"
] | cbaxtertech@gmail.com |
849e328890d84d7e0fef4cd4f87c9db2a4533ae8 | 4e33bfc70b0fa0a07ff00960406281ec04044fbc | /task_server/test.py | 8120b975f2822afddaf7102c04169067ffe3ca9b | [] | no_license | JX-Wang/NS_Server_Kafka | 53e22cb7858c20e2ad6fa970ba8b2a3011e7652f | 59f1270084c1cab3736bdab16f17b64cf04722fa | refs/heads/master | 2022-01-24T05:12:19.276923 | 2019-08-04T13:33:36 | 2019-08-04T13:33:36 | 197,998,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # from confluent_kafka import Consumer, KafkaError
#
# mybroker = "10.245.146.221:9092"
#
# c = Consumer({
# 'bootstrap.servers': mybroker,
# 'group.id': '1',
# 'default.topic.config': {
# 'auto.offset.reset': 'smallest'
# }
# })
#
# c.subscribe(['test'])
#
# while True:
# msg = c.poll()
#
# if msg is None:
# print "ops"
# continue
# if msg.error():
# if msg.error().code() == KafkaError._PARTITION_EOF:
# continue
# else:
# print(msg.error())
# break
#
# print('Received message: {}'.format(msg.value().decode('utf-8')))
#
# c.close()
from confluent_kafka import Producer

# Producer wired to the lab Kafka broker.
producer = Producer({'bootstrap.servers': '10.245.146.221:9092'})


def delivery_report(err, msg):
    """Per-message delivery callback: print the outcome of each send."""
    if err is None:
        print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
    else:
        print('Message delivery failed: {}'.format(err))


# Send one test payload, let the callback fire, then flush outstanding sends.
data = "XXXX"
producer.produce('test', data.encode('utf-8'), callback=delivery_report)
producer.poll(10)
producer.flush()
| [
"1411349759@qq.com"
] | 1411349759@qq.com |
058851f99bfd8b50b0aac5390fb417b484e5be27 | 1b6a460652f5b43e42614d63c26a57b4e14b6360 | /tests/vm/primitives/test_mirror.py | 5630c176666b04ef700b94d7df85b77fd2a521c6 | [] | no_license | cfbolz/tinySelf | 68800b2782f998f46b7950a688a6a0ec663c661b | f20b38aaaa85b4be89cf1fc2ebc890cd1e33c62b | refs/heads/master | 2020-05-09T10:02:05.385599 | 2019-04-12T15:12:23 | 2019-04-12T15:15:50 | 181,025,576 | 0 | 0 | null | 2019-04-12T14:35:12 | 2019-04-12T14:35:11 | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
from tinySelf.vm.primitives import Mirror
from tinySelf.vm.primitives import PrimitiveIntObject
from tinySelf.vm.primitives import PrimitiveStrObject
from tinySelf.vm.object_layout import Object
def test_mirror():
    """Mirror's ``toSlot:Add:`` primitive adds a slot to the wrapped object."""
    target = Object()
    mirror = Mirror(target)
    assert not target.slot_lookup("v")

    to_slot_add = mirror.slot_lookup("toSlot:Add:")
    assert to_slot_add.map.primitive_code

    returned = to_slot_add.map.primitive_code(
        None,
        mirror,
        [PrimitiveStrObject("v"), PrimitiveIntObject(1)],
    )
    # The primitive returns the mirrored object and the slot is now set.
    assert returned == target
    assert target.slot_lookup("v") == PrimitiveIntObject(1)
| [
"bystrousak@kitakitsune.org"
] | bystrousak@kitakitsune.org |
46b800034adec043d313b5378e4f031082b04f5b | 78d5a6e0846cb6b03544e4f717651ca59dfc620c | /treasury-admin/cashflow/migrations/0014_auto_20180509_1721.py | b65d15269a321a05a67752040eeb929833d9dfb9 | [] | no_license | bsca-bank/treasury-admin | 8952788a9a6e25a1c59aae0a35bbee357d94e685 | 5167d6c4517028856701066dd5ed6ac9534a9151 | refs/heads/master | 2023-02-05T12:45:52.945279 | 2020-12-13T08:07:41 | 2020-12-13T08:07:41 | 320,323,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-09 16:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint ``CashflowDetail.account`` at ``tiers.AccountCorresp`` (generated)."""
    dependencies = [
        ('cashflow', '0013_delete_cashflowdetailtwowayflowproxy'),
    ]
    operations = [
        # CASCADE: deleting the referenced AccountCorresp also deletes the
        # dependent CashflowDetail rows.
        migrations.AlterField(
            model_name='cashflowdetail',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tiers.AccountCorresp'),
        ),
    ]
| [
"cn.makodo@gmail.com"
] | cn.makodo@gmail.com |
8283511947571039bbb770029f9d366f2c907ae6 | f47e6aab3e58a1cc12e390492fe982ab1da9aa28 | /modoboa/core/__init__.py | cc91ec6c11ea9816f34bd1f1a699c6c4f799d4c2 | [
"ISC"
] | permissive | kaxdev/modoboa | 7e80b5496f362adf5c503588cc5680a894e365eb | d5f2652a5f957a0d68043f063b8c530dbf76ebf6 | refs/heads/master | 2021-01-12T21:45:59.003052 | 2015-04-10T07:16:58 | 2015-04-10T07:16:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | import os
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.lib import parameters, events
# Names of the events the core application declares; other components can
# register callbacks for them via modoboa.lib.events (see load_core_settings).
BASE_EVENTS = [
    "CanCreate",
    "AccountCreated",
    "AccountAutoCreated",
    "AccountModified",
    "AccountDeleted",
    "AccountExported",
    "AccountImported",
    "PasswordUpdated",
    "ExtraAccountActions",
    "RoleChanged",
    "GetExtraRoles",
    "GetExtraRolePermissions",
    "PasswordChange",
    "UserCanSetRole",
    "InitialDataLoaded",
    "UserMenuDisplay",
    "AdminMenuDisplay",
    "GetStaticContent",
    "UserLogin",
    "UserLogout",
    "GetAnnouncement",
    "TopNotifications",
    "ExtraAdminContent",
    "ExtraUprefsRoutes",
    "ExtraUprefsJS",
    "GetExtraParameters",
    "ExtraFormFields",
    "SaveExtraFormFields",
]
# Extra model permissions granted per role, as (app label, model, codename)
# triples; SimpleUsers receive no additional model permissions.
PERMISSIONS = {
    "DomainAdmins": [
        ["core", "user", "add_user"],
        ["core", "user", "change_user"],
        ["core", "user", "delete_user"],
    ],
    "SimpleUsers": []
}
def load_core_settings():
    """Load core settings.
    This function must be manually called (see :file:`urls.py`) in
    order to load base settings.
    """
    # Imported lazily; presumably avoids a circular import between this
    # package and app_settings at module-load time -- TODO confirm.
    from modoboa.core.app_settings import GeneralParametersForm, UserSettings
    # Both forms are registered under the same "General" section label.
    parameters.register(GeneralParametersForm, ugettext_lazy("General"))
    parameters.register(UserSettings, ugettext_lazy("General"))
    # Make the base event names known to the event dispatcher.
    events.declare(BASE_EVENTS)
@events.observe("TopNotifications")
def check_for_new_version(request, include_all):
    """Emit a top-bar notification when a newer Modoboa release exists.

    Only superusers are notified; non-superusers always get an empty list.
    """
    from modoboa.core.utils import new_version_available

    if not request.user.is_superuser:
        return []
    if new_version_available(request) is not None:
        return [{
            "id": "newversionavailable",
            "url": reverse("core:index") + "#info/",
            "text": _("New Modoboa version available"),
            "level": "info",
        }]
    # No update available: keep a placeholder entry only when the caller
    # asked for the full list.
    if include_all:
        return [{"id": "newversionavailable"}]
    return []
| [
"tonio@ngyn.org"
] | tonio@ngyn.org |
3b630469b7e080681f9dcfbc22de9de397c47abe | 90afc972b2259054e7cc9b63ec19bf11c3153e48 | /problems/B/KanaAndDragonQuestGame.py | fa2b220b68f40f19c967dba57ce00ca8c6361ddf | [
"MIT"
] | permissive | Ahsanhabib1080/CodeForces | 88ca768ceefa409b0c10cac500148bfaf19e3c7e | 707b374f03012ec68054841f791d48b33ae4ef1b | refs/heads/master | 2023-05-26T20:58:23.180369 | 2021-06-19T02:40:15 | 2021-06-19T02:40:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | __author__ = 'Devesh Bajpai'
'''
https://codeforces.com/contest/1337/problem/B
Solution: The need is decide the ordering of spells.
Void Absorption: VA : x/2 + 10
Lightening Strikes: LS: x - 10
if we do LS and then VA: (x-10)/2 + 10 = x/2 - 5 + 10 = x/2 + 5
if we do VA and then LS: x/2 + 10 - 10 = x/2
Hence we would want to do all the VAs till doing that actually reduces x. e.g. imagine x = 2.
Then x/2 + 10 = 11. Then in the second round it becomes 11/2 + 10 = 15. So it starts growing. That is when
we need to employ LS. So once the VAs are over, we need to check we have enough LS to bring x to 0 or less.
This means x <= m * 10. Return the decision accordingly.
'''
def solve(x, n, m):
    """Return "YES" if a dragon with x hit points can be brought to <= 0.

    Spells:
      * Void Absorption (n casts):  x -> floor(x / 2) + 10
      * Lightning Strike (m casts): x -> x - 10

    Strategy: cast Void Absorption only while it actually lowers x, then
    check whether the m Lightning Strikes (10 damage each) can finish the
    job.  Floor division (//) keeps the integer semantics identical under
    both Python 2 and Python 3; the original ``x / 2`` silently became
    float division on Python 3.
    """
    # The loop condition already guarantees n > 0, so the redundant inner
    # `if n > 0` check of the original has been dropped.
    while x > 0 and n > 0 and x // 2 + 10 < x:
        x = x // 2 + 10
        n -= 1
    return "YES" if x <= m * 10 else "NO"
# Python 2 entry point: read t test cases of "x n m", solve each, then
# print all verdicts after input is fully consumed, one per line.
if __name__ == "__main__":
    t = int(raw_input())  # number of test cases
    results = list()
    for _ in xrange(0, t):
        x, n, m = map(int, raw_input().split(" "))
        results.append(solve(x, n, m))
    for result in results:
        print result
| [
"devesh.bajpai19@gmail.com"
] | devesh.bajpai19@gmail.com |
23c67eb0e36af3db5f176a78c5e5de976a388429 | e780a5bd72f98ca2513c993d64a85b08578166a6 | /zinstance/bin/fullrelease | 92515ab7490c40a6ef8bc78e3378398b986397a2 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | #!/home/ubuntu/workspace/Plone/zinstance/bin/python
import sys
# zc.buildout-generated console script: prepend the pinned egg paths so the
# exact release-tooling versions from the buildout cache are imported.
# Do not edit by hand -- regenerated by buildout.
sys.path[0:0] = [
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/zest.releaser-6.13.4-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/zest.pocompile-1.4-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/python_gettext-3.0-py2.7.egg',
  '/home/ubuntu/workspace/Plone/zinstance/lib/python2.7/site-packages',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/twine-1.9.1-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/six-1.10.0-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/colorama-0.3.9-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/tqdm-4.19.4-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/requests_toolbelt-0.8.0-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/requests-2.18.4-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/pkginfo-1.4.1-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/urllib3-1.22-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/idna-2.6-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/chardet-3.0.4-py2.7.egg',
  '/home/ubuntu/workspace/Plone/buildout-cache/eggs/certifi-2017.11.5-py2.7.egg',
  ]
# Imported after the path injection above so the pinned egg wins.
import zest.releaser.fullrelease
if __name__ == '__main__':
    sys.exit(zest.releaser.fullrelease.main())
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com | |
86e84e1014becbb3d10692c91931c5a9e404c2ed | 63381bdd170be82ac307cb7038f66883af219b40 | /filmweb/migrations/0005_auto_20190416_0022.py | 59ab2f8337a8792d4910ac3d1e621cb4aed62781 | [] | no_license | ofemofem/filmwebapi | 469bb0592c7c201f09a2fd39f0b027fa0537cd53 | 843d10422ab547e39d6e69febcef2ff6887a2a48 | refs/heads/master | 2020-05-10T00:01:59.815109 | 2019-05-14T21:53:07 | 2019-05-14T21:53:07 | 181,519,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 2.2 on 2019-04-15 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change ``MovieRate.rate`` to a plain FloatField (generated)."""
    dependencies = [
        ('filmweb', '0004_auto_20190413_1557'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movierate',
            name='rate',
            field=models.FloatField(),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
face982b4ba6173f837eb471228815ecc2215c09 | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0086_Partition_List/__init__.py | c7c3687f27954729aac793e8ba3b85961232e9fd | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 455 | py | ID = '86'
TITLE = 'Partition List'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/partition-list/'
BOOK = False
PROBLEM = r"""Given a linked list and a value _x_, partition it such that all nodes less
than _x_ come before nodes greater than or equal to _x_.
You should preserve the original relative order of the nodes in each of the
two partitions.
For example,
Given `1->4->3->2->5->2` and _x_ = 3,
return `1->2->2->4->3->5`.
"""
| [
"romain_li@163.com"
] | romain_li@163.com |
38cd6a598240e65f5b94b46d8558809f61b8ac38 | 43b6bffc820d26dfd223728bed71241fb3d54983 | /abc/243/b.py | ba04b3356257b7a3f024e0b783adebfaa9600184 | [] | no_license | kiccho1101/atcoder | 3a163b6a38a62c578dad6d15ccb586d0fcd1e004 | c86cb8e08b881a0a01dc2ef538f0699f3951e897 | refs/heads/master | 2023-03-02T13:27:17.747402 | 2022-05-30T13:51:00 | 2022-05-30T13:51:00 | 223,152,693 | 1 | 0 | null | 2023-02-11T01:29:47 | 2019-11-21T10:52:49 | Python | UTF-8 | Python | false | false | 283 | py | N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
count_1 = 0
count_2 = 0
A_set = set(A)
for i in range(N):
if A[i] == B[i]:
count_1 += 1
if B[i] in A_set and A[i] != B[i]:
count_2 += 1
print(count_1)
print(count_2)
| [
"youodf11khp@gmail.com"
] | youodf11khp@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.