Dataset schema (column name, dtype, and observed range: string/list lengths, distinct-value counts, or min/max):

column                 dtype            min / classes                max
blob_id                stringlengths    40                           40
directory_id           stringlengths    40                           40
path                   stringlengths    3                            288
content_id             stringlengths    40                           40
detected_licenses      listlengths      0                            112
license_type           stringclasses    2 values
repo_name              stringlengths    5                            115
snapshot_id            stringlengths    40                           40
revision_id            stringlengths    40                           40
branch_name            stringclasses    684 values
visit_date             timestamp[us]    2015-08-06 10:31:46          2023-09-06 10:44:38
revision_date          timestamp[us]    1970-01-01 02:38:32          2037-05-03 13:00:00
committer_date         timestamp[us]    1970-01-01 02:38:32          2023-09-06 01:08:06
github_id              int64            4.92k                        681M
star_events_count      int64            0                            209k
fork_events_count      int64            0                            110k
gha_license_id         stringclasses    22 values
gha_event_created_at   timestamp[us]    2012-06-04 01:52:49          2023-09-14 21:59:50
gha_created_at         timestamp[us]    2008-05-22 07:58:19          2023-08-21 12:35:19
gha_language           stringclasses    147 values
src_encoding           stringclasses    25 values
language               stringclasses    1 value
is_vendor              bool             2 classes
is_generated           bool             2 classes
length_bytes           int64            128                          12.7k
extension              stringclasses    142 values
content                stringlengths    128                          8.19k
authors                listlengths      1                            1
author_id              stringlengths    1                            132
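The records below follow this schema, one field per line in the order listed above. As a quick illustration of how such a dump can be consumed programmatically, here is a minimal sketch assuming the data is published as a Hugging Face `datasets`-compatible dataset with these columns; the dataset identifier is a placeholder (this page does not name one), and the filter conditions are arbitrary examples rather than an official loading recipe.

# Minimal sketch, assuming a Hugging Face `datasets`-compatible dataset with the
# columns listed above. "<dataset-id>" is a placeholder, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("<dataset-id>", split="train", streaming=True)

# Keep permissively licensed, human-written (non-vendored, non-generated) files.
for example in ds:
    if (
        example["license_type"] == "permissive"
        and not example["is_vendor"]
        and not example["is_generated"]
    ):
        print(example["repo_name"], example["path"], example["length_bytes"])
        break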
5e1380e7e83fc9ec185495578654737b55934163
13f836eb4770d3d2b0e4be27067411a9d71b8e0d
/__init__.py
f61e61e171dabbbe82865b96ec71b02b37e4a5a4
[ "ISC" ]
permissive
willsion/push_api
b6f5395178543a6139bffa2406a8955b69c8b393
91b5ab8f15029a698216791365b2f589dc340d5c
refs/heads/master
2021-01-15T14:23:45.981417
2016-09-07T02:41:35
2016-09-07T02:41:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
390
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
#      FileName: __init__.py
#          Desc: 2015-15/1/7:下午1:32
#        Author: 苦咖啡
#         Email: voilet@qq.com
#      HomePage: http://blog.kukafei520.net
#       History:
# =============================================================================
[ "voilet@voiletdeMacBook-Pro-2.local" ]
voilet@voiletdeMacBook-Pro-2.local
1d5d22515ceb69a934cb6bae79eabbf50bc1f463
47744b621bd0bc03f2eb6c0fead3ad2347a70aac
/ud120-projects/k_means/k_means_3_features.py
54960b687f087c22669e3e9627fb3600a5022b27
[]
no_license
shivam04/udacity-intro-to-machine-learning
55be33ab1c426d7578bac4cf6c23486feca52c0d
5e3a535bc31ec3d29088db832a0fa921a6b4b467
refs/heads/master
2020-04-06T04:20:40.663517
2017-06-24T07:46:43
2017-06-24T07:46:43
82,966,759
0
0
null
null
null
null
UTF-8
Python
false
false
2,623
py
#!/usr/bin/python

"""
    Skeleton code for k-means clustering mini-project.
"""

import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit


def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """ some plotting code designed to help you visualize your clusters """

    ### plot each cluster with a different color--add more colors for
    ### drawing more than five clusters
    colors = ["b", "c", "k", "m", "g"]
    for ii, pp in enumerate(pred):
        plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])

    ### if you like, place red stars over points that are POIs (just for funsies)
    if mark_poi:
        for ii, pp in enumerate(pred):
            if poi[ii]:
                plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()


### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
#print data_dict

### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2, feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )

### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2, f3 in finance_features:
    plt.scatter( f1, f2, f3 )
plt.show()

### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
from sklearn.cluster import KMeans
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2, random_state=0)
pred = clf.fit_predict( finance_features )

### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters3.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
    print "no predictions object named pred found, no clusters to plot"
[ "sinhahsivam04@gmail.com" ]
sinhahsivam04@gmail.com
75ae57ff4ee114e75afc618e0e52b41489e7628d
f6e2744ba52b0655d82071edc741d36cb840e1ff
/Lab1/DataGenerator/CsvParser.py
50f2e1699047b5e143d5ee2e059879b9de1ecd62
[]
no_license
PinarTurkyilmaz/SDM-Lab1
abb77192cf4c85b1751263e1939434cb2de8d3c2
0df7b8d2c47d588e15633a684c857ddda5ebefa7
refs/heads/master
2020-04-28T18:15:46.867738
2019-03-13T18:28:40
2019-03-13T18:28:40
175,473,072
0
0
null
null
null
null
UTF-8
Python
false
false
444
py
import csv

with open('Populate.csv', 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)

    with open('Cititation.csv', 'w') as new_file:
        fieldnames = ['title']
        csv_writer = csv.DictWriter(new_file, fieldnames)
        csv_writer.writeheader()

        for line in csv_reader:
            del line['author']  # remove this column
            csv_writer.writerow(line)
            print(line)
            # upload title to another csv
[ "pinar.turkyilmaz@estudiant.upc.edu" ]
pinar.turkyilmaz@estudiant.upc.edu
ec1ff974949d84acfe1277e786b564a0462c7d31
c20a7a651e63c1e7b1c5e6b5c65c8150898bbaf2
/OJ/LeetCode/74. Search a 2D Matrix.py
bafc00d5a9be337a73709ca26357f10eba16536e
[]
no_license
Nobody0321/MyCodes
08dbc878ae1badf82afaf0c9fc608b70dfce5cea
b60e2b7a8f2ad604c7d28b21498991da60066dc3
refs/heads/master
2023-08-19T14:34:23.169792
2023-08-15T15:50:24
2023-08-15T15:50:24
175,770,050
0
0
null
null
null
null
UTF-8
Python
false
false
712
py
# This problem also appears in 剑指offer (Coding Interviews).
# Each row increases from left to right and each column increases from top to
# bottom, so start from the bottom-left corner, similar to a binary search.
class Solution:
    def searchMatrix(self, matrix, target):
        if matrix == []:
            return False
        height, width = len(matrix), len(matrix[0])
        i, j = height-1, 0
        while 0 <= i <= height-1 and 0 <= j <= width-1:
            if 0 <= i <= height-1 and 0 <= j <= width-1 and matrix[i][j] < target:
                j += 1
            elif 0 <= i <= height-1 and 0 <= j <= width-1 and matrix[i][j] > target:
                i -= 1
            else:
                return True
        return False


if __name__ == "__main__":
    print(Solution().searchMatrix([[1]], 2))
[ "cyk19951@gmail.com" ]
cyk19951@gmail.com
7035acb8a40194eba9b97b8e70803602562936bc
a13ffbab0f24047e43f003131b06052c4a29adff
/Chapter_07/shop/admin.py
c416b49b87b43f83b8f9e5857d303c64c142eaee
[ "Unlicense" ]
permissive
codingEzio/code_py_book_django2_by_example
58a02b7b8e6a549804834d28488412243df94ea2
d215d0c87a557685824286822186966b06fa8d59
refs/heads/master
2020-04-09T18:24:58.821192
2019-01-24T08:49:26
2019-01-24T08:49:26
160,511,430
1
0
null
null
null
null
UTF-8
Python
false
false
964
py
from django.contrib import admin
from parler.admin import TranslatableAdmin

from .models import Category, Product


@admin.register(Category)
class CategoryAdmin(TranslatableAdmin):
    """
        For the "translated_fields",
        you need to use `get_prepopulated_fields`
        instead of `prepopulated_fields`

        Since it provides the same functionality,
        there's no difference actually,
        just a different way to get it :P
    """

    list_display = ['name', 'slug']

    def get_prepopulated_fields(self, request, obj=None):
        return {
            'slug': ('name',)
        }


@admin.register(Product)
class ProductAdmin(TranslatableAdmin):
    list_display = ['name', 'slug', 'price', 'available', 'created', 'updated']
    list_filter = ['available', 'created', 'updated']
    list_editable = ['price', 'available']

    def get_prepopulated_fields(self, request, obj=None):
        return {
            'slug': ('name',)
        }
[ "assassinste@gmail.com" ]
assassinste@gmail.com
ea590224b5586f898dbc17f6cb755bd3676f56a1
ee01a1f16e63483ebfd304b838f015f9f2d168b7
/streamtools/web/main.py
0188e48a1b050926b6b4c97b32e5a60899ae5eef
[ "MIT" ]
permissive
mariocesar/stream-tools
284aa494676d27204d71da3a0bdb9a196bcab861
384c10d364d8b40b9dfa15eeebed15da6f90ed31
refs/heads/master
2022-12-22T14:52:01.033784
2021-07-19T01:32:17
2021-07-19T01:32:17
250,092,686
1
1
MIT
2022-12-12T08:22:55
2020-03-25T21:14:04
Python
UTF-8
Python
false
false
867
py
import asyncio
from asyncio import Queue

from aiohttp import web

from streamtools.relay.main import fetch_events

routes = web.RouteTableDef()


@routes.get("/ws/")
async def websocket_handler(request):
    ws = web.WebSocketResponse()
    await ws.prepare(request)

    task = asyncio.create_task(fetch_events(request.app.queue))

    try:
        while True:
            message = await request.app.queue.get()
            print(message)
            await ws.send_json(message.asdata())
    finally:
        await ws.close()
        task.cancel()

    return ws


@routes.get("/")
async def landing(request):
    return web.Response(text="Hello!")


def get_application():
    app = web.Application()
    app.queue = Queue()
    app.queue.empty()
    app.add_routes(routes)
    return app


if __name__ == "__main__":
    web.run_app(get_application(), port=3000)
[ "mariocesar.c50@gmail.com" ]
mariocesar.c50@gmail.com
636e89e816adde63d47a7c4d4e3d83e62438d8d6
c3dc08fe8319c9d71f10473d80b055ac8132530e
/challenge-133/paulo-custodio/python/ch-1.py
0c9b9ec563b3fa36246755408c816a2874d78618
[]
no_license
southpawgeek/perlweeklychallenge-club
d4b70d9d8e4314c4dfc4cf7a60ddf457bcaa7a1e
63fb76188e132564e50feefd2d9d5b8491568948
refs/heads/master
2023-01-08T19:43:56.982828
2022-12-26T07:13:05
2022-12-26T07:13:05
241,471,631
1
0
null
2020-02-18T21:30:34
2020-02-18T21:30:33
null
UTF-8
Python
false
false
760
py
#!/usr/bin/env python3
# Challenge 133
#
# TASK #1 > Integer Square Root
# Submitted by: Mohammad S Anwar
# You are given a positive integer $N.
#
# Write a script to calculate the integer square root of the given number.
#
# Please avoid using built-in function. Find out more about it here.
#
# Examples
# Input: $N = 10
# Output: 3
#
# Input: $N = 27
# Output: 5
#
# Input: $N = 85
# Output: 9
#
# Input: $N = 101
# Output: 10

# solution: https://en.wikipedia.org/wiki/Integer_square_root

import sys

def isqrt(n):
    x0 = n >> 1                 # initial estimate
    if x0 == 0:
        return n

    # loop
    x1 = int(x0 + n/x0) >> 1
    while x1 < x0:
        x0 = x1; x1 = int(x0 + n/x0) >> 1
    return x0

n = int(sys.argv[1])
print(isqrt(n))
[ "pauloscustodio@gmail.com" ]
pauloscustodio@gmail.com
70f76bc9b439c416383973f9088f2be3a89488ca
cb9b861f5f3c0a36acfa2d0e1664216587b91f07
/svr_surrogate.py
df181b6f63917221a1b40cfcb1ae7a7835fb5914
[]
no_license
rubinxin/SoTL
feae052dba5506b3750126b9f7180a02a01bd998
16e24371972aab2a5fa36f8febbe83ae4dacf352
refs/heads/master
2023-01-20T14:35:37.027939
2020-11-30T16:51:29
2020-11-30T16:51:29
307,888,874
0
0
null
null
null
null
UTF-8
Python
false
false
5,080
py
# Our implementation of SVR-based learning curve extrapolation surrogate
# based on the description in B. Baker, O. Gupta, R. Raskar, and N. Naik,
# "Accelerating neural architecture search using performance prediction," arXiv preprint arXiv:1705.10823, 2017.

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.svm import NuSVR
import time
from scipy import stats


def loguniform(low=0, high=1, size=None):
    return np.exp(np.random.uniform(np.log(low), np.log(high), size))


class LcSVR(object):
    def __init__(self, VC_all_archs_list, HP_all_archs_list, AP_all_archs_list, test_acc_all_archs_list,
                 n_hypers=1000, n_train=200, seed=0, all_curve=True, model_name='svr'):

        self.n_hypers = n_hypers
        self.all_curve = all_curve
        self.n_train = n_train
        self.seed = seed
        self.model_name = model_name

        self.VC = np.vstack(VC_all_archs_list)
        self.HP = np.vstack(HP_all_archs_list)
        self.AP = np.vstack(AP_all_archs_list)
        self.DVC = np.diff(self.VC, n=1, axis=1)
        self.DDVC = np.diff(self.DVC, n=1, axis=1)
        self.max_epoch = self.VC.shape[1]
        self.test_acc_seed_all_arch = test_acc_all_archs_list

    def learn_hyper(self, epoch):
        n_epoch = int(epoch)
        VC_sub = self.VC[:, :n_epoch]
        DVC_sub = self.DVC[:, :n_epoch]
        DDVC_sub = self.DDVC[:, :n_epoch]
        mVC_sub = np.mean(VC_sub, axis=1)[:, None]
        stdVC_sub = np.std(VC_sub, axis=1)[:, None]
        mDVC_sub = np.mean(DVC_sub, axis=1)[:, None]
        stdDVC_sub = np.std(DVC_sub, axis=1)[:, None]
        mDDVC_sub = np.mean(DDVC_sub, axis=1)[:, None]
        stdDDVC_sub = np.std(DDVC_sub, axis=1)[:, None]

        if self.all_curve:
            TS = np.hstack([VC_sub, DVC_sub, DDVC_sub, mVC_sub, stdVC_sub])
        else:
            TS = np.hstack([mVC_sub, stdVC_sub, mDVC_sub, stdDVC_sub, mDDVC_sub, stdDDVC_sub])

        X = np.hstack([self.AP, self.HP, TS])
        y_val_acc = self.VC[:, -1]
        y_test_acc = np.array(self.test_acc_seed_all_arch)
        y = np.vstack([y_val_acc, y_test_acc]).T

        # split into train/test data sets
        split = (X.shape[0] - self.n_train) / X.shape[0]
        X_train, X_test, y_both_train, y_both_test = train_test_split(
            X, y, test_size=split, random_state=self.seed)
        y_train = y_both_train[:, 0]  # all final validation acc
        y_test = y_both_test[:, 1]  # all final test acc

        np.random.seed(self.seed)
        # specify model parameters
        if self.model_name == 'svr':
            C = loguniform(1e-5, 10, self.n_hypers)
            nu = np.random.uniform(0, 1, self.n_hypers)
            gamma = loguniform(1e-5, 10, self.n_hypers)
            hyper = np.vstack([C, nu, gamma]).T
        else:
            print('Not implemented')

        print(f'start CV on {self.model_name}')
        mean_score_list = []
        t_start = time.time()
        for i in range(self.n_hypers):
            # define model
            if self.model_name == 'svr':
                model = NuSVR(C=hyper[i, 0], nu=hyper[i, 1], gamma=hyper[i, 2], kernel='rbf')
                # model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')
            elif self.model_name == 'blr':
                model = BayesianRidge(alpha_1=hyper[i, 0], alpha_2=hyper[i, 1],
                                      lambda_1=hyper[i, 2], lambda_2=hyper[i, 3])
            elif self.model_name == 'rf':
                model = RandomForestRegressor(n_estimators=int(hyper[i, 0]), max_features=hyper[i, 1])
            # perform cross validation to learn the best hyper value
            scores = cross_val_score(model, X_train, y_train, cv=3)
            mean_scores = np.mean(scores)
            mean_score_list.append(mean_scores)
        t_end = time.time()

        best_hyper_idx = np.argmax(mean_score_list)
        best_hyper = hyper[best_hyper_idx]
        max_score = np.max(mean_score_list)
        time_taken = t_end - t_start
        print(f'{self.model_name} on {self.seed} n_train={self.n_train}: '
              f'best_hyper={best_hyper}, score={max_score}, time={time_taken}')

        self.epoch = epoch
        self.best_hyper = best_hyper
        self.X_train, self.X_test = X_train, X_test
        self.y_train, self.y_test = y_train, y_test

        return best_hyper, time_taken

    def extrapolate(self):
        if self.model_name == 'svr':
            best_model = NuSVR(C=self.best_hyper[0], nu=self.best_hyper[1], gamma=self.best_hyper[2], kernel='rbf')
        else:
            print('Not implemented')

        # train and fit model
        best_model.fit(self.X_train, self.y_train)
        y_pred = best_model.predict(self.X_test)
        rank_corr, p = stats.spearmanr(self.y_test, y_pred)
        print(f'{self.model_name} on n_train={self.n_train} e={self.epoch}: rank_corr={rank_corr}')

        return rank_corr
[ "robin@robots.ox.ac.uk" ]
robin@robots.ox.ac.uk
69fffa27f3895ac0ec76dfa70d08f3e0ab8e62f2
e76c8b127ae58c5d3b5d22c069719a0343ea8302
/tf_ex_5_linear_reg_with_eager_api.py
31dc0013a8602daf6ff29927b818a2c4785c48cd
[]
no_license
janFrancoo/TensorFlow-Tutorials
18f3479fc647db3cbdb9fb9d5c0b9a67be804642
b34dbf903d2f5ff7bde6fb279fef6d7e2004a3bf
refs/heads/master
2020-07-23T02:01:14.940932
2019-10-26T08:41:23
2019-10-26T08:41:23
207,410,175
0
0
null
null
null
null
UTF-8
Python
false
false
1,490
py
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Set Eager API
tf.enable_eager_execution()
tfe = tf.contrib.eager

# Parameters
num_steps = 1000
learning_rate = .01

# Training data
x_train = np.array([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                    7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
y_train = np.array([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                    2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

# Weights
w = tfe.Variable(np.random.randn())
b = tfe.Variable(np.random.randn())

# Construct a linear model
def linear_regression(inputs):
    return (inputs * w) + b

# Define loss function
def mean_square_fn(model_fn, inputs, labels):
    return tf.reduce_sum(((model_fn(inputs) - labels) ** 2) / (2 * len(x_train)))

# Define optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate)

# Compute gradients
grad = tfe.implicit_gradients(mean_square_fn)

# Start training
for step in range(num_steps):
    optimizer.apply_gradients(grad(linear_regression, x_train, y_train))
    if (step + 1) % 50 == 0:
        print("Epoch: {}, Loss: {}, W: {}, b: {}".format(step + 1,
                                                         mean_square_fn(linear_regression, x_train, y_train),
                                                         w.numpy(), b.numpy()))

# Display
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, np.array(w * x_train + b), label='Fitted line')
plt.legend()
plt.show()
[ "noreply@github.com" ]
janFrancoo.noreply@github.com
c094ca768a09f90b5ff71b83649c758e5347b11a
dbf34d933a288e6ebca568eaebaa53e5b98ba7c1
/src/rebecca/index/splitter.py
34d4aa005a6bca3c5d5273b7acd574142148f30d
[]
no_license
rebeccaframework/rebecca.index
dc68dfa2c1b77fc273a12b3f934074722fb8300c
ab452c9b375227e84fab42496633dc026421a283
refs/heads/master
2021-01-10T21:11:53.567186
2013-05-05T18:53:58
2013-05-05T18:53:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
759
py
from zope.interface import implementer
from zope.index.text.interfaces import ISplitter
from persistent import Persistent

from igo.Tagger import Tagger


@implementer(ISplitter)
class IgoSplitter(Persistent):

    def __init__(self, dictionary):
        self.dictionary = dictionary

    @property
    def tagger(self):
        if not hasattr(self, '_v_tagger'):
            self._v_tagger = Tagger(self.dictionary)
        return self._v_tagger

    def process(self, terms):
        results = []
        for term in terms:
            results.extend(self.tagger.wakati(term))
        return results

    def processGlob(self, terms):
        results = []
        for term in terms:
            results.extend(self.tagger.wakati(term))
        return results
[ "aodagx@gmail.com" ]
aodagx@gmail.com
c361eca7aae2a04817c28fe837c042af887c9567
411e5de8629d6449ff9aad2eeb8bb1dbd5977768
/AlgoExpert/greedy/minimumWaitingTime.py
654f08d249d968d38d7b072c4abfa1fdfa5e8e37
[ "MIT" ]
permissive
Muzque/Leetcode
cd22a8f5a17d9bdad48f8e2e4dba84051e2fb92b
2c37b4426b7e8bfc1cd2a807240b0afab2051d03
refs/heads/master
2022-06-01T20:40:28.019107
2022-04-01T15:38:16
2022-04-01T15:39:24
129,880,002
1
1
MIT
2022-04-01T15:39:25
2018-04-17T09:28:02
Python
UTF-8
Python
false
false
486
py
""" """ testcases = [ { 'input': [3, 2, 1, 2, 6], 'output': 17, }, { 'input': [2], 'output': 0, }, ] def minimumWaitingTime(queries): queries.sort() ret = 0 for i in range(len(queries)-1): if i > 0: queries[i] += queries[i-1] ret += queries[i] return ret if __name__ == '__main__': for tc in testcases: ret = minimumWaitingTime(tc['input']) assert(ret == tc['output'])
[ "zerosky1943@gmail.com" ]
zerosky1943@gmail.com
3d90e1a3792eaec38062f7ea1dbe0cfdf9455b06
3fa4a77e75738d00835dcca1c47d4b99d371b2d8
/backend/pyrogram/raw/base/server_dh_inner_data.py
6813099ac6c9cffd446ad983b6da40d37ae93590
[ "Apache-2.0" ]
permissive
appheap/social-media-analyzer
1711f415fcd094bff94ac4f009a7a8546f53196f
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
refs/heads/master
2023-06-24T02:13:45.150791
2021-07-22T07:32:40
2021-07-22T07:32:40
287,000,778
5
3
null
null
null
null
UTF-8
Python
false
false
1,903
py
# Pyrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2021 Dan <https://github.com/delivrance> # # This file is part of Pyrogram. # # Pyrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. # # # # # # # # # # # # # # # # # # # # # # # # # !!! WARNING !!! # # This is a generated file! # # All changes made in this file will be lost! # # # # # # # # # # # # # # # # # # # # # # # # # from typing import Union from pyrogram import raw from pyrogram.raw.core import TLObject ServerDHInnerData = Union[raw.types.ServerDHInnerData] # noinspection PyRedeclaration class ServerDHInnerData: # type: ignore """This base type has 1 constructor available. Constructors: .. hlist:: :columns: 2 - :obj:`ServerDHInnerData <pyrogram.raw.types.ServerDHInnerData>` """ QUALNAME = "pyrogram.raw.base.ServerDHInnerData" def __init__(self): raise TypeError("Base types can only be used for type checking purposes: " "you tried to use a base type instance as argument, " "but you need to instantiate one of its constructors instead. " "More info: https://docs.pyrogram.org/telegram/base/server-dh-inner-data")
[ "taleb.zarhesh@gmail.com" ]
taleb.zarhesh@gmail.com
6927adb2877ed18131676f2f35fb65189fcc17a5
ca617409a3a992a2014eab34bf45ea5cd22021d7
/event_management/serializers/venue.py
c595ecb61bac9e3ec3a25c229f8cb80dabf5c790
[]
no_license
Imam-Hossain-45/ticketing
89463b048db3c7b1bc92a4efc39b83c4f17d967f
65a124d579162a687b20dfbdba7fd85c110006c6
refs/heads/master
2022-04-14T22:36:23.152185
2020-03-07T11:52:38
2020-03-07T11:52:38
230,717,468
0
0
null
null
null
null
UTF-8
Python
false
false
495
py
from rest_framework import serializers
from event_management.models import Venue
from settings.models import Address


class AddressCreateSerializer(serializers.ModelSerializer):
    class Meta:
        model = Address
        fields = '__all__'


class VenueCreateSerializer(serializers.ModelSerializer):
    venue_address = AddressCreateSerializer()

    class Meta:
        model = Venue
        fields = ('name', 'amenities', 'capacity', 'contact_person', 'contact_mobile', 'venue_address')
[ "imamhossain1310@gmail.com" ]
imamhossain1310@gmail.com
2ea30df6db951105fb4bc2b8f1eb8fdd7e346f4d
cbd60a20e88adb174b40832adc093d848c9ca240
/solutions/busnumbers/busnumbers.py
690a0be7996a788721974b7b20150d4091bcf299
[]
no_license
maxoja/kattis-solution
377e05d468ba979a50697b62ce8efab5dcdddc63
b762bfa9bbf6ef691d3831c628d9d16255ec5e33
refs/heads/master
2018-10-09T04:53:31.579686
2018-07-19T12:39:09
2018-07-19T12:39:09
111,871,691
0
0
null
null
null
null
UTF-8
Python
false
false
579
py
n = int(input())
seq = sorted(list(map(int, input().split())))

prevs = []

for i in range(len(seq)):
    current = seq[i]
    nxt = -1 if i == len(seq)-1 else seq[i+1]

    if nxt == current+1:
        prevs.append(current)
        continue
    else:
        if prevs:
##            print('enter' , prevs)
            if len(prevs) >= 2:
                print(str(prevs[0]) + '-' + str(current), end=' ')
            else:
                print(prevs[0], current, end=' ')
            prevs = []
        else:
            print(current, end=' ')
[ "-" ]
-
3bd03fe4d769ba382d80392cf0c083c66cb30acb
71501709864eff17c873abbb97ffabbeba4cb5e3
/llvm13.0.0/lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentTwoBreakpointThreads.py
1f6832d9ecdb1b993193adf3655ceba218a19e06
[ "NCSA", "Apache-2.0", "LLVM-exception" ]
permissive
LEA0317/LLVM-VideoCore4
d08ba6e6f26f7893709d3285bdbd67442b3e1651
7ae2304339760685e8b5556aacc7e9eee91de05c
refs/heads/master
2022-06-22T15:15:52.112867
2022-06-09T08:45:24
2022-06-09T08:45:24
189,765,789
1
0
NOASSERTION
2019-06-01T18:31:29
2019-06-01T18:31:29
null
UTF-8
Python
false
false
700
py
import unittest2

from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase


@skipIfWindows
class ConcurrentTwoBreakpointThreads(ConcurrentEventsBase):

    mydir = ConcurrentEventsBase.compute_mydir(__file__)

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @expectedFailureAll(archs=["aarch64"], oslist=["freebsd"],
                        bugnumber="llvm.org/pr49433")
    def test(self):
        """Test two threads that trigger a breakpoint. """
        self.build(dictionary=self.getBuildFlags())
        self.do_thread_actions(num_breakpoint_threads=2)
[ "kontoshi0317@gmail.com" ]
kontoshi0317@gmail.com
433be8a7d7781edf3a6c0b6fd7ea8ce7d790b2f2
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02803/s436740421.py
e18ddeaa284997e7a2fa19641dcbbc710be7f0af
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
1,057
py
from collections import deque

H, W = map(int, input().split())
field = ['#' * (W+2)] * (H+2)
for i in range(1, H+1):
    field[i] = '#' + input() + '#'

di = [1, 0, -1, 0]
dj = [0, 1, 0, -1]

ans = 0
q = deque()
for si in range(1, H+1):
    for sj in range(1, W+1):
        if field[si][sj] == '#':
            continue
        q.clear()
        q.append([si, sj])
        dist = [[-1 for _ in range(W+2)] for _ in range(H+2)]
        dist[si][sj] = 0
        dist_max = 0
        while len(q) != 0:
            current = q.popleft()
            ci = current[0]
            cj = current[1]
            for d in range(4):
                next_i = ci + di[d]
                next_j = cj + dj[d]
                if field[next_i][next_j] == '#':
                    continue
                if dist[next_i][next_j] != -1:
                    continue
                q.append([next_i, next_j])
                dist[next_i][next_j] = dist[ci][cj] + 1
                dist_max = max(dist_max, dist[next_i][next_j])
        ans = max(ans, dist_max)
print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
76f9cf3c70feb1745228287e760e543e56e9ce1d
900f3e5e0a5f9bbc28aa8673153046e725d66791
/less15/chat_v3/chat/chat/settings.py
e6a04b5f17f725ceee6cdb1c8879e9c0dbd6a011
[]
no_license
atadm/python_oop
c234437faebe5d387503c2c7f930ae72c2ee8107
2ffbadab28a18c28c14d36ccb008c5b36a426bde
refs/heads/master
2021-01-23T04:40:10.092048
2017-05-30T12:22:53
2017-05-30T12:22:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,427
py
""" Django settings for chat project. Generated by 'django-admin startproject' using Django 1.11.1. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '692yjr^yr4$)m_4ud6j7^^!*gd%r+jcp!vn+nr@a4iuzy=m1js' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'chatApp.apps.ChatappConfig', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'chat.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')] , 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'chat.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'chatdb', 'USER': 'postgres', 'PASSWORD': 'Univer123', 'HOST': '', # Set to empty string for localhost. 'PORT': '5433', # Set to empty string for default. } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/'
[ "anna@3g.ua" ]
anna@3g.ua
6b09c02200e9cd1e184bdbbc08dba0c6c89f9b8e
e8f6a0d45cc5b98747967169cea652f90d4d6489
/week2/day2/taco_project/taco_project/settings.py
0d5bc4a9d6be4b8a4afe7e0834b1445e8f8b528f
[]
no_license
prowrestler215/python-2020-09-28
1371695c3b48bbd89a1c42d25aa8a5b626db1d19
d250ebd72e7f2a76f40ebbeb7fbb31ac36afd75f
refs/heads/master
2022-12-28T22:24:52.356588
2020-10-06T18:50:12
2020-10-06T18:50:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,128
py
""" Django settings for taco_project project. Generated by 'django-admin startproject' using Django 2.2.4. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'kkc*+j-s50vfg_6s%p!rc^#5$pc2=okw94a6=r17z+lz1s&y@a' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'taco_stand_app', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'taco_project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'taco_project.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/'
[ "CBaut@users.noreply.github.com" ]
CBaut@users.noreply.github.com
d45bbc2ebc3e163699e0e18d6bf32523bccae91f
978248bf0f275ae688f194593aa32c267832b2b6
/xlsxwriter/test/worksheet/test_write_sheet_views8.py
16dd94be70b03ae21d672a359c7baa4a50a33d46
[ "BSD-2-Clause-Views" ]
permissive
satish1337/XlsxWriter
b0c216b91be1b74d6cac017a152023aa1d581de2
0ab9bdded4f750246c41a439f6a6cecaf9179030
refs/heads/master
2021-01-22T02:35:13.158752
2015-03-31T20:32:28
2015-03-31T20:32:28
33,300,989
1
0
null
null
null
null
UTF-8
Python
false
false
3,068
py
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # import unittest from ...compatibility import StringIO from ...worksheet import Worksheet class TestWriteSheetViews(unittest.TestCase): """ Test the Worksheet _write_sheet_views() method. """ def setUp(self): self.fh = StringIO() self.worksheet = Worksheet() self.worksheet._set_filehandle(self.fh) def test_write_sheet_views1(self): """Test the _write_sheet_views() method with split panes + selection""" self.worksheet.select() self.worksheet.set_selection('A2') self.worksheet.split_panes(15, 0) self.worksheet._write_sheet_views() exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="600" topLeftCell="A2" activePane="bottomLeft"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/></sheetView></sheetViews>' got = self.fh.getvalue() self.assertEqual(got, exp) def test_write_sheet_views2(self): """Test the _write_sheet_views() method with split panes + selection""" self.worksheet.select() self.worksheet.set_selection('B1') self.worksheet.split_panes(0, 8.43) self.worksheet._write_sheet_views() exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1350" topLeftCell="B1" activePane="topRight"/><selection pane="topRight" activeCell="B1" sqref="B1"/></sheetView></sheetViews>' got = self.fh.getvalue() self.assertEqual(got, exp) def test_write_sheet_views3(self): """Test the _write_sheet_views() method with split panes + selection""" self.worksheet.select() self.worksheet.set_selection('G4') self.worksheet.split_panes(45, 54.14) self.worksheet._write_sheet_views() exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6150" ySplit="1200" topLeftCell="G4" activePane="bottomRight"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight" activeCell="G4" sqref="G4"/></sheetView></sheetViews>' got = self.fh.getvalue() self.assertEqual(got, exp) def test_write_sheet_views4(self): """Test the _write_sheet_views() method with split panes + selection""" self.worksheet.select() self.worksheet.set_selection('I5') self.worksheet.split_panes(45, 54.14) self.worksheet._write_sheet_views() exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6150" ySplit="1200" topLeftCell="G4" activePane="bottomRight"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight" activeCell="I5" sqref="I5"/></sheetView></sheetViews>' got = self.fh.getvalue() self.assertEqual(got, exp)
[ "jmcnamara@cpan.org" ]
jmcnamara@cpan.org
bd1244422c562b95b4abe609a6ecc9151d8cc0f3
2c5edd9a3c76f2a14c01c1bd879406850a12d96e
/config/default.py
37e61c16709d86117c0fa3d63970296d5b8742d2
[ "MIT" ]
permissive
by46/coffee
e13f5e22a8ff50158b603f5115d127e07c2e322b
f12e1e95f12da7e322a432a6386a1147c5549c3b
refs/heads/master
2020-08-14T04:42:05.248138
2017-10-23T13:39:05
2017-10-23T13:39:05
73,526,501
0
0
null
null
null
null
UTF-8
Python
false
false
486
py
HTTP_HOST = '0.0.0.0'
HTTP_PORT = 8080

DEBUG = False

SECRET_KEY = "\x02|\x86.\\\xea\xba\x89\xa3\xfc\r%s\x9e\x06\x9d\x01\x9c\x84\xa1b+uC"

# Flask-NegLog Settings
LOG_LEVEL = 'debug'
LOG_FILENAME = "logs/error.log"
LOG_BACKUP_COUNT = 10
LOG_MAX_BYTE = 1024 * 1024 * 10
LOG_FORMATTER = '%(asctime)s - %(levelname)s - %(message)s'
LOG_ENABLE_CONSOLE = True

# Flask-CORS Settings
CORS_ORIGINS = "*"
CORS_METHODS = "GET,POST,PUT"
CORS_ALLOW_HEADERS = "Content-Type,Host"
[ "ycs_ctbu_2010@126.com" ]
ycs_ctbu_2010@126.com
e53bee84de0b19c27956646ed221e41449d3e3ae
0818a9020adc6e25b86060a8e84171d0b4958625
/tensorflow-piece/file_gene_scripts.py
840928b309ae60acdab8f757ab590e178999874f
[]
no_license
wgwangang/mycodes
2107becb6c457ed88b46426974a8f1fa07ed37dd
9fa48ca071eacf480034d1f69d3c05171d8a97d2
refs/heads/master
2020-03-28T07:58:45.017910
2018-03-14T07:21:14
2018-03-14T07:21:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,196
py
import os


def generate_train_test_txt(data_root_dir, save_dir, rate=.1):
    train_txt_file_path = os.path.join(save_dir, "train.txt")
    test_txt_file_path = os.path.join(save_dir, "test.txt")
    dirs = os.listdir(data_root_dir)
    train_txt_file = open(train_txt_file_path, mode="w")
    test_txt_file = open(test_txt_file_path, mode="w")
    for i, level1 in enumerate(dirs):
        path_to_level1 = os.path.join(data_root_dir, level1)
        img_names = os.listdir(path_to_level1)
        test_num = rate*len(img_names)
        if test_num < 1:
            test_num = 1
        test_num = int(test_num)
        for img in img_names[:-test_num]:
            abs_path = os.path.join(level1, img)
            item = abs_path+" "+str(i)+"\n"
            train_txt_file.write(item)
        for img in img_names[-test_num:]:
            abs_path = os.path.join(level1, img)
            item = abs_path + " " + str(i) + "\n"
            test_txt_file.write(item)
        print("people ", i, " Done!")
    train_txt_file.close()
    test_txt_file.close()


def main():
    generate_train_test_txt("/home/dafu/PycharmProjects/data", save_dir="../data")


if __name__ == "__main__":
    main()
[ "yinpenghhz@hotmail.com" ]
yinpenghhz@hotmail.com
8e0277e0fae0c9499d3837975223b854aed5431e
6ba406a7c13d5c76934e36494c32c927bbda7ae7
/tests/trails_fhn.py
8ff814c29523973081000a55dda715750cf1dacf
[ "MIT" ]
permissive
sowmyamanojna/neuronmd
50e5e4bd9a4340fda73e363133314118ea815360
3994d02214c3cc4996261324cfe9238e34e29f1c
refs/heads/main
2023-07-09T01:31:32.597268
2021-08-23T14:33:38
2021-08-23T14:33:38
398,491,112
0
0
null
null
null
null
UTF-8
Python
false
false
315
py
import numpy as np
import matplotlib.pyplot as plt
from fitzhugh_nagumo import FHNNeuron

neuron = FHNNeuron()

tmax = 100
dt = 0.01
I = 1.75

t = np.arange(0, tmax, dt)
neuron.simulate(0.6, 0, t, 0.6)
neuron.plot(name="0.1")

current_list = np.arange(0.01, I, 0.01)
neuron.animate(t, current_list, ylim=[-0.45,1.5])
[ "sowmyamanojna@gmail.com" ]
sowmyamanojna@gmail.com
8fe76f727f44429df1fc0876ce238e15960bc6ec
549d11c89ce5a361de51f1e1c862a69880079e3c
/python高级语法/线程/都任务版的UDP聊天器.py
9d751967f4b2eaba2e1846ea3f723ae9a52eca1e
[]
no_license
BaldSuperman/workspace
f304845164b813b2088d565fe067d5cb1b7cc120
4835757937b700963fdbb37f75a5e6b09db97535
refs/heads/master
2020-08-01T15:32:02.593251
2019-09-26T08:04:50
2019-09-26T08:04:50
211,034,750
0
0
null
null
null
null
UTF-8
Python
false
false
953
py
import socket
import threading


def recv_msg(udp_socket):
    while True:
        recv_data = udp_socket.recvfrom(1024)
        print("收到的数据:{0}".format(recv_data))


def send_msg(udp_socket, dest_port, dest_ip):
    '''send data'''
    while True:
        send_data = input("输入发送的数据: ")
        udp_socket.sendto(send_data.encode('utf-8'), (dest_ip, dest_port))


def main():
    # create the socket
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # bind local address information
    udp_socket.bind(("", 7890))
    # get the peer's IP and port
    dest_ip = input("请输入对方IP: ")
    dest_port = int(input("请输入对方port: "))
    # receive data
    # create threads that run the receive and send functions
    t_recv = threading.Thread(target=recv_msg, args=(udp_socket, ))
    t_send = threading.Thread(target=send_msg, args=(udp_socket, dest_port, dest_ip))
    t_recv.start()
    t_send.start()


if __name__ == '__main__':
    main()
[ "you@example.com" ]
you@example.com
f2c68ec335f6a55681c06b381e461d9c65326cee
a316a0018bd1cb42c477423916669ed32e2c5d7c
/homie/node/property/property_boolean.py
49d8eb14cd18735035fe6c9f1f25173d63f9b69c
[ "MIT" ]
permissive
mjcumming/HomieV3
a8b60ee9119059d546b69b31280cf15a7978e6fc
62278ec6e5f72071b2aaebe8e9f66b2071774ef7
refs/heads/master
2020-04-28T06:49:22.193682
2020-04-04T12:36:08
2020-04-04T12:36:08
175,072,550
5
7
MIT
2020-01-15T03:41:38
2019-03-11T19:44:17
Python
UTF-8
Python
false
false
624
py
from .property_base import Property_Base


class Property_Boolean(Property_Base):

    def __init__(self, node, id, name, settable=True, retained=True, qos=1, unit=None,
                 data_type='boolean', data_format=None, value=None, set_value=None):

        super().__init__(node,id,name,settable,retained,qos,unit,'boolean',data_format,value,set_value)

    def validate_value(self, value):
        return True  # tests below validate

    def get_value_from_payload(self, payload):
        if payload == 'true':
            return True
        elif payload == 'false':
            return False
        else:
            return None
[ "mike@4831.com" ]
mike@4831.com
6ee0b28d5d47fef3766ee9a9567b845b290892dd
3298ad5f82b30e855637e9351fe908665e5a681e
/Regression/Polynomial Regression/polynomial_regression.py
07f16298a93575a5156ff3fe4ef8b1102a78ad42
[]
no_license
Pratyaksh7/Machine-learning
8ab5281aecd059405a86df4a3ade7bcb308f8120
74b7b045f17fe52bd02e99c0e25b8e0b4ac3f3a6
refs/heads/master
2022-12-21T14:05:28.882803
2020-09-28T17:03:55
2020-09-28T17:03:55
292,274,874
1
0
null
null
null
null
UTF-8
Python
false
false
1,924
py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[: , 1:-1].values
y = dataset.iloc[: , -1].values

# training the linear regression model on the whole dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)

# training the polynomial regression model on the whole dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly,y)

# visualising the linear regression results
plt.scatter(X,y, color='red')
plt.plot(X, lin_reg.predict(X), color= 'blue')  # plotting the linear regression line for the X values and the predicted Salary i.e., lin_reg
plt.title('Truth or Bluff (Linear Regression )')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# visualising the polynomial regression results
plt.scatter(X,y, color='red')
plt.plot(X, lin_reg_2.predict(X_poly), color= 'blue')  # plotting the linear regression line for the X values and the predicted Salary i.e., lin_reg
plt.title('Truth or Bluff (Polynomial Regression )')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# visualising the polynomial regression results(for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)  # choosing each point with a diff of 0.1
X_grid = X_grid.reshape((len(X_grid),1))
plt.scatter(X,y,color= 'red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)),color='blue')
plt.title('Truth or Bluff (Polynomial Regression Smooth)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# predicting a new result with linear regression
print(lin_reg.predict([[6.5]]))

# predicting a new result with polynomial regression
print(lin_reg_2.predict(poly_reg.fit_transform([[6.5]])))
[ "pratyakshgupta7@gmail.com" ]
pratyakshgupta7@gmail.com
079ded3be59ad28e3f6743e307ba309423d27dcd
87cfb3d137853d91faf4c1c5f6e34a4e4a5206d9
/src/zojax/cssregistry/tests.py
41aa3e1e8e541b96a69a0706d39bde5e7b101372
[ "ZPL-2.1" ]
permissive
Zojax/zojax.cssregistry
cea24579ef73f7ea2cec9c8b95671a5eeec0ce8f
688f4ecb7556935997bbe4c09713bf57ef7be617
refs/heads/master
2021-01-10T21:06:12.664527
2011-12-16T07:15:04
2011-12-16T07:15:04
2,034,954
0
0
null
null
null
null
UTF-8
Python
false
false
1,368
py
############################################################################## # # Copyright (c) 2009 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ zojax.cssregistry tests $Id$ """ __docformat__ = "reStructuredText" import unittest, doctest from zope import interface, schema from zope.component import provideAdapter from zope.app.testing import setup from zope.traversing.namespace import view from zope.traversing.interfaces import ITraversable def setUp(test): setup.placelessSetUp() setup.setUpTraversal() provideAdapter(view, (None, None), ITraversable, name="view") def tearDown(test): setup.placelessTearDown() def test_suite(): return unittest.TestSuite(( doctest.DocFileSuite( 'README.txt', setUp=setUp, tearDown=tearDown, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS), ))
[ "andrey.fedoseev@gmail.com" ]
andrey.fedoseev@gmail.com
07f515eae1a86622a522fa62d427c819d880730d
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
/source/All_Solutions/1116.打印零与奇偶数/1116-打印零与奇偶数.py
6ba403369346dcd95a2fdfe939a07dc70c003e60
[ "MIT" ]
permissive
zhangwang0537/LeetCode-Notebook
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
refs/heads/master
2022-11-13T21:08:20.343562
2020-04-09T03:11:51
2020-04-09T03:11:51
277,572,643
0
0
MIT
2020-07-06T14:59:57
2020-07-06T14:59:56
null
UTF-8
Python
false
false
1,103
py
import threading

class ZeroEvenOdd:
    def __init__(self, n):
        self.n = n+1
        self.Zero=threading.Semaphore(1)
        self.Even=threading.Semaphore(0)
        self.Odd=threading.Semaphore(0)

    # printNumber(x) outputs "x", where x is an integer.
    def zero(self, printNumber: 'Callable[[int], None]') -> None:
        for i in range(1,self.n):
            self.Zero.acquire()
            printNumber(0)
            if i%2==1:
                self.Odd.release()
            else:
                self.Even.release()

    def even(self, printNumber: 'Callable[[int], None]') -> None:
        for i in range(1,self.n):
            if i%2==0:
                self.Even.acquire()
                printNumber(i)
                self.Zero.release()

    def odd(self, printNumber: 'Callable[[int], None]') -> None:
        for i in range(1,self.n):
            if i%2==1:
                self.Odd.acquire()
                printNumber(i)
                self.Zero.release()
[ "mzm@mail.dlut.edu.cn" ]
mzm@mail.dlut.edu.cn
c8813dfc6bcc9f3e14517bbb631dad71176f78d9
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
/benchmark/wikipedia/testcase/interestallcases/testcase6_011_1.py
f8d2461ccd955bd20d8bbd39508574f42faae36b
[]
no_license
Prefest2018/Prefest
c374d0441d714fb90fca40226fe2875b41cf37fc
ac236987512889e822ea6686c5d2e5b66b295648
refs/heads/master
2021-12-09T19:36:24.554864
2021-12-06T12:46:14
2021-12-06T12:46:14
173,225,161
5
0
null
null
null
null
UTF-8
Python
false
false
8,155
py
#coding=utf-8
import os
import subprocess
import time
import traceback

from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException

desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'org.wikipedia',
    'appActivity' : 'org.wikipedia.main.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'org.wikipedia/org.wikipedia.JacocoInstrumentation',
    'noReset' : True
    }

def command(cmd, timeout=5):
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return

def getElememt(driver, str) :
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element

def getElememtBack(driver, str1, str2) :
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element

def swipe(driver, startxper, startyper, endxper, endyper) :
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return

def scrollToFindElement(driver, str) :
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            swipe(driver, 0.5, 0.6, 0.5, 0.2)
        else:
            return element
    return

def clickoncheckable(driver, str, value = "true") :
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if (len(lists) == 1) :
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value) :
                    innere.click()
                break
        except NoSuchElementException:
            continue

# preference setting and exit
try :
    os.popen("adb shell svc data diable")
    time.sleep(5)
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.DeveloperSettingsActivity")
    scrollToFindElement(driver, "new UiSelector().text(\"useRestbase_setManually\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"useRestbase_setManually\")", "false")
    scrollToFindElement(driver, "new UiSelector().text(\"mediaWikiBaseUriSupportsLangCode\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"mediaWikiBaseUriSupportsLangCode\")", "true")
    scrollToFindElement(driver, "new UiSelector().text(\"suppressNotificationPolling\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"suppressNotificationPolling\")", "true")
    scrollToFindElement(driver, "new UiSelector().text(\"memoryLeakTest\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"memoryLeakTest\")", "true")
    scrollToFindElement(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")", "false")
    driver.press_keycode(4)
    time.sleep(2)
    os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.SettingsActivity")
    scrollToFindElement(driver, "new UiSelector().text(\"Download only over Wi-Fi\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"Download only over Wi-Fi\")", "true")
    scrollToFindElement(driver, "new UiSelector().text(\"Show images\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"Show images\")", "false")
    driver.press_keycode(4)
    time.sleep(2)
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
finally :
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_011_pre\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()

# testcase011
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"Search Wikipedia\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/menu_overflow_button\").className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/menu_overflow_button\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    swipe(driver, 0.5, 0.8, 0.5, 0.2)
    element = getElememtBack(driver, "new UiSelector().text(\"Explore\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/menu_overflow_button\").className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Got it\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_011\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'org.wikipedia'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
    os.popen("adb shell svc data enable")
[ "prefest2018@gmail.com" ]
prefest2018@gmail.com
9cbf5b6cf37fb49f49e1452747be65c5d74a5e7b
05887f0f20f3a57c11370021b996d76a56596e5f
/cats/articles/urls.py
ec12a3e5441c566e5a600233b109c2dba9bbd2ea
[]
no_license
kate-ka/cats_backend
11b5102f0e324713a30747d095291dbc9e194c3a
df19f7bfa4d94bf8effdd6f4866012a0a5302b71
refs/heads/master
2020-12-06T16:59:02.713830
2017-10-27T09:51:23
2017-10-27T09:51:23
73,502,091
0
0
null
null
null
null
UTF-8
Python
false
false
340
py
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from . views import ArticleList, ArticleDetail

urlpatterns = [
    url(r'^api-v1/articles/$', ArticleList.as_view()),
    url(r'^api-v1/articles/(?P<pk>[0-9]+)/$', ArticleDetail.as_view()),
]

urlpatterns = format_suffix_patterns(urlpatterns)
[ "you@example.com" ]
you@example.com
ea1bcc55cfd8b75317e22e14dd2204cfadba8760
413f7768f98f72cef8423c473424d67642f1228f
/examples/10_useful_functions/ex06_enumerate.py
909474c54cfdc5aacf3c864d87b7e09cc6e3b343
[]
no_license
easypythoncode/PyNEng
a7bbf09c5424dcd81796f8836509be90c3b1a752
daaed1777cf5449d5494e5a8471396bbcb027de6
refs/heads/master
2023-03-22T01:21:47.250346
2022-08-29T18:25:13
2022-08-29T18:25:13
228,477,021
0
0
null
null
null
null
UTF-8
Python
false
false
176
py
from pprint import pprint

with open("config_r1.txt") as f:
    for num, line in enumerate(f, 1):
        if line.startswith("interface"):
            print(num, line, end="")
[ "58902135+easypythoncode@users.noreply.github.com" ]
58902135+easypythoncode@users.noreply.github.com
7391bab13b498877c272f00e6690ce87243867f1
c85d43dc50c26ea0fa4f81b54fb37460ef6cca8d
/rms/urls.py
e59bd4b9b1b0e2b3f232d4b3be1bab71db14aab9
[]
no_license
yorong/buzzz-web-dev
c48dbc165d0aa3abce07971a2dee0280f9bf1e92
0b74bd1b7cfc7a6a7d61672f930d0bde635b4398
refs/heads/master
2021-08-23T02:38:00.544503
2017-12-02T16:01:31
2017-12-02T16:01:31
112,854,877
0
0
null
2017-12-02T15:50:40
2017-12-02T15:50:39
null
UTF-8
Python
false
false
281
py
from django.conf.urls import url
# from django.views.generic.base import RedirectView

from rms.views import (
    RMSHomeView,
    RMSStartView,
)

urlpatterns = [
    url(r'^$', RMSHomeView.as_view(), name='home'),
    url(r'^start/$', RMSStartView.as_view(), name='start'),
]
[ "ppark9553@gmail.com" ]
ppark9553@gmail.com
a0b89519a21414737b9664285aa3073dcc619318
9ab48ad4a8daf4cab1cdf592bac722b096edd004
/genutility/fingerprinting.py
b20ca97a8ec9dc1a95e9d6192a517f13f01cf58d
[ "ISC" ]
permissive
Dobatymo/genutility
c902c9b2df8ca615b7b67681f505779a2667b794
857fad80f4235bda645e29abbc14f6e94072403b
refs/heads/master
2023-08-16T18:07:23.651000
2023-08-15T19:05:46
2023-08-15T19:05:46
202,296,877
4
1
ISC
2022-06-14T01:39:53
2019-08-14T07:22:23
Python
UTF-8
Python
false
false
4,528
py
import logging import numpy as np from PIL import Image, ImageFilter # from .numba import opjit from .numpy import rgb_to_hsi, rgb_to_ycbcr, unblock # fingerprinting aka perceptual hashing def phash_antijpeg(image: Image.Image) -> np.ndarray: """Source: An Anti-JPEG Compression Image Perceptual Hashing Algorithm `image` is a RGB pillow image. """ raise NotImplementedError def hu_moments(channels: np.ndarray) -> np.ndarray: """Calculates all Hu invariant image moments for all channels separately. Input array must be of shape [width, height, channels] Returns shape [moments, channels] """ # pre-calculate matrices n, m, _ = channels.shape coords_x, coords_y = np.meshgrid(np.arange(m), np.arange(n)) coords_x = np.expand_dims(coords_x, axis=-1) # for batch input, some change is needed here coords_y = np.expand_dims(coords_y, axis=-1) # for batch input, some change is needed here def M(p, q): return np.sum(coords_x**p * coords_y**q * channels, axis=(-2, -3)) def mu(p, q, xb, yb): return np.sum((coords_x - xb) ** p * (coords_y - yb) ** q * channels, axis=(-2, -3)) def eta(p, q, xb, yb, mu00): gamma = (p + q) / 2 + 1 return mu(p, q, xb, yb) / mu00**gamma def loop(): M00 = M(0, 0) if not np.all(M00 > 0.0): logging.error("M00: %s", M00) raise ValueError("Failed to calculate moments. Single color pictures are not supported yet.") M10 = M(1, 0) M01 = M(0, 1) xb = M10 / M00 yb = M01 / M00 mu00 = mu(0, 0, xb, yb) eta20 = eta(2, 0, xb, yb, mu00) eta02 = eta(0, 2, xb, yb, mu00) eta11 = eta(1, 1, xb, yb, mu00) eta30 = eta(3, 0, xb, yb, mu00) eta12 = eta(1, 2, xb, yb, mu00) eta21 = eta(2, 1, xb, yb, mu00) eta03 = eta(0, 3, xb, yb, mu00) phi1 = eta20 + eta02 phi2 = (eta20 - eta02) ** 2 + 4 * eta11**2 phi3 = (eta30 - 3 * eta12) ** 2 + (3 * eta21 - eta03) ** 2 phi4 = (eta30 + eta12) ** 2 + (eta21 + eta03) ** 2 phi5 = (eta30 - 3 * eta12) * (eta30 + eta12) * ((eta30 + eta12) ** 2 - 3 * (eta21 + eta03) ** 2) + ( 3 * eta21 - eta03 ) * (eta21 + eta03) * (3 * (eta30 + eta12) ** 2 - (eta21 + eta03) ** 2) phi6 = (eta20 - eta02) * ((eta30 + eta12) ** 2 - (eta21 + eta03) ** 2) + 4 * eta11 * (eta30 + eta12) * ( eta21 + eta03 ) phi7 = (3 * eta21 - eta03) * (eta30 + eta12) * ((eta30 + eta12) ** 2 - 3 * (eta21 + eta03) ** 2) - ( eta30 - 3 * eta12 ) * (eta21 + eta03) * (3 * (eta30 + eta12) ** 2 - (eta21 + eta03) ** 2) return np.array([phi1, phi2, phi3, phi4, phi5, phi6, phi7]) return loop() # @opjit() rgb_to_hsi and rgb_to_ycbcr not supported by numba def phash_moments_array(arr: np.ndarray) -> np.ndarray: arr = arr / 255.0 # convert colorspaces hsi = rgb_to_hsi(arr) ycbcr = rgb_to_ycbcr(arr) # .astype(np.uint8) channels = np.concatenate([hsi, ycbcr], axis=-1) return np.concatenate(hu_moments(channels).T) def phash_moments(image: Image.Image) -> np.ndarray: """Source: Perceptual Hashing for Color Images Using Invariant Moments `image` is a RGB pillow image. Results should be compared with L^2-Norm of difference vector. """ if image.mode != "RGB": raise ValueError("Only RGB images are supported") # preprocessing image = image.resize((512, 512), Image.BICUBIC) image = image.filter(ImageFilter.GaussianBlur(3)) image = np.array(image) return phash_moments_array(image) def phash_blockmean_array(arr: np.ndarray, bits: int = 256) -> np.ndarray: """If bits is not a multiple of 8, the result will be zero padded from the right. 
""" if len(arr.shape) != 2: raise ValueError("arr must be 2-dimensional") n = int(np.sqrt(bits)) if n**2 != bits: raise ValueError("bits must be a square number") blocks = unblock(arr, n, n) means = np.mean(blocks, axis=-1) median = np.median(means) bools = means >= median return np.packbits(bools) def phash_blockmean(image: Image.Image, bits: int = 256, x: int = 256) -> bytes: """Source: Block Mean Value Based Image Perceptual Hashing Method: 1 Metric: 'Bit error rate' (normalized hamming distance) """ image = image.convert("L").resize((x, x)) image = np.array(image) return phash_blockmean_array(image, bits).tobytes()
[ "dobatymo@users.noreply.github.com" ]
dobatymo@users.noreply.github.com
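A minimal usage sketch for the block-mean hash in the record above: phash_blockmean returns packed bytes and, per its docstring, hashes are compared with the bit error rate (normalized Hamming distance). The image paths are placeholders; only the function shown above plus NumPy and Pillow are assumed.

import numpy as np
from PIL import Image
from genutility.fingerprinting import phash_blockmean  # module shown in the record above

hash_a = phash_blockmean(Image.open("a.jpg"), bits=256)   # "a.jpg"/"b.jpg" are placeholder paths
hash_b = phash_blockmean(Image.open("b.jpg"), bits=256)

bits_a = np.unpackbits(np.frombuffer(hash_a, dtype=np.uint8))
bits_b = np.unpackbits(np.frombuffer(hash_b, dtype=np.uint8))
bit_error_rate = np.count_nonzero(bits_a != bits_b) / 256  # 0.0 = identical, ~0.5 = unrelated
print(bit_error_rate)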
550b55101c715dd350fa5cd27249306a0f72db95
aea7bbe854591f493f4a37919eb75dde7f2eb2ca
/startCamp/03_day/flask/intro/function.py
f11de57476cf04f3c28b5c8018c3382bdd7abb9c
[]
no_license
GaYoung87/StartCamp
6e31a50d3037174b08a17114e467989520bb9a86
231b1fd0e245acb5d4570778aa41de79d6ad4b17
refs/heads/master
2020-06-17T11:52:38.256085
2019-07-12T01:14:47
2019-07-12T01:14:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
167
py
# Define a function named sum
def sum(num1, num2):  # a function that takes the numbers num1 and num2 as arguments
    return num1 + num2

result = sum(5, 6)
print(result)
[ "gyyoon4u@naver.com" ]
gyyoon4u@naver.com
5a2524a0bd81ae136a6c230538fb8c7985d95562
09e57dd1374713f06b70d7b37a580130d9bbab0d
/data/p3BR/R2/benchmark/startCirq330.py
74d7d9238c53332a3cb7ad0438bb8c452fc6d169
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
4,271
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 5/15/20 4:49 PM # @File : grover.py # qubit number=3 # total number=66 import cirq import cirq.google as cg from typing import Optional import sys from math import log2 import numpy as np #thatsNoCode from cirq.contrib.svg import SVGCircuit # Symbols for the rotation angles in the QAOA circuit. def make_circuit(n: int, input_qubit): c = cirq.Circuit() # circuit begin c.append(cirq.H.on(input_qubit[0])) # number=1 c.append(cirq.H.on(input_qubit[2])) # number=38 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=39 c.append(cirq.H.on(input_qubit[2])) # number=40 c.append(cirq.H.on(input_qubit[2])) # number=59 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=60 c.append(cirq.H.on(input_qubit[2])) # number=61 c.append(cirq.H.on(input_qubit[2])) # number=42 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=43 c.append(cirq.H.on(input_qubit[2])) # number=44 c.append(cirq.H.on(input_qubit[2])) # number=48 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=49 c.append(cirq.H.on(input_qubit[2])) # number=50 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=54 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=63 c.append(cirq.X.on(input_qubit[2])) # number=64 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=65 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=56 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=47 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=37 c.append(cirq.H.on(input_qubit[2])) # number=51 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=52 c.append(cirq.H.on(input_qubit[2])) # number=53 c.append(cirq.H.on(input_qubit[2])) # number=25 c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26 c.append(cirq.H.on(input_qubit[2])) # number=27 c.append(cirq.H.on(input_qubit[1])) # number=7 c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=8 c.append(cirq.rx(0.17592918860102857).on(input_qubit[2])) # number=34 c.append(cirq.rx(-0.3989822670059037).on(input_qubit[1])) # number=30 c.append(cirq.H.on(input_qubit[1])) # number=9 c.append(cirq.H.on(input_qubit[1])) # number=18 c.append(cirq.rx(2.3310617489636263).on(input_qubit[2])) # number=58 c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=19 c.append(cirq.H.on(input_qubit[1])) # number=20 c.append(cirq.X.on(input_qubit[1])) # number=62 c.append(cirq.Y.on(input_qubit[1])) # number=14 c.append(cirq.H.on(input_qubit[1])) # number=22 c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=23 c.append(cirq.rx(-0.9173450548482197).on(input_qubit[1])) # number=57 c.append(cirq.H.on(input_qubit[1])) # number=24 c.append(cirq.Z.on(input_qubit[2])) # number=3 c.append(cirq.Z.on(input_qubit[1])) # number=41 c.append(cirq.X.on(input_qubit[1])) # number=17 c.append(cirq.Y.on(input_qubit[2])) # number=5 c.append(cirq.X.on(input_qubit[2])) # number=21 c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=15 c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16 c.append(cirq.X.on(input_qubit[2])) # number=28 c.append(cirq.X.on(input_qubit[2])) # number=29 # circuit end c.append(cirq.measure(*input_qubit, key='result')) return c def bitstring(bits): return ''.join(str(int(b)) for b in bits) if __name__ == '__main__': qubit_count = 4 input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)] circuit = make_circuit(qubit_count,input_qubits) circuit = cg.optimized_for_sycamore(circuit, 
optimizer_type='sqrt_iswap') circuit_sample_count =2000 simulator = cirq.Simulator() result = simulator.run(circuit, repetitions=circuit_sample_count) frequencies = result.histogram(key='result', fold_func=bitstring) writefile = open("../data/startCirq330.csv","w+") print(format(frequencies),file=writefile) print("results end", file=writefile) print(circuit.__len__(), file=writefile) print(circuit,file=writefile) writefile.close()
[ "wangjiyuan123@yeah.net" ]
wangjiyuan123@yeah.net
7354bef465760c7821f2382d875c71a979be9fd7
332cceb4210ff9a5d99d2f3a65a704147edd01a2
/justext/utils.py
42e5074ec56396d742d4234e9106a0655e9de958
[]
permissive
miso-belica/jusText
16e5befcb449d3939ce62dc3460afbc768bd07cc
22a59079ea691d67e2383039cf5b40d490420115
refs/heads/main
2023-08-30T03:48:27.225553
2023-01-24T08:45:58
2023-01-24T08:45:58
8,121,947
527
70
BSD-2-Clause
2022-05-04T06:11:47
2013-02-10T11:42:20
Python
UTF-8
Python
false
false
1,965
py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals

import re
import os
import sys
import pkgutil

MULTIPLE_WHITESPACE_PATTERN = re.compile(r"\s+", re.UNICODE)


def normalize_whitespace(text):
    """
    Translates multiple whitespace into single space character.
    If there is at least one new line character chunk is replaced
    by single LF (Unix new line) character.
    """
    return MULTIPLE_WHITESPACE_PATTERN.sub(_replace_whitespace, text)


def _replace_whitespace(match):
    """Normalize all spacing characters that aren't a newline to a space."""
    text = match.group()
    return "\n" if "\n" in text or "\r" in text else " "


def is_blank(string):
    """
    Returns `True` if string contains only white-space characters
    or is empty. Otherwise `False` is returned.
    """
    return not string or string.isspace()


def get_stoplists():
    """Returns a collection of built-in stop-lists."""
    path_to_stoplists = os.path.dirname(sys.modules["justext"].__file__)
    path_to_stoplists = os.path.join(path_to_stoplists, "stoplists")

    stoplist_names = []
    for filename in os.listdir(path_to_stoplists):
        name, extension = os.path.splitext(filename)
        if extension == ".txt":
            stoplist_names.append(name)

    return frozenset(stoplist_names)


def get_stoplist(language):
    """Returns a built-in stop-list for the language as a set of words."""
    file_path = os.path.join("stoplists", "%s.txt" % language)
    try:
        stopwords = pkgutil.get_data("justext", file_path)
    except IOError:
        raise ValueError(
            "Stoplist for language '%s' is missing. "
            "Please use function 'get_stoplists' for complete list of stoplists "
            "and feel free to contribute by your own stoplist." % language
        )

    return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines())
[ "miso.belica@gmail.com" ]
miso.belica@gmail.com
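A short, hedged example of the helpers above; it assumes the jusText package is installed so the bundled stoplists can be located, and that stoplist names follow the bundled file names (e.g. "English").

from justext.utils import get_stoplists, get_stoplist, normalize_whitespace

print(sorted(get_stoplists())[:5])          # names of the bundled stop-lists
stopwords = get_stoplist("English")         # frozenset of lowercase words
print("the" in stopwords)                   # True

print(repr(normalize_whitespace("one   two \n\n three")))  # 'one two\nthree'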
e42abe3c8e78b1c9969be47b78657894ae274870
351fa4edb6e904ff1ac83c6a790deaa7676be452
/graphs/graphUtil/graphAdjMat.py
a8211d8ac10c6c16d87eb91ba14de7aa24570e2a
[ "MIT" ]
permissive
shahbagdadi/py-algo-n-ds
42981a61631e1a9af7d5ac73bdc894ac0c2a1586
f3026631cd9f3c543250ef1e2cfdf2726e0526b8
refs/heads/master
2022-11-27T19:13:47.348893
2022-11-14T21:58:51
2022-11-14T21:58:51
246,944,662
0
0
null
null
null
null
UTF-8
Python
false
false
1,664
py
from typing import List
from collections import deque
from collections import defaultdict


# A class to represent a graph. A graph
# is the list of the adjacency Matrix.
# Size of the array will be the no. of the
# vertices "V"
class Graph:
    def __init__(self, vertices, directed=True):
        self.V = vertices
        self.adjMatrix = []
        self.directed = directed
        for i in range(self.V):
            self.adjMatrix.append([0 for i in range(self.V)])

    # Function to add an edge in an undirected graph
    def add_edge(self, src, dest):
        if src == dest:
            print(f"Same vertex {src} and {dest}")
        self.adjMatrix[src][dest] = 1
        if not self.directed:
            self.adjMatrix[dest][src] = 1

    # Function to print the graph adj list
    def print_adj_list(self):
        for k in self.graph.keys():
            print(f"Adjacency list of vertex {k}\n {k}", end="")
            for n in self.graph[k]:
                print(f" -> {n}", end="")
            print(" \n")

    def print_adj_mat(self):
        print(self.adjMatrix)

    # Breadth First Traversal of graph
    def BFS(self, root):
        q = deque([root])
        visited = set([root])
        while q:
            node = q.pop()
            print(f'{node} => ', end="")
            for i, child in enumerate(self.adjMatrix[node]):
                if child == 1 and i not in visited:
                    visited.add(i)
                    q.appendleft(i)


g = Graph(5, True)
g.add_edge(0, 1)
g.add_edge(0, 4)
g.add_edge(1, 4)
g.add_edge(1, 3)
g.add_edge(1, 2)
g.add_edge(2, 3)
g.print_adj_mat()
print('====== BFS =====')
g.BFS(0)
print('\n')
[ "email.shanu@gmail.com" ]
email.shanu@gmail.com
1326a2e287de4aba98c8281869940dc914c7ec24
7db575150995965b0578f3b7c68567e07f5317b7
/tr2/models/transformer.py
f3799d94287770125fd19476f059cc6c49ce70a5
[]
no_license
anhdhbn/thesis-tr2
b14049cc3de517cdd9205239e4cf3d225d168e85
7a74bb1228f5493b37934f38a8d3e1ab5328fc3c
refs/heads/master
2023-03-27T21:51:57.763495
2021-02-26T12:51:20
2021-02-26T12:51:20
338,924,981
0
0
null
null
null
null
UTF-8
Python
false
false
4,876
py
import torch import torch.nn as nn from torch import Tensor from tr2.models.encoder import TransformerEncoder, TransformerEncoderLayer from tr2.models.decoder import TransformerDecoder, TransformerDecoderLayer class Transformer(nn.Module): def __init__(self, hidden_dims=512, num_heads = 8, num_encoder_layer=6, num_decoder_layer=6, dim_feed_forward=2048, dropout=.1 ): super().__init__() encoder_layer = TransformerEncoderLayer( hidden_dims=hidden_dims, num_heads=num_heads, dropout=dropout, dim_feedforward=dim_feed_forward ) self.encoder = TransformerEncoder( encoder_layer=encoder_layer, num_layers=num_encoder_layer ) decoder_layer = TransformerDecoderLayer( hidden_dims=hidden_dims, num_heads=num_heads, dropout=dropout, dim_feedforward=dim_feed_forward ) decoder_layer2 = TransformerDecoderLayer( hidden_dims=hidden_dims, num_heads=num_heads, dropout=dropout, dim_feedforward=dim_feed_forward ) self.decoder = TransformerDecoder(decoder_layer=decoder_layer, num_layers=num_decoder_layer) self.decoder2 = TransformerDecoder(decoder_layer=decoder_layer2, num_layers=num_decoder_layer) def _reset_parameters(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def forward(self, template: Tensor, mask_template: Tensor, pos_template: Tensor, search: Tensor, mask_search: Tensor, pos_search:Tensor) -> Tensor: """ :param src: tensor of shape [batchSize, hiddenDims, imageHeight // 32, imageWidth // 32] :param mask: tensor of shape [batchSize, imageHeight // 32, imageWidth // 32] Please refer to detr.py for more detailed description. :param query: object queries, tensor of shape [numQuery, hiddenDims]. :param pos: positional encoding, the same shape as src. :return: tensor of shape [batchSize, num_decoder_layer * WH, hiddenDims] """ # flatten NxCxHxW to HWxNxC bs, c, h, w = search.shape template = template.flatten(2).permute(2, 0, 1) # HWxNxC search = search.flatten(2).permute(2, 0, 1) # HWxNxC mask_template = mask_template.flatten(1) # NxHW mask_search = mask_search.flatten(1) # NxHW pos_template = pos_template.flatten(2).permute(2, 0, 1) # HWxNxC pos_search = pos_search.flatten(2).permute(2, 0, 1) # HWxNxC memory = self.encoder(template, src_key_padding_mask=mask_template, pos=pos_template) out = self.decoder(search, memory, memory_key_padding_mask=mask_template, pos_template=pos_template, pos_search=pos_search, tgt_key_padding_mask=mask_search) # num_decoder_layer x WH x N x C out2 = self.decoder2(search, memory, memory_key_padding_mask=mask_template, pos_template=pos_template, pos_search=pos_search, tgt_key_padding_mask=mask_search) # num_decoder_layer x WH x N x C return out.transpose(1, 2), out2.transpose(1, 2) def init(self, template, mask_template, pos_template): template = template.flatten(2).permute(2, 0, 1) # HWxNxC mask_template = mask_template.flatten(1) # NxHW pos_template = pos_template.flatten(2).permute(2, 0, 1) # HWxNxC return self.encoder(template, src_key_padding_mask=mask_template, pos=pos_template) def track(self, memory, mask_template, pos_template, search, mask_search, pos_search): search = search.flatten(2).permute(2, 0, 1) # HWxNxC mask_template = mask_template.flatten(1) # NxHW mask_search = mask_search.flatten(1) # NxHW pos_template = pos_template.flatten(2).permute(2, 0, 1) # HWxNxC pos_search = pos_search.flatten(2).permute(2, 0, 1) # HWxNxC out = self.decoder(search, memory, memory_key_padding_mask=mask_template, pos_template=pos_template, pos_search=pos_search, tgt_key_padding_mask=mask_search) out2 = self.decoder2(search, memory, 
memory_key_padding_mask=mask_template, pos_template=pos_template, pos_search=pos_search, tgt_key_padding_mask=mask_search) return out.transpose(1, 2), out2.transpose(1, 2) def build_transformer( hidden_dims=512, num_heads = 8, num_encoder_layer=6, num_decoder_layer=6, dim_feed_forward=2048, dropout=.1 ): return Transformer(hidden_dims=hidden_dims, num_heads = num_heads, num_encoder_layer = num_encoder_layer, num_decoder_layer = num_decoder_layer, dim_feed_forward = dim_feed_forward, dropout=dropout )
[ "anhdhbn@gmail.com" ]
anhdhbn@gmail.com
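A shape-level sketch of how the two-branch tracking transformer above might be driven. It assumes the tr2 package from this record is importable; the batch size, feature-map sizes and positional encodings below are made-up placeholders, and the masks follow the key_padding_mask convention (True = padded position).

import torch
from tr2.models.transformer import build_transformer  # module shown in the record above

model = build_transformer(hidden_dims=256, num_heads=8,
                          num_encoder_layer=2, num_decoder_layer=2)

n, c = 2, 256                                      # placeholder batch size / channels
template = torch.randn(n, c, 8, 8)                 # backbone features of the template crop
search = torch.randn(n, c, 16, 16)                 # backbone features of the search region
mask_t = torch.zeros(n, 8, 8, dtype=torch.bool)    # all False = nothing is padded
mask_s = torch.zeros(n, 16, 16, dtype=torch.bool)
pos_t = torch.randn(n, c, 8, 8)                    # positional encodings, same shape as the features
pos_s = torch.randn(n, c, 16, 16)

out1, out2 = model(template, mask_t, pos_t, search, mask_s, pos_s)
print(out1.shape)  # per the comments above: (num_decoder_layer, batch, H*W of search, hidden_dims)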
70b347db5fcd769831550b3fecad4822d3c19ea2
fe9573bad2f6452ad3e2e64539361b8bc92c1030
/Socket_programming/TLS_server.py
846eac13cb9e9873d922eb3e035275be919fb72a
[]
no_license
OceanicSix/Python_program
e74c593e2e360ae22a52371af6514fcad0e8f41f
2716646ce02db00306b475bad97105b260b6cd75
refs/heads/master
2022-01-25T16:59:31.212507
2022-01-09T02:01:58
2022-01-09T02:01:58
149,686,276
1
2
null
null
null
null
UTF-8
Python
false
false
1,090
py
#!/usr/bin/python3

import socket, ssl, pprint

html = """
HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n
<!DOCTYPE html><html><body><h1>This is Bank32.com!</h1></body></html>
"""

SERVER_CERT = './certs/mycert.crt'
SERVER_PRIVATE = './certs/mycert.key'

# context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)  # For Ubuntu 20.04 VM
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)        # For Ubuntu 16.04 VM
context.load_cert_chain(SERVER_CERT, SERVER_PRIVATE)

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.bind(('0.0.0.0', 4433))
sock.listen(5)

while True:
    newsock, fromaddr = sock.accept()
    try:
        ssock = context.wrap_socket(newsock, server_side=True)
        print("TLS connection established")
        data = ssock.recv(1024)                   # Read data over TLS
        pprint.pprint("Request: {}".format(data))
        ssock.sendall(html.encode('utf-8'))       # Send data over TLS
        ssock.shutdown(socket.SHUT_RDWR)          # Close the TLS connection
        ssock.close()
    except Exception as e:
        print("TLS connection fails")
        print(e)
        continue
[ "byan0007@student.monash.edu" ]
byan0007@student.monash.edu
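For completeness, a minimal client sketch that would talk to the server above, using only the standard library. The CA path and the relaxed hostname check are assumptions for a lab setup with the self-signed ./certs/mycert.crt from the record; a production client would keep full verification.

#!/usr/bin/python3
import socket, ssl

context = ssl.create_default_context()
context.load_verify_locations('./certs/mycert.crt')   # trust the server's self-signed cert (assumed path)
context.check_hostname = False                        # lab setup: the cert CN may not match the address

with socket.create_connection(('127.0.0.1', 4433)) as sock:
    with context.wrap_socket(sock, server_hostname='bank32.com') as ssock:
        ssock.sendall(b"GET / HTTP/1.1\r\nHost: bank32.com\r\n\r\n")
        print(ssock.recv(4096).decode('utf-8', errors='replace'))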
8f5307ec6a941ac8d84d56f251ad4dbd6cccadd2
d362a983e055984c588ee81c66ba17d536bae2f5
/backend/agent/migrations/0003_beautician.py
50fc77458670228cbb0dd38577ad9635cf06145d
[]
no_license
prrraveen/Big-Stylist-CRM
1d770b5ad28f342dfc5d40002ddc3ee7cc6f840a
6cd84ce7b01a49a09b844c27ecc4575dcca54393
refs/heads/master
2021-01-10T04:37:43.414844
2015-12-15T10:47:21
2015-12-15T10:47:21
49,239,402
1
0
null
null
null
null
UTF-8
Python
false
false
2,057
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('agent', '0002_service'),
    ]

    operations = [
        migrations.CreateModel(
            name='Beautician',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('gender', models.CharField(max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')])),
                ('marital_status', models.CharField(max_length=1, choices=[(b'M', b'married'), (b'S', b'Single')])),
                ('family_members', models.CharField(max_length=80, blank=True)),
                ('age', models.IntegerField(null=True, blank=True)),
                ('customer_rating', models.IntegerField(null=True, blank=True)),
                ('bs_rating', models.IntegerField(null=True, blank=True)),
                ('rating_by_service', models.IntegerField(null=True, blank=True)),
                ('phone_number', models.CharField(max_length=11)),
                ('alternate_number', models.CharField(max_length=11, blank=True)),
                ('address', models.CharField(max_length=1000, blank=True)),
                ('locality', models.CharField(max_length=100)),
                ('employment_status', models.CharField(blank=True, max_length=1, choices=[(b'0', b'Employed'), (b'1', b'Unemployed')])),
                ('availability', models.CharField(blank=True, max_length=2, choices=[(b'A', b'Available'), (b'NA', b'Single')])),
                ('Services', models.ManyToManyField(to='agent.Service', null=True, blank=True)),
                ('pincode', models.ForeignKey(related_name='beautician_pincode', blank=True, to='agent.Pincode', null=True)),
                ('serving_in', models.ManyToManyField(related_name='beautician_pincode_server_in', null=True, to='agent.Pincode', blank=True)),
            ],
        ),
    ]
[ "prrraveen@gmail.com" ]
prrraveen@gmail.com
5c1167fe99ba3fb29255afa69fa05a2a94c03178
6c2d219dec81b75ac1aef7f96f4e072ed7562f81
/scenes/siteVogov.py
2501bf763ea4538e32f964cf4d904cf6f7aeb93f
[]
no_license
SFTEAM/scrapers
7e2b0a159cb19907017216c16a976d630d883ba5
778f282bf1b6954aa06d265fdb6f2ecc2e3c8e47
refs/heads/main
2023-08-15T18:21:41.922378
2021-09-24T22:24:29
2021-09-24T22:24:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,781
py
import re import scrapy from tpdb.BaseSceneScraper import BaseSceneScraper class VogovSpider(BaseSceneScraper): name = 'Vogov' network = 'Vogov' parent = 'Vogov' site = 'Vogov' start_urls = [ 'https://vogov.com' ] selector_map = { 'title': '//meta[@property="og:title"]/@content', 'description': '//div[contains(@class,"info-video-description")]/p/text()', 'performers': '//div[contains(@class,"info-video-models")]/a/text()', 'date': '//li[contains(text(),"Release")]/span/text()', 'image': '//meta[@property="og:image"]/@content', 'tags': '//div[contains(@class,"info-video-category")]/a/text()', 'external_id': r'videos\/(.*)\/?', 'trailer': '//script[contains(text(),"video_url")]/text()', 'pagination': '/latest-videos/%s/' } def get_scenes(self, response): scenes = response.xpath('//div[@class="video-post"]/div/a/@href').getall() for scene in scenes: yield scrapy.Request(url=self.format_link(response, scene), callback=self.parse_scene, meta={'site': 'Vogov'}) def get_trailer(self, response): if 'trailer' in self.get_selector_map() and self.get_selector_map('trailer'): trailer = self.process_xpath( response, self.get_selector_map('trailer')).get() trailer = re.search(r'video_url:\ .*?(https:\/\/.*?\.mp4)\/', trailer).group(1) if trailer: return trailer return '' def get_tags(self, response): if self.get_selector_map('tags'): tags = self.process_xpath( response, self.get_selector_map('tags')).getall() return list(map(lambda x: x.strip().title(), tags)) return []
[ "briadin@yahoo.com" ]
briadin@yahoo.com
5e7f226554ea2fb5c3ea365c54d0f77bb1955e6d
180dc578d12fff056fce1ef8bd1ba5c227f82afc
/tensorflow_models/__init__.py
18eea8c6304e418deeb1b34e45a57fd437c81079
[ "Apache-2.0" ]
permissive
jianzhnie/models
6cb96c873d7d251db17afac7144c4dbb84d4f1d6
d3507b550a3ade40cade60a79eb5b8978b56c7ae
refs/heads/master
2023-07-12T05:08:23.314636
2023-06-27T07:54:20
2023-06-27T07:54:20
281,858,258
2
0
Apache-2.0
2022-03-27T12:53:44
2020-07-23T05:22:33
Python
UTF-8
Python
false
false
909
py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TensorFlow Models Libraries."""
# pylint: disable=wildcard-import
from tensorflow_models import nlp
from tensorflow_models import vision

from official import core
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling import tf_utils as utils
[ "gardener@tensorflow.org" ]
gardener@tensorflow.org
14bc7b551cf26a394151530e590ccdb32e250759
09e57dd1374713f06b70d7b37a580130d9bbab0d
/benchmark/startPyquil2701.py
b7aa787a468a8dbc6e74cfb46597078a617fc507
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
1,864
py
# qubit number=4 # total number=41 import pyquil from pyquil.api import local_forest_runtime, QVMConnection from pyquil import Program, get_qc from pyquil.gates import * import numpy as np conn = QVMConnection() def make_circuit()-> Program: prog = Program() # circuit begin prog += H(3) # number=32 prog += CZ(0,3) # number=33 prog += H(3) # number=34 prog += H(3) # number=26 prog += CZ(0,3) # number=27 prog += H(3) # number=28 prog += X(3) # number=24 prog += CNOT(0,3) # number=25 prog += CNOT(0,3) # number=12 prog += H(2) # number=29 prog += CZ(0,2) # number=30 prog += H(2) # number=31 prog += X(2) # number=21 prog += CNOT(0,2) # number=22 prog += H(1) # number=2 prog += H(2) # number=3 prog += H(3) # number=4 prog += H(0) # number=5 prog += Y(3) # number=36 prog += H(3) # number=16 prog += CZ(1,3) # number=17 prog += H(3) # number=18 prog += H(1) # number=6 prog += H(2) # number=37 prog += CNOT(1,0) # number=38 prog += Z(1) # number=39 prog += CNOT(1,0) # number=40 prog += H(2) # number=7 prog += H(3) # number=8 prog += H(0) # number=9 prog += CNOT(3,0) # number=13 prog += CNOT(3,0) # number=14 # circuit end return prog def summrise_results(bitstrings) -> dict: d = {} for l in bitstrings: if d.get(l) is None: d[l] = 1 else: d[l] = d[l] + 1 return d if __name__ == '__main__': prog = make_circuit() qvm = get_qc('4q-qvm') results = qvm.run_and_measure(prog,1024) bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T bitstrings = [''.join(map(str, l)) for l in bitstrings] writefile = open("../data/startPyquil2701.csv","w") print(summrise_results(bitstrings),file=writefile) writefile.close()
[ "wangjiyuan123@yeah.net" ]
wangjiyuan123@yeah.net
3811c8ded3c52d3c4297b523a72aef17f3e5b4ff
fe8bd31a416d7217c8b95d2ebf36158fdc0412de
/revscoring/languages/__init__.py
8076663b037c06ed1909940273411b71a9b88537
[ "MIT" ]
permissive
nealmcb/revscoring
f0020a9009e584a0f59576adcdd16eadae21ee06
e5c889093c4f49443d12193a2da725065c87e6d6
refs/heads/master
2021-01-11T11:32:10.684223
2015-10-21T22:34:56
2015-10-21T22:34:56
44,418,672
0
0
null
2015-10-17T01:16:45
2015-10-17T01:16:45
null
UTF-8
Python
false
false
788
py
""" This module implements a set of :class:`revscoring.Language` -- collections of features that are language specific. languages +++++++++ .. automodule:: revscoring.languages.english .. automodule:: revscoring.languages.french .. automodule:: revscoring.languages.hebrew .. automodule:: revscoring.languages.indonesian .. automodule:: revscoring.languages.persian .. automodule:: revscoring.languages.portuguese .. automodule:: revscoring.languages.spanish :members: .. automodule:: revscoring.languages.turkish :members: .. automodule:: revscoring.languages.vietnamese :members: Base classes ++++++++++++ .. automodule:: revscoring.languages.language .. automodule:: revscoring.languages.space_delimited """ from .language import Language __all__ = [Language]
[ "aaron.halfaker@gmail.com" ]
aaron.halfaker@gmail.com
7e3edaadd3fc130cc391da1bfd5cd75125fbd91d
78b7a0f04a92499d7c7479d22a6d6ed0494f51d4
/doc/future_bottumup.py
2f3208851514d1180279d9e67312187043ba02fe
[]
no_license
duchesnay/pylearn-epac
5a6df8a68dc121ed6f87720250f24d927d553a04
70b0a85b7614b722ce40c506dfcb2e0c7dca8027
refs/heads/master
2021-01-21T00:16:09.693568
2013-07-23T10:21:56
2013-07-23T10:21:56
6,781,768
2
0
null
null
null
null
UTF-8
Python
false
false
7,240
py
# -*- coding: utf-8 -*- """ Created on Thu May 23 15:21:35 2013 @author: ed203246 """ from sklearn import datasets from sklearn.svm import SVC from sklearn.lda import LDA from sklearn.feature_selection import SelectKBest X, y = datasets.make_classification(n_samples=12, n_features=10, n_informative=2) from epac import Methods, Pipe self = Methods(*[Pipe(SelectKBest(k=k), SVC(kernel=kernel, C=C)) for kernel in ("linear", "rbf") for C in [1, 10] for k in [1, 2]]) self = Methods(*[Pipe(SelectKBest(k=k), SVC(C=C)) for C in [1, 10] for k in [1, 2]]) import copy self.fit_predict(X=X, y=y) self.reduce() [l.get_key() for l in svms.walk_nodes()] [l.get_key(2) for l in svms.walk_nodes()] # intermediary key collisions: trig aggregation """ # Model selection using CV: CV + Grid # ----------------------------------------- from epac import CVBestSearchRefit # CV + Grid search of a simple classifier wf = CVBestSearchRefit(*[SVC(C=C) for C in [1, 10]], n_folds=3) wf.fit_predict(X=X, y=y) wf.reduce() """ """ import numpy as np results_list = \ {'Methods/SelectKBest(k=1)/SVC(kernel=linear,C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=1)/SVC(kernel=linear,C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=1)/SVC(kernel=rbf,C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=1)/SVC(kernel=rbf,C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(kernel=linear,C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(kernel=linear,C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(kernel=rbf,C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(kernel=rbf,C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}} import numpy as np run epac/utils.py run epac/workflow/base.py results_list=\ {'Methods/SelectKBest(k=1)/SVC(C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=1)/SVC(C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(C=1)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 
'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te': np.array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}, 'Methods/SelectKBest(k=2)/SVC(C=10)': {'pred_te': np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]), 'score_te': 0.83333333333333337, 'score_tr': 0.83333333333333337, 'true_te':np. array([1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0])}} """ keys_splited = [key_split(key, eval_args=True) for key in results_list.keys()] first = keys_splited[0] arg_grids = list() # list of [depth_idx, arg_idx, arg_name, [arg_values]] for i in xrange(len(first)): if len(first[i]) > 1: # has arguments for arg_idx in xrange(len(first[i][1])): arg_grids.append([i, arg_idx, first[i][1][arg_idx][0], [first[i][1][arg_idx][1]]]) # Check if Results can be stacked same depth, same node type a,d argument names # An enumerate all possible arguments values for other in keys_splited[1:]: if len(first) != len(other): print results_list.keys() raise ValueError("Results cannot be stacked: different depth") for i in xrange(len(first)): if first[i][0] != other[i][0]: print results_list.keys() raise ValueError("Results cannot be stacked: nodes have different type") if len(first[i]) > 1 and len(first[i][1]) != len(other[i][1]): print results_list.keys() raise ValueError("Results cannot be stacked: nodes have different length") if len(first[i]) > 1: # has arguments for arg_idx in xrange(len(first[i][1])): if first[i][1][arg_idx][0] != other[i][1][arg_idx][0]: print results_list.keys() raise ValueError("Results cannot be stacked: nodes have" "argument name") values = [item for item in arg_grids if i==item[0] and \ arg_idx==item[1]][0][3] values.append(other[i][1][arg_idx][1]) #values[i][1][arg_idx][1].append(other[i][1][arg_idx][1]) for grid in arg_grids: grid[3] = set(grid[3]) arg_grids @classmethod def stack_results(list_of_dict, axis_name=None, axis_values=[]): """Stack a list of Result(s) Example ------- >>> _list_of_dicts_2_dict_of_lists([dict(a=1, b=2), dict(a=10, b=20)]) {'a': [1, 10], 'b': [2, 20]} """ dict_of_list = dict() for d in list_of_dict: #self.children[child_idx].signature_args #sub_aggregate = sub_aggregates[0] for key2 in d.keys(): #key2 = sub_aggregate.keys()[0] result = d[key2] # result is a dictionary if isinstance(result, dict): if not key2 in dict_of_list.keys(): dict_of_list[key2] = dict() for key3 in result.keys(): if not key3 in dict_of_list[key2].keys(): dict_of_list[key2][key3] = ListWithMetaInfo() dict_of_list[key2][key3].axis_name = axis_name dict_of_list[key2][key3].axis_values = axis_values dict_of_list[key2][key3].append(result[key3]) else: # simply concatenate if not key2 in dict_of_list.keys(): dict_of_list[key2] = ListWithMetaInfo() dict_of_list[key2].axis_name = axis_name dict_of_list[key2].axis_values = axis_values dict_of_list[key2].append(result) return dict_of_list
[ "edouard.duchesnay@gmail.com" ]
edouard.duchesnay@gmail.com
2f6e31622147ccd5f16a2b68f420e3f8bf6471a0
c9c4536cebddfc3cc20f43084ccdb2ce1320b7e6
/experiments/utils.py
dfdf83bd5f97232f940217ba09e8103c9311d9a1
[ "MIT" ]
permissive
jdc08161063/gym-miniworld
adaf03db39fc47b88dfc5faa4f3f9e926c7f25ca
4e96db30cb574c6e0eb5db33e83c68a979094a7f
refs/heads/master
2020-04-06T17:06:23.350527
2018-11-14T19:47:17
2018-11-14T19:47:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,863
py
from functools import reduce import operator import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable class Print(nn.Module): """ Layer that prints the size of its input. Used to debug nn.Sequential """ def __init__(self): super(Print, self).__init__() def forward(self, x): print('layer input:', x.shape) return x class GradReverse(torch.autograd.Function): """ Gradient reversal layer """ def __init__(self, lambd=1): self.lambd = lambd def forward(self, x): return x.view_as(x) def backward(self, grad_output): return (grad_output * -self.lambd) def init_weights(m): classname = m.__class__.__name__ if classname.startswith('Conv'): nn.init.orthogonal_(m.weight.data) m.bias.data.fill_(0) elif classname.find('Linear') != -1: nn.init.xavier_uniform_(m.weight) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) def print_model_info(model): modelSize = 0 for p in model.parameters(): pSize = reduce(operator.mul, p.size(), 1) modelSize += pSize print(str(model)) print('Total model size: %d' % modelSize) def make_var(arr): arr = np.ascontiguousarray(arr) arr = torch.from_numpy(arr).float() arr = Variable(arr) if torch.cuda.is_available(): arr = arr.cuda() return arr def save_img(file_name, img): from skimage import io if isinstance(img, Variable): img = img.data.numpy() if len(img.shape) == 4: img = img.squeeze(0) img = img.astype(np.uint8) io.imsave(file_name, img) def load_img(file_name): from skimage import io # Drop the alpha channel img = io.imread(file_name) img = img[:,:,0:3] / 255 # Flip the image vertically img = np.flip(img, 0) # Transpose the rows and columns img = img.transpose(2, 0, 1) # Make it a batch of size 1 var = make_var(img) var = var.unsqueeze(0) return var def gen_batch(gen_data_fn, batch_size=2): """ Returns a tuple of PyTorch Variable objects gen_data is expected to produce a tuple """ assert batch_size > 0 data = [] for i in range(0, batch_size): data.append(gen_data_fn()) # Create arrays of data elements for each variable num_vars = len(data[0]) arrays = [] for idx in range(0, num_vars): vals = [] for datum in data: vals.append(datum[idx]) arrays.append(vals) # Make a variable out of each element array vars = [] for array in arrays: var = make_var(np.stack(array)) vars.append(var) return tuple(vars)
[ "maximechevalierb@gmail.com" ]
maximechevalierb@gmail.com
127d350e935ff500677c170ab861f0343b28e635
e7b312b4cc3355f4ca98313ef2ac9f3b0d81f245
/abc/229/g/g.TLE.py
756dc1a784847d74805a2326514ab64db4f673f6
[]
no_license
minus9d/programming_contest_archive
75466ab820e45ee0fcd829e6fac8ebc2accbbcff
0cb9e709f40460305635ae4d46c8ddec1e86455e
refs/heads/master
2023-02-16T18:08:42.579335
2023-02-11T14:10:49
2023-02-11T14:10:49
21,788,942
0
0
null
null
null
null
UTF-8
Python
false
false
1,868
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Found a similar problem
https://tutorialspoint.com/program-to-find
(minimum number of adjacent swaps needed to make N consecutive Y's)
and tried to solve this task with binary search on top of it, but got TLE.

I have not understood the code of that similar problem.

Re-solve this by following
https://twitter.com/kyopro_friends/status/1464593018451750919
"""

import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys

sys.setrecursionlimit(10 ** 9)


# https://www.tutorialspoint.com/program-to-find-minimum-adjacent-swaps-for-k-consecutive-ones-in-python
def calc_swap_num(nums, k):
    j = val = 0
    ans = 10 ** 100
    loc = []
    for i, x in enumerate(nums):
        if x:
            loc.append(i)
            m = (j + len(loc) - 1)//2
            val += loc[-1] - loc[m] - (len(loc)-j)//2
            if len(loc) - j > k:
                m = (j + len(loc))//2
                val -= loc[m] - loc[j] - (len(loc)-j)//2
                j += 1
            if len(loc)-j == k:
                ans = min(ans, val)
    return ans


def solve(S, K):
    nums = []
    for ch in S:
        if ch == 'Y':
            nums.append(1)
        else:
            nums.append(0)
    max_ans = sum(nums)

    # for i in range(1, max_ans + 1):
    #     print(i, calc_swap_num(nums, i))

    if max_ans == 0:
        return 0

    tmp = calc_swap_num(nums, max_ans)
    if tmp <= K:
        return max_ans

    lo = 1
    hi = max_ans
    while hi - lo > 1:
        mid = (lo + hi) // 2
        tmp = calc_swap_num(nums, mid)
        if tmp <= K:
            lo = mid
        else:
            hi = mid
    return lo


S = input()
K = int(input())
print(solve(S, K))
[ "minus9d@gmail.com" ]
minus9d@gmail.com
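As a follow-up to the TLE note in the docstring above, a hedged sketch of the usual prefix-sum formulation of "minimum adjacent swaps to gather k ones": work on positions[i] - i and sum distances to the window median. The function names are mine; combined with the same binary search over k this runs in O(|S| log |S|).

def gather_cost(positions, k):
    # positions: sorted indices of the 'Y's; cost in adjacent swaps to make k of them consecutive
    b = [p - i for i, p in enumerate(positions)]
    pre = [0]
    for x in b:
        pre.append(pre[-1] + x)
    best = float('inf')
    for lo in range(len(b) - k + 1):
        hi = lo + k
        mid = (lo + hi) // 2
        med = b[mid]
        left = med * (mid - lo) - (pre[mid] - pre[lo])     # pull the left half up to the median
        right = (pre[hi] - pre[mid]) - med * (hi - mid)    # pull the right half down to the median
        best = min(best, left + right)
    return best


def max_consecutive(S, K):
    pos = [i for i, ch in enumerate(S) if ch == 'Y']
    lo, hi = 0, len(pos)
    while lo < hi:                       # largest k with gather_cost(pos, k) <= K
        mid = (lo + hi + 1) // 2
        if gather_cost(pos, mid) <= K:
            lo = mid
        else:
            hi = mid - 1
    return lo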
67d1f7cd8d7bbddc37fe4bfd3e34c2c84521cfa4
5a8f9d8d1cc47ae83546b0e11279b1d891798435
/enumerate_reversible.py
fcce362a8588dd9d6a470e2f099a5cfab00ea31f
[ "MIT" ]
permissive
cjrh/enumerate_reversible
d81af841129adbad3a3d69a4955bfba202a174c7
d67044c78c1214c8749b60227d5c170d8c327770
refs/heads/master
2021-05-21T08:16:31.660568
2021-05-03T04:05:15
2021-05-03T04:05:15
252,613,686
0
0
MIT
2021-05-03T04:05:15
2020-04-03T02:27:01
Python
UTF-8
Python
false
false
438
py
original_enumerate = enumerate


def enumerate(iterable, start=0):
    class Inner:
        def __iter__(self):
            yield from original_enumerate(iterable, start=start or 0)

        def __reversed__(self):
            stt = start or 0
            rev = reversed(iterable)  # First, for accurate exception msg
            rng = range(len(iterable) - 1 + stt, -1 + stt, -1)
            yield from zip(rng, rev)

    return Inner()
[ "caleb.hattingh@gmail.com" ]
caleb.hattingh@gmail.com
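A small usage sketch for the wrapper above: importing the patched name shadows the builtin, after which reversed() works on the result (the sequence must support len() and reversed(), as the implementation requires).

from enumerate_reversible import enumerate  # shadows the builtin with the wrapper above

letters = ["a", "b", "c"]
for i, ch in reversed(enumerate(letters, start=1)):
    print(i, ch)            # prints: 3 c, then 2 b, then 1 a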
2b9e188a0d339e9e9ab6c6f43ca76d30a7100206
ca446c7e21cd1fb47a787a534fe308203196ef0d
/tests/graph/test_statement.py
4835355ef9be96cbb32485a349d98a91c0e3b83d
[ "MIT" ]
permissive
critocrito/followthemoney
1a37c277408af504a5c799714e53e0f0bd709f68
bcad19aedc3b193862018a3013a66869e115edff
refs/heads/master
2020-06-12T09:56:13.867937
2019-06-28T08:23:54
2019-06-28T08:23:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,279
py
# from nose.tools import assert_raises from unittest import TestCase from followthemoney import model from followthemoney.types import registry from followthemoney.graph import Statement, Node ENTITY = { 'id': 'test', 'schema': 'Person', 'properties': { 'name': 'Ralph Tester', 'birthDate': '1972-05-01', 'idNumber': ['9177171', '8e839023'], 'website': 'https://ralphtester.me', 'phone': '+12025557612', 'email': 'info@ralphtester.me', 'passport': 'passportEntityId' } } class StatementTestCase(TestCase): def test_base(self): prop = model.get_qname('Thing:name') node = Node(registry.entity, 'banana') stmt = Statement(node, prop, "Theodore Böln") assert stmt.subject == node value = stmt.to_tuple() other = stmt.from_tuple(model, value) assert other == stmt, (stmt, other) assert hash(other) == hash(stmt) assert repr(other) == repr(stmt) def test_invert(self): prop = model.get_qname('Thing:name') node = Node(registry.entity, 'banana') stmt = Statement(node, prop, "Theodore") assert not stmt.inverted inv = stmt.invert() assert inv.inverted assert inv.rdf() is None banana = Node(registry.entity, 'banana') peach = Node(registry.entity, 'peach') prop = model.get_qname('Thing:sameAs') stmt = Statement(banana, prop, peach.value) inv = stmt.invert() assert inv.subject == peach assert inv.value_node == banana assert inv.prop == stmt.prop def test_make_statements(self): statements = list(model.get_proxy(ENTITY).statements) assert len(statements) == 8, len(statements) def test_rdf(self): statements = list(model.get_proxy(ENTITY).statements) triples = [l.rdf() for l in statements] assert len(triples) == 8, len(triples) for (s, p, o) in triples: assert 'test' in s, s if str(o) == 'Ralph Tester': assert str(p) == 'http://www.w3.org/2004/02/skos/core#prefLabel' # noqa if p == registry.phone: assert str(o) == 'tel:+12025557612', o # assert False, triples
[ "friedrich@pudo.org" ]
friedrich@pudo.org
2d7d3d140684312694eeceace7b7556b9773c49c
2e22d14109f41ec84554a7994cd850619d73dc4d
/core/socketserver.py
acbb6e93f7ec0b453e50d8a8c82c25425d367983
[ "MIT" ]
permissive
magus0219/clockwork
35cefeac77e68c1b5e12ab275b7fde18fd07edfc
78c08afdd14f226d7f5c13af633d41a2185ebb7f
refs/heads/master
2021-01-10T07:49:09.539766
2015-09-28T08:17:46
2015-09-28T08:17:46
43,036,160
0
0
null
null
null
null
UTF-8
Python
false
false
4,035
py
# coding:utf-8 ''' Created on Feb 17, 2014 @author: magus0219 ''' import socket, logging, threading, pickle from core.command import Command def recv_until(socket, suffix): ''' Receive message suffixed with specified char @param socket:socket @param suffix:suffix ''' message = '' while not message.endswith(suffix): data = socket.recv(4096) if not data: raise EOFError('Socket closed before we see suffix.') message += data return message class SocketServer(object): ''' Socket Server This socket server is started by clockwork server and only used to invoke methods of JobManager ''' def __init__(self, host, port, jobmanager): ''' Constructor ''' self.host = host self.port = port self.jobmanager = jobmanager self.logger = logging.getLogger("Server.SocketThread") def handleCommand(self, command): ''' Handle one request command of client and return server's answer @param command:Command to handle This function return a Command object which contains result type and detail information. ''' cmd = command.cmd try: if cmd == Command.JOB_ADD: jobid = int(command.data) self.jobmanager.addJob(jobid) return Command(Command.RESULT_SUCCESS, "Successful!") elif cmd == Command.JOB_REMOVE: jobid = int(command.data) self.jobmanager.removeJob(jobid) return Command(Command.RESULT_SUCCESS, "Successful!") elif cmd == Command.JOB_RELOAD: jobid = int(command.data) self.jobmanager.reloadJob(jobid) return Command(Command.RESULT_SUCCESS, "Successful!") elif cmd == Command.TASK_RUN_IMMEDIATELY: jobid, params = command.data jobid = int(jobid) task = self.jobmanager.spawnImmediateTask(jobid=jobid, params=params) return Command(Command.RESULT_SUCCESS, "Successful!", task.get_taskid()) elif cmd == Command.TASK_CANCEL: taskid = command.data self.jobmanager.cancelTask(taskid) return Command(Command.RESULT_SUCCESS, "Successful!") elif cmd == Command.STATUS: return Command(Command.RESULT_SUCCESS, self.jobmanager.getServerStatus()) except ValueError, e: self.logger.exception(e) return Command(Command.RESULT_FAIL, str(e)) def process(self, conn, address): ''' Thread entry where new socket created ''' self.logger.info("Accepted a connection from %s" % str(address)) self.logger.info("Socket connects %s and %s" % (conn.getsockname(), conn.getpeername())) cmd = pickle.loads(recv_until(conn, '.')) self.logger.info("Recieve Command:[%s]" % str(cmd)) while cmd.cmd != Command.EXIT: conn.sendall(pickle.dumps(self.handleCommand(cmd))) cmd = pickle.loads(recv_until(conn, '.')) self.logger.info("Recieve Command:[%s]" % str(cmd)) self.logger.info("Socket is Over") def start(self): ''' Start the socket server and enter the main loop ''' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((self.host, self.port)) s.listen(10) self.logger.info("SocketThread is Listening at %s:%s" % (self.host, str(self.port))) while True: conn, address = s.accept() thread = threading.Thread(target=self.process, args=(conn, address)) thread.daemon = True thread.start() if __name__ == '__main__': server = SocketServer("0.0.0.0", 3993) server.start()
[ "magus0219@gmail.com" ]
magus0219@gmail.com
d3c54bcbc564892dfd88c419f85921faa603d6a6
a6610e191090e216b0e0f23018cecc5181400a7a
/robotframework-ls/tests/robotframework_ls_tests/test_code_analysis.py
dd34aae67913ad1d119eaee83156b557a906088b
[ "Apache-2.0" ]
permissive
JohanMabille/robotframework-lsp
d7c4c00157dd7c12ab15b7125691f7052f77427c
610f0257fdcd79b8c38107a0ecf600f60160bc1f
refs/heads/master
2023-01-19T10:29:48.982578
2020-11-25T13:46:22
2020-11-25T13:46:22
296,245,093
0
0
NOASSERTION
2020-09-17T06:58:54
2020-09-17T06:58:53
null
UTF-8
Python
false
false
5,420
py
def _collect_errors(workspace, doc, data_regression, basename=None, config=None): from robotframework_ls.impl.completion_context import CompletionContext from robotframework_ls.impl.code_analysis import collect_analysis_errors completion_context = CompletionContext(doc, workspace=workspace.ws, config=config) errors = [ error.to_lsp_diagnostic() for error in collect_analysis_errors(completion_context) ] data_regression.check(errors, basename=basename) def test_keywords_analyzed(workspace, libspec_manager, data_regression): workspace.set_root("case1", libspec_manager=libspec_manager) doc = workspace.get_doc("case1.robot") doc.source = doc.source + ( "\n This keyword does not exist" "\n [Teardown] Also not there" ) _collect_errors(workspace, doc, data_regression) def test_keywords_analyzed_templates(workspace, libspec_manager, data_regression): workspace.set_root("case1", libspec_manager=libspec_manager) doc = workspace.get_doc("case1.robot") doc.source = """*** Settings *** Test Template this is not there""" _collect_errors(workspace, doc, data_regression) def test_keywords_with_vars_no_error(workspace, libspec_manager, data_regression): workspace.set_root("case1", libspec_manager=libspec_manager) doc = workspace.get_doc("case1.robot") doc.source = ( doc.source + """ I check ls I execute "ls" rara "-lh" *** Keywords *** I check ${cmd} Log ${cmd} I execute "${cmd}" rara "${opts}" Log ${cmd} ${opts} """ ) _collect_errors(workspace, doc, data_regression) def test_keywords_with_prefix_no_error(workspace, libspec_manager, data_regression): workspace.set_root("case1", libspec_manager=libspec_manager) doc = workspace.get_doc("case1.robot") # Ignore bdd-related prefixes (see: robotframework_ls.impl.robot_constants.BDD_PREFIXES) doc.source = ( doc.source + """ given I check ls then I execute *** Keywords *** I check ${cmd} Log ${cmd} I execute Log foo """ ) _collect_errors(workspace, doc, data_regression, basename="no_error") def test_keywords_prefixed_by_library(workspace, libspec_manager, data_regression): workspace.set_root("case4", libspec_manager=libspec_manager) doc = workspace.get_doc("case4.robot") doc.source = """*** Settings *** Library String Library Collections Resource case4resource.txt *** Test Cases *** Test BuiltIn.Log Logging case4resource3.Yet Another Equal Redefined String.Should Be Titlecase Hello World ${list}= BuiltIn.Create List 1 2 Collections.Append To List ${list} 3""" _collect_errors(workspace, doc, data_regression, basename="no_error") def test_keywords_prefixed_with_alias(workspace, libspec_manager, data_regression): workspace.set_root("case4", libspec_manager=libspec_manager) doc = workspace.get_doc("case4.robot") doc.source = """*** Settings *** Library Collections WITH NAME Col1 *** Test Cases *** Test Col1.Append To List ${list} 3""" _collect_errors(workspace, doc, data_regression, basename="no_error") def test_keywords_name_matches(workspace, libspec_manager, data_regression): workspace.set_root("case4", libspec_manager=libspec_manager) doc = workspace.get_doc("case4.robot") doc.source = """*** Settings *** Library Collections *** Test Cases *** Test AppendToList ${list} 3""" _collect_errors(workspace, doc, data_regression, basename="no_error") def test_resource_does_not_exist(workspace, libspec_manager, data_regression): workspace.set_root("case4", libspec_manager=libspec_manager) doc = workspace.get_doc("case4.robot") doc.source = """*** Settings *** Library DoesNotExist Library . Library .. 
Library ../ Resource does_not_exist.txt Resource ${foo}/does_not_exist.txt Resource ../does_not_exist.txt Resource . Resource .. Resource ../ Resource ../../does_not_exist.txt Resource case4resource.txt *** Test Cases *** Test case4resource3.Yet Another Equal Redefined""" from robotframework_ls.robot_config import RobotConfig config = RobotConfig() # Note: we don't give errors if we can't resolve a resource. _collect_errors(workspace, doc, data_regression, basename="no_error", config=config) def test_casing_on_filename(workspace, libspec_manager, data_regression): from robocorp_ls_core.protocols import IDocument from pathlib import Path # i.e.: Importing a python library with capital letters fails #143 workspace.set_root("case4", libspec_manager=libspec_manager) doc: IDocument = workspace.get_doc("case4.robot") p = Path(doc.path) (p.parent / "myPythonKeywords.py").write_text( """ class myPythonKeywords(object): ROBOT_LIBRARY_VERSION = 1.0 def __init__(self): pass def Uppercase_Keyword (self): return "Uppercase does not work" """ ) doc.source = """*** Settings *** Library myPythonKeywords.py *** Test Cases *** Test Uppercase Keyword""" from robotframework_ls.robot_config import RobotConfig config = RobotConfig() # Note: we don't give errors if we can't resolve a resource. _collect_errors(workspace, doc, data_regression, basename="no_error", config=config)
[ "fabiofz@gmail.com" ]
fabiofz@gmail.com
9ddff1fa09a2a5c49b82729b44d4140b40e1fa55
cbdbb05b91a4463639deefd44169d564773cd1fb
/djangoproj/pos/invoices/migrations/0011_auto_20150718_0908.py
d24848402c8400bd25b51f8bef05d5d93aff8b99
[]
no_license
blazprog/py3
e26ef36a485809334b1d5a1688777b12730ebf39
e15659e5d5a8ced617283f096e82135dc32a8df1
refs/heads/master
2020-03-19T20:55:22.304074
2018-06-11T12:25:18
2018-06-11T12:25:18
136,922,662
0
0
null
null
null
null
UTF-8
Python
false
false
684
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('invoices', '0010_artikel_davek'),
    ]

    operations = [
        migrations.AddField(
            model_name='racunpozicija',
            name='txtNazivArtikla',
            field=models.CharField(max_length=25, default='naziv'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='racunpozicija',
            name='txtSifraArtikla',
            field=models.CharField(max_length=5, default='sifra'),
            preserve_default=False,
        ),
    ]
[ "blaz.korosec@mentis.si" ]
blaz.korosec@mentis.si
5416209788d81dbbb8263cbf9614f1608d323758
03bf031efc1f171f0bb3cf8a565d7199ff073f96
/utils/admin.py
ad5b99e1ae02e5c6358ca6949bc8b89a84e33e2a
[ "MIT" ]
permissive
emilps/onlineweb4
a213175678ac76b1fbede9b0897c538c435a97e2
6f4aca2a4522698366ecdc6ab63c807ce5df2a96
refs/heads/develop
2020-03-30T01:11:46.941170
2019-05-10T19:49:21
2019-05-10T19:49:21
150,564,330
0
0
MIT
2019-05-10T19:49:22
2018-09-27T09:43:32
Python
UTF-8
Python
false
false
802
py
from django.contrib import admin


class DepositWithdrawalFilter(admin.SimpleListFilter):
    """
    A simple filter to select deposits, withdrawals or empty transactions
    """
    title = 'Transaction type'
    parameter_name = 'amount'

    def lookups(self, request, model_admin):
        """
        Tuples with values for url and display term
        """
        return (
            ('positive', 'Deposit'),
            ('negative', 'Withdrawal'),
            ('empty', 'Empty')
        )

    def queryset(self, request, queryset):
        if self.value() == 'positive':
            return queryset.filter(amount__gt=0)
        if self.value() == 'negative':
            return queryset.filter(amount__lt=0)
        if self.value() == 'empty':
            return queryset.filter(amount=0)
[ "hlsolbjorg@gmail.com" ]
hlsolbjorg@gmail.com
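A sketch of how the filter above is typically wired into the Django admin; the Transaction model and its app path are hypothetical, standing in for any model with a numeric amount field.

from django.contrib import admin

from utils.admin import DepositWithdrawalFilter
from wallet.models import Transaction   # hypothetical app/model with an `amount` field


@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
    list_display = ('id', 'amount')
    list_filter = (DepositWithdrawalFilter,)   # adds the Deposit/Withdrawal/Empty sidebar filter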
12ada9555cc15be06cd931a4408c0bd361b6eb02
caf8cbcafd448a301997770165b323438d119f5e
/.history/chapter01/python_05_if_condition_20201128214052.py
9ee4357bc39e5526dabfbdaecafa8175ebd0349b
[ "MIT" ]
permissive
KustomApe/nerdape
03e0691f675f13ce2aefa46ee230111247e90c72
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
refs/heads/main
2023-01-23T10:13:26.584386
2020-11-28T22:29:49
2020-11-28T22:29:49
309,897,105
0
0
null
null
null
null
UTF-8
Python
false
false
977
py
"""[if文について] もし〜だったら、こうして """ # if 条件: # 実行するブロック # 条件によって処理を適応したい場合 # 3000kmごとにオイル交換しないといけない distance = 3403 # if distance > 3000: # print('オイル交換時期です') total = 123200 average = total / 3 print(average) if average > 3000: print('オイル交換時期ですよ!') # 文字列を比較する/リストを比較する # if 'abc' == "ABC": # print('同類です') # if 'CDE' == 'CDE': # print('同類です') # if 'あいうえお' == 'あいうえお': # print('同類です') # 文字列を検索する/リストの要素を検索する # if 'abc' in "ABC": # print('ヒットしました!') # if 'ドリフト' in '僕はドリフトが好きです': # print('ヒットしました!') # if 'japan' in 'japanese domestic market vehicle': # print('ヒットしました!') # else文 # elif文
[ "kustomape@gmail.com" ]
kustomape@gmail.com
e68d1c40e9032cb0617ca2a03de48c33736f012f
9bd1daa53a7e5d65d4f7a3558f11d06006ecb000
/conditioner/tests/actions/factories.py
f744f19bb34a5f1c829d51e2f8696013d030116f
[ "MIT" ]
permissive
pombredanne/django-conditioner
55b01ac8e42a8e2c73025934c39aa72ee478c333
d5d2ad1f016bc3e6b34c74ff68cd024e8fad5125
refs/heads/master
2020-09-25T21:16:29.274170
2017-03-17T08:34:00
2017-03-17T08:34:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
988
py
""" Conditioner module actions related factories """ import random import factory from faker import Factory as FakerFactory from conditioner.actions import LoggerAction, SendTemplatedEmailAction from conditioner.tests.factories import BaseActionFactory faker = FakerFactory.create() class LoggerActionFactory(BaseActionFactory): """ Factory for `conditioner.actions.misc.LoggerAction` model """ level = random.choice(LoggerAction.LEVEL_CHOICES)[0] message = factory.LazyAttribute(lambda n: faker.paragraph()) class Meta: model = LoggerAction class SendTemplatedEmailActionFactory(BaseActionFactory): """ Factory for `conditioner.actions.common.SendTemplatedEmailAction` model """ email = factory.LazyAttribute(lambda n: faker.email()) subject = factory.LazyAttribute(lambda n: faker.sentence()) template = factory.LazyAttribute(lambda n: faker.uri_path() + '.txt') class Meta: model = SendTemplatedEmailAction
[ "pawel.ad@gmail.com" ]
pawel.ad@gmail.com
1129328bacebf961f72d0c0b6cf180bcc0d9483c
ee6fb9095faef4c88848f5f769b296f672d37cd0
/photomosaic/imgutils.py
791cb03f378176e1dbb5c88b361c085704f9beeb
[]
no_license
cosmozhang1995/photo-mosaic
76ca2846db0eefd6d7ded117fec1b2ac06e823ea
f5c57a9765887aeeb65804c5597727646b945814
refs/heads/master
2022-07-10T14:13:10.605884
2020-02-14T08:51:08
2020-02-14T08:51:08
240,463,724
0
0
null
2022-06-22T01:05:54
2020-02-14T08:41:38
Python
UTF-8
Python
false
false
600
py
import cv2
import numpy as np

def resize_cut(srcimg, dstsize):
    dstheight, dstwidth = dstsize
    img = srcimg
    imgheight, imgwidth = img.shape[:2]
    sc = max(dstheight/imgheight, dstwidth/imgwidth)
    imgsize = (int(np.ceil(imgheight*sc)), int(np.ceil(imgwidth*sc)))
    img = cv2.resize(img, (imgsize[1], imgsize[0]))
    imgheight, imgwidth = img.shape[:2]
    imgcut = (int((imgheight-dstheight)/2), int((imgwidth-dstwidth)/2))
    imgcuttop, imgcutleft = imgcut
    imgcutbottom, imgcutright = (imgcuttop + dstheight, imgcutleft + dstwidth)
    img = img[imgcuttop:imgcutbottom, imgcutleft:imgcutright, :]
    return img
[ "cosmozhang1995@gmail.com" ]
cosmozhang1995@gmail.com
b4ffb6e7aa7720bf94408c4205e0d631a33ccac7
c7d7bafdff29a9e0f91bec25e88b8db1b6694643
/firebot/modules/mf.py
37002a949a6c8212931fce04e72ad19042a64323
[ "MIT" ]
permissive
ultra-noob/Vivek-UserBot
ebedb80d98ca72fe1167211c14e32c017fcdf903
6c371a4aaa0c05397efa36237e9a2118deeb0d91
refs/heads/main
2023-07-11T16:52:37.696359
2021-08-11T03:38:15
2021-08-11T03:38:15
394,882,145
0
1
null
2021-08-11T06:11:45
2021-08-11T06:11:45
null
UTF-8
Python
false
false
2,724
py
import sys from telethon import __version__, functions from firebot import CMD_HELP from firebot.utils import fire_on_cmd @fire.on(fire_on_cmd(pattern="mf ?(.*)", allow_sudo=True)) # pylint:disable=E0602 async def _(event): if event.fwd_from: return splugin_name = event.pattern_match.group(1) if splugin_name in borg._modules: s_help_string = borg._modules[splugin_name].__doc__ else: s_help_string = "" help_string = """ ......................................../´¯/) ......................................,/¯../ ...................................../..../ ..................................../´.¯/ ..................................../´¯/ ..................................,/¯../ ................................../..../ ................................./´¯./ ................................/´¯./ ..............................,/¯../ ............................./..../ ............................/´¯/ ........................../´¯./ ........................,/¯../ ......................./..../ ....................../´¯/ ....................,/¯../ .................../..../ ............./´¯/'...'/´¯¯`·¸ ........../'/.../..../......./¨¯\ ........('(...´...´.... ¯~/'...') .........\.................'...../ ..........''...\.......... _.·´ ............\..............( ..............\.............\... """.format( sys.version, __version__ ) tgbotusername = Config.TG_BOT_USER_NAME_BF_HER # pylint:disable=E0602 if tgbotusername is not None: results = await borg.inline_query( # pylint:disable=E0602 tgbotusername, help_string + "\n\n" + s_help_string ) await results[0].click( event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True ) await event.delete() else: await event.reply(help_string + "\n\n" + s_help_string) await event.delete() @fire.on(fire_on_cmd(pattern="dc")) # pylint:disable=E0602 async def _(event): if event.fwd_from: return result = await borg(functions.help.GetNearestDcRequest()) # pylint:disable=E0602 await event.edit(result.stringify()) @fire.on(fire_on_cmd(pattern="config")) # pylint:disable=E0602 async def _(event): if event.fwd_from: return result = await borg(functions.help.GetConfigRequest()) # pylint:disable=E0602 result = result.stringify() logger.info(result) # pylint:disable=E0602 await event.edit("""Telethon UserBot powered by @UniBorg""") CMD_HELP.update( { "mf": "**Mf**\ \n\n**Syntax : **`.mf`\ \n**Usage :** funny plugin.\ \n\n**Syntax : **`.dc`\ \n**Usage :** shows nearest Dc." } )
[ "noreply@github.com" ]
ultra-noob.noreply@github.com
98ad144923dfcbae14b423be115a14fbb1c611c4
150464efa69db3abf328ef8cd912e8e248c633e6
/_4.python/__code/Pythoneer-master/Jumbled Word/Jumbled(withouttkinter).py
7211039e1c6397134008308e8017b3165e1a9494
[]
no_license
bunshue/vcs
2d194906b7e8c077f813b02f2edc70c4b197ab2b
d9a994e3afbb9ea84cc01284934c39860fea1061
refs/heads/master
2023-08-23T22:53:08.303457
2023-08-23T13:02:34
2023-08-23T13:02:34
127,182,360
6
3
null
2023-05-22T21:33:09
2018-03-28T18:33:23
C#
UTF-8
Python
false
false
1,397
py
import os
import sys
from collections import defaultdict

print " ";
print "................................Jumbled ......................................";
print "NOTE : Please make sure, you enter all the letters necessary to make the word!";
print " ";
print " ";
# This script is Python 2 (print statements); raw_input reads the word as a
# string, whereas Python 2's input() would try to evaluate it and fail.
word = raw_input("Enter the word: ")
print " ";
#word = sys.argv[1]
word1 = word
#print word1
leng=len(word)
no = leng
chek=''
dict = defaultdict(list)
#word = input("Enter the : ")
word = word.lower()
word = sorted(word)
word = ''.join(word)
word = "\n"+word
word = word.replace(" ", "")
file = open("C:\Python27\Jumbled\Dictionary.txt", "r")
line = file.readline()
print " "
count = 0;
while line:
    if(line!=None):
        line = file.readline()
        j = line
        line = sorted(line)
        line = ''.join(line)
        j = ''.join(j)
        k = sorted(j)
        k = ''.join(k)
        k = k.lower()
        if (word==k):
            if(count<1):
                print "Solution : "+j+"\n",
            count=count+1;
            if(count>1):
                print "Another Combnation : "+j
            if(j=="mazahir"):
                print "'Mazahir' here! :), Hope you liked my program :D"
            #dict[word].append(k)
file.close()

fo = open("C:/Mazahir/now.txt", "w")
line = fo.write( j )
fo.close()
file = open("C:/Mazahir/now1.txt", "w")
file.write( str(no) )
file.close()
[ "david@insighteyes.com" ]
david@insighteyes.com
7e6c1f50001acdca960cd972aca451db26803155
68ea05d0d276441cb2d1e39c620d5991e0211b94
/1940.py
a6e8f345cdc993bcae8b3a828ab9b0865b506f3b
[]
no_license
mcavalca/uri-python
286bc43aa157d3a6880dc222e0136c80cf079565
e22875d2609fe7e215f9f3ed3ca73a1bc2cf67be
refs/heads/master
2021-11-23T08:35:17.614443
2021-10-05T13:26:03
2021-10-05T13:26:03
131,339,175
50
27
null
2021-11-22T12:21:59
2018-04-27T19:54:09
Python
UTF-8
Python
false
false
236
py
j, r = [int(x) for x in input().split()]
entrada = list(map(int, input().split()))
pontos = [0] * j
for k in range(j):
    pontos[k] = sum(entrada[k::j])
pontos = pontos[::-1]
vencedor = j - pontos.index(max(pontos))
print(vencedor)
[ "m.cavalca@hotmail.com" ]
m.cavalca@hotmail.com
6a26301089da81a8e292227e32da92a3e05f82e2
f7d343efc7b48818cac4cf9b98423b77345a0067
/training/Permutations.py
10acfdf1568289dd3b55bcf473e76239ead669a4
[]
no_license
vijaymaddukuri/python_repo
70e0e24d0554c9fac50c5bdd85da3e15c6f64e65
93dd6d14ae4b0856aa7c6f059904cc1f13800e5f
refs/heads/master
2023-06-06T02:55:10.393125
2021-06-25T16:41:52
2021-06-25T16:41:52
151,547,280
0
1
null
null
null
null
UTF-8
Python
false
false
845
py
def permutations1(string):
    def factorial(n):
        fact=1
        for i in range(1,n+1):
            fact=fact*i
        return fact

    repeat=len(string)-len(''.join(set(string)))
    n=factorial(len(string))
    k=factorial(repeat)
    # use integer division so range() receives an int (n / k**repeat is a float in Python 3)
    loop=n//(k**repeat)
    final=[]
    j=0
    for i in range(loop):
        if i>=2:
            j=0
        else:
            j+=1
        new = string[j-1:] + string[j-1:]
        final.append(new)
        string=new
    return final
    return loop


def permutations(string):
    result = set(string)
    if len(string) == 2:
        result.add(string[1] + string[0])
    elif len(string) > 2:
        for i, c in enumerate(string):
            for s in permutations(string[:i] + string[i + 1:]):
                result.add(c + s)
    return list(result)


a='abc'
per=permutations(a)
print(per)
[ "Vijay.Maddukuri@virtustream.com" ]
Vijay.Maddukuri@virtustream.com
e48b80cea00aad77f599556a86f3688235cc9a93
6cfa568e2012dde5c86265226b0dd3a49849c7f7
/website_sale_booking/__openerp__.py
6573e2e4ae8e491f0af198340e85248ca9f2cfc3
[]
no_license
arinno/odoo-website-sale-booking
c48771ee30dc8791656a7a9d75efa14fe07f88bc
dd2e45873e64ad0f5bdd24a23d905b70702cd85a
refs/heads/master
2021-01-09T06:23:01.904899
2017-02-05T07:10:21
2017-02-05T07:10:21
80,975,669
0
0
null
2017-02-05T07:06:24
2017-02-05T07:06:24
null
UTF-8
Python
false
false
425
py
{
    'name': 'Website Booking System',
    'category': 'sale',
    'description': """
OpenERP Website Booking System view.
==========================
""",
    'version': '1.0',
    'js': [
    ],
    'css': [
    ],
    'author': 'Vertel AB',
    'website': 'http://www.vertel.se',
    'depends': ['website', 'product', 'hr_contract', 'resource'],
    'data': ['view/website_sale_booking.xml'],
    'installable': True,
}
[ "apollo_zhj@msn.com" ]
apollo_zhj@msn.com
883364571d231534b05121da2095291109c936e8
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/adjectives/_handpicked.py
d8b02e24092556f8401358860df23874bd852d2b
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
448
py
# class header
class _HANDPICKED():
    def __init__(self,):
        self.name = "HANDPICKED"
        self.definitions = [u'Someone who is handpicked has been carefully chosen for a special job or purpose: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'adjectives'

    def run(self, obj1, obj2):
        self.jsondata[obj2] = {}
        self.jsondata[obj2]['properties'] = self.name.lower()
        return self.jsondata
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
e7694b0db2814d86d4fe4e4c05b90604614b2138
91f30c829664ff409177e83776c9f4e2e98d9fc4
/apps/events/migrations/0002_auto_20180607_0411.py
0436f1e6422da2a9e882d0161aa9c56529c7231f
[]
no_license
TotalityHacks/madras
3ac92dc6caf989efcb02590f6474ab333d1f93fa
2395a703eed1a87cca3cdd6c0fb9162b69e8df27
refs/heads/master
2021-08-17T15:29:41.055074
2018-07-18T23:05:29
2018-07-18T23:05:29
105,232,414
4
5
null
2021-03-31T18:58:56
2017-09-29T05:13:41
Python
UTF-8
Python
false
false
581
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-06-07 04:11
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='description',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='event',
            name='title',
            field=models.CharField(max_length=40),
        ),
    ]
[ "ezwang1@gmail.com" ]
ezwang1@gmail.com
d4cb34b70a91e2240a08ad427a015525c61d1b39
7f8db5b974a747632729d16c431de7aca007af00
/0x11-python-network_1/8-json_api.py
482167a08bb420f6d28ad7b10c9d98d4c2ec9cbe
[]
no_license
thomasmontoya123/holbertonschool-higher_level_programming
6f5ceb636167efba1e36ed2dee7bf83b458f6751
48b7c9dccac77ccb0f57da1dc1d150f356612b13
refs/heads/master
2020-07-22T22:31:13.744490
2020-02-13T22:54:17
2020-02-13T22:54:17
207,351,235
0
0
null
null
null
null
UTF-8
Python
false
false
594
py
#!/usr/bin/python3
'''sends a POST request with the letter as a parameter.'''

if __name__ == "__main__":
    import requests
    from sys import argv

    url = 'http://0.0.0.0:5000/search_user'

    if len(argv) == 2:
        values = {'q': argv[1]}
        result = requests.post(url, data=values)
        try:
            json = result.json()
            if json:
                print("[{}] {}".format(json.get("id"), json.get("name")))
            else:
                print("No result")
        except Exception:
            print("Not a valid JSON")
    else:
        print("No result")
[ "tomasmontoya123@gmail.com" ]
tomasmontoya123@gmail.com
43f78419297092954ae2d68c3e9a6c3cdeb59b73
8bb2842aa73676d68a13732b78e3601e1305c4b2
/1920.py
5ce8ca2d8ffeda7120bfef402770ca16c94a7353
[]
no_license
Avani18/LeetCode
239fff9c42d2d5703c8c95a0efdc70879ba21b7d
8cd61c4b8159136fb0ade96a1e90bc19b4bd302d
refs/heads/master
2023-08-24T22:25:39.946426
2021-10-10T20:36:07
2021-10-10T20:36:07
264,523,162
0
0
null
null
null
null
UTF-8
Python
false
false
277
py
# Build Array from Permutation

class Solution(object):
    def buildArray(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        ans = []
        for i in range(len(nums)):
            ans.append(nums[nums[i]])
        return ans
[ "noreply@github.com" ]
Avani18.noreply@github.com
3323c4ec71a8a7d1a3ac28964a61aeacbeb33fd6
196eb2f5e3366987d7285bf980ac64254c4aec16
/supervised/util.py
4dbf6da6ffafcd128d865320bca7ad87e38b8408
[ "MIT" ]
permissive
mfouda/codenames
f54e0c4366edbf65251aadefddef1fda6cd7de9d
ccd0bd7578b3deedeec60d0849ec4ebca48b6426
refs/heads/master
2022-01-07T02:03:42.529978
2018-12-21T13:16:27
2018-12-21T13:16:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
595
py
import time

import matplotlib.pyplot as plt

plt.switch_backend('agg')
import matplotlib.ticker as ticker  # noqa: E402


def as_minutes(s):
    m = s // 60
    s -= m * 60
    return '%dm %ds' % (m, s)


def time_since(since, percent):
    now = time.time()
    s = now - since
    es = s / percent
    rs = es - s
    return '%s (- %s)' % (as_minutes(s), as_minutes(rs))


def show_plot(points):
    plt.figure()
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
[ "lukas.jendele@gmail.com" ]
lukas.jendele@gmail.com
7b80ef96fa22fd633d5e959b647de4b9f16faedc
c7f98de17088cb4df6c171f1e76614beb1f4e0f7
/modules/vulnerability-analysis/w3af.py
73cb1fc29cbd307d7038d2d133f512666a097029
[]
no_license
fryjustinc/ptf
6262ca5b94a43a51e984d3eee1649a16584b597b
ba85f9e867b65b4aa4f06b6232207aadac9782c9
refs/heads/master
2020-03-31T09:43:44.474563
2018-10-08T18:39:03
2018-10-08T18:39:03
152,107,950
0
0
null
2018-10-08T16:00:37
2018-10-08T16:00:37
null
UTF-8
Python
false
false
244
py
#!/usr/bin/env python

#####################################
# Installation module for w3af
#####################################
AUTHOR="Justin Fry"
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/andresriancho/w3af"
LAUNCHER="w3af"
[ "fryjustinc@gmail.com" ]
fryjustinc@gmail.com
47bcf163541fb59722252c3f339c87df5bc27d1b
a8a5772674e62beaa4f5b1f115d280103fd03749
/metstationdistance.py
06718ded3fcc86dd7af918765369d469f2ed4e6b
[]
no_license
tahentx/pv_workbook
c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7
08912b0ef36a5226d23fa0430216a3f277aca33b
refs/heads/master
2022-12-12T20:39:35.688510
2021-03-30T03:20:54
2021-03-30T03:20:54
172,827,250
0
1
null
2022-12-08T16:47:39
2019-02-27T02:25:24
Python
UTF-8
Python
false
false
591
py
import csv
from haversine import haversine

with open('tucson.csv') as file:
    has_header = csv.Sniffer().has_header(file.read(1024))
    file.seek(0)
    met = csv.reader(file)
    if has_header:
        next(met)
    met_list = list(met)
    coords = []
    for x in met_list:
        coords.append(x[1:])
    print(coords)
#
# def find_backup_metstation(coordinates: list) -> list:
#     backup_mets = []
#     for i in range(len(coordinates)-1):
#         backup_mets.append(haversine(coordinates[i], coordinates[i + 1],unit='mi'))
#         print(type(coordinates[i]))
#
# find_backup_metstation(coords)
[ "hendricks.ta@gmail.com" ]
hendricks.ta@gmail.com
ce1d4c9f9dae392dbcd0c9e6cec095469f9b8092
0fccee4c738449f5e0a8f52ea5acabf51db0e910
/genfragments/ThirteenTeV/BulkGraviton/BulkGraviton_VBF_WW_inclu_narrow_M4000_13TeV-madgraph_cff.py
2bcf409e757355e8832799dc74ad7539b4678a06
[]
no_license
cms-sw/genproductions
f308ffaf3586c19b29853db40e6d662e937940ff
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
refs/heads/master
2023-08-30T17:26:02.581596
2023-08-29T14:53:43
2023-08-29T14:53:43
11,424,867
69
987
null
2023-09-14T12:41:28
2013-07-15T14:18:33
Python
UTF-8
Python
false
false
797
py
import FWCore.ParameterSet.Config as cms

# link to cards:
# https://github.com/cms-sw/genproductions/tree/91ab3ea30e3c2280e4c31fdd7072a47eb2e5bdaa/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-2/BulkGraviton_VBF_WW_inclu/BulkGraviton_VBF_WW_inclu_narrow_M4000

externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-2/BulkGraviton_VBF_WW_inclu/narrow/v1/BulkGraviton_VBF_WW_inclu_narrow_M4000_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(5000),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
[ "sheffield@physics.rutgers.edu" ]
sheffield@physics.rutgers.edu
c441b84ad77af9e2410f70a7eb69c516673a72a5
8e67d8618b9be7c777597b650876fa20082a6ebb
/14501.py
74d650e562528e9b9e0e32bd3b717523cf2ba523
[]
no_license
ljm9748/practice_codingtest
c5a2cc315c1ccd8f48a9424d13d2097c9fed0efc
367710238976c1a2f8b42bfc3fc2936c47b195c5
refs/heads/master
2023-01-14T12:29:32.530648
2020-11-18T17:49:50
2020-11-18T17:49:50
282,162,451
0
0
null
null
null
null
UTF-8
Python
false
false
284
py
n=int(input())
myinp=[]
for _ in range(n):
    myinp.append(list(map(int, input().split())))
dp=[0]*(n)
for i in range(n):
    day=myinp[i][0]
    val=myinp[i][1]
    if i+day-1<=(n-1):
        for j in range(i+day-1, n):
            dp[j]=max(dp[j], dp[i+day-2]+val)
print(dp[n-1])
[ "ljm9748@naver.com" ]
ljm9748@naver.com
2855a822a742a7fbeb6e50814966d36d5e36be0c
61a856d931688a49435b3caab4e9d674ca2a32aa
/tests/test_zeroDS.py
27f6636792c8ed111a8de6cd4169b2281cecf6d1
[ "Apache-2.0" ]
permissive
kvt0012/NeMo
3c9803be76c7a2ef8d5cab6995ff1ef058144ffe
6ad05b45c46edb5d44366bd0703915075f72b4fc
refs/heads/master
2020-08-14T16:59:18.702254
2019-10-14T22:46:48
2019-10-14T22:46:48
215,203,912
1
0
Apache-2.0
2019-10-15T04:05:37
2019-10-15T04:05:34
null
UTF-8
Python
false
false
4,613
py
import unittest import os import tarfile import torch from ruamel.yaml import YAML from nemo.core.neural_types import * from .context import nemo, nemo_asr from .common_setup import NeMoUnitTest class TestZeroDL(NeMoUnitTest): labels = ["'", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", " "] manifest_filepath = "tests/data/asr/an4_train.json" yaml = YAML(typ="safe") def setUp(self) -> None: super().setUp() data_folder = "tests/data/" print("Looking up for test ASR data") if not os.path.exists(data_folder + "nemo_asr"): print(f"Extracting ASR data to: {data_folder + 'nemo_asr'}") tar = tarfile.open("tests/data/asr.tar.gz", "r:gz") tar.extractall(path=data_folder) tar.close() else: print("ASR data found in: {0}".format(data_folder + "asr")) def test_simple_train(self): print("Simplest train test with ZeroDL") neural_factory = nemo.core.neural_factory.NeuralModuleFactory( backend=nemo.core.Backend.PyTorch, create_tb_writer=False) trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4) data_source = nemo.backends.pytorch.common.ZerosDataLayer( size=10000, dtype=torch.FloatTensor, batch_size=128, output_ports={ "x": NeuralType({ 0: AxisType(BatchTag), 1: AxisType(ChannelTag, dim=1)}), "y": NeuralType({ 0: AxisType(BatchTag), 1: AxisType(ChannelTag, dim=1)})}) loss = nemo.backends.pytorch.tutorials.MSELoss() x, y = data_source() y_pred = trainable_module(x=x) loss_tensor = loss(predictions=y_pred, target=y) callback = nemo.core.SimpleLossLoggerCallback( tensors=[loss_tensor], print_func=lambda x: print(f'Train Loss: {str(x[0].item())}')) neural_factory.train( [loss_tensor], callbacks=[callback], optimization_params={"num_epochs": 3, "lr": 0.0003}, optimizer="sgd") def test_asr_with_zero_ds(self): print("Testing ASR NMs with ZeroDS and without pre-processing") with open("tests/data/jasper_smaller.yaml") as file: jasper_model_definition = self.yaml.load(file) dl = nemo.backends.pytorch.common.ZerosDataLayer( size=100, dtype=torch.FloatTensor, batch_size=4, output_ports={ "processed_signal": NeuralType( {0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag, dim=64), 2: AxisType(ProcessedTimeTag, dim=64)}), "processed_length": NeuralType( {0: AxisType(BatchTag)}), "transcript": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag, dim=64)}), "transcript_length": NeuralType({0: AxisType(BatchTag)}) }) jasper_encoder = nemo_asr.JasperEncoder( feat_in=jasper_model_definition['AudioPreprocessing']['features'], **jasper_model_definition["JasperEncoder"]) jasper_decoder = nemo_asr.JasperDecoderForCTC( feat_in=1024, num_classes=len(self.labels) ) ctc_loss = nemo_asr.CTCLossNM(num_classes=len(self.labels)) # DAG processed_signal, p_length, transcript, transcript_len = dl() encoded, encoded_len = jasper_encoder(audio_signal=processed_signal, length=p_length) # print(jasper_encoder) log_probs = jasper_decoder(encoder_output=encoded) loss = ctc_loss(log_probs=log_probs, targets=transcript, input_length=encoded_len, target_length=transcript_len) callback = nemo.core.SimpleLossLoggerCallback( tensors=[loss], print_func=lambda x: print(f'Train Loss: {str(x[0].item())}')) # Instantiate an optimizer to perform `train` action neural_factory = nemo.core.NeuralModuleFactory( backend=nemo.core.Backend.PyTorch, local_rank=None, create_tb_writer=False) neural_factory.train( [loss], callbacks=[callback], optimization_params={"num_epochs": 2, "lr": 0.0003}, optimizer="sgd")
[ "okuchaiev@nvidia.com" ]
okuchaiev@nvidia.com
eaebb4666a97d396903989fc5c9df6e3c92ebdc2
e13091c137650cd31c8d9778087b369033d0cf96
/src/main/python/algo_expert/Algorithm Implementation /Sort/selection_sort.py
db348f63325d79466c26d8e70fdde8fcced1ec7b
[]
no_license
jwoojun/CodingTest
634e2cfe707b74c080ddbe5f32f58c1e6d849968
d62479d168085f13e73dfc1697c5438a97632d29
refs/heads/master
2023-08-22T09:03:32.392293
2021-10-31T01:00:33
2021-10-31T01:00:33
300,534,767
0
0
null
null
null
null
UTF-8
Python
false
false
253
py
# selection_sort
def selection_sort(lst) :
    for i in range(len(lst)-1) :
        min_ = i
        for j in range(i+1, len(lst)) :
            if lst[min_] > lst[j] :
                min_ = j
        lst[i], lst[min_] = lst[min_], lst[i]
    return lst
[ "jjwjun10@gmail.com" ]
jjwjun10@gmail.com
6aff8bc6b2649dd67495d446bfea943bd810d87e
24e7e0dfaaeaca8f911b40fcc2937342a0f278fd
/venv/Lib/site-packages/pygments/plugin.py
76e8f6cb61c2c456a487266d8ae4197c7a0293af
[ "MIT" ]
permissive
BimiLevi/Covid19
90e234c639192d62bb87364ef96d6a46d8268fa0
5f07a9a4609383c02597373d76d6b6485d47936e
refs/heads/master
2023-08-04T13:13:44.480700
2023-08-01T08:36:36
2023-08-01T08:36:36
288,455,446
1
0
MIT
2021-01-22T19:36:26
2020-08-18T12:53:43
HTML
UTF-8
Python
false
false
1,734
py
# -*- coding: utf-8 -*-
"""
    pygments.plugin
    ~~~~~~~~~~~~~~~

    Pygments setuptools plugin interface. The methods defined
    here also work if setuptools isn't installed but they just
    return nothing.

    lexer plugins::

        [pygments.lexers]
        yourlexer = yourmodule:YourLexer

    formatter plugins::

        [pygments.formatters]
        yourformatter = yourformatter:YourFormatter
        /.ext = yourformatter:YourFormatter

    As you can see, you can define extensions for the formatter
    with a leading slash.

    syntax plugins::

        [pygments.styles]
        yourstyle = yourstyle:YourStyle

    filter plugin::

        [pygments.filter]
        yourfilter = yourfilter:YourFilter


    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'


def iter_entry_points(group_name):
    try:
        import pkg_resources
    except (ImportError, IOError):
        return []

    return pkg_resources.iter_entry_points(group_name)


def find_plugin_lexers():
    for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
        yield entrypoint.load()


def find_plugin_formatters():
    for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_styles():
    for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_filters():
    for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()
[ "50989568+BimiLevi@users.noreply.github.com" ]
50989568+BimiLevi@users.noreply.github.com
964d74884c5d4fe523268950f181853cad302a7e
f77028577e88d228e9ce8252cc8e294505f7a61b
/web_backend/nvlserver/module/hw_module/specification/get_hw_module_specification.py
2c708377f8fa3bf925fcbfc53584e58eb50737ec
[]
no_license
Sud-26/Arkally
e82cebb7f907a3869443b714de43a1948d42519e
edf519067d0ac4c204c12450b6f19a446afc327e
refs/heads/master
2023-07-07T02:14:28.012545
2021-08-06T10:29:42
2021-08-06T10:29:42
392,945,826
0
0
null
null
null
null
UTF-8
Python
false
false
6,170
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __version__ = '1.0.1' get_hw_module_list_query = """ SELECT hwm.id AS id, hwm.name AS name, COALESCE(hwm.module_id::VARCHAR, '') AS module_id, hwm.user_id AS user_id, coalesce(usr.fullname, '') AS user_name, hwm.traceable_object_id AS traceable_object_id, coalesce(tob.name, '') AS traceable_object_name, hwm.meta_information::json AS meta_information, hwm.show_on_map AS show_on_map, hwm.gprs_active AS gprs_active, hwm.active AS active, hwm.deleted AS deleted, hwm.created_on AS created_on, hwm.updated_on AS updated_on FROM public.hw_module AS hwm LEFT OUTER JOIN public.user AS usr ON usr.id = hwm.user_id LEFT OUTER JOIN public.traceable_object AS tob on hwm.traceable_object_id = tob.id WHERE hwm.deleted is FALSE AND ($1::BIGINT = 0 OR hwm.user_id = $1::BIGINT) AND ( $2::VARCHAR is NULL OR hwm.name ILIKE $2::VARCHAR || '%' OR hwm.name ILIKE '%' || $2::VARCHAR || '%' OR hwm.name ILIKE $2::VARCHAR || '%') """ get_hw_module_list_user_id_hw_module_id_list_query = """ SELECT hwm.id AS id FROM public.hw_module AS hwm WHERE hwm.deleted is FALSE AND hwm.active is TRUE AND hwm.show_on_map IS TRUE AND ($1::BIGINT IS NULL OR hwm.user_id = $1::BIGINT) AND (array_length($2::int[], 1) IS NULL OR hwm.traceable_object_id = any ($2::int[])) """ get_hw_module_list_dropdown_query = """ SELECT hwm.id AS id, hwm.name AS name FROM public.hw_module AS hwm WHERE hwm.deleted is FALSE AND hwm.active is TRUE AND ($1::BIGINT IS NULL OR hwm.user_id = $1::BIGINT) AND ( $2::VARCHAR is NULL OR hwm.name ILIKE $2::VARCHAR || '%' OR hwm.name ILIKE '%' || $2::VARCHAR || '%' OR hwm.name ILIKE $2::VARCHAR || '%') """ get_hw_module_list_count_query = """ SELECT count(*) AS hw_module_count FROM public.hw_module AS hwm LEFT OUTER JOIN public.user AS usr ON usr.id = hwm.user_id WHERE hwm.deleted is FALSE AND ($1::BIGINT = 0 OR hwm.user_id = $1::BIGINT) AND ( $2::VARCHAR is NULL OR hwm.name ILIKE $2::VARCHAR || '%' OR hwm.name ILIKE '%' || $2::VARCHAR || '%' OR hwm.name ILIKE $2::VARCHAR || '%') """ get_hw_module_element_query = """ SELECT hwm.id AS id, hwm.name AS name, COALESCE(hwm.module_id::VARCHAR, '') AS module_id, hwm.user_id AS user_id, coalesce(usr.fullname, '') AS user_name, hwm.traceable_object_id AS traceable_object_id, coalesce(tob.name, '') AS traceable_object_name, hwm.meta_information::json AS meta_information, hwm.show_on_map AS show_on_map, hwm.gprs_active AS gprs_active, hwm.active AS active, hwm.deleted AS deleted, hwm.created_on AS created_on, hwm.updated_on AS updated_on FROM public.hw_module AS hwm LEFT OUTER JOIN public.user AS usr ON usr.id = hwm.user_id LEFT OUTER JOIN public.traceable_object AS tob on hwm.traceable_object_id = tob.id WHERE hwm.deleted is FALSE AND hwm.id = $1; """ get_hw_module_element_by_traceable_object_id_query = """ SELECT hwm.id AS id, hwm.name AS name, COALESCE(hwm.module_id::VARCHAR, '') AS module_id, hwm.user_id AS user_id, coalesce(usr.fullname, '') AS user_name, hwm.traceable_object_id AS traceable_object_id, coalesce(tob.name, '') AS traceable_object_name, hwm.meta_information::json AS meta_information, hwm.gprs_active AS gprs_active, hwm.show_on_map AS show_on_map, hwm.active AS active, hwm.deleted AS deleted, hwm.created_on AS created_on, hwm.updated_on AS updated_on FROM public.hw_module AS hwm LEFT OUTER JOIN public.user AS usr ON usr.id = hwm.user_id LEFT OUTER JOIN public.traceable_object AS tob on hwm.traceable_object_id = tob.id WHERE hwm.deleted is FALSE AND ($1::BIGINT is NULL OR hwm.user_id = $1::BIGINT) AND 
hwm.traceable_object_id = $2; """ get_hw_module_element_by_name_query = """ SELECT hwm.id AS id, hwm.name AS name, COALESCE(hwm.module_id::VARCHAR, '') AS module_id, hwm.user_id AS user_id, coalesce(usr.fullname, '') AS user_name, hwm.traceable_object_id AS traceable_object_id, coalesce(tob.name, '') AS traceable_object_name, hwm.meta_information::json AS meta_information, hwm.gprs_active AS gprs_active, hwm.show_on_map AS show_on_map, hwm.active AS active, hwm.deleted AS deleted, hwm.created_on AS created_on, hwm.updated_on AS updated_on FROM public.hw_module AS hwm LEFT OUTER JOIN public.user AS usr ON usr.id = hwm.user_id LEFT OUTER JOIN public.traceable_object AS tob on hwm.traceable_object_id = tob.id WHERE hwm.deleted is FALSE AND ( $1::VARCHAR is NULL OR hwm.name ILIKE $1::VARCHAR || '%' OR hwm.name ILIKE '%' || $1::VARCHAR || '%' OR hwm.name ILIKE $1::VARCHAR || '%') LIMIT 1; """
[ "sudhakar@satmatgroup.com" ]
sudhakar@satmatgroup.com
42d1ab3ce84114a87a59d9c3f9a7720ae4e57ece
bffe3ed7c76d488a685f1a586f08270d5a6a847b
/side_service/utils/importer.py
a6ea97b8f8a7a02c6df5fb58ea6838cd08c9642a
[]
no_license
ganggas95/side-service
86a863d7b8d164e05584938aa63e56aa1ed8f793
c58ee47d1145cb704c4268006f135a141efc0667
refs/heads/nizar_dev
2021-06-21T11:37:54.771478
2019-10-14T23:45:39
2019-10-14T23:45:39
213,881,364
0
1
null
2021-05-06T19:55:47
2019-10-09T09:52:06
Python
UTF-8
Python
false
false
2,443
py
import os from numpy import int64 import pandas as pd from flask import current_app as app from side_service.models.prov import Provinsi from side_service.models.kab import Kabupaten from side_service.models.kec import Kecamatan from side_service.models.desa import Desa class FileImporter: temps = [] def __init__(self, filename): if filename: self.df = pd.read_csv(os.path.join( app.config["FILE_IMPORT_FOLDER"], filename), header=None) class ImportProvinceFile(FileImporter): def read_data(self): for row in range(0, len(self.df.index)): kode_prov = str(self.df[0][self.df.index[row]]) name = self.df[1][self.df.index[row]] prov = Provinsi(kode_prov, name) prov.save() print("Import Province is success") class ImportRegenciesFile(FileImporter): @property def provs(self): return Provinsi.all() def read_data(self): for prov in self.provs: df_prov = self.df.loc[self.df[1] == int64(prov.kode_prov)] for row in range(0, len(df_prov.index)): kode_kab = str(df_prov[0][df_prov.index[row]]) name = df_prov[2][df_prov.index[row]] kab = Kabupaten(kode_kab, name, prov.kode_prov) kab.save() print("Import Kabupaten is success") class ImportDistrictFile(FileImporter): @property def kabs(self): return Kabupaten.all() def read_data(self): for kab in self.kabs: df_kab = self.df.loc[self.df[1] == int64(kab.kode_kab)] for row in range(0, len(df_kab.index)): kode_kec = str(df_kab[0][df_kab.index[row]]) name = df_kab[2][df_kab.index[row]] kec = Kecamatan(kode_kec, name, kab.kode_kab) kec.save() print("Import District Success") class ImportVillagesFile(FileImporter): @property def kecs(self): return Kecamatan.all() def read_data(self): for kec in self.kecs: df_desa = self.df.loc[self.df[1] == int64(kec.kode_kec)] for row in range(0, len(df_desa.index)): kode_desa = str(df_desa[0][df_desa.index[row]]) name = df_desa[2][df_desa.index[row]] desa = Desa(kode_desa, name, kec.kode_kec) desa.save() print("Import Villages Success")
[ "subhannizar25@gmail.com" ]
subhannizar25@gmail.com
b637325039f49ef3a68474942e03ff5f45a15a45
6455c57f85289fae2195e15b9de126ef1f6bf366
/project/job/models.py
d76497cb302d53dfa50245b326202b283d94f849
[]
no_license
muhamed-mustafa/django-job-board
8fcb76e8543509d233cb7697ced67c96f5d81fbc
6b3fa1d7126d9c400c1c6cf3ccb4c8061db8692b
refs/heads/master
2022-12-15T00:53:15.202174
2020-09-17T21:43:48
2020-09-17T21:43:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,523
py
from django.db import models from django.utils.translation import ugettext as _ from django.utils.text import slugify from django.contrib.auth.models import User def image_upload(instance, filename): imagename, extension = filename.split(".") return 'jobs/%s.%s' % (instance.id, extension) class Job(models.Model): objects = None JOB_TYPE = ( ('Full Time', 'Full Time'), ('Part Time', 'Part Time') ) owner = models.ForeignKey(User, related_name='job_owner', on_delete=models.CASCADE) like = models.ManyToManyField(User,blank=True) location = models.CharField(max_length=20) title = models.CharField(max_length=100) job_type = models.CharField(max_length=100, choices=JOB_TYPE) description = models.TextField(max_length=1000) published_at = models.DateTimeField(auto_now=True) vacancy = models.IntegerField(default=1) salary = models.IntegerField(default=0) experience = models.IntegerField(default=1) category = models.ForeignKey('Category', on_delete=models.CASCADE) image = models.ImageField(upload_to=image_upload, null=True, blank=True) slug = models.SlugField(null=True, blank=True) facebook = models.CharField(max_length=500,null=True,blank=True) instagram = models.CharField(max_length=500,null=True,blank=True) google = models.CharField(max_length=500,null=True,blank=True) twitter = models.CharField(max_length=500,null=True,blank=True) def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.title) super(Job, self).save(*args, **kwargs) class meta: verbose_name = _('Job') verbose_name_plural = _('Jobs') def __str__(self): return self.title class Category(models.Model): name = models.CharField(max_length=25) class meta: verbose_name = _('Category') verbose_name_plural = _('Categories') def __str__(self): return self.name class Apply(models.Model): job = models.ForeignKey(Job, related_name='apply_job', on_delete=models.CASCADE) name = models.CharField(max_length=50) email = models.EmailField(max_length=100) website = models.URLField() cv = models.FileField(upload_to='apply/') cover_letter = models.TextField(max_length=1000) created_at = models.DateTimeField(auto_now=True,null=True,blank=True) class Meta: verbose_name = _('Apply') verbose_name_plural = _('Applies') def __str__(self): return self.name
[ "muuhamed14mustafa@gmail.com" ]
muuhamed14mustafa@gmail.com
3d07fac9d2bb63738f35e626530cf62382648804
127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06
/2021_하반기 코테연습/boj16937.py
6d06ddfaf0afe6fb35157d542a1476f2a0119f6a
[]
no_license
holim0/Algo_Study
54a6f10239368c6cf230b9f1273fe42caa97401c
ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c
refs/heads/master
2023-08-25T14:07:56.420288
2021-10-25T12:28:23
2021-10-25T12:28:23
276,076,057
3
1
null
null
null
null
UTF-8
Python
false
false
999
py
from itertools import combinations

h, w = map(int, input().split())
n = int(input())
sti = []
for _ in range(n):
    r, c = map(int, input().split())
    if (r<=h and c<=w) or (r<=w and c<=h):
        sti.append((r, c))

answer = -1
johab = list(combinations(sti, 2))
for cur in johab:
    r1,c1, r2, c2 = cur[0][0], cur[0][1], cur[1][0], cur[1][1]
    rest_r, rest_c = h-r1, w-c1
    rest_r2, rest_c2 = h-c1, w-r1
    if rest_r>=0 and rest_c>=0:
        if (r2<=rest_r and c2<=w) or (r2<=h and c2<=rest_c):
            answer = max(answer, r1*c1+r2*c2)
        elif (c2<=rest_r and r2<=w) or (c2<=h and r2<=rest_c):
            answer = max(answer, r1*c1+r2*c2)
    if rest_r2>=0 and rest_c2>=0:
        if (r2<=rest_r2 and c2<=w) or (r2<=h and c2<=rest_c2):
            answer = max(answer, r1*c1+r2*c2)
        elif (c2<=rest_r2 and r2<=w) or (c2<=h and r2<=rest_c2):
            answer = max(answer, r1*c1+r2*c2)

if answer == -1:
    print(0)
else:
    print(answer)
[ "holim1226@gmail.com" ]
holim1226@gmail.com
66249f846adc240a436f156b941e6d0a01b7be95
4bed9030031fc99f6ea3d5267bd9e773f54320f8
/sparse/repos/katyhon/hello-world.git/setup.py
9910a87c354430aea7dbbdccc76c28a3264a91f0
[ "BSD-3-Clause" ]
permissive
yuvipanda/mybinder.org-analytics
c5f4b939541d29727bc8d3c023b4d140de756f69
7b654e3e21dea790505c626d688aa15640ea5808
refs/heads/master
2021-06-13T05:49:12.447172
2018-12-22T21:48:12
2018-12-22T21:48:12
162,839,358
1
1
BSD-3-Clause
2021-06-10T21:05:50
2018-12-22T20:01:52
Jupyter Notebook
UTF-8
Python
false
false
1,133
py
# -*- coding: utf-8 -*-
# @Author: Zebedee Nicholls
# @Date:   2017-04-10 13:42:11
# @Last Modified by:   Chris Smith
# @Last Modified time: 2018-01-11 19:17:00
from setuptools import setup
from setuptools import find_packages
import versioneer


# README #
def readme():
    with open('README.rst') as f:
        return f.read()


setup(name='fair',
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description='Python package to perform calculations with the FAIR simple climate model',
      long_description=readme(),
      keywords='simple climate model temperature response carbon cycle emissions forcing',
      url='https://github.com/OMS-NetZero/FAIR',
      author='OMS-NetZero, Chris Smith, Richard Millar, Zebedee Nicholls, Myles Allen',
      author_email='c.j.smith1@leeds.ac.uk, richard.millar@physics.ox.ac.uk',
      license='Apache 2.0',
      packages=find_packages(exclude=['tests*','docs*']),
      package_data={'': ['*.csv']},
      include_package_data=True,
      install_requires=[
          'numpy>=1.11.3',
          'scipy>=0.19.0',
      ],
      zip_safe=False,
      )
[ "yuvipanda@gmail.com" ]
yuvipanda@gmail.com
757fdd5ec99459542dde88e360700156603c2846
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
/assessment2/amazon/run/core/controllers/vivacious.py
6023c96df4e75d647fba458235af9ef85c64b4ef
[]
no_license
sqlconsult/byte
02ac9899aebea4475614969b594bfe2992ffe29a
548f6cb5038e927b54adca29caf02c981fdcecfc
refs/heads/master
2021-01-25T14:45:42.120220
2018-08-11T23:45:31
2018-08-11T23:45:31
117,135,069
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
#!/usr/bin/env python3

from flask import Blueprint, Flask, render_template, request, url_for

controller = Blueprint('vivacious', __name__, url_prefix='/vivacious')


# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic':                      # TODO 2
#         return render_template('republic.html')  # TODO 2
#     else:
#         pass
[ "sqlconsult@hotmail.com" ]
sqlconsult@hotmail.com
0743fab55b7760c26dc13b3922aea2a97eec77c6
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
/recipes/Python/580763_Context_Manager_Arbitrary_Number_Files/recipe-580763.py
150135fb1201d2a97d2a7f356d35cb1f5cb58577
[ "MIT" ]
permissive
betty29/code-1
db56807e19ac9cfe711b41d475a322c168cfdca6
d097ca0ad6a6aee2180d32dce6a3322621f655fd
refs/heads/master
2023-03-14T08:15:47.492844
2021-02-24T15:39:59
2021-02-24T15:39:59
341,878,663
0
0
MIT
2021-02-24T15:40:00
2021-02-24T11:31:15
Python
UTF-8
Python
false
false
416
py
class Files(tuple):
    def __new__(cls, *filePaths):
        files = []
        try:
            for filePath in filePaths:
                files.append(open(filePath))
                files[-1].__enter__()
        except:
            for file in files:
                file.close()
            raise
        else:
            return super(Files, cls).__new__(cls, files)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        for file in self:
            file.close()
[ "betty@qburst.com" ]
betty@qburst.com
8bc30b4ffffdd2a9e6c67b806b72324ac9bcf8c5
699c7f26a91106a2fc79bb15299ce0cee532a2dd
/test/pivottest.py
a875542be8d7a661c68859711f9c108434bede2c
[]
no_license
samconnolly/astro
70581a4d3f2086716aace3b5db65b74aaaa5df95
3731be313592c13dbb8af898e9734b98d83c0cc2
refs/heads/master
2020-04-06T03:40:27.454279
2014-03-12T14:36:34
2014-03-12T14:36:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,709
py
# Programme to test how a pivoting spectral component, plus a # constant soft component changes the shape of the flux-flux diagram import numpy as np from pylab import * # parameters epivot = 2.0 # KeV pivot energy pivmin = 2.0 pivmax = 10.0 cindex = 2.0 # index of constant component nsteps = 10.0 # constants h = 6.63 # planck e = 1.6e-19 # electron charge # energy axis energy = np.arange(0.5,10.0,0.01) # energy range of spectrum in KeV logenergy = np.log(energy) # log of energy freq = (energy*e*1000.0)/h # frequency range in Hz stepsize = (pivmax-pivmin)/nsteps pivnorm = ( ((epivot*e*1000.0)/h)**pivmin) fluxflux = [[],[],[]] # plotting the log spectrum components # constant component cflux = freq**(-cindex) # constant flux component logcflux = np.log10(cflux) # log of constant flux logvflux = [] # varying component for piv in np.arange(pivmin,pivmax,stepsize): currvflux = (freq**(-piv)) pnorm = (((epivot*e*1000.0)/h)**piv) currvflux = (currvflux/pnorm)*pivnorm logcurrvflux = np.log10(currvflux) # log thereof logvflux.append(logcurrvflux) # soft/hard delineaters low = [np.log(0.5),np.log10(0.5)] div = [np.log(2.0),np.log10(2.0)] high = [np.log(10.0),np.log10(10.0)] yrange = [logcflux[-1],logvflux[-1][0]] subplot(1,2,1) # log spectrum plot #plot(logenergy,logcflux,color="red") plot(logenergy,logvflux[0],color="blue") plot(logenergy,logvflux[len(logvflux)/2],color="blue") plot(logenergy,logvflux[-1],color="blue") #plot(low,yrange) #plot(div,yrange) #plot(high,yrange) # total spectrum subplot(1,2,2) for x in [0,len(logvflux)/2,-1]: logtotal = logvflux[x] + logcflux plot(logenergy,logtotal) show()
[ "sdc1g08@soton.ac.uk" ]
sdc1g08@soton.ac.uk
cbb97a71e43ba0aeae79c64781b1c2a7c1f09cb8
8fb4f83ac4e13c4c6de7f412f68c280d86ddea15
/eon/tests/unit/deployer/network/ovsvapp/test_vapp_util.py
6e5508948b41971f8773c56807dceeb902078137
[ "Apache-2.0" ]
permissive
ArdanaCLM/hpe-eon
cbd61afa0473bbd9c6953e5067dbe5a7ff42c084
48a4086d2ccc5ccac60385b183f0d43f247c0b97
refs/heads/master
2021-07-25T18:55:30.176284
2017-10-24T08:49:42
2017-10-24T08:49:42
103,971,673
0
1
null
2017-11-07T15:47:45
2017-09-18T17:43:45
Python
UTF-8
Python
false
false
6,225
py
# # (c) Copyright 2015-2017 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import contextlib from mock import patch from pyVmomi import vim from eon.deployer.network.ovsvapp.util.vapp_util import OVSvAppUtil from eon.deployer.util import VMwareUtils from eon.tests.unit import tests from eon.tests.unit.deployer import fake_inputs # TODO: Put all the helper classes in one location class PrepFolder: name = 'fake_folder' childEntity = False class MoveInto_Task: def __init__(self, val): self.val = val class Destroy_Task: pass class MOB: class content(): def rootFolder(self): pass def propertyCollector(self): pass class VM: class Vm: class config: annotation = 'hp-ovsvapp' class runtime: powerState = 'poweredOn' class PowerOff: pass class Destroy: pass vm = [Vm] class Cluster: @staticmethod def ReconfigureComputeResource_Task(cluster_spec_ex, modify): pass class TestOVSvAppUtil(tests.BaseTestCase): def setUp(self): super(TestOVSvAppUtil, self).setUp() self.ovs_vapp_util = OVSvAppUtil() vc_info = fake_inputs.data.get('vcenter_configuration') self.cluster = {'obj': Cluster(), 'name': vc_info.get('cluster'), 'configuration.dasConfig.enabled': True, 'configuration.drsConfig.enabled': True} def test_get_ovsvapps(self): fake_vms = [{'name': 'ovsvapp_fake_vm', 'config.annotation': 'hp-ovsvapp', 'runtime.host': 'host-1'}] content = None vm_folder = None with contextlib.nested( patch.object(VMwareUtils, 'get_view_ref'), patch.object(VMwareUtils, 'collect_properties', return_value=fake_vms))as ( mock_get_view_ref, mock_collect_properties): output = self.ovs_vapp_util.get_ovsvapps(content, vm_folder, fake_inputs.fake_clusters) self.assertEqual(fake_vms[0], output['host-1']) self.assertTrue(mock_get_view_ref.called) self.assertTrue(mock_collect_properties.called) def test_get_active_hosts(self): host = {'obj': 'host1', 'name': 'fake_host'} with patch.object(VMwareUtils, 'get_all_hosts', return_value=[host]) as mock_get_all_hosts: self.ovs_vapp_util.get_active_hosts(MOB, 'vm_folder', ['host1'], 'cluster') self.assertTrue(mock_get_all_hosts.called) def test_exec_multiprocessing(self): pass def test_get_folder(self): pass def test_create_host_folder(self): with patch.object(OVSvAppUtil, '_get_folder', return_value='fake_folder') as mock_get_folder: self.ovs_vapp_util.create_host_folder( 'content', [{'cluster': {'name': self.cluster.get('name')}}], 'host_folder') self.assertTrue(mock_get_folder.called) def test_move_hosts_in_to_folder(self): pass def test_enter_maintenance_mode(self): pass def test_destroy_failed_commissioned_vapps(self): host = {'obj': VM, 'name': 'fake_host'} with patch.object(VMwareUtils, 'wait_for_task') as mock_wait_for_task: self.ovs_vapp_util.destroy_failed_commissioned_vapps(host, MOB) self.assertTrue(mock_wait_for_task.called) def test_move_host_back_to_cluster(self): host = {'obj': 'host', 'name': 'fake_host'} cluster = {'obj': PrepFolder, 'name': 'fake_cluster'} with contextlib.nested( patch.object(OVSvAppUtil, 
'destroy_failed_commissioned_vapps'), patch.object(OVSvAppUtil, 'enter_maintenance_mode'), patch.object(VMwareUtils, 'wait_for_task')) as ( mock_destroy, mock_enter_maintenance_mode, mock_wait_for_task): self.ovs_vapp_util.move_host_back_to_cluster(MOB, host, cluster, PrepFolder, 'err') self.assertTrue(mock_destroy.called) self.assertTrue(mock_enter_maintenance_mode.called) self.assertTrue(mock_wait_for_task.called) def test_get_host_parent(self): pass def test_get_cluster_inventory_path(self): pass def test_get_eon_env(self): pass def test_exec_subprocess(self): pass def test_disable_ha_on_ovsvapp(self): with contextlib.nested( patch.object(vim.VirtualMachine, '__init__', return_value=None), patch.object(vim.HostSystem, '__init__', return_value=None), patch.object(VMwareUtils, 'wait_for_task')) as ( mock_vm, mock_host, mock_wait_for_task): vim.VirtualMachine.name = 'fake-vm' self.vm_obj = vim.VirtualMachine() self.host_obj = vim.HostSystem() host = {'obj': self.host_obj, 'name': 'fake_host'} self.ovs_vapp_util.disable_ha_on_ovsvapp(fake_inputs.session['si'], self.vm_obj, self.cluster, host) self.assertTrue(mock_vm.called) self.assertTrue(mock_host.called) self.assertTrue(mock_wait_for_task.called)
[ "srivastava.abhishek@hp.com" ]
srivastava.abhishek@hp.com
e646bb93cc2e371f15aec3b80cb3f8c0380cccc1
51a37b7108f2f69a1377d98f714711af3c32d0df
/src/leetcode/P292.py
5c48dc1f6e7885796ce6210fadce7d6c63219433
[]
no_license
stupidchen/leetcode
1dd2683ba4b1c0382e9263547d6c623e4979a806
72d172ea25777980a49439042dbc39448fcad73d
refs/heads/master
2022-03-14T21:15:47.263954
2022-02-27T15:33:15
2022-02-27T15:33:15
55,680,865
7
1
null
null
null
null
UTF-8
Python
false
false
136
py
class Solution:
    def canWinNim(self, n):
        """
        :type n: int
        :rtype: bool
        """
        return n % 4 != 0
[ "stupidchen@foxmail.com" ]
stupidchen@foxmail.com
9af39db942c3e1dde49345526d12a0c37972f44a
739e41d4f24f79c772d266cded0de9b759c6e953
/venv/lib/python3.6/site-packages/datasets/__init__.py
eb40f7f2cfc457e3e42b84454b398c6c73b9dd70
[ "MIT" ]
permissive
MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020
24b7bbdecf459292f8b58be286feab3b9aa341ba
82586c632268c103de269bcbffa5f7849b174a29
refs/heads/main
2023-05-18T15:41:13.495286
2021-06-11T18:21:35
2021-06-11T18:21:35
304,268,819
3
2
null
null
null
null
UTF-8
Python
false
false
2,246
py
# flake8: noqa # coding=utf-8 # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __version__ = "1.0.2" import pyarrow from pyarrow import total_allocated_bytes from . import datasets from .arrow_dataset import Dataset, concatenate_datasets from .arrow_reader import ArrowReader, ReadInstruction from .arrow_writer import ArrowWriter from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .dataset_dict import DatasetDict from .features import ( Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Translation, TranslationVariableLanguages, Value, ) from .info import DatasetInfo, MetricInfo from .inspect import inspect_dataset, inspect_metric, list_datasets, list_metrics from .load import import_main_class, load_dataset, load_from_disk, load_metric, prepare_module from .metric import Metric from .splits import NamedSplit, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent from .utils import * from .utils.tqdm_utils import disable_progress_bar if int(pyarrow.__version__.split(".")[1]) < 16 and int(pyarrow.__version__.split(".")[0]) == 0: raise ImportWarning( "To use `datasets`, the module `pyarrow>=0.16.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) SCRIPTS_VERIONS = __version__
[ "adiaz@bcamath.org" ]
adiaz@bcamath.org
b21cdd6b34ce845121a1bedbe1cbd7d5fcb287c1
2834f98b53d78bafc9f765344ded24cf41ffebb0
/weblayer/renderer/DEPS
e83c233386cd4f75dd17543c345adb86342e4984
[ "BSD-3-Clause" ]
permissive
cea56/chromium
81bffdf706df8b356c2e821c1a299f9d4bd4c620
013d244f2a747275da76758d2e6240f88c0165dd
refs/heads/master
2023-01-11T05:44:41.185820
2019-12-09T04:14:16
2019-12-09T04:14:16
226,785,888
1
0
BSD-3-Clause
2019-12-09T04:40:07
2019-12-09T04:40:07
null
UTF-8
Python
false
false
784
include_rules = [
  # This is needed for error page strings/resources.
  # TODO(1024326): If WebLayer stays with WebView's error pages implementation
  # long-term, componentize these strings/resources as part of componentizing
  # that implementation and remove the need for this dependency.
  "+android_webview/grit",
  "+components/safe_browsing/common",
  "+components/safe_browsing/renderer",
  "+components/security_interstitials/content/renderer",
  "+components/security_interstitials/core/common",
  "+components/spellcheck/renderer",
  "+content/public/renderer",
  # needed for safebrowsing
  "+mojo/public/cpp/bindings",
  "+net/base",
  "+services/service_manager/public/cpp",
  "+third_party/blink/public/common",
  "+third_party/blink/public/platform",
  "+ui/base",
]
[ "commit-bot@chromium.org" ]
commit-bot@chromium.org
352bab04b5f300ec488b2c2277b3fcce8aa5430d
0fd92b7d882a1edb5542f6600bb177dcad67ed50
/powerful104/2476.py
59884bdebe88950cc45e5841b1b8de057f17569f
[]
no_license
alpha-kwhn/Baekjun
bce71fdfbbc8302ec254db5901109087168801ed
f8b4136130995dab78f34e84dfa18736e95c8b55
refs/heads/main
2023-08-02T11:11:19.482020
2021-03-09T05:34:01
2021-03-09T05:34:01
358,347,708
0
0
null
2021-04-15T17:56:14
2021-04-15T17:56:13
null
UTF-8
Python
false
false
317
py
ans=0
for _ in range(int(input())):
    a, b, c= map(int, input().split())
    prize=0
    if a==b==c:
        prize=10000+a*1000
    elif a==b or a==c:
        prize=1000+a*100
    elif b==c:
        prize=1000+b*100
    else:
        prize=max(a,b,c)*100
    if ans<prize:
        ans=prize
print(ans)
[ "noreply@github.com" ]
alpha-kwhn.noreply@github.com
b3a89023ea3508c3e11a114dd212533d8cafa3d2
c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5
/ml/m30_pca2_5_diabetes_RF.py
cf6c7c7cf6d4e1510dfd2fa544ccd37999038961
[]
no_license
89Mansions/AI_STUDY
d9f8bdf206f14ba41845a082e731ea844d3d9007
d87c93355c949c462f96e85e8d0e186b0ce49c76
refs/heads/master
2023-07-21T19:11:23.539693
2021-08-30T08:18:59
2021-08-30T08:18:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,082
py
# PCA : 차원축소, 컬럼 재구성 # RandomForest로 모델링 import numpy as np from sklearn.datasets import load_diabetes from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from sklearn.metrics import r2_score from sklearn.pipeline import Pipeline from xgboost import XGBRegressor #1. DATA datasets = load_diabetes() x = datasets.data y = datasets.target # print(x.shape, y.shape) # (442, 10) (442,) pca = PCA(n_components=8) x2 = pca.fit_transform(x) # fit_transform : 전처리 fit과 transform 한꺼번에 한다. x_train, x_test, y_train, y_test = train_test_split(x2, y, train_size=0.8, shuffle=True, random_state=46) print(x_train.shape) # (353, 8) >> 컬럼을 압축시켰다. 컬럼 재구성됨 print(x_test.shape) # (89, 8) >> 컬럼을 압축시켰다. 컬럼 재구성됨 # pca = PCA() # pca.fit(x) # cumsum = np.cumsum(pca.explained_variance_ratio_) # print("cumsum : ", cumsum) # cumsum 누적 합을 계산 # cumsum : [0.40242142 0.55165324 0.67224947 0.76779711 0.83401567 0.89428759 # 0.94794364 0.99131196 0.99914395 1. ] # d = np.argmax(cumsum >= 0.95)+1 # print("cumsum >= 0.95", cumsum > 0.95) # print("d : ", d) # cumsum >= 0.95 [False False False False False False False True True True] # d : 8 # import matplotlib.pyplot as plt # plt.plot(cumsum) # plt.grid() # plt.show() #2. Modeling model = Pipeline([("scaler", MinMaxScaler()),("model",RandomForestRegressor())]) model = Pipeline([("scaler", MinMaxScaler()),("model",XGBRegressor())]) #3. Train model.fit(x_train, y_train) #4. Score, Predict result = model.score(x_test, y_test) print("model.score : ", result) y_pred = model.predict(x_test) score = r2_score(y_pred, y_test) print("r2_score : ", score) # RandomForestRegressor # model.score : 0.43512635590690074 # r2_score : -0.5421970924222612 # XGBoost # model.score : 0.3449642489091771 # r2_score : -0.3388132027144872
[ "hwangkei0212@gmail.com" ]
hwangkei0212@gmail.com
9db9821260783c8ab2205fe0109af946caaa20e8
3806db5b4bb7a638f30c818a29ccaf2b0ddb2836
/test_141.py
47edc016610b2369996717a30fdc8799c917f569
[]
no_license
EomAA/fenics-qa
d0a687a7b84c51417e96eeeef9855c0d4ba27dea
c37a36a14450d0e7f6432c4726c5d96e0d6c4e96
refs/heads/master
2021-12-15T12:07:10.316478
2017-08-18T09:16:01
2017-08-18T09:16:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
634
py
import dolfin

mesh = dolfin.UnitSquareMesh(1,1)
dX = dolfin.dx(mesh)

fe = dolfin.FiniteElement(
    family="Quadrature",
    cell=mesh.ufl_cell(),
    degree=1,
    quad_scheme="default")

cppExprCode='''
namespace dolfin
{
    class CppExpr : public Expression
    {
    public:
        CppExpr(): Expression(0)
        {
        }
        void eval(Array<double>& values, const Array<double>& position) const
        {
            std::cout << "position = " << position << std::endl;
            values[0] = 1.;
            std::cout << "values = " << values << std::endl;
        }
    };
}'''

cppExpr = dolfin.Expression(cppExprCode, element=fe)
dolfin.assemble(cppExpr * dX)
[ "miroslav.kuchta@gmail.com" ]
miroslav.kuchta@gmail.com
e358e020b0dd01cb1f401a59c6dc293c3929cbad
6ea69f9a4431837a36b04ab926ac9c565b8a5eb6
/pydemic_ui/i18n.py
53f5ca158c862fef6e82e4716ab8fbbb4df02fcd
[ "MIT" ]
permissive
WillAllmeida/pydemic-ui
e9fad4845c428f3e2f0e7a65913391c3216083b3
f7c05d97489918736b0c7b1da4b0992bd77ed9a1
refs/heads/master
2023-01-24T13:49:27.219863
2020-10-07T18:11:02
2020-10-07T18:11:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
945
py
from gettext import gettext
from pathlib import Path

import sidekick as sk

LOCALEDIR = Path(__file__).parent / "locale"


def set_i18n(lang, language=None):
    """
    Set locale and translations.

    Examples:
        set_i18n('pt_BR.UTF-8') -> set locale to pt_BR.UTF-8 and language to pt_BR.
    """
    import gettext
    import locale
    import warnings
    import os

    try:
        locale.setlocale(locale.LC_ALL, lang)
        locale.setlocale(locale.LC_MESSAGES, language or lang)
        os.environ["LANG"] = lang
        os.environ["LANGUAGE"] = language or lang.split(".")[0]
    except locale.Error:
        warnings.warn(f"locale is not supported: {lang}")
    gettext.bindtextdomain("messages", localedir=LOCALEDIR)


def run():
    import os

    lang = os.environ.get("PYDEMIC_LANG") or os.environ.get("LANG")
    set_i18n(lang)


def gettext_lazy(st):
    return sk.deferred(gettext, st)


_ = gettext
__ = gettext_lazy
[ "fabiomacedomendes@gmail.com" ]
fabiomacedomendes@gmail.com
a344bfbfba2175a962b94b0450d79418dd1cd225
8246e9fbdecdb37651e0d09497fd9428e434f33c
/ServiceCatagory/urls.py
e17e6184bb33e3ef9d66c52d6ec20aae7d197828
[]
no_license
rajeev1234/Landing-Page
479995026ab01fc504a1e9502e7763dc04266009
4bfd22a6a1776907ba78b3dc9037064c820b049e
refs/heads/master
2020-03-08T13:37:20.253252
2018-04-05T06:33:26
2018-04-05T06:33:26
128,162,519
0
0
null
null
null
null
UTF-8
Python
false
false
863
py
from django.urls import path

from . import views

urlpatterns = [
    # Path to list view of ServiceCatagory : ServiceCatagory_list
    path('', views.ServiceCatagoryListView.as_view(), name='ServiceCatagory_list'),

    # Path to create new ServiceCatagory : ServiceCatagory_new
    path('new/', views.ServiceCatagoryCreateView.as_view(), name='ServiceCatagory_new'),

    # Path to edit ServiceCatagory : edit_list
    path('<int:pk>/edit', views.ServiceCatagoryUpdateView.as_view(), name='ServiceCatagory_update'),

    # Path to delete ServiceCatagory : ServiceCatagory_delete
    path('<int:pk>/delete', views.ServiceCatagoryDeleteView.as_view(), name='ServiceCatagory_delete'),

    # Path to detail view of ServiceCatagory : ServiceCatagory_details
    path('<int:pk>', views.ServiceCatagoryDetailView.as_view(), name='ServiceCatagory_details')
]
[ "ccrcian.rajeev1@gmail.com" ]
ccrcian.rajeev1@gmail.com
0e535165547e2b25ce06072821dc32d3a608475f
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
/cases/pa3/benchmarks/sieve-244.py
29dca34d6015fcd1da12c7a36d7a12abcdf185e5
[]
no_license
Virtlink/ccbench-chocopy
c3f7f6af6349aff6503196f727ef89f210a1eac8
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
refs/heads/main
2023-04-07T15:07:12.464038
2022-02-03T15:42:39
2022-02-03T15:42:39
451,969,776
0
0
null
null
null
null
UTF-8
Python
false
false
2,581
py
# A resizable list of integers
class Vector(object):
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return

        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1

        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size


# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]

        return self.capacity()


# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()

    while i < j:
        v.append(i)
        i = i + 1

    return v


# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0

    while i < v.length():
        k = v.get(i)
        j = i + 1

        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1

        i = i + 1


# Input parameter
n:int = 50

# Data
v:Vector = None
i:int = 0

# Crunch
v = vrange(2, n)
sieve(v)

# Print
while i < v.length():
    print(v.get(i))
    i = i + 1
[ "647530+Virtlink@users.noreply.github.com" ]
647530+Virtlink@users.noreply.github.com
836f316ea7373f660f670c441dd647f0fbff730c
6eb9078ce34bed9c895b821aae30f97bcc50ea7d
/前端第二课:Django/muggle/blog/admin.py
1eb06d37c9dd952bfd268205ff4916ceb34a5fa1
[]
no_license
yinsendemogui/Atomspace
e7b9e24f8e541f57bdbae2e4d935a3b67133bc69
1053d7e3e71365f6acca99431c2d4295243d3df1
refs/heads/master
2020-06-11T19:24:53.123006
2016-12-19T16:53:51
2016-12-19T16:53:51
75,628,559
0
0
null
null
null
null
UTF-8
Python
false
false
922
py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib import admin
from blog.models import UserProfile, Topic, Question, Answer, Comment, Ticket
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User

# Register your models here.


class UserProfileInline(admin.StackedInline):
    model = UserProfile
    can_delete = False
    verbose_name_plural = 'UserProfile'


class TicketInline(admin.StackedInline):
    model = Ticket
    can_delete = False
    verbose_name_plural = 'Ticket'


class UserAdmin(BaseUserAdmin):
    inlines = (UserProfileInline, TicketInline)


admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(UserProfile)
admin.site.register(Topic)
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Comment)
admin.site.register(Ticket)

# Superuser account and password: Admin/Admin123456
[ "215379068@qq.com" ]
215379068@qq.com
de9fb2ae97800642eda88142b72a772c8c9c47c5
2b14ef0b8c086a2dd047f6fab6f565f27c3634c6
/BOJ/브루트포스/호석이두마리치킨.py
48feda541fda601099cc09a058365b61117ed8f8
[]
no_license
psj8532/problem_solving
055475bbdc8902ed4d19fd242d95dff461cc9608
8ae06fc935c3d0a3c5ec537f13677b0534869df3
refs/heads/master
2023-06-09T10:16:01.248293
2021-05-07T03:09:38
2021-05-07T03:09:38
240,618,744
1
1
null
null
null
null
UTF-8
Python
false
false
1,312
py
# Brute force: try every pair of buildings as chicken-house locations and keep
# the pair that minimizes the total round-trip distance from every building.
from itertools import combinations
from _collections import deque


def bfs(here, depth, chicken_house):
    # Twice the shortest hop distance from `here` to the nearest chicken house
    # (0 if `here` is itself a chicken house).
    if here in chicken_house:
        return 0
    deq = deque([[here, depth]])
    visited = [False] * (N + 1)
    visited[here] = True
    while deq:
        h, d = deq.popleft()
        if h in chicken_house:
            return 2 * d
        for next in adj[h]:
            if not visited[next]:
                visited[next] = True
                deq.append([next, d + 1])


CHICKEN_CNT = 2
answer = [0, 0, 9876543210]
N, M = map(int, input().split())
adj = {i: [] for i in range(N + 1)}
for _ in range(M):
    start, end = map(int, input().split())
    adj[start].append(end)
    adj[end].append(start)

perm = list(map(list, combinations(range(1, N + 1), CHICKEN_CNT)))
for chicken_list in perm:
    chicken_house = sorted(chicken_list)
    distance = 0
    for house in range(1, N + 1):
        distance += bfs(house, 0, chicken_house)
    if distance == answer[2] and chicken_house[0] <= answer[0]:
        if (chicken_house[0] == answer[0] and chicken_house[1] < answer[1]) or (chicken_house[0] < answer[0]):
            answer[0], answer[1] = chicken_house[0], chicken_house[1]
    elif distance < answer[2]:
        answer[0], answer[1], answer[2] = chicken_house[0], chicken_house[1], distance
print(*answer)
[ "psj8532@naver.com" ]
psj8532@naver.com
e719c824c41b673746fdbfe99ee5d27001eb7e45
5f92dd6164c41e5756842da0a053b207005be252
/tests/models/test_dagcode.py
6f3c5d64cdf66aa29dc7df109dcd370d4f0fa3d5
[ "BSD-3-Clause", "MIT", "Apache-2.0" ]
permissive
mans2singh/airflow
24d17446457ebfbf17850d336722f79e46b06404
de9633f93a366ebc0a46d1ec4df2c4aa9a18357d
refs/heads/main
2023-03-20T17:56:18.506101
2022-10-22T19:41:57
2022-10-22T19:41:57
168,956,212
0
0
Apache-2.0
2019-02-03T14:51:24
2019-02-03T14:51:23
null
UTF-8
Python
false
false
6,409
py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations

from datetime import timedelta
from unittest.mock import patch

import pytest

from airflow import AirflowException, example_dags as example_dags_module
from airflow.models import DagBag
from airflow.models.dagcode import DagCode

# To move it to a shared module.
from airflow.utils.file import open_maybe_zipped
from airflow.utils.session import create_session
from tests.test_utils.db import clear_db_dag_code


def make_example_dags(module):
    """Loads DAGs from a module for test."""
    dagbag = DagBag(module.__path__[0])
    return dagbag.dags


class TestDagCode:
    """Unit tests for DagCode."""

    def setup_method(self):
        clear_db_dag_code()

    def teardown_method(self):
        clear_db_dag_code()

    def _write_two_example_dags(self):
        example_dags = make_example_dags(example_dags_module)
        bash_dag = example_dags['example_bash_operator']
        DagCode(bash_dag.fileloc).sync_to_db()
        xcom_dag = example_dags['example_xcom']
        DagCode(xcom_dag.fileloc).sync_to_db()
        return [bash_dag, xcom_dag]

    def _write_example_dags(self):
        example_dags = make_example_dags(example_dags_module)
        for dag in example_dags.values():
            dag.sync_to_db()
        return example_dags

    def test_sync_to_db(self):
        """Dag code can be written into database."""
        example_dags = self._write_example_dags()

        self._compare_example_dags(example_dags)

    def test_bulk_sync_to_db(self):
        """Dag code can be bulk written into database."""
        example_dags = make_example_dags(example_dags_module)
        files = [dag.fileloc for dag in example_dags.values()]
        with create_session() as session:
            DagCode.bulk_sync_to_db(files, session=session)
            session.commit()

        self._compare_example_dags(example_dags)

    def test_bulk_sync_to_db_half_files(self):
        """Dag code can be bulk written into database."""
        example_dags = make_example_dags(example_dags_module)
        files = [dag.fileloc for dag in example_dags.values()]
        half_files = files[: int(len(files) / 2)]
        with create_session() as session:
            DagCode.bulk_sync_to_db(half_files, session=session)
            session.commit()
        with create_session() as session:
            DagCode.bulk_sync_to_db(files, session=session)
            session.commit()

        self._compare_example_dags(example_dags)

    @patch.object(DagCode, 'dag_fileloc_hash')
    def test_detecting_duplicate_key(self, mock_hash):
        """Dag code detects duplicate key."""
        mock_hash.return_value = 0

        with pytest.raises(AirflowException):
            self._write_two_example_dags()

    def _compare_example_dags(self, example_dags):
        with create_session() as session:
            for dag in example_dags.values():
                if dag.is_subdag:
                    dag.fileloc = dag.parent_dag.fileloc
                assert DagCode.has_dag(dag.fileloc)
                dag_fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc)
                result = (
                    session.query(DagCode.fileloc, DagCode.fileloc_hash, DagCode.source_code)
                    .filter(DagCode.fileloc == dag.fileloc)
                    .filter(DagCode.fileloc_hash == dag_fileloc_hash)
                    .one()
                )

                assert result.fileloc == dag.fileloc
                with open_maybe_zipped(dag.fileloc, 'r') as source:
                    source_code = source.read()
                assert result.source_code == source_code

    def test_code_can_be_read_when_no_access_to_file(self):
        """
        Test that code can be retrieved from DB when you do not have access to Code file.

        Source Code should at least exist in one of DB or File.
        """
        example_dag = make_example_dags(example_dags_module).get('example_bash_operator')
        example_dag.sync_to_db()

        # Mock that there is no access to the Dag File
        with patch('airflow.models.dagcode.open_maybe_zipped') as mock_open:
            mock_open.side_effect = FileNotFoundError
            dag_code = DagCode.get_code_by_fileloc(example_dag.fileloc)

            for test_string in ['example_bash_operator', 'also_run_this', 'run_this_last']:
                assert test_string in dag_code

    def test_db_code_updated_on_dag_file_change(self):
        """Test if DagCode is updated in DB when DAG file is changed"""
        example_dag = make_example_dags(example_dags_module).get('example_bash_operator')
        example_dag.sync_to_db()

        with create_session() as session:
            result = session.query(DagCode).filter(DagCode.fileloc == example_dag.fileloc).one()

            assert result.fileloc == example_dag.fileloc
            assert result.source_code is not None

        with patch('airflow.models.dagcode.os.path.getmtime') as mock_mtime:
            mock_mtime.return_value = (result.last_updated + timedelta(seconds=1)).timestamp()

            with patch('airflow.models.dagcode.DagCode._get_code_from_file') as mock_code:
                mock_code.return_value = "# dummy code"
                example_dag.sync_to_db()

                with create_session() as session:
                    new_result = session.query(DagCode).filter(DagCode.fileloc == example_dag.fileloc).one()

                    assert new_result.fileloc == example_dag.fileloc
                    assert new_result.source_code == "# dummy code"
                    assert new_result.last_updated > result.last_updated
[ "noreply@github.com" ]
mans2singh.noreply@github.com
2f3246645ae2b648b8a03a0fbdc252ec71da5335
119646d6e1f13582c577fd7b87c9654839a0b806
/hubspot/cms/domains/api/__init__.py
b4e60c2edbcb4bf654f47d3f798df161efd090c9
[]
permissive
HubSpot/hubspot-api-python
446daaceeb3a6ce27edcd0414603c6d4bc07e327
d51a64c413461c0b82d8a41743e752d878747ca1
refs/heads/master
2023-08-31T09:52:56.583803
2023-08-07T11:00:27
2023-08-07T11:00:27
248,865,684
227
98
Apache-2.0
2023-09-14T15:25:19
2020-03-20T22:41:24
Python
UTF-8
Python
false
false
146
py
from __future__ import absolute_import

# flake8: noqa

# import apis into api package
from hubspot.cms.domains.api.domains_api import DomainsApi
[ "atanasiuk@hubspot.com" ]
atanasiuk@hubspot.com
d09be6ac32398bbd0397467b2ebf8504d775e116
356740062993a5967717098a7a3ee78ac6c6cf3f
/chapter15/projects/brute_force_pdf/pdf_password_breaker.py
814c051fc597e336a223e62a06f1d762f59c9ba6
[]
no_license
xerifeazeitona/autbor
79588302f14c0c09b1f9f57fcb973e656ee1da5c
c37ccbfa87c1ac260e728a3a91a8f2be97978f04
refs/heads/main
2023-04-03T18:01:34.588984
2021-04-07T17:59:26
2021-04-07T17:59:26
348,749,618
0
0
null
null
null
null
UTF-8
Python
false
false
1,528
py
""" Brute-Force PDF Password Breaker Say you have an encrypted PDF that you have forgotten the password to, but you remember it was a single English word. Trying to guess your forgotten password is quite a boring task. Instead you can write a program that will decrypt the PDF by trying every possible English word until it finds one that works. The text file dictionary.txt contains over 44,000 English words. Using the file-reading skills you learned in Chapter 9, create a list of word strings by reading this file. Then loop over each word in this list, passing it to the decrypt() method. If this method returns the integer 0, the password was wrong and your program should continue to the next password. If decrypt() returns 1, then your program should break out of the loop and print the hacked password. You should try both the uppercase and lowercase form of each word. """ import sys import PyPDF2 # Open PDF file pdf_reader = PyPDF2.PdfFileReader(open('encrypted.pdf', 'rb')) # Read dictionary words from text file with open('dictionary.txt') as file_object: wordlist = file_object.read().split('\n') # try to brute-force the password total_words = len(wordlist) for i, word in enumerate(wordlist): print(f'\r{i} / {total_words}', end='') if pdf_reader.decrypt(word) == 1: print(f'\nPassword found: "{word}"') sys.exit() if pdf_reader.decrypt(word.lower()) == 1: print(f'\nPassword found: "{word.lower()}"') sys.exit() print('\nCould not find a valid password.')
[ "juliano.amaral@gmail.com" ]
juliano.amaral@gmail.com
b9f7406f059d850f23f3e542f6e85d3ad59ee508
cd555725b300579d44c0bd3f6fc8f6a968912dfb
/UF2/Practica 25/Ejercicio4/main.py
b5ab8a5b60b80fa92c11bf55fc828788bc6704fe
[]
no_license
aleexnl/aws-python
2da5d8a416927f381618f1d6076d98d5e35b3b5e
03fce7744b443b2b59a02c261067ecae46ecc3d9
refs/heads/master
2022-11-24T08:58:24.686651
2020-04-18T15:58:32
2020-04-18T15:58:32
221,772,677
0
0
null
null
null
null
UTF-8
Python
false
false
158
py
from module import functions as fc  # Import the module that contains the functions

print(fc.mcd(30, 3))  # Call the function and pass it two parameters
[ "alex.nieto0027@gmail.com" ]
alex.nieto0027@gmail.com
a7bf7e1b684f8cbdcd574f13eed72023cf50bd3d
1bfad01139237049eded6c42981ee9b4c09bb6de
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/statistics/view/formulacatalog/formulacatalog.py
b878626afcde61247e92d719fe488330d5646617
[ "MIT" ]
permissive
kakkotetsu/IxNetwork
3a395c2b4de1488994a0cfe51bca36d21e4368a5
f9fb614b51bb8988af035967991ad36702933274
refs/heads/master
2020-04-22T09:46:37.408010
2019-02-07T18:12:20
2019-02-07T18:12:20
170,284,084
0
0
MIT
2019-02-12T08:51:02
2019-02-12T08:51:01
null
UTF-8
Python
false
false
2,267
py
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files


class FormulaCatalog(Base):
    """The FormulaCatalog class encapsulates a required formulaCatalog node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the FormulaCatalog property from a parent instance.
    The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
    """

    _SDM_NAME = 'formulaCatalog'

    def __init__(self, parent):
        super(FormulaCatalog, self).__init__(parent)

    @property
    def FormulaColumn(self):
        """An instance of the FormulaColumn class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacolumn.formulacolumn.FormulaColumn)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacolumn.formulacolumn import FormulaColumn
        return FormulaColumn(self)
[ "hubert.gee@keysight.com" ]
hubert.gee@keysight.com
3629c99cabd1fa8d37fb6433b4c595dcf55e3483
6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f
/venv/Scripts/easy_install-3.6-script.py
a95d3aabec1724ba66e9a38d3cbc071eb631e331
[]
no_license
reo-dev/bolt
29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54
d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e
refs/heads/master
2023-07-13T04:05:57.856278
2021-08-27T09:07:03
2021-08-27T09:07:03
382,195,547
0
0
null
null
null
null
UTF-8
Python
false
false
491
py
#!C:\Users\kwongibin.DESKTOP-KIC4V0D\PycharmProjects\bolttnut\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
    )
[ "75593016+reo-dev@users.noreply.github.com" ]
75593016+reo-dev@users.noreply.github.com
4e775386203f7562b4f2575de0d1b52520df4054
eb677df036fb922c36be3ac309a6b51137161343
/bin/alpha
fd242e28ca7f18e5ca9be924643804c1d152bc05
[]
no_license
kmyk/dotfiles
f1be5f1732a22a44605faca92a003de7a40968fa
33fbd461135fa6bc4b954c403402d4433cc45abd
refs/heads/master
2021-07-02T03:17:12.814548
2020-09-20T05:56:52
2020-09-20T05:56:52
11,720,259
0
0
null
null
null
null
UTF-8
Python
false
false
663
#!/usr/bin/env python
# Print the alphabet (upper- or lowercase) joined by a configurable delimiter.
import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-u', '--upper', action='store_true')
parser.add_argument('-l', '--lower', action='store_false', dest='upper')
parser.add_argument('-d', '--delimiter', type=str, default=' ')
parser.add_argument('-1', action='store_const', const="\n", dest='delimiter')
parser.add_argument('-0', action='store_const', const='', dest='delimiter')
parser.add_argument('-n', action='store_const', default="\n", const='')
args = parser.parse_args()

sys.stdout.write(args.delimiter.join(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ" if args.upper else "abcdefghijklmnopqrstuvwxyz")) + args.n)
[ "kimiyuki95@gmail.com" ]
kimiyuki95@gmail.com
bde40d296203cf014d6fa9584b60e567c306d60d
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/rna-transcription/64f6942fedc844af848cc19de6e53748.py
bc316926c867570f3906523b44066297c83447b6
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
239
py
# Didn't work out with the replace method,
# so I used the translate method with a map.
# Is translate deprecated in Python 3.4?
def to_rna(what):
    mapper = what.maketrans('GCTA', 'CGAU')
    what = what.translate(mapper)
    return what
[ "rrc@berkeley.edu" ]
rrc@berkeley.edu