| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
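The rows below follow this schema, one cell per `|`-delimited block. As a minimal sketch of consuming such records programmatically (assuming this dump was exported from a Hugging Face dataset; the dataset path below is a placeholder, not the real one):

from datasets import load_dataset  # pip install datasets
# "path/to/this-dataset" is a placeholder for wherever this dump actually lives
ds = load_dataset("path/to/this-dataset", split="train", streaming=True)
for record in ds.take(3):
    print(record["repo_name"], record["path"], record["length_bytes"])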
0d4b5e31db05f369c213e720e112388382249eb1
|
30eae4e521c282fb026147cf41d730f993996df0
|
/tests/test_connection.py
|
ae0172357678d943262e7c9b56c869b2134a562a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
trhowe/pycassa
|
7a98034304817fa46b2ea05179f55bf5dc374940
|
b4025cd7aea9454d10cc405010dee5fd7cc4e4b6
|
refs/heads/master
| 2021-01-15T15:55:37.976410
| 2010-11-24T18:48:52
| 2010-11-24T18:48:52
| 1,109,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import threading
import unittest
from nose.tools import assert_raises
import pycassa
from pycassa.cassandra.ttypes import CfDef, KsDef, InvalidRequestException
class ConnectionCase(unittest.TestCase):
def test_api_version_check(self):
original_ver = pycassa.connection.LOWEST_COMPATIBLE_VERSION
conn = pycassa.Connection('Keyspace1', 'localhost:9160')
ver = int(conn.describe_version().split('.',1)[0]) + 1
pycassa.connection.LOWEST_COMPATIBLE_VERSION = ver
conn.close()
try:
conn = pycassa.Connection('Keyspace1', 'localhost:9160')
assert False
except AssertionError:
pass
finally:
pycassa.connection.LOWEST_COMPATIBLE_VERSION = original_ver
|
[
"tyler@riptano.com"
] |
tyler@riptano.com
|
485a2e80602b51c08c20a4246f4b8d553f5b11ae
|
865559fc533655bfc24838871458c890a62a0b56
|
/scripts/fault_inject/io_bottleneck.py
|
bffc448cb2af9dad5e673fb5fc376fd103c12aef
|
[] |
no_license
|
LearningOS/Performance-Anomaly-Detection
|
fdf16af268630a51ee3e9d443acd76cae67ec7a0
|
772be444c42c5a1656e193150ef89e74c9c5eedb
|
refs/heads/master
| 2021-01-15T21:30:44.363761
| 2017-06-12T01:18:11
| 2017-06-12T01:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
import os
import time
import random
import argparse
import setproctitle
def main():
setproctitle.setproctitle('io_reader')
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dir', type=str, required=True, help='the input dir')
args = parser.parse_args()
random.seed(os.urandom(512))
while True:
for root, dirs, files in os.walk(args.dir):
random.shuffle(files)
for filename in files:
time.sleep(0.2)
with open(os.path.join(root, filename)) as f:
lines = f.readlines()
if __name__ == '__main__':
main()
|
[
"hqythu@gmail.com"
] |
hqythu@gmail.com
|
83905bcfa2f64c6ad760e2709e617474b1151839
|
1d8a255507ae42b4d1fd891dd6cb6dedee175a3e
|
/hekrapi/types.py
|
f48043a49b33052ac460458bf04514b3fcdf8abd
|
[
"MIT"
] |
permissive
|
alryaz/hekrapi-python
|
2478230748d93c21f7f2ca4600f3ea30a9422e50
|
4674e74ec5bace6c7f1f82be93edc22682e1d5b3
|
refs/heads/master
| 2023-02-13T01:19:40.109845
| 2023-01-30T21:18:14
| 2023-01-30T21:18:14
| 224,899,839
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""Types for Hekr API project."""
from typing import Tuple, Dict, Any, Union, Optional, Callable, TYPE_CHECKING
from .const import DeviceResponseState
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
from .command import Command
# noinspection PyUnresolvedReferences
from .device import Device
DecodeResult = Tuple['Command', Dict[str, Any], int]
MessageID = int
DeviceID = str
Action = str
ProcessedData = Union[dict, DecodeResult]
DeviceResponse = Tuple[MessageID, DeviceResponseState, Action, ProcessedData]
ProcessedResponse = Tuple[MessageID, DeviceResponseState, Action, ProcessedData, Optional['Device']]
HekrCallback = Callable[[Optional['Device'], MessageID, DeviceResponseState, Action, ProcessedData], Any]
DeviceInfo = Dict[str, Any]
AnyCommand = Union[int, str, 'Command']
CommandData = Optional[Dict[str, Any]]
DevicesDict = Dict[DeviceID, 'Device']
|
[
"alryaz@xavux.com"
] |
alryaz@xavux.com
|
4fd5356f20891d2b0e5f8d6b00d1af0d9f7be823
|
1d16e9f9a75f8980dd9a6ac07d7b50d0ab4dc259
|
/day4_me/sieve.py
|
29a81c590431bf7b3ba064c3fb10d24afa3c90bb
|
[
"MIT"
] |
permissive
|
stoneand2/python-washu-2014
|
285c8aea0b80f8c0487da41db9beaef4fde066b5
|
a488aa3a4df02c6512869c07751f66c716b82ca0
|
refs/heads/master
| 2020-12-25T09:48:01.289330
| 2014-08-23T16:49:19
| 2014-08-23T16:49:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
# NOTE: works with very specific parameters only
big_list = []
new_list = list(range(3, 122))
def sieve(prime):
    big_list.append(prime)
    while len(new_list) >= 1:
        # filter via slice assignment: calling new_list.remove() while
        # iterating over new_list would skip elements
        new_list[:] = [number for number in new_list if number % prime != 0]
        next_round_prime = new_list.pop(0)
        return sieve(next_round_prime)
    return big_list
print(sieve(2))
# trying to make it work with a more relaxed set of assumptions. Unsuccessful so far
# class SieveClass():
#
# def __init__(self, maximum):
# self.maximum = maximum
# self.new_list = list(range(2, maximum+1))
# self.primes_list = []
#
# def sieve(self):
#
# while len(self.new_list) != 0:
# working_value = self.new_list[0]
#
# for number in self.new_list:
# if (number % working_value) == 0 and number != working_value:
# self.new_list.remove(number)
# elif number == working_value:
# self.primes_list.append(working_value)
# self.new_list.pop(0)
#
# next_round_working_value = self.new_list.pop(0)
#
# return self.sieve()
#
# return self.primes_list
#
# call = SieveClass(121)
# print call.sieve()
|
[
"stoneand2@users.noreply.github.com"
] |
stoneand2@users.noreply.github.com
|
1533db14daa2d37a9f2be50a66fbffba59243413
|
6e063122527f12c354b467eed9e2f992c41706ee
|
/QuickSort.py
|
a302ae5e9b4c9a2350139e0c8b96c6290d8e5055
|
[] |
no_license
|
tianhaomin/algorithm4
|
2b6411f14e9f34088563ebd20dd1af3444c7333b
|
95b6490da65144fb2a5a16a0e35815e3a189afa9
|
refs/heads/master
| 2021-05-10T16:33:31.794252
| 2018-01-23T08:55:31
| 2018-01-23T08:55:31
| 118,581,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Implemented in two ways: one regular, one recursive
def exch(a,i,j):
temp = a[j]
a[j] = a[i]
a[i] = temp
def subnum(j):
j -= 1
return j
def addnum(i):
i += 1
return i
def partition(a, lo, hi):
    v = a[lo]
    i = lo
    j = hi + 1
    while True:
        i += 1
        while a[i] < v:  # advance i past elements smaller than the pivot
            if i == hi:
                break
            i += 1
        j -= 1
        while v < a[j]:  # retreat j past elements larger than the pivot
            if j == lo:
                break
            j -= 1
        if i >= j:
            break
        exch(a, i, j)
    exch(a, lo, j)
    return j
def sort1(a,lo,hi):
if hi<=lo:
return
j = partition(a,lo,hi)
sort1(a,lo,j-1)
sort1(a,j+1,hi)
def sort(a):
sort1(a,0,len(a)-1)
return a
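# Illustrative check (not part of the original file):
#   print(sort([5, 3, 8, 1]))  # -> [1, 3, 5, 8]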
|
[
"noreply@github.com"
] |
tianhaomin.noreply@github.com
|
6ff09683a65fd90486c1e9e867d292a34c30a5d2
|
18ca2e0f98b98941ff9d9e098e0be89166c8b87c
|
/Abp/Cp7/c7_18_1_checkPassword.py
|
44139200105bd8848776e24d165b4cbbaa750ff7
|
[] |
no_license
|
masa-k0101/Self-Study_python
|
f20526a9cd9914c9906059678554285bfda0c932
|
72b364ad4da8485a201ebdaaa430fd2e95681b0a
|
refs/heads/master
| 2023-03-07T07:38:27.559606
| 2021-02-22T16:24:47
| 2021-02-22T16:24:47
| 263,381,292
| 1
| 0
| null | 2020-06-09T17:32:06
| 2020-05-12T15:47:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
# -*- coding: utf-8 -*-
# Practice project 7.18.1
import re
# Returns True for a strong password, False otherwise
def check_password(password):
    if len(password) < 8:  # at least 8 characters
        return False
    if not re.search(r'[a-z]', password):  # contains a lowercase letter
        return False
    if not re.search(r'[A-Z]', password):  # contains an uppercase letter
        return False
    if not re.search(r'[0-9]', password):  # contains a digit
        return False
    return True
# For testing
if __name__ == "__main__":
    def print_password(p):
        print('Password "' + p + '" is ', end='')
        if check_password(p):
            print('strong')
        else:
            print('weak')
    passwords = ['abcdehA1', 'abcdeA1', '', ' ',
                 'abcdefgh', 'abcdefgA', 'abcdefg1',
                 'ABCDEFGH', 'ABCDEFGa', 'ABCDEFG1',
                 '12345678', '1234567a', '1234567A']
    for p in passwords:
        print_password(p)
    try:
        while True:
            print('Enter a password (CTRL-C to quit): ', end='', flush=True)
            password = input()
            print_password(password)
    except KeyboardInterrupt:
        print('Done')
|
[
"noreply@github.com"
] |
masa-k0101.noreply@github.com
|
de6a7dea3250609edf9110de417155b0b2ddc5b1
|
4b30aeffb63b10868fbfceab86bc60240f095513
|
/master modulde/kite_statemaster/__init__.py
|
016473ffd7b3008f083e8aea8ac3b3430f9871ed
|
[] |
no_license
|
gsaran49/PurchaseOrderAndInventory
|
fd3710dd6800e1dbe072ee69b31a00edf90c844b
|
1b7fb12dcb3bf7d0f3c6cfa02cceb9a0d113587b
|
refs/heads/master
| 2021-09-09T11:00:03.958465
| 2018-03-15T10:53:21
| 2018-03-15T10:53:21
| 114,107,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
import kite_statemaster
|
[
"gsaran49@gmail.com"
] |
gsaran49@gmail.com
|
62c644ca475e7661286befbef03030f82e7319a9
|
5c74741969998b17bb2eb89dfdf5dce203d1cadf
|
/cython/perf/compute_threaded.py
|
1c0dbf528035569aeaf3876766f00fb0e8e0ab5d
|
[
"MIT"
] |
permissive
|
vromanuk/async_techniques
|
c21484e14fc8792ba835ae6cb45176c2be9124b6
|
7e1c6efcd4c81c322002eb3002d5bb929c5bc623
|
refs/heads/master
| 2020-06-20T02:39:18.632214
| 2019-11-09T15:46:03
| 2019-11-09T15:46:03
| 196,962,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
import math
import datetime
import threading
import multiprocessing
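# Note: do_math is pure CPU-bound work; under CPython's GIL the threads
# execute bytecode one at a time, so this threaded split should not be
# expected to run faster than a single do_math(num=30_000_000) call.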
def main():
do_math(1)
t0 = datetime.datetime.now()
# do_math(num=30000000)
processor_count = multiprocessing.cpu_count()
print('Doing math on {:,} processors.'.format(multiprocessing.cpu_count()))
threads = []
for n in range(1, processor_count + 1):
threads.append(
(threading.Thread(target=do_math, args=(30_000_000 * (n - 1) / processor_count,
30_000_000 * n / processor_count),
daemon=True))
)
[t.start() for t in threads]
[t.join() for t in threads]
dt = datetime.datetime.now() - t0
print('Done in {:,.2f} sec.'.format(dt.total_seconds()))
def do_math(start=0, num=10):
pos = start
k_sq = 1000 * 1000
while pos < num:
pos += 1
math.sqrt((pos - k_sq) * (pos - k_sq))
if __name__ == '__main__':
main()
|
[
"romanuk_vlad@ukr.net"
] |
romanuk_vlad@ukr.net
|
a06d6bdecd0bcaaed6199eecdc4a5e280cf4f953
|
5f176b4d5f7bf567e1f4941aa8f43ac8fc6ee509
|
/Assignment_3/q2_rnn_cell.py
|
6c4da9ce2568e993769316bdcef0cd76e537eaa8
|
[] |
no_license
|
hncpr1992/stanford_cs224n_NLP
|
a2a219e389a358654e6d23ae1ea29f01c0182174
|
2ea464b521ffbf5cf3f0ef33df8ce02d8df8a434
|
refs/heads/master
| 2021-01-20T15:13:40.188528
| 2017-06-29T04:10:23
| 2017-06-29T04:10:23
| 90,741,160
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,117
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q2(c): Recurrent neural nets for NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import sys
import tensorflow as tf
import numpy as np
logger = logging.getLogger("hw3.q2.1")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class RNNCell(tf.contrib.rnn.RNNCell):
"""Wrapper around our RNN cell implementation that allows us to play
nicely with TensorFlow.
"""
def __init__(self, input_size, state_size):
self.input_size = input_size
self._state_size = state_size
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._state_size
def __call__(self, inputs, state, scope=None):
"""Updates the state using the previous @state and @inputs.
Remember the RNN equations are:
h_t = sigmoid(x_t W_x + h_{t-1} W_h + b)
TODO: In the code below, implement an RNN cell using @inputs
(x_t above) and the state (h_{t-1} above).
        - Define W_x, W_h, b to be variables of the appropriate shape
          using the `tf.get_variable` function. Make sure you use
          the names "W_x", "W_h" and "b"!
- Compute @new_state (h_t) defined above
Tips:
- Remember to initialize your matrices using the xavier
initialization as before.
Args:
inputs: is the input vector of size [None, self.input_size]
state: is the previous state vector of size [None, self.state_size]
scope: is the name of the scope to be used when defining the variables inside.
Returns:
a pair of the output vector and the new state vector.
"""
scope = scope or type(self).__name__
# It's always a good idea to scope variables in functions lest they
# be defined elsewhere!
with tf.variable_scope(scope):
### YOUR CODE HERE (~6-10 lines)
xavier_initializer = tf.contrib.layers.xavier_initializer()
W_x = tf.get_variable(name="W_x", shape = (self.input_size, self._state_size),
initializer = xavier_initializer)
W_h = tf.get_variable(name="W_h", shape = (self._state_size, self._state_size),
initializer = xavier_initializer)
b = tf.get_variable(name="b", shape = (self._state_size, ),
initializer = xavier_initializer)
new_state = tf.sigmoid(tf.matmul(inputs, W_x) + tf.matmul(state, W_h) + b)
### END YOUR CODE ###
        # For an RNN, the output and state are the same (N.B. this
# isn't true for an LSTM, though we aren't using one of those in
# our assignment)
output = new_state
return output, new_state
def test_rnn_cell():
with tf.Graph().as_default():
with tf.variable_scope("test_rnn_cell"):
x_placeholder = tf.placeholder(tf.float32, shape=(None,3))
h_placeholder = tf.placeholder(tf.float32, shape=(None,2))
with tf.variable_scope("rnn"):
tf.get_variable("W_x", initializer=np.array(np.eye(3,2), dtype=np.float32))
tf.get_variable("W_h", initializer=np.array(np.eye(2,2), dtype=np.float32))
tf.get_variable("b", initializer=np.array(np.ones(2), dtype=np.float32))
tf.get_variable_scope().reuse_variables()
cell = RNNCell(3, 2)
y_var, ht_var = cell(x_placeholder, h_placeholder, scope="rnn")
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
x = np.array([
[0.4, 0.5, 0.6],
[0.3, -0.2, -0.1]], dtype=np.float32)
h = np.array([
[0.2, 0.5],
[-0.3, -0.3]], dtype=np.float32)
y = np.array([
[0.832, 0.881],
[0.731, 0.622]], dtype=np.float32)
ht = y
y_, ht_ = session.run([y_var, ht_var], feed_dict={x_placeholder: x, h_placeholder: h})
print("y_ = " + str(y_))
print("ht_ = " + str(ht_))
assert np.allclose(y_, ht_), "output and state should be equal."
assert np.allclose(ht, ht_, atol=1e-2), "new state vector does not seem to be correct."
def do_test(_):
logger.info("Testing rnn_cell")
test_rnn_cell()
logger.info("Passed!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tests the RNN cell implemented as part of Q2 of Homework 3')
subparsers = parser.add_subparsers()
command_parser = subparsers.add_parser('test', help='')
command_parser.set_defaults(func=do_test)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
|
[
"noreply@github.com"
] |
hncpr1992.noreply@github.com
|
6ee39c46d0f28cf3bb1b5e87f00d043999b5ca81
|
e54e4f7dc62a539ff3a4ddf0826b40387052a00f
|
/redisearch/_util.py
|
d29e8d01b1f32cfd4ed23dfb8a2f1adb21902289
|
[
"BSD-2-Clause"
] |
permissive
|
surajptl/redisearch-py
|
305708dd8549f0d8ca84fbb36b789f5b0351000d
|
9ac3f2c18b9f3574df78e90dae6be199e2eeaade
|
refs/heads/master
| 2022-12-28T22:48:44.960041
| 2020-08-04T07:46:16
| 2020-08-04T07:46:16
| 287,507,232
| 0
| 0
|
BSD-2-Clause
| 2020-08-14T10:30:01
| 2020-08-14T10:30:00
| null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
import six
def to_string(s):
if isinstance(s, six.string_types):
return s
elif isinstance(s, six.binary_type):
return s.decode('utf-8')
else:
return s # Not a string we care about
|
[
"noreply@github.com"
] |
surajptl.noreply@github.com
|
27f1a5233f5fd1bd7b3d793c022586102443990a
|
07ce1c95aacd19c5ffb4f604a107e669919d4abc
|
/KF5_Tmin_Elev_RF180.py
|
e0b9a49ea44cd5e93ec883f4c2d5ef8ca58aa836
|
[] |
no_license
|
ekourkchi/WildFire
|
2f3577358844485640a5e58a25386bfb442ae18f
|
f0b849db7222db7c556b4694c02aca03cf4cff2c
|
refs/heads/main
| 2023-05-01T23:15:17.817102
| 2021-05-17T09:19:15
| 2021-05-17T09:19:15
| 328,634,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,238
|
py
|
from scipy.optimize import curve_fit
from sklearn.model_selection import KFold
from george import kernels
import george
from scipy.optimize import minimize
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import matplotlib
import sys
import os
import matplotlib.pyplot as plt
import requests
import json
import pandas as pd
from pandas.io import sql
from pandas.io.json import json_normalize
import numpy as np
from sqlalchemy import types, create_engine
from datetime import date, timedelta
from datetime import datetime
import time
import re
import pylab as py
from matplotlib import gridspec
import matplotlib.dates as md
from sklearn.model_selection import LeaveOneOut
os.environ['PROJ_LIB'] = '/home/ehsan/anaconda3/share/proj'
def linear(x, a, b):
return a * x + b
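# bilinear() below is piecewise-linear: slope a up to the breakpoint x = 2150,
# slope c beyond it, with the two segments meeting continuously at a*2150 + b.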
def bilinear(x, a, b, c):
left = a * x + b
right = c * (x - 2150) + (a * 2150 + b)
try:
y = np.asarray([left[i] if x[i] <= 2150 else right[i]
for i in range(len(x))])
return y
except BaseException:
if x <= 2150:
return left
else:
return right
# calculate bic for regression
def calculate_bic(n, mse, num_params):
bic = n * np.log(mse) + num_params * np.log(n)
return bic
# calculate aic for regression
def calculate_aic(n, mse, num_params):
aic = n * np.log(mse) + 2 * num_params
return aic
def metrics(y1, y2, verbose=True, n_param=1):
'''
    y1 and y2 are two series of the same size.
    This function outputs the MAE, RMSE, and R^2
    of the cross-evaluated series.
'''
y1 = y1.reshape(-1)
y2 = y2.reshape(-1)
n = len(y1)
mse = np.mean((y1 - y2)**2)
RMSE = np.sqrt(mse)
MAE = np.mean(np.abs(y1 - y2))
R2 = np.max([r2_score(y1, y2), r2_score(y2, y1)])
BIC = calculate_bic(n, mse, n_param)
AIC = calculate_aic(n, mse, n_param)
if verbose:
print('MAE: %.2f' % MAE, ' RMSE: %.2f' % RMSE, ' R^2: %.2f' % R2)
print('AIC: %.2f' % AIC, ' BIC: %.2f' % BIC)
return MAE, RMSE, R2, AIC, BIC
####################################################################
def nll_fn2(X, y):
def step(theta):
loo = KFold(n_splits=5)
# loo = LeaveOneOut()
XI2 = 0
for train_index, test_index in loo.split(X):
#if True: # for train_index, test_index in loo.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
N = X_train.shape[0]
zp = theta[0]
slope = theta[1]
s2 = theta[2]
s3 = theta[3]
y_model = slope * X_train[:, 2] + zp + s3 * X_train[:, 3]
ind, = np.where(X_train[:, 2] > 2150)
y_model[ind] += s2 * (X_train[:, 2][ind] - 2150)
y_model_test = slope * X_test[:, 2] + zp + s3 * X_test[:, 3]
ind, = np.where(X_test[:, 2] > 2150)
y_model_test[ind] += s2 * (X_test[:, 2][ind] - 2150)
delta = np.abs(y_model_test - y_test)
# print(delta[0])
# if delta[0] > 3:
# delta[0]=0
XI2 += np.sum(delta**2)
return XI2
return step
####################################################################
if __name__ == '__main__':
iCODE = str(sys.argv[1])
ISLAND_code = [iCODE]
if iCODE == 'MA':
ISLAND_code = ['MA', 'KO', 'MO', 'LA']
pr = pd.read_csv("tmin_predictors.csv",
encoding="ISO-8859-1", engine='python')
pr = pr.set_index("SKN")
pr = pr[(pr.Island.isin(ISLAND_code))]
rf = pd.read_csv("2_Partial_Fill_Daily_RF_mm_1990_2020.csv",
encoding="ISO-8859-1", engine='python')
meta_columns = [
'SKN',
'Station.Name',
'Observer',
'Network',
'Island',
'ELEV.m.',
'LAT',
'LON',
'NCEI.id',
'NWS.id',
'NESDIS.id',
'SCAN.id',
'SMART_NODE_RF.id']
Temp_columns = rf.columns[13:]
rf2 = rf[meta_columns]
rf2 = rf2.set_index("SKN")
rf2 = rf2[(rf2.Island.isin(ISLAND_code))]
rf1 = rf[["SKN"] + list(Temp_columns)].T
new_header = rf1.iloc[0]
rf1 = rf1[1:]
rf1.columns = new_header
rf1.index = pd.to_datetime([x.split('X')[1] for x in rf1.index.values])
rf1.index.name = 'Date'
rf1 = rf1[list(rf2.index.values)]
rf1_mean3 = rf1.rolling('3d', min_periods=3).mean()
rf1_mean7 = rf1.rolling('7d', min_periods=7).mean()
rf1_mean15 = rf1.rolling('15d', min_periods=10).mean()
rf1_mean30 = rf1.rolling('30d', min_periods=15).mean()
rf1_mean60 = rf1.rolling('60d', min_periods=30).mean()
rf1_mean90 = rf1.rolling('90d', min_periods=45).mean()
rf1_mean120 = rf1.rolling('120d', min_periods=60).mean()
rf1_mean180 = rf1.rolling('180d', min_periods=90).mean()
rf3 = rf2[["LON", "LAT", "ELEV.m."]].T
rf3 = rf3[list(rf2.index.values)]
df = pd.read_csv("Tmin_QC.csv", encoding="ISO-8859-1", engine='python')
meta_columns = [
'SKN',
'Station.Name',
'Observer',
'Network',
'Island',
'ELEV.m.',
'LAT',
'LON',
'NCEI.id',
'NWS.id',
'NESDIS.id',
'SCAN.id',
'SMART_NODE_RF.id']
Temp_columns = df.columns[13:]
df2 = df[meta_columns]
df2 = df2.set_index("SKN")
df2 = df2[(df2.Island.isin(ISLAND_code))]
df1 = df[["SKN"] + list(Temp_columns)].T
new_header = df1.iloc[0]
df1 = df1[1:]
df1.columns = new_header
df1.index = pd.to_datetime([x.split('X')[1] for x in df1.index.values])
df1.index.name = 'Date'
df1 = df1[list(df2.index.values)]
df3 = df2[["LON", "LAT", "ELEV.m."]].T
df3 = df3[list(df2.index.values)]
df_station = df3.T
df_station = df_station.join(df1.T, how='left')
dt_cols = df_station.columns[3:][::-1]
t1 = datetime.now()
###########################################
Hyper = {}
Hyper['MAE'] = []
Hyper['RMSE'] = []
Hyper['R2'] = []
Hyper['AIC'] = []
Hyper['BIC'] = []
Hyper['t0'] = []
Hyper['t1'] = []
Hyper['t2'] = []
Hyper['t3'] = []
###########################################
for dt_col in dt_cols:
try:
date_str = str(dt_col.date())
Date = pd.to_datetime(date_str)
rf_station = rf3.T
rf_station = rf_station.join(rf1.T[[Date]].rename(columns={Date: 'RF'}), how='left')
rf_station = rf_station.join(rf1_mean3.T[[Date]].rename(columns={Date: 'RF3'}), how='left')
rf_station = rf_station.join(rf1_mean7.T[[Date]].rename(columns={Date: 'RF7'}), how='left')
rf_station = rf_station.join(rf1_mean15.T[[Date]].rename(columns={Date: 'RF15'}), how='left')
rf_station = rf_station.join(rf1_mean30.T[[Date]].rename(columns={Date: 'RF30'}), how='left')
rf_station = rf_station.join(rf1_mean60.T[[Date]].rename(columns={Date: 'RF60'}), how='left')
rf_station = rf_station.join(rf1_mean90.T[[Date]].rename(columns={Date: 'RF90'}), how='left')
rf_station = rf_station.join(rf1_mean120.T[[Date]].rename(columns={Date: 'RF120'}), how='left')
rf_station = rf_station.join(rf1_mean180.T[[Date]].rename(columns={Date: 'RF180'}), how='left')
data_rf = rf_station[["RF", "RF7", "RF15", "RF30", "RF90", "RF180"]].dropna()
data_df = df_station[["LON", "LAT", "ELEV.m.", Date]].dropna()
data_df = data_df.rename(columns={Date: 'T'})
df = data_df.join(
data_rf,
how='left',
lsuffix='_T',
rsuffix='_RF').dropna()
df = df.join(pr[pr.columns[:-3]], how='left').dropna()
X = df[["LON", "LAT", "ELEV.m.", "RF180"]].values
y = df['T'].values
if len(data_df) > 1:
u = np.arange(np.round(np.max(X[:, 2])))
fit, cov = curve_fit(linear, X[:, 2], y, sigma=y * 0 + 1)
v1 = linear(u, fit[0], fit[1])
fit, cov = curve_fit(bilinear, X[:, 2], y, sigma=y * 0 + 1)
model = bilinear(X[:, 2], fit[0], fit[1], fit[2])
indx, = np.where(np.abs(model - y) < 3)
fit, cov = curve_fit(
bilinear, X[:, 2][indx], y[indx], sigma=y[indx] * 0 + 1)
model = bilinear(X[:, 2], fit[0], fit[1], fit[2])
indx, = np.where(np.abs(model - y) < 3)
fit, cov = curve_fit(
bilinear, X[:, 2][indx], y[indx], sigma=y[indx] * 0 + 1)
v2 = bilinear(u, fit[0], fit[1], fit[2])
X = X[indx]
y = y[indx]
# Maximum Likelihood
pos = minimize(nll_fn2(X, y), [
0, -0.002, 0, 0], method='L-BFGS-B')
theta = pos.x
N = X.shape[0]
zp = theta[0]
slope = theta[1]
s2 = theta[2]
s3 = theta[3]
y_model = slope * X[:, 2] + zp + s3*X[:,3]
ind, = np.where(X[:, 2] > 2150)
y_model[ind] += s2 * (X[:, 2][ind] - 2150)
MAE, RMSE, R2, AIC, BIC = metrics(
y, y_model, verbose=False, n_param=len(theta))
if pos.success:
Hyper['MAE'].append(MAE)
Hyper['RMSE'].append(RMSE)
Hyper['R2'].append(R2)
Hyper['AIC'].append(AIC)
Hyper['BIC'].append(BIC)
Hyper['t0'].append(theta[0])
Hyper['t1'].append(theta[1])
Hyper['t2'].append(theta[2])
Hyper['t3'].append(theta[3])
pd.DataFrame.from_dict(Hyper).to_csv(
'KF5_Tmin_Elev_RF180_'+iCODE+'_hyper.csv', sep=',', index=False)
# print(date_str, pos.success)
except:
pass
|
[
"ekourkchi@gmail.com"
] |
ekourkchi@gmail.com
|
bde6956609aba68b342d015bc32186f5f5fb5f82
|
dc940d7614c4cf55a3c61a1ad4ee48e4ea4e319d
|
/src/slurmify/__init__.py
|
cc723c570f8dfe66d10ec5ae27246ff2ea2a1250
|
[
"MIT"
] |
permissive
|
salotz/slurmify
|
b4c88e434c2814c71fcef9cd13ecd716163a75d8
|
4a6da629706621b9b2633d34e574b121a75a8f87
|
refs/heads/master
| 2020-04-28T23:22:10.227665
| 2019-10-08T01:37:33
| 2019-10-08T01:37:33
| 175,651,924
| 0
| 1
|
MIT
| 2020-12-01T07:43:57
| 2019-03-14T15:42:30
|
Python
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
import pkg_resources
import os
import os.path as osp
TEMPLATES_PATH = pkg_resources.resource_filename('slurmify', 'templates')
TEMPLATE_FILENAMES = tuple(os.listdir(TEMPLATES_PATH))
TEMPLATE_FILEPATHS = tuple([osp.join(TEMPLATES_PATH, filename) for filename
in TEMPLATE_FILENAMES])
|
[
"samuel.lotz@salotz.info"
] |
samuel.lotz@salotz.info
|
c562af65bec64fe2158eb72c157e4568b5af21b2
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnsearl.py
|
071f353b10e248c72e5b480bf1d46a32b41166c8
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 44
|
py
|
ii = [('FitzRNS.py', 1), ('MereHHB2.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
400c53c45a8fc1ed86885f16453368e4200bf90c
|
1ffd5de413dc1ba5046ec668de4b597fd0b6d3e3
|
/rsa_decode.py
|
c1b8a8bd8e200c97d4d1028201734fa5182bee1a
|
[] |
no_license
|
chiseng/RSA-CTF
|
9687895140749e564d666993944fbaddc7bcd2a8
|
38fb65218824c973287c229a7ef846aadf7d1f03
|
refs/heads/master
| 2020-06-01T16:01:30.973797
| 2019-07-25T01:20:32
| 2019-07-25T01:20:32
| 190,842,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from Crypto.PublicKey import RSA
import base64
f=open('encrypted.txt', 'rb')
message=base64.b64decode(f.read())
private_key = RSA.importKey(open("priv.pem").read())
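# Note: .decrypt() on a key object is the legacy PyCrypto API (raw, unpadded
# RSA); its maintained fork pycryptodome dropped it in favor of the
# Crypto.Cipher wrappers such as PKCS1_OAEP.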
text=private_key.decrypt(message)
print(text.decode('utf-8'))
|
[
"chiseng_wong@mymail.sutd.edu.sg"
] |
chiseng_wong@mymail.sutd.edu.sg
|
01f9d2a30447deccb7faa6b5a84798147b35930d
|
232ea06658ce3cafb03fc2b9567bfb0ba04e2f9c
|
/desafio/core/migrations/0002_phone.py
|
a5638a59ec597c118c61d95124554d58e2a5a4c6
|
[] |
no_license
|
TiagoLisboa/pitangagile-desafio
|
6cd939da6820249147def6b8d7f5afaedcba7487
|
873c15626a791303a7deb9a356461b748884cb34
|
refs/heads/master
| 2023-07-02T07:39:20.674041
| 2021-08-01T22:47:37
| 2021-08-01T22:47:37
| 391,487,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
# Generated by Django 3.2.5 on 2021-07-31 15:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Phone',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.BigIntegerField(verbose_name='number')),
('area_code', models.IntegerField(verbose_name='area code')),
('country_code', models.CharField(max_length=4, verbose_name='country code')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'phone',
'verbose_name_plural': 'phones',
},
),
]
|
[
"tiago.lisboa@devsquad.com"
] |
tiago.lisboa@devsquad.com
|
76bb321a415c79aeebadb80add405286f059d601
|
e90d5f029db0f93cdee35693ffca529c3934b654
|
/Pythonic/solneq/GaussSei.py
|
56f5b6558fdc9e95d6d190c055eb4052a385064f
|
[] |
no_license
|
TrigonaMinima/NumMets
|
c18c8a35101f4ffbd711bb27fae6b4898cc500a3
|
fa57aca32968574144660ecd8fd13963508d2efd
|
refs/heads/master
| 2021-01-20T09:12:52.031617
| 2014-05-26T13:04:47
| 2014-05-26T13:04:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
# Gauss-Seidel method
|
[
"shivamrana95@yahoo.in"
] |
shivamrana95@yahoo.in
|
8c9807c3b0b1e321a9112b6b2106cf08c861c342
|
2004cfde7f0cb70d10ae045e0bab12afa0d18b35
|
/etc/zipArchiveScript.py
|
066c003ec67081617849dd62ea59551fd0834a76
|
[] |
no_license
|
erpost/python-beginnings
|
a51951eb9a3bfd58bfcabd60e5968cbd7d29bc1d
|
8ef94a0ac077a463ecafbd085f8b79d78284a42a
|
refs/heads/master
| 2023-02-05T08:29:32.101001
| 2023-01-27T18:29:27
| 2023-01-27T18:29:27
| 120,106,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
import zipfile
import sys
import os
import logging
logging.basicConfig(filename='file_ex.log', level = logging.DEBUG)
logging.info("checking to see if the backup.zip file exists")
if os.path.exists("backup.zip"):
logging.info("it exists")
try:
zip_file = zipfile.ZipFile('backup.zip', 'a')
except:
err = sys.exc_info()
logging.error("Unable to open backup.zip in append mode")
logging.error("Error Num: " + str(error[1].args[0]))
logging.error("Error Msg: " + err[1].args[1])
sys.exit()
else:
logging.info("creating backup.zip")
try:
zip_file = zipfile.ZipFile('backup.zip', 'w')
except:
err = sys.exc_info()
logging.error("Unable to create backup.zip")
logging.error("Error Num: " + str(error[1].args[0]))
logging.error("Error Msg: " + err[1].args[1])
sys.exit()
logging.info("adding test.txt to backup.zip")
try:
zip_file.write('test.txt', 'test.txt', zipfile.ZIP_DEFLATED)
except:
err = sys.exc_info()
logging.error("Unable to create backup.zip in append mode")
logging.error("Error Num: " + str(error[1].args[0]))
logging.error("Error Msg: " + err[1].args[1])
zip_file.close()
|
[
"25180070+erpost@users.noreply.github.com"
] |
25180070+erpost@users.noreply.github.com
|
d52dee81c4ad7207d92e8ef6493d3f46d11f0b84
|
0bf0fafe2fb7ad48f012049bb005f7674d104d70
|
/projects/02_trivia_api/starter/backend/env/bin/easy_install-3.8
|
63f2f9a577d62f40cb3638e9b787226b38b4d52e
|
[] |
no_license
|
zziying/FSND
|
054ce647388bde61f97ce6bb6552e816c09f7bd3
|
92355a6c7afb5781093b45baa69d63ea3a059132
|
refs/heads/master
| 2022-11-24T03:44:23.336897
| 2020-08-02T00:00:45
| 2020-08-02T00:00:45
| 280,506,485
| 0
| 0
| null | 2020-07-17T19:15:45
| 2020-07-17T19:15:44
| null |
UTF-8
|
Python
| false
| false
| 307
|
8
|
#!/Users/imirenee/Desktop/webdev/FSND/projects/02_trivia_api/starter/backend/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ziying612@berkeley.edu"
] |
ziying612@berkeley.edu
|
1038737295edc226bf1768c58329d2bf99823932
|
c6fea702b817b719d9774b66d76c7cbaf1369d7d
|
/pythas-python3/day_06.py
|
b6fa1f942c5f98d8e1ea2e5564387889618a59ad
|
[] |
no_license
|
piksel/advent_of_code_2016
|
eae359228372b53f88430360b38e48210ac6da40
|
996fe2a999949fab420115474b32b40ed8ba8414
|
refs/heads/master
| 2021-01-12T10:12:19.865894
| 2016-12-18T16:55:58
| 2016-12-18T16:55:58
| 76,386,900
| 1
| 0
| null | 2016-12-13T18:29:15
| 2016-12-13T18:29:15
| null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import re, string
def solve(file):
lines = file.readlines()
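    # zip(*lines[::-1]) transposes the input: each resulting tuple holds the
    # characters of one column of the grid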
lines = list(zip(*lines[::-1]))
message = ''
message2 = ''
for line in lines:
count = [(i, line.count(i)) for i in set(line)]
count.sort(key = lambda x: x[1], reverse=True)
message += count[0][0]
message2 += count[-1][0]
print(message)
print(message2)
with open('data/06.txt', 'r') as file:
solve(file)
|
[
"pythas@gmail.com"
] |
pythas@gmail.com
|
821e61f7b33acd32ef5ac595ff87f80a93f1ec47
|
c0a380876abb91564b408cf08cfc056600c86c1f
|
/app/core/management/commands/wait_for_db.py
|
cf643323890da02a4ef939daff39d78fc47fcfc5
|
[
"MIT"
] |
permissive
|
acrawford13/recipe-app-api
|
8a5e28ea64fe9212d17308de29127d19670591e3
|
ef66b489e3431ecb38fa9148b370013b0e2fe880
|
refs/heads/master
| 2020-12-05T17:49:52.032090
| 2020-02-15T10:22:57
| 2020-02-15T10:22:57
| 232,195,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available."""
def handle(self, *args, **options):
"""Handle the command"""
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
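# Typical invocation (illustrative): `python manage.py wait_for_db`, e.g. in a
# container entrypoint before `migrate` / `runserver`.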
|
[
"andrea.crawford13@gmail.com"
] |
andrea.crawford13@gmail.com
|
b0e8d7312ffef5f4698b8962c1b81f8ad0abde77
|
03362ece836f297df553cb639c98695a1b7864a1
|
/portfolio/migrations/0002_auto_20210623_1935.py
|
ee4805230df4b7e1c1de580c97085f4def919167
|
[] |
no_license
|
purusottam234/My-Portfolio
|
31fc099df0c420d8d9649e23ca1b7db0592452b4
|
c26f36d7dc14d268b6d7ab350105528af3188e78
|
refs/heads/main
| 2023-05-31T01:19:29.296272
| 2021-06-24T04:52:50
| 2021-06-24T04:52:50
| 379,806,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.1 on 2021-06-23 13:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Facts',
),
migrations.DeleteModel(
name='Skills',
),
migrations.DeleteModel(
name='Testimonials',
),
]
|
[
"purusottamadhikari234@gmail.com"
] |
purusottamadhikari234@gmail.com
|
0b2e2c1a54d147703b047fece5d909a80c7b07a7
|
43a27b80ce1a8cf422142f5a43044917ff17a7cf
|
/python/leetcode/Subset.py
|
a70f7d5e2ff7e78883e6096eebe8fea47352c999
|
[] |
no_license
|
bignamehyp/interview
|
321cbe4c5763b2fc6d2ba31354d627af649fe4ed
|
73c3a3e94c96994afdbc4236888456c8c08b6ee4
|
refs/heads/master
| 2021-01-25T08:49:30.249961
| 2015-03-07T07:34:00
| 2015-03-07T07:34:00
| 23,125,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
class Solution:
# @param S, a list of integer
# @return a list of lists of integer
def subsets(self, S):
solns = []
S.sort()
N = 2**len(S)
for k in range(N):
soln = []
l = 0
while k > 0:
if k % 2 == 1:
soln.append(S[l])
k = k // 2  # integer division (k / 2 would yield a float on Python 3)
l += 1
solns.append(soln)
return solns
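# Illustrative check: each k in [0, 2**len(S)) is a bit mask selecting elements.
#   Solution().subsets([1, 2, 3])
#   -> [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]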
|
[
"huangyp@Huangyp-MBP-2.local"
] |
huangyp@Huangyp-MBP-2.local
|
6f72efc883c52be8b581a2cc97d5c28576f03fee
|
429211c01057abcd51e5120d566c7daa0a8e2f33
|
/database/parameter.py
|
581791ded6c2a5fde41b1746da6f3682cb0c1746
|
[] |
no_license
|
LDZ-RGZN/b1804
|
2788c922a6d1a6dc11267920a90336d1df93a453
|
c57f8b7cf14686036cae3c30a30f07514622b5ca
|
refs/heads/master
| 2021-07-19T12:54:07.031858
| 2018-10-12T02:48:39
| 2018-10-12T02:48:39
| 133,500,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from pymysql import *
def main():
    find_name = input('Enter the item name: ')
    # create the Connection
conn = connect(host = 'localhost',
port = 3306,
user = 'root',
database = 'jing_dong',
password = '150136',
charset = 'utf8')
    # get a Cursor object
cs1 = conn.cursor()
params = [find_name]
count = cs1.execute('select * from goods where name=%s',params)
print(count)
for i in range(count):
result = cs1.fetchone()
print(result)
cs1.close()
conn.close()
if __name__ == '__main__':
main()
|
[
"2654213432@qq.com"
] |
2654213432@qq.com
|
7dcfcc439a594d1f9fae8d4214dcd3468575e9a7
|
f5eecd96df0a682622bb2dd05521514f92171312
|
/12/script.py
|
5e5c46347c32c91064c52463e71a3ec43082ea59
|
[
"MIT"
] |
permissive
|
shreyansh26/Python-Challenge-Soutions
|
c112582fcb2e2aa24b62db34879043ba75e37c6d
|
0aa288df9c0af719972a351dcc5142e398114564
|
refs/heads/master
| 2020-04-30T08:44:11.405473
| 2019-03-20T12:19:21
| 2019-03-20T12:19:21
| 176,725,986
| 0
| 0
| null | 2019-03-20T12:18:01
| 2019-03-20T12:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
from PIL import Image
file = open("evil2.gfx", "rb").read()
# print(len(file.read()))
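# evil2.gfx interleaves five images byte-by-byte; the slice file[i::5]
# (every 5th byte, starting at offset i) recovers stream i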
for i in range(5):
f = open(str(i)+".jpg", "wb")
f.write(file[i::5])
|
[
"shreyansh.pettswood@gmail.com"
] |
shreyansh.pettswood@gmail.com
|
eaa683de07295d6efbdff9b2f773447c411ec61c
|
336a91fb67904c18a5dc88ab966a95a5eee84281
|
/dev-env/bin/pilprint.py
|
28e1afa1ba4c69dd78d58a61672c7b7e6ce1e012
|
[] |
no_license
|
toprasit2/POOL
|
818335d45f387b2a1ed7530926f6a6d42df31552
|
528c576904f7c9bd3fc2b516a2cf58f7a3fcd50b
|
refs/heads/master
| 2020-03-08T09:14:02.622041
| 2018-04-04T09:48:36
| 2018-04-04T09:48:36
| 128,041,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
#!/home/thanaphon/projects/dev-env/bin/python3.6
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
[
"toprasit.gk@gmail.com"
] |
toprasit.gk@gmail.com
|
47f1dea90fb7fc8e9c825f6bdedfc73208d44f41
|
e97c3bda61af3772f5e6e9816d27e12f8e3a8e50
|
/B47.py
|
a72b1d9a46c46b5a76576ba8d6f184739ad47811
|
[] |
no_license
|
satizkumar32/sk
|
54809f471687dbbff97e8cb0d1315e9923d9234f
|
684b65445681cdbc67ee7c33fc1ce636d13c6ed0
|
refs/heads/master
| 2020-05-23T00:54:52.994397
| 2019-07-11T03:19:50
| 2019-07-11T03:19:50
| 186,579,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
numb=int(input())
igs=list(map(int,input().split()))
gk=max(igs)
ik=min(igs)
print(ik,gk)
|
[
"noreply@github.com"
] |
satizkumar32.noreply@github.com
|
666dbd7f475d486a05ca63ece8e76fa4ba1bc681
|
594d2af9bfe0ab8cc1b9df673d5405034ce277fc
|
/Programming Fundamentals/exam_preparation/01.secret_chat.py
|
05f853d5e6e72deb2e947697b442830b90bd86a3
|
[] |
no_license
|
mi6eto/SoftUni
|
7f7aa7419b5430b6bd0c908246c1604342eb605a
|
56dc781fa3502bf250b47a52823de49bd2d85cb5
|
refs/heads/main
| 2023-02-12T11:43:47.997337
| 2021-01-16T19:00:00
| 2021-01-16T19:00:00
| 325,096,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
message = input()
command = input()
encrypted_message = message
while not command == "Reveal":
command = command.split(":|:")
name = command[0]
move = command[1]
if name == "InsertSpace":
move = int(command[1])
encrypted_message = encrypted_message[:move] + " " + encrypted_message[move:]
print(encrypted_message)
elif name == "Reverse":
if move in encrypted_message:
encrypted_message = encrypted_message.replace(move, "", 1)
move = move[::-1]
encrypted_message = encrypted_message + move
print(encrypted_message)
else:
print("error")
elif name == "ChangeAll":
replacement = command[2]
if move in encrypted_message:
encrypted_message = encrypted_message.replace(move, replacement)
print(encrypted_message)
command = input()
print(f"You have a new text message: {encrypted_message}")
|
[
"mivanov0105@gmail.com"
] |
mivanov0105@gmail.com
|
70819d4f7a61a67beb08903256c9e261cfbe48c8
|
4e90d567a5bee2b40109091fc7f06e69550b8061
|
/src/manage.py
|
d0871dfc59ef3944fd3edf8ae3c235c3e4263fd1
|
[
"MIT"
] |
permissive
|
jtrussell/swindle
|
cdd30c868a04e2b3b9bfa63b71971211a1d9c60c
|
914f9ddc7b155cf895fc233b9f3f0c1804bf23e3
|
refs/heads/main
| 2023-04-23T03:22:05.483701
| 2021-05-10T02:01:36
| 2021-05-10T02:01:36
| 350,307,319
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'swindle.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"jus.russell@gmail.com"
] |
jus.russell@gmail.com
|
5e8b02bdaf38c20d2f114f739a75378f121e5a59
|
6f6d884e8677f429a670de6cf44497720c53b0ab
|
/env/educa/bin/pilprint.py
|
aac0973cae03342b1a4ac244c2d8880bb4eb4daa
|
[] |
no_license
|
luoshuitaotao/eLearningmy_own_part
|
216c34557a28e4f9b0ebe9507a5ed052e46ce065
|
a3009f56b657ee062265d07301cbaae329adc871
|
refs/heads/master
| 2021-05-15T21:46:18.236365
| 2017-10-15T04:27:11
| 2017-10-15T04:27:11
| 106,574,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
#!/Users/cuijing/Desktop/test/env/educa/bin/python3.6
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.2a1/96-10-04 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printer = None # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printer = "lpr"
elif o == "-P":
# printer channel
printer = "lpr -P%s" % a
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printer:
fp = os.popen(printer, "w")
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
[
"jcui@unomaha.edu"
] |
jcui@unomaha.edu
|
72aa26de7735743a4922927eb8c2510797d8c1fe
|
d515fc60d1ce2ad5b5f58f66ec89379dac370679
|
/bot/Backtesting/Pairs.py
|
348e33128995f58595d5150f6fc0af3d10a06a5e
|
[] |
no_license
|
clinestanford/USUInvest18-19
|
49e69ff3991b087b57af84ebb079812cc076c973
|
641565e5710c42e671a05c6f41f03e2ee834bc5f
|
refs/heads/master
| 2021-06-23T03:57:53.696389
| 2019-08-14T23:27:19
| 2019-08-14T23:27:19
| 148,672,229
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
PATH_TO_DATA = '../StockData/'
class Pair:
##window will be the number of days to look back
##in order to calculate the mean
def __init__(self, pair1, pair2, window, gap):
self.pair1 = pair1
self.pair2 = pair2
self.window = window
self.gap = gap
def backtest(self, start_date, cash):
start_cash = cash
df1 = pd.read_pickle(PATH_TO_DATA + self.pair1 + '.pkl')
df2 = pd.read_pickle(PATH_TO_DATA + self.pair2 + '.pkl')
assert df1 is not None
assert df2 is not None
df1 = df1[['close', 'changePercent', 'volume']]
df2 = df2[['close', 'changePercent', 'volume']]
df1 = df1.rename(columns={"close": "close_pair1", "changePercent":"chg_pct_pair1", 'volume':'vol_pair1'})
df2 = df2.rename(columns={"close": "close_pair2", "changePercent":"chg_pct_pair2", 'volume':'vol_pair2'})
merge = df1.merge(df2, how='inner', left_index=True, right_index=True)
merge['spread'] = merge['close_pair1'] - merge['close_pair2']
#I will need to calculate the 'spread_pct'
## merge['spread_pct'] = (merge['spread'] - merge['spread'].mean()) / merge['spread'].std()
#this will test the two members for cointegration
#print(adfuller(merge['close_pair1'] - merge['close_pair2']), len(merge['close_pair1']))
# this will display a graph with a histogram
# merge['spread_pct'].hist(bins=30)
# plt.show(block=True)
bought = False
shares = 0
##TODO will need to set a mask to handle the time frame
mask = merge.index > start_date
delta = pd.Timedelta(str(self.window) + ' days')
for date in merge[mask].index:
##by iterating over this, I can create a new dataframe
##that will be masked for the last window days
day = merge.loc[date]
##need to calculate the spread_pct
temp_mask = (merge.index > date - delta) & (merge.index <= date)
temp_df = merge[temp_mask]
spread_pct = day.spread - temp_df['spread'].mean()
if spread_pct > self.gap and bought == False:
##sell the high, buy the low
#print(day.close_pair1, day.close_pair2)
bought = True
#sell the one
cash += day.close_pair1 * shares
#buy the other
shares = cash // day.close_pair2
cash -= shares * day.close_pair2
elif spread_pct < -1 * self.gap and bought == True:
##buy
#print(day.close_pair1, day.close_pair2)
bought = False
#sell the one
cash += day.close_pair2 * shares
#buy the other
shares = cash // day.close_pair1
cash -= shares * day.close_pair1
if bought:
cash += merge.loc[merge.index[-1]].close_pair2 * shares
else:
cash += merge.loc[merge.index[-1]].close_pair1 * shares
print(f"end Cash: ${cash}")
print(f"total returns: {cash/start_cash - 1}%")
return (cash, cash/start_cash - 1)
if __name__ == '__main__':
# delta = np.linspace(.4, .6, 20)
# results = []
# for val in delta:
pair = Pair('AKAM', 'ALLE', 30, .5)
start_cash = 25000
end_cash, end_perc = pair.backtest('2018-09-26', start_cash)
print(end_cash, end_perc)
#results.append((end_cash, end_perc, val))
# #results = sorted(results, key=lambda x: x[1])
# for r in results:
# print(r)
|
[
"clinestanford@gmail.com"
] |
clinestanford@gmail.com
|
79c8dcd5e4c88a73728513cb4df0880fed8d39ed
|
a1b649fcd0b6f6c51afb13f406f53d7d823847ca
|
/studies/migrations/0005_response_date_modified.py
|
bb8d219751306f22ed39ea102d80bdade3f9f49d
|
[
"MIT"
] |
permissive
|
enrobyn/lookit-api
|
e79f0f5e7a4ef8d94e55b4be05bfacaccc246282
|
621fbb8b25100a21fd94721d39003b5d4f651dc5
|
refs/heads/master
| 2020-03-27T01:54:00.844971
| 2018-08-08T15:33:25
| 2018-08-08T15:33:25
| 145,752,095
| 0
| 0
|
MIT
| 2018-08-22T19:14:05
| 2018-08-22T19:14:04
| null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-05 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studies', '0004_auto_20170616_0244'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_modified',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"henriqueharman@gmail.com"
] |
henriqueharman@gmail.com
|
faf962ecb5afbe4c62cc1493d84e930af29eab6b
|
9585322c6e129daf74b453e0ef47418f52ef10f7
|
/lib_new/checkmat.py
|
9f4eb34e9369ca03ccaf1af38c2978c112f9869f
|
[] |
no_license
|
SIMEXP/GCN_fmri_decoding
|
7399b3580a8d9b79a50d31e354ad97d559993d7a
|
8850245458fa0976b3449e3eed529f702b1b6edb
|
refs/heads/master
| 2021-07-13T10:45:12.541451
| 2021-03-25T04:37:42
| 2021-03-25T04:37:42
| 232,432,212
| 1
| 0
| null | 2020-11-02T01:27:02
| 2020-01-07T22:50:11
| null |
UTF-8
|
Python
| false
| false
| 6,204
|
py
|
import os
import glob
import json
import numpy as np
import tensorflow as tf
class BestCheckpointSaver(object):
"""Maintains a directory containing only the best n checkpoints
Inside the directory is a best_checkpoints JSON file containing a dictionary
mapping of the best checkpoint filepaths to the values by which the checkpoints
are compared. Only the best n checkpoints are contained in the directory and JSON file.
This is a light-weight wrapper class only intended to work in simple,
non-distributed settings. It is not intended to work with the tf.Estimator
framework.
"""
def __init__(self, save_dir, num_to_keep=1, maximize=True, saver=None):
"""Creates a `BestCheckpointSaver`
`BestCheckpointSaver` acts as a wrapper class around a `tf.train.Saver`
Args:
save_dir: The directory in which the checkpoint files will be saved
num_to_keep: The number of best checkpoint files to retain
maximize: Define 'best' values to be the highest values. For example,
set this to True if selecting for the checkpoints with the highest
given accuracy. Or set to False to select for checkpoints with the
lowest given error rate.
saver: A `tf.train.Saver` to use for saving checkpoints. A default
`tf.train.Saver` will be created if none is provided.
"""
self._num_to_keep = num_to_keep
self._save_dir = save_dir
self._save_path = os.path.join(save_dir, 'best.ckpt')
self._maximize = maximize
self._saver = saver if saver else tf.train.Saver(
max_to_keep=None,
save_relative_paths=True
)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
self.best_checkpoints_file = os.path.join(save_dir, 'best_checkpoints')
def handle(self, value, sess, global_step_tensor):
"""Updates the set of best checkpoints based on the given result.
Args:
value: The value by which to rank the checkpoint.
sess: A tf.Session to use to save the checkpoint
global_step_tensor: A `tf.Tensor` represent the global step
"""
global_step = sess.run(global_step_tensor)
current_ckpt = 'best.ckpt-{}'.format(global_step)
value = float(value)
if not os.path.exists(self.best_checkpoints_file):
self._save_best_checkpoints_file({current_ckpt: value})
self._saver.save(sess, self._save_path, global_step_tensor)
return
best_checkpoints = self._load_best_checkpoints_file()
if len(best_checkpoints) < self._num_to_keep:
best_checkpoints[current_ckpt] = value
self._save_best_checkpoints_file(best_checkpoints)
self._saver.save(sess, self._save_path, global_step_tensor)
return
if self._maximize:
should_save = not all(current_best >= value
for current_best in best_checkpoints.values())
else:
should_save = not all(current_best <= value
for current_best in best_checkpoints.values())
if should_save:
best_checkpoint_list = self._sort(best_checkpoints)
worst_checkpoint = os.path.join(self._save_dir,
best_checkpoint_list.pop(-1)[0])
self._remove_outdated_checkpoint_files(worst_checkpoint)
self._update_internal_saver_state(best_checkpoint_list)
best_checkpoints = dict(best_checkpoint_list)
best_checkpoints[current_ckpt] = value
self._save_best_checkpoints_file(best_checkpoints)
self._saver.save(sess, self._save_path, global_step_tensor)
def _save_best_checkpoints_file(self, updated_best_checkpoints):
with open(self.best_checkpoints_file, 'w') as f:
json.dump(updated_best_checkpoints, f, indent=3)
def _remove_outdated_checkpoint_files(self, worst_checkpoint):
os.remove(os.path.join(self._save_dir, 'checkpoint'))
for ckpt_file in glob.glob(worst_checkpoint + '.*'):
os.remove(ckpt_file)
def _update_internal_saver_state(self, best_checkpoint_list):
best_checkpoint_files = [
(ckpt[0], np.inf) # TODO: Try to use actual file timestamp
for ckpt in best_checkpoint_list
]
self._saver.set_last_checkpoints_with_time(best_checkpoint_files)
def _load_best_checkpoints_file(self):
with open(self.best_checkpoints_file, 'r') as f:
best_checkpoints = json.load(f)
return best_checkpoints
def _sort(self, best_checkpoints):
best_checkpoints = [
(ckpt, best_checkpoints[ckpt])
for ckpt in sorted(best_checkpoints,
key=best_checkpoints.get,
reverse=self._maximize)
]
return best_checkpoints
def get_best_checkpoint(best_checkpoint_dir, select_maximum_value=True):
""" Returns filepath to the best checkpoint
Reads the best_checkpoints file in the best_checkpoint_dir directory.
Returns the filepath in the best_checkpoints file associated with
the highest value if select_maximum_value is True, or the filepath
associated with the lowest value if select_maximum_value is False.
Args:
best_checkpoint_dir: Directory containing best_checkpoints JSON file
select_maximum_value: If True, select the filepath associated
with the highest value. Otherwise, select the filepath associated
with the lowest value.
Returns:
The full path to the best checkpoint file
"""
best_checkpoints_file = os.path.join(best_checkpoint_dir, 'best_checkpoints')
assert os.path.exists(best_checkpoints_file)
with open(best_checkpoints_file, 'r') as f:
best_checkpoints = json.load(f)
best_checkpoints = [ ckpt for ckpt in sorted(best_checkpoints,key=best_checkpoints.get,reverse=select_maximum_value) ]
return os.path.join(best_checkpoint_dir, best_checkpoints[0])
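# Illustrative usage (names below are examples, not part of this module):
#   best_saver = BestCheckpointSaver('ckpts/best', num_to_keep=3, maximize=True)
#   ...
#   best_saver.handle(validation_accuracy, sess, global_step_tensor)
#   restore_path = get_best_checkpoint('ckpts/best', select_maximum_value=True)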
|
[
"zhangyu2ustc@gmail.com"
] |
zhangyu2ustc@gmail.com
|
f578c13a1ecfd5199cfd7d828ac17dcf7795fa64
|
4a28265a41277f614c5574d750b11eb3b616e420
|
/challenges/isAdmissibleOverpayment.py
|
f991c3034149bfe8c23d518b1655305180d3ec9c
|
[] |
no_license
|
JorG96/CodingPractice
|
8819937f0fbf1112e2969b55bb62b9700a450222
|
c06d92079afe3a85e6c1748d86facd5fbfde8f71
|
refs/heads/main
| 2023-07-24T03:30:17.660260
| 2021-09-05T03:41:28
| 2021-09-05T03:41:28
| 398,324,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
'''
After recently joining Instacart's beta testing developer group, you decide to experiment with their new API. You know that the API returns item-specific display-ready strings like 10.0% higher than in-store or 5.0% lower than in-store that inform users when the price of an item is different from the one in-store. But you want to extend this functionality by giving people a better sense of how much more they will be paying for their entire shopping cart.
Your app lets a user decide the total amount x they are willing to pay via Instacart over in-store prices. This you call their price sensitivity.
Your job is to determine whether a given customer will be willing to pay for the given items in their cart based on their stated price sensitivity x.
Example
For
prices = [110, 95, 70],
notes = ["10.0% higher than in-store",
"5.0% lower than in-store",
"Same as in-store"]
and x = 5, the output should be
isAdmissibleOverpayment(prices, notes, x) = true.
In-store prices of the first and the second items are 100, and the price of the third item is 70, which means the customer is overpaying 10 - 5 + 0 = 5, which they are willing to do based on their price sensitivity.
For
prices = [48, 165],
notes = ["20.00% lower than in-store",
"10.00% higher than in-store"]
and x = 2, the output should be
isAdmissibleOverpayment(prices, notes, x) = false.
The in-store price of the first item is 60, and the second item is 150. The overpayment equals 15 - 12 = 3, which is too much for the customer to be willing to pay.
'''
import re
def extractNumber(string):
    # accept "10.0%"-style decimals as well as plain integers like "10%"
    return float(re.findall(r"\d+(?:\.\d+)?", string)[0])
def percentDiffH(price,percentage):
return price*(1-1/(1+extractNumber(percentage)*0.01))
def percentDiffL(price,percentage):
return price*(1-1/(1-extractNumber(percentage)*0.01))
def isAdmissibleOverpayment(prices, notes, x):
priceDiff=[]
for item in range(len(notes)):
if 'higher' in notes[item]:
priceDiff.append(percentDiffH(prices[item],notes[item]))
elif 'lower' in notes[item]:
priceDiff.append(percentDiffL(prices[item],notes[item]))
else:
priceDiff.append(0)
return sum(priceDiff)<=x+0.000000001
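
# Quick check against the two worked examples from the prompt above:
if __name__ == '__main__':
    print(isAdmissibleOverpayment(
        [110, 95, 70],
        ["10.0% higher than in-store", "5.0% lower than in-store", "Same as in-store"],
        5))  # expected: True
    print(isAdmissibleOverpayment(
        [48, 165],
        ["20.00% lower than in-store", "10.00% higher than in-store"],
        2))  # expected: False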
|
[
"noreply@github.com"
] |
JorG96.noreply@github.com
|
d04f2054d41b54b3d9ec9a8cf5cfd3175fd904c2
|
a1242d26f49a3f5825a476b47fb7d790c3f3a5ab
|
/normal_dataset.py
|
60e055bddc8ffa3910eadfe439a0723c8fe02d3b
|
[] |
no_license
|
biditdas18/Covid-19_Detection_from_X_ray_image
|
85b42998012e99ce4899064625fd37a1d1e21c9b
|
5c7c0b3b4b954d6b67ab867d09793133d582c786
|
refs/heads/master
| 2022-11-13T21:26:29.722192
| 2020-06-26T09:10:40
| 2020-06-26T09:10:40
| 275,111,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
from imutils import paths
import argparse
import random
import os
import shutil
ap = argparse.ArgumentParser()
ap.add_argument('-n','--normal', required = True,
help = 'path to the base directory of normal xray dataset')
ap.add_argument('-o','--output', required = True,
help = 'path to the direcctory where images of normal xray will be stored')
ap.add_argument('-s','--sample',type=int, default=206,
help='number of samples selected')
args = vars(ap.parse_args())
# grabbing all the training paths from the kaggle chest x-ray dataset
basePath = os.path.sep.join([args['normal'],'train','NORMAL'])
imagePaths = list(paths.list_images(basePath))
random.seed(42)
random.shuffle(imagePaths)
imagePaths = imagePaths[:args['sample']]
for i,imagePath in enumerate(imagePaths):
filename = imagePath.split(os.path.sep)[-1]
outputPath = os.path.sep.join([args['output'],filename])
shutil.copy2(imagePath,outputPath)
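# Example invocation (a sketch; the paths are placeholders for the local Kaggle
# chest x-ray dataset and the desired output directory):
#   python normal_dataset.py --normal chest_xray --output dataset/normal --sample 206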
|
[
"noreply@github.com"
] |
biditdas18.noreply@github.com
|
61a040429d1c3dd55166c7657efc5b4fbb53c03e
|
507fb1ec490943034d8507fc69f4f63039232ec3
|
/models/entities.py
|
7739c0124db33f0a0a05ce57cafaa95a1657f321
|
[] |
no_license
|
kjanko/data-scrapping
|
968ff800a27770241198f3b964b505f80bc633b6
|
709d538e45945344eb68900475da505d1c1457dc
|
refs/heads/master
| 2020-09-26T17:42:17.406929
| 2019-12-06T10:39:07
| 2019-12-06T10:39:07
| 226,134,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
from peewee import Model, CharField, ForeignKeyField
from models import database
class BaseModel(Model):
class Meta:
database = database
class Flight(BaseModel):
destination = CharField()
weather = CharField()
timestamp = CharField()
    @property
    def first_note(self):
        # renamed from 'notes': that name is taken by the backref declared on
        # Note below, and a property called 'notes' would shadow it (and
        # self.notes.get() would recurse). Returns the first related note.
        return self.notes.get()
class Note(BaseModel):
text = CharField()
flight = ForeignKeyField(Flight, backref='notes')
|
[
"kristijan.jankoski@netcetera.com"
] |
kristijan.jankoski@netcetera.com
|
1cf5ae3abf64cb0f896364d8cd5cb9ed3ea69664
|
40132307c631dccbf7aa341eb308f69389715c73
|
/OLD/idmt/maya/Calimero/UnlockALL.py
|
b7d180408bf113df0b406b6af1cf6e6a88b352bf
|
[] |
no_license
|
Bn-com/myProj_octv
|
be77613cebc450b1fd6487a6d7bac991e3388d3f
|
c11f715996a435396c28ffb4c20f11f8e3c1a681
|
refs/heads/master
| 2023-03-25T08:58:58.609869
| 2021-03-23T11:17:13
| 2021-03-23T11:17:13
| 348,676,742
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
# --- Unlock transforms
import maya.cmds as mc
# the ten channels to unlock and make keyable on every transform node
attrs = ['translateX', 'translateY', 'translateZ',
         'rotateX', 'rotateY', 'rotateZ',
         'scaleX', 'scaleY', 'scaleZ', 'visibility']
for sel in mc.ls(tr=True, l=True):
    for attr in attrs:
        mc.setAttr(sel + '.' + attr, l=False, k=True)
|
[
"snakelonely@outlook.com"
] |
snakelonely@outlook.com
|
03078a67d75666352755db543548fc948c990a24
|
ead9315c1cc9ac6da71985c84d35e3cde8abdeea
|
/MachineLearning/GD.py
|
6865deaeacff305a44d134029f44f841f9b7e02c
|
[] |
no_license
|
drivenow/MachineLearning
|
e2091d5c37fbaacca4377ffee0aac38188344fbc
|
f83ce40900adc43279c5af15058b6831ea4dc60f
|
refs/heads/master
| 2021-05-16T09:27:28.361265
| 2017-09-22T07:52:35
| 2017-09-22T07:52:35
| 104,430,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 21:43:14 2015
@author: shenjunling
“””
本算法实现线性回归实验。
输入:年份(2000-2013),年份对应的房价
输出:年份—房价成线性相关的预测直线,损失函数的变化图
参数说明:
X : 年份,同减去2000以标准化
Y : 房价
Alpha :梯度下降法的步长
Theta :权重参数
Maxiter : 最大迭代次数
“””
"""
#%%
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
x=np.linspace(2000,2013,14)
x=np.subtract(x,[2000]*14)
#x = np.divide(np.subtract(x,min(x)),(max(x)-min(x)))
#x = np.divide(np.subtract(x,x.mean()),x.var())
y=[2,2.500,2.900,3.147,4.515,4.903,5.365,5.704,6.853,
7.971,8.561,10.000,11.280,12.900]
y = np.array(y)
#y = np.subtract(y,y.mean())
#y = np.divide(np.subtract(y,y.mean()),y.var())
#y = np.divide(np.subtract(y,min(y)),(max(y)-min(y)))
# scatter plot of the raw data
plt.figure(1)
plt.scatter(x,y)
#%%
def regression(x,theta):
return x*theta[1]+theta[0]
# initial parameters
alpha=0.0001
theta0=[1,1]
theta=theta0
maxiter = 50
iterator=0
while True:
    # iteration counter
iterator=iterator+1
    # predicted values
y0=regression(x,theta)
    # squared error
error=sum((y0-y)**2)
    # plot the error curve
if(iterator%1==0):
plt.figure(2)
plt.scatter(iterator,error)
    # gradients: the intercept theta[0] uses sum(y0 - y), the slope
    # theta[1] uses sum((y0 - y) * x); the original applied the slope
    # gradient to both parameters
    gradient0 = sum(y0 - y)
    gradient1 = sum((y0 - y) * x)
    # update theta
    theta = np.subtract(theta, [alpha * gradient0, alpha * gradient1])
    # stopping condition
if error<0.001 or iterator>maxiter:
break
plt.figure(2)
plt.xlabel('iter')
plt.ylabel('loss')
plt.show(False)
# plot the regression line
plt.figure(1)
plt.plot(x,y0)
plt.xlabel('year')
plt.ylabel('price')
plt.show(False)
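# Sanity check (a sketch, assuming the loop above converged): compare against
# numpy's closed-form least-squares fit. np.polyfit(x, y, 1) returns
# [slope, intercept], while theta is ordered [intercept, slope].
print('np.polyfit fit (slope, intercept):', np.polyfit(x, y, 1))
print('gradient descent theta (intercept, slope):', theta)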
|
[
"1171935994@qq.com"
] |
1171935994@qq.com
|
88478167a06dcdc3161b86f7e90b0fe4afef5f45
|
ffc46e91f18e93c6dfe7a5fe64145d82f2857c16
|
/alien.py
|
4c40ffdae64542dc48398329e75d888df1239cc9
|
[] |
no_license
|
bmgreene19/alieninvasion
|
79640d0da35e9855579add406d0386c543eee07a
|
3233c3a14bc0bf5a4de9b601cfda1e4cceaf9fd5
|
refs/heads/main
| 2023-01-20T11:03:06.030931
| 2020-11-19T06:55:29
| 2020-11-19T06:55:29
| 314,161,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
'''A class to represent a single alien in the fleet'''
def __init__(self, ai_game):
'''Initialize the alien and set its starting position'''
super().__init__()
self.screen = ai_game.screen
self.settings = ai_game.settings
# Load the alien image and set its rect attribute
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
# Start each new alien near the top left of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
# Store the alien's exact horizontal position
self.x = float(self.rect.x)
def check_edges(self):
'''Return true if alien is at edge of screen'''
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right or self.rect.left <= 0:
return True
def update(self):
'''Move the alien to the right or left'''
self.x += (self.settings.alien_speed * self.settings.fleet_direction)
self.rect.x = self.x
|
[
"noreply@github.com"
] |
bmgreene19.noreply@github.com
|
9ead7acdf2745df4f47c825cf56dab9abb9cbdd2
|
3b26427a5afe4bd4fa50875dbec7996a8c9d8f62
|
/catkin_ws/devel/lib/python2.7/dist-packages/niryo_one_msgs/srv/_SetCalibrationCam.py
|
a3ab00279ce00b214a19c23af89ca4d155001ec9
|
[] |
no_license
|
Anton1B/Base_mobile_MaD
|
26ca8854863fa30ef7db582aed4e62fb35409178
|
46a1cda6167e40c5203bb865802edcca1a4c8263
|
refs/heads/main
| 2023-08-14T21:07:48.631852
| 2021-09-13T12:50:19
| 2021-09-13T12:50:19
| 373,205,357
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,259
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from niryo_one_msgs/SetCalibrationCamRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetCalibrationCamRequest(genpy.Message):
_md5sum = "c1f3d28f1b044c871e6eff2e9fc3c667"
_type = "niryo_one_msgs/SetCalibrationCamRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string name
"""
__slots__ = ['name']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetCalibrationCamRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
else:
self.name = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from niryo_one_msgs/SetCalibrationCamResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetCalibrationCamResponse(genpy.Message):
_md5sum = "b8d517967bcdc1a37247f33181b13bc0"
_type = "niryo_one_msgs/SetCalibrationCamResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """int32 status
int32 SUCCESSFULLY_SET = 0
int32 OVERWRITTEN = 1
int32 NOT_SET = -1
"""
# Pseudo-constants
SUCCESSFULLY_SET = 0
OVERWRITTEN = 1
NOT_SET = -1
__slots__ = ['status']
_slot_types = ['int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetCalibrationCamResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = 0
else:
self.status = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.status
buff.write(_get_struct_i().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(self.status,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.status
buff.write(_get_struct_i().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(self.status,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
class SetCalibrationCam(object):
_type = 'niryo_one_msgs/SetCalibrationCam'
_md5sum = '34594f1cc2cba58cae4d417628221460'
_request_class = SetCalibrationCamRequest
_response_class = SetCalibrationCamResponse
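# Round-trip sketch (not part of the generated file; 'cam_left' is a made-up name):
# from io import BytesIO
# req = SetCalibrationCamRequest(name='cam_left')
# buff = BytesIO()
# req.serialize(buff)
# parsed = SetCalibrationCamRequest().deserialize(buff.getvalue())
# assert parsed.name == 'cam_left'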
|
[
"betaille.antonin@gmail.com"
] |
betaille.antonin@gmail.com
|
d26125814a10613abfecc0df6a4ed36751bd4ea9
|
ad870203d61044a02c1bad8cdb8704846a192e23
|
/MUNDO_01/Aula_11/Cores_no_terminal.py
|
d3e7f3e0ff8849b78f4a3074030cba99beaf55e0
|
[] |
no_license
|
gpreviatti/exercicios-python
|
ee3095677c8628c7dbeb6b3d8275dc12666fd602
|
72dd6a83891906a3e9eaa40c4a68d2422ee593cc
|
refs/heads/master
| 2021-06-24T10:44:17.278772
| 2020-11-13T18:54:02
| 2020-11-13T18:54:02
| 166,492,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
# File demonstrating ANSI colors in the terminal
# ANSI escape codes
# \033[STYLE(normal,bold,underline);TEXT(color);BACKGROUND(background color)m
# TEXT STYLES
print('\033[1m Bold')
print('\033[4m Underline\n')
print('\033[7m Inverted colors')
# TEXT COLORS (per the ANSI standard: 30 is black, 37 is white)
print('\033[30m Black')
print('\033[31m Red')
print('\033[32m Green')
print('\033[33m Yellow')
print('\033[34m Blue')
print('\033[35m Magenta')
print('\033[36m Cyan')
print('\033[37m White')
# BACKGROUNDS
print('\033[40m Black background')
print('\033[41m Red background')
print('\033[42m Green background')
print('\033[43m Yellow background')
print('\033[44m Blue background')
print('\033[45m Magenta background')
print('\033[46m Cyan background')
print('\033[47m White background')
print('\033[m')  # reset the color back to the default
# to combine styles, just separate them with semicolons, e.g.:
print('\033[4;31;44m TEST')  # always in this order: style;text;background
|
[
"gpreviatt@gmail.com"
] |
gpreviatt@gmail.com
|
47b04892b221e164b29feeed52cc0d7f46750230
|
6eb4a6a795a935729e582b82a5a49c9645555099
|
/src/dev/echo-client.py
|
48fe19c550cfb747f3786a67ad97706bb7d9e96e
|
[] |
no_license
|
yw6916/ASA
|
e7263f17bc90b7c583dba9dff5af3a34c0a52f67
|
ca431c695c691fbf1c06b5f69aa6dbc637ea45d6
|
refs/heads/master
| 2020-04-24T12:34:30.565250
| 2019-02-21T18:35:18
| 2019-02-21T18:35:18
| 171,959,620
| 1
| 0
| null | 2019-02-21T23:13:20
| 2019-02-21T23:13:20
| null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
#!/usr/bin/env python3
import socket
import struct
import argparse
def send_message(msg, HOST='127.0.0.1', PORT=65432):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(msg)
data = s.recv(1024)
print('Received', repr(data))
return repr(data)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Send message to socket.')
parser.add_argument('-e', '--exit', action='store_true',
help='sends exit message to socket')
parser.add_argument('-i', '--in', default=0, type=int,
help='input for message')
FMT = '?i'
args = vars(parser.parse_args())
exit = args['exit']
val = args['in']
if not exit:
msg = struct.pack(FMT, True, val)
send_message(msg)
else:
msg = struct.pack(FMT, False, 0)
send_message(msg)
print("Sent",msg)
|
[
"joearrowsmith98@gmail.com"
] |
joearrowsmith98@gmail.com
|
3817993a706e9bc126c553f35e7c7d3d592ba38f
|
80f66a4d10b43cd28c7a49ff8ce829899b73173b
|
/Python Codes/correlation.py
|
4ee1b979f6728a94db7130e9d4f616e007efee09
|
[] |
no_license
|
davidulicio/Rapa-Nui-s-soundings-analysis
|
8ac30d4f5707b7217a68c224cfb2a6911d078d41
|
dfd087b3cf6d2f7647445871e63d9e1c98475826
|
refs/heads/master
| 2021-06-23T20:33:43.100582
| 2019-09-21T03:05:54
| 2019-09-21T03:05:54
| 116,276,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
# -*- coding: utf-8 -*-
"""
Correlation between ozone and VOCs
@author: DavidUlises
"""
import numpy as np
from Subplots_EIC import yearCO, yearCO2, yearP, yearNP, yearNB, yearE, yearMP, yearMB
from ozone import year
# Correlation coeficients
co = np.corrcoef(yearCO, year)
co2 = np.corrcoef(yearCO2, year)
p = np.corrcoef(yearP, year)
np_corr = np.corrcoef(yearNP, year)  # renamed: assigning to 'np' would shadow the numpy import and break the calls below
nb = np.corrcoef(yearNB, year)
e = np.corrcoef(yearE, year)
mp = np.corrcoef(yearMP, year)
mb = np.corrcoef(yearMB, year)
|
[
"30703946+davidulicio@users.noreply.github.com"
] |
30703946+davidulicio@users.noreply.github.com
|
81289e1ac4431bee8a711cd0b2370d2c34425e7c
|
ee980aedb53d3e3cd03d4f55b2f6451eb9602218
|
/result_dir.py
|
6092fa8209982f69a95c1a34645d994fa618bed4
|
[] |
no_license
|
renatodmt/Auto-ML-Research-Project
|
1004331bc217446be946d6b75679d9ba77ade3e0
|
e633d46649c1c728d1d2289bed0ee89c4cc0518d
|
refs/heads/main
| 2023-04-20T08:50:14.619230
| 2021-05-14T01:50:36
| 2021-05-14T01:50:36
| 351,396,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import os
def make_dir_if_not_exists(dir):
if os.path.exists(dir):
print(f'{dir} already exists!')
else:
os.makedirs(dir)
|
[
"noreply@github.com"
] |
renatodmt.noreply@github.com
|
5266bd005817bd8e2d651a2454a37f7f848dd8eb
|
65af3d50e34b0669c377cd59e4c175bd8639247d
|
/tests/reshape_2/generate_pb.py
|
8b880dad445296192d50d0ffc1ff8de7ce4210dc
|
[
"Apache-2.0"
] |
permissive
|
lzufalcon/utensor_cgen
|
3688881706710a66f14e399463f705045f26fe10
|
1774f0dfc0eb98b274271e7a67457dc3593b2593
|
refs/heads/master
| 2020-03-22T14:28:07.393957
| 2018-04-06T15:40:30
| 2018-04-06T15:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# -*- coding: utf8 -*-
import os
from utensor_cgen.utils import save_consts, save_graph, save_idx
import numpy as np
import tensorflow as tf
def generate():
"""the reshape op will be used in this case since tensorflow will flatten
the input tensor and find the min/max value for quantized matmul
"""
test_dir = os.path.dirname(__file__)
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32,
shape=[None, 3],
name='x')
w = tf.constant(0.5 * np.random.randn(3, 1),
dtype=tf.float32,
name='w')
y = tf.matmul(x, w, name='y')
np_x = 0.5 * np.random.randn(5, 3).astype(np.float32)
with tf.Session(graph=graph) as sess:
save_consts(sess, test_dir)
save_graph(graph, 'test_reshape_2', test_dir)
np_output = y.eval(feed_dict={'x:0': np_x})
save_idx(np_x, os.path.join(test_dir, 'input_x.idx'))
save_idx(np_output, os.path.join(test_dir, 'output_y.idx'))
if __name__ == "__main__":
generate()
|
[
"qmalliao@gmail.com"
] |
qmalliao@gmail.com
|
038356119549e317713b53c56effec940daac268
|
eae7786f204ff1329dc57bcf7dbec86459312ba3
|
/igata/utils.py
|
1c139fa5997b892d420765011f51aabd2642b785
|
[
"BSD-2-Clause"
] |
permissive
|
kiconiaworks/igata
|
a70285e9467b66325becbd60b29459f34bf67a69
|
29b854fb498b5eabf41465fcaaeb7c8059f944b0
|
refs/heads/master
| 2023-01-11T07:43:56.092988
| 2022-04-06T11:37:13
| 2022-04-06T11:37:13
| 211,023,674
| 1
| 0
|
BSD-2-Clause
| 2022-12-27T16:38:09
| 2019-09-26T07:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 11,289
|
py
|
import csv
import datetime
import json
import logging
import os
import time
import urllib
from collections.abc import Hashable
from decimal import Decimal
from gzip import GzipFile
from hashlib import md5
from io import BytesIO, StringIO
from pathlib import Path
from typing import Generator, List, Optional, Tuple, Union
from urllib.error import HTTPError
from urllib.parse import unquote, urlparse
from uuid import NAMESPACE_URL, uuid5
import boto3
import imageio
import numpy as np
import pandas
import requests
from botocore.errorfactory import ClientError
from igata import settings
from requests.adapters import HTTPAdapter
from retry.api import retry_call
from urllib3 import Retry
logger = logging.getLogger("cliexecutor")
# for generating UUID for request_id
UUID_NAMESPACE_DNS_NAME = os.getenv("UUID_NAMESPACE_DNS_NAME", "my-api.com")
S3 = boto3.client("s3", endpoint_url=settings.S3_ENDPOINT)
def default_json_encoder(obj):
"""
Serialize for objects that cannot be serialized by the default json encoder
Usage:
json_bytes = json.dumps(myobj, default=default_json_encoder)
"""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, Decimal):
return float(obj)
raise TypeError(f"Object cannot be serialized: {obj}")
def flatten(nested_object, keystring="", allow_null_strings=True, separator="__") -> Generator[tuple, None, None]:
"""
Flatten a nested dictionary into a flat/single-level key, value tuple.
Usage:
nested_object = {
'key1': {'other': 'other1'},
'key2': 'value2'
}
for key_value in flatten(nested_object):
print(key_value) # ('key1__other': 'other1') ...
.. note::
Results can be converted to dictionary using:
flattened_dict = dict(flatten(nested_object))
"""
if isinstance(nested_object, dict):
keystring = f"{keystring}{separator}" if keystring else keystring
for key in nested_object:
updated_keystring = f"{keystring}{key}"
yield from flatten(nested_object[key], updated_keystring, allow_null_strings, separator)
elif isinstance(nested_object, list):
for list_element in nested_object:
yield from flatten(list_element, keystring, allow_null_strings, separator)
else:
if not allow_null_strings:
if nested_object != "":
yield keystring, nested_object
else:
yield keystring, nested_object
def prepare_images(bucket, key) -> Tuple[Tuple[str, str], np.array, float, Optional[str]]:
"""
    Read the image at the given s3 key into a numpy array, retrying the download on failure.
"""
error_message = None
key = unquote(key)
url = S3.generate_presigned_url(ClientMethod="get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=3600, HttpMethod="GET")
start = time.time()
try:
image = retry_call(imageio.imread, fargs=[url], tries=10)[:, :, :3]
except HTTPError as e:
logger.exception(e)
error_message = f"Exception while processing image(s3://{bucket}/{key}): ({e.code}) {e.reason}"
logger.error(error_message)
image = np.array([])
except ValueError as e:
logger.exception(e)
error_message = f"Exception while processing image(s3://{bucket}/{key}): {e.args}"
logger.error(error_message)
image = np.array([])
end = time.time()
download_time = end - start
return (bucket, key), image, download_time, error_message
def _download_s3_file(bucket: str, key: str) -> dict:
"""Download file from S3"""
url = S3.generate_presigned_url(ClientMethod="get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=3600, HttpMethod="GET")
logger.info(f"downloading ({url})...")
response = requests_retry_session().get(url)
return response
def prepare_csv_reader(
bucket: str,
key: str,
encoding: str = settings.INPUT_CSV_ENCODING,
delimiter: str = settings.INPUT_CSV_DELIMITER,
reader: Union[csv.reader, csv.DictReader] = csv.DictReader,
dialect: str = settings.INPUT_CSV_READER_DIALECT,
) -> Tuple[Tuple[str, str], Union[csv.reader, csv.DictReader, None], float, Optional[str]]:
"""
    Read the CSV (optionally gzip-compressed) at the given s3 key and return a
    csv reader (csv.reader or csv.DictReader) over its contents.
"""
    error_message = None
    csvreader = None
    response = None
    key = unquote(key)
    start = time.time()  # started before branching so download_time is defined on every path
    if key.lower().endswith((".csv", ".gz")):
        try:
            response = _download_s3_file(bucket, key)
except HTTPError as e:
logger.exception(e)
error_message = f"Exception while processing csv(s3://{bucket}/{key}): ({e.code}) {e.reason}"
logger.error(error_message)
except ValueError as e:
logger.exception(e)
error_message = f"Exception while processing csv(s3://{bucket}/{key}): {e.args}"
logger.error(error_message)
        if response is not None and 200 <= response.status_code <= 299:
if key.lower().endswith(".gz"):
data = GzipFile(fileobj=BytesIO(response.content)).read().decode(encoding)
csvreader = reader(StringIO(data), dialect=dialect, delimiter=delimiter)
elif key.lower().endswith(".csv"):
data = response.text
csvreader = reader(StringIO(data), dialect=dialect, delimiter=delimiter)
        elif response is not None:
            error_message = f"({response.status_code}) error downloading data"
else:
error_message = f"unsupported CSV file extension: s3://{bucket}/{key}"
end = time.time()
download_time = end - start
return (bucket, key), csvreader, download_time, error_message
def prepare_csv_dataframe(
bucket: str, key: str, read_csv_kwargs: Optional[dict] = None
) -> Tuple[Tuple[str, str], Optional[pandas.DataFrame], float, Optional[str]]:
"""Read CSV from s3 and return a dataframe"""
df = None
error_message = None
response = None
start = time.time()
try:
response = _download_s3_file(bucket, key)
except HTTPError as e:
logger.exception(e)
error_message = f"Exception while processing csv(s3://{bucket}/{key}): ({e.code}) {e.reason}"
logger.error(error_message)
if response:
if 200 <= response.status_code <= 299:
filename = Path(key.split("/")[-1])
data = BytesIO(response.content)
data.name = filename.name
if not read_csv_kwargs:
# set defaults
read_csv_kwargs = {
"sep": settings.DEFAULT_INPUT_CSV_DELIMITER,
"encoding": settings.DEFAULT_INPUT_CSV_ENCODING,
"header": settings.DEFAULT_INPUT_CSV_HEADER_LINES,
}
# - determine compression
ext = filename.suffix.lower()
compression_ext_mapping = {".zip": "zip", ".gz": "gzip", ".xz": "xz", ".bz2": "bz2"}
compression = compression_ext_mapping.get(ext, None)
if compression and "compression" not in read_csv_kwargs:
read_csv_kwargs["compression"] = compression
logger.debug(f"read_csv_kwargs={read_csv_kwargs}")
try:
df = pandas.read_csv(data, **read_csv_kwargs)
except Exception as e:
logger.exception(e)
error_message = f"Exception Occurred while calling pandas.read_csv(): {e.args}"
else:
error_message = f"Invalid response.status_code while processing csv(s3://{bucket}/{key}): status_code={response.status_code}"
logger.error(error_message)
else:
error_message = f"response not defined, download failed for: s3://{bucket}/{key}"
logger.error("response not defined!")
end = time.time()
download_time = end - start
return (bucket, key), df, download_time, error_message
def parse_s3_uri(uri: str) -> Tuple[str, str]:
"""
Parse s3 uri (s3://bucket/key) to (bucket, key)
"""
result = urlparse(uri)
bucket = result.netloc
key = result.path[1:] # removes leading slash
return bucket, key
def generate_request_id(*values, uuid_namespace_dns_name=UUID_NAMESPACE_DNS_NAME) -> str:
"""
Generate the UUID string for given values
.. note::
values are sorted to ensure key reproducibility
"""
if not all(isinstance(v, Hashable) for v in values):
raise ValueError(f"Given value not hashable, values: {values}")
unique_key = md5(".".join(value for value in sorted(str(v) for v in values)).encode("utf8")).hexdigest()
hash_url = urllib.parse.quote_plus(f"http://{uuid_namespace_dns_name}/{unique_key}")
value = str(uuid5(namespace=NAMESPACE_URL, name=hash_url))
return value
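# Example: values are sorted before hashing, so argument order cannot change the id.
# assert generate_request_id("bucket", "key.csv") == generate_request_id("key.csv", "bucket")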
def serialize_json_and_chunk_by_bytes(items: List[Union[dict, str]], max_bytes: int = 2048) -> Generator[str, None, None]:
"""
Serialize items into JSON and yield by the resulting
"""
is_initial = True
last_json_str = None
chunked_items = []
logger.debug(f"chunk_processing items incoming: {len(items)}")
for item in items:
if chunked_items:
json_str = json.dumps(chunked_items)
json_bytes = json_str.encode("utf8")
if is_initial and len(json_bytes) > max_bytes:
raise ValueError(f"Single item > max_bytes({max_bytes}: {json_bytes}")
elif len(json_bytes) > max_bytes:
yield last_json_str
chunked_items = chunked_items[-1:] # remove items yielded in last_json_str
last_json_str = json_str
chunked_items.append(item)
is_initial = False
if chunked_items:
json_str = json.dumps(chunked_items)
encoded = json_str.encode("utf8")
if len(encoded) >= max_bytes:
json_str = json.dumps(chunked_items[:-1])
yield json_str # make sure to send last one!
json_str = json.dumps(chunked_items[-1:])
yield json_str # make sure to send last one!
else:
yield json_str # make sure to send last one!
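# Usage sketch (hypothetical payload and sink): yield JSON strings of at most
# 64 bytes each and hand them to the caller's queue/stream of choice.
# for chunk in serialize_json_and_chunk_by_bytes([{"i": n} for n in range(10)], max_bytes=64):
#     send_to_queue(chunk)  # send_to_queue is a placeholder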
def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None):
"""
request retry sessions
:param retries:
:param backoff_factor:
:param status_forcelist:
:param session:
:return:
"""
session = session or requests.Session()
retry = Retry(total=retries, read=retries, connect=retries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
def s3_key_exists(bucket: str, key: str) -> bool:
"""Check if given bucket, key exists"""
exists = False
try:
S3.head_object(Bucket=bucket, Key=key)
exists = True
except ClientError as e:
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
logger.error(f"s3 key does not exist: s3://{bucket}/{key}")
else:
logger.exception(e)
logger.error(f"Unknown ClientError: {e.args}")
return exists
|
[
"shane.cousins@gmail.com"
] |
shane.cousins@gmail.com
|
23498eed5e9a542ea41369a9e9a38d4bcb1a456e
|
92b74258a1058e0d84f8f6e0b84cc1984d9f9d55
|
/app/mlengine/encodings.py
|
8071bbe5fb714ffabe4af21118c9e1c4f9b5b977
|
[] |
no_license
|
I-am-Cazza/group31-project
|
56c053700e96ef1e8e56f4952fa2197843ba2ecc
|
c9ef41480b908ed95c36e08729f1021c8eaef0c5
|
refs/heads/master
| 2020-04-17T22:11:48.841577
| 2019-03-08T09:15:04
| 2019-03-08T09:15:04
| 166,984,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
class Encodings:
custom_indices: dict
uni_encodings: dict
degree_encodings: dict
def __init__(self):
self.custom_indices = dict()
self.uni_encodings = dict()
self.degree_encodings = dict()
|
[
"cameronhughes0@btinternet.com"
] |
cameronhughes0@btinternet.com
|
e6e73b43edec5b695bade6e9dff193fb728fcf53
|
bd4e97350d455bb12bd8aa15366d00c89cb97d15
|
/tut68.py
|
a437e07031102547cbe8f43f77fb092b95e8f728
|
[] |
no_license
|
summitkumarsharma/pythontutorials
|
a2f9ae2e112da223db4be3120bbed879df4a0609
|
c4ab020ebd32ed8cd259602dec9b1b6ea33ff193
|
refs/heads/master
| 2022-11-07T10:24:16.223835
| 2020-06-25T18:08:46
| 2020-06-25T18:08:46
| 270,009,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Abstract Base Class & @abstractmethod
# from abc import ABCMeta, abstractmethod
from abc import ABC, abstractmethod # we can import in this way also
# class Shape(metaclass=ABCMeta):
class Shape(ABC):
@abstractmethod
def print_area(self):
return 0
class Rectangle(Shape):
type = "rectangle"
no_of_sides = 4
def __init__(self):
self.length = 6
self.breadth = 7
def print_area(self):
return self.length * self.breadth
rect = Rectangle()
print(rect.print_area())
# tryObj = Shape()  # raises TypeError: an abstract class cannot be instantiated
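# Demonstration: attempting to instantiate the ABC raises TypeError
try:
    Shape()
except TypeError as err:
    print("Cannot instantiate Shape:", err)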
|
[
"summitkumarsharma@gmail.com"
] |
summitkumarsharma@gmail.com
|
dc8d9542b83f65c6494bafe5bfbab2190b018152
|
ac73a433b66695992a00cb26d572bc94b52576e5
|
/code/featurization.py
|
a6c6a14c5516836104531287033e17f78acd70b9
|
[] |
no_license
|
yutaogura/MLtest
|
54792065b6949a23d01cb71bf5ff604a4b2d514d
|
f69ee2731048529685e9e16fe2c1572d51124854
|
refs/heads/master
| 2020-03-11T06:54:41.218079
| 2018-04-17T08:23:27
| 2018-04-17T08:23:27
| 129,843,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
import pandas as pd
import numpy as np
import scipy.sparse as sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import conf
try: import cPickle as pickle # python2
except: import pickle # python3
np.set_printoptions(suppress=True)
import sys
try: #python2
reload(sys)
sys.setdefaultencoding('utf-8')
except: pass
train_input = conf.train_tsv
test_input = conf.test_tsv
train_output = conf.train_matrix
test_output = conf.test_matrix
def get_df(input):
df = pd.read_csv(
input,
encoding='utf-8',
header=None,
delimiter='\t',
names=['id', 'label', 'text']
)
sys.stderr.write('The input data frame {} size is {}\n'.format(input, df.shape))
return df
def save_matrix(df, matrix, output):
id_matrix = sparse.csr_matrix(df.id.astype(np.int64)).T
label_matrix = sparse.csr_matrix(df.label.astype(np.int64)).T
result = sparse.hstack([id_matrix, label_matrix, matrix], format='csr')
msg = 'The output matrix {} size is {} and data type is {}\n'
sys.stderr.write(msg.format(output, result.shape, result.dtype))
    with open(output, 'wb') as fd:
        pickle.dump(result, fd, pickle.HIGHEST_PROTOCOL)
df_train = get_df(train_input)
print(df_train.shape)
# print(df_train.columns)
print(df_train.dtypes)
print('step1')
# train_words = np.array(df_train.text.str.lower().values.astype('U'))
train_words = df_train.text.str.lower().values.astype('U')
print(train_words.shape, train_words.dtype)
print('step2')
bag_of_words = CountVectorizer(stop_words='english',
max_features=5000)
print('step3')
bag_of_words.fit(train_words)
print('step4')
train_words_binary_matrix = bag_of_words.transform(train_words)
print('step5')
tfidf = TfidfTransformer(smooth_idf=False)
print('step6')
tfidf.fit(train_words_binary_matrix)
print('step7')
train_words_tfidf_matrix = tfidf.transform(train_words_binary_matrix)
print('step8')
save_matrix(df_train, train_words_tfidf_matrix, train_output)
del df_train
df_test = get_df(test_input)
print('step2-1')
#test_words = np.array(df_test.text.str.lower().values.astype('U'))
test_words = df_test.text.str.lower().values.astype('U')
print('step2-2')
test_words_binary_matrix = bag_of_words.transform(test_words)
print('step2-3')
test_words_tfidf_matrix = tfidf.transform(test_words_binary_matrix)
print('step2-4')
save_matrix(df_test, test_words_tfidf_matrix, test_output)
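# Loading sketch (an assumption about later use, not part of this pipeline):
# save_matrix() hstacks [id, label, tf-idf features], so the columns split as
# with open(train_output, 'rb') as fd:
#     m = pickle.load(fd)
# ids, labels, features = m[:, 0], m[:, 1], m[:, 2:]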
|
[
"6315026@ed.tus.ac.jp"
] |
6315026@ed.tus.ac.jp
|
09941b0ce2af4d2a1e8dd0497661b07557f7cad4
|
26932deb5eb41825af699dd70369a590325489e4
|
/news_site/wsgi.py
|
e79e006c565087ddf2379bcc79526647fb69e7e4
|
[] |
no_license
|
atleysayone/news_site
|
73a728377b7aa520f391f4d655c9ec6a22e18695
|
65706de1d8d866276a0569891e113fb8b4914e4d
|
refs/heads/master
| 2020-04-21T17:17:27.095940
| 2017-09-17T16:40:19
| 2017-09-17T16:40:19
| 169,730,927
| 0
| 0
| null | 2019-02-08T12:20:57
| 2019-02-08T12:20:56
| null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
"""
WSGI config for news_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "news_site.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[
"atleyvarghese@gmail.com"
] |
atleyvarghese@gmail.com
|
4ded22392e891ed13facd308785a927b3fe64ccf
|
94cf925fc46ea6428f62322c50f78c54c02d22a5
|
/onlinesale/coments/admin.py
|
46f7a2e36bcf168992de05daf04ae3959958cde9
|
[] |
no_license
|
blue-marker/Social-Site-Website-For-Movies-Using-Django
|
73ed29adedfc6e51419a8aa667a255d1dc76c48e
|
2c8aaf9c51e295b1ea6a4d909cbfaa42fc1a71b8
|
refs/heads/master
| 2022-04-19T12:54:22.064529
| 2020-04-18T07:12:36
| 2020-04-18T07:12:36
| 256,691,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
from django.contrib import admin
from .models import Coment
# Register your models here.
admin.site.register(Coment)
|
[
"salianvarun30@gmail.com"
] |
salianvarun30@gmail.com
|
2f6f9316b76ea79538af8c9e1f6cd8f60f96e9b2
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/framework/python_api_info_test.py
|
6522303b24f1e227e29cd64540b38538679e61ea
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 11,146
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.python_api_info."""
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import _pywrap_python_api_info
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
# pylint: disable=g-long-lambda
# Helper function to make expected output in examples more compact:
def Const(x):
return constant_op.constant(x)
@test_util.run_all_in_graph_and_eager_modes
class PythonAPIInfoTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def setUp(self):
context.ensure_initialized()
super(PythonAPIInfoTest, self).setUp()
def makeConverterForGenOp(self, op_name):
"""Returns a PythonAPIInfo for the given gen_op."""
api_info = _pywrap_python_api_info.PythonAPIInfo(op_name)
api_info.InitializeFromRegisteredOp(op_name)
return api_info
def makeConverterFromParamSpecs(self,
api_name,
param_names,
input_specs,
attr_specs,
defaults=()):
"""Returns a PythonAPIInfo built from the given specs."""
api_info = _pywrap_python_api_info.PythonAPIInfo(api_name)
api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
defaults)
return api_info
# This test initializes a PythonAPIInfo from a registered
# op, and then uses DebugInfo() to check that the internal state is
# correct.
@parameterized.named_parameters([
# An op whose inputs have fixed dtypes.
("RegexFullMatch", "RegexFullMatch", "DebugInfo for RegexFullMatch:\n"
" param_names=[input, pattern, name]\n"
" defaults_tuple=('RegexFullMatch',)\n"
" inputs=[\n"
" {index=0, name=input, is_list=0},\n"
" {index=1, name=pattern, is_list=0},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_STRING, is_list=0},]\n"),
# An op whose input has a variable dtype.
("Abs", "Abs", "DebugInfo for Abs:\n"
" param_names=[x, name]\n"
" defaults_tuple=('Abs',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0], ok_dtypes=[DT_BFLOAT16, DT_HALF, "
"DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[T]\n"),
# An op with two inputs that have the same (variable) dtype.
("AddV2", "AddV2", "DebugInfo for AddV2:\n"
" param_names=[x, y, name]\n"
" defaults_tuple=('AddV2',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},\n"
" {index=1, name=y, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0, 1], ok_dtypes=[DT_BFLOAT16, "
"DT_HALF, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_UINT16, DT_UINT32, "
"DT_UINT64, DT_INT8, DT_INT16, "
"DT_INT32, DT_INT64, DT_COMPLEX64, DT_COMPLEX128]},]\n"
" inferred_type_attrs=[T]\n"),
# An op with an int attribute.
("GatherV2", "GatherV2", "DebugInfo for GatherV2:\n"
" param_names=[params, indices, axis, batch_dims, name]\n"
" defaults_tuple=(0, 'GatherV2')\n"
" attributes=[\n"
" {index=3, name=batch_dims, type=int},\n"
" {inferred_index=0, name=Tparams, type=type},\n"
" {inferred_index=1, name=Tindices, type=type},\n"
" {inferred_index=2, name=Taxis, type=type},]\n"
" inputs=[\n"
" {index=0, name=params, is_list=0},\n"
" {index=1, name=indices, is_list=0},\n"
" {index=2, name=axis, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=Tparams, tensor_params=[0]},\n"
" {type_attr=Tindices, tensor_params=[1], "
"ok_dtypes=[DT_INT16, DT_INT32, DT_INT64]},\n"
" {type_attr=Taxis, tensor_params=[2], "
"ok_dtypes=[DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[Tparams, Tindices, Taxis]\n"),
# An op with default attrib values.
("ReduceJoin", "ReduceJoin", "DebugInfo for ReduceJoin:\n"
" param_names=[inputs, reduction_indices, keep_dims, separator, name]\n"
" defaults_tuple=(False, '', 'ReduceJoin')\n"
" attributes=[\n"
" {index=2, name=keep_dims, type=bool},\n"
" {index=3, name=separator, type=string},]\n"
" inputs=[\n"
" {index=0, name=inputs, is_list=0},\n"
" {index=1, name=reduction_indices, is_list=0},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_INT32, is_list=0},]\n"),
# An op with a variable-dtype list input, and an int attribute.
("ParseExampleV2", "ParseExampleV2", "DebugInfo for ParseExampleV2:\n"
" param_names=[serialized, names, sparse_keys, dense_keys, "
"ragged_keys, dense_defaults, num_sparse, sparse_types, "
"ragged_value_types, ragged_split_types, dense_shapes, name]\n"
" defaults_tuple=('ParseExampleV2',)\n"
" attributes=[\n"
" {inferred_index=0, name=Tdense, type=list(type)},\n"
" {index=6, name=num_sparse, type=int},\n"
" {index=7, name=sparse_types, type=list(type)},\n"
" {index=8, name=ragged_value_types, type=list(type)},\n"
" {index=9, name=ragged_split_types, type=list(type)},\n"
" {index=10, name=dense_shapes, type=list(shape)},]\n"
" inputs=[\n"
" {index=0, name=serialized, is_list=0},\n"
" {index=1, name=names, is_list=0},\n"
" {index=2, name=sparse_keys, is_list=0},\n"
" {index=3, name=dense_keys, is_list=0},\n"
" {index=4, name=ragged_keys, is_list=0},\n"
" {index=5, name=dense_defaults, is_list=1},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_STRING, is_list=0},\n"
" {index=2, dtype=DT_STRING, is_list=0},\n"
" {index=3, dtype=DT_STRING, is_list=0},\n"
" {index=4, dtype=DT_STRING, is_list=0},]\n"
" inputs_with_type_list_attrs=[\n"
" {type_list_attr=Tdense, tensor_list_params=[5], "
"ok_dtypes=[DT_FLOAT, DT_INT64, DT_STRING]},]\n"
" inferred_type_list_attrs=[Tdense]\n"),
# An op with a default dtype
("BroadcastArgs", "BroadcastArgs", "DebugInfo for BroadcastArgs:\n"
" param_names=[s0, s1, name]\n"
" defaults_tuple=('BroadcastArgs',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=s0, is_list=0},\n"
" {index=1, name=s1, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, default_dtype=DT_INT32, tensor_params=[0, 1], "
"ok_dtypes=[DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[T]\n"),
])
def testInitializeFromRegisteredOp(self, op_name, debug_info):
api_info = self.makeConverterForGenOp(op_name)
self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())
# This test initializes a PythonAPIInfo from parameter specs,
# and then uses DebugInfo() to check that the internal state is correct.
@parameterized.named_parameters([
("NoParams", "NoParams", [], {}, {}, "DebugInfo for NoParams:\n"
" param_names=[]\n"
" defaults_tuple=()\n"),
("OnlyNameParam", "OnlyNameParam", ["name"], {}, {},
"DebugInfo for OnlyNameParam:\n"
" param_names=[name]\n"
" defaults_tuple=()\n"),
("SomeBinaryOp", "SomeBinaryOp", ["x", "y"], dict(x="T", y="T"),
dict(T="type"), "DebugInfo for SomeBinaryOp:\n"
" param_names=[x, y]\n"
" defaults_tuple=()\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},\n"
" {index=1, name=y, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0, 1]},]\n"
" inferred_type_attrs=[T]\n"),
("AllAttributeTypes", "AllAttributeTypes", [
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p"
], {},
dict(
a="any",
b="float",
c="int",
d="string",
e="bool",
f="type",
g="shape",
h="tensor",
i="list(any)",
j="list(float)",
k="list(int)",
l="list(string)",
m="list(bool)",
n="list(type)",
o="list(shape)",
p="list(tensor)"), "DebugInfo for AllAttributeTypes:\n"
" param_names=[a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p]\n"
" defaults_tuple=()\n"
" attributes=[\n"
" {index=0, name=a, type=any},\n"
" {index=1, name=b, type=float},\n"
" {index=2, name=c, type=int},\n"
" {index=3, name=d, type=string},\n"
" {index=4, name=e, type=bool},\n"
" {index=5, name=f, type=type},\n"
" {index=6, name=g, type=shape},\n"
" {index=7, name=h, type=tensor},\n"
" {index=8, name=i, type=list(any)},\n"
" {index=9, name=j, type=list(float)},\n"
" {index=10, name=k, type=list(int)},\n"
" {index=11, name=l, type=list(string)},\n"
" {index=12, name=m, type=list(bool)},\n"
" {index=13, name=n, type=list(type)},\n"
" {index=14, name=o, type=list(shape)},\n"
" {index=15, name=p, type=list(tensor)},]\n"),
])
def testInitializeFromParamSpecs(self, api_name, param_names, input_specs,
attr_specs, debug_info):
api_info = self.makeConverterFromParamSpecs(api_name, param_names,
input_specs, attr_specs)
self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())
if __name__ == "__main__":
googletest.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
f4e96334215c46f86777705be0337ab833f88b27
|
fe6b475c6504d113a7742a5f46c695a21e777b59
|
/layers/categorical_encoding/mutils.py
|
2db7dc9620f2f044a708ac9d6e085ee86c0278f5
|
[
"MIT"
] |
permissive
|
achinta/CategoricalNF
|
eba5cadf3018edc5166d77e588f7e1c355efaec1
|
d8717a037e8f13641e9d9a89abf66fba38e23f91
|
refs/heads/master
| 2023-02-19T20:21:12.934621
| 2021-01-19T18:41:38
| 2021-01-19T18:41:38
| 337,639,302
| 0
| 0
|
MIT
| 2021-02-10T06:39:36
| 2021-02-10T06:39:35
| null |
UTF-8
|
Python
| false
| false
| 3,894
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("../../")
from general.mutils import get_param_val
from layers.categorical_encoding.variational_dequantization import VariationalDequantization
from layers.categorical_encoding.linear_encoding import LinearCategoricalEncoding
from layers.categorical_encoding.variational_encoding import VariationalCategoricalEncoding
def add_encoding_parameters(parser, postfix=""):
# General parameters
parser.add_argument("--encoding_dim" + postfix, help="Dimensionality of the embeddings.", type=int, default=4)
parser.add_argument("--encoding_dequantization" + postfix, help="If selected, variational dequantization is used for encoding categorical data.", action="store_true")
parser.add_argument("--encoding_variational" + postfix, help="If selected, the encoder distribution is joint over categorical variables.", action="store_true")
# Flow parameters
parser.add_argument("--encoding_num_flows" + postfix, help="Number of flows used in the embedding layer.", type=int, default=0)
parser.add_argument("--encoding_hidden_layers" + postfix, help="Number of hidden layers of flows used in the parallel embedding layer.", type=int, default=2)
parser.add_argument("--encoding_hidden_size" + postfix, help="Hidden size of flows used in the parallel embedding layer.", type=int, default=128)
parser.add_argument("--encoding_num_mixtures" + postfix, help="Number of mixtures used in the coupling layers (if applicable).", type=int, default=8)
# Decoder parameters
parser.add_argument("--encoding_use_decoder" + postfix, help="If selected, we use a decoder instead of calculating the likelihood by inverting all flows.", action="store_true")
parser.add_argument("--encoding_dec_num_layers" + postfix, help="Number of hidden layers used in the decoder of the parallel embedding layer.", type=int, default=1)
parser.add_argument("--encoding_dec_hidden_size" + postfix, help="Hidden size used in the decoder of the parallel embedding layer.", type=int, default=64)
def encoding_args_to_params(args, postfix=""):
params = {
"use_dequantization": getattr(args, "encoding_dequantization" + postfix),
"use_variational": getattr(args, "encoding_variational" + postfix),
"use_decoder": getattr(args, "encoding_use_decoder" + postfix),
"num_dimensions": getattr(args, "encoding_dim" + postfix),
"flow_config": {
"num_flows": getattr(args, "encoding_num_flows" + postfix),
"hidden_layers": getattr(args, "encoding_hidden_layers" + postfix),
"hidden_size": getattr(args, "encoding_hidden_size" + postfix)
},
"decoder_config": {
"num_layers": getattr(args, "encoding_dec_num_layers" + postfix),
"hidden_size": getattr(args, "encoding_dec_hidden_size" + postfix)
}
}
return params
def create_encoding(encoding_params, dataset_class, vocab=None, vocab_size=-1, category_prior=None):
assert not (vocab is None and vocab_size <= 0), "[!] ERROR: When creating the encoding, either a torchtext vocabulary or the vocabulary size needs to be passed."
use_dequantization = encoding_params.pop("use_dequantization")
use_variational = encoding_params.pop("use_variational")
if use_dequantization and "model_func" not in encoding_params["flow_config"]:
print("[#] WARNING: For using variational dequantization as encoding scheme, a model function needs to be specified" + \
" in the encoding parameters, key \"flow_config\" which was missing here. Will deactivate dequantization...")
use_dequantization = False
if use_dequantization:
encoding_flow = VariationalDequantization
elif use_variational:
encoding_flow = VariationalCategoricalEncoding
else:
encoding_flow = LinearCategoricalEncoding
return encoding_flow(dataset_class=dataset_class,
vocab=vocab,
vocab_size=vocab_size,
category_prior=category_prior,
**encoding_params)
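# Minimal usage sketch (the argparse namespace and MyDataset are hypothetical):
# parser = argparse.ArgumentParser()
# add_encoding_parameters(parser)
# args = parser.parse_args([])
# params = encoding_args_to_params(args)
# encoding_layer = create_encoding(params, dataset_class=MyDataset, vocab_size=100)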
|
[
"phillip.lippe@googlemail.com"
] |
phillip.lippe@googlemail.com
|
39eeee636ac00789623e040c9e5a29a90c083778
|
b9c140b741d04e496159ab987d72e37b8c99f735
|
/phonecode/apps.py
|
a9892d2dd6d05ff9364ff27da8b21afa30f603de
|
[
"MIT"
] |
permissive
|
okchaty/django-country
|
2d2ae8a418cbecdbf30ec298afa5ace3a6c4a75f
|
740bc25956dc1b87f44486538a62037e0bd0ac94
|
refs/heads/master
| 2021-05-18T13:22:48.793505
| 2020-04-03T07:05:00
| 2020-04-03T07:05:00
| 251,260,881
| 1
| 0
|
MIT
| 2020-04-02T18:41:10
| 2020-03-30T09:36:07
|
Python
|
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class PhoneCodeConfig(AppConfig):
name = 'phonecode'
|
[
"cristhian9bravo@gmail.com"
] |
cristhian9bravo@gmail.com
|
f0f3122abdc27a19f89ca90ea186ca899ff17a16
|
6a48403f9939e4ad74364250e90227a2698fecde
|
/scapy-test/AbnormalMessageDetection/IPprotocol/IP_Protocol.py
|
cfa5a128af4aedc621b6485136542c24b372bce0
|
[] |
no_license
|
pig98/DDOSAttack
|
96438baedb0ae1c5de332323e483ab4ff6490640
|
11cac8cad6109332f51cc89abe7cc334e25fa887
|
refs/heads/master
| 2023-03-17T07:54:47.864460
| 2020-06-04T10:36:27
| 2020-06-04T10:36:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
import scapy.all as scapy
import time
import os
def IP_Protocol():
clear = os.system('clear')
print("")
print("**************************************************************")
print("* Welcome to DDOS Attack system *")
print("**************************************************************")
print("")
print(" (1) Wrong IP Message Options ")
print("")
print(" (2) IP TimeStamp Message Options ")
print("")
print(" (3) IP Security Message Options ")
print("")
print(" (4) IP Data Flow Message Options ")
print("")
print(" (5) IP Route Message Options ")
print("")
print(" (6) IP Strict Route Message Options ")
print("")
print(" (7) IP Loose Route Message Options ")
print("")
print("**************************************************************")
print("* Author by SL *")
print("**************************************************************")
    a = input("[Abnormal_Message@IP]#")
    try:
        a = int(a)
    except ValueError:
        print("Please enter a number from 1 to 7")
        return IP_Protocol()
time.sleep(1)
try:
if a== 1:
try:
from Wrong_IP import Wrong_IP
Wrong_IP()
except KeyboardInterrupt:
IP_Protocol()
elif a == 2:
try:
from IP_TimeStamp import IP_TimeStamp
IP_TimeStamp()
except KeyboardInterrupt:
IP_Protocol()
elif a == 3:
try:
from IP_Security import IP_Security
IP_Security()
except KeyboardInterrupt:
IP_Protocol()
elif a == 4:
try:
from IP_Data_Flow import IP_Data_Flow
IP_Data_Flow()
except KeyboardInterrupt:
IP_Protocol()
elif a == 5:
try:
from IP_Route import IP_Route
IP_Route()
except KeyboardInterrupt:
IP_Protocol()
elif a == 6:
try:
from IP_Strict_Route import IP_Strict_Route
IP_Strict_Route()
except KeyboardInterrupt:
IP_Protocol()
elif a == 7:
try:
from IP_Loose_Route import IP_Loose_Route
IP_Loose_Route()
except KeyboardInterrupt:
IP_Protocol()
except KeyboardInterrupt:
print("Go back")
IP_Protocol()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
69a90d67ae4e3fe3881282fd832445ae0b5d82b3
|
e01883301bd2174babfca949517b06d70aaa7196
|
/functional_tests/test_list_item_validation.py
|
a89f565f3f093c78b933f622b6358dc0ada631eb
|
[] |
no_license
|
elihro/obey_testing_goat_superlists
|
c275a73ef2b64307ebe78f20f84c751609c27281
|
13a7867674841c4acfa94f0fb243600299f6b395
|
refs/heads/master
| 2020-07-02T11:48:12.440385
| 2019-09-20T20:23:15
| 2019-09-20T20:23:15
| 201,516,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,876
|
py
|
from .base import FuncionalTest
from selenium.webdriver.common.keys import Keys
class ItemValidationTest(FuncionalTest):
def get_error_element(self):
return self.browser.find_element_by_css_selector('.has-error')
def test_cannot_add_empty_list_items(self):
# Edith goes to the home page and accidentally tries to submit
# an empty list item. She hits Enter on the empty input box
self.browser.get(self.live_server_url)
self.get_item_input_box().send_keys(Keys.ENTER)
# The browser intercepts the request, and does not load the
# list page
self.wait_for(lambda: self.browser.find_element_by_css_selector (
'#id_text:invalid'
))
# She starts typing some text for the new item and the error disappears
self.get_item_input_box().send_keys('Buy milk')
self.wait_for(lambda: self.browser.find_element_by_css_selector(
'#id_text:valid'
))
# And she can submit it successfully
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
# Perversely, she now decides to submit a second blank list item
self.get_item_input_box().send_keys(Keys.ENTER)
# Again, the browser will not comply
self.wait_for_row_in_list_table('1: Buy milk')
self.wait_for(lambda: self.browser.find_element_by_css_selector(
'#id_text:invalid'
))
# And she can correct it by filling some text in
inputbox = self.get_item_input_box()
inputbox.send_keys('Make tea')
inputbox.send_keys(Keys.ENTER)
self.wait_for(lambda: self.browser.find_element_by_css_selector(
'#id_text:valid'
))
self.wait_for_row_in_list_table('1: Buy milk')
self.wait_for_row_in_list_table('2: Make tea')
def test_cannot_add_duplicate_items(self):
# Edith goes to the home page and starts a new list
self.browser.get(self.live_server_url)
self.get_item_input_box().send_keys('Buy wellies')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy wellies')
# She accidentally tries to enter a duplicate item
self.get_item_input_box().send_keys('Buy wellies')
self.get_item_input_box().send_keys(Keys.ENTER)
# She sees a helpful error message
self.wait_for(lambda: self.assertEqual(
self.get_error_element().text,
"You've already got this in your list"
))
def test_error_message_are_cleaned_on_input_when_keypress(self):
# Edith starts a list and causes a validation error.
self.browser.get(self.live_server_url)
self.get_item_input_box().send_keys('Banter too thick')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Banter too thick')
self.get_item_input_box().send_keys('Banter too thick')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for(lambda: self.assertTrue(
self.get_error_element().is_displayed()
))
# She starts typing in the input box to clear the error
self.get_item_input_box().send_keys('a')
# She is pleased to see that error message disappears
self.wait_for(lambda: self.assertFalse(
self.get_error_element().is_displayed()
))
def test_error_message_are_cleaned_on_input_when_click(self):
# Edith starts a list and causes a validation error.
self.browser.get(self.live_server_url)
self.get_item_input_box().send_keys('Banter too thick')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Banter too thick')
self.get_item_input_box().send_keys('Banter too thick')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for(lambda: self.assertTrue(
self.get_error_element().is_displayed()
))
# She clicks the input box to clear the error
self.get_item_input_box().click()
# She is pleased to see that error message disappears
self.wait_for(lambda: self.assertFalse(
self.get_error_element().is_displayed()
))
|
[
"elihro.duarte@gmail.com"
] |
elihro.duarte@gmail.com
|
534d9e5e9a975e2165b2dd4c57e4f2705ae24d4b
|
25c26a9d1752918dccbe9ff33b5e7e8b627c0951
|
/test_journal.py
|
2043e830495ee85b48e4613e90bd04457b940d5d
|
[
"MIT"
] |
permissive
|
nbeck90/learning-journal
|
01e1b4d1529fb8dc0ffb7f60e9c0180ea3b0da75
|
93755f0ba829382ec3ab55e1ab0f2ba94a405094
|
refs/heads/master
| 2021-01-20T17:20:36.777618
| 2017-05-18T22:08:48
| 2017-05-18T22:08:48
| 30,119,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,974
|
py
|
from contextlib import closing
from pyramid import testing
from cryptacular.bcrypt import BCRYPTPasswordManager
import os
import pytest
import datetime
from journal import INSERT_ENTRY
from journal import connect_db
from journal import DB_SCHEMA
TEST_DSN = 'dbname=test_learning_journal user=nbeck'
def init_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
def clear_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DROP TABLE entries")
db.commit()
def clear_entries(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DELETE FROM entries")
db.commit()
def run_query(db, query, params=(), get_results=True):
cursor = db.cursor()
cursor.execute(query, params)
db.commit()
results = None
if get_results:
results = cursor.fetchall()
return results
@pytest.fixture(scope='session')
def db(request):
"""set up and tear down a database"""
settings = {'db': TEST_DSN}
init_db(settings)
def cleanup():
clear_db(settings)
request.addfinalizer(cleanup)
return settings
@pytest.yield_fixture(scope='function')
def req_context(db, request):
"""mock a request with a database attached"""
settings = db
req = testing.DummyRequest()
with closing(connect_db(settings)) as db:
req.db = db
req.exception = None
yield req
# after a test has run, we clear out entries for isolation
clear_entries(settings)
@pytest.fixture(scope='function')
def app(db):
from journal import main
from webtest import TestApp
os.environ['DATABASE_URL'] = TEST_DSN
app = main()
return TestApp(app)
@pytest.fixture(scope='function')
def entry(db, request):
"""provide a single entry in the database"""
settings = db
now = datetime.datetime.utcnow()
expected = ('Test Title', 'Test Text', now)
with closing(connect_db(settings)) as db:
run_query(db, INSERT_ENTRY, expected, False)
db.commit()
def cleanup():
clear_entries(settings)
request.addfinalizer(cleanup)
return expected
def test_listing(app, entry):
response = app.get('/')
assert response.status_code == 200
actual = response.body
for expected in entry[:2]:
assert expected in actual
def test_empty_listing(app):
response = app.get('/')
assert response.status_code == 200
actual = response.body
expected = 'No entries here so far'
assert expected in actual
def test_write_entry(req_context):
from journal import write_entry
fields = ('title', 'text')
expected = ('Test Title', 'Test Text')
req_context.params = dict(zip(fields, expected))
# assert that there are no entries when we start
rows = run_query(req_context.db, "SELECT * FROM entries")
assert len(rows) == 0
result = write_entry(req_context)
# manually commit so we can see the entry on query
req_context.db.commit()
rows = run_query(req_context.db, "SELECT title, text FROM entries")
assert len(rows) == 1
actual = rows[0]
for idx, val in enumerate(expected):
assert val == actual[idx]
def test_read_entries_empty(req_context):
# call the function under test
from journal import read_entries
result = read_entries(req_context)
# make assertions about the result
assert 'entries' in result
assert len(result['entries']) == 0
def test_read_entries(req_context):
# prepare data for testing
now = datetime.datetime.utcnow()
expected = ('Test Title', 'Test Text', now)
run_query(req_context.db, INSERT_ENTRY, expected, False)
# call the function under test
from journal import read_entries
result = read_entries(req_context)
# make assertions about the result
assert 'entries' in result
assert len(result['entries']) == 1
for entry in result['entries']:
assert expected[0] == entry['title']
assert expected[1] == entry['text']
for key in 'id', 'created':
assert key in entry
def test_post_to_add_view(app):
entry_data = {
'title': 'Hello there',
'text': 'This is a post',
}
response = app.post('/add', params=entry_data, status='3*')
redirected = response.follow()
actual = redirected.body
for expected in entry_data.values():
assert expected in actual
@pytest.fixture(scope='function')
def auth_req(request):
manager = BCRYPTPasswordManager()
settings = {
'auth.username': 'admin',
'auth.password': manager.encode('secret'),
}
testing.setUp(settings=settings)
req = testing.DummyRequest()
def cleanup():
testing.tearDown()
request.addfinalizer(cleanup)
return req
def test_do_login_success(auth_req):
from journal import do_login
auth_req.params = {'username': 'admin', 'password': 'secret'}
assert do_login(auth_req)
def test_do_login_bad_pass(auth_req):
from journal import do_login
auth_req.params = {'username': 'admin', 'password': 'wrong'}
assert not do_login(auth_req)
def test_do_login_bad_user(auth_req):
from journal import do_login
auth_req.params = {'username': 'bad', 'password': 'secret'}
assert not do_login(auth_req)
def test_do_login_missing_params(auth_req):
from journal import do_login
for params in ({'username': 'admin'}, {'password': 'secret'}):
auth_req.params = params
with pytest.raises(ValueError):
do_login(auth_req)
INPUT_BTN = '<input type="submit" value="Share" name="Share"/>'
def login_helper(username, password, app):
"""encapsulate app login for reuse in tests
Accept all status codes so that we can make assertions in tests
"""
login_data = {'username': username, 'password': password}
return app.post('/login', params=login_data, status='*')
def test_start_as_anonymous(app):
response = app.get('/', status=200)
actual = response.body
assert INPUT_BTN not in actual
def test_login_success(app):
username, password = ('admin', 'secret')
redirect = login_helper(username, password, app)
assert redirect.status_code == 302
response = redirect.follow()
assert response.status_code == 200
actual = response.body
assert INPUT_BTN in actual
def test_login_fails(app):
username, password = ('admin', 'wrong')
response = login_helper(username, password, app)
assert response.status_code == 200
actual = response.body
assert "Login Failed" in actual
assert INPUT_BTN not in actual
def test_logout(app):
# re-use existing code to ensure we are logged in when we begin
test_login_success(app)
redirect = app.get('/logout', status="3*")
response = redirect.follow()
assert response.status_code == 200
actual = response.body
assert INPUT_BTN not in actual
|
[
"nbeck90@uw.edu"
] |
nbeck90@uw.edu
|
66e49975c766d72936c157dd0bc2e21068ea656b
|
3bfe35a518869c4354ab1f817fd2060f0446313e
|
/examples/core_display_callbacks.py
|
a8dbe0cb65622cf41e9ebe14f145a0a0adb8cfee
|
[] |
no_license
|
tankvn/pythonocc-demos-with-thumb-preview
|
b16b6d00b6e14bda6e4b765a354691255a7d2d43
|
36746225ddd8406dcce9b12a9ccd6489aa281694
|
refs/heads/master
| 2023-04-26T16:43:30.377812
| 2021-05-04T06:12:36
| 2021-05-04T06:12:36
| 362,368,937
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
##Copyright 2010-2014 Thomas Paviot (tpaviot@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeTorus
from OCC.Core.Bnd import Bnd_Box
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Display.SimpleGui import init_display
def print_xy_click(shp, *kwargs):
for shape in shp:
print("Shape selected: ", shape)
print(kwargs)
def compute_bbox(shp, *kwargs):
print("Compute bbox for %s " % shp)
for shape in shp:
bbox = Bnd_Box()
brepbndlib_Add(shape, bbox)
xmin, ymin, zmin, xmax, ymax, zmax = bbox.Get()
dx = xmax - xmin
dy = ymax - ymin
dz = zmax - zmin
print("Selected shape bounding box : dx=%f, dy=%f, dz=%f." % (dx, dy, dz))
print(" bounding box center: x=%f, y=%f, z=%f" % (xmin + dx/2.,
ymin + dy/2.,
zmin + dz/2.))
display, start_display, add_menu, add_function_to_menu = init_display()
# register callbacks
display.register_select_callback(print_xy_click)
display.register_select_callback(compute_bbox)
# creating geometry
my_box = BRepPrimAPI_MakeBox(10., 20., 30.).Shape()
my_torus = BRepPrimAPI_MakeTorus(30., 5.).Shape()
# and finally display geometry
display.DisplayShape(my_torus)
display.DisplayShape(my_box, update=True)
start_display()
|
[
"tpaviot@gmail.com"
] |
tpaviot@gmail.com
|
0df7d6c7aae8d0befd717b3de9078de48f6c71ec
|
c0af6f30e21cb27227037a7461c24d3187304a65
|
/models/SortCont.py
|
34b4afea33fe52157bc0de186fc6bbe4fc7c0a08
|
[] |
no_license
|
masl512/dabm2021
|
320d07dbe44577d8019953b413f6a20213ee1e77
|
613a61f4af2bc6d4fdfa3ad4236c2c37d39154b2
|
refs/heads/master
| 2023-04-21T23:28:49.306743
| 2021-05-20T06:59:57
| 2021-05-20T06:59:57
| 361,468,454
| 0
| 1
| null | 2021-05-18T07:01:20
| 2021-04-25T15:40:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import cv2
import pandas as pd
def sortContours(cnts):
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
# print(boundingBoxes)
df = pd.DataFrame(boundingBoxes, columns=['x','y','w','h'])
# print(df)
df.sort_values(by=['y','x'], inplace= True)
boundingBoxes = df.values.tolist()
# print(boundingBoxes)
return boundingBoxes
# contours_poly = [None]*len(contours)
# boundRect = [None]*len(contours)
# for i, c in enumerate(contours):
# contours_poly[i] = contours[i]
# boundRect[i] = cv2.boundingRect(contours_poly[i])
# for i in range(len(contours)):
# # color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
# color = (0,0 ,255 )
# cv2.rectangle(resized, (int(boundRect[i][0]), int(boundRect[i][1])), \
# (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
# cv2.imwrite('boxes.png', resized)
# # cv2.imshow('Contours', resized)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
|
[
"miguel.soler@mail.escuelaing.edu.co"
] |
miguel.soler@mail.escuelaing.edu.co
|
369cf4599d8f04b904158fb0b1611755ec90dc12
|
15215d0ccc0414e4d9e187109af11002cbc14453
|
/node_modules/keccak/build/config.gypi
|
6d26ffab9cabf16ec85f7d936c2ebd74410b5c4a
|
[
"MIT"
] |
permissive
|
jsdelivrbot/VinylMarketplace
|
752d34cf2b6c98e6f92ec44dacc9b302258b3cab
|
508c4143e7f494fcbc0555af83343cda817c358f
|
refs/heads/master
| 2020-04-10T08:10:46.394976
| 2018-12-08T01:51:27
| 2018-12-08T01:51:27
| 160,899,218
| 0
| 0
| null | 2018-12-08T03:13:02
| 2018-12-08T03:13:02
| null |
UTF-8
|
Python
| false
| false
| 5,184
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/surayashivji/.node-gyp/8.9.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"viewer": "man",
"commit_hooks": "true",
"browser": "",
"only": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/surayashivji/npm/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"git_tag_version": "true",
"cert": "",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"message": "%s",
"key": "",
"versions": "",
"globalconfig": "/Users/surayashivji/npm/etc/npmrc",
"logs_max": "10",
"always_auth": "",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"searchlimit": "20",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"userconfig": "/Users/surayashivji/.npmrc",
"init_module": "/Users/surayashivji/.npm-init.js",
"cidr": "",
"user": "502",
"node_version": "8.9.3",
"save": "true",
"editor": "vi",
"auth_type": "legacy",
"ignore_prepublish": "",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/surayashivji/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/5.6.0 node/v8.9.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"umask": "0022",
"init_version": "1.0.0",
"node_options": "",
"scope": "",
"git": "git",
"init_author_name": "",
"unsafe_perm": "true",
"tmp": "/var/folders/9z/w9cn0hbd0lj_cxfy8vw8mpmc0000gp/T",
"onload_script": "",
"prefix": "/Users/surayashivji/npm",
"link": ""
}
}
|
[
"surayashivji@gmail.com"
] |
surayashivji@gmail.com
|
945f36544dc449381c85c003cb56159906ef46ba
|
6a7e952a99211b86ce1c43b0e12436faf7e794a8
|
/2021/Python/Algorithms/ArrayNumAdder.py
|
996e8cff368ec594d4b715178b918541e7735ab8
|
[] |
no_license
|
Lazeeez/hacktoberfest
|
0ab7bcb3d5cb62a6cce03ada0b0a48360b07caea
|
36e3d83a5ed94ee9a57a82aa7773e6e3b5589838
|
refs/heads/master
| 2023-08-27T06:32:48.993821
| 2021-10-30T09:46:06
| 2021-10-30T09:46:06
| 412,881,759
| 0
| 0
| null | 2021-10-30T11:52:52
| 2021-10-02T18:35:11
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
from functools import reduce # for reducing our code into 1 line
def ArrayNumAdder(numlist):
return reduce(lambda a,b: a+b, numlist)
if __name__ == '__main__':
mylist = [1,2,3,4,5]
print(ArrayNumAdder(mylist))
|
[
"michaelmaranan030@gmail.com"
] |
michaelmaranan030@gmail.com
|
7a978563773c57d4437f81d9255dc645e1df02af
|
60299298843d727d600a113904007339f1647f11
|
/Django Project/mysite/demo/models.py
|
ead10f692b5dcdeb2076b07b2c283b386e25c8bb
|
[] |
no_license
|
pntehan/Django-
|
fea530ba18ae267a4cc65887f8d11bb1a8dfe7c4
|
31ccbc3059a39d009aeaff661dfab1a9ae5aa79a
|
refs/heads/master
| 2022-12-26T10:40:51.582732
| 2018-12-27T08:30:38
| 2018-12-27T08:41:39
| 163,253,318
| 0
| 1
| null | 2022-12-12T14:24:59
| 2018-12-27T05:56:48
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
from django.db import models
# Create your models here.
class LoginUser(models.Model):
name = models.CharField(max_length = 150)
password = models.CharField(max_length = 150)
account = models.CharField(max_length=150)
img = models.ImageField(upload_to='img')
data = models.FileField(upload_to='myfile')
|
[
"2390003207@qq.com"
] |
2390003207@qq.com
|
c9c44164e3eebc4bd22437b21a345e6fe938101d
|
9e786b81fea1f6001300fd2efe192d07e69a44f2
|
/tetris/tetris_keypress.py
|
815840847842b8f8813c22abc0eb07c8eb34d045
|
[] |
no_license
|
SebastianStaab/AEMLproject
|
81e34dcf1774b8269e41791b1f96b5d6879e8a1c
|
d1015aa8bcc45c30d3571299fb0cc21adc2429c9
|
refs/heads/master
| 2022-12-14T20:53:26.239050
| 2020-08-30T13:23:17
| 2020-08-30T13:23:17
| 286,392,726
| 0
| 0
| null | 2020-08-10T06:19:17
| 2020-08-10T06:19:16
| null |
UTF-8
|
Python
| false
| false
| 8,494
|
py
|
from random import randrange as rand
import random
import pygame, sys
### CONFIGURATIONS ###
cell_size = 25
cols = 10
rows = 20
colors = [ (0, 0, 0), # color for background
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(0, 255, 255),
(128, 0, 128),
(255, 165, 0),
(0, 0, 0) ] # helper color for background (grid)
tetris_shapes = [
[[1, 1, 1],
[0, 1, 0]],
[[0, 2, 2],
[2, 2, 0]],
[[3, 3, 0],
[0, 3, 3]],
[[4, 0, 0],
[4, 4, 4]],
[[0, 0, 5],
[5, 5, 5]],
[[6, 6, 6, 6]],
[[7, 7],
[7, 7]] ]
def rotate_clockwise(shape):
return [
[ shape[y][x] for y in range(len(shape)) ]
for x in range(len(shape[0]) - 1, -1, -1)
]
def rotate_anticlockwise(shape):
return [
[shape[y][x] for y in range(len(shape) - 1, -1, -1)]
for x in range(len(shape[0]))
]
def rotate_half(shape):
return rotate_clockwise(rotate_clockwise(shape))
def check_collision(board, shape, offset):
off_x, off_y = offset
for cy, row in enumerate(shape):
for cx, cell in enumerate(row):
try:
if cell and board[ cy + off_y ][ cx + off_x ]:
return True
except IndexError:
return True
return False
def remove_row(board, row):
del board[row]
return [[0 for i in range(cols)]] + board # adds new line on top (highest row)
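# Stamp the stone (mat2) into the board (mat1) at the given offset; the -1 on
# the row index appears to compensate for drop() detecting the collision one
# row below the stone's resting position.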
def join_matrixes(mat1, mat2, mat2_off):
off_x, off_y = mat2_off
for cy, row in enumerate(mat2):
for cx, val in enumerate(row):
mat1[cy+off_y-1 ][cx+off_x] += val
return mat1
def new_board():
board = [
[ 0 for x in range(cols) ]
for y in range(rows)
]
board += [[ 1 for x in range(cols)]] # sentinel "floor" row of 1s so check_collision stops stones at the bottom
return board
class TetrisApp(object):
def __init__(self):
pygame.init()
pygame.key.set_repeat() # held keys are not repeated
self.width = cell_size*(cols+8)
self.height = cell_size*rows
self.rlim = cell_size*cols
self.bground_grid = [[ 8 if x%2 == y%2 else 0 for x in range(cols)] for y in range(rows)]
self.default_font = pygame.font.Font(pygame.font.get_default_font(), 18)
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption("TETRIS")
pygame.event.set_blocked(pygame.MOUSEMOTION) # block mouse movements
self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
self.init_game()
def new_stone(self):
self.stone = self.next_stone[:]
self.next_stone = tetris_shapes[rand(len(tetris_shapes))]
self.stone_x = int(cols / 2 - len(self.stone[0]) / 2)
self.stone_y = 0
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.gameover = True
def init_game(self):
self.board = new_board()
self.new_stone()
self.level = 1 #### TO DO
self.score = 0 #### To Do
self.lines = 0 #### TO DO
def disp_msg(self, msg, topleft):
x, y = topleft
for line in msg.splitlines():
self.screen.blit( self.default_font.render(line, False, (255,255,255), (0,0,0)), (x,y))
y += 20
def center_msg(self, msg):
for i, line in enumerate(msg.splitlines()):
msg_image = self.default_font.render(line, False, (255,255,255), (0,0,0))
msgim_center_x, msgim_center_y = msg_image.get_size()
msgim_center_x //= 2
msgim_center_y //= 2
self.screen.blit(msg_image, (self.width // 2 - msgim_center_x, self.height // 2 - msgim_center_y + i * 22))
def draw_matrix(self, matrix, offset):
off_x, off_y = offset
for y, row in enumerate(matrix):
for x, val in enumerate(row):
if val:
pygame.draw.rect(self.screen, colors[val],
pygame.Rect((off_x+x) * cell_size, (off_y+y) * cell_size, cell_size, cell_size), 0)
def add_cl_lines(self, n):
linescores = [0, 40, 100, 300, 1200]
self.lines += n
self.score += linescores[n] * self.level #### To Do
#if self.lines >= self.level*6:
# self.level += 1
def move(self, delta_x):
if not self.gameover and not self.paused:
new_x = self.stone_x + delta_x
if new_x < 0:
new_x = 0
if new_x > cols - len(self.stone[0]):
new_x = cols - len(self.stone[0])
if not check_collision(self.board, self.stone, (new_x, self.stone_y)):
self.stone_x = new_x
def quit(self):
sys.exit()
def drop(self, manual):
if not self.gameover and not self.paused:
self.score += 1 if manual else 0 ### TO DO
self.stone_y += 1
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.board = join_matrixes(self.board, self.stone, (self.stone_x, self.stone_y))
self.new_stone()
cleared_rows = 0
while True:
for i, row in enumerate(self.board[:-1]):
if 0 not in row:
self.board = remove_row(self.board, i)
cleared_rows += 1
break
else:
break
self.add_cl_lines(cleared_rows)
return True
return False
def insta_drop(self):
if not self.gameover and not self.paused:
while(not self.drop(True)):
pass
def rotate_stone(self, direction):
if not self.gameover and not self.paused:
if direction == 'clock':
new_stone = rotate_clockwise(self.stone)
elif direction == 'anticlock':
new_stone = rotate_anticlockwise(self.stone)
elif direction == 'half':
new_stone = rotate_half(self.stone)
if not check_collision(self.board, new_stone, (self.stone_x, self.stone_y)):
self.stone = new_stone
def toggle_pause(self):
self.paused = not self.paused
def start_game(self):
if self.gameover:
self.init_game()
self.gameover = False
def run(self):
key_actions = {
'ESCAPE': self.quit,
'p': self.toggle_pause,
's': self.start_game,
'LEFT': lambda:self.move(-1),
'RIGHT': lambda:self.move(+1),
'DOWN': lambda:self.rotate_stone('clock'),
'UP': lambda:self.rotate_stone('anticlock'),
'SPACE': lambda:self.rotate_stone('half'),
'RETURN': self.insta_drop,
'r': 0
}
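# 'r' starts as a dummy entry and is rebound every frame to a randomly chosen
# move/rotate action (see the random.choice call in the main loop below).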
self.gameover = False
self.paused = False
while 1:
self.screen.fill(colors[0])
if self.gameover:
self.center_msg("""Game Over!\n\nYour score: %d""" % self.score)
else:
if self.paused:
self.center_msg("Paused")
else:
pygame.draw.line(self.screen, (255,255,255), (self.rlim+1, 0), (self.rlim+1, self.height-1))
self.disp_msg("Next:", (self.rlim+cell_size, 2))
self.disp_msg("Score: %d\n\nLevel: %d\n\n#Lines: %d" % (self.score, self.level, self.lines),
(self.rlim+cell_size, cell_size*5))
self.draw_matrix(self.bground_grid, (0,0))
self.draw_matrix(self.board, (0,0))
self.draw_matrix(self.stone, (self.stone_x, self.stone_y))
self.draw_matrix(self.next_stone, (cols+1, 2))
pygame.display.update()
action = random.choice(list(key_actions.values())[3:-1])  # pick a random move/rotate action, excluding the 'r' placeholder itself
key_actions['r'] = action
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.quit()
elif event.type == pygame.KEYDOWN:
for key in key_actions:
if event.key == getattr(pygame, 'K_' + key):  # same constant lookup without eval
key_actions[key]()
if __name__ == '__main__':
App = TetrisApp()
App.run()
|
[
"silkehusse@Silkes-MBP.fritz.box"
] |
silkehusse@Silkes-MBP.fritz.box
|
28680a676835b6a4686ce65756256a75cd8450d6
|
6041ed86c2a565af83c2b0a70ce62e390605e917
|
/test/TargetTest.py
|
eacb4beded62606f31eaf5bcfd24cd568ab950a6
|
[] |
no_license
|
DiwakarRDivu7/ShellSummary
|
97037e25f13b34fbd80c5ed35d078cdc4b5c4db1
|
706e895a1d96bd373c61cda85eee0fbce76a1e9c
|
refs/heads/master
| 2020-09-21T23:05:48.342779
| 2020-01-30T11:33:32
| 2020-01-30T11:33:32
| 224,965,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
"""
"""
import unittest
from pyspark.sql import SparkSession
from connectors.TargetConnector import createDatasetFromAbbyXmlFile
import yaml
class Test(unittest.TestCase):
with open("/Users/diwr/PycharmProjects/ShellShipping/conf/shell_shipping_config.yaml") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
# xx = createDatasetFromAbbyXmlFile(config, '/Users/diwr/Desktop/Shell/Shell Doc/ShellShipping/TwoMonths/Vendors/50500.xml')
def test_0_createDatasetFromAbbyXMLFile(self):
path = '/Users/diwr/Desktop/Shell/Shell Doc/ShellShipping/TwoMonths/Vendors/50500.xml'
jar_path = "/Users/diwr/Desktop/DDL/jars/spark-xml_2.11-0.5.0.jar"
xml = SparkSession.builder.master("local").appName("appName") \
.config("spark.jars", jar_path) \
.config("spark.executor.extraClassPath", jar_path) \
.config("spark.executor.extraLibrary", jar_path) \
.config("spark.driver.extraClassPath", jar_path) \
.getOrCreate().read.format("com.databricks.spark.xml") \
.option("rowtag", "_ShellInvoices:_ShellInvoices") \
.load(path)
readXML = createDatasetFromAbbyXmlFile(self.config, path)
actual = [list(row) for row in xml.collect()]
expected = [list(row) for row in readXML.collect()]
self.assertEqual(actual, expected)
|
[
"46436050+DiwakarRogers@users.noreply.github.com"
] |
46436050+DiwakarRogers@users.noreply.github.com
|
d792e0a3f05d142d434a25421734a42aef34b38e
|
fbcea84d30f2ab1778cff27e512acd562b651284
|
/background.py
|
d30f143c00172c665efd27802d34720c2d8babf9
|
[] |
no_license
|
tidalvirus/bashup
|
1f3e580775f23fa29bcfc94b9faae8fb46ff0898
|
a917b4f6e6075a8e4b184f8549bc9dbb21d50a1f
|
refs/heads/master
| 2023-08-03T10:07:51.614160
| 2023-07-24T11:53:03
| 2023-07-24T11:53:03
| 36,228,614
| 0
| 1
| null | 2023-07-24T11:53:04
| 2015-05-25T11:56:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
#!/usr/bin/env python
# Simple background colour changing program
# Slooooooow
# by Siraj 'Sid' Rakhada sid-git@mindless.co.uk
import random, sys, time, pygame
from pygame.locals import *
FPS = 30
WINDOWWIDTH = 0   # 0 makes pygame use the current display resolution in fullscreen
WINDOWHEIGHT = 0
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
BRIGHTRED = (255, 0, 0)
RED = (155, 0, 0)
BRIGHTGREEN = ( 0, 255, 0)
GREEN = ( 0, 155, 0)
BRIGHTBLUE = ( 0, 0, 255)
BLUE = ( 0, 0, 155)
BRIGHTYELLOW = (255, 255, 0)
YELLOW = (155, 155, 0)
DARKGRAY = ( 40, 40, 40)
bgColor = BLACK
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init()
FPSCLOCK = pygame.time.Clock()
#DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT),pygame.FULLSCREEN)
pygame.display.set_caption('norbash')
BASICFONT = pygame.font.Font('freesansbold.ttf', 72)
infoSurf = BASICFONT.render('BASH KEYS', 1, YELLOW)
infoRect = infoSurf.get_rect()
infoRect.topleft = (10, 25)
while True:
DISPLAYSURF.fill(bgColor)
DISPLAYSURF.blit(infoSurf, infoRect)
checkForQuit()
for event in pygame.event.get():
if event.type == KEYDOWN:
changeBackgroundAnimation()
DISPLAYSURF.fill(bgColor)
pygame.display.flip()
#pygame.time.wait(1000)
def terminate():
pygame.quit()
sys.exit()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
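# Cross-fade the background to a random new colour by repeatedly blitting an
# increasingly opaque overlay surface onto the display.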
def changeBackgroundAnimation(animationSpeed=30):
global bgColor
newBgColor = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
newBgSurf = pygame.Surface((WINDOWWIDTH, WINDOWHEIGHT))
newBgSurf = newBgSurf.convert_alpha()
oldr, oldg, oldb = bgColor
r, g, b = newBgColor
for alpha in range(0, 255, animationSpeed): # animation loop
checkForQuit()
newBgSurf.fill((r, g, b, alpha))
DISPLAYSURF.blit(newBgSurf, (0, 0))
pygame.display.flip()
FPSCLOCK.tick(FPS)
bgColor = newBgColor
if __name__ == '__main__':
main()
|
[
"sid-git@mindless.co.uk"
] |
sid-git@mindless.co.uk
|
0c948fa14d63d5ba7e3946aee79ad59819197e16
|
1db400c30c31dc1af32e3503b23776a2bea9d94f
|
/hcn/agents/hcn/utils.py
|
9e8ed9aac229d321a73378822dd0802d65ec5d3f
|
[
"Apache-2.0"
] |
permissive
|
wonyonyon/hcn-dialogue-manager
|
c1c3d6be2029e5923e409e46d682e611ba361d7f
|
78da36ff6a20103c26fb5725973e694aef014c70
|
refs/heads/master
| 2020-03-16T21:00:48.437967
| 2018-01-22T15:21:07
| 2018-01-22T15:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,960
|
py
|
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unicodedata
import numpy as np
# ------------------------------------------------------------------------------
# Data/model utilities.
# ------------------------------------------------------------------------------
def normalize_text(text):
return unicodedata.normalize('NFD', text)
def is_api_call(text):
return text.strip().startswith('api_call')
def is_api_answer(text):
return (not is_silence(text)) and text.strip().endswith('<SILENCE>')
def is_null_api_answer(text):
return text.strip().startswith('api_call no result')
def is_silence(text):
return text.strip() == '<SILENCE>'
def filter_service_words(tokens):
return filter(lambda t: '_' not in t, tokens)
# ------------------------------------------------------------------------------
# Babi5&Babi6 specific utilities.
# ------------------------------------------------------------------------------
def babi6_dirty_fix(text):
"""Fix some inconsistencies in DSTC2 data preparation."""
return text.replace('the cow pizza kitchen and bar',
'the_cow_pizza_kitchen')\
.replace('the good luck chinese food takeaway', 'the_good_luck')\
.replace('the river bar steakhouse and grill', 'the_river_bar')\
.replace(' Fen Ditton', '')\
.replace('ask is', 'R_name is')\
.replace('ask serves', 'R_name serves')\
.replace('01223 323737', 'R_phone')\
.replace('C.B 2, 1 U.F', 'R_post_code')\
.replace('C.B 1, 3 N.F', 'R_post_code')\
.replace('C.B 2, 1 D.P', 'R_post_code')\
.replace('C.B 4, 3 L.E', 'R_post_code')\
.replace('108 Regent Street City Centre', 'R_address')\
.replace('17 Magdalene Street City Centre', 'R_address')\
.replace('529 Newmarket Road', 'R_address')\
.replace('7 Milton Road Chesterton', 'R_address')
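# Parse a raw api_call response block into per-restaurant dicts: each yielded
# dict maps 'R_name' and the listed properties to their values.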
def iter_api_response(text):
info = {}
for ln in text.split('\n'):
tokens = ln.split()
if is_silence(ln):
yield info
if (len(tokens) != 3):
return
rest, prop, value = tokens
value = int(value) if value.isdecimal() else value
if not info:
info['R_name'] = rest
if info['R_name'] == rest:
info[prop] = value
else:
yield info
info = {'R_name': rest, prop: value}
|
[
"mary.vikhreva@gmail.com"
] |
mary.vikhreva@gmail.com
|
b50a9db416ad10d4ecc3f9fec05155cca9709ead
|
0e531966f459388f080f6d3cb080d72d79d26cf2
|
/app/dependencies/security.py
|
aafb7a6eb65a4253c4e8b5db5ef459ef1d2832ae
|
[
"MIT"
] |
permissive
|
Luispapiernik/Guane-Inter-FastAPI
|
508820926406349bc82c1858a3e20954950382c6
|
e08d0d995a25ecc70d2cb5be7bca49b0703173c6
|
refs/heads/main
| 2023-02-24T05:46:26.044084
| 2021-01-25T10:46:43
| 2021-01-25T10:46:43
| 331,399,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,475
|
py
|
from datetime import datetime, timedelta
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
from ..logger import logger
from ..logs.security_messages import *
from ..models.security_models import *
# openssl rand -hex 32
SECRET_KEY = "7c4b24b47e6db890b03642a5cf51a3b7530c841c3dd2d80ccf49e976d20444a5"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
users_database = {
"Luispapiernik": {
"username": "Luispapiernik",
"full_name": "Luis Papiernik",
"email": "lpapiernik24@gmail.com",
"hashed_password": "$2b$12$Jp3lIMSzaQy9H40Lxl9xm.lzo.LU51X95xHXUMkDEBlBmoAvcnEvC",
"disabled": False,
}
}
logger.info(CRYPT_CONTEXT)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def verify_password(plain_password, hashed_password):
"""
Check that the hash of plain_password matches hashed_password.
Parameters
----------
plain_password : str
Password whose hash is being verified.
hashed_password : str
Hash used for the verification.
Returns
-------
out : bool
True if the values match, False otherwise.
"""
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
"""
Compute the hash of a string.
Parameters
----------
password : str
Text whose hash will be computed.
Returns
-------
out : str
String with the hash of the given text.
"""
return pwd_context.hash(password)
def get_user(database, username: str):
"""
Return a user from the database.
Parameters
----------
database : dict
The database represented as a dictionary whose keys
are the usernames.
username : str
User to fetch from the database.
Returns
-------
out : UserInDB, None
The user's data, or None if the user is not found.
"""
if username in database:
user_dict = database[username]
return UserInDB(**user_dict)
def authenticate_user(database, username: str, password: str):
"""
Check that the user passed as a parameter exists in the database and
that the credentials are correct.
Parameters
----------
database : dict
Database represented as a dictionary.
username : str
username of the user to verify.
password : str
password (plain text) of the user to verify.
Returns
-------
out : UserInDB, bool
The authenticated user when authentication succeeds, False otherwise.
"""
user = get_user(database, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
"""
Create an access token.
Parameters
----------
data : dict
Dictionary, typically containing a user's claims.
expires_delta : timedelta
Validity period of the token.
Returns
-------
out : str
Access token.
"""
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
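# Usage sketch (hypothetical claims dict): create_access_token({'sub': user.username},
# expires_delta=timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))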
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
Return the user associated with a token.
Parameters
----------
token : str
Token whose owning user will be looked up.
Returns
-------
out : UserInDB
User associated with the token.
"""
logger.info(GET_TOKEN)
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail='Could not validate credentials',
headers={'WWW-Authenticate': 'Bearer'},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get('sub')
if username is None:
logger.error(NOT_VALIDATE)
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
logger.error(NOT_VALIDATE)
raise credentials_exception
user = get_user(users_database, username=token_data.username)
if user is None:
logger.error(NOT_VALIDATE)
raise credentials_exception
logger.info(SUCCESSFUL_RECUPERATION)
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
"""
Check that a user's token is active.
Parameters
----------
current_user : User
User to be checked.
Returns
-------
out: User
The active user; if the check fails, an exception is raised instead
of returning the user.
"""
logger.info(CHECK_USER)
if current_user.disabled:
logger.error(INACTIVE_USER)
raise HTTPException(status_code=400, detail='Inactive user')
logger.info(ACTIVE_USER)
return current_user
|
[
"lpapiernik24@gmail.com"
] |
lpapiernik24@gmail.com
|
67e29793a7af8e444a07aa860998d0ffcf73c7dc
|
0cdf8b69725fe8135ac1484c77078541cd246d18
|
/sampling_script.py
|
e44f139b514ef2e9071a15e7a820393d365ab7bc
|
[] |
no_license
|
cassidyhhaas/NYC-Urban-Conditions
|
644c12c646c94477161f59ab2eab7f38c35627c4
|
3ff8b6419b6fe89f76abd491000b72c10ff9ad13
|
refs/heads/master
| 2023-06-10T19:38:45.410629
| 2021-06-28T19:50:08
| 2021-06-28T19:50:08
| 115,288,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
import argparse
import pymongo
from pymongo import MongoClient
import os
settings = {
'mongo_db_name': '311_mongo_import',
'mongo_collection_name': '311_mongo_import',
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Bootstrapping script")
parser.add_argument('--size',
type=int,
help='sample size',
default=100000,
required=False)
parser.add_argument('--number',
type=int,
help='number of samples',
default=10,
required=False)
args = parser.parse_args()
mongo_client = MongoClient()
mongo_db = mongo_client[settings['mongo_db_name']]
mongo_collection = mongo_db[settings['mongo_collection_name']]
print("Sampling from " + str(mongo_collection.count()) + " documents")
for sample in range(args.number):
mongo_sample_collection_name = "sample_" + str(sample)
mongo_sample_collection = mongo_db[mongo_sample_collection_name]
mongo_sample_collection.insert(mongo_collection.aggregate(
[ { '$sample': { 'size': args.size } } ], allowDiskUse=True))
#cursor = mongo_sample_collection.find({})
#for document in cursor:
# print(document)
|
[
"cassidyhhaas@gmail.com"
] |
cassidyhhaas@gmail.com
|
aa1893b22270efd12c7a77ba187a7159e426b639
|
748c936f204be73142928a7ab1d3eca03b8551df
|
/lineNewsBot.py
|
fee07b47190f4891786698a398d67df5fdce70d1
|
[] |
no_license
|
jeff3388/line_news
|
992ed1eea11ec8c8e239bdeee74c533c63ad20f3
|
435d5dab6e1afa127f4c37b37d7e6ed046771689
|
refs/heads/main
| 2023-03-31T07:17:20.314238
| 2021-04-09T15:29:03
| 2021-04-09T15:29:03
| 356,316,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,544
|
py
|
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
import time
import re
class LineNewsCrawler:
driver = None
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
filter_source_keywords = ['中台灣生活網', 'LINE TV', 'LINE TODAY', 'TODAY 看世界', 'LINE TODAY 話題', 'TVBS新聞台', '民視新聞台',
'東森新聞台', '華視影音']
filter_title_keywords = ['TVBS新聞台', '民視新聞台', '中台灣生活網', '東森新聞台', '華視影音']
@staticmethod
def parser_article_time(url):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
}
res = requests.get(url=url, headers=headers, timeout=10)
soup = BeautifulSoup(res.content, 'lxml')
publish_time_text = soup.find(
attrs={'class': 'entityPublishInfo-meta-info text text--f text--greyLighten12 text--regular'}).text.strip()
update_time = "".join(re.findall('發布於.*前', publish_time_text))
if bool(update_time) is False:
update_time = "".join(re.findall('發布於.*•', publish_time_text)).replace(' •', '')
return update_time
@classmethod
def open_browser(cls):
driver = webdriver.Chrome(options=cls.options)
driver.get('https://today.line.me/tw/v2/tab')
time.sleep(3)
for i in range(0, 25000, 2000):
js = "var q=document.documentElement.scrollTop=" + str(i)
driver.execute_script(js)
time.sleep(2)
cls.driver = driver
@classmethod
def parserNews(cls):
soup = BeautifulSoup(cls.driver.page_source, 'lxml')
element = soup.find_all('div', attrs={'class': 'listModule'})
title_list = []
link_list = []
source_list_1 = []
for ele in element:
link_ls = [links.get('href') for links in ele.find_all('a')]
link_list += [link_ls]
news_title_class = ele.find_all(attrs={'class': 'articleCard-content'})
title_list += [[news_title.text.strip().replace('\u3000', '').split('\n')[0] for news_title in news_title_class]]
news_source_class = ele.find_all(attrs={'class': 'articleCard-bottomWrap'})
source_list_1 += [news_source.text.strip() for news_source in news_source_class]
title_ls_1 = sum(title_list, [])
link_ls_1 = sum(link_list, [])
ele = soup.find('div', attrs={'class': 'foryou-list'})
link_ls_2 = [links.get('href') for links in ele.find_all('a')]
# clear data
news_title_class = ele.find_all(attrs={'class': 'articleCard-content'})
title_ls_2 = [news_title.text.strip().replace('\u3000', '').replace('\xa0', '').split('\n')[0] for news_title in
news_title_class]
news_source_class = ele.find_all(attrs={'class': 'foryou-publisher'})
source_ls_2 = [news_source.text.strip() for news_source in news_source_class]
total_title = title_ls_1 + title_ls_2
total_link = link_ls_1 + link_ls_2
total_source = source_list_1 + source_ls_2
news_dict_format = [{"title": title, "url": link, "source": source} for title, link, source in
zip(total_title, total_link, total_source)]
news_dict_format = [news_dict for news_dict in news_dict_format if
news_dict.get('source') not in cls.filter_source_keywords]
article_time_ls = []
for news_dict in news_dict_format:
url = news_dict.get('url')
article_time = cls.parser_article_time(url)
article_time_ls += [article_time]
result_news_dict = [{"title": title, "url": link, "source": source, "article_time": article_time} for
title, link, source, article_time in zip(total_title, total_link, total_source, article_time_ls)]
result = [result_news for result_news in result_news_dict if result_news.get('title') not in cls.filter_title_keywords]
return result
@classmethod
def close_browser(cls):
cls.driver.close()
cls.driver.quit()
def main():
LineNewsCrawler.open_browser()
result = LineNewsCrawler.parserNews()
LineNewsCrawler.close_browser()
for news in result:
print(news)
if __name__ == '__main__':
main()
|
[
"jaxlouder@gmail.com"
] |
jaxlouder@gmail.com
|
fbfa424ac510326cac597ac08784d5ed7fe0a3dd
|
3b21e66188a646c2a876fc283a19265ef891e277
|
/py/第二章:字符串和文本/2_4_字符串匹配和搜索.py
|
7698ece043eee3a21f076e02f9a32c89508388fb
|
[] |
no_license
|
Minsc016/ggstudy
|
f8139322d3f19b20fd39c43b972fe65d887592b4
|
00a75caad76eea213e7ece22f774ea4aace2bb13
|
refs/heads/master
| 2021-01-06T10:49:24.103229
| 2020-10-15T02:00:51
| 2020-10-15T02:00:51
| 241,302,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
#########################################################################
# File Name: 2_4_字符串匹配和搜索.py
# Author: Crow
# mail:qnglsk@163.com
# Created Time: Tue Nov 5 11:18:53 2019
#########################################################################
#!/usr/bin/env python3
# Matching a literal string only needs the basic string methods: str.find(), str.endswith(), str.startswith()
text = 'yeah, but no, but yeah, but no, but yeah'
# Exact match
print(text == 'yeah')
# match start or end
print(text.startswith('yeah'))
print(text.endswith('no'))
# Search for the location of the first occurrence
print(text.find('no'))
# For more complex matching, use regular expressions and the re module
# e.g. match a date string in numeric format, such as 11/27/2012:
text1 = '11/27/2012'
text2 = 'Nov 27,2012'
import re
# Simple matching:\d+ means one or more digits
if re.match(r'\d+/\d+/\d+',text1):
print('yes')
else:
print('no')
if re.match(r'\d+/\d+/\d+',text2):
print('yes')
else:
print('no')
# To run the same pattern against many strings, precompile the pattern string into a pattern object first:
datepat = re.compile(r'\d+/\d+/\d+')
if datepat.match(text1):
print('yes')
else:
print('no')
if datepat.match(text2):
print('yes')
else:
print('no')
# match() always matches from the start of the string; to find every place the
# pattern occurs anywhere in the string, use the findall() method instead:
text = 'Today is 11/27/2012.PyCon starts 3/13/2013'
print(datepat.findall(text))
# When defining a regular expression, parentheses are usually added to capture groups:
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
# Groups make later processing simpler, because the content of each group can be extracted separately
m = datepat.match('11/27/2012')
print(m)
# Extract the contents of each group
print(m.group(0))
print(m.group(1))
print(m.group(2))
print(m.group(3))
print(m.groups())
month,day,year = m.groups()
print(month,day,year)
# find all matches (notice splitting into tuples)
print(text)
datepat.findall(text)
for month,day,year in datepat.findall(text):
print('{}-{:0>2}-{:0>2}'.format(year,month,day))
# findall() searches the text and returns all matches as a list; to get the
# matches back as an iterator, use the finditer() method instead:
for m in datepat.finditer(text):
print(m.groups())
# Core steps:
# 1. Compile the regular-expression string with re.compile()
# 2. Use the match(), findall() or finditer() methods
# match() only checks the beginning of the string:
m = datepat.match('11/27/2012abcdef')
print(m)
print(m.group())
# For an exact match, end the regular expression with $
datepat = re.compile(r'(\d+)/(\d+)/(\d+)$')
print(datepat.match('11/27/2012abcdef'))
print(datepat.match('11/27/2012'))
# For a single one-off match/search operation:
print(re.findall(r'(\d+)/(\d+)/(\d+)',text))
|
[
"qnglsk0@gmail.com"
] |
qnglsk0@gmail.com
|
f2d692c8e9d9a1ead3d2b6676e2d7f18bd7b6515
|
ca2f635faaaaf9426e438372613c2580b8e0ff5f
|
/scraping.py
|
a2abd23617fce59f40d8e440b7ab2cbe8d00cd32
|
[] |
no_license
|
fadlnabbouh/Mission-to-Mars
|
e5a2b40a513fa7b8d50bda44389e828d7b89828f
|
1683d86304d5c58a9b748896ef92b68a4a923177
|
refs/heads/main
| 2023-06-07T02:08:21.611940
| 2021-07-04T00:23:31
| 2021-07-04T00:23:31
| 378,420,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,660
|
py
|
# Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import datetime as dt
def scrape_all():
# Initiate headless driver for deployment
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
news_title, news_paragraph = mars_news(browser)
# Run all scraping functions and store results in dictionary
data = {
"news_title": news_title,
"news_paragraph": news_paragraph,
"featured_image": featured_image(browser),
"facts": mars_facts(),
"last_modified": dt.datetime.now(),
"hemisphere_image" : mars_hemispheres(browser)
}
# Stop webdriver and return data
browser.quit()
return data
#Set up Splinter
#executable_path = {'executable_path': ChromeDriverManager().install()}
#browser = Browser('chrome', **executable_path, headless=False)
def mars_news(browser):
# Visit the mars nasa news site
url = 'https://redplanetscience.com'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
#Convert the browser html to soup object and then quit browser
html = browser.html
news_soup = soup(html, 'html.parser')
# Add try/except for error handling
try:
slide_elem = news_soup.select_one('div.list_text')
#slide_elem.find('div', class_='content_title')
# Use the parent element to find the first `a` tag and save it as `news_title`
news_title = slide_elem.find('div', class_='content_title').get_text()
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
except AttributeError:
return None, None
return news_title, news_p
# ### Featured Images
def featured_image(browser):
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
try:
# Find the relative image url
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
except AttributeError:
return None
# Use the base URL to create an absolute URL
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
return img_url
def mars_facts():
try:
df = pd.read_html('https://galaxyfacts-mars.com')[0]
except BaseException:
return None
df.columns=['Description', 'Mars', 'Earth']
df.set_index('Description', inplace=True)
return df.to_html()
def mars_hemispheres(browser):
url = 'https://marshemispheres.com/'
browser.visit(url)
hemisphere_image_urls = []
html = browser.html
hemisphere_img_soup = soup(html, 'html.parser')
for i in range(4):
hemisphere_info = {}
hemisphere_info['title'] = hemisphere_img_soup.find_all('h3')[i].text
#click link
browser.find_by_css("a.product-item h3")[i].click()
#extract image url
img_url = browser.links.find_by_text("Sample")
hemisphere_info['image_url'] = img_url['href']
hemisphere_image_urls.append(hemisphere_info)
#back
browser.back()
return hemisphere_image_urls
if __name__ == "__main__":
# If running as script, print scraped data
print(scrape_all())
|
[
"nabbouhf@gmail.com"
] |
nabbouhf@gmail.com
|
49f78666ab8c2fec997c6ce879bb4b3655237c73
|
9326df6f7c65afe55d82b8039542c38fb265a26e
|
/week007-008/SakeScraping/NihonsyudbScraping.py
|
fa4e3aa5b33b7601b263be48224b9470a1de387a
|
[] |
no_license
|
masa116/homeworks
|
a984c6689ce4fa822796cff7291d3f10cc5b81e4
|
c602096ccc864310eb5668adbee21fc114af4b91
|
refs/heads/master
| 2021-09-11T15:40:06.801979
| 2018-04-09T11:55:50
| 2018-04-09T11:55:50
| 115,341,049
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
# -*- coding: utf-8 -*-
import urllib.request
import codecs
import re
p = re.compile(r"<[^>]*?>")
from bs4 import BeautifulSoup
f = codecs.open('nihonsyudb.csv', 'w', 'utf-8')
f.write('id,sakecode,sakename,'+ "¥n")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36'}
tpl_url = 'http://sake.oisiso.com/archives/{0}'
count = 0
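# Crawl article pages 1..1999; pages that fail to load are skipped.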
for i in range(1, 2000):
url = tpl_url.format(i)
req = urllib.request.Request(url, headers=headers)
try:
soup = BeautifulSoup(urllib.request.urlopen(req).read(), 'html.parser')
except:
continue
sakename = soup.find('h1', {'class':'storytitle'})
if sakename is not None:
sakename_sub = p.sub("", str(sakename))
sakecode = i
content = soup.find('div', {'class': "storycontent"})
if content is not None:
text = content.find('div', {'class': "text"})
if text is not None:
li_all = text.findAll('li')
sakedata = ""
for li in li_all:
li_sub = p.sub("", str(li))
sakedata = sakedata + li_sub + ","
count = count + 1
sakeid = str(count) + "," + str(sakecode) + "," + sakename_sub
saketext = sakeid + sakedata
print(saketext)
f.write(saketext + "\n")
f.close()
|
[
"masa@masa-MBP.local"
] |
masa@masa-MBP.local
|
50c27d4e89e096291c4b3770778e613828c12616
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02697/s973072904.py
|
e3e4aaf02e274ad4bf79dde736e315c2ed0abf14
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
N,M=map(int,input().split())
a,b=1,N//2
c,d=N//2+1,N
if N%2==0:
a+=1
i=0
while i<M:
if i<M:
print(c,d)
i+=1
c,d=c+1,d-1
else:
break
if i<M:
print(a,b)
a,b=a+1,b-1
i+=1
else:
break
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1257e6eda6975b8181e621e61d214f62a80715e0
|
7329a11494b6ebac358ec9e74e028ebb1ee08ecf
|
/array/121/v1.py
|
c51550d2e784e70ddd0e3887365585bb0814cf53
|
[] |
no_license
|
EthanGe77/leetcode
|
28ec2e353c0d2fddcb3bfb1e5a7eb2026b3ddf2c
|
035bf3ce35db7c017d35af738051946d41d53bd6
|
refs/heads/master
| 2020-03-25T16:28:00.154603
| 2018-08-17T03:19:36
| 2018-08-17T03:19:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_profit = 0
min_price = float('inf')
for price in prices:
min_price = min(price, min_price)
max_profit = max(price - min_price, max_profit)
return max_profit
A = Solution()
print(A.maxProfit([7,1,5,3,6,4]))
|
[
"tiandi5001@gmail.com"
] |
tiandi5001@gmail.com
|
6ca2bbf6b78164b49efef1c8299f581a5cb27dc7
|
d932ca0f4197aec1a8d3f5cbfb287e44f8d71d25
|
/Ex1/IndexWriter2.py
|
9a244d6572b70cb54580b900dd54da5d7b833faa
|
[] |
no_license
|
roipk/InternetQueries
|
7dee32f329359f14215cbd3cb4ee2efb13471556
|
f674e04c5dbfce10edb17e8cb2261640e7023d2e
|
refs/heads/master
| 2020-09-24T17:56:46.867439
| 2020-02-01T18:54:02
| 2020-02-01T18:54:02
| 225,812,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,343
|
py
|
import os ,os.path
import operator
import re
import sys
import zlib
import shutil
import datetime
import glob
import bisect
from time import gmtime, asctime, time
import threading
class IndexWriter:
Term = ''
docId=0
indexer = []
f_tuple = []
# temp_indexer= []
maxread = 1000000000000
blocks = chr(ord('A'))
b = 40
startTime=""
threads = []
threadsWrite = []
dir =""
lock = threading.Lock()
debug = True
# debug = False
numBlock = 0
stopwords={}
def __init__(self, inputFile, dir):
"""Given a collection of documents,
creates an on disk index inputFile is the path to the file
containing the review data (the path includes the filename itself)
dir is the name of the directory in which all index files will be created
if the directory does not exist, it should be created"""
self.Term = ''
self.docId = 0
self.indexer = []
frequency = 1
self.f_tuple=[]
# self.temp_indexer = []
self.blocks = chr(ord('A') - 1)
self.threads = []
self.dir = dir
self.lock = threading.Lock()
self.numBlock = 0
# self.stopwords={'ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during', 'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours', 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from', 'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while', 'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them', 'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has', 'just', 'where', 'too', 'only', 'myself','which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than'}
for i in range(8):
self.threads.append(threading.Thread())
for i in range(8):
self.threadsWrite.append(threading.Thread())
self.delfolder(dir+'\\temp')
self.startTime = datetime.datetime.now()
self.createTempFolder(inputFile)
print("done create folders in {} time".format(asctime()))
print(datetime.datetime.now() - self.startTime)
# self.startTime = datetime.datetime.now()
self.mergeFolders(dir)
print("done merge folders in {} time".format(asctime()))
print(datetime.datetime.now() - self.startTime)
# self.startTime = datetime.datetime.now()
os.chdir(dir+"\\temp")
path =list(os.walk(os.getcwd()))[0]
if self.debug:
print(path[1][0])
os.rename(path[1][0], "File Compressed")
def readfiles(self,path):
with open(path, 'rb') as s1:
file1 = s1.read()
if self.debug:
print(file1)
with open(path, 'rb') as s1:
file1 = s1.read(self.maxread)
tempfile = file1
file1 = s1.read(self.maxread)
while file1:
tempfile+=file1
file1 = s1.read(self.maxread)
txt = zlib.decompress(tempfile).decode('utf-8')
if self.debug:
print(tempfile)
print(txt)
# file1 = s1.read(self.maxread)
# newfile1 = '{}'.format(file1)
#
# print(newfile1)
# if len(file1) > 0:
# newfile1 = zlib.decompress(file1).decode('utf-8')
# print(newfile1)
def createTempFolder(self, inputFile):
count = 0
numthread = 0
with open(inputFile,buffering=4000000) as f:
for line in f:
s = []
if line[0] != '*' and line[0] != '\n':
count+=1
v = ''
for i in range(len(line)):
ch = line[i]
if 'A' <= ch <= 'Z':
v += '{}'.format(chr(ord(ch) + 32))
elif 'a' <= ch <= 'z' or '0' <= ch <= '9':
v += '{}'.format(ch)
elif len(v) > 2:
# if v not in self.stopwords:
s.append(v)
v = ''
else:
v = ''
if len(v) > 0:
# if v not in self.stopwords:
s.append(v)
v=''
firstWord = ""
frequency = 1
s.sort()
for word in s:
if firstWord == "":
firstWord = word
elif firstWord != word:
# bisect.insort(self.indexer, (firstWord, count, frequency))
self.indexer.append((firstWord, count, frequency))
firstWord = word
# print(firstWord, count, frequency)
frequency = 1
else:
frequency += 1
if frequency > 1:
# bisect.insort(self.indexer, (firstWord, count, frequency))
self.indexer.append((firstWord, count, frequency))
elif firstWord != '':
# bisect.insort(self.indexer, (firstWord, count, 1))
self.indexer.append((firstWord, count, 1))
if len(self.indexer) > 0 and (sys.getsizeof(self.indexer) * sys.getsizeof(self.indexer[0])) > self.maxread:
indexer = self.indexer
self.indexer=[]
self.sortFile(indexer)
directory = "{}\{}".format(dir, 'temp')
if os.path.exists(directory):
folders = list(os.walk(directory))
if self.debug:
print("continue")
if count % 100000 == 0 and self.debug:
print("done {} in {} time".format(count,asctime()))
self.indexer.sort(key=operator.itemgetter(0))
s=''
for i in self.indexer:
s += '{}'.format(i)
print(s)
directory = "{}\{}\{}".format(dir, 'temp', 'A')
if not os.path.exists(directory):
os.makedirs(directory)
self.compressFile(s,directory,'b')
# self.writeToFile(self.dir,self.indexer,'a')
# if self.debug:
# print(count)
# print(datetime.datetime.now() - self.startTime)
# print("done create dictionary after {} time ".format(datetime.datetime.now() - self.startTime))
# if len(self.indexer) > 0:
# indexer = self.indexer
# self.indexer = []
# self.sortFile(indexer)
# if self.debug:
# print("continue")
for i in self.threads:
if i.is_alive():
i.join()
if self.debug:
print("done",i)
return
def sortFile(self,indexer):
while True:
for i in range(len(self.threads)):
# print(i)
                if not self.threads[i].is_alive():  # isAlive() was removed in Python 3.9
# print("i = {}".format(i))
numthread = i
# index = self.indexer
# print(self.indexer)
if self.debug:
print("i = {}".format(i))
# stime = datetime.datetime.now()
self.threads[numthread] = threading.Thread(target=self.writeToFileWrapper, args=(indexer,))
self.threads[numthread].start()
if self.debug:
print("doneThread")
# print(datetime.datetime.now()-stime)
# print("numthread = {}".format(numthread) )
# print( self.indexer)
return
def writeToFileWrapper(self,index):
# index = copy.deepcopy(self.indexer)
# self.indexer = []
self.lock.acquire()
try:
# chr(ord(self.blocks) + 1)
if self.blocks[-1] < 'Z' and len(self.blocks) == 1:
self.blocks = chr(ord(self.blocks ) + 1)
elif self.blocks[-1] < 'Z':
self.blocks = '{}{}'.format(self.blocks[:-1],chr(ord(self.blocks[-1]) + 1))
else:
self.blocks = '{}A'.format(self.blocks)
finally:
self.lock.release()
self.writeToFile(self.dir, index, self.blocks)
# if self.debug:
# print("end")
return
def mergeFolders(self,dir):
directory = "{}\{}".format(dir, 'temp')
if os.path.exists(directory):
folders = list(os.walk(directory))
countfolders = len(folders[0][1])
while countfolders > 1:
for f, b in zip(folders[1::2], folders[2::2]):
if f and b:
self.MergeFileWithThread(directory, f, b)
for k in self.threads:
                    if k.is_alive():
                        k.join()
# for i in self.threadsWrite:
# if i.isAlive():
# i.join()
if self.debug:
print("done loop merge in {} time".format(asctime()))
print(datetime.datetime.now() - self.startTime)
os.chdir(directory)
if countfolders > 1 and countfolders % 2 == 1:
if self.blocks[-1] < 'Z' and len(self.blocks) == 1:
self.blocks = chr(ord(self.blocks) + 1)
elif self.blocks[-1] < 'Z':
self.blocks = '{}{}'.format(self.blocks[:-1], chr(ord(self.blocks[-1]) + 1))
else:
self.blocks = '{}A'.format(self.blocks)
os.rename(folders[0][1][-1], self.blocks)
os.chdir(dir)
# self.startTime = datetime.datetime.now()
folders = list(os.walk(directory))
# if self.debug:
# print(folders)
countfolders = len(folders[0][1])
return
def getCorrectFolder(self,folders,low):
sortlist = folders[0][1]
min = (0,0)
for i in range(len(sortlist)):
if int(sortlist[i]) == low[0]:
min = (i + 1, min[1])
elif int(sortlist[i]) == low[1]:
min = (min[0], i + 1)
# if self.debug:
# print(sortlist)
# print(low[0],low[1])
# print(min)
return min
def MergeFileWithThread(self,directory,f,b):
if self.blocks[-1] < 'Z' and len(self.blocks) == 1:
self.blocks = chr(ord(self.blocks) + 1)
elif self.blocks[-1] < 'Z':
self.blocks = '{}{}'.format(self.blocks[:-1], chr(ord(self.blocks[-1]) + 1))
else:
self.blocks = '{}A'.format(self.blocks)
path = '{}\{}\\'.format(directory, self.blocks)
if not os.path.exists(path):
os.makedirs(path)
self.createFolders(path)
for i in range(len(f[2])):
newpath = '{}{}'.format(path, f[2][i])
folder1 = '{}\{}'.format(f[0], f[2][i])
folder2 = '{}\{}'.format(b[0], b[2][i])
self.lock.acquire()
try:
self.writeCharsFile(folder1, folder2, newpath)
finally:
self.lock.release()
        for k in self.threadsWrite:
            if k.is_alive():
                k.join()
self.delfolder(f[0])
self.delfolder(b[0])
if self.debug:
print("done merge in {} time".format(asctime()))
print(datetime.datetime.now() - self.startTime)
# self.startTime = datetime.datetime.now()
        for k in self.threads:
            if k.is_alive():
                k.join()
        for m in self.threadsWrite:
            if m.is_alive():
                m.join()
return
def writeCharsFile(self,folder1, folder2, newpath):
t = threading.Thread(target=self.mergeandsort, args=(folder1, folder2, newpath))
t.start()
t.join()
return
def delfolder(self,path):
if os.path.exists(path):
shutil.rmtree(path)
return
def mergeandsort(self, src1, src2, dst):
# Use `with` statements to close file automatically
with open(src1, 'rb') as s1, open(src2, 'rb') as s2, open(dst, 'ab') as d:
newfile1=""
newfile2=""
file1 = s1.read(self.maxread)
tempfile = file1
file1 = s1.read(self.maxread)
while file1:
tempfile += file1
file1 = s1.read(self.maxread)
if len(tempfile)>0:
newfile1 = zlib.decompress(tempfile).decode('utf-8')
file2 = s2.read(self.maxread)
tempfile = file2
file2 = s2.read(self.maxread)
while file2:
tempfile += file2
file2 = s2.read(self.maxread)
if len(tempfile) > 0:
newfile2 = zlib.decompress(tempfile).decode('utf-8')
if len(newfile1)>0:
newfile1 = newfile1.split("|")
if len(newfile2)>0:
newfile2 = newfile2.split("|")
i=0
j=0
str = ''
while i < len(newfile1) and j < len(newfile2):
sub1 = newfile1[i].split("-")
sub2 = newfile2[j].split("-")
# i+=1
                if len(sub1[0]) <= 0 and len(sub2[0]) > 0:
                    # skip the empty entry in the first file; appending sub2 here
                    # while only advancing i would emit sub2 twice
                    i += 1
elif len(sub2[0]) <= 0 and len(sub1[0]) > 0 :
if len(str) == 0:
str+="{}-{}".format( sub1[0],sub1[1])
else:
str += "|{}-{}".format(sub1[0], sub1[1])
i+=1
elif sub1[0] < sub2[0]:
if len(str) == 0:
str+="{}-{}".format( sub1[0],sub1[1])
else:
str += "|{}-{}".format(sub1[0], sub1[1])
i+=1
elif sub1[0] > sub2[0]:
if len(str) == 0:
str+="{}-{}".format( sub2[0],sub2[1])
else:
str+="|{}-{}".format( sub2[0],sub2[1])
j+=1
elif len(sub2[0]) <= 0 and len(sub1[0]) <= 0:
i += 1
j += 1
else:
if len(str) == 0:
str += "{}-{}_{}".format(sub1[0], sub1[1],sub2[1])
else:
str += "|{}-{}_{}".format(sub1[0], sub1[1],sub2[1])
i += 1
j += 1
while i < len(newfile1):
sub1 = newfile1[i].split("-")
if len(sub1[0]) > 0:
if len(str) == 0:
str += "{}-{}".format(sub1[0], sub1[1])
else:
str += "|{}-{}".format(sub1[0], sub1[1])
i += 1
while j < len(newfile2):
sub2 = newfile2[j].split("-")
if len(sub2[0]) > 0:
if len(str) == 0:
str += "{}-{}".format(sub2[0], sub2[1])
else:
str += "|{}-{}".format(sub2[0], sub2[1])
j += 1
sb = zlib.compress(str.encode('utf-8'))
d.write(sb)
return
def writeToFile(self , dir , index,blocks):
# print(index)
index.sort(key=operator.itemgetter(0)) # Sort the lists by AB
# print("done sort in {} time".format(asctime()))
ch = '0'
s = ""
backword = ""
# directory = "{}\{}\{}".format(dir,'a-z',self.blocks)
# print("done blocks in {}".format(self.blocks))
directory = "{}\{}\{}".format(dir, 'temp', blocks)
self.createFolders(directory)
# print("start index")
# print (self.indexer)
for word in index:
numbers = ""
# print("ch ccccc= {}".format(ch))
# print( word[0][0])
while word[0][0] != ch :
if not os.path.exists(directory):
os.makedirs(directory)
if len(s) > 0:
# charfile = open("{}\{}.bin".format(directory,ch), "wb")
# self.compress(s, directory, ch)
numthread = -1
while True:
# print("wait")
for j in range(len(self.threadsWrite)):
if not self.threadsWrite[j].is_alive():
numthread = j
self.threadsWrite[numthread] = threading.Thread(target=self.compressFile,args=(s,directory, ch))
self.threadsWrite[numthread].start()
break
if numthread > -1:
break
s = ""
backword = ""
ch = chr(ord(ch) + 1)
if ch > '9' and ch < 'a' :
ch = 'a'
# print(ch)
# print("ch = {}".format(ch))
# self.writeFiles("", directory, ch)
if len(s) == 0:
s = "{}-{}:{}".format(word[0],word[1],word[2])
backword = word[0]
elif backword == word[0]:
s += ("_{}:{}".format(word[1],word[2]))
else:
s += ("|{}-{}:{}".format(word[0], word[1],word[2]))
backword = word[0]
numthread = -1
while True:
# print("wait")
for j in range(len(self.threadsWrite)):
if not self.threadsWrite[j].is_alive():
numthread = j
self.threadsWrite[numthread] = threading.Thread(target=self.compressFile, args=(s,directory, ch))
self.threadsWrite[numthread].start()
break
if numthread > -1:
break
        for t in self.threadsWrite:
            if t.is_alive():
                t.join()
if self.debug:
print("done write in {} time".format(asctime()))
print( datetime.datetime.now()- self.startTime)
# self.startTime = datetime.datetime.now()
return
def compressFolder(self,directoryIn,directoryOut,originPath):
os.chdir(directoryIn)
for f in glob.glob("*.bin"):
fileName = f.split(".bin")[0]
file = open(f, 'rb')
txt = file.read()
# print(txt)
if len(txt) > 3:
txt = zlib.decompress(txt).decode('utf-8')
while len(txt) > 0 :
dir = "{}\{}.bin".format(directoryOut, fileName)
if not os.path.exists(dir):
charfile = open(dir, "wb")
else:
charfile = open(dir, "ab")
sb = zlib.compress(txt.encode('utf-8'))
charfile.write(sb)
charfile.close()
txt = file.read(self.maxread)
if(txt):
txt = zlib.decompress(txt).decode('utf-8')
file.close()
directoryIn = directoryIn[:-2]
os.chdir(originPath)
self.delfolder(directoryIn)
return
def compressFile(self,s,directory,ch):
charfile = open("{}\{}.bin".format(directory, ch), "wb")
sb = zlib.compress(s.encode('utf-8'))
charfile.write(sb)
charfile.close()
def findDoc(self,directory,word):
charfile = open("{}\\a-z\\{}.bin".format(directory, word[0]), "rb")
s = charfile.read()
charfile.close()
s = zlib.decompress(s).decode('utf-8')
wordArray = s.split("|")
docs=[]
for i in wordArray:
numdoc = i.split("-")
if numdoc[0] == word:
# print(numdoc[0])
for j in range(len(numdoc)):
if j > 0:
doc = numdoc[j].split(":")
docs.append(doc[0])
# print(docs)
def createComprassFolder(self, dir):
if not os.path.exists(dir):
os.makedirs(dir)
ch = '0'
while ch <= 'z':
# print("ch = {}".format(ch))
open("{}\{}.bin".format(dir, ch), "wb")
ch = chr(ord(ch) + 1)
if ch > '9' and ch < 'a':
ch = 'a'
# print("done create Folders in {} time".format(asctime()))
return
def createFolders(self,dir):
self.lock.acquire()
try:
if not os.path.exists(dir):
os.makedirs(dir)
finally:
self.lock.release()
ch = '0'
while ch <= 'z':
# print("ch = {}".format(ch))
open("{}\{}.bin".format(dir, ch), "wb")
ch = chr(ord(ch) + 1)
if ch > '9' and ch < 'a':
ch = 'a'
# print("done create Folders in {} time".format(asctime()))
return
    @staticmethod
    def removeIndex(dir):
        """Delete all index files by removing
        the given directory. dir is the name of the directory in which all index files are located.
        After removing the files, the directory should be deleted."""
        directory = "{}\{}".format(dir, 'temp')
        if os.path.exists(directory):
            shutil.rmtree(directory)
            # os.remove(directory)
def worker(self):
"""thread worker function"""
print('Worker')
return
if __name__ =="__main__":
"""part 1.3.1 IndexWriter"""
start = time()
time1 = datetime.datetime.now()
dir = os.getcwd()
file = os.getcwd()+"\\text file\\100000.txt"
print(asctime())
IW = IndexWriter(file,dir)
# IW = IndexWriter.removeIndex(dir)
print(asctime())
# IW.findDoc(dir,"book")
time2 = datetime.datetime.now()
time3 = time2 - time1
print(time3)
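# Hedged illustration (not part of the original file) of the posting-list string
# format that writeToFile() builds: entries are "term-docId:freq", repeats of the
# same term are joined with "_" and different terms with "|".
postings = "apple-3:2_7:1|banana-3:1"
for entry in postings.split("|"):
    term, occurrences = entry.split("-", 1)
    for occ in occurrences.split("_"):
        doc_id, freq = occ.split(":")
        print(term, doc_id, freq)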
|
[
"roipk123@gmail.com"
] |
roipk123@gmail.com
|
81de9564e38b044ab6ab0eee03150a33f4699acb
|
351d0d951dc1b1a0540f80ecf0c1b0f571f4b8e4
|
/fb/subset2.py
|
c479c9e7b2656c47ce0a7a026b89be21c9519160
|
[] |
no_license
|
crystalbai/Algorithm
|
90f5b60cce55a070feaf2657d2b344f2a1cf62c9
|
32bbf6ea81ad39a0496ca83049b72d749eda28dd
|
refs/heads/master
| 2021-01-19T20:55:38.838001
| 2017-11-10T22:20:56
| 2017-11-10T22:26:31
| 101,240,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
class Solution(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
stack = []
count = 0
nums = sorted(nums)
if nums == []:
return []
for idx, i in enumerate(nums):
if idx == 0:
res.append([i])
stack.append((count, idx))
count += 1
if idx > 0 and i != nums[idx-1]:
res.append([i])
stack.append((count, idx))
count +=1
        # BFS over stored subsets: extend each one with every later element;
        # sorting plus the res[-1] check keeps duplicate subsets out
        while len(stack) != 0:
ele = stack.pop(0)
if ele[1] < len(nums) -1:
for shift in range(ele[1]+1, len(nums)):
tmp = res[ele[0]][:]+[nums[shift]]
if tmp != res[-1]:
res.append(tmp)
stack.append([count, shift])
count+=1
res = res +[[]]
return res
sol = Solution()
print sol.subsetsWithDup([1,2,2,2,3,3])
|
[
"crystalbai1994@gmail.com"
] |
crystalbai1994@gmail.com
|
bdbb361b505221722ff6c13136870d6a8eb69f70
|
c783b9fbefd42dbc4b466bd0b5d0fdcf3ad40a09
|
/test/test_content.py
|
6e04c6adf51b438d3de1fa1e0f9198051ca77eda
|
[
"Apache-2.0"
] |
permissive
|
esper-io/esper-client-py
|
5fb6c86e2d5fe538b6dc51a9c50f90f6fcd86623
|
76d42d8f90376c1fbfa85a10738aa47ecffcb43f
|
refs/heads/master
| 2021-12-28T09:22:07.224763
| 2020-10-27T11:50:26
| 2020-10-27T11:50:26
| 178,143,868
| 8
| 6
| null | 2021-12-21T02:11:09
| 2019-03-28T06:51:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
# coding: utf-8
"""
ESPER API REFERENCE
OpenAPI spec version: 1.0.0
Contact: developer@esper.io
---------------------------------------------------------
Copyright 2019 Shoonya Enterprises Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import unittest
import esperclient
from esperclient.models.content import Content
from esperclient.rest import ApiException
class TestContent(unittest.TestCase):
"""Content unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testContent(self):
"""Test Content"""
model = esperclient.models.content.Content()
pass
if __name__ == '__main__':
unittest.main()
|
[
"travis@travis-ci.org"
] |
travis@travis-ci.org
|
e11afd3d83f7d4604abd7d1b30cda714dee6ced0
|
a5a254450d21336176596a255b885d32ee7d5ccd
|
/小作业/新数码管.py
|
afabab95269835c8399d36577f6c12fd767905eb
|
[] |
no_license
|
BrandonSherlocking/python_document
|
1d072869ea4ef6da245ceb9fda0b39d5a0a045cd
|
2a6e90a1266129cc497227ac035c1e649fd1cf2d
|
refs/heads/master
| 2021-09-10T06:23:21.262776
| 2018-03-21T13:41:10
| 2018-03-21T13:41:10
| 114,750,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
import turtle
import datetime
def drawGap(): # draw the gap between segments
turtle.penup()
turtle.fd(5)
def drawLine(draw): # draw one segment (pen down) or skip it (pen up)
drawGap()
turtle.pendown() if draw else turtle.penup()
turtle.fd(40)
drawGap()
turtle.right(90)
def drawDigit(d): # draw the seven-segment pattern for digit d
drawLine(True) if d in [2, 3, 4, 5, 6, 8, 9] else drawLine(False)
drawLine(True) if d in [0, 1, 3, 4, 5, 6, 7, 8, 9] else drawLine(False)
drawLine(True) if d in [0, 2, 3, 5, 6, 8, 9] else drawLine(False)
drawLine(True) if d in [0, 2, 6, 8] else drawLine(False)
turtle.left(90)
drawLine(True) if d in [0, 4, 5, 6, 8, 9] else drawLine(False)
drawLine(True) if d in [0, 2, 3, 5, 6, 7, 8, 9] else drawLine(False)
drawLine(True) if d in [0, 1, 2, 3, 4, 7, 8, 9] else drawLine(False)
turtle.left(180)
turtle.penup()
turtle.fd(20)
def drawDate(date): # draw the date, switching pen color at the year/month markers
turtle.pencolor('red')
for i in date:
if i == '-':
turtle.write('年', font = ('Arial', 18, 'normal'))
turtle.pencolor('green')
turtle.fd(40)
elif i == '=':
turtle.write('月', font = ('Arial', 18, 'normal'))
turtle.pencolor('blue')
turtle.fd(40)
elif i == '+':
turtle.write('日', font=('Arial', 18, 'normal'))
else:
drawDigit(eval(i))
def main(): # main program
turtle.setup(800, 350, 200, 200)
turtle.penup()
turtle.fd(-350)
turtle.pensize(5)
drawDate(datetime.datetime.now().strftime('%Y-%m=%d+'))
turtle.hideturtle()
main()
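# Quick self-contained check (hedged aside, not part of the original) of the
# format string main() passes to drawDate(): '-' marks the year, '=' the month,
# '+' the day.
print(datetime.datetime(2018, 3, 21).strftime('%Y-%m=%d+'))  # -> 2018-03=21+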
|
[
"32945389+BrandonSherlocking@users.noreply.github.com"
] |
32945389+BrandonSherlocking@users.noreply.github.com
|
785f508bf9e774ea47eac235ca5f734b06f14816
|
0989fa233dcbf04eb43d64b697b63fd840fdbf1a
|
/challenge/users/apps.py
|
efa3316edae3374ddbeaf9f0a1cbdb34cfd6c2a3
|
[
"MIT"
] |
permissive
|
ramses132/spaceag-challenge
|
5c52343d114be43db58d019a562089f7eedf29c3
|
88c7a13159a4990578f7f1f5922a7c92dfde5a30
|
refs/heads/master
| 2020-05-29T10:42:32.490612
| 2019-05-28T22:23:04
| 2019-05-28T22:23:04
| 189,100,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "challenge.users"
verbose_name = _("Users")
def ready(self):
try:
import challenge.users.signals # noqa F401
except ImportError:
pass
|
[
"yugo132@gmail.com"
] |
yugo132@gmail.com
|
47fad72d32586238467bdac82d38489bbc466204
|
140f9d56b61f2d4039b25c5dfdf2e12fbd0f63c7
|
/index.py
|
e2548e3bc4165b3f7a466824f1d2a0bcc4986535
|
[] |
no_license
|
Daniel823/Curtain-Client-Service
|
52c2398baf39c856af211387fbf09b90c100bfd2
|
6397e2f98b3ea08a6da5f28902cc5fa129affe59
|
refs/heads/master
| 2021-06-16T23:12:04.697834
| 2017-06-12T00:21:16
| 2017-06-12T00:21:16
| 78,909,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
from flask import request
from flask_api import FlaskAPI, status, exceptions
import json
from modules import MicroController as mc
app = FlaskAPI(__name__)
@app.route("/state", methods=['GET'])
def get():
"""
GET : returns the item state with 2** status code
4**, 5** accordingly if MicroController is not avaliable
"""
return json.dumps(mc.getState())
@app.route("/update/<int:state>/", methods=['POST'])
def post(state):
"""
POST : tells the client what to do ie. close/open the blind
returns status codes 2**, 4**, 5** accordingly
"""
if(mc.updateState(state)):
return '200: The request has succeeded.', status.HTTP_200_OK
return '500: The server encountered an unexpected condition which prevented it from fulfilling the request.', status.HTTP_500_INTERNAL_SERVER_ERROR
if __name__ == "__main__":
app.run(host='192.168.1.119', port=1234, debug=True)
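# Hedged usage sketch (host/port taken from app.run above, endpoints as defined):
#   curl http://192.168.1.119:1234/state              -> JSON-encoded controller state
#   curl -X POST http://192.168.1.119:1234/update/1/  -> 200 on success, 500 otherwise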
|
[
"dglownia222@gmail.com"
] |
dglownia222@gmail.com
|
09ca4311b850a02645ba1503d9cbaca749e9c2ea
|
51aa2894c317f60726fe9a778999eb7851b6be3e
|
/140_gui/pyqt_pyside/examples/Advanced_Python_Scripting/012_Drag&Drop/dnd_widget4.py
|
50f0cec7ae49887d6c1460a7af19a8b442432336
|
[] |
no_license
|
pranaymate/Python_Topics
|
dd7b288ab0f5bbee71d57080179d6481aae17304
|
33d29e0a5bf4cde104f9c7f0693cf9897f3f2101
|
refs/heads/master
| 2022-04-25T19:04:31.337737
| 2020-04-26T00:36:03
| 2020-04-26T00:36:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import sys
import os
from PySide.QtCore import *
from PySide.QtGui import *
class listWidgetClass(QListWidget):
def __init__(self):
super(listWidgetClass, self).__init__()
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setDragDropMode(QAbstractItemView.DropOnly)
def dropEvent(self, event):
# print 'DROP', type(event)
mimedata = event.mimeData()
if mimedata.hasUrls():
for f in mimedata.urls():
print f.toLocalFile()
def dragEnterEvent(self, event):
mimedata = event.mimeData()
if mimedata.hasUrls():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
mimedata = event.mimeData()
if mimedata.hasUrls():
event.accept()
else:
event.ignore()
def addFile(self, path):
pass
if __name__ == '__main__':
app = QApplication([])
w = listWidgetClass()
w.show()
app.exec_()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
deab0f26117f1606730c127176a5eceee3776576
|
3d16202d11a98bd50d54ec8236ee0e079ec35da6
|
/9-5 (extra).py
|
10d7384336ae4a31b80c0022a21b8b80f8cccb43
|
[] |
no_license
|
jamanddounts/Python-answers
|
bb0da904ab57c488e3cf7cc54d8c123326f32bcf
|
fa0bce3d9651d9d9722ebc8fc02e2aa3278db9a9
|
refs/heads/main
| 2023-06-03T22:46:41.923420
| 2021-06-22T05:14:00
| 2021-06-22T05:14:00
| 376,573,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
a = [5, 3, 12, 8, 2]
total = 0
for x in a:
total = total + x
print(total)
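# Equivalent one-liner using the built-in (hedged aside): print(sum(a))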
|
[
"noreply@github.com"
] |
jamanddounts.noreply@github.com
|
39a26a47928be7c60e106a75ebbbc7a20eb8a1bb
|
d21ffb9b177488a95f3b4253960efcc17bcf987c
|
/scrapy_redis_cricket_data/scrapy_redis_cricket_data/spiders/scr.py
|
d73dc90efbb7b30ed91e088bc2f1b468cf6bbe9d
|
[] |
no_license
|
syedissaq/scrapy-py
|
28793c1b32c510032fb67090cd3716fd62aa6331
|
57efe69892a6d81b7fec1a90308b1de6b803159e
|
refs/heads/master
| 2021-01-22T01:34:11.401885
| 2015-02-24T21:09:25
| 2015-02-24T21:09:25
| 31,256,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,415
|
py
|
# -*- coding: utf-8 -*
import redis
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
import scrapy
from scrapy_redis_cricket_data.items import CricketrecurItem
# result=r.lpop("url:l:1:day:cr")
# for i in range(0,3000):
# result=r.lpop("url:l:1:day:cr")
l=[]
for i in range(0,len(r.lrange("url:l:1:day:cric",0,-1))):
    result =r.lpop("url:l:1:day:cric")
    l.append(result)
    r.rpush("url:l:1:day:cric",result)  # rpush, not lpush: pushing back on the head would make every lpop return the same url
class PakSpider(scrapy.Spider):
name = "cri"
start_urls = l
def parse(self, response):
i=0
item = CricketrecurItem()
item['matchid']=response.xpath('//*[@id="full-scorecard"]/div[1]/div[2]/div[1]/a[1]/text()').extract()
item['toss']= response.xpath('//*[@id="full-scorecard"]/div[3]/div/div/div[1]/span[1]/text()').extract()
item['daynight']= response.xpath('//*[@id="full-scorecard"]/div[1]/div[2]/div[3]/text()').extract()
item['ground']=response.xpath('//*[@id="full-scorecard"]/div[1]/div[1]/div[1]/a/text()').extract()
i=0
total=0
p=0
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]"""%x
for sel in response.xpath(c):
i=i+2
i=i-2
for x in range(2,i+1,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]"""%x
for sel in response.xpath(c):
t= sel.xpath('td[4]/text()').extract()
k=int(t[0])
total=total+k
i=i+2
n=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%i).extract()
ltotal=total+int(n[0])
item['totalrunistinning']=ltotal
i=0
total=0
p=0
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]"""%x
for sel in response.xpath(c):
i=i+2
i=i-2
for x in range(2,i+1,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]"""%x
for sel in response.xpath(c):
t= sel.xpath('td[4]/text()').extract()
k=int(t[0])
total=total+k
i=i+2
n=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%i).extract()
ltotal=total+int(n[0])
item['totalrunsecondinning']=ltotal
item['istbating']= response.xpath('//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[1]/th[2]/text()').extract()
item['secondbating']= response.xpath('//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[1]/th[2]/text()').extract()
item['won']= response.xpath('//*[@id="full-scorecard"]/div[1]/div[1]/div[3]/text()').extract()
lr1=[]
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]"""%x
for sel in response.xpath(c):
x=sel.xpath('td[4]/text()').extract()
lr1.append(int(x[0]))
print type(lr1[0])
print lr1
x=lr1[0]
xi=0
for i in range(0,len(lr1)):
if lr1[i] > x:
x=lr1[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
item['iisttoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['iisttoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['iisttoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['iisttoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr1[xi]=0
x=lr1[0]
xi=0
for i in range(0,len(lr1)):
if lr1[i] > x:
x=lr1[i]
xi=i
yi=xi+xi+2
if xi==0:
yi=2
item['isecondtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['isecondtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['isecondtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['isecondtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr1[xi]=0
print lr1
x=lr1[0]
xi=0
for i in range(0,len(lr1)):
if lr1[i] > x:
x=lr1[i]
xi=i
yi=xi+xi+2
if xi==0:
yi=2
item['ithirdtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['ithirdtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['ithirdtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[4]/text()"""%yi).extract()
item['ithirdtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[1]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr3=[]
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]"""%x
for sel in response.xpath(c):
x=sel.xpath('td[6]/text()').extract()
lr3.append(int(x[0]))
print type(lr3[0])
print lr3
x=lr3[0]
xi=0
for i in range(0,len(lr3)):
if lr3[i] > x:
x=lr3[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
item['iisttopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['iisttopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['iisttopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['iisttopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr3[xi]=0
x=lr3[0]
xi=0
for i in range(0,len(lr3)):
if lr3[i] > x:
x=lr3[i]
xi=i
yi=xi+xi+2
if xi==0:
yi=2
item['isecondtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['isecondtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['isecondtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['isecondtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr3[xi]=0
x=lr3[0]
xi=0
for i in range(0,len(lr3)):
if lr3[i] > x:
x=lr3[i]
xi=i
yi=xi+xi+2
if xi==0:
yi=2
item['ithirdtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['ithirdtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
else:
item['ithirdtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[6]/text()"""%yi).extract()
item['ithirdtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[2]/tr[%d]/td[2]/a/text()"""%yi).extract()
lr2=[]
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]"""%x
for sel in response.xpath(c):
x=sel.xpath('td[4]/text()').extract()
lr2.append(int(x[0]))
        x=lr2[0]  # was lr1[0], a copy-paste slip; this pass scans lr2
xi=0
for i in range(0,len(lr2)):
if lr2[i] > x:
x=lr2[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['sisttoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['sisttoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['sisttoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['sisttoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['sisttoprunerbatsman']
print item['sisttoprunerbatsmanname']
print xi
lr2[xi]=0
print lr2
x=lr2[0]
xi=0
for i in range(0,len(lr2)):
if lr2[i] > x:
x=lr2[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['ssecondtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['ssecondtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['ssecondtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['ssecondtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['ssecondtoprunerbatsman']
print item['ssecondtoprunerbatsmanname']
#11##################################
print xi
lr2[xi]=0
print lr2
x=lr2[0]
xi=0
for i in range(0,len(lr2)):
if lr2[i] > x:
x=lr2[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['sthirdtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['sthirdtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['sthirdtoprunerbatsman']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[4]/text()"""%yi).extract()
item['sthirdtoprunerbatsmanname']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[3]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['sthirdtoprunerbatsman']
print item['sthirdtoprunerbatsmanname']
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#IIIIIIIIIIIIIIIIIIIIIIIRRRRRRRRRRRRRRRRRRRIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
lr4=[]
for x in range(2,25,2):
c = """//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]"""%x
for sel in response.xpath(c):
x=sel.xpath('td[6]/text()').extract()
lr4.append(int(x[0]))
print type(lr4[0])
print lr4
x=lr4[0]
xi=0
for i in range(0,len(lr4)):
if lr4[i] > x:
x=lr4[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['sisttopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['sisttopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['sisttopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['sisttopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['sisttopbowler']
print item['sisttopbowlername']
print xi
lr4[xi]=0
print lr4
x=lr4[0]
xi=0
for i in range(0,len(lr4)):
if lr4[i] > x:
x=lr4[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['ssecondtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['ssecondtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['ssecondtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['ssecondtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['ssecondtopbowler']
print item['ssecondtopbowlername']
#11##################################
print xi
lr4[xi]=0
print lr4
x=lr4[0]
xi=0
for i in range(0,len(lr4)):
if lr4[i] > x:
x=lr4[i]
xi=i
yi=xi+xi+2
print xi
if xi==0:
yi=2
# //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[4]/text()
item['sthirdtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['sthirdtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
else: # //*[@id="full-scorecard"]/div[2]/div/table[1]/tbody/tr[10]/td[2]/a/text()
item['sthirdtopbowler']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[6]/text()"""%yi).extract()
item['sthirdtopbowlername']=response.xpath("""//*[@id="full-scorecard"]/div[2]/div/table[4]/tr[%d]/td[2]/a/text()"""%yi).extract()
print item['sthirdtopbowler']
print item['sthirdtopbowlername']
r.lpush("data:one:day:match",item)
|
[
"syed@trialx.com"
] |
syed@trialx.com
|
457e7c36a29b2a48a9368e843ecb6c32c5c017bc
|
737005e8145d7aa961cb8012f0da3129ddf13907
|
/venv/Scripts/easy_install-3.8-script.py
|
9c68114b00924f9de03c46816d3396cc071492eb
|
[
"Apache-2.0"
] |
permissive
|
Architect0711/rbWebCrawler
|
b61a53b6336ff77c203f781a1727187f49013e71
|
f331ddfe8e8cc0ea7458a0e4fbcc9ca500ab4d02
|
refs/heads/master
| 2021-05-22T17:37:58.560070
| 2020-08-08T10:52:56
| 2020-08-08T10:52:56
| 253,024,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
#!E:\Develop\Python\10_Work\rbWebCrawler\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"R.Bantele@live.de"
] |
R.Bantele@live.de
|
2f9976e9ae634c5d370e0a45e4484a9c63e7d363
|
1c81afc8e8eb19bd51114c87c0f15742861aa29f
|
/stack-and-queue/linked_list_stack.py
|
c948cea8162d782a4648555d38df23cc601a304d
|
[] |
no_license
|
ShaneKoNaung/Python-practice
|
4489b0dbf8b8146926bcdbbaa26b4149c24ed4dd
|
9994644eebd0e2eb031c61ef8c0a592b65334f99
|
refs/heads/master
| 2020-03-12T10:30:06.778739
| 2018-11-10T18:22:02
| 2018-11-10T18:22:02
| 130,574,389
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
''' Implementation of stack using singly linked list '''
class LinkedListStack(object):
class Node(object):
def __init__(self, data, next=None):
self._data = data
self._next = next
def __init__(self):
''' create an empty stack '''
self._head = None
self._size = 0
def __len__(self):
''' return Number of elements in the list '''
return self._size
def is_empty(self):
''' return True if the list is empty'''
return self._size == 0
def push(self, e):
''' add element at the top of the stack '''
self._head = self.Node(e, self._head)
self._size += 1
def top(self):
''' return the element at the top of the stack'''
if self.is_empty():
raise IndexError("Stack is empty")
else:
return self._head._data
def pop(self):
        ''' remove and return the element at the top of the stack '''
if self.is_empty():
raise IndexError("stack is empty")
answer = self._head._data
self._head = self._head._next
self._size -= 1
return answer
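# Minimal usage sketch (hedged, not part of the original module):
if __name__ == '__main__':
    s = LinkedListStack()
    for e in (1, 2, 3):
        s.push(e)
    print(s.top())   # 3
    print(s.pop())   # 3
    print(len(s))    # 2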
|
[
"shanekonaung@gmail.com"
] |
shanekonaung@gmail.com
|
f66d3100182358ac9c2158b8dc78e3cbc7e0093f
|
39fa403d46a4456a07c761e1aaa8af2d418c5f87
|
/kid_readout/roach/tests/test_roach1_baseband_loopback.py
|
e0c4ee787ddb500843be8ccc22da12ad8c68f502
|
[
"BSD-2-Clause"
] |
permissive
|
vapor36/kid_readout
|
72d94d96e964d6a2eef3aa57ed6fc814946cfe46
|
07202090d468669200cab78297122880c1c03e87
|
refs/heads/master
| 2020-12-12T13:32:47.267337
| 2018-11-11T15:36:40
| 2018-11-11T15:36:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
"""
This module runs tests on the ROACH1 in baseband mode using loopback.
"""
from kid_readout.roach.baseband import RoachBaseband
from kid_readout.settings import ROACH1_IP, ROACH1_VALON, ROACH1_HOST_IP
from kid_readout.roach.tests.mixin import RoachMixin, Roach1Mixin, BasebandSoftwareMixin, BasebandHardwareMixin
# This causes nose test discovery to not add tests found in this module. To run these tests, specify
# $ nosetests test_roach1_baseband_loopback.py
__test__ = False
class TestRoach1BasebandLoopback(RoachMixin, Roach1Mixin, BasebandSoftwareMixin, BasebandHardwareMixin):
@classmethod
def setup(cls):
cls.ri = RoachBaseband(roachip=ROACH1_IP, adc_valon=ROACH1_VALON, host_ip=ROACH1_HOST_IP, initialize=False)
cls.ri.initialize(use_config=False)
|
[
"daniel.isaiah.flanigan@gmail.com"
] |
daniel.isaiah.flanigan@gmail.com
|
5e53b05ff14e2d58b75629214e71f2d22aff575e
|
a43dbdcf0b954b930df2a1145572b079841ff9e8
|
/app/models/follow.py
|
8d77e1a27d93c3d05f5f1ae05ca96a14d09e06fd
|
[] |
no_license
|
geekhub-python/flask-geekhub-app
|
134489351c4752bb448b62f18cf1353b1d1b9fa1
|
fbfdba440fe5f27a44a4b87af35f0a5fc74c3752
|
refs/heads/master
| 2021-01-19T08:49:37.955233
| 2017-04-13T15:07:52
| 2017-04-13T15:07:52
| 80,143,711
| 0
| 0
| null | 2017-04-13T15:07:48
| 2017-01-26T18:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
from datetime import datetime
from app import db
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
|
[
"evgeniy.kostenko@ardas.biz"
] |
evgeniy.kostenko@ardas.biz
|
e12555003b1c9d9e87173316a47f5cf4d6169589
|
a995a9ae2d3167aa1fbad7b811703c7face4f953
|
/tests/cc-simple-status-test.py
|
bb45c5e3fb04493d2edc9f036f334d5a92d0a633
|
[] |
no_license
|
carlasouza/nimbus
|
129ab3af1356b1dea022a557cdc199be9b53c628
|
73ffda057fec82d6220d3fda437ed2934c2c757a
|
refs/heads/master
| 2021-01-17T16:18:22.995431
| 2011-08-08T13:23:09
| 2011-08-08T13:23:09
| 1,764,362
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
#!/usr/bin/env python
import pexpect
import sys
import os
to=90
cc_home=os.environ['CLOUD_CLIENT_HOME']
logfile = sys.stdout
cmd = "%s/bin/cloud-client.sh --transfer --sourcefile /etc/group" % (cc_home)
(x, rc)=pexpect.run(cmd, withexitstatus=1)
cmd = "%s/bin/cloud-client.sh --run --name group --hours .25" % (cc_home)
child = pexpect.spawn (cmd, timeout=to, maxread=20000, logfile=logfile)
rc = child.expect ('Running:')
if rc != 0:
print "group not found in the list"
sys.exit(1)
handle = child.readline().strip().replace("'", "")
rc = child.expect(pexpect.EOF)
if rc != 0:
print "run"
sys.exit(1)
cmd = "%s/bin/cloud-client.sh --status" % (cc_home)
child = pexpect.spawn (cmd, timeout=to, maxread=20000, logfile=logfile)
rc = child.expect ('State:')
if rc != 0:
print "group not found in the list"
sys.exit(1)
cmd = "%s/bin/cloud-client.sh --terminate --handle %s" % (cc_home, handle)
print cmd
(x, rc)=pexpect.run(cmd, withexitstatus=1)
print x
if rc != 0:
print "failed to terminate"
sys.exit(1)
cmd = "%s/bin/cloud-client.sh --delete --name group" % (cc_home)
print cmd
(x, rc)=pexpect.run(cmd, withexitstatus=1)
print x
if rc != 0:
print "failed to terminate"
sys.exit(1)
sys.exit(0)
|
[
"bresnaha@mcs.anl.gov"
] |
bresnaha@mcs.anl.gov
|
53cf879fbec8518512026b768481999b0d969d96
|
c10121b33f2c2e2f5abe1499691d5d0cb18219ba
|
/lessons/ex24.py
|
fd66509dedbaeb6456ecaa35f20fbb1543993e5f
|
[] |
no_license
|
denver/learn_python
|
9782b9d42c895b56700149574daba03719605148
|
7dfc21ca52e8dbfe1e588da84f63727ee29e3661
|
refs/heads/master
| 2020-04-10T10:04:20.324942
| 2015-10-17T18:46:25
| 2015-10-17T18:46:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
|
[
"denver.peterson@gmail.com"
] |
denver.peterson@gmail.com
|
3377c6bcf64f155261ced7e78bdd30f65820bd87
|
c8c8244540b38f1ece905ce3de3545f1d57b7959
|
/genral_ledger/wizard.py
|
688f372aa91563a8f3c3e44683f4f8738863da7d
|
[] |
no_license
|
nayyabecube/naseem2
|
e8e47f02db7ef3fd4d8add3e6d57b96ad1ba0e24
|
f068b8c10e0fc6b3684df3fe30d90c1e61c3580c
|
refs/heads/master
| 2020-03-13T21:48:07.940113
| 2018-05-21T06:45:38
| 2018-05-21T06:45:38
| 131,304,228
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
# #-*- coding:utf-8 -*-
# ##############################################################################
# #
# # OpenERP, Open Source Management Solution
# # Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU Affero General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
# #
# # This program is distributed in the hope that it will be useful,
# # but WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# # GNU Affero General Public License for more details.
# #
# # You should have received a copy of the GNU Affero General Public License
# # along with this program. If not, see <http://www.gnu.org/licenses/>.
# #
# ##############################################################################
from odoo import models, fields, api
class GenerateGenralLedger(models.Model):
_name = "genral.ledger"
form = fields.Date(string="From")
to = fields.Date(string="To")
entry_type = fields.Selection([
('posted', 'Actual Ledger'),
('all', 'Virtual Ledger'),
],default='posted',string="Target Moves")
account = fields.Many2one('account.account',string="Account")
class BankandCash(models.Model):
_inherit = 'account.account'
nature = fields.Selection([
('debit', 'Debit'),
('credit', 'Credit'),
])
|
[
"nayyabzakir@yahoo.com"
] |
nayyabzakir@yahoo.com
|
6bc9dba587e6051b86abf1f854913e32e47c7c28
|
13fefcfe7a5630b8e932ae6753f5c03f8a74af0c
|
/projects/models.py
|
3fc3ab45231bd9a8b0f41aa8be3b1b202c8e841d
|
[] |
no_license
|
richardkefa/portfolio-v2
|
13d7a023a6234b40b40735f5d391c66036af5366
|
d5deac021696818847e8cfabefcec40e2f9aea1e
|
refs/heads/master
| 2023-01-08T08:23:26.221291
| 2020-11-04T18:47:59
| 2020-11-04T18:47:59
| 302,455,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from django.db import models
# Create your models here.
class Projects(models.Model):
project_name = models.CharField(max_length=50)
description = models.TextField()
    live_link = models.CharField(max_length=200)  # CharField requires a max_length
def __str__(self):
return self.project_name
def save_project(self):
self.save()
    @classmethod
    def get_projects(cls):
        projects = cls.objects.all()
        return projects
|
[
"richardkefa@gmail.com"
] |
richardkefa@gmail.com
|
f56818e23f2bd45bc5839c78bc9674b23868bf08
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02266/s056648532.py
|
f2de9d1bca9251b46a0e6d7f9e65066fcf09a8b5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
def Main():
cross = input().replace("\n", "")
S = list()
V = list()
ans = list()
    for i in range(len(cross)):
        r = cross[i]
        if r == "\\":
            S.append(i)          # remember where each downslope starts
        elif r == "/":
            if len(S) > 0:
                j = S.pop()      # matching downslope for this upslope
                v = i - j        # horizontal span = area of this slice of water
                V.append([v, j])
    # merge partial areas into ponds: every area that starts to the right
    # of j belongs to the pond that opened at j
    while len(V) > 0 :
total = 0
[v, j] = V.pop()
total += v
while True:
if len(V) > 0:
[vv, jj] = V.pop()
if jj > j:
total += vv
else:
V.append([vv, jj])
ans.append(total)
break
else:
ans.append(total)
break
print("{}".format(sum(ans)))
print(len(ans), *reversed(ans))
Main()
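# Example (hedged): for the input "\\//" the diagram holds one pond of area 4,
# so the program prints:
#   4
#   1 4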
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3e049f289d8410ab81abac59a1ecb1be149fb523
|
9432dcda5c6fd0571f74162fa5687be58a00a248
|
/alpha_tictactoe/minmax.py
|
b2865bb60a794211e61eba7163b9ac2367d8f516
|
[] |
no_license
|
chickensouple/experiments
|
a6542fafbff342cd202ef96439236d50b39a9e0d
|
f991450cf50acc376d9d3eb17055c9c58302724b
|
refs/heads/master
| 2023-01-06T22:00:02.766157
| 2022-12-31T04:18:26
| 2022-12-31T04:18:26
| 256,374,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,433
|
py
|
import numpy as np
import copy
import time
from tree import Tree
from game_base import GameBase, GameActor
class MinMaxNodeState(object):
def __init__(self,
game_state,
minmax_value,
optimal_action):
self.game_state = game_state
self.minmax_value = minmax_value
self.optimal_action = optimal_action
class MinMaxSearchTree(object):
"""
MinMax Search Tree
"""
def __init__(self, game):
"""
ctor for min max search tree.
Arguments:
game {GameBase} -- A game object that will be used and mutated by the minmax tree search.
                Don't use the game outside of this class, as it will be manipulated within here.
"""
self.tree = Tree()
self.game = game
def search(self, game_state, player):
"""
Perform MinMax search and return the search tree.
The returned search tree will contain a tuple of data at each node.
This tuple consists of (game_state, minmax_value, optimal_action)
Arguments:
game_state {np.array} -- state of the game as returned by ttt.get_state()
player {which player to solve minmax tree for} -- PLAYER1 or PLAYER2
"""
# clear out any previous searches
self.tree.reset()
# Insert the parent node
root_idx = self.tree.insert_node(MinMaxNodeState(game_state, None, None), None)
# Start expanding from parent node
self._expand_node(root_idx, player)
return self.tree
def _expand_node(self, node_idx, player):
# get possible actions
node_data = self.tree.get_node_data(node_idx)
self.game.set_state(node_data.game_state)
curr_player = self.game.get_curr_player()
actions = self.game.get_valid_actions()
# If we have reached a leaf node, get the value and return
# 1 for winning, -1 for losing, 0 for tie
if len(actions) == 0:
val = self.game.get_outcome(player)
node_data.minmax_value = val
self.tree.update_node_data(node_idx, node_data)
return val
# Recursively expand each child node
# and collect the minmax values
minmax_vals = []
for action in actions:
self.game.set_state(node_data.game_state)
self.game.step(action)
new_node_idx = self.tree.insert_node(MinMaxNodeState(self.game.get_state(), None, None), node_idx)
val = self._expand_node(new_node_idx, player)
minmax_vals.append(val)
# Compute minimum or maximum of values depending on what level the
# search is currently on
if player == curr_player:
val_idx = np.argmax(minmax_vals)
else:
val_idx = np.argmin(minmax_vals)
val = minmax_vals[val_idx]
opt_action = actions[val_idx]
# update the expanded node with the value and optimal action
node_data.minmax_value = val
node_data.optimal_action = opt_action
self.tree.update_node_data(node_idx, node_data)
return val
class OptimalActor(GameActor):
"""
Class to play tic tac toe using MinMax search results.
"""
def __init__(self, optimal_data):
"""
Creates the min max player.
Arguments:
optimal_data {np.array} -- The numpy array generated by OptimalActor.generate_optimal_data()
"""
self.optimal_data = optimal_data
def get_action(self, game_state):
"""
Get an action at a particular game state.
"""
idx = OptimalActor._state_to_idx(game_state)
return tuple(self.optimal_data[idx, :])
_POWERS_3 = np.power(3, np.arange(9))
_PLAYER_OFFSET = np.sum(2 * _POWERS_3) + 1
_MAX_STATES = _PLAYER_OFFSET + np.sum(2 * _POWERS_3)
@staticmethod
def _state_to_idx(state):
idx = np.sum(OptimalActor._POWERS_3 * state[:9])
idx += (state[9] == GameBase.Player.PLAYER2) * OptimalActor._PLAYER_OFFSET
return idx
@staticmethod
def generate_optimal_data():
"""
Generates numpy array of optimal moves.
It will be an (N, 2) array. Where the i'th row is the optimal action
for the i'th state. The states are indexed by flattening the state using
_state_to_idx().
"""
ttt = TicTacToe()
ttt_search = TicTacToe()
search_tree = MinMaxSearchTree(ttt_search)
# Run search for the various scenarios where the minmax player has to go first
# or second reacting to various first moves.
tree_list = []
tree_list.append(copy.deepcopy(search_tree.search(ttt.get_state(), GameBase.Player.PLAYER1)))
actions = ttt.get_valid_actions()
initial_state = ttt.get_state()
for action in actions:
ttt.set_state(initial_state)
ttt.step(action)
tree_list.append(copy.deepcopy(search_tree.search(ttt.get_state(), GameBase.Player.PLAYER2)))
# Take the search trees and condense the optimal actions into a numpy array
optimal_actions = np.ones((OptimalActor._MAX_STATES, 2), dtype=np.int8) * -1
for tree in tree_list:
for node in tree.nodes:
idx = OptimalActor._state_to_idx(node.game_state)
                if node.optimal_action is not None:
optimal_actions[idx, :] = node.optimal_action
return optimal_actions
if __name__ == "__main__":
import argparse
import pickle
from tictactoe import TicTacToe, TicTacToeHumanActor
from game_base import run_game
parser = argparse.ArgumentParser(
description="MinMax TicTacToe Player. \
Use *generate* option to generate perform a search and cache the optimal actions.\
Then use the *play* option to read in the cached data and play a game against the computer.")
parser.add_argument(
"--file",
action="store",
type=str,
default="/tmp/minmax_cache.npy",
help="File to store/load search trees.")
subparser = parser.add_subparsers(
help="Generate tree or play game.",
dest="cmd")
generate_subparser = subparser.add_parser("generate",
help="Generate Search Trees and save them.")
play_subparser = subparser.add_parser("play",
help="Play against MinMax computer.")
play_subparser.add_argument(
"--player",
action="store",
type=int,
default=1,
choices=[1, 2],
help="choose to play as player 1 or 2")
args = parser.parse_args()
if args.cmd == "generate":
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
        optimal_data = OptimalActor.generate_optimal_data()
        end_time = time.perf_counter()
print("Total time for full MinMax Tree Search: {} seconds".format(end_time - start_time))
np.save(args.file, optimal_data)
else:
optimal_data = np.load(args.file)
human_actor = TicTacToeHumanActor()
minmax_actor = OptimalActor(optimal_data)
ttt = TicTacToe()
human_actor.print_help()
if args.player == 1:
result = run_game(ttt, human_actor, minmax_actor)
else:
result = run_game(ttt, minmax_actor, human_actor)
ttt.print_board()
print("End Game Status: {}".format(result["game_status"].name))
|
[
"clarkjzhang@gmail.com"
] |
clarkjzhang@gmail.com
|
c4279ed4636a7808995cb9c91e67c70e46376f65
|
827f75462d8f78abc416128e33ee25f278606e18
|
/Program_Python_code/23-1.py
|
267ca920e97863a62772af5da82284ce953d6b3e
|
[
"MIT"
] |
permissive
|
skyhigh8591/VocationalTraining_LearningCode
|
5a2573933a9a290195987b6580702105263cc67c
|
5f3c0f11874618919002126863772e0dd06a1072
|
refs/heads/master
| 2022-12-13T20:14:28.316342
| 2020-09-09T02:15:14
| 2020-09-09T02:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
#! /usr/bin/python
#coding=utf-8
# sum the integers 1..num (e.g. 1+2+3+4+5)
num = raw_input("enter your number")
num = int(num)
count = 0
i=0
while(i<num):
i=i+1
print "The number is:", i
count = count + i
print "the total is:", count
|
[
"64904057+skyhigh8591@users.noreply.github.com"
] |
64904057+skyhigh8591@users.noreply.github.com
|
3f1d6aa410869608d311f71a17fe4210bdfa1701
|
bd33b915ef5fb5fbe02b87d66e0a1fe10646cdaf
|
/simplified_scrapy/core/mongo_urlstore.py
|
29afa7919e966b89c2d36c986aa318b236e68e77
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
yiyedata/simplified-scrapy
|
40bef5ce79b8984684c176a6c1f956d30992257b
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
refs/heads/master
| 2022-07-09T12:54:11.432652
| 2021-04-23T13:36:51
| 2021-04-23T13:36:51
| 201,045,344
| 9
| 5
|
Apache-2.0
| 2022-06-24T14:23:17
| 2019-08-07T12:24:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,944
|
py
|
#!/usr/bin/python
#coding=utf-8
from pymongo import MongoClient
import json,random
import sys
from simplified_scrapy.core.utils import printInfo,convertUrl2Int,md5
from simplified_scrapy.core.urlstore_base import UrlStoreBase
class MongoUrlStore(UrlStoreBase):
_host = '127.0.0.1'
_port = 27017
_dbName = 'python_db'
_tbName = 'url_'
_dicName = 'dic_'
_multiQueue = False
_tbCache = {}
_totalCount = {}
_duplicateRemoval = True
def __init__(self,name, setting=None):
self._tbName = self._tbName+name
self._dicName = self._dicName+name
if(setting):
if(setting.get('host')):
self._host=setting.get('host')
if(setting.get('port')):
self._port=setting.get('port')
if(setting.get('dbName')):
self._dbName=setting.get('dbName')
if(setting.get('tbName')):
self._tbName=setting.get('tbName')
if(setting.get('multiQueue')):
self._multiQueue = setting.get('multiQueue')
if setting.get('duplicateRemoval'):
self._duplicateRemoval = setting.get('duplicateRemoval')
def _connect(self):
conn = MongoClient(self._host, self._port)
return conn[self._dbName]
def popUrl(self):
db = self._connect()
lst=[]
while(True):
            if(len(lst)==10): return None  # lst.count is a method; compare the length
tbName = self._tbName
i = random.randint(0,9)
printInfo('popUrl',i)
if(i in lst): continue
lst.append(i)
if(i):
tbName = tbName+str(i)
if(self._tbCache.get(tbName)): continue
url = db[tbName].find_one({"state": 0})
if(url):
db[tbName].update({"_id": url["_id"]}, {"$set": {"state": 1}})
if(i in self._totalCount): self._totalCount[i] -= 1
else:
self._tbCache[tbName] = True
return url
def getCount(self):
db = self._connect()
i = 1
if(0 not in self._totalCount):
self._totalCount[0]=db[self._tbName].find({"state": 0}).count()
total = self._totalCount[0]
        while(i < 10):
            if(i not in self._totalCount):
                self._totalCount[i] = db[self._tbName + str(i)].find({"state": 0}).count()
            total += self._totalCount[i]
            i += 1
        return total
def checkUrl(self,url,i):
if not self._duplicateRemoval: return False
db = self._connect()
id = md5(url)
tbName = self._tbName
if(i): tbName = tbName + str(i)
url = db[tbName].find_one({"_id": id})
if(not url and not i):
url = db[self._tbName].find_one({"_id": id})
return url
def saveUrl(self, urls,i=None):
db = self._connect()
flag = False
for url in urls:
if(not isinstance(url,dict)):
id = md5(url)
url = {'url':url, '_id':id, 'state':0}
else:
id = md5(url["url"])
url['_id']=id
url['state']=0
if(i != 0):
i = self._getIndex(url["url"])
if(not self.checkUrl(url["url"],i)):
tbName = self._tbName
if(i):
tbName = tbName+str(i)
db[tbName].insert(url)
if(i in self._totalCount): self._totalCount[i]+=1
else: self._totalCount[i] = 1
flag = True
if(flag):
self._tbCache[tbName] = False
def _getIndex(self, url):
if(not self._multiQueue or not url): return None
return convertUrl2Int(url)
def clearUrl(self):
pass
def resetUrls(self, urls):
db = self._connect()
for url in urls:
if(not isinstance(url,dict)):
id = md5(url)
url={'url':url,'_id':id, 'state':0}
else:
id = md5(url["url"])
url['_id']=id
url['state']=0
if(not self.checkUrl(url["url"],None)):
db[self._tbName].insert(url)
else:
db[self._tbName].update({"_id": url["_id"]}, {"$set": {"state": 0}})
def updateState(self, url, state):
db = self._connect()
if(not isinstance(url,dict)):
id = md5(url)
else:
id = md5(url["url"])
db[self._tbName].update({"_id": id}, {"$set": {"state": state}})
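
# --- Usage sketch (added; not part of the original module) ---
# Assumes a MongoDB server on localhost:27017 and a hypothetical spider
# name 'demo'; the URLs below are placeholders.
if __name__ == '__main__':
    store = MongoUrlStore('demo', setting={'multiQueue': True})
    store.saveUrl(['https://example.com/a', 'https://example.com/b'])
    print(store.getCount())   # pending URLs across the sharded queue tables
    url = store.popUrl()      # marks the picked document state=1
    if url:
        store.updateState(url, 2)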
|
[
"3095069599@qq.com"
] |
3095069599@qq.com
|
a64cef6dfb5dcbe9331ca1a2c6dc875356ae9737
|
3a0b8643b9156c843b88b43b8630ab0e8e634331
|
/model/lstm_crf_cnn.py
|
a314065667294375bb357b0e68a3a5bdd24652a9
|
[] |
no_license
|
jmfveneroso/sequence_tagging
|
643143135c36a2acce8331e69eb492c41d853c56
|
c87d848497e2e914a7c61eba525f39c8b9891a4a
|
refs/heads/master
| 2020-04-11T08:13:39.170501
| 2019-10-16T20:11:36
| 2019-10-16T20:11:36
| 161,636,116
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
import numpy as np
import tensorflow as tf
from pathlib import Path

# Helper imports (assumed to live alongside this file, as in the tf_ner
# codebase this model derives from):
from tf_metrics import precision, recall, f1
from masked_conv import masked_conv1d_and_max
def model_fn(features, labels, mode, params):
# For serving features are a bit different
if isinstance(features, dict):
features = ((features['words'], features['nwords']),
(features['chars'], features['nchars']))
# Read vocabs and inputs
dropout = params['dropout']
(words, nwords), (chars, nchars) = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(
params['words'], num_oov_buckets=params['num_oov_buckets'])
vocab_chars = tf.contrib.lookup.index_table_from_file(
params['chars'], num_oov_buckets=params['num_oov_buckets'])
with Path(params['tags']).open() as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
num_tags = len(indices) + 1
with Path(params['chars']).open() as f:
num_chars = sum(1 for _ in f) + params['num_oov_buckets']
# Char Embeddings
char_ids = vocab_chars.lookup(chars)
variable = tf.get_variable(
'chars_embeddings', [num_chars + 1, params['dim_chars']], tf.float32)
char_embeddings = tf.nn.embedding_lookup(variable, char_ids)
char_embeddings = tf.layers.dropout(char_embeddings, rate=dropout,
training=training)
# Char 1d convolution
weights = tf.sequence_mask(nchars)
char_embeddings = masked_conv1d_and_max(
char_embeddings, weights, params['filters'], params['kernel_size'])
# Word Embeddings
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings'] # np.array
variable = np.vstack([glove, [[0.] * params['dim']]])
variable = tf.Variable(variable, dtype=tf.float32, trainable=False)
word_embeddings = tf.nn.embedding_lookup(variable, word_ids)
# Concatenate Word and Char Embeddings
embeddings = tf.concat([word_embeddings, char_embeddings], axis=-1)
embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training)
# LSTM
t = tf.transpose(embeddings, perm=[1, 0, 2]) # Need time-major
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
output_bw, _ = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
# CRF
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable("crf", [num_tags, num_tags], dtype=tf.float32)
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
if mode == tf.estimator.ModeKeys.PREDICT:
# Predictions
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(
params['tags'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {
'pred_ids': pred_ids,
'tags': pred_strings
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
else:
# Loss
vocab_tags = tf.contrib.lookup.index_table_from_file(params['tags'])
tags = vocab_tags.lookup(labels)
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
logits, tags, nwords, crf_params)
loss = tf.reduce_mean(-log_likelihood)
# Metrics
weights = tf.sequence_mask(nwords)
metrics = {
'acc': tf.metrics.accuracy(tags, pred_ids, weights),
'precision': precision(tags, pred_ids, num_tags, indices, weights),
'recall': recall(tags, pred_ids, num_tags, indices, weights),
'f1': f1(tags, pred_ids, num_tags, indices, weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=metrics)
elif mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(
loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(
mode, loss=loss, train_op=train_op)
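
# --- Wiring sketch (added; not part of the original file) ---
# The hyperparameter values and vocab/embedding paths below are illustrative
# assumptions, not taken from the repository.
if __name__ == '__main__':
    params = {
        'dim': 300, 'dim_chars': 100, 'dropout': 0.5, 'lstm_size': 100,
        'filters': 50, 'kernel_size': 3, 'num_oov_buckets': 1,
        'words': 'vocab.words.txt', 'chars': 'vocab.chars.txt',
        'tags': 'vocab.tags.txt', 'glove': 'glove.npz',
    }
    estimator = tf.estimator.Estimator(model_fn, 'results/model', params=params)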
|
[
"jmfveneroso@gmail.com"
] |
jmfveneroso@gmail.com
|
fb025c93a260073eb1b4ac55ee12c39f2cdbd825
|
eb2e28f145d468c5b021b98cf5b66da6ccb6aa96
|
/cataclop/pmu/management/commands/parse.py
|
82c17bc8407bd3a4303e5a7f818d8215d7697b0d
|
[] |
no_license
|
pourquoi/cataclop
|
7d746496c02d65616e23277b80b7e54286ef9100
|
f2877cdd05447f35f14f13274ad35cc69a4cf743
|
refs/heads/master
| 2021-11-15T04:18:45.887279
| 2021-11-13T17:59:36
| 2021-11-13T17:59:36
| 131,906,757
| 2
| 0
| null | 2021-10-03T14:44:54
| 2018-05-02T21:16:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
import os
import datetime
import glob
from django.core.management.base import BaseCommand, CommandError
from cataclop.pmu.settings import SCRAP_DIR
from cataclop.pmu.parser import Parser
class Command(BaseCommand):
help = '''
Parse races json
eg. parse all January race from 2018:
parse "2018-01-*"
'''
def add_arguments(self, parser):
parser.add_argument('pattern', nargs='?', type=str, default=None)
parser.add_argument('--fast', action='store_true')
parser.add_argument('--predict', action='store_true')
def handle(self, *args, **options):
parser = Parser(SCRAP_DIR, fast=options.get('fast'), predict=options.get('predict'))
pattern = options.get('pattern', datetime.date.today().isoformat())
if pattern == 'today' or pattern is None:
pattern = datetime.date.today().isoformat()
elif pattern == 'yesterday':
pattern = (datetime.date.today() - datetime.timedelta(1)).isoformat()
elif pattern == 'tomorrow':
pattern = (datetime.date.today() + datetime.timedelta(1)).isoformat()
patterns = pattern.split()
for pattern in patterns:
pattern = os.path.join(SCRAP_DIR, pattern)
self.stdout.write('Parsing pattern {}'.format(pattern))
dirs = []
for dir in glob.glob(pattern):
dirs.append(dir)
dirs.sort()
self.stdout.write('Found {} days'.format(len(dirs)))
for dir in dirs:
date = os.path.basename(os.path.normpath(dir))
self.stdout.write('Parsing date {} ...'.format(date))
parser.parse(date, with_offline=True)
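
# Usage sketch (added; follows the help text and the date keywords handled above):
#   python manage.py parse "2018-01-*" --fast
#   python manage.py parse yesterday --predict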
|
[
"mathias.dusautoy@gmail.com"
] |
mathias.dusautoy@gmail.com
|
e013db38a0837bf7c595e87610026fddad637802
|
a97db7d2f2e6de010db9bb70e4f85b76637ccfe6
|
/leetcode/315-Count-of-Smaller-Numbers-After-Self.py
|
31782c0230812c2c31407b198e544e68ea275922
|
[] |
no_license
|
dongxiaohe/Algorithm-DataStructure
|
34547ea0d474464676ffffadda26a92c50bff29f
|
a9881ac5b35642760ae78233973b1608686730d0
|
refs/heads/master
| 2020-05-24T20:53:45.689748
| 2019-07-19T03:46:35
| 2019-07-19T03:46:35
| 187,463,938
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
import bisect

class Solution(object):
    def countSmaller(self, nums):
        # Walk the array right to left, keeping the values seen so far sorted;
        # the insertion point of each value is the count of smaller items after it.
        result, seen = [], []
        for num in nums[::-1]:
            position = bisect.bisect_left(seen, num)
            result.append(position)
            bisect.insort(seen, num)
        result.reverse()
        return result
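# Example (added): Solution().countSmaller([5, 2, 6, 1]) returns [2, 1, 1, 0].
# Each bisect.insort is O(n), so the whole right-to-left pass is O(n^2) worst
# case; a BIT or merge-sort approach would bring it down to O(n log n).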
|
[
"ddong@zendesk.com"
] |
ddong@zendesk.com
|
11c80e772988fc3cc861e709ea45390227d20fad
|
1e6e3528c9f2438d637507df33c0c977ddc95c3e
|
/p4/Pi/simplePN532.py
|
8a3d8218b410f96e6d0278d3f306b6cdc5aa8a49
|
[
"Apache-2.0"
] |
permissive
|
OTH-AW/isac-oth-aw
|
554ec49173d4bc76d72103d29ed7af17d157b50b
|
95731c373ec5f09191ebedc5488643d747970f5d
|
refs/heads/master
| 2023-04-26T16:11:36.613332
| 2021-03-30T07:12:37
| 2021-03-30T07:12:37
| 307,342,229
| 1
| 1
|
Apache-2.0
| 2021-05-12T13:21:52
| 2020-10-26T10:55:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,947
|
py
|
import ndef
import RPi.GPIO as GPIO
import pn532.pn532 as nfc
from pn532 import *
import json
class simplePN532:
#Settings
    maxBlocksUltralight = 220 # How many blocks can be written on an Ultralight tag
    allowedClassicBlocks = [1,2] # Which blocks may be written on a Classic tag
lenACB = len(allowedClassicBlocks)
    # Constructor
def __init__(self):
#SPI connection
#self.pn532 = PN532_SPI(debug=False, reset=20, cs=4)
#I2C connection
self.pn532 = PN532_I2C(debug=False, reset=20, req=16)
ic, ver, rev, support = self.pn532.get_firmware_version()
print("Firmware version: {0}.{1}".format(ver, rev))
self.pn532.SAM_configuration()
    # Destructor
def __del__(self):
GPIO.cleanup()
    # Builds an NDEF message from a string
    # Return: tuple (Boolean, Data)
    #   True  -> Data = bytearray
    #   False -> Data = error string
def ndefEncode(self,data):
        # JSON minimization
try:
data = json.loads(data)
except:
return False, "Fehler beim Kodieren (kein JSON)"
if len(json.dumps(data).replace(" ","")) > 830:
if 'storageProcesses' in data['state']:
data['state'].pop('storageProcesses')
if 'timeEstimate' in data['state']:
data['state'].pop('timeEstimate')
if 'timeCurrent' in data['state']:
data['state'].pop('timeCurrent')
if 'timeLeft' in data['state']:
data['state'].pop('timeLeft')
if 'completion' in data['state']:
data['state'].pop('completion')
if 'printStartingTime' in data['state']:
data['state'].pop('printStartingTime')
if 'controlProcesses' in data['state']:
CPLength = len(data['state']['controlProcesses'])
if len(json.dumps(data).replace(" ","")) > 830 and CPLength > 1:
lastControl = data['state']['controlProcesses'][CPLength -1]
data['state']['controlProcesses'] = []
data['state']['controlProcesses'].append(lastControl)
if len(json.dumps(data).replace(" ","")) > 830:
if 'order' in data:
if 'address' in data['order']['customer']:
data['order']['customer'].pop('address')
if 'address2' in data['order']['customer']:
data['order']['customer'].pop('address2')
if 'ort' in data['order']['customer']:
data['order']['customer'].pop('ort')
if len(json.dumps(data).replace(" ","")) > 830:
if 'order' in data:
if 'shape' in data['order']:
data['order'].pop('shape')
if 'color' in data['order']:
data['order'].pop('color')
if 'createdAt' in data['order']:
data['order'].pop('createdAt')
if 'createdAt' in data:
data.pop('createdAt')
data = json.dumps(data).replace(" ","")
        # Encode the records and the message
recordsLength=200
dataSplitted = [data[i:i+recordsLength] for i in range(0, len(data),recordsLength)]
recordList = []
for i in dataSplitted:
recordList.append(ndef.TextRecord(i))
try:
ndefData = bytearray(b''.join(ndef.message_encoder(recordList)))
except:
return False, "Fehler beim Kodieren"
        # Add the TLV wrapper
messageStart = bytearray(b'')
messageStart.extend([0x03])
dataLen = len(ndefData)
if dataLen > 65535:
return False, "String kann nicht kodiert werden! Grund: Zu lang"
if dataLen <= 254:
messageStart.extend([dataLen])
else:
            hex_string = '{:04x}'.format(dataLen) # format the length as four hex digits (two bytes)
messageStart.extend([0xFF])
messageStart.extend([int(hex_string[0:2],16)])
messageStart.extend([int(hex_string[2:4],16)])
ndefData = messageStart + ndefData
ndefData.extend([0xFE])
return True, ndefData
    # Removes the TLV wrapper plus empty fields and decodes the message
    # Arg: data as bytearray
    # Return: text record (access the payload via record.text)
def ndefDecode(self,data):
ndefLen = 0
        # Drop everything from the TLV terminator (0xFE) onwards
for i in range(len(data)):
if data[i] == 0xFE:
ndefLen = i
break
data = data[:ndefLen]
        # If data[1] is set to 0xFF (255), TLV uses 2 bytes for the length field, so we have to strip more bytes accordingly
try:
if data[1] == 0xFF:
data = data[4:]
else:
data = data[2:]
except:
return False, "Fehler beim Dekodieren (kein NDEF)"
try:
records = list(ndef.message_decoder(data))
except:
return False, "Fehler beim Dekodieren"
text = ""
for record in records:
text += record.text
return True, text
    # Returns the UID when a tag is found
    # Return: bytearray
def scanForTag(self):
uid = self.pn532.read_passive_target(timeout=0.5)
return uid
    # Writes a bytearray to an Ultralight or NTAG2XX tag
    # Arg: data as bytearray
    # Return: Boolean
def writeToUltralight(self,data):
arrLen = len(data)
        # Since 4 bytes must always be written per block, pad the bytearray accordingly
data.extend([0] * (4 - (arrLen%4)))
        # Write the individual blocks
for i in range(len(data) // 4):
if i >= self.maxBlocksUltralight:
return False
try:
self.pn532.ntag2xx_write_block(4 + i, data[i * 4:(i+1) * 4])
except:
print("Tag wurde entfernt oder kann nicht mehr erkannt werden!")
return False
return True
    # Reads the tag memory (only as many blocks as set in the settings above)
    # Return: tuple (Boolean, Data)
    #   True  -> data = bytearray
    #   False -> data = error string
def readFromUltralight(self):
data = b''
for i in range(self.maxBlocksUltralight):
try:
block = self.pn532.ntag2xx_read_block(4+i)
data += block
for byte in block:
if byte == 254:
return True, data
except:
return False, "Fehler beim Lesen"
return True, data
    # Arg: data as string
def writeToClassic(self,data,uid):
dataByteArr = bytearray(data, 'utf-8')
arrLen = len(dataByteArr)
        # Since 16 bytes must always be written per block, pad the bytearray accordingly
dataByteArr.extend([0] * (16 - (arrLen%16)))
#Schreiben der einzelnen Blocks
for i in range(len(dataByteArr) // 16):
if i >= self.lenACB:
return False
try:
                # Before reading or writing we must authenticate; here this uses the default key: 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF
self.pn532.mifare_classic_authenticate_block(uid, self.allowedClassicBlocks[i], nfc.MIFARE_CMD_AUTH_A,[0xFF,0xFF,0xFF,0xFF,0xFF,0xFF])
self.pn532.mifare_classic_write_block(self.allowedClassicBlocks[i], dataByteArr[i * 16:(i+1) * 16])
except:
print("Tag wurde entfernt oder kann nicht erkannt werden!")
return False
return True
def clearClassic(self,uid):
for i in self.allowedClassicBlocks:
try:
self.pn532.mifare_classic_authenticate_block(uid,i,nfc.MIFARE_CMD_AUTH_A,[0xFF,0xFF,0xFF,0xFF,0xFF,0xFF])
self.pn532.mifare_classic_write_block(i,[0]*16)
except:
return False
return True
    # Return: tuple (Boolean, Data)
    #   True  -> Data = string
    #   False -> Data = error string
def readFromClassic(self, uid):
data = b''
for i in range(self.lenACB):
try:
self.pn532.mifare_classic_authenticate_block(uid, self.allowedClassicBlocks[i], nfc.MIFARE_CMD_AUTH_A,[0xFF,0xFF,0xFF,0xFF,0xFF,0xFF])
data += self.pn532.mifare_classic_read_block(self.allowedClassicBlocks[i])
except:
return False, "Tag wurde entfernt oder kann nicht erkannt werden!"
leng = 0
for i in range(len(data)):
if data[i] == 0x00:
leng = i
break
content = data[:leng].decode("utf-8")
return True, content
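
# --- Usage sketch (added; not part of the original module) ---
# Assumes the I2C wiring configured in __init__ and an NTAG2XX tag on the
# reader; the JSON payload is a placeholder.
if __name__ == "__main__":
    reader = simplePN532()
    uid = reader.scanForTag()
    if uid is not None:
        ok, encoded = reader.ndefEncode('{"state": {}}')
        if ok and reader.writeToUltralight(encoded):
            ok, raw = reader.readFromUltralight()
            if ok:
                print(reader.ndefDecode(raw))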
|
[
"noreply@github.com"
] |
OTH-AW.noreply@github.com
|
f5a19b1ae24ce2bf1c9805074127074515fbdd55
|
85b394a7492b6cab7ea2279e0ae1a15f86c94228
|
/2019/Day4/1.py
|
9ff6e5b4032f61242092302e01ed8dd1b62d1488
|
[] |
no_license
|
nanites2000/AdventOfCode
|
861eb75e892f4a1c0aa9a77c5b55af8b1b815ae7
|
8d0dbf069c802539ce60861fc34549ef1b24fab8
|
refs/heads/master
| 2022-12-12T13:17:53.154356
| 2022-12-07T07:34:46
| 2022-12-07T07:34:46
| 226,938,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
import math
min = 347312
max = 805915
def split_num(value):
result = []
for i in range(5,-1,-1):
divisor = 10**i
digit = (math.floor(value/divisor))
result.append(digit)
value -= divisor * digit
return(result)
total = 0
for i in range(min, max + 1):
    digits = split_num(i)
    fail = False
    equal = False
    for j in range(len(digits) - 1):
        if digits[j] > digits[j + 1]:
            #print('fail', digits)
            fail = True
        if digits[j] == digits[j + 1]:
            equal = True
            #print(digits)
    if (not fail) and equal:
        #print(digits)
        total += 1
print(total)
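# Sanity checks (added) against the puzzle's published examples:
#   111111 qualifies (digits never decrease and contain a double),
#   223450 fails (the pair 5 -> 0 decreases),
#   123789 fails (no adjacent double).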
|
[
"gerrit.larsen@fortemtech.com"
] |
gerrit.larsen@fortemtech.com
|
b953669bd0d43e3eacdaee7ddec7bf3971f504a9
|
5696212aeb2a52ebd09e7514eab00c273190b9b3
|
/graph_components.py
|
1db20b4a8c4d7309d300e0e0eaecb4820b5ab88d
|
[] |
no_license
|
n0skii/problem_solving
|
01cce66d0b503f66d1302bd20e666034218c6400
|
afd1ee2a790b7f9fd92b2d4b93f7bbf07a127043
|
refs/heads/master
| 2023-08-01T14:55:24.389499
| 2021-09-16T10:07:51
| 2021-09-16T10:07:51
| 396,303,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,309
|
py
|
#!/bin/python3
import os
#
# Complete the 'componentsInGraph' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts 2D_INTEGER_ARRAY gb as parameter.
#
class Cluster:
def __init__(self, clusterNum) -> None:
self.clusterNum = clusterNum
self.items = 0
def componentsInGraph(gb):
overallMap = dict()
clusterMap = dict()
currentClusterNum = 1
# Write your code here
for edge in gb:
firstIn = edge[0] in overallMap
secondIn = edge[1] in overallMap
if not firstIn and not secondIn:
toAdd = Cluster(currentClusterNum)
toAdd.items += 2
overallMap[edge[0]] = currentClusterNum
overallMap[edge[1]] = currentClusterNum
clusterMap[currentClusterNum] = toAdd
currentClusterNum += 1
elif not firstIn:
id, toChange = getAndChangeIds(clusterMap[overallMap[edge[1]]], clusterMap)
finalCluster = clusterMap[id]
for iid in toChange:
clusterMap[iid] = finalCluster
clusterMap[id].items += 1
overallMap[edge[0]] = overallMap[edge[1]]
elif not secondIn:
id, toChange = getAndChangeIds(clusterMap[overallMap[edge[0]]], clusterMap)
finalCluster = clusterMap[id]
for iid in toChange:
clusterMap[iid] = finalCluster
clusterMap[id].items += 1
overallMap[edge[1]] = overallMap[edge[0]]
else:
leftClusterId, toChange = getAndChangeIds(
clusterMap[overallMap[edge[0]]], clusterMap
)
leftCluster = clusterMap[leftClusterId]
for iid in toChange:
clusterMap[iid] = leftCluster
rightClusterId, toChange = getAndChangeIds(
clusterMap[overallMap[edge[1]]], clusterMap
)
rightCluster = clusterMap[rightClusterId]
for iid in toChange:
clusterMap[iid] = rightCluster
if (
clusterMap[leftClusterId].clusterNum
!= clusterMap[rightClusterId].clusterNum
):
clusterMap[leftClusterId].items += clusterMap[rightClusterId].items
clusterMap[rightClusterId] = clusterMap[leftClusterId]
min = float("inf")
max = float("-inf")
for value in overallMap.values():
length = clusterMap[value].items
if clusterMap[value].clusterNum == value:
if length < min:
min = length
if length > max:
max = length
return [min, max]
def getAndChangeIds(node: Cluster, clusterMap: dict):
id = node.clusterNum
toChange = list()
while clusterMap[id].clusterNum != id:
toChange.append(id)
id = clusterMap[id].clusterNum
return [id, toChange]
if __name__ == "__main__":
fptr = open(os.environ["OUTPUT_PATH"], "w")
n = int(input().strip())
gb = []
for _ in range(n):
gb.append(list(map(int, input().rstrip().split())))
result = componentsInGraph(gb)
fptr.write(" ".join(map(str, result)))
fptr.write("\n")
fptr.close()
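# Worked example (added): componentsInGraph([[1, 2], [1, 3], [4, 5]]) returns
# [2, 3] -- the smallest component is {4, 5}, the largest {1, 2, 3}.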
|
[
"pavel070900@gmail.com"
] |
pavel070900@gmail.com
|