Dataset schema (one row per source file; string/list entries show min-max observed length, int64 entries show min-max observed value):

Column                Type           Observed range / values
--------------------  -------------  ------------------------
blob_id               string         length 40-40
directory_id          string         length 40-40
path                  string         length 2-616
content_id            string         length 40-40
detected_licenses     list           length 0-69
license_type          string         2 distinct values
repo_name             string         length 5-118
snapshot_id           string         length 40-40
revision_id           string         length 40-40
branch_name           string         length 4-63
visit_date            timestamp[us]  -
revision_date         timestamp[us]  -
committer_date        timestamp[us]  -
github_id             int64          2.91k - 686M
star_events_count     int64          0 - 209k
fork_events_count     int64          0 - 110k
gha_license_id        string         23 distinct values
gha_event_created_at  timestamp[us]  -
gha_created_at        timestamp[us]  -
gha_language          string         220 distinct values
src_encoding          string         30 distinct values
language              string         1 distinct value
is_vendor             bool           2 classes
is_generated          bool           2 classes
length_bytes          int64          2 - 10.3M
extension             string         257 distinct values
content               string         length 2 - 10.3M
authors               list           length 1-1
author_id             string         length 0-212
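Not part of the dump itself: a minimal typed sketch of one record, with field names and types transcribed from the schema above. Optional marks columns that appear as null in the rows below, and datetime stands in for the timestamp[us] columns.

from datetime import datetime
from typing import List, Optional, TypedDict

class Row(TypedDict):
    # Transcribed from the schema table; lengths and class counts are
    # observed statistics of the dump, not constraints enforced here.
    blob_id: str                  # SHA-1 hex digest, always 40 chars
    directory_id: str
    path: str
    content_id: str
    detected_licenses: List[str]
    license_type: str             # 2 distinct values, e.g. "no_license" / "permissive"
    repo_name: str
    snapshot_id: str
    revision_id: str
    branch_name: str
    visit_date: datetime
    revision_date: datetime
    committer_date: datetime
    github_id: Optional[int]
    star_events_count: int
    fork_events_count: int
    gha_license_id: Optional[str]
    gha_event_created_at: Optional[datetime]
    gha_created_at: Optional[datetime]
    gha_language: Optional[str]
    src_encoding: str             # e.g. "UTF-8"
    language: str                 # single value in this dump: "Python"
    is_vendor: bool
    is_generated: bool
    length_bytes: int
    extension: str                # e.g. "py"
    content: str                  # the raw source file, shown per row below
    authors: List[str]
    author_id: str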
8b10368f10167003f9ab9226c9b33eb43a5c1b2e
8f7bcd652fa10320c19da46d09260aaf11659a59
/src/logger_special.py
343ed25f22bb60394f57d9e7351fdb10d5384852
[]
no_license
EmanuelSamir/mapless-curiosity-driven-exploration
d226c1206064ee6877a3eea02a0409ff6f6aeb22
2bd399a6488b3a216aefdd3cb35107185ea31846
refs/heads/main
2023-07-14T21:29:36.690412
2021-08-31T21:43:33
2021-08-31T21:43:33
401,846,751
0
0
null
null
null
null
UTF-8
Python
false
false
1,577
py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import json
import pickle
from torch import save as tsave
import torch
from .utils import create_dir
from datetime import datetime


class LoggerSpecial:
    def __init__(self, algorithm):
        # Logger saves data
        # Updates eps x steps x feature_num
        # Saves at special w/ name algorithm date
        ## csv and description
        self.features = []
        self.steps = []

        fn_date = datetime.now().strftime("_%m%d_%H-%M-%S")
        self.save_special_path = os.path.join("../specials", algorithm, fn_date)
        create_dir(self.save_special_path)

    def set_description(self, comment):
        description = {
            'comment': comment
        }
        fn = os.path.join(self.save_special_path, 'description.pth')
        out_file = open(fn, 'w+')
        json.dump(description, out_file)

    def update(self, step, feature):
        self.steps.append(step)
        self.features.append(feature)

    def consolidate(self, episode):
        folder = os.path.join(self.save_special_path,
                              'e{}_n{}'.format(episode, self.steps[-1]))
        create_dir(folder)
        fn = os.path.join(folder, 'data.csv')

        self.features = map(list, zip(*self.features))
        d = {
            'steps': self.steps,
        }
        for i, feat in enumerate(self.features):
            d['f{}'.format(i)] = feat

        df = pd.DataFrame(d)
        df.to_csv(fn, mode='w', index=False)

        self.steps = []
        self.features = []
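Not from the repository: a hypothetical driver showing one way to exercise the class above, with made-up step and feature values. It assumes a writable ../specials directory and the sibling utils.create_dir helper the module imports.

# Illustration only: log two features per step, flush to CSV per episode.
logger = LoggerSpecial("dqn")              # creates ../specials/dqn/<timestamp>
logger.set_description("smoke-test run")   # writes description.pth as JSON
for episode in range(2):
    for step in range(100):
        logger.update(step, [step * 0.1, step * 0.2])  # feature vector per step
    logger.consolidate(episode)            # writes e{episode}_n{last_step}/data.csv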
[ "samiremp.2@gmail.com" ]
samiremp.2@gmail.com
766d8e38421e476ad21261a9e76adf41efd0c1f6
2566e318ce81db1e1713a7dbcb5da8e8dd38c74d
/mk/update-travis-yml.py
4f15448e7e4823c06ac8bc1a2e6fec09b253364c
[ "OpenSSL", "MIT", "ISC", "LicenseRef-scancode-mit-taylor-variant", "LicenseRef-scancode-openssl", "LicenseRef-scancode-ssleay-windows", "LicenseRef-scancode-unknown-license-reference" ]
permissive
nyantec/ring
9f6a741cac845ecc9793ec2c22000e6f5c1352e2
3b3fa9eef65d4a4049e353a7e75f071345519dae
refs/heads/master
2021-10-07T10:31:36.728412
2019-01-10T14:26:15
2019-01-10T14:26:15
132,122,634
0
0
null
2018-07-11T14:51:34
2018-05-04T09:57:20
Assembly
UTF-8
Python
false
false
8,575
py
# Run this as "python mk/update-travis-yml.py"

# Copyright 2015 Brian Smith.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND BRIAN SMITH AND THE AUTHORS DISCLAIM
# ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL BRIAN SMITH OR THE AUTHORS
# BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import re
import shutil

rusts = [
    "stable",
    "nightly",
    "beta",
]

linux_compilers = [
    # Assume the default compiler is GCC. This is run first because it is the
    # one most likely to break, especially since GCC 4.6 is the default
    # compiler on Travis CI for Ubuntu 12.04, and GCC 4.6 is not supported by
    # BoringSSL.
    "",

    # Newest clang and GCC.
    "clang-5.0",
    "gcc-7",
]

# Clang 3.4 and GCC 4.6 are already installed by default.
linux_default_clang = "clang-3.4"

osx_compilers = [
    "", # Don't set CC.'
]

compilers = {
    "aarch64-unknown-linux-gnu" : [ "aarch64-linux-gnu-gcc" ],
    "armv7-linux-androideabi" : [ "arm-linux-androideabi-clang" ],
    "arm-unknown-linux-gnueabihf" : [ "arm-linux-gnueabihf-gcc" ],
    "i686-unknown-linux-gnu" : linux_compilers,
    "x86_64-unknown-linux-gnu" : linux_compilers,
    "x86_64-apple-darwin" : osx_compilers,
}

feature_sets = [
    "",
]

modes = [
    "DEBUG",
    "RELWITHDEBINFO"
]

# Mac OS X is first because we don't want to have to wait until all the Linux
# configurations have been built to find out that there is a failure on Mac.
oss = [
    "osx",
    "linux",
]

targets = {
    "osx" : [
        "x86_64-apple-darwin",
    ],
    "linux" : [
        "armv7-linux-androideabi",
        "x86_64-unknown-linux-gnu",
        "aarch64-unknown-linux-gnu",
        "i686-unknown-linux-gnu",
        "arm-unknown-linux-gnueabihf",
    ],
}

def format_entries():
    return "\n".join([format_entry(os, target, compiler, rust, mode, features)
                      for rust in rusts
                      for os in oss
                      for target in targets[os]
                      for compiler in compilers[target]
                      for mode in modes
                      for features in feature_sets])

# We use alternative names (the "_X" suffix) so that, in mk/travis.sh, we can
# ensure that we set the specific variables we want and that no relevant
# variables are unintentionally inherited into the build process. Also, we
# have to set |CC_X| instead of |CC| since Travis sets |CC| to its Travis CI
# default value *after* processing the |env:| directive here.
entry_template = """
    - env: TARGET_X=%(target)s %(compilers)s FEATURES_X=%(features)s MODE_X=%(mode)s KCOV=%(kcov)s
      rust: %(rust)s
      os: %(os)s"""

entry_indent = "      "

entry_packages_template = """
      addons:
        apt:
          packages:
            %(packages)s"""

entry_sources_template = """
          sources:
            %(sources)s"""

def format_entry(os, target, compiler, rust, mode, features):
    # Currently kcov only runs on Linux.
    #
    # GCC 5 was picked arbitrarily to restrict coverage report to one build
    # for efficiency reasons.
    #
    # Cargo passes RUSTFLAGS to rustc only in Rust 1.9 and later. When Rust
    # 1.9 is released then we can change this to run (also) on the stable
    # channel.
    #
    # DEBUG mode is needed because debug symbols are needed for coverage
    # tracking.
    kcov = (os == "linux" and compiler == "gcc-5" and rust == "nightly"
            and mode == "DEBUG")

    target_words = target.split("-")
    arch = target_words[0]
    vendor = target_words[1]
    sys = target_words[2]

    if sys == "darwin":
        abi = sys
        sys = "macos"
    elif sys == "androideabi":
        abi = sys
        sys = "linux"
    else:
        abi = target_words[3]

    def prefix_all(prefix, xs):
        return [prefix + x for x in xs]

    template = entry_template

    if sys == "linux":
        packages = sorted(get_linux_packages_to_install(target, compiler,
                                                        arch, kcov))
        sources_with_dups = sum([get_sources_for_package(p)
                                 for p in packages], [])
        sources = sorted(list(set(sources_with_dups)))

    # TODO: Use trusty for everything?
    if arch in ["aarch64", "arm", "armv7"]:
        template += """
      dist: trusty
      sudo: required"""

    if sys == "linux":
        if packages:
            template += entry_packages_template
        if sources:
            template += entry_sources_template
    else:
        packages = []
        sources = []

    cc = get_cc(sys, compiler)

    if os == "osx":
        os += "\n" + entry_indent + "osx_image: xcode9.3"

    compilers = []
    if cc != "":
        compilers += ["CC_X=" + cc]
    compilers += ""

    return template % {
        "compilers": " ".join(compilers),
        "features" : features,
        "mode" : mode,
        "kcov": "1" if kcov == True else "0",
        "packages" : "\n            ".join(prefix_all("- ", packages)),
        "rust" : rust,
        "sources" : "\n            ".join(prefix_all("- ", sources)),
        "target" : target,
        "os" : os,
    }

def get_linux_packages_to_install(target, compiler, arch, kcov):
    if compiler in ["", linux_default_clang]:
        packages = []
    elif compiler.startswith("clang-") or compiler.startswith("gcc-"):
        packages = [compiler]
    else:
        packages = []

    if target == "aarch64-unknown-linux-gnu":
        packages += ["gcc-aarch64-linux-gnu", "libc6-dev-arm64-cross"]
    if target == "arm-unknown-linux-gnueabihf":
        packages += ["gcc-arm-linux-gnueabihf", "libc6-dev-armhf-cross"]
    if target == "armv7-linux-androideabi":
        packages += ["expect", "openjdk-6-jre-headless"]

    if arch == "i686":
        if kcov == True:
            packages += ["libcurl3:i386", "libcurl4-openssl-dev:i386",
                         "libdw-dev:i386", "libelf-dev:i386",
                         "libkrb5-dev:i386", "libssl-dev:i386"]
        if compiler.startswith("clang-") or compiler == "":
            packages += ["libc6-dev-i386", "gcc-multilib"]
        elif compiler.startswith("gcc-"):
            packages += [compiler + "-multilib", "linux-libc-dev:i386"]
        else:
            raise ValueError("unexpected compiler: %s" % compiler)
    elif arch == "x86_64":
        if kcov == True:
            packages += ["libcurl4-openssl-dev", "libelf-dev", "libdw-dev",
                         "binutils-dev"]
    elif arch not in ["aarch64", "arm", "armv7"]:
        raise ValueError("unexpected arch: %s" % arch)

    return packages

def get_sources_for_package(package):
    ubuntu_toolchain = "ubuntu-toolchain-r-test"
    if package.startswith("clang-"):
        _, version = package.split("-")
        llvm_toolchain = "llvm-toolchain-trusty-%s" % version
        # Stuff in llvm-toolchain-trusty depends on stuff in the toolchain
        # packages.
        return [llvm_toolchain, ubuntu_toolchain]
    else:
        return [ubuntu_toolchain]

def get_cc(sys, compiler):
    if sys == "linux" and compiler == linux_default_clang:
        return "clang"
    return compiler

def main():
    # Make a backup of the file we are about to update.
    shutil.copyfile(".travis.yml", ".travis.yml~")

    with open(".travis.yml", "r+b") as file:
        begin = "    # BEGIN GENERATED\n"
        end = "    # END GENERATED\n"
        old_contents = file.read()
        new_contents = re.sub("%s(.*?)\n[ ]*%s" % (begin, end),
                              "".join([begin, format_entries(), "\n\n", end]),
                              old_contents, flags=re.S)
        if old_contents == new_contents:
            print "No changes"
            return

        file.seek(0)
        file.write(new_contents)
        file.truncate()
        print new_contents

if __name__ == '__main__':
    main()
[ "brian@briansmith.org" ]
brian@briansmith.org
1f01924e59a9a35f46bb3ddaa5e7f3a0b028cb8f
9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100
/dqn_new/configs/target7.py
70d57a14af0c64a3a6b36deb10a442f6035c220c
[]
no_license
SiyuanLee/caps
0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3
476802e18ca1c7c88f1e29ed66a90c350aa50c1f
refs/heads/master
2021-06-20T22:48:16.230354
2021-02-22T13:21:57
2021-02-22T13:21:57
188,695,489
1
2
null
null
null
null
UTF-8
Python
false
false
3,819
py
""" This is the example config file """ import numpy as np # More one-char representation will be added in order to support # other objects. # The following a=10 is an example although it does not work now # as I have not included a '10' object yet. a = 10 # This is the map array that represents the map # You have to fill the array into a (m x n) matrix with all elements # not None. A strange shape of the array may cause malfunction. # Currently available object indices are # they can fill more than one element in the array. # 0: nothing # 1: wall # 2: ladder # 3: coin # 4: spike # 5: triangle -------source # 6: square ------ source # 7: coin -------- target # 8: princess -------source # 9: player # elements(possibly more than 1) filled will be selected randomly to place the player # unsupported indices will work as 0: nothing map_array = [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 5, 0, 0, 0, 1, 0, 0, 0, 0, 1], [1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1], [1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1], [1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1], [1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1], [1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1], [1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1], [1, 0, 2, 0, 1, 0, 2, 0, 6, 0, 1], [1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] # set to true -> win when touching the object # 0, 1, 2, 3, 4, 9 are not possible end_game = { 7: True, } rewards = { "positive": 0, # when collecting a coin "win": 1, # endgame (win) "negative": -25, # endgame (die) "tick": 0 # living } ######### dqn only ########## # ensure correct import import os import sys __file_path = os.path.abspath(__file__) __dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/' sys.path.append(__dqn_dir) __cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/' from dqn_utils import PiecewiseSchedule # load the random sampled obs # import pickle # pkl_file = __cur_dir + 'eval_obs_array_random.pkl' # with open(pkl_file, 'rb') as f: # eval_obs_array = pickle.loads(f.read()) def seed_func(): return np.random.randint(0, 1000) num_timesteps = 2.5e7 learning_freq = 4 # training iterations to go num_iter = num_timesteps / learning_freq # piecewise learning rate lr_multiplier = 1.0 learning_rate = PiecewiseSchedule([ (0, 2e-4 * lr_multiplier), (num_iter / 2, 1e-4 * lr_multiplier), (num_iter * 3 / 4, 5e-5 * lr_multiplier), ], outside_value=5e-5 * lr_multiplier) # piecewise learning rate exploration = PiecewiseSchedule([ (0, 1.0), (num_iter / 2, 0.7), (num_iter * 3 / 4, 0.1), (num_iter * 7 / 8, 0.05), ], outside_value=0.05) dqn_config = { 'seed': seed_func, # will override game settings 'num_timesteps': num_timesteps, 'replay_buffer_size': 1000000, 'batch_size': 32, 'gamma': 0.99, 'learning_starts': 8e5, 'learning_freq': learning_freq, 'frame_history_len': 4, 'target_update_freq': 10000, 'grad_norm_clipping': 10, 'learning_rate': learning_rate, 'exploration': exploration, # 'eval_obs_array': eval_obs_array, 'room_q_interval': 1e4, # q_vals will be evaluated every room_q_interval steps 'epoch_size': 5e4, # you decide any way 'config_name': str.split(__file_path, '/')[-1].replace('.py', '') # the config file name } map_config = { 'map_array': map_array, 'rewards': rewards, 'end_game': end_game, 'init_score': 0, 'init_lives': 1, # please don't change, not going to work # configs for dqn 'dqn_config': dqn_config, # work automatically only for aigym wrapped version 'fps': 1000, 'frame_skip': 1, 'force_fps': True, # set to true to make the game run as fast as possible 'display_screen': False, 'episode_length': 1200, 'episode_end_sleep': 0., # sec }
[ "lisiyuan@bupt.edu.cn" ]
lisiyuan@bupt.edu.cn
873a50c46a52d30c4946df44db2f075572592055
8536c27cbb8265d1fbc1ddd45e2081fd01abdfa7
/ML/practice/Lab2/feature_selection/main.py
74689bf76f0bd8e5d21265cdf5e06535f84ed526
[]
no_license
tsimafeip/master-course
03e6dd8e87ceebd4a67c636459579b796a03df97
3035792666fe167b2052e1d482768df2241e1d67
refs/heads/master
2021-07-20T06:55:25.152586
2021-01-03T19:28:40
2021-01-03T19:28:59
230,803,994
0
1
null
null
null
null
UTF-8
Python
false
false
2,940
py
#!/usr/bin/env python3
import argparse
import logging
import os
import time

import numpy as np
import sklearn.datasets
import sklearn.linear_model

import features

THRESHOLD = 0.80


def _parse_args():
    parser = argparse.ArgumentParser(prog='bsu 2019 / ml / hw 2')
    parser.add_argument('--datadir', help='path to folder to cache data',
                        default=os.getcwd())
    return parser.parse_args()


def _filter_data(x, y, digits):
    """Create subset with only specified digits."""
    rx, ry = [], []
    for cx, cy in zip(x, y):
        cy = int(cy)
        if cy in digits:
            rx.append(cx)
            ry.append(digits.index(cy))
    return np.array(rx), np.array(ry)


def _main(args):
    sklearn_home = args.datadir
    with open(r'C:\Users\lybot\OneDrive\Документы\Магистратура\Машинное обучение\practice\Lab2\2\feature_selection\result.txt', "w"):
        pass
    logging.info('Downloading MNIST data')
    mnist = sklearn.datasets.fetch_openml('mnist_784', data_home=sklearn_home)
    logging.info('Data is ready')

    solved_cases = 0
    minimal_result = 1.
    average_result = 0.
    start_time = time.process_time()

    for da in range(10):
        for db in range(da + 1, 10):
            # logging.info('Processing case: {} vs {}'.format(da, db))
            X, Y = _filter_data(mnist['data'], mnist['target'], [da, db])

            # logging.info('Computing features')
            fs = features.FEATURES[(da, db)]
            assert len(fs) == 2, "We want exactly two feature functions"
            X2 = [(fs[0](x), fs[1](x)) for x in X]

            # logging.info('Training logistic regression classifier')
            cls = sklearn.linear_model.LogisticRegression(solver='liblinear')
            cls.fit(X2, Y)
            # logging.info('Done training')

            result = cls.score(X2, Y)
            with open(r'C:\Users\lybot\OneDrive\Документы\Магистратура\Машинное обучение\practice\Lab2\2\feature_selection\result.txt', 'a') as the_file:
                the_file.write('Case {} vs {}: {:.1f}%\n'.format(da, db, result * 100))
            logging.info('Case {} vs {}: {:.1f}%'.format(da, db, result * 100))
            if result >= THRESHOLD:
                # logging.info('Case is solved')
                solved_cases += 1
            else:
                pass
                # logging.warning('Case is not solved!')
            minimal_result = min(minimal_result, result)
            average_result += result

    elapsed_time = time.process_time() - start_time
    average_result /= 45

    print('Solved cases: {}'.format(solved_cases))
    print('Minimal result: {:.1f}%'.format(minimal_result * 100))
    print('Average result: {:.1f}%'.format(average_result * 100))
    print('Elapsed time: {:.1f} second(s)'.format(elapsed_time))


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    _main(_parse_args())
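The features module imported above is not included in this row. The following is a purely hypothetical sketch of the shape it must have, inferred from its usage: FEATURES is keyed by a digit pair, and each value holds exactly two callables over a flattened 784-pixel MNIST image.

import numpy as np

def _ink(x):
    # Total ink: a crude digit size/thickness proxy.
    return float(np.sum(x)) / 255.0

def _top_heaviness(x):
    # Ink in the top half minus the bottom half of the 28x28 image.
    img = np.asarray(x, dtype=float).reshape(28, 28)
    return float(img[:14].sum() - img[14:].sum()) / 255.0

# One (feature, feature) pair per digit pair, matching the len(fs) == 2 assert.
FEATURES = {(da, db): (_ink, _top_heaviness)
            for da in range(10) for db in range(da + 1, 10)}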
[ "noreply@github.com" ]
tsimafeip.noreply@github.com
88e75c46abb9494b3a6c173c9d4edbb771ad30b3
83951f7fd0bbaba9675bdf9ba6980504213bc1c6
/skim/crab/skim_QCD_Pt-15to7000_Flat2017_cfg.py
f4567da99bb4f470b3019a97ec8411522789b737
[]
no_license
DryRun/DijetSkimmer
6db71583b969ecc64841da26107f43c4c734ca43
ead65f8e2a5d11f99f3e1a60a1d2f9a163e68491
refs/heads/main
2021-07-22T19:41:09.096943
2021-07-14T13:01:00
2021-07-14T13:01:00
171,485,404
0
1
null
null
null
null
UTF-8
Python
false
false
2,340
py
import os

from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB

config = Configuration()

job_name = "DijetSkim_QCD_Pt-15to7000_Flat2017_1_0_1"

config.section_("General")
config.General.requestName = job_name
config.General.transferLogs = False

config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Setup the custom executable
config.JobType.psetName = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/PSet.py')  # CRAB modifies this file to contain the input files and lumis
config.JobType.scriptExe = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_shell.sh')  # CRAB then calls scriptExe jobId <scriptArgs>
config.JobType.scriptArgs = ["--source=mc", "--year=2017"]
config.JobType.inputFiles = [
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_meat.py'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),  # haddnano will not be needed once nano tools are in cmssw
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_data.txt'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_mc.txt'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches.txt'),
    #os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/FrameworkJobReport.xml'),
]
config.JobType.outputFiles = ["nanoskim.root", "hists.root"]
config.JobType.sendPythonFolder = True
config.JobType.allowUndistributedCMSSW = True

config.section_("Data")
#config.Data.inputDataset = '/JetHT/Run2018C-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 4
#config.Data.totalUnits = 10
config.JobType.allowUndistributedCMSSW = True
config.Data.outLFNDirBase = '/store/user/{}/{}'.format(getUsernameFromSiteDB(), job_name)
config.Data.publication = False
config.Data.outputDatasetTag = job_name
#config.Data.ignoreLocality = True

config.section_("Site")
config.Site.storageSite = "T3_US_Brown"

config.Data.inputDataset = '/QCD_Pt-15to7000_TuneCP5_Flat2017_13TeV_pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/NANOAODSIM'
[ "david.renhwa.yu@gmail.com" ]
david.renhwa.yu@gmail.com
3f008a682cd719d81b222f36983c87310b67f103
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
/402.py
631b928370b0e9eabec5dcf010eca20cf6babf83
[]
no_license
saleed/LeetCode
655f82fdfcc3000400f49388e97fc0560f356af0
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
refs/heads/master
2022-06-15T21:54:56.223204
2022-05-09T14:05:50
2022-05-09T14:05:50
209,430,056
2
0
null
null
null
null
UTF-8
Python
false
false
751
py
class Solution(object):
    def removeKdigits(self, num, k):
        """
        :type num: str
        :type k: int
        :rtype: str
        """
        # dp[i][j]: smallest string obtainable from num[:i+1] after
        # removing exactly j digits (a len(num) x (k+1) table).
        dp = [["" for _ in range(k + 1)] for _ in range(len(num))]
        for i in range(len(num)):
            dp[i][0] = num[:i + 1]
        for j in range(1, k + 1):
            dp[0][j] = ""
        for i in range(1, len(num)):
            for j in range(k, 0, -1):
                # Either drop num[i] (dp[i-1][j-1]) or keep it (dp[i-1][j] + num[i]).
                dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j] + num[i])
        # print(dp)
        res = dp[len(num) - 1][k].lstrip('0')
        if res == "":
            return '0'
        else:
            return res


a = Solution()
num = "1432219"
k = 3
print(a.removeKdigits(num, k))
num = "10200"
k = 1
print(a.removeKdigits(num, k))
test = '00002000'
print(test.lstrip('0'))
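Beyond the table above: the O(n*k) DP is not needed for this problem ("Remove K Digits", LeetCode 402). A common alternative is an O(n) greedy over a monotonically non-decreasing stack; a sketch for comparison:

def remove_k_digits_greedy(num, k):
    """Pop any digit that is larger than its successor, at most k times."""
    stack = []
    for d in num:
        while k and stack and stack[-1] > d:
            stack.pop()
            k -= 1
        stack.append(d)
    # If removals remain, the digits are non-decreasing: trim from the end.
    res = "".join(stack[:len(stack) - k] if k else stack).lstrip("0")
    return res or "0"

assert remove_k_digits_greedy("1432219", 3) == "1219"
assert remove_k_digits_greedy("10200", 1) == "200"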
[ "1533441387@qq.com" ]
1533441387@qq.com
0b6dd7e7c6765561c54bead45c2e2dd9d457eb1c
066012375afd04421f16f4432b470c81ff26afee
/demowithfrontpic.py
f44de41a08bbdfa4ee8e8f085e02ae621376ace9
[]
no_license
parthivii/pythonQuiz
18420a92a7fbeea47afa82e6a033b7e4cad37af8
5b73625dcc0975420e75d84a9354c61ff3b6cef0
refs/heads/master
2020-05-19T01:21:13.528848
2019-05-03T12:56:39
2019-05-03T12:56:39
184,754,101
0
1
null
null
null
null
UTF-8
Python
false
false
5,721
py
from Tkinter import *

root = Tk()

def wish():
    root.destroy()
    root1 = Tk()
    Label(root1, text='Hi There Human!....\n', relief='ridge',
          font='times 25 bold italic', bg='white', width=16,
          bd=3).grid(row=0, column=0, columnspan=4)
    Label(root1, text='Please Enter Your Name....\n', relief='ridge',
          font='times 15 bold italic', bg='pink', width=20,
          bd=5).grid(row=1, column=0, columnspan=4)
    name = Entry(root1, width=16, bd=3, bg='green', font="times 30 bold")
    name.grid(row=2, column=0, columnspan=4)

    def first():
        root1.destroy()
        root2 = Tk()
        Label(root2, text="What is the tip of shoelace called?").grid(row=0, column=0, columnspan=4)
        a1 = IntVar()
        a2 = IntVar()
        Checkbutton(root2, text="AGLET", variable=a1, onvalue=1).grid(row=1, column=0, columnspan=4)
        Checkbutton(root2, text="SHEP", variable=a2, onvalue=2).grid(row=2, column=0, columnspan=4)

        def second():
            root2.destroy()
            root3 = Tk()
            Label(root3, text="What is the world's longest river?").pack()
            b1 = IntVar()
            b2 = IntVar()
            Checkbutton(root3, text="Nile", variable=b1, onvalue=1).pack()
            Checkbutton(root3, text="Amazon", variable=b2, onvalue=2).pack()

            def third():
                root3.destroy()
                root4 = Tk()
                Label(root4, text="When did the cold war end?").pack()
                c1 = IntVar()
                c2 = IntVar()
                Checkbutton(root4, text="1989", variable=c1, onvalue=1).pack()
                Checkbutton(root4, text="1967", variable=c2, onvalue=2).pack()

                def fourth():
                    root4.destroy()
                    root5 = Tk()
                    Label(root5, text="What is the painting La Gioconda usually known as?").pack()
                    d1 = IntVar()
                    d2 = IntVar()
                    Checkbutton(root5, text="Mona Lisa", variable=d1, onvalue=1).pack()
                    Checkbutton(root5, text="The Vancouver Fort", variable=d2, onvalue=2).pack()

                    def fifth():
                        root5.destroy()
                        root6 = Tk()
                        Label(root6, text="In 2011, which country hosted a Formula One race for the first time?").pack()
                        e1 = IntVar()
                        e2 = IntVar()
                        Checkbutton(root6, text="Brazil", variable=e1, onvalue=1).pack()
                        Checkbutton(root6, text="India", variable=e2, onvalue=2).pack()

                        def result():
                            root6.destroy()
                            root7 = Tk()
                            s = 0
                            c = 0
                            i = 0
                            if int(a1.get()) == 1:
                                s = s + 1
                                c = c + 1
                            if int(a2.get()) == 2:
                                i = i + 1
                            if int(b1.get()) == 1:
                                i = i + 1
                            if int(b2.get()) == 2:
                                s = s + 1
                                c = c + 1
                            if int(c1.get()) == 1:
                                s = s + 1
                                c = c + 1
                            if int(c2.get()) == 2:
                                i = i + 1
                            if int(d1.get()) == 1:
                                s = s + 1
                                c = c + 1
                            if int(d2.get()) == 2:
                                i = i + 1
                            if int(e1.get()) == 1:
                                i = i + 1
                            if int(e2.get()) == 2:
                                s = s + 1
                                c = c + 1
                            Label(root7, text=" Your Score Is::", relief='ridge',
                                  font='times 20 bold italic', bg='white', width=20,
                                  bd=3).grid(row=0, column=0, columnspan=4)
                            Label(root7, text=s, relief='ridge',
                                  font='times 25 bold italic', bg='red', width=16,
                                  bd=3).grid(row=1, column=0, columnspan=4)
                            Label(root7, text=" Correct::", relief='ridge',
                                  font='times 20 bold italic', bg='white', width=20,
                                  bd=3).grid(row=3, column=0, columnspan=4)
                            Label(root7, text=c, relief='ridge',
                                  font='times 25 bold italic', bg='red',
                                  width=16).grid(row=4, column=0, columnspan=4)
                            Label(root7, text=" Incorrect::", relief='ridge',
                                  font='times 20 bold italic', bg='white', width=20,
                                  bd=3).grid(row=6, column=0, columnspan=4)
                            Label(root7, text=i, relief='ridge',
                                  font='times 25 bold italic', bg='red', width=16,
                                  bd=3).grid(row=7, column=0, columnspan=4)
                            root7.mainloop()

                        Button(root6, text="Next!!", width=10, height=1,
                               bg="yellow", command=result).pack()
                        root6.mainloop()

                    Button(root5, text="Next!!", width=10, height=1,
                           bg="yellow", command=fifth).pack()
                    root5.mainloop()

                Button(root4, text="Next!!", width=10, height=1,
                       bg="yellow", command=fourth).pack()
                root4.mainloop()

            Button(root3, text="Next!!", width=10, height=1,
                   bg="yellow", command=third).pack()
            root3.mainloop()

        Button(root2, text="Next!!", width=10, height=1,
               bg="yellow", command=second).grid(row=5, column=0, columnspan=4)
        root2.mainloop()

    Button(root1, text="Bring It On!!", width=16, height=4, bg="red",
           command=first, bd=3).grid(row=3, column=0, columnspan=4)
    root1.mainloop()

b = PhotoImage(file='namee.gif')
lb = Label(root, image=b)
lb.after(5000, wish)
lb.pack()
root.mainloop()
[ "parthivisrivastava14@gmail.com" ]
parthivisrivastava14@gmail.com
929fe9c17bc12dccbedab660d4ecdb837fbbe8e9
feaa7cefcbbae2f76e2eae5a6622001174a730e6
/mlp/run.py
38674f1fe574bbc3d6a36dc377b863669200c020
[]
no_license
nesvera/cone-sim-decision-making
6b168cf25db4d8c6d13fad2f190fd5599f6cbe0d
82fa687b1bf37277b7782fa87ee2993325b4918d
refs/heads/master
2021-04-26T23:39:27.738181
2018-03-05T02:17:42
2018-03-05T02:17:42
123,832,645
0
0
null
null
null
null
UTF-8
Python
false
false
3,552
py
import socket
import sys
import csv
import numpy
import os
import glob
import random
import numpy as np
import signal
import string
import sys

print(sys.argv)

from keras.models import Sequential
from keras.models import model_from_json

# Datalogger to save information
input_log = []
prediction_log = []

# Set up UDP communication
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Configure UDP
receive_address = ('127.0.0.1', 5000)
send_address = ('127.0.0.1', 5001)

print('Receiving on ' + str(receive_address) + ' port.')
print('Sending on ' + str(send_address) + ' port.\n')

# Open socket
sock.bind(receive_address)

# Define a function to close the socket, because if not the program blocks on recvfrom
def sigint_handler(signum, frame):
    # Save log files
    np.savetxt('input_log.csv', input_log, fmt='%.2f', delimiter=';')
    np.savetxt('prediction_log.csv', prediction_log, fmt='%.2f', delimiter=';')
    # print(prediction_log)

    # Need to press twice CTRL-C
    # print("Press CTRL-C another time!")

    # Close socket
    # sock.close()

    sys.exit(0)

# Assign the sigint_handler to CTRL-C and exit
signal.signal(signal.SIGINT, sigint_handler)

# Load json and create model
json_file = open('./model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)

# Load weights into the new model
loaded_model.load_weights("./model/model.h5")
loaded_model.summary()

print("\nEnable self-driving mode on CONE-SIM...")

# Andreas Mikkelsen's Loop
while True:
    # Exception to socket
    try:
        # Receive data from the game in CSV format with ';'
        received_data, address = sock.recvfrom(4096)

        if received_data:
            # Split received data into a numpy array
            #telemetry = np.array(list(csv.reader(received_data, delimiter=";", quoting=csv.QUOTE_NONNUMERIC)))
            #telemetry = csv.reader(received_data, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
            telemetry = np.array(string.split(received_data, ';'), dtype=float)

            # Log received data
            #input_log.append(telemetry)
            #print(telemetry)

            # Keep only the features the NN expects
            #data_p1 = telemetry[1:6]  # Throttle, brake, steering, handbrake, speed
            data_p1 = telemetry[5]
            data_p2 = telemetry[10:46]  # lidar from 0 to 180 degrees

            # Concatenate the input features (37 features)
            input_data = np.append(data_p1, data_p2)
            #input_data[4] /= 150.
            #input_data[5:] /= 15.

            # Cheat
            #input_data[:3] = 0
            #print(input_data)

            # Predict commands: throttle, brake, steering, handbrake
            prediction = loaded_model.predict(input_data.reshape(1, 37))
            prediction = prediction[0]  # [[...]] -> [...]: first row of a 1-row matrix

            #print(prediction)

            # Log predictions
            prediction_log.append(prediction)

            # Create a package of the commands to send to the game
            #cmd_msg = str(prediction[0]) + ";" + str(prediction[1]) + ";" + str(prediction[2]) + ";" + str(prediction[3])
            cmd_msg = '{0:.3f};{1:.3f};{2:.3f};{3:.3f}'.format(abs(prediction[0]), 0, prediction[2], 0)
            print(cmd_msg)

            sock.sendto(cmd_msg, send_address)

    finally:
        #print(received_data)
        #print("\n")
        #print("ops")
        pass
[ "daniel.nesvera@ecomp.ufsm.br" ]
daniel.nesvera@ecomp.ufsm.br
6875c1efa0c892f299bb8144237a6b5cd8379ccf
c3faea1f28b9ef70d833cb2e5fb595902bd4f17d
/ferris/deferred_app.py
c09e71531d4e707d1b301324901bd2d7735b9b07
[ "MIT", "Apache-2.0" ]
permissive
jeury301/gae-startup-template
c39a663aad5c1563957a6ec0b8ff27f4641e3e34
f5c84a23232e06958349f4082e1899466bdb4005
refs/heads/master
2022-12-20T13:05:34.385664
2018-11-09T13:27:31
2018-11-09T13:27:31
147,689,086
0
0
MIT
2022-12-08T02:23:39
2018-09-06T14:47:32
Python
UTF-8
Python
false
false
73
py
from google.appengine.ext.deferred import application

app = application
[ "jeurymejia@nypl.org" ]
jeurymejia@nypl.org
0de8bffe96bc6aec7bba0c7fb5de1491c599b977
dbc3b767f9d079fd76a7ffb3c61f71df4ab3f945
/fortytwo_test_task/settings/common.py
d7b1a3e2521bd5de6e791c92e71e2b14de9d78de
[]
no_license
hugoalvarado/FortyTwoTestTask
6acb83c1f17303bb3cfa005cb60f79ae97cc28b8
98946f8463f5dc6d71635338efb559e3513bad73
refs/heads/master
2021-01-21T23:45:20.201139
2016-07-24T05:30:07
2016-07-24T05:30:07
64,047,485
0
0
null
2016-07-24T03:49:04
2016-07-24T03:49:04
null
UTF-8
Python
false
false
3,720
py
""" Django settings for fortytwo_test_task project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import sys PROJECT_DIR = os.path.dirname(os.path.dirname(__file__)) BASE_DIR = os.path.dirname(PROJECT_DIR) # App/Library Paths sys.path.append(os.path.join(BASE_DIR, 'apps')) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'x=c0_e(onjn^80irdy2c221#)2t^qi&6yrc$31i(&ti*_jf3l8' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'apps.hello', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'fortytwo_test_task.urls' WSGI_APPLICATION = 'fortytwo_test_task.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Upload Media # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'uploads') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/uploads/' # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(BASE_DIR, 'static') # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(BASE_DIR, 'assets'), ) # Template Settings TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or # "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(BASE_DIR, 'templates'), ) # Turn off south during test SOUTH_TESTS_MIGRATE = False
[ "hugo102@gmail.com" ]
hugo102@gmail.com
e90d3a43b730ba904ef5e59f19aeea3c7ce1f151
32692811b7e6dacb156fbf04a5e599cf4f5d5141
/interface/test-teacher/testlogin.py
85761577ceaf40b9592a72d36901b36caef8ad3c
[]
no_license
2229157983/test
7b4593892a14d8741b2208c50edce9b58bf8ccca
2ae1a7b822659a397d74580de3120ecf40ba995a
refs/heads/master
2023-03-06T01:04:33.744347
2021-03-02T02:04:29
2021-03-02T02:04:29
341,459,660
0
0
null
null
null
null
UTF-8
Python
false
false
499
py
import requests
import unittest


class LoginTest(unittest.TestCase):
    def testlogin(self):
        url = "http://www.jasonisoft.cn:8080/HKR/UserServlet"
        data = {
            "method": "login",
            "loginname": "root11",
            "password": "1111111"
        }
        expect = "菜单"  # "menu": expected in the response after a successful login
        response = requests.get(url=url, data=data)
        response.encoding = "utf-8"
        data = response.text
        self.assertIn(expect, data)
[ "noreply@github.com" ]
2229157983.noreply@github.com
c5dbd32d211c6f31a6c33de8200d745eb073847c
f95175c2ed06f371faba5b4ac5332d0f5b01a6ac
/FloorPlan/apps.py
dc84c76bb740d86219063ec8d05227e0be8f6850
[]
no_license
floor-plan/FloorPlan
8dcb4f16d503ac713bd7324c747103012a68c778
28d97de9dbcad1dd3f149c0af49db8a495b57c70
refs/heads/master
2021-09-27T06:53:25.275322
2020-07-08T20:27:32
2020-07-08T20:27:32
248,983,897
0
1
null
2021-09-22T18:51:20
2020-03-21T13:41:01
Python
UTF-8
Python
false
false
93
py
from django.apps import AppConfig


class FloorplanConfig(AppConfig):
    name = 'FloorPlan'
[ "bmiller3822@gmail.com" ]
bmiller3822@gmail.com
87c625f739a6cd6794d4fd2ee1051c177d9f2046
1dbf66345b5a70c736a2155271a779b1e4292882
/tools/display-stats
8f57c3e4b27d5cf86167f9fa8e3deaf5de95e041
[ "BlueOak-1.0.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
michael-lazar/mozz-archiver
5a16405fbc433360be9bcaa404d0a879ce83b855
12617d2efca91663699647654bcd3e40f5f388f2
refs/heads/master
2023-01-13T10:52:11.012343
2020-11-17T03:42:20
2020-11-17T03:42:20
296,190,512
15
0
null
null
null
null
UTF-8
Python
false
false
1,888
#!/usr/bin/env python3
"""
Display some statistics for an archive based on the generated index file.
"""
import argparse
import sqlite3

parser = argparse.ArgumentParser(description="Display statistics for an index file")
parser.add_argument('index_db')
args = parser.parse_args()

conn = sqlite3.connect(args.index_db, isolation_level=None)
conn.row_factory = sqlite3.Row

print(f"Parsing index database {args.index_db}...")
print("")

c = conn.execute("SELECT COUNT() FROM requests")
total = c.fetchone()[0]
print(f"Total Attempted       : {total}")

c = conn.execute("SELECT COUNT() FROM requests WHERE error_message IS NULL")
success = c.fetchone()[0]
print(f"Total Successful      : {success}")

c = conn.execute("SELECT COUNT() FROM requests WHERE error_message IS NOT NULL")
failed = c.fetchone()[0]
print(f"Total Failed          : {failed}")

c = conn.execute("SELECT COUNT(DISTINCT netloc) FROM requests WHERE netloc IS NOT NULL")
domains = c.fetchone()[0]
print(f"Total Domains Crawled : {domains}")

print("")
print("1. Successful Response Codes")
print("")
print("Count   Code")
print("-----   ----")
c = conn.execute(
    "SELECT response_status, COUNT() FROM requests "
    "WHERE response_status IS NOT NULL "
    "GROUP BY response_status ORDER BY COUNT() DESC")
for row in c:
    print(f"{row[1]:<8}{row[0]}")

print("")
print("2. Failed Request Reasons")
print("")
print("Count   Error Message")
print("-----   -------------")
c = conn.execute(
    "SELECT error_message, COUNT() FROM requests "
    "WHERE error_message IS NOT NULL "
    "GROUP BY error_message ORDER BY COUNT() DESC")
for row in c:
    print(f"{row[1]:<8}{row[0]}")

print("")
print("3. Crawled URLs by domain")
print("")
print("Count   Domain")
print("-----   ------")
c = conn.execute(
    "SELECT netloc, COUNT() FROM requests "
    "WHERE netloc IS NOT NULL "
    "GROUP BY netloc ORDER BY COUNT() DESC")
for row in c:
    print(f"{row[1]:<8}{row[0]}")
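The script only relies on three columns of a requests table. A hypothetical fixture (schema inferred from the SELECT statements above; the real crawler index likely carries more columns) for trying the tool out:

import sqlite3

# Build a tiny index database matching the columns the script queries.
conn = sqlite3.connect("index-fixture.db")
conn.execute("""
    CREATE TABLE requests (
        netloc          TEXT,
        response_status TEXT,
        error_message   TEXT
    )
""")
conn.executemany(
    "INSERT INTO requests VALUES (?, ?, ?)",
    [
        ("example.org", "20", None),         # a success
        ("example.org", None, "timed out"),  # a failure
        ("other.site", "51", None),
    ],
)
conn.commit()
conn.close()
# Then: ./tools/display-stats index-fixture.db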
[ "mlazar@doctorondemand.com" ]
mlazar@doctorondemand.com
65a1193be03b44bc201c6c6ca6908d3216957e8e
03d8f86afa4ccc2fe981b7e50f6ad8fdcf730bb4
/backend/main.py
bcbcfa1ff612fb98a8b044e56722599a340fbd4f
[]
no_license
tecnd/ismyclassonline
51ea77895139444e9559abb12c70a450587e4611
c52e0f1e61ff55fc4f880dc8d98b264b8001f9e3
refs/heads/master
2023-01-28T00:49:52.835143
2020-11-15T19:55:26
2020-11-15T19:55:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,218
py
from bs4 import BeautifulSoup
import requests


def tag_scraper(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object.
        <http://flask.pocoo.org/docs/1.0/api/#flask.Request>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
    """
    request_form = request.form
    rescode = 200
    if request.form and 'subject' in request_form:
        subject = request_form['subject'].upper()
        number = request_form['number'].upper()
        section = request_form['section'].zfill(2)
        URL = "https://courselist.wm.edu/courselist/courseinfo/searchresults?term_code=202120&term_subj={}&attr=0&attr2=0&levl=0&status=0&ptrm=0&search=Search".format(subject)
        r = requests.get(URL)
        if r.status_code != 200:
            res = "Subject code not found"
        else:
            soup = BeautifulSoup(r.text, 'html5lib')
            table = soup.table
            code = table.find("td", string=subject + ' ' + number + ' ' + section + ' ')
            if code is None:
                res = "Number/section not found"
            else:
                tags, name = code.find_next_siblings("td", limit=2)
                res = "<b>" + code.text + name.text + "</b>"
                tagslist = tags.text.split(", ")
                if "FS" in tagslist:
                    res += "<p>FS: Face to face, Synchronous</p>"
                elif "MIX" in tagslist:
                    res += "<p>MIX: Mix of in-person and remote</p>"
                elif "RA" in tagslist:
                    res += "<p>RA: Remote, Asynchronous</p>"
                elif "RSOC" in tagslist:
                    res += "<p>RSOC: Remote, Synchronous on Campus</p>"
                elif "RSOF" in tagslist:
                    res += "<p>RSOF: Remote, Synchronous off Campus</p>"
                else:
                    res += "<p>Delivery attribute not found</p>"
    else:
        res = "Bad request"
        rescode = 400
    headers = {
        'Access-Control-Allow-Origin': '*'
    }
    return (res, rescode, headers)
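Not part of the row: a hypothetical client call against a deployed copy of the function (the URL is a placeholder), exercising the subject/number/section form fields the handler reads.

import requests

resp = requests.post(
    "https://REGION-PROJECT.cloudfunctions.net/tag_scraper",  # placeholder URL
    data={"subject": "csci", "number": "141", "section": "1"},
)
print(resp.status_code)  # 400 with "Bad request" when fields are missing
print(resp.text)         # otherwise an HTML snippet describing the delivery mode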
[ "kwzeyunwang@gmail.com" ]
kwzeyunwang@gmail.com
186a3b0eaa286a7a614388dcc19fba81ffbc22cd
8c44ca5d4b82e504459b8d04f00ee1530e443274
/clients/python/lakefs_client/api/objects_api.py
b0c5bb9f507aaefcd48392b2d130588ec4560f44
[ "Apache-2.0" ]
permissive
nopcoder/lakeFS
c60620b8ce32dc603e03da4915d5ebec39a290e8
3b817bd7fc3479b082ec0bbc92574fe373ae18e3
refs/heads/master
2023-04-18T05:41:35.973323
2021-04-26T20:05:17
2021-04-26T20:05:17
359,791,391
2
0
Apache-2.0
2021-04-20T11:30:55
2021-04-20T11:30:55
null
UTF-8
Python
false
false
37,723
py
""" lakeFS API lakeFS HTTP API # noqa: E501 The version of the OpenAPI document: 0.1.0 Contact: services@treeverse.io Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from lakefs_client.api_client import ApiClient, Endpoint as _Endpoint from lakefs_client.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) from lakefs_client.model.error import Error from lakefs_client.model.object_stage_creation import ObjectStageCreation from lakefs_client.model.object_stats import ObjectStats from lakefs_client.model.object_stats_list import ObjectStatsList from lakefs_client.model.underlying_object_properties import UnderlyingObjectProperties class ObjectsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def __delete_object( self, repository, branch, path, **kwargs ): """delete object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_object(repository, branch, path, async_req=True) >>> result = thread.get() Args: repository (str): branch (str): path (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: None If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['branch'] = \ branch kwargs['path'] = \ path return self.call_with_http_info(**kwargs) self.delete_object = _Endpoint( settings={ 'response_type': None, 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/branches/{branch}/objects', 'operation_id': 'delete_object', 'http_method': 'DELETE', 'servers': None, }, params_map={ 'all': [ 'repository', 'branch', 'path', ], 'required': [ 'repository', 'branch', 'path', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'branch': (str,), 'path': (str,), }, 'attribute_map': { 'repository': 'repository', 'branch': 'branch', 'path': 'path', }, 'location_map': { 'repository': 'path', 'branch': 'path', 'path': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__delete_object ) def __get_object( self, repository, ref, path, **kwargs ): """get object content # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_object(repository, ref, path, async_req=True) >>> result = thread.get() Args: repository (str): ref (str): a reference (could be either a branch or a commit ID) path (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: file_type If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['ref'] = \ ref kwargs['path'] = \ path return self.call_with_http_info(**kwargs) self.get_object = _Endpoint( settings={ 'response_type': (file_type,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/refs/{ref}/objects', 'operation_id': 'get_object', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'repository', 'ref', 'path', ], 'required': [ 'repository', 'ref', 'path', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'ref': (str,), 'path': (str,), }, 'attribute_map': { 'repository': 'repository', 'ref': 'ref', 'path': 'path', }, 'location_map': { 'repository': 'path', 'ref': 'path', 'path': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/octet-stream', 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_object ) def __get_underlying_properties( self, repository, ref, path, **kwargs ): """get object properties on underlying storage # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_underlying_properties(repository, ref, path, async_req=True) >>> result = thread.get() Args: repository (str): ref (str): a reference (could be either a branch or a commit ID) path (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: UnderlyingObjectProperties If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['ref'] = \ ref kwargs['path'] = \ path return self.call_with_http_info(**kwargs) self.get_underlying_properties = _Endpoint( settings={ 'response_type': (UnderlyingObjectProperties,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/underlyingProperties', 'operation_id': 'get_underlying_properties', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'repository', 'ref', 'path', ], 'required': [ 'repository', 'ref', 'path', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'ref': (str,), 'path': (str,), }, 'attribute_map': { 'repository': 'repository', 'ref': 'ref', 'path': 'path', }, 'location_map': { 'repository': 'path', 'ref': 'path', 'path': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_underlying_properties ) def __list_objects( self, repository, ref, **kwargs ): """list objects under a given prefix # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_objects(repository, ref, async_req=True) >>> result = thread.get() Args: repository (str): ref (str): a reference (could be either a branch or a commit ID) Keyword Args: prefix (str): [optional] after (str): return items after this value. [optional] amount (int): how many items to return. [optional] if omitted the server will use the default value of 100 delimiter (str): [optional] if omitted the server will use the default value of "/" _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ObjectStatsList If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['ref'] = \ ref return self.call_with_http_info(**kwargs) self.list_objects = _Endpoint( settings={ 'response_type': (ObjectStatsList,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/ls', 'operation_id': 'list_objects', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'repository', 'ref', 'prefix', 'after', 'amount', 'delimiter', ], 'required': [ 'repository', 'ref', ], 'nullable': [ ], 'enum': [ ], 'validation': [ 'amount', ] }, root_map={ 'validations': { ('amount',): { 'inclusive_maximum': 1000, 'inclusive_minimum': -1, }, }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'ref': (str,), 'prefix': (str,), 'after': (str,), 'amount': (int,), 'delimiter': (str,), }, 'attribute_map': { 'repository': 'repository', 'ref': 'ref', 'prefix': 'prefix', 'after': 'after', 'amount': 'amount', 'delimiter': 'delimiter', }, 'location_map': { 'repository': 'path', 'ref': 'path', 'prefix': 'query', 'after': 'query', 'amount': 'query', 'delimiter': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__list_objects ) def __stage_object( self, repository, branch, path, object_stage_creation, **kwargs ): """stage an object\"s metadata for the given branch # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.stage_object(repository, branch, path, object_stage_creation, async_req=True) >>> result = thread.get() Args: repository (str): branch (str): path (str): object_stage_creation (ObjectStageCreation): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ObjectStats If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['branch'] = \ branch kwargs['path'] = \ path kwargs['object_stage_creation'] = \ object_stage_creation return self.call_with_http_info(**kwargs) self.stage_object = _Endpoint( settings={ 'response_type': (ObjectStats,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/branches/{branch}/objects', 'operation_id': 'stage_object', 'http_method': 'PUT', 'servers': None, }, params_map={ 'all': [ 'repository', 'branch', 'path', 'object_stage_creation', ], 'required': [ 'repository', 'branch', 'path', 'object_stage_creation', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'branch': (str,), 'path': (str,), 'object_stage_creation': (ObjectStageCreation,), }, 'attribute_map': { 'repository': 'repository', 'branch': 'branch', 'path': 'path', }, 'location_map': { 'repository': 'path', 'branch': 'path', 'path': 'query', 'object_stage_creation': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__stage_object ) def __stat_object( self, repository, ref, path, **kwargs ): """get object metadata # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.stat_object(repository, ref, path, async_req=True) >>> result = thread.get() Args: repository (str): ref (str): a reference (could be either a branch or a commit ID) path (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ObjectStats If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['ref'] = \ ref kwargs['path'] = \ path return self.call_with_http_info(**kwargs) self.stat_object = _Endpoint( settings={ 'response_type': (ObjectStats,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/stat', 'operation_id': 'stat_object', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'repository', 'ref', 'path', ], 'required': [ 'repository', 'ref', 'path', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'ref': (str,), 'path': (str,), }, 'attribute_map': { 'repository': 'repository', 'ref': 'ref', 'path': 'path', }, 'location_map': { 'repository': 'path', 'ref': 'path', 'path': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__stat_object ) def __upload_object( self, repository, branch, path, **kwargs ): """upload_object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.upload_object(repository, branch, path, async_req=True) >>> result = thread.get() Args: repository (str): branch (str): path (str): Keyword Args: storage_class (str): [optional] content (file_type): Object content to upload. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ObjectStats If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['repository'] = \ repository kwargs['branch'] = \ branch kwargs['path'] = \ path return self.call_with_http_info(**kwargs) self.upload_object = _Endpoint( settings={ 'response_type': (ObjectStats,), 'auth': [ 'basic_auth', 'cookie_auth', 'jwt_token' ], 'endpoint_path': '/repositories/{repository}/branches/{branch}/objects', 'operation_id': 'upload_object', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'repository', 'branch', 'path', 'storage_class', 'content', ], 'required': [ 'repository', 'branch', 'path', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'repository': (str,), 'branch': (str,), 'path': (str,), 'storage_class': (str,), 'content': (file_type,), }, 'attribute_map': { 'repository': 'repository', 'branch': 'branch', 'path': 'path', 'storage_class': 'storageClass', 'content': 'content', }, 'location_map': { 'repository': 'path', 'branch': 'path', 'path': 'query', 'storage_class': 'query', 'content': 'form', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'multipart/form-data' ] }, api_client=api_client, callable=__upload_object )
[ "noreply@github.com" ]
nopcoder.noreply@github.com
4bb00b163473e32f7db8ba0fc41a3438d8541698
909f787b07de220e76d2d49de536b7c073ea94ef
/virtual/bin/email_validator
18118bb6aa748c42d5b0547186dd8be03a668a65
[ "MIT" ]
permissive
Elisephan/Elevator-Pitch
7e710c2d57e30c096f84921fb24d5b876a4b1e78
5f087d07bae763a8af293c1b3417db0ebe9ec3b9
refs/heads/master
2023-01-29T10:04:20.332562
2020-12-07T18:33:50
2020-12-07T18:33:50
318,587,202
0
0
null
null
null
null
UTF-8
Python
false
false
250
#!/home/user/Desktop/Elevator_pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from email_validator import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "elise.ntakirutimana@gmail.com" ]
elise.ntakirutimana@gmail.com
958a75ab50cf92aa3f4243c6b47edba3f8c0b023
d554b1aa8b70fddf81da8988b4aaa43788fede88
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4140/codes/1593_1802.py
997c8783e8cfb6c3d78bc17c96cca711247bd924
[]
no_license
JosephLevinthal/Research-projects
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
60d5fd6eb864a5181f4321e7a992812f3c2139f9
refs/heads/master
2022-07-31T06:43:02.686109
2020-05-23T00:24:26
2020-05-23T00:24:26
266,199,309
1
0
null
null
null
null
UTF-8
Python
false
false
127
py
balrog = int(input())
d1 = int(input())
d2 = int(input())

from math import *

dano = int(sqrt(5*d1) + pi**(d2/3))
print(balrog - dano)
[ "jvlo@icomp.ufam.edu.br" ]
jvlo@icomp.ufam.edu.br
b220a593afe723680a0f1b9da20f77c60d53e27f
30dd0a3698d06b29800943ab5f0328e3019e5608
/ch2/p9.py
959c9a5e8e707cbeedb746a9d41ae94b4b841f09
[]
no_license
theamankumarsingh/automate-the-boring-stuff-with-python
837e6fc6c42ac672d315a98da6d9be4755e991cc
ecf03bb64970a80ed57572db563c451864f76263
refs/heads/master
2023-02-13T22:24:20.004033
2021-01-08T10:22:59
2021-01-08T10:22:59
324,995,765
5
0
null
null
null
null
UTF-8
Python
false
false
112
py
spam = int(input())
if spam == 1:
    print("Hello")
elif spam == 2:
    print("Howdy")
else:
    print("Greetings!")
[ "amankumarsingh.professional@gmail.com" ]
amankumarsingh.professional@gmail.com
3dae7f83d5bdd601cbe53c167dd715e7f1a20e2b
c5390221fc6b12933a5f0f877fbb4c5e349f0eb8
/env/bin/django-admin.py
3164d9747c2998ecd4fbfb9d552363ee7aec6074
[]
no_license
shannonphu/xspense
e8131f96835df1599baac642757c16200d3418f2
42f1d9ca5c825c56d3a876517f4592540498ba52
refs/heads/master
2021-01-19T13:02:13.856016
2015-08-23T18:37:56
2015-08-23T18:37:56
40,105,053
0
0
null
null
null
null
UTF-8
Python
false
false
162
py
#!/Users/shannon/Desktop/Django/xspense/env/bin/python3
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
[ "shannonphu@sbcglobal.net" ]
shannonphu@sbcglobal.net
11a3b54a12af9a6d287edfead2ec004be81b18c7
5be992e6ac6bae2ebf938005d1cae93777825087
/space/research/genelab.py
34513f8b9468f68b837529823a4942d5eab865ce
[]
no_license
a1aiintel/SpaceIsCool
0c88acaa966c85e31d73da8319966c218447158f
939641dbe626a2cbb9fcec845c18bfb3371118ad
refs/heads/master
2020-07-30T04:54:14.577501
2019-01-10T17:57:52
2019-01-10T17:57:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,083
py
import requests

from space import NASA_KEY


def search_genelab(query, type):
    """
    GeneLab provides a RESTful Application Programming Interface (API) to its
    full-text search capability, which provides the same functionality
    available through the GeneLab public data repository website. The API
    provides a choice of standardized web output formats, such as JavaScript
    Object Notation (JSON) or Hyper Text Markup Language (HTML), of the
    search results. The GeneLab Search API can also federate with other
    heterogeneous external bioinformatics databases, such as the National
    Institutes of Health (NIH) / National Center for Biotechnology
    Information's (NCBI) Gene Expression Omnibus (GEO); the European
    Bioinformatics Institute's (EBI) Proteomics Identification (PRIDE); the
    Argonne National Laboratory's (ANL) Metagenomics Rapid Annotations using
    Subsystems Technology (MG-RAST).

    :param query: free-text search term, e.g. "mouse liver"
    :param type: data source to search, e.g. "cgene" for GeneLab studies
    :return: decoded JSON search results
    """
    # The original body hard-coded term=mouse%20liver&type=cgene into the URL,
    # ignored both parameters, and returned nothing (an earlier find/replace
    # had also mangled "search" into "search_exoplanet" throughout). The
    # parameterized request and the api_key usage below are a best-effort
    # reconstruction, not taken from the original file.
    url = "https://genelab-data.ndc.nasa.gov/genelab/data/search"
    params = {"term": query, "type": type, "api_key": NASA_KEY}
    return requests.get(url, params=params).json()
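# --- Illustrative usage added by the editor (not in the original file). ---
# A minimal, hedged sketch exercising the fixed function above; the search
# term and type mirror the values the original body had hard-coded.
if __name__ == "__main__":
    hits = search_genelab("mouse liver", "cgene")
    print(hits)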
[ "jarbasai@mailfence.com" ]
jarbasai@mailfence.com
25328fb0492fe750697b3767b53d440d4e3da0b8
e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163
/ScientificComputing/ch14/filter_firdesign_sinc1.py
cfb39fc541dac9e8bb9246523bf73a615acecbeb
[]
no_license
socrates77-sh/learn
a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b
ae50978023f6b098b168b8cca82fba263af444aa
refs/heads/master
2022-12-16T16:53:50.231577
2019-07-13T13:52:42
2019-07-13T13:52:42
168,442,963
0
0
null
2022-12-08T05:18:37
2019-01-31T01:30:06
HTML
UTF-8
Python
false
false
363
py
# -*- coding: utf-8 -*-
import scipy.signal as signal
import numpy as np
import pylab as pl


def h_ideal(n, fc):
    # Impulse response of an ideal low-pass filter with cutoff fc,
    # truncated to n taps (one-sided, starting at sample 0).
    return 2*fc*np.sinc(2*fc*np.arange(0, n, 1.0))


b = h_ideal(30, 0.25)
w, h = signal.freqz(b, 1)
pl.figure(figsize=(8, 4))
pl.plot(w/2/np.pi, 20*np.log10(np.abs(h)))
pl.xlabel(u"Normalized frequency (cycles/sample)")
pl.ylabel(u"Magnitude (dB)")
pl.show()
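# --- Illustrative addition by the editor (not in the original file). ---
# Truncating the ideal sinc response with a rectangular window, as above,
# leaves large ripple near the band edge (the Gibbs phenomenon). A common
# refinement -- sketched here, not taken from the book's code -- is to taper
# the taps with a smoother window such as Hamming before plotting again.
b_windowed = b * np.hamming(len(b))
w2, h2 = signal.freqz(b_windowed, 1)
pl.figure(figsize=(8, 4))
pl.plot(w2/2/np.pi, 20*np.log10(np.abs(h2)))
pl.xlabel(u"Normalized frequency (cycles/sample)")
pl.ylabel(u"Magnitude (dB)")
pl.show()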
[ "zhwenrong@sina.com" ]
zhwenrong@sina.com
6c323d4661d704d9094075404c537d930bd2b707
ab3ce5ec371bbd069843e93eaebe3aad5b98e9d2
/my query/Python_postgre_sql/Refresher/20_dictionary_comprehenstions.py
89a1fa41e2c9f63fadbf622b344a1f139b8084e1
[]
no_license
katlehotsopane/SQL2
d947012cae4fa2131a253944d81b0d48ba8e5f10
58379214c3244ba466fb02e7f12f4a907debb676
refs/heads/main
2023-07-03T12:31:19.589899
2021-08-05T19:14:13
2021-08-05T19:14:13
393,133,909
0
0
null
null
null
null
UTF-8
Python
false
false
458
py
users = [
    (0, "Bob", "password"),
    (1, "Rolf", "bob123"),
    (2, "Jose", "longp4assword"),
    (3, "username", "1234"),
]

username_mapping = {user[1]: user for user in users}

username_input = input("Enter your username: ")
password_input = input("Enter your password: ")

_, username, password = username_mapping[username_input]

if password_input == password:
    print("Your details are correct!")
else:
    print("Your details are incorrect!")
[ "noreply@github.com" ]
katlehotsopane.noreply@github.com
8d78d90dadbede0d5810dabceead66a19ea45ef8
dc9dc62a5012f5be638ab481cf201bf58020cfda
/BigData/spark_stream.py
aac15a6b99fdeef516abc210229a64099cf9486a
[]
no_license
suryknt/Practice
d208a536cb7b1f290e1994047a984ca9e1a1eeb6
07f2677f0662e0a9a180bb0794ea513f0595e639
refs/heads/master
2021-01-12T09:55:12.818742
2019-05-06T21:59:07
2019-05-06T21:59:07
76,297,468
0
0
null
2019-05-06T21:55:40
2016-12-12T21:34:08
Matlab
UTF-8
Python
false
false
2,549
py
from __future__ import print_function from pyspark import SparkContext from pyspark.streaming import StreamingContext from pyspark.sql import SQLContext from pyspark.sql.functions import desc from collections import namedtuple import json import time import sys from pyspark.sql.types import Row from pyspark.sql.functions import desc sc=SparkContext(appName="MyTwitterCount") sc.setLogLevel("ERROR") windowInterval = 10 ssc=StreamingContext(sc,windowInterval) sqlContext = SQLContext(sc) # ssc.checkpoint( "C:/Projects/machine_learning/Rec-Eng/checkpoint") tweetDstream=ssc.socketTextStream("172.16.99.228",5555) # lines = tweetDstream.window( 20 ) def extractTweetText(tweetJson,doprint=False): if not tweetJson: tweetJson="" if doprint: print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$tweet$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$") print(tweetJson) print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$tweet$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$") return tweetJson else: return tweetJson print(sqlContext) TagCount = namedtuple("TagCount", ("tag","count")) fields = ("tag", "count" ) Tweet = namedtuple( 'Tweet', fields ) try: ( tweetDstream.map(lambda tweet: extractTweetText(tweet)) .flatMap(lambda text: text.split(" ")) .filter(lambda word: word.lower().startswith("#")) .map(lambda word: (word.lower(), 1)) .reduceByKey(lambda a, b: a + b) .map(lambda rec: Tweet(rec[0], rec[1])) .foreachRDD(lambda rdd: rdd.toDF().sort(desc("count")) .limit(10).registerTempTable("tweets") if not rdd.isEmpty() else print("")) # .flatMap(lambda text: text.split(" ")) # .filter(lambda word: word.startswith("#")) # .map(lambda word: word.lower(),1) # .reduceByKey(lambda a,b: a+b) # .map(lambda rec: TagCount(rec[0],rec[1])) # .foreachRDD(lambda rdd: rdd.toDF()) ) except BaseException as e: print("Error while processing: %s" % str(e)) ssc.start() print(sqlContext) count=0 while count < 100: time.sleep(15) count += 1 try: top_10=sqlContext.sql("select tag, count from tweets order by count") for row in top_10.collect(): print(row.tag,row["count"]) print("-------------------------------------") except BaseException as e: print("-------No Hashtags-------") ssc.awaitTermination()
[ "suryknt@gmail.com" ]
suryknt@gmail.com
b324becd61a6557a6cb89150aa2716d6bed8add6
0b874d304dd2ea9ac5ef8e02e812a0e00bca0994
/051-数组中重复的数字.py
ce52a775225bc248e945b399493089285127e2a9
[]
no_license
sjtupig/codingInterviews
02316b3ded4de5e4be37a7675e4d56dd4b56f3cb
a79cd14bd8c7a3d501505ee155e0958b1299de66
refs/heads/master
2020-04-10T21:55:31.473115
2019-03-06T04:42:00
2019-03-06T04:42:00
161,310,590
3
0
null
null
null
null
UTF-8
Python
false
false
2,879
py
# -*- coding:utf-8 -*-
class Solution:
    # Note: find any one of the duplicated values and assign it to duplication[0].
    # The function returns True/False.
    def duplicate(self, numbers, duplication):
        # write code here
        ns = []
        for i in numbers:
            if i < 0 or i > len(numbers) - 1:
                return False
        for i in numbers:
            if i not in ns:
                ns.append(i)
            else:
                duplication[0] = i
                return True
        return False


'''
# Validate the input
Check that the arguments are legal: every number in the array must lie in the
range 0 to n-1.

# Sort, then look for duplicates
The simplest idea is to sort the input array first. Finding a duplicate in a
sorted array is then easy: just scan it once from head to tail.
Sorting an array costs $O(n\log n)$.
Scanning a sorted array for duplicates costs $O(n)$.

## Sign-bit marking
Every element lies in [0, n) and is non-negative, so the sign bit carries no
information for us. We can therefore use the sign bit directly as a "seen"
marker.

# Fixed-offset marking
Similar to sign-bit marking: without external auxiliary space we must work
inside the array itself, in a way that both sets a marker and can restore the
data (without destroying it). The sign-bit method above does exactly that:
the sign tells us whether a value has been seen, and flipping it back restores
the data. Is there another scheme with the same property?
Since all values lie in [0, n), we can borrow the idea of excess (offset)
encoding: add or subtract a fixed offset from a value to mark it, without
losing it. To keep marked and unmarked values distinguishable, the offset
must be at least n, giving a one-to-one mapping between original and marked
values:
[0, n-1] --(+offset n)--> [n, 2n-1]

# Put each element in its own slot
This is the method given in "Coding Interviews" (剑指offer); it works by
swapping.
If every number appeared exactly once, the array would be "perfect": each
index i would hold the element numbers[i] == i. So for each element
numbers[i] we try to move it to the slot it belongs in, numbers[numbers[i]].
If two elements ever compete for the same slot, that value must be a
duplicate.
The procedure:
- If numbers[i] == i, the element is already in its own slot.
- Otherwise numbers[i] belongs at index numbers[i], so swap numbers[i] and
  numbers[numbers[i]].
- Repeat step 1: either numbers[i] == i eventually holds (move on to the next
  index), or numbers[i] == numbers[numbers[i]], which means a duplicate was
  found.
'''
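# --- Illustrative addition by the editor (not in the original file). ---
# A sketch of the swap-into-place method described above (the "Coding
# Interviews" approach): O(n) time, O(1) extra space, but it rearranges the
# input array. Returns a duplicated value, or None if there is none.
def duplicate_by_swapping(numbers):
    for i in range(len(numbers)):
        # Keep moving numbers[i] toward index numbers[i] until slot i settles.
        while numbers[i] != i:
            j = numbers[i]
            if numbers[j] == j:
                return j  # j already sits in its own slot: duplicate found
            numbers[i], numbers[j] = numbers[j], numbers[i]
    return None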
[ "noreply@github.com" ]
sjtupig.noreply@github.com
172d528877e46d3a15c44ea0bd68dd96091dec79
77676610410e479a3214669b082b5f410b499e24
/apps/main/migrations/0010_auto_20170424_0645.py
cfeb0350a6e5aedc05e7e5c8f745933e2474e75b
[ "Apache-2.0" ]
permissive
StepicOrg/stepik-extensions
e76b2ee033275b33bf9d8c8deeac495d3a6bde46
5825bc9b2444ad4690681964d1bed172706f8796
refs/heads/develop
2023-04-05T12:43:28.114500
2021-04-19T12:57:30
2021-04-19T12:57:30
82,687,804
5
2
Apache-2.0
2021-04-19T12:58:47
2017-02-21T14:17:00
JavaScript
UTF-8
Python
false
false
653
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 06:45
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0009_auto_20170422_2002'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='extension',
            name='categories',
        ),
        migrations.RemoveField(
            model_name='extension',
            name='user_groups',
        ),
        migrations.DeleteModel(
            name='Category',
        ),
        migrations.DeleteModel(
            name='Extension',
        ),
    ]
[ "meanmail@mail.ru" ]
meanmail@mail.ru
ec76462b01d31414e103dd0bfcd6655ce361b9da
e63676d4a91b6718f4e8333e3e72f02d33fbc9a6
/sdlf-datalakeLibrary/python/datalake_library/tests/unit/stage_b_transforms/test_heavy_transform_blueprint.py
acc41f5c28d772df88e232cdef449c9056a50170
[ "MIT-0" ]
permissive
fnapolitano73/aws-serverless-data-lake-framework
c96f526e97d609e7cf2852ba05ad6e8332e8e98e
c37e3e2e9faee9ee915eb9b6e0919e1cf30c38d8
refs/heads/master
2023-01-04T02:11:30.237458
2020-10-28T08:09:00
2020-10-28T08:09:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,276
py
import os
import sys

# The path tweak must run before importing from the package; in the original
# file the package import came first, which only worked when the tests were
# launched from the repository root.
sys.path.insert(0, os.path.join(os.path.abspath(
    os.path.dirname(__file__)), '../../../..'))

from python.datalake_library.transforms.stage_b_transforms.heavy_transform_blueprint import CustomTransform


class TestCustomTransform:

    @staticmethod
    def test_check_job_status(mocker):
        # Setup
        bucket = "test-bucket"
        keys = 123
        processed_keys_path = "test-bucket/files/"
        job_details = {"jobName": "meteorites-glue-job", "jobRunId": "1"}

        job_response = {
            "JobRun": {
                "jobName": "meteorites-glue-job",
                "jobRunId": 1,
                "JobRunState": "RUNNING"
            }
        }
        expected_result = {
            "processedKeysPath": processed_keys_path,
            "jobDetails": {"jobName": "meteorites-glue-job",
                           "jobRunId": "1",
                           "jobStatus": "RUNNING"}
        }
        mocker.patch("botocore.client.BaseClient._make_api_call",
                     return_value=job_response)

        # Exercise
        result = CustomTransform().check_job_status(
            bucket, keys, processed_keys_path, job_details)

        # Verify
        assert result == expected_result
[ "jaidisido@gmail.com" ]
jaidisido@gmail.com
b01cb42df40d9efc85d03a815e799ee14b6e8fd8
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03339/s273941488.py
c82cd4ca992be5faaa424d10d255497c4a9fd014
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
222
py
n = int(input())
s = [(i == "W")*1 for i in list(input())]
c = [0]*(n+1)
for i in range(n):
    c[i+1] = c[i] + s[i]
ans = float("inf")
for i in range(n):
    t = c[i] + (n-i-1-c[-1]+c[i+1])
    ans = min(ans, t)
print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
7f54413f5e15fdc43edf6d6d9bbea08094267c57
8e5950aa3aaf94cec94d7c53d55b06275e7b109f
/modules/fabric/Manifest.py
91a3af77ce234b924a7d098aa6f8059bb057e7c5
[]
no_license
stefanrauch/wr_cores
a129ff2ef60d3552f6bce6ca7b309f2bd851ba77
b9a0ed7f6878b0e3c8332efb9e8d93c2ec018a3e
refs/heads/master
2021-01-10T18:59:58.191100
2013-01-21T15:54:23
2013-01-21T15:54:23
7,622,900
2
0
null
null
null
null
UTF-8
Python
false
false
77
py
files = ["wr_fabric_pkg.vhd", "xwb_fabric_sink.vhd", "xwb_fabric_source.vhd"]
[ "c.prados@gsi.de" ]
c.prados@gsi.de
a5bc5f827f72fb006bbe2a2d51fbc23feca32582
638842330f186436fb40689cd596ffae6ec4bc3b
/bayesian_shielding/benchmark_tasks/MNLI/mnli_extract_res.py
435539c81f1daea720a3a715e2debdf2062d45d8
[]
no_license
jflotz/BERT-Defense
35d725b909dfaa775217c6d4078283a07cca3e6d
f4b28d03f121b1f88be59d45ea89009416818b4b
refs/heads/master
2023-07-17T12:29:40.768394
2021-08-29T08:31:45
2021-08-29T08:31:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
215
py
import pandas as pd

df = pd.read_csv("../../../DATA/mnli/dev_matched.tsv", sep="\t")
df = df[["gold_label", "sentence1", "sentence2"]]
df = df.iloc[:200]
df.to_csv("mnli_dataset.csv", sep="\t", index=False, header=False)
[ "yannik@kelnet.de" ]
yannik@kelnet.de
297b49422f62295813f98787154517148273d665
a59deecc5d91214601c38bd170605d9d080e06d2
/14-dictionaries/08-copy()/app.py
2a626c1bb68207e6df9b951c1b8fd7d46c37c8b5
[]
no_license
reyeskevin9767/modern-python-bootcamp-2018
a6a3abdb911716d19f6ab516835ed1a04919a13d
d0234f10c4b8aaa6a20555348aec7e3571e3d4e7
refs/heads/master
2022-12-03T18:48:50.035054
2020-08-09T03:00:55
2020-08-09T03:00:55
286,109,881
0
0
null
null
null
null
UTF-8
Python
false
false
214
py
# * Copy Method

d = dict(a=1, b=2, c=3)
c = d.copy()

print(c)  # {'a': 1, 'b': 2, 'c': 3}
print(c is d)  # False -- copy() returns a new dict object

e = dict(a=6, b=7, c=8)
f = e.copy()

print(e)  # {'a': 6, 'b': 7, 'c': 8}
print(e is f)  # False
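# --- Illustrative addition by the editor (not in the original file). ---
# copy() is shallow: it duplicates the dict itself, but nested objects are
# shared between the two dicts.
g = dict(a=[1, 2], b=3)
h = g.copy()
h["a"].append(99)
print(g)  # {'a': [1, 2, 99], 'b': 3} -- the inner list is shared
print(g["a"] is h["a"])  # True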
[ "reyeskevin9767@gmail.com" ]
reyeskevin9767@gmail.com
cb4aa6d7dde712cf314a82682f8860ab95fe0eae
f026b118e0ee312dd759db2d4890f2ddaa7e165e
/WHFlask/model/__init__.py
225c67761c910247fd4414a5081d9d9969d08c29
[]
no_license
ksnero34/walker_holic_back
df345f8317c9f8fc7b76bb55f983f5e2ac571764
5125127a62e0b8be8d7182aaf77002de58d4b43a
refs/heads/main
2023-07-23T12:10:41.447787
2021-08-29T03:45:45
2021-08-29T03:45:45
385,494,489
1
0
null
null
null
null
UTF-8
Python
false
false
281
py
'''
    Application Model
'''
# Restored: this import was commented out in the original, which left `mysql`
# undefined inside init_app() below.
from model import mysql
from config import config


def get_cursor():
    print(config.SLOW_API_TIME)


def init_app():
    '''Model Init Function'''
    # Mysql Init
    initializer = mysql.ModelInitializer()
    initializer.init_model()
    get_cursor()
[ "must1080@naver.com" ]
must1080@naver.com
0826bb49bda6584cc57d9ea1205a457341b5e9ac
4e3c976773526fd610d64ffb83589bccfaee5e68
/sponge-integration-tests/examples/core/filters_event_pattern.py
32eae8faeab3bf1d4d3fa3664b9a44fc5a0f1edc
[ "Apache-2.0" ]
permissive
softelnet/sponge
2313d2328953fcff49a002e727bb803757870627
7190f23ae888bbef49d0fbb85157444d6ea48bcd
refs/heads/master
2022-10-28T16:19:55.619882
2021-09-16T19:50:08
2021-09-16T19:50:08
95,256,030
10
2
Apache-2.0
2022-10-04T23:55:09
2017-06-23T20:58:49
Java
UTF-8
Python
false
false
1,408
py
""" Sponge Knowledge Base Filters - Event pattern """ from java.util.concurrent.atomic import AtomicInteger def onInit(): # Variables for assertions only sponge.setVariable("nameCount", AtomicInteger(0)) sponge.setVariable("patternCount", AtomicInteger(0)) sponge.setVariable("acceptedCount", AtomicInteger(0)) sponge.setVariable("notAcceptedCount", AtomicInteger(0)) class NameFilter(Filter): def onConfigure(self): self.withEvent("a1") def onAccept(self, event): sponge.getVariable("nameCount").incrementAndGet() return True class PatternFilter(Filter): def onConfigure(self): self.withEvent("a.+") def onAccept(self, event): sponge.getVariable("patternCount").incrementAndGet() return False class AcceptedTrigger(Trigger): def onConfigure(self): self.withEvent(".+") def onRun(self, event): self.logger.info("accepted {}", event.name) if event.name != EventName.STARTUP: sponge.getVariable("acceptedCount").incrementAndGet() class NotAcceptedTrigger(Trigger): def onConfigure(self): self.withEvent("a.+") def onRun(self, event): sponge.getVariable("notAcceptedCount").incrementAndGet() def onStartup(): for name in ["a1", "b1", "a2", "b2", "a", "b", "a1", "b2"]: sponge.event(name).send()
[ "marcin.pas@softelnet.com" ]
marcin.pas@softelnet.com
3117d59bf6629c3dce3f90abcf05f1855a34cce8
24735767f7d585a8d7d3055b4967578e7f55a715
/pontos_turisticos/urls.py
ac1d500c026745f66f0665db4a29c2fb0042e224
[]
no_license
nilton-medeiros/pontos_turisticos
b6c978ec0259163feccf8398a7ea927d34bba352
c642c2c834a9f2594ac680a9048d6d70aaca70d6
refs/heads/main
2023-02-12T05:16:12.239114
2021-01-13T14:02:28
2021-01-13T14:02:28
327,097,453
1
0
null
2021-01-05T21:42:17
2021-01-05T19:23:27
Python
UTF-8
Python
false
false
1,687
py
"""pontos_turisticos URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.conf.urls import include from django.urls import path from rest_framework import routers from django.conf import settings from django.conf.urls.static import static from core.api.viewsets import PontoTuristicoViewSet from atracoes.api.viewsets import AtracaoViewSet from enderecos.api.viewsets import EnderecoViewSet from comentarios.api.viewsets import ComentarioViewSet from avaliacoes.api.viewsets import AvaliacaoViewSet from rest_framework.authtoken.views import obtain_auth_token router = routers.DefaultRouter() router.register(r'pontoturistico', PontoTuristicoViewSet, basename='PontoTuristico') router.register(r'atracoes', AtracaoViewSet) router.register(r'enderecos', EnderecoViewSet) router.register(r'comentarios', ComentarioViewSet) router.register(r'avaliacoes', AvaliacaoViewSet) urlpatterns = [ path('', include(router.urls)), path('admin/', admin.site.urls), path('api-token-auth/', obtain_auth_token), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "nilton@sistrom.com.br" ]
nilton@sistrom.com.br
5a08afb6ae2260295558f1669b8a1893f16384b4
f5b2748ead8c589201afa209eb50af789e441987
/zxjy/manage.py
13f9a9b0d4b50bd25a2a30d8fae758949a6e9706
[]
no_license
x17246758/NetCourseSystem
9839478486b3f2d97a7a9f7917cd00e6b5dd344d
3b083d400e4bee92c0a9ff44e42e12b9731b8ab0
refs/heads/master
2022-01-28T01:05:59.484122
2019-10-28T15:01:55
2019-10-28T15:01:55
209,337,495
0
0
null
2022-01-15T05:28:00
2019-09-18T15:04:29
JavaScript
UTF-8
Python
false
false
536
py
#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zxjy.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[ "172467548@qq.com" ]
172467548@qq.com
ce6bfe2a9145cfc6f226201d4923551145eb81a7
479559fc4d4724a7145cfb8ecdaa5cdc55e46761
/tensorflow/python/data/experimental/ops/interleave_ops.py
257639a2560aa5248ffb97bdeb46add625c96113
[ "Apache-2.0" ]
permissive
mudassirej/tensorflow
434818cc68c754c40d2e3b014daf1e3974d26698
bd47c759176f0039026fd5cac8db247bf452de28
refs/heads/master
2020-06-14T10:55:42.751443
2019-07-03T04:07:46
2019-07-03T04:12:59
194,978,111
1
0
Apache-2.0
2019-07-03T04:13:09
2019-07-03T04:13:09
null
UTF-8
Python
false
false
11,807
py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-deterministic dataset transformations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import random_ops from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.data.util import nest from tensorflow.python.data.util import structure from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_experimental_dataset_ops from tensorflow.python.ops import gen_stateless_random_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @deprecation.deprecated( None, "Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, " "num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy " "execution is desired, use `tf.data.Options.experimental_determinstic`.") @tf_export("data.experimental.parallel_interleave") def parallel_interleave(map_func, cycle_length, block_length=1, sloppy=False, buffer_output_elements=None, prefetch_input_elements=None): """A parallel version of the `Dataset.interleave()` transformation. `parallel_interleave()` maps `map_func` across its input to produce nested datasets, and outputs their elements interleaved. Unlike `tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested datasets in parallel, which increases the throughput, especially in the presence of stragglers. Furthermore, the `sloppy` argument can be used to improve performance, by relaxing the requirement that the outputs are produced in a deterministic order, and allowing the implementation to skip over nested datasets whose elements are not readily available when requested. Example usage: ```python # Preprocess 4 files concurrently. filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords") dataset = filenames.apply( tf.data.experimental.parallel_interleave( lambda filename: tf.data.TFRecordDataset(filename), cycle_length=4)) ``` WARNING: If `sloppy` is `True`, the order of produced elements is not deterministic. Args: map_func: A function mapping a nested structure of tensors to a `Dataset`. cycle_length: The number of input `Dataset`s to interleave from in parallel. block_length: The number of consecutive elements to pull from an input `Dataset` before advancing to the next input `Dataset`. sloppy: If false, elements are produced in deterministic order. Otherwise, the implementation is allowed, for the sake of expediency, to produce elements in a non-deterministic order. 
buffer_output_elements: The number of elements each iterator being interleaved should buffer (similar to the `.prefetch()` transformation for each interleaved iterator). prefetch_input_elements: The number of input elements to transform to iterators before they are needed for interleaving. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): return readers.ParallelInterleaveDataset( dataset, map_func, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements) return _apply_fn class _DirectedInterleaveDataset(dataset_ops.Dataset): """A substitute for `Dataset.interleave()` on a fixed list of datasets.""" def __init__(self, selector_input, data_inputs): self._selector_input = selector_input self._data_inputs = list(data_inputs) first_output_types = dataset_ops.get_legacy_output_types(data_inputs[0]) first_output_classes = dataset_ops.get_legacy_output_classes(data_inputs[0]) for data_input in data_inputs[1:]: if (dataset_ops.get_legacy_output_types(data_input) != first_output_types or dataset_ops.get_legacy_output_classes(data_input) != first_output_classes): raise TypeError("All datasets must have the same type and class.") output_shapes = dataset_ops.get_legacy_output_shapes(self._data_inputs[0]) for data_input in self._data_inputs[1:]: output_shapes = nest.pack_sequence_as(output_shapes, [ ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip( nest.flatten(output_shapes), nest.flatten(dataset_ops.get_legacy_output_shapes(data_input))) ]) self._element_spec = structure.convert_legacy_structure( first_output_types, output_shapes, first_output_classes) super(_DirectedInterleaveDataset, self).__init__() def _as_variant_tensor(self): # pylint: disable=protected-access return ( gen_experimental_dataset_ops.experimental_directed_interleave_dataset( self._selector_input._variant_tensor, [data_input._variant_tensor for data_input in self._data_inputs], **self._flat_structure)) # pylint: enable=protected-access def _inputs(self): return [self._selector_input] + self._data_inputs @property def element_spec(self): return self._element_spec @tf_export("data.experimental.sample_from_datasets", v1=[]) def sample_from_datasets_v2(datasets, weights=None, seed=None): """Samples elements at random from the datasets in `datasets`. Args: datasets: A list of `tf.data.Dataset` objects with compatible structure. weights: (Optional.) A list of `len(datasets)` floating-point values where `weights[i]` represents the probability with which an element should be sampled from `datasets[i]`, or a `tf.data.Dataset` object where each element is such a list. Defaults to a uniform distribution across `datasets`. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.compat.v1.set_random_seed` for behavior. Returns: A dataset that interleaves elements from `datasets` at random, according to `weights` if provided, otherwise with uniform probability. Raises: TypeError: If the `datasets` or `weights` arguments have the wrong type. ValueError: If the `weights` argument is specified and does not match the length of the `datasets` element. """ num_datasets = len(datasets) if not isinstance(weights, dataset_ops.DatasetV2): if weights is None: # Select inputs with uniform probability. logits = [[1.0] * num_datasets] else: # Use the given `weights` as the probability of choosing the respective # input. 
weights = ops.convert_to_tensor(weights, name="weights") if weights.dtype not in (dtypes.float32, dtypes.float64): raise TypeError("`weights` must be convertible to a tensor of " "`tf.float32` or `tf.float64` elements.") if not weights.shape.is_compatible_with([num_datasets]): raise ValueError( "`weights` must be a vector of length `len(datasets)`.") # The `stateless_multinomial()` op expects log-probabilities, as opposed # to weights. logits = array_ops.expand_dims(math_ops.log(weights, name="logits"), 0) # NOTE(mrry): We only specialize when `weights` is not a `Dataset`. When it # is a `Dataset`, it is possible that evaluating it has a side effect the # user depends on. if len(datasets) == 1: return datasets[0] def select_dataset_constant_logits(seed): return array_ops.squeeze( gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed), axis=[0, 1]) selector_input = dataset_ops.MapDataset( random_ops.RandomDataset(seed).batch(2), select_dataset_constant_logits, use_inter_op_parallelism=False) else: # Use each element of the given `weights` dataset as the probability of # choosing the respective input. # The `stateless_multinomial()` op expects log-probabilities, as opposed to # weights. logits_ds = weights.map(lambda *p: math_ops.log(p, name="logits")) def select_dataset_varying_logits(logits, seed): return array_ops.squeeze( gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed), axis=[0, 1]) logits_and_seeds = dataset_ops.Dataset.zip( (logits_ds, random_ops.RandomDataset(seed).batch(2))) selector_input = dataset_ops.MapDataset( logits_and_seeds, select_dataset_varying_logits, use_inter_op_parallelism=False) return _DirectedInterleaveDataset(selector_input, datasets) @tf_export(v1=["data.experimental.sample_from_datasets"]) def sample_from_datasets_v1(datasets, weights=None, seed=None): return dataset_ops.DatasetV1Adapter( sample_from_datasets_v2(datasets, weights, seed)) sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__ @tf_export("data.experimental.choose_from_datasets", v1=[]) def choose_from_datasets_v2(datasets, choice_dataset): """Creates a dataset that deterministically chooses elements from `datasets`. For example, given the following datasets: ```python datasets = [tf.data.Dataset.from_tensors("foo").repeat(), tf.data.Dataset.from_tensors("bar").repeat(), tf.data.Dataset.from_tensors("baz").repeat()] # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`. choice_dataset = tf.data.Dataset.range(3).repeat(3) result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset) ``` The elements of `result` will be: ``` "foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz" ``` Args: datasets: A list of `tf.data.Dataset` objects with compatible structure. choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between `0` and `len(datasets) - 1`. Returns: A dataset that interleaves elements from `datasets` according to the values of `choice_dataset`. Raises: TypeError: If the `datasets` or `choice_dataset` arguments have the wrong type. 
""" if not structure.are_compatible(choice_dataset.element_spec, structure.TensorStructure(dtypes.int64, [])): raise TypeError("`choice_dataset` must be a dataset of scalar " "`tf.int64` tensors.") return _DirectedInterleaveDataset(choice_dataset, datasets) @tf_export(v1=["data.experimental.choose_from_datasets"]) def choose_from_datasets_v1(datasets, choice_dataset): return dataset_ops.DatasetV1Adapter( choose_from_datasets_v2(datasets, choice_dataset)) choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__ # TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep # these aliases in place. choose_from_datasets = choose_from_datasets_v1 sample_from_datasets = sample_from_datasets_v1
[ "gardener@tensorflow.org" ]
gardener@tensorflow.org
576be3d1522f710ccbaba352d2393f1ebf54fd96
704aed30fda284d689887a0841b28f83ee80b922
/RC1/phil_catkin_ws/build/rosserial/rosserial_arduino/catkin_generated/pkg.installspace.context.pc.py
306bec52756d11160e6cd0279101a58b23c42dd9
[]
no_license
ozay-group/scaled-cars
f69832dc01407044e8307cb39a989c765f21c48a
bd171636d2bcbfca3767eb9d877e91c0904ecb1f
refs/heads/master
2020-04-27T19:23:51.819988
2018-08-30T17:19:14
2018-08-30T17:19:14
174,615,849
0
0
null
null
null
null
UTF-8
Python
false
false
487
py
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/phil_catkin_ws/install/include".split(';') if "/home/ubuntu/phil_catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_arduino"
PROJECT_SPACE_DIR = "/home/ubuntu/phil_catkin_ws/install"
PROJECT_VERSION = "0.7.7"
[ "psisk@umich.edu" ]
psisk@umich.edu
bd7d1491e809be7611d09d0d0e8578f497fb3520
e811da3715d43e23a4548490aa27be40ac21d6e4
/handlers/base/__init__.py
8f1904288c671963f969ea59e55106edced6d3da
[]
no_license
atiger808/tornado
2a2ff73957d6fb97cd91222038f499ee8ed325f5
77e981ee70a7c7b3903bec82d91109f163bb2a43
refs/heads/master
2020-04-04T09:22:07.007710
2018-11-02T05:04:00
2018-11-02T05:04:00
155,815,465
0
0
null
null
null
null
UTF-8
Python
false
false
149
py
# _*_ coding: utf-8 _*_
# @Time     : 2018/6/26 22:52
# @Author   : Ole211
# @Site     :
# @File     : __init__.py
# @Software : PyCharm
[ "atiger0614@163.com" ]
atiger0614@163.com
a9b098aaf599f218d0e3b35cae1d246bcbeb2c50
a66b69c3f9da9779ae80f347b61f47e3bc5ba145
/day1002/A04_loop.py
311112630c8c83899668600713293b1a7f31e1f9
[]
no_license
kyungtae92/python-basic
c841d9c9c6196b01da3de007c1298fe2c4b8f693
80a2051e37b6e87c9dbfd332c4b2946089ff0d5c
refs/heads/master
2020-11-25T08:01:22.156661
2019-12-17T08:25:38
2019-12-17T08:25:38
228,567,120
2
0
null
null
null
null
UTF-8
Python
false
false
352
py
import os  # lets Python call some operating-system functionality (shell commands)

while (True):
    dan = input('input gugudan >> ')
    if dan.isalpha() == True or dan == '':
        os.system('cls')
    else:
        break

dan = int(dan)
i = 0
for i in range(1, 10):  # for i in range(1, 10, 1):
    print("%d * %d = %2d" % (dan, i, dan * i))
[ "noreply@github.com" ]
kyungtae92.noreply@github.com
c8134ac09fd408a7e6a95afc096c4b2a6a04af17
c6770d1d1bf408cf14ff6c83402726f6c4f4a8f5
/mediafiles/utils.py
40688bb19362ae105e49f52448291213c98df8f5
[]
no_license
nasaastrobio/django-mediafiles
f8825a988b8d04cfd8ce83c1274c93b7961cb50f
a69082108cbecdf01593ab920c4f6efeb99253de
refs/heads/master
2021-05-27T23:22:53.657945
2013-09-28T14:59:24
2013-09-28T14:59:24
13,167,434
0
0
null
null
null
null
UTF-8
Python
false
false
1,970
py
import os import fnmatch from django.conf import settings from django.core.exceptions import ImproperlyConfigured def matches_patterns(path, patterns=None): """ Return True or False depending on whether the ``path`` should be ignored (if it matches any pattern in ``ignore_patterns``). """ if patterns is None: patterns = [] for pattern in patterns: if fnmatch.fnmatchcase(path, pattern): return True return False def get_files(storage, ignore_patterns=None, location=''): """ Recursively walk the storage directories yielding the paths of all files that should be copied. """ if ignore_patterns is None: ignore_patterns = [] directories, files = storage.listdir(location) for fn in files: if matches_patterns(fn, ignore_patterns): continue if location: fn = os.path.join(location, fn) yield fn for dir in directories: if matches_patterns(dir, ignore_patterns): continue if location: dir = os.path.join(location, dir) for fn in get_files(storage, ignore_patterns, dir): yield fn def check_settings(base_url=None): """ Checks if the mediafiles settings have sane values. """ if base_url is None: base_url = settings.MEDIA_URL if not base_url: raise ImproperlyConfigured( "You're using the mediafiles app " "without having set the required MEDIA_URL setting.") if settings.STATIC_URL == base_url: raise ImproperlyConfigured("The STATIC_URL and MEDIA_URL " "settings must have different values") if ((settings.STATIC_ROOT and settings.MEDIA_ROOT) and (settings.STATIC_ROOT == settings.MEDIA_ROOT)): raise ImproperlyConfigured("The STATIC_ROOT and MEDIA_ROOT " "settings must have different values")
[ "shige.abe@nasa.gov" ]
shige.abe@nasa.gov
0e8122a8eb0ba5e509d0b49d4d9aa565da10bc4e
1ad6d91e4454294427d37d5dbfa5b38dab6242e9
/scripts/margin_flipped_mnist.py
bffc4ca93b42a7ad322e273821a63e7f6724b91a
[ "Apache-2.0" ]
permissive
amodas/hold-me-tight
3955bf602ebbb145e1a8207da6a64760a11bc722
b893e97f0b5fe8100472ac68d715d0cb99d0c7dc
refs/heads/main
2022-12-28T14:26:27.474566
2020-10-12T11:06:21
2020-10-12T11:06:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,644
py
import numpy as np import torch import torch.nn as nn import os import time from utils import get_processed_dataset_loaders from utils import train from utils import generate_subspace_list from utils import compute_margin_distribution from utils_dct import dct_flip from model_classes import TransformFlippedLayer from model_classes.mnist import LeNet # check inside the model_class.mnist package for other network options TREE_ROOT = './' DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' DATASET = 'MNIST' PRETRAINED = True PRETRAINED_PATH = '../Models/Pretrained/MNIST_flipped/LeNet/model.t7' BATCH_SIZE = 128 ############################# # Dataset paths and loaders # ############################# # Specify the path of the dataset. For MNIST and CIFAR-10 the train and validation paths can be the same. # For ImageNet, please specify to proper train and validation paths. DATASET_DIR = {'train': os.path.join(TREE_ROOT, '../Datasets/'), 'val': os.path.join(TREE_ROOT, '../Datasets/') } os.makedirs(DATASET_DIR['train'], exist_ok=True) os.makedirs(DATASET_DIR['val'], exist_ok=True) # Load the data trainloader, testloader, trainset, testset, mean, std, _, _ = get_processed_dataset_loaders(lambda x: dct_flip(x), DATASET, DATASET_DIR, BATCH_SIZE) #################### # Select a Network # #################### # Normalization layer flip_trans = TransformFlippedLayer(mean, std, [1, 28, 28], DEVICE) # Load a model model = LeNet() # check inside the model_class.mnist package for other network options # If pretrained if PRETRAINED: print('---> Working on a pretrained network') model.load_state_dict(torch.load(PRETRAINED_PATH, map_location='cpu')) model = model.to(DEVICE) model.eval() # If not pretrained, then train it if not PRETRAINED: EPOCHS = 30 MAX_LR = 0.21 MOMENTUM = 0.9 WEIGHT_DECAY = 5e-4 opt = torch.optim.SGD(model.parameters(), lr=MAX_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY) loss_fun = nn.CrossEntropyLoss() lr_schedule = lambda t: np.interp([t], [0, EPOCHS * 2 // 5, EPOCHS], [0, MAX_LR, 0])[0] # Triangular (cyclic) learning rate schedule SAVE_TRAIN_DIR = os.path.join(TREE_ROOT, '../Models/Generated/%s_flipped/%s/' % (DATASET, model.__class__.__name__)) os.makedirs(SAVE_TRAIN_DIR, exist_ok=True) t0 = time.time() model = model.to(DEVICE) model = train(model, flip_trans, trainloader, testloader, EPOCHS, opt, loss_fun, lr_schedule, SAVE_TRAIN_DIR) print('---> Training is done! Elapsed time: %.5f minutes\n' % ((time.time() - t0) / 60.)) ################################## # Compute margin along subspaces # ################################## # Create a list of subspaces to evaluate the margin on SUBSPACE_DIM = 8 DIM = 28 SUBSPACE_STEP = 1 subspace_list = generate_subspace_list(SUBSPACE_DIM, DIM, SUBSPACE_STEP, channels=1) # Select the data samples for evaluation NUM_SAMPLES_EVAL = 100 indices = np.random.choice(len(testset), NUM_SAMPLES_EVAL, replace=False) eval_dataset = torch.utils.data.Subset(testset, indices[:NUM_SAMPLES_EVAL]) eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=True if DEVICE == 'cuda' else False) # Compute the margin using subspace DeepFool and save the results RESULTS_DIR = os.path.join(TREE_ROOT, '../Results/margin_%s_flipped/%s/' % (DATASET, model.__class__.__name__)) os.makedirs(RESULTS_DIR, exist_ok=True) margins = compute_margin_distribution(model, flip_trans, eval_loader, subspace_list, RESULTS_DIR + 'margins.npy')
[ "apostolos.modas@epfl.ch" ]
apostolos.modas@epfl.ch
c69a96ad0afddc0785d9a27aa65ecf1e913caeb7
21240a40e4be1c88a9a3a23cb498d48a51e8ee80
/angular-tour-of-heroes/node_modules/uws/build/config.gypi
d0242912952f96a3e54ebacf17bb4e255ab3bee8
[ "Zlib" ]
permissive
KevinDackow/AngularDemo
7c77bea70b056a64e37ad086338c2b172c1545ba
a307f6569aade23e5b83ad5d467dff3163ff9e44
refs/heads/master
2020-03-11T21:09:23.205304
2018-04-25T01:38:53
2018-04-25T01:38:53
130,257,095
0
0
null
null
null
null
UTF-8
Python
false
false
5,092
gypi
# Do not edit. File was generated by node-gyp's "configure" step { "target_defaults": { "cflags": [], "default_configuration": "Release", "defines": [], "include_dirs": [], "libraries": [] }, "variables": { "asan": 0, "coverage": "false", "debug_devtools": "node", "debug_http2": "false", "debug_nghttp2": "false", "force_dynamic_crt": 0, "gas_version": "2.23", "host_arch": "x64", "icu_data_file": "icudt60l.dat", "icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat", "icu_endianness": "l", "icu_gyp_path": "tools/icu/icu-generic.gyp", "icu_locales": "en,root", "icu_path": "deps/icu-small", "icu_small": "true", "icu_ver_major": "60", "llvm_version": 0, "node_byteorder": "little", "node_enable_d8": "false", "node_enable_v8_vtunejit": "false", "node_install_npm": "true", "node_module_version": 59, "node_no_browser_globals": "false", "node_prefix": "/", "node_release_urlbase": "https://nodejs.org/download/release/", "node_shared": "false", "node_shared_cares": "false", "node_shared_http_parser": "false", "node_shared_libuv": "false", "node_shared_nghttp2": "false", "node_shared_openssl": "false", "node_shared_zlib": "false", "node_tag": "", "node_use_bundled_v8": "true", "node_use_dtrace": "false", "node_use_etw": "false", "node_use_lttng": "false", "node_use_openssl": "true", "node_use_perfctr": "false", "node_use_v8_platform": "true", "node_without_node_options": "false", "openssl_fips": "", "openssl_no_asm": 0, "shlib_suffix": "so.59", "target_arch": "x64", "uv_parent_path": "/deps/uv/", "uv_use_dtrace": "false", "v8_enable_gdbjit": 0, "v8_enable_i18n_support": 1, "v8_enable_inspector": 1, "v8_no_strict_aliasing": 1, "v8_optimized_debug": 0, "v8_promise_internal_field_count": 1, "v8_random_seed": 0, "v8_trace_maps": 0, "v8_use_snapshot": "true", "want_separate_host_toolset": 0, "nodedir": "/home/kevin/.node-gyp/9.4.0", "standalone_static_library": 1, "cache_lock_stale": "60000", "ham_it_up": "", "legacy_bundling": "", "sign_git_tag": "", "user_agent": "npm/5.6.0 node/v9.4.0 linux x64", "always_auth": "", "bin_links": "true", "key": "", "allow_same_version": "", "description": "true", "fetch_retries": "2", "heading": "npm", "if_present": "", "init_version": "1.0.0", "user": "", "prefer_online": "", "force": "", "only": "", "read_only": "", "cache_min": "10", "init_license": "ISC", "editor": "vi", "rollback": "true", "tag_version_prefix": "v", "cache_max": "Infinity", "timing": "", "userconfig": "/home/kevin/.npmrc", "engine_strict": "", "init_author_name": "", "init_author_url": "", "tmp": "/tmp", "depth": "Infinity", "package_lock_only": "", "save_dev": "", "usage": "", "metrics_registry": "https://registry.npmjs.org/", "otp": "", "package_lock": "true", "progress": "true", "https_proxy": "", "save_prod": "", "cidr": "", "onload_script": "", "sso_type": "oauth", "rebuild_bundle": "true", "save_bundle": "", "shell": "/bin/bash", "dry_run": "", "prefix": "/usr/local", "scope": "", "browser": "", "cache_lock_wait": "10000", "ignore_prepublish": "", "registry": "https://registry.npmjs.org/", "save_optional": "", "searchopts": "", "versions": "", "cache": "/home/kevin/.npm", "send_metrics": "", "global_style": "", "ignore_scripts": "", "version": "", "local_address": "", "viewer": "man", "node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js", "prefer_offline": "", "color": "true", "fetch_retry_mintimeout": "10000", "maxsockets": "50", "offline": "", "sso_poll_frequency": "500", "umask": "0002", "fetch_retry_maxtimeout": "60000", "logs_max": "10", "message": 
"%s", "ca": "", "cert": "", "global": "", "link": "", "access": "", "also": "", "save": "true", "unicode": "true", "long": "", "production": "", "searchlimit": "20", "unsafe_perm": "true", "auth_type": "legacy", "node_version": "9.4.0", "tag": "latest", "git_tag_version": "true", "commit_hooks": "true", "script_shell": "", "shrinkwrap": "true", "fetch_retry_factor": "10", "save_exact": "", "strict_ssl": "true", "dev": "", "globalconfig": "/usr/local/etc/npmrc", "init_module": "/home/kevin/.npm-init.js", "parseable": "", "globalignorefile": "/usr/local/etc/npmignore", "cache_lock_retries": "10", "searchstaleness": "900", "node_options": "", "save_prefix": "^", "scripts_prepend_node_path": "warn-only", "group": "1000", "init_author_email": "", "searchexclude": "", "git": "git", "optional": "true", "json": "" } }
[ "Kevin Dackow" ]
Kevin Dackow
d5d4dc11f80514143b96cfebbcab39e53506dd9b
7f9811857538858ea5c6baaefdccf424c2dea3c2
/INTRODUCTION_TO_DS/chapter5_search/linear_search.py
b3c44483d7fd39c6fc66b263858905c46d9c2969
[]
no_license
owari-taro/python_algorithm
ec4d0c737eefdb4f5ddc140c4dfe81fcfb2ee5af
5af19f7dabe6224f0d06b7c89f38c528a08cf903
refs/heads/master
2021-11-23T07:23:08.958737
2021-08-31T00:56:07
2021-08-31T00:56:07
231,067,479
1
1
null
null
null
null
UTF-8
Python
false
false
254
py
from typing import List


def binary_search(a: List, x, lo=0, hi=None):
    if lo < 0:
        raise ValueError()
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (hi + lo) // 2
        if x < a[mid]:
            hi = mid
        else:
            # Completing the original, which broke off here: advance past mid
            # when a[mid] <= x, and return the final insertion point.
            lo = mid + 1
    return lo
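# --- Illustrative check added by the editor (not in the original file). ---
# With the loop completed above, the function behaves like
# bisect.bisect_right: it returns the insertion point after any equal keys.
if __name__ == "__main__":
    data = [1, 3, 3, 5, 8]
    assert binary_search(data, 3) == 3
    assert binary_search(data, 0) == 0
    assert binary_search(data, 9) == 5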
[ "taro.biwajima@gmail.com" ]
taro.biwajima@gmail.com
00e11d2488cdcb01be07386274adfad59acacc43
0cbf36f06f5316326ef635f14c887cd2849800db
/typings/celery/app/registry.pyi
33985b1be0f526d3403d3531c9b515b239c0b430
[ "Apache-2.0" ]
permissive
espritgames/celery_types
b59545a7cd28f06e766a1a520590f3bbc155e82f
4d4064eb78d2a1a3e79a5fefe111f59ad4d3c9b9
refs/heads/main
2023-08-18T20:11:33.992509
2021-10-04T11:21:49
2021-10-04T11:21:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
70
pyi
from typing import Any, Dict

class TaskRegistry(Dict[str, Any]): ...
[ "steve@dignam.xyz" ]
steve@dignam.xyz
d719400ea7743fa07dd8cb24e3cf0ff8bd3dc1a0
f14a0ef8364953e4fa18d494ce62d8a3b73c263b
/mkdir_and_check_file_type_python27.py
506898d6e43d9fd72acaa89522c45c56261d41a2
[]
no_license
gptcod/python_call_so
d866b01c4489887e29d2ed04211e636ca0b5b279
4816455f6f2b81ccc03ad94fad4ff67f16da83ba
refs/heads/master
2023-08-21T16:52:52.671500
2017-12-18T08:19:48
2017-12-18T08:19:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,095
py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ctypes import *
import ctypes
import os
import subprocess
import re


def mkdir_officex():
    # The original ran `ls -l` and searched its output for "cannot access",
    # but subprocess.check_output raises CalledProcessError when the
    # directory is missing (and the message goes to stderr anyway), so that
    # branch never fired. Testing the path directly is more reliable.
    path = '/home/venus/apt/cloud/officextemp/'
    if not os.path.isdir(path):
        result = subprocess.check_output(['mkdir', '-p', path])
        print result


def get_file_type(so_file_path, check_file_path):
    methods = subprocess.check_output(['nm', '-D', 'libfiltertype.so'])
    pattern = re.compile(r'(_.*checktype[A-Z].*)')
    checktype_method = pattern.findall(methods)[0]
    so_file = cdll.LoadLibrary(so_file_path)
    with open(check_file_path) as file:
        data = file.read()
    data_list = list(data)
    data_array = (ctypes.c_char * len(data_list))(*data_list)
    p = create_string_buffer(10)
    check_file_name = check_file_path.split("/")[-1]
    so_file[checktype_method](byref(data_array), len(data_list), p, check_file_name)
    filetype = ""
    for i in p.raw:
        if ord(i) != 0:
            filetype += i
    return filetype


# print get_file_type("./libfiltertype.so", "./new.txt")
mkdir_officex()
[ "liuyang8@venustech.com.cn" ]
liuyang8@venustech.com.cn
79fd25313de50609a139f7d137681f78e0419623
87ad48769b2700e2c02452c616773b7af7313093
/tabby/migrations/0008_category_popularity.py
509787ee5ad1d38b22c7e49b67c78e979175bea2
[]
no_license
cavacH/pidb
12e8c29fefc46669176d3eecb4c41b0050df8e95
a54bfbeaf009ba5a050c086bfb7361e612bf5a7c
refs/heads/master
2020-12-20T01:00:02.028753
2020-01-23T23:30:57
2020-01-23T23:30:57
235,907,060
0
0
null
null
null
null
UTF-8
Python
false
false
452
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-26 06:10
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('tabby', '0007_auto_20171222_0038'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='popularity',
            field=models.IntegerField(default=0),
        ),
    ]
[ "noreply@github.com" ]
cavacH.noreply@github.com
31f505bcd3e2862f943b2fb2fb39a976fcf80f18
7ba05e73515c14fb8d2f3d056b51102131171a11
/First_steps_March_Excercise/Akvarium.py
c65b850ffd42b8483b25d7fd5129ca00ac7b1aab
[]
no_license
gyurel/SoftUni-Basics-and-Fundamentals
bd6d5fa8c9d0cc51f241393afd418633a66c65dc
184fc5dfab2fdd410aa8593f4c562fd56211c727
refs/heads/main
2023-07-05T11:16:58.966841
2021-08-31T19:25:40
2021-08-31T19:25:40
401,485,125
0
0
null
null
null
null
UTF-8
Python
false
false
727
py
# Four lines are read from the console:
# 1. Length in cm - an integer
# 2. Width in cm - an integer
# 3. Height in cm - an integer
# 4. Percentage of occupied volume - a real number

length = int(input())
width = int(input())
height = int(input())
occupied_percentage = float(input()) / 100

volume_in_litters = length * width * height / 1000

# Write a program that calculates the litres of water needed to fill the aquarium.
needed_water = volume_in_litters - (volume_in_litters * occupied_percentage)

print(needed_water)
[ "gyurel@yahoo.com" ]
gyurel@yahoo.com
2f11b0f81351e4f628d1266ab215c514e432d2f2
7b0413547fb0e4766febcc6a7f0010fafe025fb6
/medium/course_schedule.py
52ca3f20847247a445eb480dcaa842522eed1cac
[]
no_license
theeric80/LeetCode
b00d4bace7c48c409bc6b2f57321aea7b7106f35
e05321d8c2143d35279136d3999e1be1e7005690
refs/heads/master
2021-01-19T00:51:20.608326
2016-06-30T05:32:44
2016-06-30T05:32:44
42,165,229
0
0
null
null
null
null
UTF-8
Python
false
false
2,037
py
class UndirectedGraphNode(object): def __init__(self, x): self.label = x self.neighbors = [] class Solution(object): def canFinish(self, numCourses, prerequisites): """ :type numCourses: int :type prerequisites: List[List[int]] :rtype: bool """ self.cycle = False G = self.buildGraph(numCourses, prerequisites) result, marked, on_stack = [], [False]*len(G), [False]*len(G) for v in G: if not marked[v.label]: self.topological_sort(G, v, marked, on_stack, result) result.reverse() return not self.cycle def buildGraph(self, numCourses, prerequisites): G = [UndirectedGraphNode(i) for i in xrange(numCourses)] for u, v in prerequisites: G[u].neighbors.append(G[v]) return G def topological_sort(self, G, v, marked, on_stack, result): label = v.label marked[label] = True on_stack[label] = True for w in v.neighbors: if self.cycle: return if not marked[w.label]: self.topological_sort(G, w, marked, on_stack, result) elif on_stack[w.label]: self.cycle = True on_stack[label] = False result.append(label) def dfs(self, G, v): result, marked = [], [False]*len(G) s = [v] while s: node = s.pop() label = node.label if not marked[label]: marked[label] = True result.append(label) for neighbor in node.neighbors: s.append(neighbor) print '->'.join(str(i) for i in result) def main(): import sys from os.path import join, abspath sys.path.append(join('..', 'common')) inputs = [(2, [[1,0]])] for numCourses, prerequisites in inputs: result = Solution().canFinish(numCourses, prerequisites) print result if __name__ == '__main__': main()
[ "chunchieh@gmail.com" ]
chunchieh@gmail.com
3c2d197b7b46aa7ba32e0006c6e3ee2c34f3c02f
42acbbad9f4af26ef4261dd70ec4f9fae49c253e
/optimization_solver.py
16598ff8e47d06e5a977f58fc9d582b245a6c24a
[]
no_license
liuruilinspy/SocialRouting
da5ffa51bdcaa7a875e50743c10abfc0647267b4
20e404256c6fbecdcf5994a0a67bc0200d1f7cac
refs/heads/master
2021-01-19T08:40:49.563502
2017-04-08T20:31:56
2017-04-08T20:32:04
87,661,319
0
2
null
null
null
null
UTF-8
Python
false
false
10,476
py
from cvxopt import solvers, matrix, spdiag, log
from cvxopt import spmatrix


def routing_solver(A_listform, b_listform, unknown_variables, demand_list,
                   edge_list, type='system', maxiters=100, abstol=10**(-7),
                   reltivetol=10**(-6), feastol=10**(-7)):
    A = matrix(A_listform).T
    b = matrix(b_listform)

    all_variable_count = (len(demand_list) + 1) * len(edge_list)
    routing_variable_count = len(demand_list) * len(edge_list)

    valid_routing_variable_count = 0
    flow_variable_to_edge_index = {}
    predefined_flow_variable_count = 0
    for index in range(0, all_variable_count):
        if index < routing_variable_count:
            if unknown_variables[index] == 1:
                valid_routing_variable_count += 1
        else:
            if unknown_variables[index] == 0:
                predefined_flow_variable_count += 1
            else:
                flow_variable_edge_index = index - routing_variable_count
                valid_flow_variable_index = flow_variable_edge_index - predefined_flow_variable_count
                flow_variable_to_edge_index[valid_routing_variable_count + valid_flow_variable_index] = \
                    flow_variable_edge_index

    linear_constraint_count, variable_count = A.size

    G = spmatrix(-1.0, range(0, variable_count), range(0, variable_count))
    #print(G)
    h = matrix([0.0] * variable_count)
    #print(h)
    dims = {'l': variable_count, 'q': [], 's': []}

    solvers.options['maxiters'] = maxiters
    solvers.options['abstol'] = abstol
    solvers.options['reltol'] = reltivetol
    solvers.options['feastol'] = feastol

    def sys_op_f(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # in our case, non-linear constraint m = 0, i.e.,
        # only f_0(x) = g_0(x_0) + g_i(x_i) + ... != 0
        # f(m+1)*1 = 1*1, f[0] = f_0(x) = g_0(x_0) + g_i(x_i) + ...
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            #f += cost * (1 + alpha * ((bg_volume + x[var_index]) / capacity) ** beta)
            # BPR-style link travel time t(x)
            t = cost * (1 + alpha * ((bg_volume + x[var_index]) / capacity) ** beta)
            f += t * x[var_index]

        # Df(m+1)*n = 1*n, Df[0,:] = df_0/dx_i
        df_values = list()   # derivative towards each x_i
        ddf_values = list()  # second derivative towards each x_i
        for var_index in range(0, valid_routing_variable_count):
            df_values.append(0.0)
            ddf_values.append(0.0)
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            t = cost * (1 + alpha * ((bg_volume + x[var_index]) / capacity) ** beta)
            # first and second derivatives of t(x) with respect to x
            dt = cost * alpha * beta * (((bg_volume + x[var_index]) / capacity) ** (beta - 1)) / capacity
            dt2 = cost * alpha * beta * (beta - 1) * (((bg_volume + x[var_index]) / capacity) ** (beta - 2)) / (capacity ** 2)
            df_values.append(x[var_index] * dt + t)
            ddf_values.append(x[var_index] * dt2 + 2 * dt)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)  # diagonal matrix, H[i,i] = z[0] * f_0''(x_i)
        return f, Df, H

    def ue_f(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # in our case, non-linear constraint m = 0, i.e.,
        # only f_0(x) = g_0(x_0) + g_i(x_i) + ... != 0
        # f(m+1)*1 = 1*1, f[0] = f_0(x) = g_0(x_0) + g_i(x_i) + ...
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            # Beckmann objective: integral of t(x) from bg_volume to bg_volume + x
            f += cost * x[var_index] + cost * alpha * capacity ** -beta / (beta + 1) * \
                ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1))

        # Df(m+1)*n = 1*n, Df[0,:] = df_0/dx_i
        df_values = list()   # derivative towards each x_i
        ddf_values = list()  # second derivative towards each x_i
        for var_index in range(0, valid_routing_variable_count):
            df_values.append(0.0)
            ddf_values.append(0.0)
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            t = cost * (1 + alpha * ((bg_volume + x[var_index]) / capacity) ** beta)
            dt = cost * alpha * beta * (((bg_volume + x[var_index]) / capacity) ** (beta - 1)) / capacity
            df_values.append(t)
            ddf_values.append(dt)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)  # diagonal matrix, H[i,i] = z[0] * f_0''(x_i)
        return f, Df, H

    def social_op_f(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # in our case, non-linear constraint m = 0, i.e.,
        # only f_0(x) = g_0(x_0) + g_i(x_i) + ... != 0
        # f(m+1)*1 = 1*1, f[0] = f_0(x) = g_0(x_0) + g_i(x_i) + ...
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            integral_t = cost * x[var_index] + cost * alpha * capacity ** -beta / (beta + 1) * \
                ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1))
            integral_xdt = cost * alpha * capacity ** (-beta) * \
                (x[var_index] * (bg_volume + x[var_index]) ** beta -
                 1 / (beta + 1) * ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1)))
            f += integral_t + integral_xdt

        # Df(m+1)*n = 1*n, Df[0,:] = df_0/dx_i
        df_values = list()   # derivative towards each x_i
        ddf_values = list()  # second derivative towards each x_i
        for var_index in range(0, valid_routing_variable_count):
            df_values.append(0.0)
            ddf_values.append(0.0)
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost = edge_list[edge_index]['cost']
            capacity = edge_list[edge_index]['capacity']
            bg_volume = edge_list[edge_index]['bg_volume']
            alpha = edge_list[edge_index]['alpha']
            beta = edge_list[edge_index]['beta']
            t = cost * (1 + alpha * ((bg_volume + x[var_index]) / capacity) ** beta)
            dt = cost * alpha * beta * (((bg_volume + x[var_index]) / capacity) ** (beta - 1)) / capacity
            dt2 = cost * alpha * beta * (beta - 1) * (((bg_volume + x[var_index]) / capacity) ** (beta - 2)) / (capacity ** 2)
            df_values.append(t + x[var_index] * dt)
            ddf_values.append(2 * dt + x[var_index] * dt2)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)  # diagonal matrix, H[i,i] = z[0] * f_0''(x_i)
        return f, Df, H

    if type == 'ue':
        planning_results = solvers.cp(ue_f, G=G, h=h, dims=dims, A=A, b=b)['x']
    elif type == 'social':
        planning_results = solvers.cp(social_op_f, G=G, h=h, dims=dims, A=A, b=b)['x']
    else:
        planning_results = solvers.cp(sys_op_f, G=G, h=h, dims=dims, A=A, b=b)['x']

    total_cost = 0.0
    for flow_variable_index, edge_index in flow_variable_to_edge_index.items():
        flow_variable = planning_results[flow_variable_index]
        cost = edge_list[edge_index]['cost']
        capacity = edge_list[edge_index]['capacity']
        bg_volume = edge_list[edge_index]['bg_volume']
        alpha = edge_list[edge_index]['alpha']
        beta = edge_list[edge_index]['beta']
        t = cost * (1 + alpha * ((bg_volume + flow_variable) / capacity) ** beta)
        total_cost += t * flow_variable

    return planning_results[0:valid_routing_variable_count], total_cost


def test_solver(A, b):
    linear_constraint_count, variable_count = A.size

    def F(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # in our case, non-linear constraint m = 0, i.e.,
        # only f_0(x) = g_0(x_0) + g_i(x_i) + ... != 0
        # f(m+1)*1 = 1*1, f[0] = f_0(x) = g_0(x_0) + g_i(x_i) + ...
        f = -sum(log(x))
        Df = -(x ** -1).T
        if z is None:
            return f, Df
        H = spdiag(z[0] * x ** -2)
        return f, Df, H

    G = spmatrix(-1.0, range(0, variable_count), range(0, variable_count))
    #print(G)
    h = matrix([0.0] * variable_count)
    #print(h)
    dims = {'l': variable_count, 'q': [], 's': []}
    return solvers.cp(F, G=G, h=h, dims=dims, A=A, b=b)['x']
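# A minimal, illustrative use of test_solver (assumes cvxopt is installed;
# the constraint values are invented for the demo): with the single equality
# constraint x1 + x2 = 1 and x >= 0, minimising -sum(log(x)) yields the
# analytic center x = [0.5, 0.5].
if __name__ == '__main__':
    A_demo = matrix([[1.0], [1.0]])  # 1 x 2 equality constraint matrix
    b_demo = matrix([1.0])
    print(test_solver(A_demo, b_demo))  # approximately [0.5, 0.5]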
[ "liuruilinspy@gmail.com" ]
liuruilinspy@gmail.com
1ce945017e7cc43156885a2691f2f6b34eafa43d
e7ed617f4bd0e54b457102c18ad6d7b8e4ed70b3
/products/migrations/0041_remove_group_products_category.py
667733d8c3dd21dc74dac46b5d4ffd136e65caf4
[]
no_license
shoib-ansari/Ecomm-Proj
9ca11e34b3e61ac451022ff72d1f772daf62d5af
1479a30e5bcdc0c07c5d7f709f0ab17fea09268f
refs/heads/master
2023-03-27T10:02:29.035535
2021-03-24T11:51:25
2021-03-24T11:51:25
350,646,654
0
0
null
null
null
null
UTF-8
Python
false
false
339
py
# Generated by Django 2.2.2 on 2020-03-20 05:18

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('products', '0040_auto_20200320_1034'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='group_products',
            name='category',
        ),
    ]
[ "ansarishoib1008@gmail.com" ]
ansarishoib1008@gmail.com
dc26da6e320ff093012cda0b898e9b9684b94a04
05d916261d917f51efb7b51acb04499065d39f00
/import/serializiers.py
0ad7b2dabbe0e5197168d6cf2849da96a9a5060b
[]
no_license
Gektor1234/Import-and-search-for-company-employees
1e71f1e0f4734fc49f9c4425b35985d75ae5505b
eb0c61074f81a04bcb13cfa903a9870da84922c4
refs/heads/master
2021-03-08T13:51:55.661085
2020-03-10T17:41:40
2020-03-10T17:41:40
246,350,253
0
0
null
null
null
null
UTF-8
Python
false
false
627
py
from rest_framework import serializers

from .models import Workers


# Serializer that renders Workers data as plain Python structures.
class WorkersSerializers(serializers.Serializer):
    name = serializers.CharField(max_length=255)
    surname = serializers.CharField(max_length=255)
    date_of_birth = serializers.IntegerField()
    position = serializers.CharField(max_length=255)

    def create(self, validated_data):
        # Tells the framework how to create the object when save() is called.
        return Workers.objects.create(**validated_data)
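# Hypothetical usage sketch (the field values are invented for illustration):
#
#     serializer = WorkersSerializers(data={
#         'name': 'Ivan', 'surname': 'Petrov',
#         'date_of_birth': 1990, 'position': 'engineer',
#     })
#     if serializer.is_valid():
#         worker = serializer.save()  # dispatches to create() above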
[ "xyvafvyf1@gmail.com" ]
xyvafvyf1@gmail.com
07ae3fd425deb6e5c593ee9d9ae487d5398b8f25
e3765def4a180f1d51eaef3884448b0bb9be2cd3
/example/12.3.1_create_pygame_window/alien_invasion.py
136e506214bafb12d29f556453abfc4bb31417aa
[]
no_license
spearfish/python-crash-course
cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b
66bc42d41395cc365e066a597380a96d3282d30b
refs/heads/master
2023-07-14T11:04:49.276764
2021-08-20T10:02:27
2021-08-20T10:02:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
424
py
#!/usr/bin/env python3

# modules
import sys
import pygame

def run_game() :
    pygame.init()
    # pygame.display is an object that handles display.
    screen = pygame.display.set_mode((1200,800))
    pygame.display.set_caption('Alien Invasion')

    while True :
        for event in pygame.event.get() :
            if event.type == pygame.QUIT :
                sys.exit()
        pygame.display.flip()

run_game()
[ "jingchen@tutanota.com" ]
jingchen@tutanota.com
3dc1b7fb3e9705f1aee1008982293a15c22b2d90
c17d0a888d43f6e45d78aecb2ebff7d119e5470e
/main.py
b08fc8aa9d5636b6baffaa83c579939d73f78354
[]
no_license
coolsandeee/TFDSorting
308c5bbecbb59ea19c960dc283aad4b7e7ec1883
2299b9466b93b7d7c5f45d4b26b0e44bd371c4c0
refs/heads/main
2023-03-23T15:12:54.493045
2021-03-15T10:27:32
2021-03-15T10:27:32
347,923,692
0
0
null
null
null
null
UTF-8
Python
false
false
666
py
def array_sort(request):
    content_type = request.headers['content-type']
    if content_type == 'application/json':
        request_json = request.get_json(silent=True)
        if request_json and 'a' in request_json:
            a = request_json['a']
        else:
            raise ValueError("JSON is invalid, or missing 'a' property array")
        if request_json and 'b' in request_json:
            b = request_json['b']
        else:
            raise ValueError("JSON is invalid, or missing 'b' property array")
    else:
        raise ValueError("Expecting a json format")
    new = a + b
    new1 = sorted(new)
    return str(new1)
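# Example request body (illustrative only), sent with the header
# Content-Type: application/json:
#
#     {"a": [3, 1], "b": [2]}
#
# The function concatenates the two arrays, sorts the result, and
# returns the string "[1, 2, 3]".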
[ "noreply@github.com" ]
coolsandeee.noreply@github.com
23da733c2ec49efd92456cc4e9b18303bc590786
81d84521bd42f6fc862272bd56e463690989d969
/Python/Python learning/challange_3.py
db6e4fc3759a641f1c4fd85354c2358114146b37
[]
no_license
Tomek-RTU/RTR-105
b8f0e785c120f885fee8ee2c6a86a669b9e94927
ffd35bfbb68137c1b5a849821403f502ec776dd8
refs/heads/main
2023-02-25T09:41:55.582023
2021-01-31T20:17:17
2021-01-31T20:17:17
312,278,164
0
0
null
null
null
null
UTF-8
Python
false
false
590
py
def sum_eo(n, t):
    """Sum even or odd numbers in range.

    Return the sum of even or odd natural numbers, in the range 1..n-1.

    :param n: The endpoint of the range. The numbers from 1 to n-1 will be summed.
    :param t: 'e' to sum even numbers, 'o' to sum odd numbers.
    :return: The sum of the even or odd numbers in the range.
             Returns -1 if `t` is not 'e' or 'o'.
    """
    if t == "e":
        start = 2
    elif t == 'o':
        start = 1
    else:
        return -1
    return sum(range(start, n, 2))


x = sum_eo(11, 'spam')
print(x)
[ "noreply@github.com" ]
Tomek-RTU.noreply@github.com
085edd24ce10bce702e4768e1623e93a1a1a1fac
cb515f8ab202a6a55a8a294824a3c9f3932ffdc6
/src/preparing_data.py
0134752a7569f089bc584d93b16a5d24d05aeadf
[]
no_license
BobXiao97/DL-Project
1ccb6da9651f95464e1d8ded56e4d927321842f9
dc6074f917e471b1552ab005e87ad6dfc89edeea
refs/heads/main
2023-04-24T14:07:40.978318
2021-05-20T12:43:49
2021-05-20T12:43:49
369,202,308
0
0
null
null
null
null
UTF-8
Python
false
false
1,326
py
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string

all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1  # Plus EOS marker

def findFiles(path): return glob.glob(path)

# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )

# Read a file and split into lines
def readLines(filename):
    lines = open(filename, encoding='utf-8').read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]

# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('../input/english_words/words.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines

n_categories = len(all_categories)

if n_categories == 0:
    raise RuntimeError('Data not found. Make sure that you downloaded data '
        'from https://download.pytorch.org/tutorial/data.zip and extract it to '
        'the current directory.')
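# Quick check of the ASCII normalisation above (the classic example from the
# PyTorch char-RNN tutorial this file is derived from):
#
#     >>> unicodeToAscii('Ślusàrski')
#     'Slusarski'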
[ "xtq1997@gmail.com" ]
xtq1997@gmail.com
27cc4cebf599c8d3b7a61be91fd2e525d3304487
6d967da5fd95aa5e66ddbb211da40041006ca5ec
/myvenv/Lib/site-packages/pip/_vendor/packaging/markers.py
8ef134ba7b10dc55e4de37dd77c217c87ff3f97e
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
gevorkyannaira/my-first-blog
96e4458045a1dd0aa9c1f3ec69f4c829428200e0
42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16
refs/heads/master
2022-09-03T21:14:18.946448
2020-05-18T18:15:39
2020-05-18T18:15:39
264,909,108
0
0
null
null
null
null
UTF-8
Python
false
false
11,735
py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import operator
import os
import platform
import sys

from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L  # noqa

from ._compat import string_types
from ._typing import MYPY_CHECK_RUNNING
from .specifiers import Specifier, InvalidSpecifier

if MYPY_CHECK_RUNNING:  # pragma: no cover
    from typing import Any, Callable, Dict, List, Optional, Tuple, Union

    Operator = Callable[[str, str], bool]


__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """


class Node(object):
    def __init__(self, value):
        # type: (Any) -> None
        self.value = value

    def __str__(self):
        # type: () -> str
        return str(self.value)

    def __repr__(self):
        # type: () -> str
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))

    def serialize(self):
        # type: () -> str
        raise NotImplementedError


class Variable(Node):
    def serialize(self):
        # type: () -> str
        return str(self)


class Value(Node):
    def serialize(self):
        # type: () -> str
        return '"{0}"'.format(self)


class Op(Node):
    def serialize(self):
        # type: () -> str
        return str(self)


VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")  # PEP-345
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)

ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results):
    # type: (Union[ParseResults, List[Any]]) -> List[Any]
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results


def _format_marker(marker, first=True):
    # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str

    assert isinstance(marker, (list, tuple, string_types))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself it's own list. In that case we want skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}  # type: Dict[str, Operator]


def _eval_op(lhs, op, rhs):
    # type: (str, Op, str) -> bool
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())  # type: Optional[Operator]
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)


class Undefined(object):
    pass


_undefined = Undefined()


def _get_env(environment, name):
    # type: (Dict[str, str], str) -> str
    value = environment.get(name, _undefined)  # type: Union[str, Undefined]

    if isinstance(value, Undefined):
        raise UndefinedEnvironmentName(
            "{0!r} does not exist in evaluation environment.".format(name)
        )

    return value


def _evaluate_markers(markers, environment):
    # type: (List[Any], Dict[str, str]) -> bool
    groups = [[]]  # type: List[List[bool]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)


def format_full_version(info):
    # type: (sys._version_info) -> str
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    # type: () -> Dict[str, str]
    if hasattr(sys, "implementation"):
        # Ignoring the `sys.implementation` reference for type checking due to
        # mypy not liking that the attribute doesn't exist in Python 2.7 when
        # run with the `--py27` flag.
        iver = format_full_version(sys.implementation.version)  # type: ignore
        implementation_name = sys.implementation.name  # type: ignore
    else:
        iver = "0"
        implementation_name = ""

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker(object):
    def __init__(self, marker):
        # type: (str) -> None
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc : e.loc + 8]
            )
            raise InvalidMarker(err_str)

    def __str__(self):
        # type: () -> str
        return _format_marker(self._markers)

    def __repr__(self):
        # type: () -> str
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        # type: (Optional[Dict[str, str]]) -> bool
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
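# Usage sketch of this module's public API (results depend on the running
# interpreter; the second call overrides the detected environment):
#
#     >>> from pip._vendor.packaging.markers import Marker
#     >>> Marker('python_version >= "2.7"').evaluate()
#     True
#     >>> Marker('os_name == "nt"').evaluate({"os_name": "posix"})
#     False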
[ "gevorkyannaira5@gmail.com" ]
gevorkyannaira5@gmail.com
7e36bafbe21309c0d2d4b0ba7b4d49f77613bb64
574b1fd6828253ce9be4a232b3625b55a54aec41
/PythonUFOCUSNZ/scrape.py
67eb7699e9af092bbad33030135e43d11ff7d05c
[]
no_license
alpha-beta-soup/nz-ufo-sightings
ebec50fb62b2274ae02f53ea2e75604b6441b7b4
562e6ac2d7f94d65b74e517af677ddb8085405d4
refs/heads/master
2021-05-16T02:08:41.348029
2017-05-19T11:25:24
2017-05-19T11:25:24
38,192,999
0
0
null
null
null
null
UTF-8
Python
false
false
26,669
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script to parse UFO reports from UFOcusNZ
Author: Richard Law
Contact: richard.m.law@gmail.com

Handy: https://www.airpair.com/python/posts/using-python-and-qgis-for-geospatial-visualization
'''

import dateutil.parser
from urllib import urlopen
import re
import string
import os
import HTMLParser
import multiprocessing

# pylint: disable=import-error
from BeautifulSoup import BeautifulSoup
import pandas as pd
from geopy.geocoders import (Nominatim
                             # OpenMapQuest
                             )
from geopy.exc import GeocoderTimedOut
import json
from geojson import (Point,
                     Feature,
                     FeatureCollection)


def handle_special_date_exception(date_string, exc):
    '''
    There are several special cases of weird, human-entered dates in the
    source information. Some of this is just formatted in a way that
    dateutil.parse cannot interpret. Others are date ranges for observations.
    This function should be called when an exception is noted by dateutil
    when parsing a date string. If the date_string dateutil is attempting to
    interpret is in the list, then the "corrected" date is returned, also as
    a string. Otherwise the Exception `exc` is raised.

    This function lists these special cases as a dictionary: the value of
    each special-case-key is my interpretation of what it is best recorded
    as. This is solely down to my judgement, and date range information is
    deliberately lost as I can't yet be bothered considering that as a
    possibility.
    '''
    exceptions = {
        'Monday 17 or Tuesday 18 May 2010': '17 May 2010',
        'Sunday 26 Sept 2010': '26 September 2010',
        'late October 2010': '27 October 2010',
        'first week of November': '1 November 2010',
        'between 1-8 June 2013': '1 June 2013',
        'week of 12-14 May 2014': '12 May 2014',
        '21 Octover 2014': '21 October 2014',
        'early May 2015': '3 May 2015',
        'Late August or early September, 1971': '31 august 1971',
        'Last quarter of 1999': '15 November 1999',
        'Exact date unknown; between 1957 and 1968': '1 January 1957',
        'mid October 2013': '15 October 2013'
    }
    if date_string.strip() in exceptions.keys():
        return exceptions[date_string.strip()]
    else:
        err = 'dateutil could not parse "{}"'.format(date_string)
        print '\n{error}\n'.format(error=err)
        raise exc


def parse_date(date_string):
    '''
    Attempts to parse a string representing a datetime into a datetime object
    '''
    if date_string is not None:
        date_string = date_string.replace('NEW', '').strip()
        # date_string = filter(lambda x: x in string.printable, date_string)
        date_string = ''.join(
            [item for item in date_string if item in string.printable])
        try:
            date_string = dateutil.parser.parse(date_string)
        # pylint: disable=broad-except
        except Exception, exc:
            date_string = handle_special_date_exception(date_string, exc)
            date_string = parse_date(date_string)
    return date_string


# pylint: disable=too-many-return-statements
def return_next_html_elem(soup, sighting_property, to_find='td', pattern='{}:'):
    '''
    Returns the subsequent HTML `to_find` element after <sighting_property>
    '''
    assert sighting_property in [
        'Date', 'Time', 'Location', 'Features/characteristics',
        'Special features/characteristics', 'Description'
    ]
    assert soup is not None
    pattern_re = re.compile(pattern.format(sighting_property))
    results = soup.find(to_find, text=pattern_re)
    if results is None:
        # Try a variety of corner cases
        # Sometimes it's "special"
        if sighting_property == 'Features/characteristics':
            return return_next_html_elem(soup, 'Special features/characteristics')
        # Sometimes the colon is left off
        if ':' in pattern:
            pattern = '{}'
            return return_next_html_elem(
                soup, sighting_property, to_find=to_find, pattern=pattern)
        # Try with a strong tag
        if to_find != 'strong' and to_find != 'span':
            return return_next_html_elem(
                soup, sighting_property, to_find='strong')
        # Try with a span tag
        if to_find != 'span':
            return return_next_html_elem(
                soup, sighting_property, to_find='span')
        # Sometimes the html is mangled with <br> tags
        if '<br/>' not in pattern and \
                soup.get_text is not None and soup.find('br'):
            # text = filter(None, soup.get_text().strip().split("\n"))
            text = [
                item for item in soup.get_text().strip().split("\n") if item
            ]
            if pattern.format(sighting_property) not in text:
                return None  # Simply doesn't exist
            return '<br>'.join(text[text.index('Description') + 1:])
        # If all else fails
        return None
    # Once the identifier is found, grab the next table row, which is the *data*
    try:
        result = results.findNext('td').text
    except Exception, exc:
        raise exc
    # Remove &nbsp;
    result = result.replace('&nbsp;', '')
    # Some final encoding issues
    if isinstance(result, basestring):
        result = result.encode('utf8')
    else:
        result = unicode(result).encode('utf8')
    return result


def substitutions_for_known_issues(locations):
    '''
    Substitutes bad strings for better ones. Hard earned through some trial
    and error.
    '''
    corrections = {
        # Nominatim doesn't like this
        'Coromandel Peninsula': 'Coromandel',
        # Pakeha-ism
        'Whangaparoa': 'Whangaparaoa',
        # There is no Pukekohe, Frankton
        'Pukekohe, Frankton': 'Pukekohe, Franklin',
        # Nominatim doesn't understand "West Auckland"
        'west Auckland': 'Henderson, Auckland',
        'Waitakere City': 'Waitakere',
        'Taumaranui': 'Taumarunui',
        'Taumaranui, King Country': 'Taumarunui',
        'Otematata, Waitati Valley, North Otago': 'Otematata',
        'Takapuna Beach': 'Takapuna',
        'Golden Springs, Reporoa, Bay of Plenty': 'Reporoa',
        # Manually checked
        'Puketona Junction, south of Kerikeri, New Zealand': 'Te Ahu Ahu Road, New Zealand',
        # Ohinepaka not in OSM; this is nearest landmark
        'Ohinepaka, Wairoa': 'Kiwi Valley Road, Wairoa',
        'Gluepot Road, Oropi': 'Gluepot Road',
        'Rimutaka Ranges, Wairarapa': 'Rimutaka, Wairarapa',
        # Ashburton is not in Otago
        'Ashburton, Otago': 'Ashburton, Ashburton District',
        'National Park village, Central': 'National Park',
        'Mareawa, Napier': 'Marewa, Napier',
        'Clarence River mouth, Lower Marlborough,': 'Clarence',
        'Oputama, Mahia Peninsula': 'Opoutama, Mahia',
        'Taupo, Central': 'Taupo',
        'The Ureweras': 'Sister Annie Road, Whakatane',
        'Spray River': 'Waihopai Valley Road',
        'Viewed from Cambridge, but activity over Hamilton': 'Hamilton',
        'Cashmere Hills, Christchurch': 'Cashmere, Christchurch',
        # NOTE: Nominatim does not understand 'Wairarapa',
        'Wairarapa': 'Wellington',
        'Whangapoua Beach': 'Whangapoua',
        'Marychurch Rd, Cambridge, Waikato': 'Marychurch Rd, Waikato',
        'Waihi, Coromandel/Hauraki': 'Waihi, Hauraki',
        'Waihi, Coromandel': 'Waihi, Hauraki',
        'Eastern BOP': 'Bay of Plenty',
        'BOP': 'Bay of Plenty',
        'Kaweka Ranges, Hawkes Bay': 'Kaweka',
        'Waikawa Beach, Levin': 'Waikawa Beach, Horowhenua',
        'Waikawa Beach, Otaki': 'Waikawa Beach, Horowhenua',
        # The King Country is not an actual district
        'King Country': '',
        'Waimate, between Timaru and Oamaru': 'Waimate',
        'Alderman Islands, some 20km east of Tairua &amp; Pauanui, Coromandel': 'Ruamahuaiti Island',
        'Tapeka Point: Bay of Islands': 'Tapeka',
        'Raglan Beach': 'Raglan',
        'Waitemata Harbour': '',
        'North Shore City': 'North Shore',
        'Waitarere Beach, Levin': 'Waitarere Beach',
        'Snells Beach, Warkworth': 'Snells Beach',
        "Snell's Beach": 'Snells Beach',
        'Birds ferry Road, Westport': 'Birds Ferry Road',
        'Waiheke Island': 'Waiheke',
        'Forrest Hill, Sunnynook': 'Forrest Hill',
        'South Auckland': 'Auckland',
        'Otara, East Tamaki': 'Otara'
    }
    for loc in locations:
        for k in corrections.keys():
            if k in loc:
                yield loc.replace(k, corrections[k])


def strip_nonalpha_at_end(location):
    '''
    Remove non-letter characters at the end of the string
    '''
    valid = ['(', ')']
    loc = location
    if not loc[-1].isalpha():
        for char in reversed(location):
            if not char.isalpha() and char not in valid:
                loc = loc[:-1]
            else:
                return loc
    return loc


# pylint: disable=dangerous-default-value
def strip_conjunctions_at_start(
        location, conjunctions=['of', 'to', 'and', 'from', 'between']):
    '''
    Removes conjunctions at the start of a string.
    '''
    for conjunction in conjunctions:
        if location.strip().startswith(conjunction):
            yield location.strip()[len(conjunction):].strip()
        else:
            yield location


# pylint: disable=anomalous-backslash-in-string
# pylint: disable=invalid-name
def return_location_without_non_title_case_and_short_words(
        location, short=1, pattern='\W*\b\w{{short}}\b'):
    '''
    Does what it says, useful to remove guff from a string representing a
    location, which frequently improves poor geocoding.
    '''
    location = ' '.join([s for s in location.split(' ') if s.istitle()])
    pattern = re.compile(pattern.format(short=short))
    match = pattern.findall(location)
    for sub in match:
        location = location.replace(sub, '')
    return location


# pylint: disable=anomalous-backslash-in-string
def yield_locations_without_symbol(location, pattern, symbol):
    '''
    Generator function; best illustrated with the following:
    >>> location = 'Takanini/Papakura, Auckland, New Zealand'
    >>> for loc in get_locations_with_slash(location):
    >>>     print loc
    'Takanini, Auckland, New Zealand'
    'Papakura, Auckland, New Zealand'
    '''
    if symbol not in location:
        return
    pattern = re.compile(pattern)
    for m in pattern.finditer(location):
        m = m.group()
        for sub in m.split(symbol):
            yield location.replace(m, sub)


# pylint: disable=anomalous-backslash-in-string
def return_location_without_bracketed_clause(location, pattern='\s\([\w\s]+\)'):
    '''
    Returns location without a bracketed clause:
    >>> loc = 'Manukau (near Auckland airport), Auckland, New Zealand'
    >>> return_location_without_bracketed_clause(loc)
    Manukau, Auckland, New Zealand
    '''
    if '(' not in location or ')' not in location:
        return location
    pattern = re.compile(pattern)
    return pattern.sub('', location)


# pylint: disable=no-init
# pylint: disable=too-few-public-methods
class Bcolors(object):
    '''
    Print colours to the terminal! Pretty rainbows...
    '''
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


# pylint: disable=too-many-instance-attributes
class UFOSighting(object):
    '''
    Object representing a UFO sighting, with a URL, date, time, location,
    some features, a text description, and geocoding metadata.
    '''

    # pylint: disable=too-many-arguments
    def __init__(self, source, date, time, location, features, description):
        self.source = source  # Link to page
        self.date = parse_date(date)  # Python date
        self.time = time  # String time
        self.location = location  # String location (will be used in geocode)
        self.features = features
        self.description = description
        # These can be updated by calling geocode(); but don't do that in
        # __init__ as nominatim needs to query a REST API
        self.latitude = None
        self.longitude = None
        self.haslocation = None  # Unknown state
        self.geocoded_to = ""
        self.geocode_attempts = 1
        self.already_attempted = set([])

    def __str__(self):
        text = '<0> UFOSighting <0>'
        for k, val in self.__dict__.items():
            text += '\n{k}: {v}'.format(k=k.title(), v=val)
        text += '\n\nCopyright UFOCUS NZ\nUsed without permission'
        return text

    def __tuple__(self):
        return (self.date, self.time, self.location, self.geocoded_to,
                self.geocode_attempts, self.latitude, self.longitude,
                self.features, self.description)

    def __geojson__(self, exclude=['longitude', 'latitude', 'already_attempted']):
        h = HTMLParser.HTMLParser()
        if not self.haslocation:
            return None
        return Feature(
            geometry=Point((self.longitude, self.latitude)),
            properties={
                key: h.unescape(str(value)) for key, value in
                self.__dict__.items() if key not in exclude
            }
        )

    def is_valid(self):
        '''
        Returns boolean indicating whether or not an HTML actually has content
        '''
        for prop in self.__tuple__():
            if prop is not None:
                return True
        return False

    def attempt_geocode(self, location, bias='New Zealand', timeout=6,
                        exactly_one=True, debug=True):
        '''
        Attempts a geocode, returning None, False, or True according to
        whether or not the operation is successful, or not, or somehow
        invalid (None). If successful, has side effect of setting
        self.latitude, self.longitude, and self.geocoded_to
        '''
        geolocator = Nominatim(country_bias=bias, timeout=timeout)
        # geolocator = OpenMapQuest(timeout=timeout)
        location = location.strip()
        # Remove repeat white space
        location = ' '.join([segment for segment in location.split()])
        if location in self.already_attempted:
            return None
        self.already_attempted.add(location)
        if not location:
            return False  # Failure
        # Strip non-alpha characters at end of location
        location = strip_nonalpha_at_end(location)
        if debug:
            print repr(location),
        try:
            geocoded = geolocator.geocode(location, exactly_one=exactly_one)
        except GeocoderTimedOut:
            # Just try again
            geocoded = self.attempt_geocode(location)
        if geocoded is not None:
            self.haslocation = True
            self.latitude = geocoded.latitude
            self.longitude = geocoded.longitude
            self.geocoded_to = location
            if debug:
                print self.latitude, self.longitude,
                print Bcolors.OKBLUE + '← success' + Bcolors.ENDC
            return True  # Success
        else:
            self.haslocation = False
            if debug:
                print Bcolors.FAIL + '← fail' + Bcolors.ENDC
            return None  # No result, but there are more options to try

    def geocode(self, debug=False):
        '''
        Updates self.latitude and self.longitude if a geocode is successful;
        otherwise leaves them as the default (None). Uses Nominatim.
        Returns False if the location could not be geocoded, returns True
        when the geocode is successful.

        Tip: use geocode=False when instantiating, and then do a batch
        geocode using multiple threads with multiprocessing!
        '''
        if not self.location:
            return False
        location = self.location
        # TODO:
        # '12:00 am, New Zealand' -37.7894134 175.2850399
        if location == '12:00 am':
            return None
        if debug:
            print repr(self.location) + ' ← original'
        # Remove HTML entities
        location = location.encode("utf8")
        for char in ['&rsquo;', '\r', '\n']:
            location = location.replace(char, '')
        # Remove repeat white space
        location = ' '.join([segment for segment in location.split()])
        location = strip_nonalpha_at_end(location)
        # North Island and South Island are not useful to the geocoder
        for island in [
                'North Island', 'South Island', 'NI', 'SI', 'Nth Island',
                'Sth Island', 'North Is', 'South Is'
        ]:
            if not strip_nonalpha_at_end(location).endswith(island) and not \
                    strip_nonalpha_at_end(location).endswith(island + ', New Zealand'):
                continue
            location = location.replace(island, '')
        # It helps to add "New Zealand" even though a country bias is used
        # NOTE that there are (for some reason) some non-NZ observations
        non_nz_places = ['Antarctica', 'Timor Sea', 'South Pacific Ocean']
        append_nz = True
        for place in non_nz_places:
            if place in location:
                append_nz = False
        if append_nz:
            location = location.replace(' NZ', ' New Zealand')
            if not location.strip().endswith(','):
                location = location.strip() + ','
            if 'New Zealand' not in location:
                location = location.strip() + ' New Zealand'
        while True:
            # Try the location description, without leading conjunctions
            for loc in strip_conjunctions_at_start(location):
                gc = self.attempt_geocode(loc)
                if gc is not None:
                    return gc
            # If there's a slash in the name, split it into two attempts
            attempts_copy = self.already_attempted.copy()
            for loc in attempts_copy:
                for loc in yield_locations_without_symbol(loc, '(\w*/[\w\s]*)', '/'):
                    gc = self.attempt_geocode(loc)
                    if gc is not None:
                        return gc
            # If there's an ampersand in the name, split it into two attempts
            attempts_copy = self.already_attempted.copy()
            for loc in attempts_copy:
                for loc in yield_locations_without_symbol(
                        loc, '(\w*\s&amp;\s\w*)', '*'):
                    gc = self.attempt_geocode(loc)
                    if gc is not None:
                        return gc
            # Try without a bracketed clause
            attempts_copy = self.already_attempted.copy()
            for loc in attempts_copy:
                gc = self.attempt_geocode(
                    return_location_without_bracketed_clause(loc))
                if gc is not None:
                    return gc
            # Try with some common substitutions or known errors:
            attempts_copy = self.already_attempted.copy()
            for loc in substitutions_for_known_issues(attempts_copy):
                gc = self.attempt_geocode(loc)
                if gc is not None:
                    return gc
            # Try again without non-title-case words,
            # and without one-letter words
            attempts_copy = self.already_attempted.copy()
            for loc in attempts_copy:
                loc = return_location_without_non_title_case_and_short_words(loc)
                gc = self.attempt_geocode(loc)
                if gc is not None:
                    return gc
            self.geocode_attempts += 1
            # Remove the first word of the location for next attempt
            location = ' '.join(location.split(' ')[1:])
            # While loop repeats


def get_all_sightings_as_list_of_UFOSighting_objects(link, geocode=True, debug=True):
    '''
    Returns a list of UFOSighting objects, scraped from one link to a page
    of sighting reports.
    <link> is a URL (string) that leads to a page of sighting reports on
    UFOCUS NZ's website. Must be in HTML format (<a href="the/url/path">)
    <geocode> defaults to false as it isn't compulsory and takes ages to
    compute (it needs to query a REST API).
    '''
    sightings = []
    for table in BeautifulSoup(urlopen(link)).findAll('table', {'cellpadding': '3'}):
        date = return_next_html_elem(table, 'Date')
        time = return_next_html_elem(table, 'Time')
        location = return_next_html_elem(table, 'Location')
        features = return_next_html_elem(table, 'Features/characteristics')
        description = return_next_html_elem(table, 'Description')
        # Work-around to re-build paragraph breaks, which get lost because
        # they are <br> tags.
        if description is not None and description.strip():
            description_with_breaks = ''
            split_description = [d for d in description.split('.')
                                 if d is not None and d.strip()]
            for i, d in enumerate(split_description[:-1]):
                if split_description[i + 1][0].isalpha():
                    d += '.<br><br>'
                description_with_breaks += d
            description = description_with_breaks
            description += split_description[-1] + '.'
        ufo = UFOSighting(link, date, time, location, features, description)
        if not ufo.is_valid():
            # Ignore UFO sightings that have been misidentified
            # (Empty HTML tables)
            continue
        if geocode:
            if not ufo.geocode(debug=debug):
                # Ignore UFO sightings that cannot be geocoded
                continue
        sightings.append(ufo)
    return sightings


def export_ufos_to_csv(list_of_UFOSighting_objects):
    '''
    Given a list of all the UFO sightings found on the website as
    UFOSighting objects, exports them to a CSV.
    '''
    # Convert UFO objects to tuples
    all_sightings_as_tuples = [
        ufo.__tuple__() for ufo in list_of_UFOSighting_objects
    ]
    # Create a pandas DataFrame from the list of tuples
    ufos_df = pd.DataFrame(
        all_sightings_as_tuples,
        columns=[
            'Date', 'Time', 'Location', 'Geocoded As', 'Geocode Attempts',
            'Latitude', 'Longitude', 'Features', 'Description'
        ])
    # Export the pandas DF to CSV
    ufos_df.to_csv(
        os.path.join(os.path.dirname(__file__), 'ufos_data.csv'),
        index=False,
        encoding='utf-8')
    return None


def export_ufos_to_geojson(list_of_UFOSighting_objects):
    '''
    Given a list of all the UFO sightings found on the website as
    UFOSighting objects, exports them to GeoJSON. The list is sorted by
    date, because the leaflet timeslider doesn't sort on a key, and I can't
    work out how to do it in JavaScript. Therefore it also removes
    observations that don't have a date
    '''
    list_of_UFOSighting_objects = [
        l for l in list_of_UFOSighting_objects if l is not None
    ]
    list_of_UFOSighting_objects = [
        l for l in list_of_UFOSighting_objects if l.date
    ]
    list_of_UFOSighting_objects.sort(key=lambda x: x.date, reverse=False)
    fc = FeatureCollection([
        ufo.__geojson__() for ufo in list_of_UFOSighting_objects
        if ufo.haslocation
    ])
    with open(
            os.path.join(os.path.dirname(__file__), 'ufos_data.geojson'),
            'w') as outfile:
        json.dump(fc, outfile)


def geocode_worker(sighting):
    '''
    A single geocoding worker, to be run in its own wee process...
    and probably rate-limited
    '''
    sighting.geocode(debug=True)
    return sighting


def main(debug=False):
    '''Main loop'''

    def valid(tag):
        '''
        <tag> = an html tag that has an href
        Defines what an interesting hyperlink looks like, and returns True
        if the tag meets this criteria, False otherwise
        '''
        return 'New-Zealand-UFO-Sightings-' in tag['href']

    # Sightings page
    base_url = "http://www.ufocusnz.org.nz/content/Sightings/24.aspx"
    home_page = BeautifulSoup(urlopen(base_url))

    # Get list of valid links from home page
    # There is one for each year
    links = sorted(
        set([li for li in home_page.findAll(href=True) if valid(li)]))

    # There are some other links scattered around the website that have
    # reports in the same format
    # pylint: disable=line-too-long
    additional_links = [
        'http://www.ufocusnz.org.nz/content/Police/101.aspx',
        'http://www.ufocusnz.org.nz/content/Selection-of-Historic-Sighting-Reports/109.aspx',
        'http://www.ufocusnz.org.nz/content/1965---Unidentified-Submerged-Object-%28USO%29-spotted-by-DC-3-Pilot/82.aspx',
        'http://www.ufocusnz.org.nz/content/1968---Yellow-Disc-Descends-into-Island-Bay,-Wellington/104.aspx',
        'http://www.ufocusnz.org.nz/content/1974---Large-Object-Emerges-from-Sea-off-Aranga-Beach,-Northland/105.aspx',
        'http://www.ufocusnz.org.nz/content/1957-1968---Silver-Bullet-Bursts-Through-Antarctic-Ice/106.aspx'
    ]
    additional_links = [
        BeautifulSoup(str('<a href="{}">Link</a>'.format(li))).findAll(
            href=True)[0] for li in additional_links
    ]
    # NOTE see here for more, although they conform less to the expected structure
    # http://www.ufocusnz.org.nz/content/Aviation/80.aspx

    links += additional_links
    links = set([l['href'] for l in links])

    # TODO caching

    # Flatten lists of UFOs for each link
    all_sightings = reduce(
        lambda x, y: x + y, [
            get_all_sightings_as_list_of_UFOSighting_objects(
                link, geocode=False, debug=debug) for link in links
        ])

    pool = multiprocessing.Pool(
        processes=max(multiprocessing.cpu_count() - 2, 1))
    results = pool.map(geocode_worker, all_sightings)

    # export_ufos_to_csv(results)
    export_ufos_to_geojson(results)


if __name__ == '__main__':
    main(debug=True)
    exit(0)
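# Illustration of the date handling above (the input string is taken from
# the special-case table in handle_special_date_exception):
#
#     >>> parse_date('Monday 17 or Tuesday 18 May 2010')
#     datetime.datetime(2010, 5, 17, 0, 0)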
[ "richard.m.law@gmail.com" ]
richard.m.law@gmail.com
fadb927384553d0f8610506e70c5e984bda1d3fa
32dcc04d47fa832d4bce3c528dd8645b1992dc5c
/StagingEngine/src/copyFileFromRawToFailed.py
d9081b9bcbc7358e90fba7c2832b9755067c4093
[ "Apache-2.0" ]
permissive
glenngillen/accelerated-data-lake
f496a795a1ed11d3719df9febedf2706839dac2a
683249395a8cb7e748753a7ace7ead2ebc2d8af0
refs/heads/master
2020-05-15T00:18:09.174541
2019-04-19T10:09:39
2019-04-19T10:09:39
182,009,548
0
1
Apache-2.0
2019-04-18T03:26:00
2019-04-18T03:25:59
null
UTF-8
Python
false
false
965
py
import boto3
import traceback


class CopyFileFromRawToFailedException(Exception):
    pass


s3 = boto3.client('s3')


def lambda_handler(event, context):
    copy_file_to_failed(event, context)
    return event


def copy_file_to_failed(event, context):
    try:
        raw_bucket = event['fileDetails']['bucket']
        raw_key = event['fileDetails']['key']
        failed_bucket = event['settings']['failedBucket']

        print(
            'Copying object {} from bucket {} to key {} in failed bucket {}'
            .format(raw_key, raw_bucket, raw_key, failed_bucket)
        )

        # Copy the failed file to the failed bucket.
        copy_source = {'Bucket': raw_bucket, 'Key': raw_key}
        s3.copy(copy_source, failed_bucket, raw_key)

        # Delete the failed file from raw.
        s3.delete_object(Bucket=raw_bucket, Key=raw_key)
    except Exception as e:
        traceback.print_exc()
        raise CopyFileFromRawToFailedException(e)
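# Minimal event shape this handler expects (bucket and key names are
# invented for illustration):
#
#     {
#         "fileDetails": {"bucket": "datalake-raw", "key": "incoming/file.csv"},
#         "settings": {"failedBucket": "datalake-failed"}
#     }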
[ "pmmacey@amazon.com" ]
pmmacey@amazon.com
b8b49ba5bc255e5615ec2889ec70661333b1a2c2
4252102a1946b2ba06d3fa914891ec7f73570287
/pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py
6b47b5b33566ea24783e9ae4019290a4fabb845d
[]
no_license
lpigou/chalearn2014
21d487f314c4836dd1631943e20f7ab908226771
73b99cdbdb609fecff3cf85e500c1f1bfd589930
refs/heads/master
2020-05-17T00:08:11.764642
2014-09-24T14:42:00
2014-09-24T14:42:00
24,418,815
2
3
null
null
null
null
UTF-8
Python
false
false
4,927
py
import nose
import unittest

import numpy as np

import theano

from localdot import LocalDot

from ..test_matrixmul import SymbolicSelfTestMixin


class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
    channels = 3
    bsize = 10     # batch size
    imshp = (32, 32)
    ksize = 5
    nkern_per_group = 16
    subsample_stride = 1
    ngroups = 1

    def rand(self, shp):
        return np.random.rand(*shp).astype('float32')

    def setUp(self):
        np.random.seed(234)
        assert self.imshp[0] == self.imshp[1]
        fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
        #fModulesR += 1  # XXX GpuImgActs crashes w/o this??
        fModulesC = fModulesR
        self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
                       self.ksize, self.ksize, self.ngroups,
                       self.nkern_per_group)
        self.ishape = (self.ngroups, self.channels // self.ngroups,
                       self.imshp[0], self.imshp[1], self.bsize)
        self.hshape = (self.ngroups, self.nkern_per_group, fModulesR,
                       fModulesC, self.bsize)

        filters = theano.shared(self.rand(self.fshape))

        self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
                          subsample=(self.subsample_stride,
                                     self.subsample_stride))

        self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
        self.xrval = self.rand(self.ishape)

        self.xl = theano.shared(self.xlval)
        self.xr = theano.shared(self.xrval)

    # N.B. the tests themselves come from SymbolicSelfTestMixin


class TestLocalDotLargeGray(TestLocalDot32x32):
    channels = 1
    bsize = 128
    imshp = (256, 256)
    ksize = 9
    nkern_per_group = 16
    subsample_stride = 2
    ngroups = 1
    n_patches = 3000

    def rand(self, shp):
        return np.random.rand(*shp).astype('float32')

    # not really a test, but important code to support
    # Currently exposes error, by e.g.:
    #   CUDA_LAUNCH_BLOCKING=1
    #   THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
    #   nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
    def run_autoencoder(
            self,
            n_train_iter=10000,  # -- make this small to be a good unit test
            rf_shape=(9, 9),
            n_filters=1024,
            dtype='float32',
            module_stride=2,
            lr=0.01,
            show_filters=True,
            ):
        if show_filters:
            # import here to fail right away
            import matplotlib.pyplot as plt

        try:
            import skdata.vanhateren.dataset
        except ImportError:
            raise nose.SkipTest()

        # 1. Get a set of image patches from the van Hateren data set
        print 'Loading van Hateren images'
        n_images = 50
        vh = skdata.vanhateren.dataset.Calibrated(n_images)
        patches = vh.raw_patches((self.n_patches,) + self.imshp,
                                 items=vh.meta[:n_images],
                                 rng=np.random.RandomState(123),
                                 )
        patches = patches.astype('float32')
        patches /= patches.reshape(self.n_patches,
                                   self.imshp[0] * self.imshp[1])\
            .max(axis=1)[:, None, None]
        # TODO: better local contrast normalization

        if 0 and show_filters:
            plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
            plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
            plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
            plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
            plt.show()

        # -- Convert patches to localdot format:
        #    groups x colors x rows x cols x images
        patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
        print 'Patches shape', patches.shape, self.n_patches, patches5.shape

        # 2. Set up an autoencoder
        print 'Setting up autoencoder'
        hid = theano.tensor.tanh(self.A.rmul(self.xl))
        out = self.A.rmul_T(hid)
        cost = ((out - self.xl) ** 2).sum()
        params = self.A.params()
        gparams = theano.tensor.grad(cost, params)
        train_updates = [(p, p - lr / self.bsize * gp)
                         for (p, gp) in zip(params, gparams)]
        if 1:
            train_fn = theano.function([], [cost], updates=train_updates)
        else:
            train_fn = theano.function([], [], updates=train_updates)
            theano.printing.debugprint(train_fn)

        # 3. Train it
        params[0].set_value(0.001 * params[0].get_value())
        for ii in xrange(0, self.n_patches, self.bsize):
            self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize],
                              borrow=True)
            cost_ii, = train_fn()
            print 'Cost', ii, cost_ii
            if 0 and show_filters:
                self.A.imshow_gray()
                plt.show()

        assert cost_ii < 0  # TODO: determine a threshold for detecting
                            #       regression bugs
[ "lionelpigou@gmail.com" ]
lionelpigou@gmail.com
1d2a41fffb8bc04a5959e3e940c22e672c6fc9c7
7eb0a3429f021f1a046bed8e667a6911d789d065
/ProxyPattern/python/Client/Proxy.py
ae9f03255b6d7784273d6a6be675c7e5e280f8e6
[ "MIT" ]
permissive
gama79530/DesignPattern
d99431711fda65cfb7d790b2959ba0a712fa3f86
4730c50cdd839072ae50eef975cbed62b5a2a41c
refs/heads/master
2023-08-03T04:35:54.561642
2023-06-08T03:13:08
2023-07-31T12:32:13
269,562,362
0
0
null
null
null
null
UTF-8
Python
false
false
383
py
import abc


class Proxy(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def addBalanceAccount(self, balanceAccount: int):
        return NotImplemented

    @abc.abstractmethod
    def subtractBalanceAccount(self, balanceAccount: int):
        return NotImplemented

    # property must be the outermost decorator, with abstractmethod innermost;
    # the reverse order raises an AttributeError at class creation time
    @property
    @abc.abstractmethod
    def balanceAccount(self) -> int:
        return NotImplemented
[ "gama79530@gmail.com" ]
gama79530@gmail.com
1b095a64a3e9bad8edd45e2e519d86adf5921b4d
2fe368714dc0e09b70fc2f8c0e683c09d18e8187
/dazhu/fmtTime.py
57bafbd95b15b613a5286b591b21e83cd7a96e47
[]
no_license
kamasamikon/bigkillmachine
0d080f43011ed772780f34d3a841e7edbc37e4cb
97b672d889d98c04f892599d52a2605823a7c172
refs/heads/master
2021-06-30T22:11:16.273038
2020-12-28T06:40:49
2020-12-28T06:40:49
73,879,397
0
0
null
null
null
null
UTF-8
Python
false
false
477
py
#!/usr/bin/env python

import sys

s_days = 60 * 60 * 24
s_hours = 60 * 60
s_minutes = 60

def fmtTime(t):
    s = ""

    if t >= s_days:
        s += "%d days " % (t / s_days)
        t = t % s_days

    if t >= s_hours:
        s += "%d hours " % (t / s_hours)
        t = t % s_hours

    if t >= s_minutes:
        s += "%d minutes " % (t / s_minutes)
        t = t % s_minutes

    s += "%d seconds" % t

    return s

if __name__ == "__main__":
    print fmtTime(int(sys.argv[1]))
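# For example, 90061 seconds (86400 + 3600 + 60 + 1) is one of everything:
#
#     $ python fmtTime.py 90061
#     1 days 1 hours 1 minutes 1 seconds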
[ "kamasamikon@gmail.com" ]
kamasamikon@gmail.com
c47121be56a4b42909ecd120c8348fc7a11410ec
445cba890decca8780d926a35687fac4298ce404
/utest/Pramatest.py
46f25bef726cb28bf950dcb52d1fca1b543d021d
[]
no_license
243489145/Myframe
5755d54f04a7a798071b1a2af4f872d951554fda
8749133d56e1a6d4e13140ac44e6af3e5d7100a1
refs/heads/master
2022-06-17T19:44:20.694709
2020-05-16T09:24:01
2020-05-16T09:24:01
264,391,281
0
0
null
null
null
null
UTF-8
Python
false
false
1,124
py
# -*- coding: UTF-8 -*-
import unittest

from utest import testlib
from parameterized import parameterized


# Tests the add function under utest.testlib.
# Create a test class that inherits from unittest.TestCase.
class PramaTest(unittest.TestCase):
    """
    Parameterization: the test parameters are supplied as a two-dimensional
    list via the `parameterized` library (install it if it is missing).
    The rows could equally be read from an Excel sheet.
    """

    @parameterized.expand([
        # Equivalence-class method; 80% of problems appear at the extremes.
        ['integer plus integer', 1, 1, 2],
        ['float plus float', 1.1, 1.33333333, 2.43333333],
        ['integer plus string', 1, '1', '11'],
        ['integer plus float', 1, 1.1, 2.1],
    ])
    def test_add(self, name, x, y, z):
        """
        :param name: a label to tell the test cases apart
        :param x: first operand
        :param y: second operand
        :param z: the expected result to compare against
        """
        print(name)
        self.assertEqual(testlib.add(x, y), z)


# The main block runs the tests through unittest's default runner;
# the run configuration can also be edited in the IDE.
if __name__ == '__main__':
    unittest.main()
[ "243489145@qq.com" ]
243489145@qq.com
d063d7cbffb4226f8efbf9db037d712b216b8bb7
a8547f73463eef517b98d1085430732f442c856e
/pysam-0.13-py3.6-macosx-10.13-x86_64.egg/pysam/libcbgzf.py
366d86d29872fb9a2271270af8be79da14542344
[]
no_license
EnjoyLifeFund/macHighSierra-py36-pkgs
63aece1b692225ee2fbb865200279d7ef88a1eca
5668b5785296b314ea1321057420bcd077dba9ea
refs/heads/master
2021-01-23T19:13:04.707152
2017-12-25T17:41:30
2017-12-25T17:41:30
102,808,884
0
0
null
null
null
null
UTF-8
Python
false
false
303
py
def __bootstrap__():
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, 'libcbgzf.cpython-36m-darwin.so')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__, __file__)
__bootstrap__()
[ "Raliclo@gmail.com" ]
Raliclo@gmail.com
c839051c620fd066513fce874f55bfe78f1dc4e4
540b24e3ec47a2cb4baefb6fe19d6c97c05b41c6
/subversion/tools/hook-scripts/svn2feed.py
c3abe8c1eb2dc1858dc594f397eb2d74cd7b596e
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-other-permissive", "X11", "Apache-2.0", "BSD-2-Clause", "HPND-Markus-Kuhn", "LicenseRef-scancode-unicode", "MIT" ]
permissive
Quantum-Platinum-Cloud/subversion
dedeff0955fc6d03df445d1cb1b9a6d058e47c72
494f46f077e41a3ef32cf315e903695ecf547f5c
refs/heads/main
2023-08-17T16:36:40.102795
2021-03-17T19:13:59
2021-10-06T05:38:16
589,011,516
1
0
null
2023-01-14T19:18:40
2023-01-14T19:18:39
null
UTF-8
Python
false
false
16,736
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================

"""Usage: svn2feed.py [OPTION...] REPOS-PATH

Generate an RSS 2.0 or Atom 1.0 feed file containing commit
information for the Subversion repository located at REPOS-PATH.  Once
the maximum number of items is reached, older elements are removed.
The item title is the revision number, and the item description
contains the author, date, log messages and changed paths.

Options:

 -h, --help             Show this help message.

 -F, --format=FORMAT    Required option.  FORMAT must be one of:
                            'rss'  (RSS 2.0)
                            'atom' (Atom 1.0)
                        to select the appropriate feed format.

 -f, --feed-file=PATH   Store the feed in the file located at PATH, which
                        will be created if it does not exist, or overwritten
                        if it does.  If not provided, the script will store
                        the feed in the current working directory, in a file
                        named REPOS_NAME.rss or REPOS_NAME.atom (where
                        REPOS_NAME is the basename of the REPOS_PATH
                        command-line argument, and the file extension depends
                        on the selected format).

 -r, --revision=X[:Y]   Subversion revision (or revision range) to generate
                        info for.  If not provided, info for the single
                        youngest revision in the repository will be
                        generated.

 -m, --max-items=N      Keep only N items in the feed file.  By default,
                        20 items are kept.

 -u, --item-url=URL     Use URL as the basis for generating feed item links.
                        This value is appended with '?rev=REV_NUMBER' to form
                        the actual item links.

 -U, --feed-url=URL     Use URL as the global link associated with the feed.

 -P, --svn-path=DIR     Look in DIR for the svnlook binary.  If not
                        provided, svnlook must be on the PATH.
"""

# TODO:
# --item-url should support arbitrary formatting of the revision number,
# to be useful with web viewers other than ViewVC.
# Rather more than intended is being cached in the pickle file. Instead of
# only old items being drawn from the pickle, all the global feed metadata
# is actually set only on initial feed creation, and thereafter simply
# re-used from the pickle each time.

# $HeadURL: https://svn.apache.org/repos/asf/subversion/branches/1.10.x/tools/hook-scripts/svn2feed.py $
# $LastChangedDate: 2016-04-30 08:16:53 +0000 (Sat, 30 Apr 2016) $
# $LastChangedBy: stefan2 $
# $LastChangedRevision: 1741723 $

import sys

# Python 2.4 is required for subprocess
if sys.version_info < (2, 4):
    sys.stderr.write("Error: Python 2.4 or higher required.\n")
    sys.stderr.flush()
    sys.exit(1)

import getopt
import os
import subprocess
try:
    # Python <3.0
    import cPickle as pickle
except ImportError:
    # Python >=3.0
    import pickle
import datetime
import time


def usage_and_exit(errmsg=None):
    """Print a usage message, plus an ERRMSG (if provided), then exit.
    If ERRMSG is provided, the usage message is printed to stderr and
    the script exits with a non-zero error code.  Otherwise, the usage
    message goes to stdout, and the script exits with a zero
    errorcode."""
    if errmsg is None:
        stream = sys.stdout
    else:
        stream = sys.stderr
    stream.write("%s\n" % __doc__)
    stream.flush()
    if errmsg:
        stream.write("\nError: %s\n" % errmsg)
        stream.flush()
        sys.exit(2)
    sys.exit(0)


def check_url(url, opt):
    """Verify that URL looks like a valid URL or option OPT."""
    if not (url.startswith('https://') \
            or url.startswith('http://') \
            or url.startswith('file://')):
        usage_and_exit("svn2feed.py: Invalid url '%s' is specified for "
                       "'%s' option" % (url, opt))


class Svn2Feed:
    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        self.repos_path = repos_path
        self.item_url = item_url
        self.feed_file = feed_file
        self.max_items = max_items
        self.feed_url = feed_url

        self.svnlook_cmd = 'svnlook'
        if svn_path is not None:
            self.svnlook_cmd = os.path.join(svn_path, 'svnlook')

        self.feed_title = ("%s's Subversion Commits Feed"
                           % (os.path.basename(os.path.abspath(self.repos_path))))
        self.feed_desc = "The latest Subversion commits"

    def _get_item_dict(self, revision):
        revision = str(revision)

        cmd = [self.svnlook_cmd, 'info', '-r', revision, self.repos_path]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        proc.wait()
        info_lines = proc.stdout.readlines()

        cmd = [self.svnlook_cmd, 'changed', '-r', revision, self.repos_path]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        proc.wait()
        changed_data = proc.stdout.readlines()

        desc = ("\nRevision: %s\nLog: %sModified: \n%s"
                % (revision, info_lines[3], changed_data))

        item_dict = {
            'author': info_lines[0].strip('\n'),
            'title': "Revision %s" % revision,
            'link': self.item_url and "%s?rev=%s" % (self.item_url, revision),
            'date': self._format_updated_ts(info_lines[1]),
            'description': "<pre>" + desc + "</pre>",
            }

        return item_dict

    def _format_updated_ts(self, revision_ts):
        # Get "2006-08-10 20:17:08" from
        # "2006-07-28 20:17:18 +0530 (Fri, 28 Jul 2006)
        date = revision_ts[0:19]
        epoch = time.mktime(time.strptime(date, "%Y-%m-%d %H:%M:%S"))
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(epoch))


class Svn2RSS(Svn2Feed):
    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
                          max_items, feed_url)
        try:
            import PyRSS2Gen
        except ImportError:
            sys.stderr.write("""
Error: Required PyRSS2Gen module not found.
You can download the PyRSS2Gen module from:
    http://www.dalkescientific.com/Python/PyRSS2Gen.html
""")
            sys.exit(1)
        self.PyRSS2Gen = PyRSS2Gen

        (file, ext) = os.path.splitext(self.feed_file)
        self.pickle_file = file + ".pickle"
        if os.path.exists(self.pickle_file):
            self.rss = pickle.load(open(self.pickle_file, "r"))
        else:
            self.rss = self.PyRSS2Gen.RSS2(
                title = self.feed_title,
                link = self.feed_url,
                description = self.feed_desc,
                lastBuildDate = datetime.datetime.now(),
                items = [])

    @staticmethod
    def get_default_file_extension():
        return ".rss"

    def add_revision_item(self, revision):
        rss_item = self._make_rss_item(revision)
        self.rss.items.insert(0, rss_item)
        if len(self.rss.items) > self.max_items:
            del self.rss.items[self.max_items:]

    def write_output(self):
        s = pickle.dumps(self.rss)
        f = open(self.pickle_file, "w")
        f.write(s)
        f.close()

        f = open(self.feed_file, "w")
        self.rss.write_xml(f)
        f.close()

    def _make_rss_item(self, revision):
        info = self._get_item_dict(revision)

        rss_item = self.PyRSS2Gen.RSSItem(
            author = info['author'],
            title = info['title'],
            link = info['link'],
            description = info['description'],
            guid = self.PyRSS2Gen.Guid(info['link']),
            pubDate = info['date'])
        return rss_item


class Svn2Atom(Svn2Feed):
    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
                          max_items, feed_url)
        from xml.dom import getDOMImplementation
        self.dom_impl = getDOMImplementation()

        self.pickle_file = self.feed_file + ".pickle"
        if os.path.exists(self.pickle_file):
            self.document = pickle.load(open(self.pickle_file, "r"))
            self.feed = self.document.getElementsByTagName('feed')[0]
        else:
            self._init_atom_document()

    @staticmethod
    def get_default_file_extension():
        return ".atom"

    def add_revision_item(self, revision):
        item = self._make_atom_item(revision)

        total = 0
        for childNode in self.feed.childNodes:
            if childNode.nodeName == 'entry':
                if total == 0:
                    self.feed.insertBefore(item, childNode)
                    total += 1
                total += 1
                if total > self.max_items:
                    self.feed.removeChild(childNode)
        if total == 0:
            self.feed.appendChild(item)

    def write_output(self):
        s = pickle.dumps(self.document)
        f = open(self.pickle_file, "w")
        f.write(s)
        f.close()

        f = open(self.feed_file, "w")
        f.write(self.document.toxml())
        f.close()

    def _make_atom_item(self, revision):
        info = self._get_item_dict(revision)
        doc = self.document

        entry = doc.createElement("entry")

        id = doc.createElement("id")
        entry.appendChild(id)
        id.appendChild(doc.createTextNode(info['link']))

        title = doc.createElement("title")
        entry.appendChild(title)
        title.appendChild(doc.createTextNode(info['title']))

        updated = doc.createElement("updated")
        entry.appendChild(updated)
        updated.appendChild(doc.createTextNode(info['date']))

        link = doc.createElement("link")
        entry.appendChild(link)
        link.setAttribute("href", info['link'])

        summary = doc.createElement("summary")
        entry.appendChild(summary)
        summary.appendChild(doc.createTextNode(info['description']))

        author = doc.createElement("author")
        entry.appendChild(author)
        aname = doc.createElement("name")
        author.appendChild(aname)
        aname.appendChild(doc.createTextNode(info['author']))

        return entry

    def _init_atom_document(self):
        doc = self.document = self.dom_impl.createDocument(None, None, None)

        feed = self.feed = doc.createElement("feed")
        doc.appendChild(feed)

        feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom")

        title = doc.createElement("title")
        feed.appendChild(title)
        title.appendChild(doc.createTextNode(self.feed_title))

        id = doc.createElement("id")
        feed.appendChild(id)
        id.appendChild(doc.createTextNode(self.feed_url))

        updated = doc.createElement("updated")
        feed.appendChild(updated)
        now = datetime.datetime.now()
        updated.appendChild(doc.createTextNode(self._format_date(now)))

        link = doc.createElement("link")
        feed.appendChild(link)
        link.setAttribute("href", self.feed_url)

        author = doc.createElement("author")
        feed.appendChild(author)
        aname = doc.createElement("name")
        author.appendChild(aname)
        aname.appendChild(doc.createTextNode("subversion"))

    def _format_date(self, dt):
        """ input date must be in GMT """
        return ("%04d-%02d-%02dT%02d:%02d:%02d.%02dZ"
                % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond))


def main():
    # Parse the command-line options and arguments.
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "hP:r:u:f:m:U:F:",
                                       ["help",
                                        "svn-path=",
                                        "revision=",
                                        "item-url=",
                                        "feed-file=",
                                        "max-items=",
                                        "feed-url=",
                                        "format=",
                                        ])
    except getopt.GetoptError as msg:
        usage_and_exit(msg)

    # Make sure required arguments are present.
    if len(args) != 1:
        usage_and_exit("You must specify a repository path.")
    repos_path = os.path.abspath(args[0])

    # Now deal with the options.
    max_items = 20
    commit_rev = svn_path = None
    item_url = feed_url = None
    feed_file = None
    feedcls = None
    feed_classes = { 'rss': Svn2RSS, 'atom': Svn2Atom }

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage_and_exit()
        elif opt in ("-P", "--svn-path"):
            svn_path = arg
        elif opt in ("-r", "--revision"):
            commit_rev = arg
        elif opt in ("-u", "--item-url"):
            item_url = arg
            check_url(item_url, opt)
        elif opt in ("-f", "--feed-file"):
            feed_file = arg
        elif opt in ("-m", "--max-items"):
            try:
                max_items = int(arg)
            except ValueError as msg:
                usage_and_exit("Invalid value '%s' for --max-items." % (arg))
            if max_items < 1:
                usage_and_exit("Value for --max-items must be a positive "
                               "integer.")
        elif opt in ("-U", "--feed-url"):
            feed_url = arg
            check_url(feed_url, opt)
        elif opt in ("-F", "--format"):
            try:
                feedcls = feed_classes[arg]
            except KeyError:
                usage_and_exit("Invalid value '%s' for --format." % arg)

    if feedcls is None:
        usage_and_exit("Option -F [--format] is required.")
    if item_url is None:
        usage_and_exit("Option -u [--item-url] is required.")
    if feed_url is None:
        usage_and_exit("Option -U [--feed-url] is required.")

    if commit_rev is None:
        svnlook_cmd = 'svnlook'
        if svn_path is not None:
            svnlook_cmd = os.path.join(svn_path, 'svnlook')
        cmd = [svnlook_cmd, 'youngest', repos_path]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        proc.wait()
        cmd_out = proc.stdout.readlines()
        try:
            revisions = [int(cmd_out[0])]
        except IndexError as msg:
            usage_and_exit("svn2feed.py: Invalid value '%s' for "
                           "REPOS-PATH" % (repos_path))
    else:
        try:
            rev_range = commit_rev.split(':')
            len_rev_range = len(rev_range)
            if len_rev_range == 1:
                revisions = [int(commit_rev)]
            elif len_rev_range == 2:
                start, end = rev_range
                start = int(start)
                end = int(end)
                if (start > end):
                    tmp = start
                    start = end
                    end = tmp
                revisions = list(range(start, end + 1)[-max_items:])
            else:
                raise ValueError()
        except ValueError as msg:
            usage_and_exit("svn2feed.py: Invalid value '%s' for --revision."
                           % (commit_rev))

    if feed_file is None:
        feed_file = (os.path.basename(repos_path)
                     + feedcls.get_default_file_extension())

    feed = feedcls(svn_path, repos_path, item_url, feed_file, max_items,
                   feed_url)
    for revision in revisions:
        feed.add_revision_item(revision)
    feed.write_output()


if __name__ == "__main__":
    main()
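An invocation sketch for reference; the repository path and URLs below are invented:

# python svn2feed.py -F rss -m 25 \
#     -u http://svn.example.com/viewvc/repos \
#     -U http://svn.example.com/feeds/repos.rss \
#     -r 100:110 /var/svn/repos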
[ "91980991+AppleOSSDistributions@users.noreply.github.com" ]
91980991+AppleOSSDistributions@users.noreply.github.com
59d9b80f43080c0e1442308cfa86313209c65444
8692b3941a601fdb83a669c57d5a00a4b9e05b59
/VIRTUAL_ASSISTANT-aqueel(EP19101039)-zuhair(EP19101098)/GUI Final/tasks/misc/fbot.py
761f9407189e444432ba9230724b7ab26a4337f5
[ "MIT" ]
permissive
perfectmantis/Submissions-2021
e8e3a66feed1ee9c1b84da7a9af0bef666e89d0f
d7b6458d23d21f526dc3debb6abd4a6209e56085
refs/heads/main
2023-05-02T08:31:50.540522
2021-05-08T20:49:14
2021-05-08T20:49:14
339,471,260
1
42
null
2021-05-04T00:14:12
2021-02-16T17:09:04
CSS
UTF-8
Python
false
false
1,158
py
from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time


def account_info():
    try:
        with open('data\\account_info.txt', 'r') as f:
            info = f.read().split()
            email = info[0]
            password = info[1]
            return email, password
    except Exception:
        email = password = ""
        return email, password


def fb_login():
    try:
        email, password = account_info()
        # Build the options object first, then add the argument to it;
        # add_argument() returns None, so its result must not be assigned.
        options = Options()
        options.add_argument("start-maximized")
        driver = webdriver.Chrome(options=options)
        driver.get("https://www.facebook.com/login/")

        email_xpath = '//*[@id="email"]'
        password_xpath = '//*[@id="pass"]'
        login_xpath = '//*[@id="loginbutton"]'

        time.sleep(2)
        driver.find_element_by_xpath(email_xpath).send_keys(email)
        time.sleep(0.5)
        driver.find_element_by_xpath(password_xpath).send_keys(password)
        time.sleep(0.5)
        driver.find_element_by_xpath(login_xpath).click()
        time.sleep(0.5)
    except:
        return
[ "44291943+mimranfaruqi@users.noreply.github.com" ]
44291943+mimranfaruqi@users.noreply.github.com
89b77faf800db2276ff6fd708a4125a3944939c6
d94c0d8541a05cc43b87813fd3b9d11f21dc5d76
/save_data.py
853bf7d418eb6430a401488587d0577bd2e2a8ac
[]
no_license
joakimzhang/test_ts
71cd5f36f65bab86282cd5e8354a4325e71136d0
05a9769ccda79e85b9f8a4f89af85c559958cbe9
refs/heads/master
2020-05-18T18:21:30.689401
2015-07-23T05:48:52
2015-07-23T05:48:52
39,547,686
0
1
null
null
null
null
UTF-8
Python
false
false
1,152
py
#-*- coding: utf-8 -*-
import shelve
from contextlib import closing


class save_data():
    def __init__(self):
        print "deal the data"

    def creat_shelf(self, key, value):
        with closing(shelve.open('test_shelf.db')) as s:
            s[key] = value

    def print_shelf(self, key):
        with closing(shelve.open('test_shelf.db')) as s:
            existing = s[key]
            print existing

    def del_shelf_key(self, key):
        with closing(shelve.open('test_shelf.db')) as s:
            del s[key]

    def get_all_val(self):
        with closing(shelve.open('test_shelf.db')) as s:
            print s
            data_dic = s.items()
            #print [a.decode('utf8') for a in s]
            return data_dic
            #return [a for a in s]


if __name__ == '__main__':
    key = '\\bjfile02\BJShare\Public\TS\Field_Captured_TS\中星9码流\20131108\file_ABS_20131108_11880MHz.ts\ABS_20131108_11880MHz.ts'
    value = {'int': 12, 'float': 9.5, 'string': 'sample data'}
    data = save_data()
    #data.creat_shelf(key, value)
    #data.del_shelf_key(key)
    #data.print_shelf(key)
    data.get_all_val()
[ "joakimzhang@163.com" ]
joakimzhang@163.com
6224998f24dbbf286ac343c71d3f2cf7401f4b20
abf9238ac124738796a61e4ae3e667cae950d55a
/Custom Troop Trees/Source Files/cstm_party_templates.py
e85eb75bb7d7beadb6787f95fd1ff63989067576
[]
no_license
ChroniclesStudio/custom-troop-trees
d92d4c3723ca117fd087332451ea1a0414998162
d39333cf8c4ea9fddb3d58c49850a4dffedbb917
refs/heads/master
2023-02-18T07:27:56.439995
2021-01-19T14:46:50
2021-01-19T14:46:50
331,012,346
0
0
null
null
null
null
UTF-8
Python
false
false
5,713
py
from header_common import *
from header_parties import *
from header_troops import *
from ID_troops import *
from ID_factions import *
from ID_map_icons import *
from module_constants import *
from module_troops import troops
import math

pmf_is_prisoner = 0x0001

####################################################################################################################
#  Each party template record contains the following fields:
#  1) Party-template id: used for referencing party-templates in other files.
#     The prefix pt_ is automatically added before each party-template id.
#  2) Party-template name.
#  3) Party flags. See header_parties.py for a list of available flags
#  4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
#  5) Faction
#  6) Personality. See header_parties.py for an explanation of personality flags.
#  7) List of stacks. Each stack record is a tuple that contains the following fields:
#    7.1) Troop-id.
#    7.2) Minimum number of troops in the stack.
#    7.3) Maximum number of troops in the stack.
#    7.4) Member flags(optional). Use pmf_is_prisoner to note that this member is a prisoner.
#  Note: There can be at most 6 stacks.
####################################################################################################################

party_templates = [
  #("kingdom_1_reinforcements_a", "{!}kingdom_1_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_swadian_recruit,5,10),(trp_swadian_militia,2,4)]),
  #("kingdom_1_reinforcements_b", "{!}kingdom_1_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_swadian_footman,3,6),(trp_swadian_skirmisher,2,4)]),
  #("kingdom_1_reinforcements_c", "{!}kingdom_1_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_swadian_man_at_arms,2,4),(trp_swadian_crossbowman,1,2)]),
  #Swadians are a bit less-powered thats why they have a bit more troops in their modernised party template (3-6, others 3-5)
  #("kingdom_2_reinforcements_a", "{!}kingdom_2_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_vaegir_recruit,5,10),(trp_vaegir_footman,2,4)]),
  #("kingdom_2_reinforcements_b", "{!}kingdom_2_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_vaegir_veteran,2,4),(trp_vaegir_skirmisher,2,4),(trp_vaegir_footman,1,2)]),
  #("kingdom_2_reinforcements_c", "{!}kingdom_2_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_vaegir_horseman,2,3),(trp_vaegir_infantry,1,2)]),
  #("kingdom_3_reinforcements_a", "{!}kingdom_3_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_khergit_tribesman,3,5),(trp_khergit_skirmisher,4,9)]),
  #Khergits are a bit less-powered thats why they have a bit more 2nd upgraded(trp_khergit_skirmisher) than non-upgraded one(trp_khergit_tribesman).
  #("kingdom_3_reinforcements_b", "{!}kingdom_3_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_horse_archer,2,4),(trp_khergit_skirmisher,1,2)]),
  #("kingdom_3_reinforcements_c", "{!}kingdom_3_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_veteran_horse_archer,2,3)]),
  #Khergits are a bit less-powered thats why they have a bit more troops in their modernised party template (4-7, others 3-5)
  #("kingdom_4_reinforcements_a", "{!}kingdom_4_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_nord_footman,5,10),(trp_nord_recruit,2,4)]),
  #("kingdom_4_reinforcements_b", "{!}kingdom_4_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_nord_huntsman,2,5),(trp_nord_archer,2,3),(trp_nord_footman,1,2)]),
  #("kingdom_4_reinforcements_c", "{!}kingdom_4_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_nord_warrior,3,5)]),
  #("kingdom_5_reinforcements_a", "{!}kingdom_5_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_rhodok_tribesman,5,10),(trp_rhodok_spearman,2,4)]),
  #("kingdom_5_reinforcements_b", "{!}kingdom_5_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_rhodok_crossbowman,3,6),(trp_rhodok_trained_crossbowman,2,4)]),
  #("kingdom_5_reinforcements_c", "{!}kingdom_5_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_rhodok_veteran_spearman,2,3),(trp_rhodok_veteran_crossbowman,1,2)]),
  #("kingdom_6_reinforcements_a", "{!}kingdom_6_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_sarranid_recruit,5,10),(trp_sarranid_footman,2,4)]),
  #("kingdom_6_reinforcements_b", "{!}kingdom_6_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_sarranid_skirmisher,2,4),(trp_sarranid_veteran_footman,2,3),(trp_sarranid_footman,1,3)]),
  #("kingdom_6_reinforcements_c", "{!}kingdom_6_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_sarranid_horseman,3,5)]),
]


# Note: `tree` here is the loop variable of the module-level loop below, so
# this helper is only valid while that loop is running.
def troop_indexes_of_tier(skin, tier):
    return [find_troop(troops, troop[0]) for troop in tree.get_custom_troops_of_tier(skin, tier)]


def tier_stacks(skin, tier, min, max):
    troops = troop_indexes_of_tier(skin, tier)
    return [(troop, int(math.ceil(min * 1.0 / len(troops))), int(math.ceil(max * 1.0 / len(troops)))) for troop in troops]


for tree in CUSTOM_TROOP_TREES:
    for skin in CSTM_SKINS:
        id = "cstm_kingdom_player_%s_%d_reinforcements" % (tree.id, skin.id)
        party_templates.extend([
            (id + "_a", "{!}" + id + "_a", 0, 0, fac_commoners, 0,
             tier_stacks(skin, tier = 1, min = 5, max = 10) + tier_stacks(skin, tier = 2, min = 2, max = 4)),
            (id + "_b", "{!}" + id + "_b", 0, 0, fac_commoners, 0,
             tier_stacks(skin, tier = 3, min = 5, max = 10)),
            (id + "_c", "{!}" + id + "_c", 0, 0, fac_commoners, 0,
             tier_stacks(skin, tier = 4, min = 3, max = 5)),
        ])

#for party_template in party_templates:
#    print ", ".join([party_template[0], party_template[1], ", ".join(["%d-%d %s" % (stack[1], stack[2], troops[stack[0]][2]) for stack in party_template[6]])])
[ "knowscount@gmail.com" ]
knowscount@gmail.com
452891ccd3170505662e4cb079ff70d7eff7a2c8
f722d5d2fa5a516579dc3cfb4337495a39c05b54
/app/test/src/data.py
51666fabe2b5b0642866ea3f76fca83fc8ab9001
[]
no_license
Engineerlin/DS-Practice-AT3
f5df59b59f66da7df25ad39094e434f670b4ebc4
06283b5d0e17812434b781dd41b4c615b8b94958
refs/heads/master
2023-08-27T17:41:54.163660
2021-11-06T08:39:25
2021-11-06T08:39:25
421,269,385
0
1
null
2021-11-06T08:39:26
2021-10-26T03:44:25
Python
UTF-8
Python
false
false
2,026
py
import streamlit as st
from dataclasses import dataclass
import pandas as pd


@dataclass
class Dataset:
    name: str
    df: pd.DataFrame

    def get_name(self):
        """ Return filename of loaded dataset """
        return self.name

    def get_n_rows(self):
        """ Return number of rows of loaded dataset """
        return self.df.shape[0]

    def get_n_cols(self):
        """ Return number of columns of loaded dataset """
        return self.df.shape[1]

    def get_cols_list(self):
        """ Return list column names of loaded dataset """
        return list(self.df.columns.values)

    def get_cols_dtype(self):
        """ Return dictionary with column name as keys and data type as values """
        return self.df.dtypes.apply(lambda x: x.name).to_dict()

    def get_n_duplicates(self):
        """ Return number of duplicated rows of loaded dataset """
        return self.df.duplicated().sum()

    def get_n_missing(self):
        """ Return number of rows with missing values of loaded dataset """
        return self.df.shape[0] - self.df.dropna().shape[0]

    def get_head(self, n=5):
        """ Return Pandas Dataframe with top rows of loaded dataset """
        return self.df.head(n)

    def get_tail(self, n=5):
        """ Return Pandas Dataframe with bottom rows of loaded dataset """
        return self.df.tail(n)

    def get_sample(self, n=5):
        """ Return Pandas Dataframe with random sampled rows of loaded dataset """
        return self.df.sample(n)

    def get_numeric_columns(self):
        """ Return list column names of numeric type from loaded dataset """
        return list(self.df.select_dtypes(['float']).columns)

    def get_text_columns(self):
        """ Return list column names of text type from loaded dataset """
        return list(self.df.select_dtypes(['object']).columns)

    def get_date_columns(self):
        """ Return list column names of datetime type from loaded dataset """
        return list(self.df.select_dtypes(['datetime64']).columns)
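A small usage sketch for the Dataset wrapper (the CSV name is made up). Note that get_numeric_columns() matches only float dtypes, so integer columns are not reported as numeric:

import pandas as pd

ds = Dataset(name="sales.csv", df=pd.read_csv("sales.csv"))  # hypothetical file
print(ds.get_n_rows(), ds.get_n_cols())   # dataset shape
print(ds.get_cols_dtype())                # e.g. {'amount': 'float64', 'city': 'object'}
print(ds.get_numeric_columns())           # float columns only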
[ "kailin.zhou@student.uts.edu.au" ]
kailin.zhou@student.uts.edu.au
e1133f2d9491922b496b13c9b71511b119616887
63b55540d45c6445885ebcac892aba40454441c9
/HelperFunctions.py
da1e057933504c0a96319a48b6402b48146da2e9
[]
no_license
zapatos24/The_Minority_Math_Problem
1702be39ed88a4169c1c9724eb38172fec111672
fc19758c248aa22b72929dfd733000c94785647f
refs/heads/master
2020-06-29T14:24:48.949752
2019-08-13T03:22:20
2019-08-13T03:22:20
200,560,314
2
0
null
null
null
null
UTF-8
Python
false
false
12,892
py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.feature_selection import RFECV
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score, roc_curve, auc


class HelperFunctions():

    def acheivement_score(rating):
        '''
        Takes the rating passed to it and returns an integer value
        representing how school meets target goals
        '''
        if rating == 'Exceeding Target':
            return 4
        if rating == 'Meeting Target':
            return 3
        if rating == 'Approaching Target':
            return 2
        if rating == 'Not Meeting Target':
            return 1
        else:
            return None

    def percent_cols_to_float(df):
        '''
        For any dataframe passed in, returns a new dataframe where values
        are floats between 0 and 1 representing the respective rate or
        percent in that column
        '''
        for col in df.columns:
            if 'Rate' in col or 'Percent' in col or '%' in col:
                df[col] = df[col].apply(
                    lambda x: float(x.replace('%', '')) * .01)
        return df

    def make_grades_int(grade):
        '''
        Takes a grade and returns an integer representative of that grade
        in the school system
        '''
        if grade == 'PK':
            return -1
        elif grade == '0K':
            return 0
        else:
            return int(grade)

    def grid_search_classifier(clf, param_grid, X_train, X_test, y_train, y_test,
                               scoring='f1_weighted'):
        grid_clf = GridSearchCV(clf, param_grid, scoring=scoring)
        grid_clf.fit(X_train, y_train)

        best_parameters = grid_clf.best_params_
        print("Grid Search found the following optimal parameters: ")
        for param_name in sorted(best_parameters.keys()):
            print("%s: %r" % (param_name, best_parameters[param_name]))

        y_pred = grid_clf.predict(X_test)
        print()
        print(confusion_matrix(y_test, y_pred))
        print(classification_report(y_test, y_pred))
        print('Accuracy score:', round(accuracy_score(y_test, y_pred), 2))

        cm = confusion_matrix(y_test, y_pred)
        df_cm = pd.DataFrame(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],
                             index=['F', 'T'], columns=['F', 'T'])
        plt.figure(figsize=(7, 5))
        sns.heatmap(df_cm, annot=True, cmap='Greens')
        plt.xlabel('Pred Val')
        plt.ylabel('True Val')
        plt.show()

        return grid_clf

    def plot_ROC(y_test, X_test, grid_clf):
        fpr, tpr, thresholds = roc_curve(
            y_test, grid_clf.predict_proba(X_test)[:, 1])
        print('AUC: {}'.format(auc(fpr, tpr)))
        plt.figure(figsize=(10, 8))
        plt.plot(fpr, tpr, color='darkorange', label='ROC curve')
        plt.plot([0, 1], [0, 1], color='navy')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.yticks([i / 10.0 for i in range(11)])
        plt.xticks([i / 10.0 for i in range(11)])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curve')
        plt.legend(loc="lower right")
        plt.show()

    def drop_impractical_columns(df):
        cols_to_drop = [
            'Adjusted Grade', 'New?', 'Other Location Code in LCGMS',
            'School Name', 'District', 'SED Code', 'Latitude', 'Longitude',
            'Address (Full)', 'City', 'Zip', 'Grades',
            'Rigorous Instruction Rating', 'Collaborative Teachers Rating',
            'Supportive Environment Rating', 'Effective School Leadership Rating',
            'Strong Family-Community Ties Rating', 'Trust Rating',
            'School Income Estimate', 'Average ELA Proficiency', 'Community School?',
            'Grade 3 ELA - All Students Tested', 'Grade 3 ELA 4s - All Students',
            'Grade 3 ELA 4s - American Indian or Alaska Native',
            'Grade 3 ELA 4s - Black or African American', 'Grade 3 ELA 4s - Hispanic or Latino',
            'Grade 3 ELA 4s - Asian or Pacific Islander', 'Grade 3 ELA 4s - White',
            'Grade 3 ELA 4s - Multiracial', 'Grade 3 ELA 4s - Limited English Proficient',
            'Grade 3 ELA 4s - Economically Disadvantaged',
            'Grade 3 Math - All Students tested', 'Grade 3 Math 4s - All Students',
            'Grade 3 Math 4s - American Indian or Alaska Native',
            'Grade 3 Math 4s - Black or African American', 'Grade 3 Math 4s - Hispanic or Latino',
            'Grade 3 Math 4s - Asian or Pacific Islander', 'Grade 3 Math 4s - White',
            'Grade 3 Math 4s - Multiracial', 'Grade 3 Math 4s - Limited English Proficient',
            'Grade 3 Math 4s - Economically Disadvantaged',
            'Grade 4 ELA - All Students Tested', 'Grade 4 ELA 4s - All Students',
            'Grade 4 ELA 4s - American Indian or Alaska Native',
            'Grade 4 ELA 4s - Black or African American', 'Grade 4 ELA 4s - Hispanic or Latino',
            'Grade 4 ELA 4s - Asian or Pacific Islander', 'Grade 4 ELA 4s - White',
            'Grade 4 ELA 4s - Multiracial', 'Grade 4 ELA 4s - Limited English Proficient',
            'Grade 4 ELA 4s - Economically Disadvantaged',
            'Grade 4 Math - All Students Tested', 'Grade 4 Math 4s - All Students',
            'Grade 4 Math 4s - American Indian or Alaska Native',
            'Grade 4 Math 4s - Black or African American', 'Grade 4 Math 4s - Hispanic or Latino',
            'Grade 4 Math 4s - Asian or Pacific Islander', 'Grade 4 Math 4s - White',
            'Grade 4 Math 4s - Multiracial', 'Grade 4 Math 4s - Limited English Proficient',
            'Grade 4 Math 4s - Economically Disadvantaged',
            'Grade 5 ELA - All Students Tested', 'Grade 5 ELA 4s - All Students',
            'Grade 5 ELA 4s - American Indian or Alaska Native',
            'Grade 5 ELA 4s - Black or African American', 'Grade 5 ELA 4s - Hispanic or Latino',
            'Grade 5 ELA 4s - Asian or Pacific Islander', 'Grade 5 ELA 4s - White',
            'Grade 5 ELA 4s - Multiracial', 'Grade 5 ELA 4s - Limited English Proficient',
            'Grade 5 ELA 4s - Economically Disadvantaged',
            'Grade 5 Math - All Students Tested', 'Grade 5 Math 4s - All Students',
            'Grade 5 Math 4s - American Indian or Alaska Native',
            'Grade 5 Math 4s - Black or African American', 'Grade 5 Math 4s - Hispanic or Latino',
            'Grade 5 Math 4s - Asian or Pacific Islander', 'Grade 5 Math 4s - White',
            'Grade 5 Math 4s - Multiracial', 'Grade 5 Math 4s - Limited English Proficient',
            'Grade 5 Math 4s - Economically Disadvantaged',
            'Grade 6 ELA - All Students Tested', 'Grade 6 ELA 4s - All Students',
            'Grade 6 ELA 4s - American Indian or Alaska Native',
            'Grade 6 ELA 4s - Black or African American', 'Grade 6 ELA 4s - Hispanic or Latino',
            'Grade 6 ELA 4s - Asian or Pacific Islander', 'Grade 6 ELA 4s - White',
            'Grade 6 ELA 4s - Multiracial', 'Grade 6 ELA 4s - Limited English Proficient',
            'Grade 6 ELA 4s - Economically Disadvantaged',
            'Grade 6 Math - All Students Tested', 'Grade 6 Math 4s - All Students',
            'Grade 6 Math 4s - American Indian or Alaska Native',
            'Grade 6 Math 4s - Black or African American', 'Grade 6 Math 4s - Hispanic or Latino',
            'Grade 6 Math 4s - Asian or Pacific Islander', 'Grade 6 Math 4s - White',
            'Grade 6 Math 4s - Multiracial', 'Grade 6 Math 4s - Limited English Proficient',
            'Grade 6 Math 4s - Economically Disadvantaged',
            'Grade 7 ELA - All Students Tested', 'Grade 7 ELA 4s - All Students',
            'Grade 7 ELA 4s - American Indian or Alaska Native',
            'Grade 7 ELA 4s - Black or African American', 'Grade 7 ELA 4s - Hispanic or Latino',
            'Grade 7 ELA 4s - Asian or Pacific Islander', 'Grade 7 ELA 4s - White',
            'Grade 7 ELA 4s - Multiracial', 'Grade 7 ELA 4s - Limited English Proficient',
            'Grade 7 ELA 4s - Economically Disadvantaged',
            'Grade 7 Math - All Students Tested', 'Grade 7 Math 4s - All Students',
            'Grade 7 Math 4s - American Indian or Alaska Native',
            'Grade 7 Math 4s - Black or African American', 'Grade 7 Math 4s - Hispanic or Latino',
            'Grade 7 Math 4s - Asian or Pacific Islander', 'Grade 7 Math 4s - White',
            'Grade 7 Math 4s - Multiracial', 'Grade 7 Math 4s - Limited English Proficient',
            'Grade 7 Math 4s - Economically Disadvantaged',
            'Grade 8 ELA - All Students Tested', 'Grade 8 ELA 4s - All Students',
            'Grade 8 ELA 4s - American Indian or Alaska Native',
            'Grade 8 ELA 4s - Black or African American', 'Grade 8 ELA 4s - Hispanic or Latino',
            'Grade 8 ELA 4s - Asian or Pacific Islander', 'Grade 8 ELA 4s - White',
            'Grade 8 ELA 4s - Multiracial', 'Grade 8 ELA 4s - Limited English Proficient',
            'Grade 8 ELA 4s - Economically Disadvantaged',
            'Grade 8 Math - All Students Tested', 'Grade 8 Math 4s - All Students',
            'Grade 8 Math 4s - American Indian or Alaska Native',
            'Grade 8 Math 4s - Black or African American', 'Grade 8 Math 4s - Hispanic or Latino',
            'Grade 8 Math 4s - Asian or Pacific Islander', 'Grade 8 Math 4s - White',
            'Grade 8 Math 4s - Multiracial', 'Grade 8 Math 4s - Limited English Proficient',
            'Grade 8 Math 4s - Economically Disadvantaged'
        ]
        return df.drop(cols_to_drop, axis=1)

    def rfe_test(classifier, features, X_train, y_train):
        ranking_list = []
        for i in range(50):
            clf = classifier
            rfecv = RFECV(clf).fit(X_train, y_train)
            ranking_list.append(rfecv.ranking_)
        return pd.DataFrame(zip(features, sum(ranking_list) / 50)).sort_values(by=1)
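A sketch of how these helpers chain together; since the methods are defined without self, they are called on the class itself. The CSV name, target column and parameter grid are illustrative:

df = pd.read_csv('school_data.csv')                    # hypothetical input file
df = HelperFunctions.percent_cols_to_float(df)
df = HelperFunctions.drop_impractical_columns(df)
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'])           # 'target' is illustrative
grid = HelperFunctions.grid_search_classifier(
    RandomForestClassifier(), {'n_estimators': [100, 300]},
    X_train, X_test, y_train, y_test)
HelperFunctions.plot_ROC(y_test, X_test, grid)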
[ "jeremy.traber.owens@gmail.com" ]
jeremy.traber.owens@gmail.com
01a24fbc30567db48254632abb8ff4ac747ce67b
2f63688febd21dc3ae6b19abfa79ad313c820154
/1004_Max_Consecutive_Ones_III/try_1.py
000de4088f9c246b4ee2a9c4740487216a9157ef
[]
no_license
novayo/LeetCode
cadd03587ee4ed6e35f60294070165afc1539ac8
54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7
refs/heads/master
2023-08-14T00:35:15.528520
2023-07-30T05:56:05
2023-07-30T05:56:05
200,248,146
8
1
null
2022-11-19T04:37:54
2019-08-02T14:24:19
Python
UTF-8
Python
false
false
692
py
# Imports assumed by the LeetCode runtime:
import collections
from typing import List


class Solution:
    def longestOnes(self, nums: List[int], k: int) -> int:
        ans = cur = cur_one = 0
        ones = collections.deque()

        for num in nums:
            if num == 1:
                cur += 1
                cur_one += 1
            else:
                if k > 0:
                    cur += 1
                    if len(ones) >= k:
                        remove = ones.pop()
                        cur -= remove + 1
                    ones.appendleft(cur_one)
                    cur_one = 0
                else:
                    if cur > 0:
                        cur -= 1
            ans = max(ans, cur)

        return ans
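The deque-based attempt above appears to mishandle the k == 0 branch, which decrements the running window instead of resetting it: for nums = [1, 1, 0, 1, 1] with k = 0 it returns 3 rather than 2. A standard sliding-window version for comparison (not the author's solution):

# Keep at most k zeros inside the window; shrink from the left when exceeded.
def longest_ones(nums, k):
    left = zeros = best = 0
    for right, num in enumerate(nums):
        zeros += (num == 0)
        while zeros > k:                  # too many zeros: advance the left edge
            zeros -= (nums[left] == 0)
            left += 1
        best = max(best, right - left + 1)
    return best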
[ "shihchungyu@shichongyous-MacBook-Air.local" ]
shihchungyu@shichongyous-MacBook-Air.local
450170e9d9e65aabbc3043829fbde44a95b4602c
e6c88bc10f82c2e0a9a40666f14b4e81418516ee
/pharmacist/models.py
a2a7ca6b56b5dcdc35ddc314841594f885f3c473
[]
no_license
Masher828/HospitalManagementSystem
bb2f819edb1da52e34fc7dfff93dfaf79e9c0dd5
72d7875159098361c8e6f53d3076ba8b612eb279
refs/heads/masher
2022-12-18T02:15:06.671642
2020-06-30T11:54:26
2020-06-30T11:54:26
275,923,707
0
1
null
2020-10-01T07:31:35
2020-06-29T20:52:49
JavaScript
UTF-8
Python
false
false
438
py
from django.db import models


class Medicinemaster(models.Model):
    ws_med_id = models.AutoField(primary_key=True, serialize=False)
    ws_med_name = models.CharField(max_length=255)
    ws_stock_qty = models.IntegerField()
    ws_price = models.FloatField()

    def __str__(self):
        return self.ws_med_name


class Medicineissued(models.Model):
    ws_pat_id = models.IntegerField()
    ws_med_id = models.IntegerField()
    ws_qty = models.IntegerField()
[ "manish.cse828@gmail.com" ]
manish.cse828@gmail.com
579e1b6ee2dc5a5ecaf69f568fb93ffbf3fb236f
24cfbad4390b86b337ce7b97998b8be4b3297a2c
/hmrxapp/migrations/0002_altera_tam_histograma.py
277307d46411dec7a22d3354c5782e2968087c06
[]
no_license
ecalasans/hmrxsys
8aa8be3e846870ce0910d5e3a6ebce2d253f754b
1c372d5bbd483bd62fba6a50aa6f266d0f922d45
refs/heads/master
2023-06-26T21:05:58.937630
2021-07-30T07:47:16
2021-07-30T07:47:16
378,649,436
0
0
null
null
null
null
UTF-8
Python
false
false
698
py
# Generated by Django 3.2.4 on 2021-06-22 00:14

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('hmrxapp', '0001_cria_tabela_filtragem'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filtragem',
            name='data_add',
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 22, 0, 14, 46, 624188, tzinfo=utc), editable=False),
        ),
        migrations.AlterField(
            model_name='filtragem',
            name='histograma',
            field=models.CharField(default='', max_length=5000),
        ),
    ]
[ "ericcalasans@gmail.com" ]
ericcalasans@gmail.com
a5ec4c22e8526a56d17ae4d199df63900a4fd74c
7dfe37c9b4cb8512a49bb7abe6311b553e229fc5
/src/utils/clases/metodos_strings.py
44eb6a68120721b812795a4f6c0db7457ef5fb16
[]
no_license
ArmandoBerlanga/python_playground
7e28c992bd9934f4b72a58f076c2b3f39137951a
4ee243e3d4a02667a8ed05fc526e5373a118e0d9
refs/heads/main
2023-04-05T12:37:11.478937
2021-03-31T00:43:30
2021-03-31T00:43:30
349,169,416
0
0
null
null
null
null
UTF-8
Python
false
false
2,871
py
# Program created by José Armando Berlanga Mendoza
# Created on February 17, 2021
# Description: exercise on recursion


def voltear_palabra(s):
    # method to completely reverse a word or sentence
    l = len(s)
    if (l == 0):
        return ""
    else:
        return s[l-1:] + voltear_palabra(s[0:l-1])


def es_palindrome(s):
    # method to evaluate whether a word or sentence is a palindrome
    # (reads the same forwards and backwards)
    if (len(s) > 1):
        if s[len(s)-1] == s[0]:
            return es_palindrome(s[1:len(s)-1])
        else:
            return False
    else:
        return True


def imprimir_pino(reglon, cont):
    # method to print a centred pine tree
    if (reglon == 0):
        return ""
    else:
        for i in range(reglon, 0, -1):
            print(" ", end="")
            if i == 0:
                break
        for i in range(cont+1, 0, -1):
            print("*", end="")
            if i == 0:
                break
        print()
        return imprimir_pino(reglon-1, cont+2)


def formatear_palabra(s):
    # formatting method: lowercase, remove spaces/punctuation and strip accents
    acentos = "áéíóú"
    sinAcentos = "aeiou"
    s = s.lower().replace(" ", "").replace(".", "").replace(",", "")
    for char in s:
        pos = acentos.find(char)
        if pos != -1:
            s = s.replace(acentos[pos], sinAcentos[pos])
    return s


# Given a string s, find the length of the longest substring without repeating characters.
def longest_substring(s):
    if len(s) == 1:
        return 1
    elif s == "":
        return 0
    conts = []
    for i in range(len(s)):  # consider every start position, including the last
        chars = []
        cont = 0
        j = i
        while j < len(s) and s[j] not in chars:  # scan up to and including the final character
            cont += 1
            chars.append(s[j])
            j += 1
        conts.append(cont)
    return max(conts)


if __name__ == '__main__':
    option = -1
    while (option == -1):
        print("\n[1] Reverse the order of a word or sentence\n[2] Evaluate whether a word or phrase is a palindrome\n[3] Print a pine tree")
        option = int(input("\nEnter a number from the options above: "))
        if option != 1 and option != 2 and option != 3:
            print("\nThat is not a valid number, please enter it again")
            option = -1

    if option == 1:
        s = input("\nEnter the text to reverse: ")
        print("\nResult: " + voltear_palabra(s))
    elif option == 2:
        s = input("\nEnter the text to evaluate: ")
        # s = "A mamá Roma le aviva el amor a papá y a papá Roma le aviva el amor a mamá."
        # parenthesise the conditional so the quoted text prints in both branches
        print("\n\"" + s + "\"" + (", is a palindrome" if es_palindrome(formatear_palabra(s)) else ", is not a palindrome"))
    else:
        pisos = int(input("\nEnter the number of levels of the pyramid: "))
        print()
        imprimir_pino(pisos, 0)
        print()
[ "Jose.berlangam@udem.edu" ]
Jose.berlangam@udem.edu
332fa4a0deb504844a72c413b5933587f949574b
dac141981cfefbc3da1167a3cbd9bfa2c02ebec5
/src/tokenizer.py
b0289568c1738b5d7bd3257edf188f8842be1555
[]
no_license
meciwo/Knowledge-based_Meme_Caption_Generator
65b8ce46089a830618906d2c6e40a6fbbbc7507a
ad3d62d50828609da4d7e8de64c57c695304346d
refs/heads/main
2023-04-01T23:45:27.040339
2021-04-08T18:10:08
2021-04-08T18:10:08
350,203,673
1
0
null
null
null
null
UTF-8
Python
false
false
501
py
import MeCab
from dotenv import load_dotenv
import os
import emoji

load_dotenv()
mecab_path = os.environ["MECAB_PATH"]
mecab = MeCab.Tagger(f"-Owakati -d {mecab_path}")
mecab.parse("")  # workaround for a MeCab bug


def remove_emoji(src_str):
    return "".join(c for c in src_str if c not in emoji.UNICODE_EMOJI)


def tokenize(text):
    text = remove_emoji(str(text))
    text = text.replace("「", "").replace("」", "").replace("、", "")
    result = mecab.parse(text).strip().split(" ")
    return result
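A quick illustration of the tokenizer's intent; the exact split depends on the dictionary installed at MECAB_PATH, so the output shown is indicative only:

# Indicative only -- actual segmentation depends on the installed dictionary.
# tokenize("「今日は、いい天気」")  ->  ['今日', 'は', 'いい', '天気']
# (the brackets and comma are stripped before MeCab segments the text)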
[ "shanshan0474@gmail.com" ]
shanshan0474@gmail.com
1e07bbeff0fb13fa145c80101d396935d33a0423
6b14d9a64a578239e5612e6098320b61b45c08d9
/AUG14/04.py
27bec86f2774038bbdffc335d52b45f500521bfc
[ "MIT" ]
permissive
Razdeep/PythonSnippets
498c403140fec33ee2f0dd84801738f1256ee9dd
76f9313894f511c487a99bc38bdf0fe5e594caf5
refs/heads/master
2020-03-26T08:56:23.067022
2018-11-26T05:36:36
2018-11-26T05:36:36
144,726,845
0
0
null
null
null
null
UTF-8
Python
false
false
67
py
# String slicing
test = 'Hello world'
print(test[1:5])  # -> ello  (indices 1 through 4)
print(test[6])    # -> w     (index 6, just past the space)
[ "rrajdeeproychowdhury@gmail.com" ]
rrajdeeproychowdhury@gmail.com
90105714e157a472def98eca28ce8f9da9114066
39eb95d42ff47be6c9be8316cba3d1a0eca1d71f
/shirai-ri/tutorial04/test_hmm.py
dc7995326f072f2a207377d17159525881467139
[]
no_license
reo11/NLPtutorial2018
ac6cc059b4d428e5e67dba9e3b2d176b003ee34c
f733ed7d0479c8ed9b1224d6fc61b74748031ff1
refs/heads/master
2020-06-28T15:09:17.885949
2018-08-30T01:35:28
2018-08-30T01:35:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,046
py
# coding: utf-8

# In[ ]:

import codecs
from collections import defaultdict
import math


def test_hmm(test_file, model_file, answer):
    transition = defaultdict(int)
    emission = defaultdict(int)
    possible_tags = defaultdict(int)
    lam = 0.95
    lam_unk = 1 - lam
    V = 1000000
    with codecs.open(model_file, 'r', 'utf8') as model_f, \
         codecs.open(test_file, 'r', 'utf8') as test_f, \
         codecs.open(answer, 'w', 'utf8') as answer_f:
        # Load the model
        for line in model_f:
            typ, context, word, prob = line.strip().split()
            possible_tags[context] = 1  # record the context as a possible tag
            if typ == 'T':
                transition['{} {}'.format(context, word)] = float(prob)
            else:
                emission['{} {}'.format(context, word)] = float(prob)
        # Run the actual test
        for line in test_f:
            words = line.strip().split()
            best_score = dict()
            best_edge = dict()
            best_score['0 <s>'] = 0
            best_edge['0 <s>'] = 'NULL'
            # Forward (Viterbi) pass
            for i in range(0, len(words)):
                for prev in possible_tags.keys():
                    for nex in possible_tags.keys():
                        if '{} {}'.format(i, prev) in best_score and '{} {}'.format(prev, nex) in transition:
                            score = best_score['{} {}'.format(i, prev)] \
                                - math.log(transition['{} {}'.format(prev, nex)], 2) \
                                - math.log(lam * emission['{} {}'.format(nex, words[i])] + lam_unk / V, 2)
                            if '{} {}'.format(i + 1, nex) not in best_score or best_score['{} {}'.format(i + 1, nex)] > score:
                                best_score['{} {}'.format(i + 1, nex)] = score
                                best_edge['{} {}'.format(i + 1, nex)] = '{} {}'.format(i, prev)
            # Final transition into </s>
            for prev in possible_tags.keys():
                if '{} {}'.format(len(words), prev) in best_score and '{} </s>'.format(prev) in transition:
                    score = best_score['{} {}'.format(len(words), prev)] \
                        - math.log(transition['{} </s>'.format(prev)], 2)
                    if '{} </s>'.format(len(words) + 1) not in best_score or best_score['{} </s>'.format(len(words) + 1)] > score:
                        best_score['{} </s>'.format(len(words) + 1)] = score
                        best_edge['{} </s>'.format(len(words) + 1)] = '{} {}'.format(len(words), prev)
            # Backward pass
            tags = []
            next_edge = best_edge['{} </s>'.format(len(words) + 1)]
            while next_edge != '0 <s>':
                position, tag = next_edge.split()
                tags.append(tag)
                next_edge = best_edge[next_edge]
            tags.reverse()
            answer_f.write(' '.join(tags) + '\n')


if __name__ == '__main__':
    test_hmm('./nlptutorial-master/data/wiki-en-test.norm', './model_file.txt', 'my_answer.pos')
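The loader above expects each model line to hold exactly four whitespace-separated fields; a plausible fragment (probabilities invented for illustration):

# <type> <context> <word> <prob>, one record per line:
#   T <s> NN 0.45        (transition P(NN | <s>))
#   E NN language 0.02   (emission  P(language | NN))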
[ "tarokirs@gmail.com" ]
tarokirs@gmail.com
a1afae0b9a14f320f59826b7a6e3c27d9d04847f
bddcad1331e2ea68d2ffc7e3f0478d8776fea5d8
/Administratie/Literatuurstudie/bijlagen/convert.py
3321a9340e1be75db22d68de619847b73eb22cbe
[]
no_license
4ilo/masterproef
693b9123e7e3fa64ef50960581d64f5116f8b045
c5ad81cee83b354262b2dc9d0dced0cbcf0c2f66
refs/heads/master
2020-03-31T08:44:10.246829
2019-06-24T08:56:32
2019-06-24T08:56:32
152,070,317
0
0
null
null
null
null
UTF-8
Python
false
false
1,548
py
import xml.etree.ElementTree as ET
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("file")
args = parser.parse_args()

classes = []


def getLabels(root):
    labels = root.find('task').find('labels')
    for label in labels:
        classes.append(label.find('name').text)

    with open('cust.names', 'w') as names:
        names.write("\n".join(classes))


if __name__ == '__main__':
    tree = ET.parse(args.file)
    root = tree.getroot()
    print(root.tag)

    if not os.path.isdir('Anotations_yolo'):
        os.mkdir('Anotations_yolo')

    for child in root:
        if child.tag == 'meta':
            getLabels(child)

        if child.tag == 'image':
            image = child.get('name')
            w = float(child.get('width'))
            h = float(child.get('height'))
            boxes = ''
            for box in child.findall('box'):
                xtl = float(box.get('xtl'))
                ytl = float(box.get('ytl'))
                xbr = float(box.get('xbr'))
                ybr = float(box.get('ybr'))

                width = xbr - xtl
                height = ybr - ytl
                x = xtl + (width / 2)  # center
                y = ytl + (height / 2)

                boxes += '{} {} {} {} {}\n'.format(classes.index(box.get('label')),
                                                   x / w, y / h, width / w, height / h)

            with open('Anotations_yolo/{}.txt'.format(os.path.splitext(image)[0]), 'w') as file:
                file.write(boxes)
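Each emitted line follows the YOLO convention "<class> <x_center> <y_center> <width> <height>", all normalised to [0, 1]. A worked example with invented numbers:

# A 100x50 box centred at (320, 240) in a 640x480 image, label index 0:
#   x_center = 320/640 = 0.5,     y_center = 240/480 = 0.5
#   width    = 100/640 = 0.15625, height   = 50/480 ~= 0.10417
# giving the line: "0 0.5 0.5 0.15625 0.10416666666666667"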
[ "oliviervandeneede@hotmail.com" ]
oliviervandeneede@hotmail.com
c1fda1a470ad681c3a1a16d4e839b87151b19b33
6f6d215a4f0a1c30eeb5a08c8a36016fc351998a
/zcls/model/recognizers/resnet/torchvision_resnet.py
040bc44da6892b30585f415d6130a4b2fe65cecc
[ "Apache-2.0" ]
permissive
Quebradawill/ZCls
ef9db2b54fbee17802f3342752e3d4fe4ef9d2c5
ade3dc7fd23584b7ba597f24ec19c02ae847673e
refs/heads/master
2023-04-15T23:25:18.195089
2021-04-29T07:05:46
2021-04-29T07:05:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,125
py
# -*- coding: utf-8 -*-

"""
@date: 2021/2/20 10:28 AM
@file: torchvision_resnet.py
@author: zj
@description:
"""

from abc import ABC

import torch.nn as nn
from torch.nn.modules.module import T
from torchvision.models.resnet import resnet18, resnet50, resnext50_32x4d

from zcls.config.key_word import KEY_OUTPUT
from zcls.model import registry
from zcls.model.norm_helper import freezing_bn


class TorchvisionResNet(nn.Module, ABC):

    def __init__(self,
                 arch="resnet18",
                 num_classes=1000,
                 torchvision_pretrained=False,
                 pretrained_num_classes=1000,
                 fix_bn=False,
                 partial_bn=False,
                 zero_init_residual=False):
        super(TorchvisionResNet, self).__init__()
        self.num_classes = num_classes
        self.fix_bn = fix_bn
        self.partial_bn = partial_bn

        if arch == 'resnet18':
            self.model = resnet18(pretrained=torchvision_pretrained,
                                  num_classes=pretrained_num_classes,
                                  zero_init_residual=zero_init_residual)
        elif arch == 'resnet50':
            self.model = resnet50(pretrained=torchvision_pretrained,
                                  num_classes=pretrained_num_classes,
                                  zero_init_residual=zero_init_residual)
        elif arch == 'resnext50_32x4d':
            self.model = resnext50_32x4d(pretrained=torchvision_pretrained,
                                         num_classes=pretrained_num_classes,
                                         zero_init_residual=zero_init_residual)
        else:
            raise ValueError('no such value')

        self.init_weights(num_classes, pretrained_num_classes)

    def init_weights(self, num_classes, pretrained_num_classes):
        if num_classes != pretrained_num_classes:
            fc = self.model.fc
            fc_features = fc.in_features
            self.model.fc = nn.Linear(fc_features, num_classes)
            nn.init.normal_(self.model.fc.weight, 0, 0.01)
            nn.init.zeros_(self.model.fc.bias)

    def train(self, mode: bool = True) -> T:
        super(TorchvisionResNet, self).train(mode=mode)
        if mode and (self.partial_bn or self.fix_bn):
            freezing_bn(self, partial_bn=self.partial_bn)
        return self

    def forward(self, x):
        x = self.model(x)
        return {KEY_OUTPUT: x}


@registry.RECOGNIZER.register('TorchvisionResNet')
def build_torchvision_resnet(cfg):
    torchvision_pretrained = cfg.MODEL.RECOGNIZER.TORCHVISION_PRETRAINED
    pretrained_num_classes = cfg.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES
    fix_bn = cfg.MODEL.NORM.FIX_BN
    partial_bn = cfg.MODEL.NORM.PARTIAL_BN
    # for backbone
    arch = cfg.MODEL.BACKBONE.ARCH
    zero_init_residual = cfg.MODEL.RECOGNIZER.ZERO_INIT_RESIDUAL
    num_classes = cfg.MODEL.HEAD.NUM_CLASSES

    return TorchvisionResNet(
        arch=arch,
        num_classes=num_classes,
        torchvision_pretrained=torchvision_pretrained,
        pretrained_num_classes=pretrained_num_classes,
        fix_bn=fix_bn,
        partial_bn=partial_bn,
        zero_init_residual=zero_init_residual
    )
[ "wy163zhuj@163.com" ]
wy163zhuj@163.com
c75ed46c05859a6b3d132ce4c72addc3a7b800b7
41edcef2f35d4eae56b57a6e6cb4a9ecad42b812
/common/mobile_device.py
c1635180d58675bdf3a3256270b07b018079d283
[]
no_license
wanhui1994/xpower
4d7e4dc9102a7eff085fd4f2443ee2b7d4f695ba
9c1b468f6f215d87ec34ebbc5f2cdf43246af6a5
refs/heads/master
2020-07-29T10:22:46.823317
2019-09-21T07:34:48
2019-09-21T07:34:48
209,761,335
0
0
null
null
null
null
UTF-8
Python
false
false
1,454
py
#coding=utf-8
import os, re


class Device():
    def vesion(self):
        # Get the identifier of the device connected to the computer
        self.readDeviceId = list(os.popen('adb devices').readlines())
        deviceId = re.findall(r'^\w*\b', self.readDeviceId[1])[0]
        return deviceId

    def devicevsion(self):
        # Get the Android version of the connected device
        deviceAndroidVersion = list(os.popen('adb shell getprop ro.build.version.release').readlines())
        deviceVersion = "".join(deviceAndroidVersion).strip()
        return deviceVersion

    def Package(self):
        # Get the package name of the apk under test
        pass

    def apk(self):
        pwd = os.getcwd()
        father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
        path = father_path + "\\apk\\app-release.apk"
        return path

    def desired(self, package, activity):
        # Desired capabilities for the mobile device
        if len(list(os.popen('adb devices').readlines())[1].rstrip()) > 0:
            desired_caps = {
                'platformName': 'Android',
                'deviceName': self.vesion(),
                'platformVersion': self.devicevsion(),
                'appPackage': package,      # package name of the apk
                'appActivity': activity,    # activity of the apk
                'sessionOverride': 'true',  # override the session on every start
                'app': self.apk(),
                'noReset': 'True',
            }
            return desired_caps
        else:
            print("Test phone is not connected")
[ "2353231116@qq.com" ]
2353231116@qq.com
092077973ed26e56e12866dd0b199df990ac44cf
c56268db8a4e08a705209142a6c171cd0f9aa7cc
/local_app/models.py
74d009980834f60502c75147e84d6b2082fa967a
[]
no_license
nguyenl1/local-app
e46b6e2e1ebb5fc799c5bf4b90e95782e6a327d9
434a7f16670f9afb560355e1035fe78ec16b2eb0
refs/heads/master
2022-11-12T06:56:21.332008
2020-07-03T16:14:40
2020-07-03T16:14:40
270,892,971
0
0
null
2020-07-03T16:14:41
2020-06-09T03:23:05
HTML
UTF-8
Python
false
false
1,678
py
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth import get_user_model
import cloudinary
import cloudinary.uploader
import cloudinary.api
from multiselectfield import MultiSelectField


class SavedPin(models.Model):
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True)
    bus_id = models.CharField(max_length=200)
    name = models.CharField(max_length=200)
    address = models.CharField(max_length=200, blank=True)
    city = models.CharField(max_length=200, blank=True)
    zip_code = models.CharField(max_length=200, blank=True)
    state = models.CharField(max_length=200, blank=True)
    image = models.TextField(max_length=2000, blank=True)
    image_2 = models.TextField(max_length=2000, blank=True)
    image_3 = models.TextField(max_length=2000, blank=True)
    latitude = models.TextField(max_length=2000, blank=True)
    longitude = models.TextField(max_length=2000, blank=True)


class MyTrip(models.Model):
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True)
    saved_pin = models.ForeignKey(SavedPin, on_delete=models.PROTECT, null=True)
    name = models.CharField(max_length=200, blank=True)


class SubmitPost(models.Model):
    site_name = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    city = models.CharField(max_length=200, blank=True)
    zip_code = models.CharField(max_length=200, blank=True)
    state = models.CharField(max_length=200, blank=True)
    publisher_name = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
[ "lynnthuynguyen@yahoo.com" ]
lynnthuynguyen@yahoo.com
b9c25442a137b3ef27edbd26fb246ea1cad4a350
ac3b4affef9c9c03121ee30c0c0d589db54f292e
/docs/enterprise/hmac_.py
94fe8ba06404b24dde9c2121a3aad8713cdbefa6
[]
no_license
btourman/documentation
d5d822fa03c0c85d6304abaa0563e4e5ad0ca0b1
99929b11cac8814fe3661439bce607c59a5f2ebd
refs/heads/master
2020-09-07T03:07:59.850216
2019-11-08T14:07:17
2019-11-08T14:07:17
220,638,447
0
0
null
2019-11-09T12:23:26
2019-11-09T12:23:25
null
UTF-8
Python
false
false
1,141
py
# -*- coding: utf-8 -*-
from urllib.parse import urlencode
import hmac, hashlib, codecs


def sign(query, secretKey):
    return codecs.getencoder('hex')(hmac.new(secretKey.encode('utf-8'), query.encode('utf-8'), hashlib.sha256).digest())[0].decode('utf-8')


if __name__ == '__main__':
    # First setup our account
    ACCOUNT_ID = 'MY_ACCOUNT_ID'
    SECRET_KEY = 'MY_SECRET_KEY'

    # Then generate the watermark-free url.
    # No need to encode the query string, Image-Charts will decode every
    # parameter by itself to check the signature; learn why in our
    # documentation https://documentation.image-charts.com/enterprise/
    rawQuerystring = [
        ('cht', 'bvs'),
        ('chd', 's:93zyvneTTO'),
        ('chtt', 'Hello world'),
        ('chs', '400x401'),
        ('icac', ACCOUNT_ID)  # don't forget to add your account id before signing it
    ]
    queryString = "&".join([param + '=' + value for (param, value) in rawQuerystring])
    signature = sign(queryString, SECRET_KEY)
    publicUrl = "https://image-charts.com/chart?" + queryString + "&ichm=" + signature

    # Finally send it to slack or via email, here we simply use print
    print(publicUrl)
[ "github@fgribreau.com" ]
github@fgribreau.com
8d07ce4171b2bea29faab046161815234799f885
a0cefb1cd11b85b34c5ed58e44d087981541111b
/run_tests.py
57157de5d2632d35069de435665c2e90b130f08e
[]
no_license
diogo-aos/masters_final
732e436e74bbc7d24756fb10f96d0d39656212e7
93ae6b71d7d7d9dade0059facfe2bd5162c673da
refs/heads/master
2021-01-18T15:36:58.263599
2017-03-30T05:11:18
2017-03-30T05:11:18
86,661,794
2
0
null
null
null
null
UTF-8
Python
false
false
329
py
import unittest

import tests.test_scan as tscan
import tests.test_boruvka as tboruvka

scan_suite = unittest.TestLoader().loadTestsFromModule(tscan)
boruvka_suite = unittest.TestLoader().loadTestsFromModule(tboruvka)

# unittest.TextTestRunner(verbosity=2).run(scan_suite)
unittest.TextTestRunner(verbosity=2).run(boruvka_suite)
[ "dasilva@academiafa.edu.pt" ]
dasilva@academiafa.edu.pt
ddb356445bf02c7df3723d467bc763ca2c73ba9e
41f81d8496262182c73e855e9d3d4fcee8dc659d
/emailSpammer/spam.py
c607dec4db5b686b1519479c207a8685ce7e851c
[]
no_license
ishank-dev/MyPythonScripts
5e372bf829941e3db5746cc32cdaf2fddb33f0a8
a50e17f0a5bd45b8086e429abff3d9a1d42286d5
refs/heads/master
2021-07-11T09:41:15.994267
2019-10-14T16:38:25
2019-10-14T16:38:25
209,132,738
1
4
null
2020-10-14T14:21:45
2019-09-17T18:50:19
Python
UTF-8
Python
false
false
395
py
import smtplib

# Send the same message five times.
for i in range(0, 5):
    conn = smtplib.SMTP('smtp.gmail.com', 587)  # connect to gmail
    conn.ehlo()
    conn.starttls()
    conn.login('your_email', 'your_password')  # write your email and password for the gmail account
    conn.sendmail('your_email', 'recipient_address',
                  'Subject: Write email subject here\n\n Write the message here ')  # write your email and recipient address here
    conn.quit()
[ "noreply@github.com" ]
ishank-dev.noreply@github.com
ab60302c0ed0fb4b5e89d82951627fa21c93947d
902aef0f2cde6c73a70c1833bec2c6f4fa1bc0b6
/StimControl/LightStim/Text.py
8250df16138250fa9a1abc57f9be913ac1e111ff
[ "BSD-2-Clause", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
chrox/RealTimeElectrophy
2b0c88b28cbeeb4967db5630f3dfa35764c27f54
e1a331b23d0a034894a0185324de235091e54bf0
refs/heads/master
2020-04-14T12:29:47.958693
2013-10-07T14:12:04
2013-10-07T14:12:04
1,662,847
6
0
null
null
null
null
UTF-8
Python
false
false
1,048
py
# Text stimulus
#
# Copyright (C) 2010-2013 Huang Xin
#
# See LICENSE.TXT that came with this file.
# Taget stimuli
#
# Copyright (C) 2010-2013 Huang Xin
#
# See LICENSE.TXT that came with this file.
from VisionEgg.Text import Text
from LightData import dictattr
from Core import Stimulus


class Hint(Stimulus):
    def __init__(self, params, **kwargs):
        super(Hint, self).__init__(params=params, **kwargs)
        self.name = 'hint'
        self.parameters = dictattr()
        self.set_parameters(self.parameters, params)
        self.make_stimuli()

    def make_stimuli(self):
        position = self.viewport.deg2pix(self.parameters.xorigDeg) + self.viewport.xorig, \
                   self.viewport.deg2pix(self.parameters.yorigDeg) + self.viewport.yorig
        self.text = Text(text=self.parameters.text,
                         position=position,
                         color=self.parameters.color,
                         font_size=self.parameters.fontsize,
                         anchor='center')
        self.stimuli = [self.text]
[ "chrox.huang@gmail.com" ]
chrox.huang@gmail.com
d0fe0d32f8117f0320d48ac65fba33e6aa33014e
adc5060ccc1f9e1243f0d9d5eb95e0ca87034806
/cdkworkshop/cdkworkshop_stack.py
2b18b7ea586a1953a25a1b199afbf137a865fcaa
[]
no_license
fanaticjo/cdkworkshop
37a1762fa9137ee405a098afaf2f45c3b57b135d
aa5a888d42aaae98eb18fe629b82fb1416b4bd7e
refs/heads/master
2023-08-16T22:41:11.431991
2021-09-14T16:30:35
2021-09-14T16:30:35
406,444,342
0
0
null
null
null
null
UTF-8
Python
false
false
583
py
from aws_cdk import ( aws_lambda as _lambda, aws_apigateway as api, core ) class CdkworkshopStack(core.Stack): def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) self.lambdaStack=_lambda.Function( self,'Handler',runtime=_lambda.Runtime.PYTHON_3_6, code=_lambda.Code.asset('lambda'), handler='lambda_function.lambda_handler' ) self.api_g=api.LambdaRestApi( self,'Endpoint',handler=self.lambdaStack )
[ "biswajit196@live.com" ]
biswajit196@live.com
aa0821eb5dfdd23d7f0d1145aa8a2eb118518433
04ce2e384bf4c005264c86144a4bc9482fab74d2
/venv/lib/python3.5/bisect.py
e9627395ec6a842938ca6fa9bb305208875ba9ae
[]
no_license
ranijaiswal/mysite
e9ad8fdf38ac7dc3af42f2b168e3d226e7fa69df
21b57513857218bdcbd66925c19ec9572c1f239d
refs/heads/master
2021-01-23T03:53:52.374712
2017-03-26T17:46:26
2017-03-26T17:46:26
86,132,814
0
0
null
null
null
null
UTF-8
Python
false
false
51
py
/Users/ranijaiswal/anaconda/lib/python3.5/bisect.py
[ "ruj96@live.com" ]
ruj96@live.com
11b7e3689c9e441e4675a957d33afa8bb29e075b
0c4fe6a4ada54cda0f5116e9fee31f133a2ca687
/common/logger.py
dadeaf23c5addb75052fd305ecaa9c17e8709ab5
[]
no_license
march-saber/python_aixunshouzhu_api
029f26470418abd5585a5593bf3addc01f83285e
49e57cbfe9ca7055fa05e9d98c439617ae486067
refs/heads/master
2020-05-28T09:46:53.418636
2019-06-03T12:12:47
2019-06-03T12:12:47
188,961,078
0
0
null
null
null
null
UTF-8
Python
false
false
1,257
py
import logging
from common import contants
from common.config import config


def get_logger(name):
    logger = logging.getLogger(name)  # create a log collector
    logger.setLevel("DEBUG")  # set the level at which logs are collected
    fmt = "%(name)s - %(levelname)s - %(asctime)s - %(message)s - [%(filename)s:%(lineno)d]"
    formatter = logging.Formatter(fmt=fmt)  # define the log output format
    console_handler = logging.StreamHandler()  # direct output to the console
    # read the log level from the config file instead of hard-coding it -- optimization
    gather = config.get('log', 'gather_log')
    console_handler.setLevel(gather)  # set the console output level
    console_handler.setFormatter(formatter)
    file_handler = logging.FileHandler(contants.log_dir + "/case.log", encoding='utf-8')
    # read the file log level from the config file
    output = config.get('log', 'output_log')
    file_handler.setLevel(output)
    file_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    return logger


if __name__ == '__main__':
    logger = get_logger('case')
    logger.debug("test start")
    logger.info("test error reporting")
    logger.error("test data")
    logger.warning("test result")
    logger.critical("test end")
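One pitfall with get_logger() as written: every call with the same name attaches a fresh console handler and file handler to the same underlying logger, so calling it twice duplicates each log line. A minimal guard, sketched here as a suggestion rather than part of the original module (the wrapper name is hypothetical):

import logging

def get_logger_once(name):
    logger = logging.getLogger(name)
    if logger.handlers:  # an earlier call already configured this logger; reuse it
        return logger
    logger.setLevel("DEBUG")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(handler)
    return logger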
[ "1162869224@qq.com" ]
1162869224@qq.com
ec5a3ad6b211c933d6f0f6c9428eef2cd6609e11
ce2ed48a3ea2b067ff45e2901fd1aa08b1b9fd3b
/Data_prep.py
78e05e3e3b624e9b3f50fb508fd7ace9a273d008
[]
no_license
mhmdsab/ASHRAE---Great-Energy-Predictor-III-
9d049a8f7eb9887c89f10b8073e6ad7587d06582
946c37af579299d8eb0d338fd4367f0ffc5dd59a
refs/heads/master
2020-12-01T06:07:04.791763
2019-12-28T07:27:32
2019-12-28T07:27:32
230,572,508
1
1
null
null
null
null
UTF-8
Python
false
false
13,487
py
import numpy as np import pandas as pd from tqdm import tqdm import tensorflow as tf import os from abc import ABCMeta, abstractmethod from sklearn.preprocessing import OneHotEncoder as OHE class DataGenerator(metaclass=ABCMeta): def __init__(self, para): self.iterator = None self.para = para def inputs(self, mode, batch_size, num_epochs=None): """Reads input data num_epochs times. Args: mode: String for the corresponding tfrecords ('train', 'validation') batch_size: Number of examples per returned batch. """ if mode != "train" and mode != "valid": raise ValueError("mode: {} while mode should be " "'train', 'validation'".format(mode)) filename = self.para.tf_records_url + '/' +mode + "_.tfrecord" with tf.name_scope("input"): # TFRecordDataset opens a binary file and # reads one record at a time. # `filename` could also be a list of filenames, # which will be read in order. dataset = tf.data.TFRecordDataset(filename) # The map transformation takes a function and # applies it to every element # of the dataset. dataset = dataset.map(self._decode) # The shuffle transformation uses a finite-sized buffer to shuffle # elements in memory. The parameter is the number of elements in the # buffer. For completely uniform shuffling, set the parameter to be # the same as the number of elements in the dataset. if mode == "train": dataset = dataset.shuffle(2380*16) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(batch_size) self.iterator = dataset.make_initializable_iterator() return self.iterator.get_next() @abstractmethod def _decode(self, serialized_example): pass class generate_dataset(DataGenerator): def __init__(self, para): self.para = para self.train_data_range = pd.date_range(para.TS_start_date,para.TS_end_date , freq=para.sampling_frequency) self.train_written_examples = 0 self.valid_written_examples = 0 self.train_excluded_examples = 0 self.valid_excluded_examples = 0 self.mean_values = {} self.std_values = {} self.three_months_corr = {} self.six_months_corr = {} self.features = {'x_encoder': tf.FixedLenFeature([self.para.in_sequence_window, 25], dtype=tf.float32), 'x_decoder': tf.FixedLenFeature([self.para.out_sequence_window, 24], dtype=tf.float32), 'encoder_meter_reading': tf.FixedLenFeature([self.para.in_sequence_window], dtype=tf.float32), 'decoder_meter_reading': tf.FixedLenFeature([self.para.out_sequence_window], dtype=tf.float32), 'decoder_meter_reading_denorm': tf.FixedLenFeature([self.para.out_sequence_window], dtype=tf.float32), 'mean': tf.FixedLenFeature([1], dtype=tf.float32), 'std':tf.FixedLenFeature([1], dtype=tf.float32)} self.generate_encoders() self.generate_dataset() self.organize('train', 'valid') super().__init__(para) def generate_encoders(self): self.encoding_dict = dict([('month_number' , OHE().fit(np.arange(1, 13).reshape(-1,1))), ('weekday_number' , OHE().fit(np.arange(7).reshape(-1,1))), ('meter' , OHE().fit(np.arange(4).reshape(-1,1)))]) def generate_dataset(self): print('generating raw dataset') train = pd.read_csv(self.para.train_url) meta = pd.read_csv(self.para.meta_url).loc[:,['building_id', 'primary_use']] dataset = pd.merge(train, meta, left_on = 'building_id', right_on = 'building_id') dataset['timestamp']= pd.to_datetime(dataset['timestamp']) self.dataset = dataset.groupby(['building_id', 'meter']) self.groups = list(self.dataset.groups.keys()) def autocorrelate(self, df, group): three_months_corr = df['meter_reading'].autocorr(lag = int(3*30*(24/int(self.para.sampling_frequency[0])))) self.three_months_corr[group] = three_months_corr 
six_months_corr = df['meter_reading'].autocorr(lag = int(6*30*(24/int(self.para.sampling_frequency[0])))) self.six_months_corr[group] = six_months_corr df['three_months_lag_autocorr'] = three_months_corr * np.ones(shape = len(df))#shape = (8784) df['six_months_lag_autocorr'] = six_months_corr *np.ones(shape = len(df))#shape = (8784) return df.astype('float32') def One_Hot_Enode(self, df): for feature in self.encoding_dict.keys(): raw_series = df[feature].values.reshape(-1, 1) One_Hot_Encoded_array = self.encoding_dict[feature].transform(raw_series).todense()[:,:-1] for i in range(One_Hot_Encoded_array.shape[1]): OHE_feature_name = feature+'_'+str(i) df[OHE_feature_name] = One_Hot_Encoded_array[:,i] return df.astype('float32') def pad_time_series(self, df): padding_df = pd.DataFrame() padding_df['any'] = np.ones(shape = (self.para.building_total_len//int(self.para.sampling_frequency[0]))) padding_df = padding_df.set_index(self.train_data_range) merged = pd.merge(padding_df, df, how = 'left', left_index=True, right_index=True).drop('any',1).fillna(0) return merged.astype('float32') def Normalize_Pad_Split(self, df, group): mean = np.mean(df['meter_reading'][df['month_number'] <= self.para.features_extractor_len]) std = np.std(df['meter_reading'][df['month_number'] <= self.para.features_extractor_len]) if (mean!= mean) or (std!= std): mean = np.mean(list(self.mean_values.values())) std = np.mean(list(self.std_values.values())) self.mean_values[group] = mean self.std_values[group] = std df = self.pad_time_series(df) df['meter_reading_normalized'] = df['meter_reading'].map(lambda x:self.Normalize(x, mean, std)) df['three_months_lag'] = df['three_months_lag'].map(lambda x:self.Normalize(x, mean, std)) df['six_months_lag'] = df['six_months_lag'].map(lambda x:self.Normalize(x, mean, std)) train_df = df.loc[:self.para.train_end_date,:].drop(['month_number', 'weekday_number'], 1) valid_df = df.loc[self.para.valid_start_date:,:].drop(['month_number', 'weekday_number'], 1) return train_df, valid_df def organize(self, train_name, valid_name): print('organizing raw dataset') if (os.path.exists(self.para.tf_records_url+'/'+'{}_.tfrecord'.format(train_name))) & \ (os.path.exists(self.para.tf_records_url+'/'+'{}_.tfrecord'.format(valid_name))): self.para.train_kickoff = 'not_first_time' train_name, valid_name = '_','_' with tf.python_io.TFRecordWriter(self.para.tf_records_url+'/'+'{}_.tfrecord'.format(train_name)) as train_writer: with tf.python_io.TFRecordWriter(self.para.tf_records_url+'/'+'{}_.tfrecord'.format(valid_name)) as valid_writer: for group in tqdm(self.groups): building_df = self.dataset.get_group(group).set_index('timestamp') building_df = building_df.resample(self.para.sampling_frequency).mean().fillna(method = 'ffill') building_df = self.add_time_features(building_df) building_df = self.One_Hot_Enode(building_df) building_df = self.add_timelags(building_df) building_df = self.autocorrelate(building_df, group) train_building_df, valid_building_df = self.Normalize_Pad_Split(building_df, group) if self.para.train_kickoff == 'first_time': self._convert_to_tfrecord(train_building_df, train_writer, train_name, group) self._convert_to_tfrecord(valid_building_df, valid_writer, valid_name, group) def _convert_to_tfrecord(self, df, writer, mode, group): df_meter_reading_normalized = df['meter_reading_normalized'] df_meter_reading_unnormalized = df['meter_reading'] df_x = df.drop(['building_id', 'meter_reading', 'meter'],1) mean = self.mean_values[group] std = self.std_values[group] for i in 
range((len(df) - self.para.in_sequence_window)//self.para.out_sequence_window): start = i * self.para.out_sequence_window end = start + self.para.in_sequence_window y_end = end + self.para.out_sequence_window if ((df['meter_reading'].iloc[start:end] == 0).sum() < \ int(self.para.max_zeros_in_example * self.para.in_sequence_window)) \ and (std>self.para.min_std): example = tf.train.Example(features=tf.train.Features(feature={ 'x_encoder': tf.train.Feature( float_list=tf.train.FloatList(value = df_x.iloc[start:end,:].values.flatten())), 'x_decoder': tf.train.Feature( float_list=tf.train.FloatList(value = df_x.drop(['meter_reading_normalized'],1) \ .iloc[end:y_end,:].values.flatten())), 'encoder_meter_reading': tf.train.Feature( float_list=tf.train.FloatList(value = df_meter_reading_normalized[start:end]\ .values.flatten())), 'decoder_meter_reading': tf.train.Feature( float_list=tf.train.FloatList(value = df_meter_reading_normalized[end:y_end]\ .values.flatten())), 'decoder_meter_reading_denorm': tf.train.Feature( float_list=tf.train.FloatList(value = df_meter_reading_unnormalized[end:y_end]\ .values.flatten())), 'mean': tf.train.Feature( float_list=tf.train.FloatList(value = [mean])), 'std': tf.train.Feature( float_list=tf.train.FloatList(value = [std]))})) writer.write(example.SerializeToString()) if mode == 'train': self.train_written_examples += 1 elif mode == 'valid': self.valid_written_examples += 1 else: if mode == 'train': self.train_excluded_examples+=1 elif mode == 'valid': self.valid_excluded_examples+=1 def _decode(self, serialized_example): example = tf.parse_single_example( serialized_example, features=self.features) x_encoder = example['x_encoder'] x_decoder = example['x_decoder'] encoder_meter_reading = example['encoder_meter_reading'] decoder_meter_reading = example['decoder_meter_reading'] decoder_meter_reading_denorm = example['decoder_meter_reading_denorm'] mean = example['mean'] std = example['std'] return x_encoder, x_decoder, encoder_meter_reading, decoder_meter_reading, decoder_meter_reading_denorm, mean, std @staticmethod def Normalize(value, mean, std): return (value-mean)/(std + 1e-100) @staticmethod def add_timelags(df): df['three_months_lag'] = df['meter_reading'].shift(90*6).fillna(0) df['six_months_lag'] = df['meter_reading'].shift(90*6*2).fillna(0) return df.astype('float32') @staticmethod def add_time_features(df): df['weekday_number'] = df.index.weekday df['month_number'] = df.index.month return df
[ "noreply@github.com" ]
mhmdsab.noreply@github.com
93608c46dbf817b09cd537b4fe647e9a03ac63ca
06cd48c385acf1b79e9cf235730ab5a2f61c016e
/sql_queries.py
12c9d5d060a7b204c6da2552d0bf4328db5250cf
[]
no_license
haymar017/-UDACITY-Data-Modeling-with-Postgres
16f66b6abd267577f5da4211b6e0ce753761a257
89b494f2de889f388e6c4c3acaa1c10bbb57acfe
refs/heads/master
2022-11-14T00:57:54.859730
2020-07-09T04:44:15
2020-07-09T04:44:15
276,080,584
0
0
null
null
null
null
UTF-8
Python
false
false
3,432
py
import json import pandas # DROP TABLES songplay_table_drop = "drop table if exists songplays" user_table_drop = "drop table if exists users" song_table_drop = "drop table if exists songs" artist_table_drop = "drop table if exists artists" time_table_drop = "drop table if exists time" # CREATE TABLES user_table_create = (""" create table if not exists users ( user_id int Primary Key, first_name varchar, last_name varchar, gender varchar, level varchar ); """) artist_table_create = (""" create table if not exists artists ( artist_id varchar Primary Key, name varchar, location varchar, latitude float, longitude float ); """) song_table_create = (""" create table if not exists songs( song_id varchar Primary Key , title varchar, artist_id varchar not null, year int, duration float ); """) time_table_create = (""" create table if not exists time ( start_time timestamp Primary Key, hour int, day int, week int, month int, year int, weekday int ); """) songplay_table_create = (""" create table if not exists songplays ( songplay_id serial Primary Key, start_time timestamp not null, user_id int not null, level varchar, song_id varchar, artist_id varchar, session_id int, location varchar, user_agent varchar, foreign key (user_id) references users (user_id), foreign key (song_id) references songs (song_id), foreign key (artist_id) references artists (artist_id), foreign key (start_time) references time (start_time) ); """) # INSERT RECORDS songplay_table_insert = (""" insert into songplays( start_time, user_id, level, song_id, artist_id, session_id, location, user_agent) values (%s,%s,%s,%s,%s,%s,%s,%s) """) user_table_insert = (""" insert into users( user_id, first_name, last_name, gender, level) values (%s,%s,%s,%s,%s) on conflict (user_id) do update set level = excluded.level; """) song_table_insert = (""" insert into songs( song_id, title, artist_id, year, duration) values (%s,%s,%s,%s,%s) on conflict(song_id) do nothing """) artist_table_insert = (""" insert into artists( artist_id, name, location, latitude, longitude) values (%s,%s,%s,%s,%s) on conflict(artist_id) do nothing """) time_table_insert = (""" insert into time( start_time, hour, day, week, month, year, weekday) values(%s,%s,%s,%s,%s,%s,%s) on conflict(start_time) do nothing """) # FIND SONGS song_select = (""" select songs.song_id , artists.artist_id from songs join artists on songs.artist_id = artists.artist_id where songs.title=(%s) and artists.name=(%s) and songs.duration=(%s) """) # QUERY LISTS create_table_queries = [user_table_create, artist_table_create, song_table_create, time_table_create, songplay_table_create] drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
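The module above only defines the SQL strings and the two query lists; running them takes a short driver script. A hedged sketch with hypothetical connection parameters (host, database name, and credentials are assumptions, not taken from the file):

import psycopg2
from sql_queries import create_table_queries, drop_table_queries

# hypothetical DSN; adjust host/dbname/user/password for your environment
conn = psycopg2.connect("host=127.0.0.1 dbname=mydb user=me password=secret")
cur = conn.cursor()

# drop existing tables first (songplays before the tables it references), then recreate
for query in drop_table_queries + create_table_queries:
    cur.execute(query)
    conn.commit()

conn.close()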
[ "noreply@github.com" ]
haymar017.noreply@github.com
8afe9cc9f4f53d06be5e718686be5cb4cf5c0cdb
c67268ac491ecfe606308a43185f1bf8073d56a1
/unittesting/test_employee2.py
84682a7e52ffd035b6a9a992a079c59112128dc6
[]
no_license
jisshub/python-django-training
3c0fad4c80c78bcfb4b61b025da60d220b502e4b
d8c61f53e3bb500b1a58a706f20108babd6a1a54
refs/heads/master
2020-06-21T15:07:25.704209
2019-09-01T19:24:02
2019-09-01T19:24:02
197,487,745
0
0
null
null
null
null
UTF-8
Python
false
false
1,929
py
import unittest
# here we import the Employee class from the employee module (employee.py)
from employee import Employee


class EmployeeTest(unittest.TestCase):

    def setUp(self):
        print('setup\n')
        # here we create two Employee objects instead of creating them in each test,
        # ENSURING THE DRY PRINCIPLE
        self.emp1 = Employee('jiss', 'jose', 3000)
        self.emp2 = Employee('isco', 'alarcon', 5000)

    def tearDown(self):
        print('teardown\n')

    def test_email(self):
        print('test_email\n')
        var1 = self.emp1.email
        var2 = self.emp2.email
        self.assertEqual(var1, 'jissjose@gmail.com')
        self.assertEqual(var2, 'iscoalarcon@gmail.com')
        self.emp1.first = 'john'
        self.emp2.last = 'james'
        self.assertEqual(self.emp1.email, 'johnjose@gmail.com')
        self.assertEqual(self.emp2.email, 'iscojames@gmail.com')

    def test_fullname(self):
        print('test_fullname\n')
        self.assertEqual(self.emp1.full_name, 'jiss jose')
        self.emp1.first = 'jom'
        self.emp1.last = 'thomas'
        self.assertEqual(self.emp1.full_name, 'jom thomas')
        self.assertEqual(self.emp2.full_name, 'isco alarcon')
        self.emp2.first = 'alvaro'
        self.emp2.last = 'morata'
        self.assertEqual(self.emp2.full_name, 'alvaro morata')

    def test_pay(self):
        print('test_pay\n')
        self.assertEqual(self.emp1.apply_raise, 6000)
        self.emp1.pay_raise = 1.5
        self.assertEqual(self.emp1.apply_raise, 9000)
        self.assertEqual(self.emp2.apply_raise, 10000)
        self.emp2.pay_raise = .5
        self.assertEqual(self.emp2.apply_raise, 5000)


if __name__ == '__main__':
    unittest.main()

# here we test whether the values of apply_raise and pay are equal.
# setUp runs before each test and tearDown runs after each test.
# the order is: setUp, test method, tearDown
[ "jissmon476@gmial.com" ]
jissmon476@gmial.com
8767b568baa843c144fcb45fc3930190b3aebe10
ef270274b87c2500485a992a798606e139c120ff
/blog/migrations/0001_initial.py
97261cf246c533caf63cae32a13a047a7610147d
[]
no_license
chvbrr/my-first-blog
e05e517a57c84ca99b176816486488462531d11e
ecca64473048220442f35a4d27c7d1ea6697c15b
refs/heads/master
2021-01-10T13:43:27.209037
2016-01-31T14:29:54
2016-01-31T14:29:54
50,764,703
0
0
null
null
null
null
UTF-8
Python
false
false
1,109
py
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-30 18:04 from __future__ import unicode_literals import datetime from django.conf import settings from django.db import migrations, models import django.db.models.deletion from django.utils.timezone import utc class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=200)), ('text', models.TextField()), ('created_date', models.DateTimeField(default=datetime.datetime(2016, 1, 30, 18, 4, 39, 220156, tzinfo=utc))), ('published_date', models.DateTimeField(blank=True, null=True)), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "ch.v.b.ramaraju@gmail.com" ]
ch.v.b.ramaraju@gmail.com
976016de1236d9a6ba795308ff368d105a8a28f7
629c93631250eda8968ee2903c9b264f18e5f47b
/combined_model.py
6b0c252162d840aafd67cfd428ef6381b5578dc3
[]
no_license
sohisudhir/Master-s-Thesis
f7bb66a67e7fd108a38815f95117ab8df977ea2c
36a74ec91db5779dc6ddf2814b1f58109463cb38
refs/heads/master
2023-01-21T14:02:35.160343
2020-12-02T10:45:35
2020-12-02T10:45:35
289,114,798
0
0
null
null
null
null
UTF-8
Python
false
false
30,782
py
# -*- coding: utf-8 -*- """Combined_model.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1Z2qYEQ15gT32q9WFUJ2-LBhaDSglVI66 """ # Setup & Config import transformers from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup import torch from torch.nn import CrossEntropyLoss, MSELoss from torch import nn, optim from torch.utils.data import Dataset, DataLoader import torch.nn.functional as F import numpy as np import pandas as pd import random import copy import csv import re import argparse import os from sklearn.model_selection import train_test_split from sklearn import metrics from scipy.stats import pearsonr from scipy.stats import kendalltau from scipy.stats import spearmanr # RANDOM_SEED = 42 RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) torch.manual_seed(RANDOM_SEED) """## **Data Preparation**""" class AbuseDataset(Dataset): def __init__(self, reviews, targets, c1,c2,c3, c_num, tokenizer, max_len, ids): self.reviews = reviews self.targets = targets self.c1 = c1 self.c2 = c2 self.c3 = c3 self.c_num = c_num self.tokenizer = tokenizer self.max_len = max_len self.ids = ids def __len__(self): return len(self.reviews) def __getitem__(self, item): c=["[PAD]","[PAD]","[PAD]"] review = str(self.reviews[item]) target = self.targets[item] c_num = self.c_num[item] c[0] = str(self.c1[item]) c[1] = str(self.c2[item]) c[2] = str(self.c3[item]) encoding = self.tokenizer.encode_plus( review, add_special_tokens=True, truncation=True, max_length=self.max_len, return_token_type_ids=False, pad_to_max_length=True, return_attention_mask=True, return_tensors='pt', ) idx = self.ids[item] context_input_ids = [] context_attention_mask = [] for i in range(0,3): encoding_context = self.tokenizer.encode_plus( c[i], add_special_tokens=True, truncation=True, max_length=self.max_len, return_token_type_ids=False, pad_to_max_length=True, return_attention_mask=True, return_tensors='pt') context_input_ids.append(encoding_context['input_ids'].flatten()) context_attention_mask.append(encoding_context['attention_mask'].flatten()) return { 'review_text': review, 'input_ids': encoding['input_ids'].flatten(), 'attention_mask': encoding['attention_mask'].flatten(), 'targets': torch.tensor(target, dtype=torch.float), 'context_input_ids': torch.stack(context_input_ids), 'context_attention_masks': torch.stack(context_attention_mask), 'context_num': c_num, 'ids': idx } class EmotionDataset(Dataset): def __init__(self, tweets, targets, tokenizer, max_len): self.tweets = tweets self.targets = targets self.tokenizer = tokenizer self.max_len = max_len def __len__(self): return len(self.tweets) def __getitem__(self, item): tweet = str(self.tweets[item]) target = self.targets[item] encoding = self.tokenizer.encode_plus( tweet, add_special_tokens=True, truncation = True, max_length=self.max_len, return_token_type_ids=False, pad_to_max_length=True, return_attention_mask=True, return_tensors='pt', ) return { 'tweet_text': tweet, 'input_ids': encoding['input_ids'].flatten(), 'attention_mask': encoding['attention_mask'].flatten(), 'targets': torch.tensor(target, dtype=torch.long) } class GeneralAttention(nn.Module): def __init__(self, sparsemax=False): super().__init__() self.linear = nn.Linear(768, 1) # self.normaliser = masked_softmax self.weights = [] def masked_softmax(self, vector, mask): while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) # To limit numerical errors from large vector elements outside the mask, we zero these out. 
result = torch.nn.functional.softmax(vector * mask, dim=-1) result = result * mask result = result / ( result.sum(dim=-1, keepdim=True) + 1e-4 ) return result def forward(self, context, masks, batch_size): context = torch.cat(context, dim=1) context = context.reshape(-1,3,768) weights = self.linear(context).squeeze(-1) weights = self.masked_softmax(weights, masks) context = torch.bmm(weights.unsqueeze(dim=1), context) return context, weights def create_maintask_data_loader(df_train, tokenizer, max_len, batch_size, flag = 0): ds = AbuseDataset(reviews= df_train.comment.to_numpy(), targets= df_train.Score.to_numpy(), c1 = df_train.context1.to_numpy(), c2 = df_train.context2.to_numpy(), c3 = df_train.context3.to_numpy(), c_num = df_train.context_num.to_numpy(), tokenizer = tokenizer, max_len = max_len, ids = df_train.idx.to_numpy()) if(flag == 0): return DataLoader(ds, batch_size=batch_size, num_workers=4 ) else: return DataLoader(ds, batch_size=batch_size, num_workers=4, shuffle = True ) def create_auxtask_data_loader(df, tokenizer, max_len, batch_size, flag = 0): anger = df.anger.to_numpy() anticipation = df.anticipation.to_numpy() disgust = df.disgust.to_numpy() fear = df.fear.to_numpy() joy = df.joy.to_numpy() love = df.love.to_numpy() optimism = df.optimism.to_numpy() pessimism = df.pessimism.to_numpy() sadness = df.sadness.to_numpy() surprise = df.surprise.to_numpy() trust = df.trust.to_numpy() emotion = np.stack((anger, anticipation, disgust, fear, joy, love, optimism, pessimism, sadness, surprise, trust), axis = 1) ds = EmotionDataset( tweets=df.Tweet.to_numpy(), targets= emotion, tokenizer=tokenizer, max_len=max_len ) if(flag == 0): return DataLoader(ds, batch_size=batch_size, num_workers=4 ) else: return DataLoader(ds, batch_size=batch_size, num_workers=4, shuffle = True ) def clean_tweets(csvf): fname = 'cleaned_' + csvf with open(csvf, 'r', encoding = 'utf-8') as c, open(fname, 'w', encoding = 'UTF-8') as w: reader = csv.reader(c, delimiter = '\t') writer = csv.writer(w, delimiter = '\t') for i,row in enumerate(reader): if(i == 0): writer.writerow(row) continue row[1] = row[1].lower() row[1] = re.sub(r"#(\w+)", "HASHTAG", row[1]) row[1] = re.sub(r"(^|[^@\w])@(\w{1,15})", "_MTN_", row[1]) row[1] = re.sub(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+", "_URL_", row[1]) writer.writerow(row) c.close() w.close() def prepare_data(abuse_files, sent_files, config): #Requirements BATCH_SIZE = config['batch_size'] MAX_LEN = config['max_len'] PRE_TRAINED_MODEL_NAME = config['PRE_TRAINED_MODEL_NAME'] tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME) # Arranging data loaders for main task a = abuse_files[0] # b = abuse_files[1] c = abuse_files[2] df_train = pd.read_csv(a) # df_val = pd.read_csv(b) df_test = pd.read_csv(c) # df_train = df_train[0:1000] # df_val = df_train[0:200] # df_test = df_train[0:800] print('Dimensions of abuse file') print(df_train.shape, 0, df_test.shape) data_loader_main = create_maintask_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE, 1) # val_data_loader_main = create_maintask_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE) test_data_loader_main = create_maintask_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE) # Arranging data loaders for auxiliary task -- SEMEVAL2018A a = sent_files[0] b = sent_files[1] c = sent_files[2] clean_tweets(a) clean_tweets(b) clean_tweets(c) df_train = pd.read_csv('cleaned_' + a, sep = '\t') # df_train = df_train[0:100] df_val = pd.read_csv('cleaned_' + b, sep = '\t') # df_val = df_val[0:20] df_test = 
pd.read_csv('cleaned_' + c, sep = '\t') # df_test = df_test[0:80] print('Dimensions of sentiment file') print(df_train.shape, df_val.shape, df_test.shape) data_loader_aux = create_auxtask_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE, 1) val_data_loader_aux = create_auxtask_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE) test_data_loader_aux = create_auxtask_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE) dataloaders = {'main_train': data_loader_main, 'main_val': [], 'main_test': test_data_loader_main, 'aux_train': data_loader_aux, 'aux_val': val_data_loader_aux, 'aux_test': test_data_loader_aux } return dataloaders """## **MODELS**""" class MSLELoss(nn.Module): def __init__(self): super().__init__() self.mse = nn.MSELoss(reduction = 'sum') def forward(self, pred, actual): return self.mse(torch.log(pred + 1.00005), torch.log(actual + 1.00005)) class multitask_conversation_model(nn.Module): def __init__(self, config): #num_labels, num_emotions, attention_dropout, fc_dropout): super(multitask_conversation_model, self).__init__() self.num_labels = config['abuse_classes'] self.num_emotions = config['sent_classes'] self.device = config['device'] PRE_TRAINED_MODEL_NAME = config['PRE_TRAINED_MODEL_NAME'] self.b_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME) self.bert = AdaptedBertModel(self.b_model, True, True, config['bert_dropout'], config['fc_dropout']) self.bert_config = self.bert.config self.attention = GeneralAttention() self.attention.to(self.device) self.main_regression = nn.Linear(self.bert_config.hidden_size, self.num_labels) self.aux_classifier = nn.Linear(self.bert_config.hidden_size, self.num_emotions) del(self.b_model) def forward(self, input_ids, token_type_ids=None, attention_mask=None, main_task=True, targets = None): if main_task: outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, mode='main_task') # pooled_output = self.bert.pooler(outputs) pooled_output = outputs.mean(dim = 1) return pooled_output else: outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, mode= 'auxiliary_task') # pooled_output = self.bert.pooler(outputs) pooled_output = outputs.mean(dim = 1) return pooled_output class AdaptedBertModel(nn.Module): def __init__(self, model, main_task, auxiliary_task, attention_dropout, fc_dropout): super().__init__() self.embeddings = model.embeddings self.encoder = BertEncoder(model.encoder.layer, main_task, auxiliary_task, attention_dropout, fc_dropout) self.config = model.config self.pooler = model.pooler def forward(self, input_ids, token_type_ids=None, attention_mask=None, mode="main_task"): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to( dtype=next(self.parameters()).dtype ) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embeddings = self.embeddings(input_ids, token_type_ids) embeddings = self.encoder(embeddings, extended_attention_mask, mode) return embeddings # Hard parameter sharing setup : All layers but the last are shared # Last layer is task-specific class BertEncoder(nn.Module): def __init__(self, layers, main_task, auxiliary_task, attention_dropout, fc_dropout): super().__init__() self.layers = layers[:-1] self.output_attentions = False for layer in self.layers: layer.attention.self.dropout = nn.Dropout(attention_dropout) 
if main_task: self.layer_left = copy.deepcopy(layers[-1]) if auxiliary_task: self.layer_right = copy.deepcopy(layers[-1]) def forward(self, hidden, attention_mask, mode): all_attentions = () for layer in self.layers: hidden = layer(hidden, attention_mask) if self.output_attentions: all_attentions = all_attentions + (hidden[1],) hidden = hidden[0] if mode == "main_task": hidden = self.layer_left(hidden, attention_mask) elif mode == "auxiliary_task": hidden = self.layer_right(hidden, attention_mask) outputs = hidden[0] if self.output_attentions: outputs = outputs + (all_attentions,) return outputs def evaluation_metrics(preds, targets): with torch.no_grad(): tp = torch.zeros(preds.shape[1]) tn = torch.zeros(preds.shape[1]) fp = torch.zeros(preds.shape[1]) fn = torch.zeros(preds.shape[1]) for n,pred in enumerate(preds): for j,pr in enumerate(pred): t = targets[n][j] if(pr == 0): if(t == 0): tn[j] += 1 else: fn[j] += 1 elif(pr == 1): if(t == 0): fp[j] += 1 else: tp[j] += 1 #Micro num = torch.sum(tp) deno_prec = torch.zeros(preds.shape[1]) deno_rec = torch.zeros(preds.shape[1]) for j,val in enumerate(deno_prec): deno_prec[j] = tp[j] + fp[j] deno_rec[j] = tp[j] + fn[j] den = torch.sum(deno_prec) if(den == 0): micro_precision = 0 else: micro_precision = num.item()/den.item() den = torch.sum(deno_rec) if(den == 0): micro_recall = 0 else: micro_recall = num.item()/den.item() numerator = 2 * micro_precision * micro_recall denominator = micro_precision + micro_recall if(denominator == 0): micro_f1 = 0 else: micro_f1 = numerator/denominator # print(micro_precision, micro_recall, micro_f1) #MACRO precision = torch.zeros(preds.shape[1]) recall = torch.zeros(preds.shape[1]) for j,val in enumerate(precision): if(tp[j] + fp[j] == 0): precision[j] = 0 else: precision[j] = tp[j]/(tp[j] + fp[j]) if(tp[j] + fn[j] == 0): recall[j] = 0 else: recall[j] = tp[j]/(tp[j] + fn[j]) f1 = torch.zeros(preds.shape[1]) for j,val in enumerate(f1): num = 2 * precision[j] * recall[j] deno = precision[j] + recall[j] if(deno == 0): f1[j] = 0 else: f1[j] = num/deno macro_precision = torch.mean(precision) macro_recall = torch.mean(recall) macro_f1 = torch.mean(f1) # print(macro_precision, macro_recall, macro_f1) return micro_f1, macro_f1 def eval_model(model, data_loader, device, mode): model = model.to(device) model = model.eval() loss_fn_main = nn.MSELoss().to(device) loss_fn_aux = nn.BCEWithLogitsLoss().to(device) if(mode == 'main_task'): p = [] t = [] loss_m = [] ids = [] emotion_pred = [] c1_pred = [] c2_pred = [] c3_pred = [] with torch.no_grad(): for d in data_loader: input_ids = d["input_ids"].to(device) attention_mask = d["attention_mask"].to(device) targets = d["targets"].to(device) context_input_ids = d["context_input_ids"].to(device) context_attention_masks = d["context_attention_masks"].to(device) context_num = d['context_num'].to(device) outputs = model.forward(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = True) out_encoding = [] for i in range(len(context_input_ids)): c = model.forward(input_ids=context_input_ids[i].to(device),attention_mask=context_attention_masks[i].to(device)) out_encoding.append(c) ops = model(input_ids=context_input_ids[i].to(device), token_type_ids = None, attention_mask=context_attention_masks[i].to(device), main_task = False) c1,c2,c3 = torch.unbind(ops, dim = 0) logits = model.module.aux_classifier(c1.unsqueeze(dim = 0)) predictions = torch.sigmoid(logits) preds = torch.gt(predictions, 0.5).int() c1_pred.extend(preds) logits = 
model.module.aux_classifier(c2.unsqueeze(dim = 0)) predictions = torch.sigmoid(logits) preds = torch.gt(predictions, 0.5).int() c2_pred.extend(preds) logits = model.module.aux_classifier(c3.unsqueeze(dim = 0)) predictions = torch.sigmoid(logits) preds = torch.gt(predictions, 0.5).int() c3_pred.extend(preds) mask = torch.zeros([input_ids.shape[0],3]) for i in range(len(context_num)): arr = np.zeros(3) arr[:context_num[i]] = 1 mask[i] = torch.tensor(arr) mask = mask.to(device) weighted, weights = model.module.attention.forward(out_encoding, mask, config['batch_size']) main_context = outputs.add(weighted.squeeze(dim=1)) val = model.module.main_regression(main_context) predictions = torch.tanh(val) loss = loss_fn_main(predictions.squeeze(dim = 1), targets) p.extend(predictions.squeeze(dim=1).to('cpu').detach().numpy()) t.extend(targets.to('cpu').detach().numpy()) ids.extend(d["ids"].to('cpu').detach().numpy()) loss_m.append(loss.item()) ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets) logits = model.module.aux_classifier(ops) predictions = torch.sigmoid(logits) preds = torch.gt(predictions, 0.5).int() emotion_pred.extend(preds) with open('testing_preds_mtl_combo.csv', 'a', encoding = 'utf-8') as f: writer = csv.writer(f) # writer.writerow(['ID', 'Prediction', 'Target']) row = [] for i,idx in enumerate(ids): row.append(idx) row.append(p[i]) row.append(t[i]) row.append(emotion_pred[i].to('cpu').detach().numpy()) row.append(c1_pred[i].to('cpu').detach().numpy()) row.append(c2_pred[i].to('cpu').detach().numpy()) row.append(c3_pred[i].to('cpu').detach().numpy()) writer.writerow(row) row = [] f.close() pear = pearsonr(np.array(t),np.array(p)) spear = spearmanr(np.array(t),np.array(p)) tau = kendalltau(np.array(t),np.array(p)) loss = np.mean(loss_m) return pear[0], spear[0], tau[0], loss elif(mode == 'auxiliary_task'): accuracies = [] loss_a = [] micro_f1 = [] macro_f1 = [] with torch.no_grad(): for d in data_loader: input_ids = d["input_ids"].to(device) attention_mask = d["attention_mask"].to(device) targets = d["targets"].to(device) # logits = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets) ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets) logits = model.module.aux_classifier(ops) predictions = torch.sigmoid(logits) loss = loss_fn_aux(logits.float(), targets.float()) loss_a.append(loss.item()) preds = torch.gt(predictions, 0.5).int() mic_f1, mac_f1 = evaluation_metrics(preds, targets) micro_f1.append(mic_f1) macro_f1.append(mac_f1) avg_micro_f1 = np.mean(micro_f1) avg_macro_f1 = np.mean(macro_f1) loss = np.mean(loss_a) return avg_micro_f1, avg_macro_f1, loss def train_epoch(model, dataloaders, device, config): model = model.to(config['device']) model = model.train() least_loss = 100.0 data_loader_main = dataloaders['main_train'] data_loader_aux = dataloaders['aux_train'] val_data_loader_main = dataloaders['main_val'] val_data_loader_aux = dataloaders['aux_val'] loss_fn_main = nn.MSELoss().to(config['device']) loss_fn_aux = nn.BCEWithLogitsLoss().to(config['device']) device = config['device'] optimizer_main = AdamW(model.module.bert.parameters(), lr = config['lr_main'], weight_decay= 1e-4, correct_bias=False) optimizer_main2 = AdamW(model.module.main_regression.parameters(), lr = config['lr_main']*10, weight_decay= 1e-4, correct_bias=False) optimizer_main3 = 
AdamW(model.module.attention.parameters(), lr = config['lr_main']*10, weight_decay= 1e-4, correct_bias=False) # optimizer_main = torch.optim.Adam(model.parameters(), lr = 0.001) # optimizer_aux = torch.optim.Adam(model.parameters(), lr = config['lr_aux']) optimizer_aux = AdamW(model.module.bert.parameters(), lr = config['lr_aux'], weight_decay= 1e-4, correct_bias=False) optimizer_aux2 = AdamW(model.module.aux_classifier.parameters(), lr = config['lr_aux']*10, weight_decay= 1e-4, correct_bias=False) total_steps = len(data_loader_main) * config['num_epochs'] scheduler_main = get_linear_schedule_with_warmup( optimizer_main, num_warmup_steps=0, num_training_steps=total_steps ) total_steps = len(data_loader_aux) * config['num_epochs'] scheduler_aux = get_linear_schedule_with_warmup( optimizer_aux, num_warmup_steps=0, num_training_steps=total_steps ) # optimizer_main = AdamW(model.parameters(), lr = config['lr_main'], weight_decay= 1e-4, correct_bias=False) # optimizer_aux = AdamW(model.parameters(), lr = config['lr_aux'], weight_decay= 1e-4, correct_bias=False) # total_steps = len(data_loader_main) * config['num_epochs'] # scheduler = get_linear_schedule_with_warmup( # optimizer_main, # num_warmup_steps=0, # num_training_steps=total_steps # ) coin_flips = [] #main_task for i in range(len(data_loader_main)): coin_flips.append(0) #auxiliary task for i in range(len(data_loader_aux)): coin_flips.append(1) val_counter = 0 for epoch in range(config['num_epochs']): if(epoch >= 3): print('Freezing Bert!') for param in model.module.bert.encoder.parameters(): param.requires_grad = False print("Starting epoch {}".format(epoch)) random.shuffle(coin_flips) loss_m = [] loss_a = [] p = [] t = [] micro_f1 = [] macro_f1 = [] accuracies = [] main_dl = iter(data_loader_main) aux_dl = iter(data_loader_aux) for i in coin_flips: if(i == 0): #MAIN_TASK try: d = next(main_dl) except: main_dl = iter(data_loader_main) d = next(main_dl) # print('In main task') input_ids = d["input_ids"].to(device) attention_mask = d["attention_mask"].to(device) targets = d["targets"].to(device) context_input_ids = d["context_input_ids"].to(device) context_attention_masks = d["context_attention_masks"].to(device) context_num = d['context_num'].to(device) outputs = model.forward(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = True) out_encoding = [] for i in range(len(context_input_ids)): c = model.forward(input_ids=context_input_ids[i].to(device),attention_mask=context_attention_masks[i].to(device)) out_encoding.append(c) mask = torch.zeros([input_ids.shape[0],3]).to(device) for i in range(len(context_num)): arr = np.zeros(3) arr[:context_num[i]] = 1 mask[i] = torch.tensor(arr) weighted,_ = model.module.attention.forward(out_encoding, mask, config['batch_size']) main_context = outputs.add(weighted.squeeze(dim=1)) val = model.module.main_regression(main_context) predictions = torch.tanh(val) loss = loss_fn_main(predictions.squeeze(dim = 1), targets) p.extend(predictions.squeeze(dim=1).to('cpu').detach().numpy()) t.extend(targets.to('cpu').detach().numpy()) loss_m.append(loss.item()) loss.backward() # nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) optimizer_main.step() optimizer_main2.step() optimizer_main3.step() scheduler_main.step() optimizer_main.zero_grad() optimizer_main2.zero_grad() optimizer_main3.zero_grad() val_counter += 1 else: try: d = next(aux_dl) except: aux_dl = iter(data_loader_aux) d = next(aux_dl) input_ids = d["input_ids"].to(device) attention_mask = 
d["attention_mask"].to(device) targets = d["targets"].to(device) ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets) logits = model.module.aux_classifier(ops) predictions = torch.sigmoid(logits) loss = loss_fn_aux(logits.float(), targets.float()) loss_a.append(loss.item()) loss = loss * 0.4 loss.backward() # nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) optimizer_aux.step() optimizer_aux2.step() scheduler_aux.step() optimizer_aux.zero_grad() optimizer_aux2.zero_grad() preds = torch.gt(predictions, 0.5).int() mic_f1, mac_f1 = evaluation_metrics(preds, targets) micro_f1.append(mic_f1) macro_f1.append(mac_f1) # print('aux task completed') val_counter +=1 pear = pearsonr(np.array(t),np.array(p)) spear = spearmanr(np.array(t),np.array(p)) tau = kendalltau(np.array(t),np.array(p)) avg_micro_f1 = np.mean(micro_f1) avg_macro_f1 = np.mean(macro_f1) print("Epoch {}. Training Pearson {}.Training Pearson {}.Training Spearman {} Training Loss {}".format(epoch, pear[0], spear[0], tau[0], np.mean(loss_m))) print("Epoch {}. Training Micro F1 {}.Training Macro F1 {}.Training Loss {}".format(epoch, avg_micro_f1, avg_macro_f1, np.mean(loss_a))) # pearson, spearman, kendall, loss = eval_model(model, val_data_loader_main, device, mode = 'main_task') # print("MAIN: Epoch {}. Validation Pearson {}.Validation Spearman {}. Validation Kendall {}. Validation Loss {}".format(epoch, pearson, spearman, kendall,loss)) # if(loss < least_loss): # print('Saving best model') # least_loss = loss # state = {'epoch': epoch+1, 'state_dict': model.state_dict(), 'optimizer_main': optimizer_main.state_dict(), # 'optimizer_aux': optimizer_aux.state_dict()}#, 'scheduler_main': scheduler_main, 'scheduler_aux': scheduler_aux} # torch.save(state, 'mtl_best_model.ckpt') avg_micro_f1, avg_macro_f1, loss = eval_model(model, val_data_loader_aux, device, mode = 'auxiliary_task') print("AUX: Epoch {}.Validation Micro F1 {}.Validation Macro F1 {}. 
Validation Loss {}".format(epoch, avg_micro_f1, avg_macro_f1, loss)) state = {'epoch': epoch+1, 'state_dict': model.state_dict(), 'optimizer_main': optimizer_main.state_dict(), 'optimizer_aux': optimizer_aux.state_dict()}#, 'scheduler_main': scheduler_main, 'scheduler_aux': scheduler_aux} print('Saving last model') torch.save(state, 'mtl_combo_last_model.ckpt') """# **Calling the model**""" if __name__ == "__main__": parser = argparse.ArgumentParser(description="Enter args") parser.add_argument('--PRE_TRAINED_MODEL_NAME', default="bert-base-cased", type=str) parser.add_argument('--batch_size', default=16, type=int) parser.add_argument('--max_len', default=200, type=int) parser.add_argument('--abuse_classes', default=1, type=int) parser.add_argument('--sent_classes', default=11, type=int) parser.add_argument('--bert_dropout', default=0.1, type=float) parser.add_argument('--fc_dropout', default=0.4, type=float) parser.add_argument('--num_epochs', default=5, type=int) parser.add_argument('--lr_main', default=3e-5, type=float) parser.add_argument('--lr_aux', default=3e-5, type=float) parser.add_argument('--wd', default=1e-4, type=float) parser.add_argument('--csv_index', default = 1, type = int) args = parser.parse_args() print('************************************************************************************') # print('bert_dropout', bert_dropout, 'fc_dropout', fc_dropout) config = { 'PRE_TRAINED_MODEL_NAME': 'bert-base-cased', 'batch_size': args.batch_size, 'max_len': args.max_len, 'abuse_classes': args.abuse_classes, 'sent_classes': args.sent_classes, 'bert_dropout': args.bert_dropout, 'fc_dropout': args.fc_dropout, 'device': torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'), 'num_epochs': args.num_epochs, 'lr_main':args.lr_main, 'lr_aux': args.lr_aux } train_file = 'train' + str(args.csv_index) + '.csv' test_file = 'test' + str(args.csv_index) + '.csv' abuse_files = [train_file, '', test_file] # abuse_files = ['train.csv', 'val.csv', 'test.csv']#'comm_uqs_with_convo.csv' #'main_cmv_datatset_10000.csv' sent_files = ['train.tsv', 'dev.tsv', 'test.tsv'] dataloaders = prepare_data(abuse_files, sent_files, config) model = multitask_conversation_model(config) device = config['device'] print('DEVICE IS', device) if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") model = nn.DataParallel(model).to(device) train_epoch(model, dataloaders, device, config) print('End of training....') test_data_loader_main = dataloaders['main_test'] test_data_loader_aux = dataloaders['aux_test'] # checkpoint = torch.load('mtl_best_model.ckpt') # test_model = multitask_conversation_model(config) # if torch.cuda.device_count() > 1: # print("Let's use", torch.cuda.device_count(), "GPUs!") # test_model = nn.DataParallel(test_model).to(device) # test_model = test_model.to(device) # test_model.load_state_dict(checkpoint['state_dict']) # print('Loaded best model') # pearson, spearman, kendall, loss = eval_model(test_model, test_data_loader_main, device, mode = 'main_task') # print("MAIN:. Test Pearson {}.Test Spearman {}.Test kendall {}. Test Loss {}".format(pearson, spearman, kendall, loss)) # avg_micro_f1, avg_macro_f1, loss = eval_model(test_model, test_data_loader_aux, device, mode = 'auxiliary_task') # print("AUX: Test Micro F1 {}.Test Macro F1 {}. 
Test Loss {}".format(avg_micro_f1, avg_macro_f1, loss)) print('Loaded last model(Sanity check)') pearson, spearman, kendall, loss = eval_model(model, test_data_loader_main, device, mode = 'main_task') print("MAIN:. Test Pearson {}.Test Spearman {}.Test kendall {}. Test Loss {}".format(pearson, spearman, kendall, loss)) avg_micro_f1, avg_macro_f1, loss = eval_model(model, test_data_loader_aux, device, mode = 'auxiliary_task') print("AUX: Test Micro F1 {}.Test Macro F1 {}. Test Loss {}".format(avg_micro_f1, avg_macro_f1, loss)) os.remove(train_file) os.remove(test_file) os.remove('mtl_combo_last_model.ckpt')
[ "noreply@github.com" ]
sohisudhir.noreply@github.com
a96427e29b2d6a2689c2072888af98f6bd99e8a0
7f0eaa5aa008e7116645fa214e772ef4c2ee5406
/api/util/__tests__/test_api.py
533128dc7e06c5d7a5bb870807019b6ee2ad1d4a
[]
no_license
enixdark/raven
ef7273ff26e96b45486f67cb52d22c252bd336e2
c60ec2aa4ffec3c89afac305e63512732a148f4e
refs/heads/master
2023-04-10T21:05:51.703163
2020-01-17T20:09:25
2020-01-17T20:09:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,950
py
import asyncio import asynctest import mock import pytest from mock import patch, MagicMock from asynctest import CoroutineMock from expects import expect, equal from api.util.api import Api, Async class TestApi: @pytest.mark.asyncio async def test_call(self, *args): with asynctest.mock.patch('aiohttp.request', create=True) as aiohttp_req_mock: mock_req = CoroutineMock() mock_req.__aexit__ = CoroutineMock() mock_req.__aenter__ = CoroutineMock() aiohttp_req_mock.return_value = mock_req expected_method = 'some-value' expected_url = 'some-value' expected_params = {} expected_data = {} expected_json = {} expected_cookies = {} expected_headers = {} expected_auth = {} await Api.call(method=expected_method, url=expected_url, params=expected_params, data=expected_data, json=expected_json, cookies=expected_cookies, headers=expected_headers, auth=expected_auth) aiohttp_req_mock.assert_called_with( method=expected_method, url=expected_url, params=expected_params, data=expected_data, json=expected_json, cookies=expected_cookies, headers=expected_headers, auth=expected_auth) @pytest.mark.asyncio async def test_batch(self, *args): with asynctest.mock.patch.object(Api, 'call') as call_mock: expected_requests = [ { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} }, { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} }, { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} } ] res = await Api.batch(expected_requests) expect(call_mock.call_count).to(equal(len(expected_requests))) expect(len(res)).to(equal(len(expected_requests))) @pytest.mark.asyncio @asynctest.patch.object(Async, 'all') @asynctest.patch.object(Api, 'call') async def test_batch_async(self, *args): # with as call_mock: expected_requests = [ { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} }, { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} }, { 'method': 'some-value', 'url': 'some-value', 'params': {}, 'data': {}, 'json': {}, 'cookies': {}, 'headers': {}, 'auth': {} } ] res = await Api.batch_async(expected_requests) expect(args[0].call_count).to(equal(len(expected_requests))) args[1].assert_called()
[ "noreply@github.com" ]
enixdark.noreply@github.com
f5d1ed04d9af16888d78a13a4548ec0454cbba05
e5ba5c10b94e8971a3124ddb6b36048d76d7853e
/bot/main.py
5d969adf3a42dc470c2f123fd81c8dd84fa54bd7
[ "MIT" ]
permissive
sonhal/telegram-reddit-bot
1933c1d0769bc167a951c2ba824a6827ae19f99a
0d23f2bd1f8761d2133c6bcf2643502b505cb3be
refs/heads/master
2020-04-15T06:50:49.437490
2019-01-09T10:53:09
2019-01-09T10:53:09
164,475,656
0
0
MIT
2019-01-09T11:08:54
2019-01-07T18:44:49
Python
UTF-8
Python
false
false
157
py
from connectors import telegram_connector COMMANDS = ("top10", "top") if __name__ == '__main__': tb = telegram_connector.TelegramBot() tb.start()
[ "sondre.hal@gmail.com" ]
sondre.hal@gmail.com
ccf100ecb17578bc9791263e5270183990fed468
0b793bce2da8c3d09b7956c0672ddbffd46feaed
/atcoder/corp/keyence2020_c.py
9e943f94b0f860184c871b6de78e2af5092d409b
[ "MIT" ]
permissive
knuu/competitive-programming
c6c4e08fb231937d988bdc5a60a8ad6b31b97616
16bc68fdaedd6f96ae24310d697585ca8836ab6e
refs/heads/master
2021-01-17T09:39:02.647688
2020-11-07T03:17:22
2020-11-07T03:17:22
27,886,732
1
0
null
null
null
null
UTF-8
Python
false
false
201
py
N, K, S = map(int, input().split()) if S == 1: const = S + 1 else: const = S - 1 ans = [] for i in range(N): if i < K: ans.append(S) else: ans.append(const) print(*ans)
[ "premier3next@gmail.com" ]
premier3next@gmail.com
47d9a4ccfc270889c30a9622496a38134161653c
2e8ca9eceb525c5e8649525654a931fff637ef6c
/FSF-2020/approximations-and-optimizations/Critical Points/example.py
3a41be7d6ecb0fbd3b74b5b15c55177857163881
[]
no_license
abuzar0013/FSF-mathematics-python-code-archive
b4d97833cac727366f7037350300d98ba71008c1
573819dbfec617e253c154fd3ccc6fe0c92ab149
refs/heads/main
2023-03-01T16:07:55.319281
2021-02-05T11:39:01
2021-02-05T11:39:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,220
py
from manimlib.imports import* class ExampleAnimation(ThreeDScene): def construct(self): axes = ThreeDAxes() f_text = TextMobject("$f(x,y) = (y-x)(1-2x-3y)$").to_corner(UL) d = Dot(np.array([0,0,0]), color = '#800000') #---- Critical Point d_text = TextMobject("$(0.2,0.2)$",color = '#DC143C').scale(0.5).shift(0.2*UP) #----x = 0.2, y = 0.2 r_text=TextMobject("Critical Point",color = '#00FFFF').shift(0.3*DOWN).scale(0.6) #----f(x,y) = (y-x)(1-2x-3y) f = ParametricSurface( lambda u, v: np.array([ u, v, (v-u)*(1-2*u-3*v) ]),v_min = -1, v_max = 1, u_min = -1, u_max = 1, checkerboard_colors = [PURPLE_D, PURPLE_E], resolution=(20, 20)).scale(1) self.set_camera_orientation(phi = 75 * DEGREES) self.begin_ambient_camera_rotation(rate=0.5) self.add_fixed_in_frame_mobjects(f_text) self.wait(1) self.add(axes) self.play(Write(f),Write(d)) self.wait(1) self.add_fixed_in_frame_mobjects(d_text) self.wait(1) self.add_fixed_in_frame_mobjects(r_text) self.wait(3)
[ "noreply@github.com" ]
abuzar0013.noreply@github.com
03bc0d80849bc3264945b6fc903d9599b980d26a
a38725ed7fb93b503207502984ec197e921eb54b
/venv/lib/python3.6/site-packages/django_ajax/encoder.py
64ed9ca2af3a6a719fd651966cacb7ddaf862693
[]
no_license
tanveerahmad1517/myblogproject
d00d550230e2df0843e67f793504f9c19d0b755c
2eaa051caa5b68a8fba260c7cd431f1e1719a171
refs/heads/master
2020-03-16T21:38:32.738671
2018-08-23T11:55:02
2018-08-23T11:55:02
133,008,051
0
1
null
null
null
null
UTF-8
Python
false
false
1,859
py
""" Utils """ from __future__ import unicode_literals import json from datetime import date from django.http.response import HttpResponseRedirectBase, HttpResponse from django.template.response import TemplateResponse from django.utils.encoding import force_text from django.db.models.base import ModelBase from decimal import Decimal class LazyJSONEncoderMixin(object): """ A JSONEncoder subclass that handle querysets and models objects. Add how handle your type of object here to use when dump json """ def default(self, obj): # handles HttpResponse and exception content if issubclass(type(obj), HttpResponseRedirectBase): return obj['Location'] elif issubclass(type(obj), TemplateResponse): return obj.rendered_content elif issubclass(type(obj), HttpResponse): return obj.content elif issubclass(type(obj), Exception) or isinstance(obj, bytes): return force_text(obj) # this handles querysets and other iterable types try: iterable = iter(obj) except TypeError: pass else: return list(iterable) # this handlers Models if isinstance(obj.__class__, ModelBase): return force_text(obj) if isinstance(obj, Decimal): return float(obj) if isinstance(obj, date): return obj.isoformat() return super(LazyJSONEncoderMixin, self).default(obj) class LazyJSONEncoder(LazyJSONEncoderMixin, json.JSONEncoder): pass def serialize_to_json(data, *args, **kwargs): """ A wrapper for simplejson.dumps with defaults as: cls=LazyJSONEncoder All arguments can be added via kwargs """ kwargs['cls'] = kwargs.get('cls', LazyJSONEncoder) return json.dumps(data, *args, **kwargs)
[ "tanveerobjects@gmail.com" ]
tanveerobjects@gmail.com
611af549c5585bbe5afcc4f755e6bb733fbd0ce6
e4370c3831bf4e1324d55930870fd754df3e5e9a
/apps/survey_app/urls.py
632d830bb37715e5ba45b8f35313f153a7322569
[]
no_license
grommitt/Django-survey-form
d338053adcbb664ac8198182928854cab321d0ca
d7869acd0bca4b33617bde9fa51cf8e38f38d0b4
refs/heads/master
2020-03-17T08:33:37.926850
2018-05-15T01:42:09
2018-05-15T01:42:09
133,442,798
0
0
null
null
null
null
UTF-8
Python
false
false
326
py
from django.conf.urls import url from . import views # This line is new! urlpatterns = [ url(r'^$', views.index), # This line has changed! Notice that urlpatterns is a list, the comma is in url(r'^results$', views.results), # anticipation of all the routes that will be coming soon ]
[ "gmshaughn@email.arizona.edu" ]
gmshaughn@email.arizona.edu
6c224211afdf733a84ccbb82e6c9968e0193891f
c3a1ce1a918a9f15355b17cece583bd27da1bd53
/Taking input for Competitive Programming.py
b6204ac15b3909ac2a2fe4a251c2f5eac33381c1
[]
no_license
MrJay10/Graph-Algorithms
3cea89b8951f8b656505deef1aa52adf22549f0c
382d0f8d41313bcf37b266695a82a8ebe6182b40
refs/heads/master
2021-01-17T19:21:25.246914
2016-06-25T08:33:36
2016-06-25T08:33:36
61,347,944
0
0
null
null
null
null
UTF-8
Python
false
false
431
py
from Graph import Graph graph = dict() # Remove the message in input(); change str -> int for integer input vertices = list(map(str, input("Enter vertices :: ").split())) for vertex in vertices: # Remove the message in input(); change str -> int for integer input graph[vertex] = list(map(str, input("Enter neighbors of "+vertex+" -> ").split())) g = Graph(graph) print("Your Graph is ::\n\n"+str(g)+"\n")
[ "noreply@github.com" ]
MrJay10.noreply@github.com
1dce40b705380b07cbe6eb7c6fbb5f9749a9ae9b
08b4f0f914e33039d3ca408702b110fd6b24764a
/venv/Scripts/easy_install-script.py
be41f6d074beef7058f951ad44f80f761a1f7357
[]
no_license
dakaun/wiki_crawler
6558c0b4f4c9ec8ca94a5b7f0e24fc2bca77d1d1
82b526ce63e952d8364c5c553b6f00440a6be9cc
refs/heads/master
2021-04-12T04:56:27.526150
2018-12-13T10:04:53
2018-12-13T10:04:53
125,738,838
0
0
null
null
null
null
UTF-8
Python
false
false
449
py
#!C:\Users\danielak\PycharmProjects\FIZ\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
    )
[ "danielak@famkaun.local" ]
danielak@famkaun.local
5c03758b507d6d0764e0ee096e04ba7048e30035
da9b9f75a693d17102be45b88efc212ca6da4085
/sdk/cosmos/azure-cosmos/azure/cosmos/container.py
73441d19f5abd428087ba295d4936b854400a8c0
[ "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
elraikhm/azure-sdk-for-python
e1f57b2b4d8cc196fb04eb83d81022f50ff63db7
dcb6fdd18b0d8e0f1d7b34fdf82b27a90ee8eafc
refs/heads/master
2021-06-21T22:01:37.063647
2021-05-21T23:43:56
2021-05-21T23:43:56
216,855,069
0
0
MIT
2019-10-22T16:05:03
2019-10-22T16:05:02
null
UTF-8
Python
false
false
35,017
py
# The MIT License (MIT)
# Copyright (c) 2014 Microsoft Corporation

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Create, read, update and delete items in the Azure Cosmos DB SQL API service.
"""

from typing import Any, Dict, List, Optional, Union, Iterable, cast  # pylint: disable=unused-import

import six
from azure.core.tracing.decorator import distributed_trace  # type: ignore

from ._cosmos_client_connection import CosmosClientConnection
from ._base import build_options
from .errors import CosmosResourceNotFoundError
from .http_constants import StatusCodes
from .offer import Offer
from .scripts import ScriptsProxy
from .partition_key import NonePartitionKeyValue

__all__ = ("ContainerProxy",)

# pylint: disable=protected-access
# pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs


class ContainerProxy(object):
    """An interface to interact with a specific DB Container.

    This class should not be instantiated directly; instead, use the
    :func:`DatabaseProxy.get_container_client` method.

    A container in an Azure Cosmos DB SQL API database is a collection of
    documents, each of which is represented as an Item.

    :ivar str id: ID (name) of the container
    :ivar str session_token: The session token for the container.

    .. note::

        To create a new container in an existing database, use
        :func:`Database.create_container`.
    """

    def __init__(self, client_connection, database_link, id, properties=None):  # pylint: disable=redefined-builtin
        # type: (CosmosClientConnection, str, str, Dict[str, Any]) -> None
        self.client_connection = client_connection
        self.id = id
        self._properties = properties
        self.container_link = u"{}/colls/{}".format(database_link, self.id)
        self._is_system_key = None
        self._scripts = None  # type: Optional[ScriptsProxy]

    def _get_properties(self):
        # type: () -> Dict[str, Any]
        if self._properties is None:
            self._properties = self.read()
        return self._properties

    @property
    def is_system_key(self):
        # type: () -> bool
        if self._is_system_key is None:
            properties = self._get_properties()
            self._is_system_key = (
                properties["partitionKey"]["systemKey"] if "systemKey" in properties["partitionKey"] else False
            )
        return cast('bool', self._is_system_key)

    @property
    def scripts(self):
        # type: () -> ScriptsProxy
        if self._scripts is None:
            self._scripts = ScriptsProxy(self.client_connection, self.container_link, self.is_system_key)
        return cast('ScriptsProxy', self._scripts)

    def _get_document_link(self, item_or_link):
        # type: (Union[Dict[str, Any], str]) -> str
        if isinstance(item_or_link, six.string_types):
            return u"{}/docs/{}".format(self.container_link, item_or_link)
        return item_or_link["_self"]

    def _get_conflict_link(self, conflict_or_link):
        # type: (Union[Dict[str, Any], str]) -> str
        if isinstance(conflict_or_link, six.string_types):
            return u"{}/conflicts/{}".format(self.container_link, conflict_or_link)
        return conflict_or_link["_self"]

    def _set_partition_key(self, partition_key):
        if partition_key == NonePartitionKeyValue:
            return CosmosClientConnection._return_undefined_or_empty_partition_key(self.is_system_key)
        return partition_key

    @distributed_trace
    def read(
        self,
        populate_query_metrics=None,  # type: Optional[bool]
        populate_partition_key_range_statistics=None,  # type: Optional[bool]
        populate_quota_info=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> Dict[str, Any]
        """Read the container properties.

        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param populate_partition_key_range_statistics: Enable returning partition key range
            statistics in response headers.
        :param populate_quota_info: Enable returning collection storage quota information in response headers.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: Raised if the container couldn't be retrieved.
            This includes if the container does not exist.
        :returns: Dict representing the retrieved container.
        :rtype: dict[str, Any]
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if populate_query_metrics is not None:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if populate_partition_key_range_statistics is not None:
            request_options["populatePartitionKeyRangeStatistics"] = populate_partition_key_range_statistics
        if populate_quota_info is not None:
            request_options["populateQuotaInfo"] = populate_quota_info

        collection_link = self.container_link
        self._properties = self.client_connection.ReadContainer(
            collection_link, options=request_options, **kwargs
        )

        if response_hook:
            response_hook(self.client_connection.last_response_headers, self._properties)

        return cast('Dict[str, Any]', self._properties)

    @distributed_trace
    def read_item(
        self,
        item,  # type: Union[str, Dict[str, Any]]
        partition_key,  # type: Any
        populate_query_metrics=None,  # type: Optional[bool]
        post_trigger_include=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Dict[str, str]
        """Get the item identified by `item`.

        :param item: The ID (name) or dict representing the item to retrieve.
        :param partition_key: Partition key for the item to retrieve.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param post_trigger_include: Trigger id to be used as post operation trigger.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: Dict representing the item to be retrieved.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The given item couldn't be retrieved.
        :rtype: dict[str, Any]

        .. admonition:: Example:

            .. literalinclude:: ../samples/examples.py
                :start-after: [START update_item]
                :end-before: [END update_item]
                :language: python
                :dedent: 0
                :caption: Get an item from the database and update one of its properties:
                :name: update_item
        """
        doc_link = self._get_document_link(item)
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if partition_key:
            request_options["partitionKey"] = self._set_partition_key(partition_key)
        if populate_query_metrics is not None:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if post_trigger_include:
            request_options["postTriggerInclude"] = post_trigger_include

        result = self.client_connection.ReadItem(document_link=doc_link, options=request_options, **kwargs)
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def read_all_items(
        self,
        max_item_count=None,  # type: Optional[int]
        populate_query_metrics=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable[Dict[str, Any]]
        """List all items in the container.

        :param max_item_count: Max number of items to be returned in the enumeration operation.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param feed_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: An Iterable of items (dicts).
        :rtype: Iterable[dict[str, Any]]
        """
        feed_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if max_item_count is not None:
            feed_options["maxItemCount"] = max_item_count
        if populate_query_metrics is not None:
            feed_options["populateQueryMetrics"] = populate_query_metrics

        if hasattr(response_hook, "clear"):
            response_hook.clear()

        items = self.client_connection.ReadItems(
            collection_link=self.container_link, feed_options=feed_options, response_hook=response_hook, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, items)
        return items

    @distributed_trace
    def query_items_change_feed(
        self,
        partition_key_range_id=None,  # type: Optional[str]
        is_start_from_beginning=False,  # type: bool
        continuation=None,  # type: Optional[str]
        max_item_count=None,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable[Dict[str, Any]]
        """Get a sorted list of items that were changed, in the order in which they were modified.

        :param partition_key_range_id: ChangeFeed requests can be executed against specific partition
            key ranges. This is used to process the change feed in parallel across multiple consumers.
        :param is_start_from_beginning: Get whether the change feed should start from the beginning (true)
            or from the current point (false). By default it starts from the current point (false).
        :param continuation: e_tag value to be used as continuation for reading the change feed.
        :param max_item_count: Max number of items to be returned in the enumeration operation.
        :param feed_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: An Iterable of items (dicts).
        :rtype: Iterable[dict[str, Any]]
        """
        feed_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if partition_key_range_id is not None:
            feed_options["partitionKeyRangeId"] = partition_key_range_id
        if is_start_from_beginning is not None:
            feed_options["isStartFromBeginning"] = is_start_from_beginning
        if max_item_count is not None:
            feed_options["maxItemCount"] = max_item_count
        if continuation is not None:
            feed_options["continuation"] = continuation

        if hasattr(response_hook, "clear"):
            response_hook.clear()

        result = self.client_connection.QueryItemsChangeFeed(
            self.container_link, options=feed_options, response_hook=response_hook, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def query_items(
        self,
        query,  # type: str
        parameters=None,  # type: Optional[List[str]]
        partition_key=None,  # type: Optional[Any]
        enable_cross_partition_query=None,  # type: Optional[bool]
        max_item_count=None,  # type: Optional[int]
        enable_scan_in_query=None,  # type: Optional[bool]
        populate_query_metrics=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable[Dict[str, Any]]
        """Return all results matching the given `query`.

        You can use any value for the container name in the FROM clause, but
        typically the container name is used. In the examples below, the
        container name is "products", and is aliased as "p" for easier
        referencing in the WHERE clause.

        :param query: The Azure Cosmos DB SQL query to execute.
        :param parameters: Optional array of parameters to the query. Ignored if no query is provided.
        :param partition_key: Specifies the partition key value for the item.
        :param enable_cross_partition_query: Allows sending of more than one request to execute
            the query in the Azure Cosmos DB service. More than one request is necessary if the
            query is not scoped to a single partition key value.
        :param max_item_count: Max number of items to be returned in the enumeration operation.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param enable_scan_in_query: Allow scan on the queries which couldn't be served as
            indexing was opted out on the requested paths.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param feed_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: An Iterable of items (dicts).
        :rtype: Iterable[dict[str, Any]]

        .. admonition:: Example:

            .. literalinclude:: ../samples/examples.py
                :start-after: [START query_items]
                :end-before: [END query_items]
                :language: python
                :dedent: 0
                :caption: Get all products that have not been discontinued:
                :name: query_items

            .. literalinclude:: ../samples/examples.py
                :start-after: [START query_items_param]
                :end-before: [END query_items_param]
                :language: python
                :dedent: 0
                :caption: Parameterized query to get all products that have been discontinued:
                :name: query_items_param
        """
        feed_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if enable_cross_partition_query is not None:
            feed_options["enableCrossPartitionQuery"] = enable_cross_partition_query
        if max_item_count is not None:
            feed_options["maxItemCount"] = max_item_count
        if populate_query_metrics is not None:
            feed_options["populateQueryMetrics"] = populate_query_metrics
        if partition_key is not None:
            feed_options["partitionKey"] = self._set_partition_key(partition_key)
        if enable_scan_in_query is not None:
            feed_options["enableScanInQuery"] = enable_scan_in_query

        if hasattr(response_hook, "clear"):
            response_hook.clear()

        items = self.client_connection.QueryItems(
            database_or_container_link=self.container_link,
            query=query if parameters is None else dict(query=query, parameters=parameters),
            options=feed_options,
            partition_key=partition_key,
            response_hook=response_hook,
            **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, items)
        return items

    @distributed_trace
    def replace_item(
        self,
        item,  # type: Union[str, Dict[str, Any]]
        body,  # type: Dict[str, Any]
        populate_query_metrics=None,  # type: Optional[bool]
        pre_trigger_include=None,  # type: Optional[str]
        post_trigger_include=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Dict[str, str]
        """Replaces the specified item if it exists in the container.

        :param item: The ID (name) or dict representing the item to be replaced.
        :param body: A dict-like object representing the item to replace.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param access_condition: Conditions associated with the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param pre_trigger_include: Trigger id to be used as pre operation trigger.
        :param post_trigger_include: Trigger id to be used as post operation trigger.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: A dict representing the item after the replace went through.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The replace failed or the item with
            the given id does not exist.
        :rtype: dict[str, Any]
        """
        item_link = self._get_document_link(item)
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        request_options["disableIdGeneration"] = True
        if populate_query_metrics is not None:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if pre_trigger_include:
            request_options["preTriggerInclude"] = pre_trigger_include
        if post_trigger_include:
            request_options["postTriggerInclude"] = post_trigger_include

        result = self.client_connection.ReplaceItem(
            document_link=item_link, new_document=body, options=request_options, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def upsert_item(
        self,
        body,  # type: Dict[str, Any]
        populate_query_metrics=None,  # type: Optional[bool]
        pre_trigger_include=None,  # type: Optional[str]
        post_trigger_include=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Dict[str, str]
        """Insert or update the specified item.

        If the item already exists in the container, it is replaced. If it does not, it is inserted.

        :param body: A dict-like object representing the item to update or insert.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param access_condition: Conditions associated with the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param pre_trigger_include: Trigger id to be used as pre operation trigger.
        :param post_trigger_include: Trigger id to be used as post operation trigger.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: A dict representing the upserted item.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The given item could not be upserted.
        :rtype: dict[str, Any]
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        request_options["disableIdGeneration"] = True
        if populate_query_metrics is not None:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if pre_trigger_include:
            request_options["preTriggerInclude"] = pre_trigger_include
        if post_trigger_include:
            request_options["postTriggerInclude"] = post_trigger_include

        result = self.client_connection.UpsertItem(
            database_or_container_link=self.container_link,
            document=body,
            options=request_options,  # bug fix: the options dict was built above but never passed through
            **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def create_item(
        self,
        body,  # type: Dict[str, Any]
        populate_query_metrics=None,  # type: Optional[bool]
        pre_trigger_include=None,  # type: Optional[str]
        post_trigger_include=None,  # type: Optional[str]
        indexing_directive=None,  # type: Optional[Any]
        **kwargs  # type: Any
    ):
        # type: (...) -> Dict[str, str]
        """Create an item in the container.

        To update or replace an existing item, use the :func:`ContainerProxy.upsert_item` method.

        :param body: A dict-like object representing the item to create.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param access_condition: Conditions associated with the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param pre_trigger_include: Trigger id to be used as pre operation trigger.
        :param post_trigger_include: Trigger id to be used as post operation trigger.
        :param indexing_directive: Indicate whether the document should be omitted from indexing.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: A dict representing the new item.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: An item with the given ID already exists.
        :rtype: dict[str, Any]
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)

        request_options["disableAutomaticIdGeneration"] = True
        if populate_query_metrics:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if pre_trigger_include:
            request_options["preTriggerInclude"] = pre_trigger_include
        if post_trigger_include:
            request_options["postTriggerInclude"] = post_trigger_include
        if indexing_directive:
            request_options["indexingDirective"] = indexing_directive

        result = self.client_connection.CreateItem(
            database_or_container_link=self.container_link, document=body, options=request_options, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def delete_item(
        self,
        item,  # type: Union[Dict[str, Any], str]
        partition_key,  # type: Any
        populate_query_metrics=None,  # type: Optional[bool]
        pre_trigger_include=None,  # type: Optional[str]
        post_trigger_include=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete the specified item from the container.

        :param item: The ID (name) or dict representing the item to be deleted.
        :param partition_key: Specifies the partition key value for the item.
        :param session_token: Token for use with Session consistency.
        :param initial_headers: Initial headers to be sent as part of the request.
        :param access_condition: Conditions associated with the request.
        :param populate_query_metrics: Enable returning query metrics in response headers.
        :param pre_trigger_include: Trigger id to be used as pre operation trigger.
        :param post_trigger_include: Trigger id to be used as post operation trigger.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The item wasn't deleted successfully.
        :raises ~azure.cosmos.errors.CosmosResourceNotFoundError: The item does not exist in the container.
        :rtype: None
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if partition_key:
            request_options["partitionKey"] = self._set_partition_key(partition_key)
        if populate_query_metrics is not None:
            request_options["populateQueryMetrics"] = populate_query_metrics
        if pre_trigger_include:
            request_options["preTriggerInclude"] = pre_trigger_include
        if post_trigger_include:
            request_options["postTriggerInclude"] = post_trigger_include

        document_link = self._get_document_link(item)
        result = self.client_connection.DeleteItem(document_link=document_link, options=request_options, **kwargs)
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)

    @distributed_trace
    def read_offer(self, **kwargs):
        # type: (Any) -> Offer
        """Read the Offer object for this container.

        :param response_hook: A callable invoked with the response metadata.
        :returns: Offer for the container.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: No offer exists for the container or
            the offer could not be retrieved.
        :rtype: ~azure.cosmos.Offer
        """
        response_hook = kwargs.pop('response_hook', None)
        properties = self._get_properties()
        link = properties["_self"]
        query_spec = {
            "query": "SELECT * FROM root r WHERE r.resource=@link",
            "parameters": [{"name": "@link", "value": link}],
        }
        offers = list(self.client_connection.QueryOffers(query_spec, **kwargs))
        if not offers:
            raise CosmosResourceNotFoundError(
                status_code=StatusCodes.NOT_FOUND,
                message="Could not find Offer for container " + self.container_link)

        if response_hook:
            response_hook(self.client_connection.last_response_headers, offers)

        return Offer(offer_throughput=offers[0]["content"]["offerThroughput"], properties=offers[0])

    @distributed_trace
    def replace_throughput(self, throughput, **kwargs):
        # type: (int, Any) -> Offer
        """Replace the container's throughput.

        :param throughput: The throughput to be set (an integer).
        :param response_hook: A callable invoked with the response metadata.
        :returns: Offer for the container, updated with the new throughput.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: No offer exists for the container
            or the offer could not be updated.
        :rtype: ~azure.cosmos.Offer
        """
        response_hook = kwargs.pop('response_hook', None)
        properties = self._get_properties()
        link = properties["_self"]
        query_spec = {
            "query": "SELECT * FROM root r WHERE r.resource=@link",
            "parameters": [{"name": "@link", "value": link}],
        }
        offers = list(self.client_connection.QueryOffers(query_spec, **kwargs))
        if not offers:
            raise CosmosResourceNotFoundError(
                status_code=StatusCodes.NOT_FOUND,
                message="Could not find Offer for container " + self.container_link)

        new_offer = offers[0].copy()
        new_offer["content"]["offerThroughput"] = throughput
        # bug fix: pass the updated offer; the original passed offers[0] and only worked
        # because the shallow copy shares the "content" dict, which is easy to break.
        data = self.client_connection.ReplaceOffer(offer_link=offers[0]["_self"], offer=new_offer, **kwargs)
        if response_hook:
            response_hook(self.client_connection.last_response_headers, data)

        return Offer(offer_throughput=data["content"]["offerThroughput"], properties=data)

    @distributed_trace
    def list_conflicts(self, max_item_count=None, **kwargs):
        # type: (Optional[int], Any) -> Iterable[Dict[str, Any]]
        """List all conflicts in the container.

        :param max_item_count: Max number of items to be returned in the enumeration operation.
        :param feed_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: An Iterable of conflicts (dicts).
        :rtype: Iterable[dict[str, Any]]
        """
        feed_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if max_item_count is not None:
            feed_options["maxItemCount"] = max_item_count

        result = self.client_connection.ReadConflicts(
            collection_link=self.container_link, feed_options=feed_options, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def query_conflicts(
        self,
        query,  # type: str
        parameters=None,  # type: Optional[List[str]]
        enable_cross_partition_query=None,  # type: Optional[bool]
        partition_key=None,  # type: Optional[Any]
        max_item_count=None,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable[Dict[str, Any]]
        """Return all conflicts matching the given `query`.

        :param query: The Azure Cosmos DB SQL query to execute.
        :param parameters: Optional array of parameters to the query. Ignored if no query is provided.
        :param partition_key: Specifies the partition key value for the item.
        :param enable_cross_partition_query: Allows sending of more than one request to execute
            the query in the Azure Cosmos DB service. More than one request is necessary if the
            query is not scoped to a single partition key value.
        :param max_item_count: Max number of items to be returned in the enumeration operation.
        :param feed_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: An Iterable of conflicts (dicts).
        :rtype: Iterable[dict[str, Any]]
        """
        feed_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if max_item_count is not None:
            feed_options["maxItemCount"] = max_item_count
        if enable_cross_partition_query is not None:
            feed_options["enableCrossPartitionQuery"] = enable_cross_partition_query
        if partition_key is not None:
            feed_options["partitionKey"] = self._set_partition_key(partition_key)

        result = self.client_connection.QueryConflicts(
            collection_link=self.container_link,
            query=query if parameters is None else dict(query=query, parameters=parameters),
            options=feed_options,
            **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def get_conflict(self, conflict, partition_key, **kwargs):
        # type: (Union[str, Dict[str, Any]], Any, Any) -> Dict[str, str]
        """Get the conflict identified by `conflict`.

        :param conflict: The ID (name) or dict representing the conflict to retrieve.
        :param partition_key: Partition key for the conflict to retrieve.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :returns: A dict representing the retrieved conflict.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The given conflict couldn't be retrieved.
        :rtype: dict[str, Any]
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if partition_key:
            request_options["partitionKey"] = self._set_partition_key(partition_key)

        result = self.client_connection.ReadConflict(
            conflict_link=self._get_conflict_link(conflict), options=request_options, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
        return result

    @distributed_trace
    def delete_conflict(self, conflict, partition_key, **kwargs):
        # type: (Union[str, Dict[str, Any]], Any, Any) -> None
        """Delete the specified conflict from the container.

        :param conflict: The ID (name) or dict representing the conflict to be deleted.
        :param partition_key: Partition key for the conflict to delete.
        :param request_options: Dictionary of additional properties to be used for the request.
        :param response_hook: A callable invoked with the response metadata.
        :raises ~azure.cosmos.errors.CosmosHttpResponseError: The conflict wasn't deleted successfully.
        :raises ~azure.cosmos.errors.CosmosResourceNotFoundError: The conflict does not exist in the container.
        :rtype: None
        """
        request_options = build_options(kwargs)
        response_hook = kwargs.pop('response_hook', None)
        if partition_key:
            request_options["partitionKey"] = self._set_partition_key(partition_key)

        result = self.client_connection.DeleteConflict(
            conflict_link=self._get_conflict_link(conflict), options=request_options, **kwargs
        )
        if response_hook:
            response_hook(self.client_connection.last_response_headers, result)
[ "noreply@github.com" ]
elraikhm.noreply@github.com
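Because `ContainerProxy` is obtained through the client rather than instantiated directly, a short usage sketch may help. The endpoint, key, and database/container names below are placeholders, and the sketch assumes the same azure-cosmos 4.x API surface as the file above.

from azure.cosmos import CosmosClient

# Placeholder account values -- substitute your own endpoint and key.
client = CosmosClient("https://myaccount.documents.azure.com:443/", credential="<account-key>")
container = client.get_database_client("mydatabase").get_container_client("products")

# Create an item, then read matching items back with a parameterized,
# cross-partition query (mirroring the query_items docstring example).
container.create_item({"id": "item1", "productName": "Widget", "discontinued": False})
items = container.query_items(
    query="SELECT * FROM p WHERE p.discontinued = @flag",
    parameters=[{"name": "@flag", "value": False}],
    enable_cross_partition_query=True,
)
for item in items:
    print(item["id"], item["productName"])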