Dataset schema (29 columns):
blob_id: string (length 40) | directory_id: string (length 40) | path: string (length 2–616) | content_id: string (length 40)
detected_licenses: list (length 0–69) | license_type: string (2 classes) | repo_name: string (length 5–118)
snapshot_id: string (length 40) | revision_id: string (length 40) | branch_name: string (length 4–63)
visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us]
github_id: int64 (2.91k–686M, nullable) | star_events_count: int64 (0–209k) | fork_events_count: int64 (0–110k)
gha_license_id: string (23 classes) | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: string (213 classes)
src_encoding: string (30 classes) | language: string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes)
length_bytes: int64 (2–10.3M) | extension: string (246 classes) | content: string (length 2–10.3M)
authors: list (length 1–1) | author_id: string (length 0–212)
blob_id: ac81e7a4a5a4e1eec99fc4dd938031a42d326728 | directory_id: 1064db5dfd154c4bc600e0e03841b0f73f0eefbc | path: /home/migrations/0008_auto_20200529_0800.py | content_id: 55f78b9f74855b21f14e8caf061dee753c0981a6
detected_licenses: [] | license_type: no_license | repo_name: crowdbotics-apps/web-29-dev-5196 | snapshot_id: 3303921a0e5c8794e8e67f55c9841f3ec7610c16 | revision_id: 7beda8f7d57ce9b9858a46f7e3940d6eed4b5725 | branch_name: refs/heads/master
visit_date: 2023-05-26T23:00:23.271209 | revision_date: 2020-05-29T12:47:07 | committer_date: 2020-05-29T12:47:07 | github_id: 267,768,914 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-06-13T04:08:30 | gha_created_at: 2020-05-29T04:59:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 342 | extension: py
content:
# Generated by Django 2.2.12 on 2020-05-29 08:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("home", "0007_customtext_kjhkh"),
]
operations = [
migrations.RenameField(
model_name="customtext", old_name="kjhkh", new_name="ghfnhgfgjh",
),
]
authors: ["team@crowdbotics.com"] | author_id: team@crowdbotics.com
blob_id: d380946226e4e0189d165d2631379f3c9a24cb80 | directory_id: 06e0cf403b744009c817e5cfa7f5f898020109ab | path: /Assignments/Assignment2.py | content_id: 519fdd5c2c88a90eca552e8a532e787a0ce56c64
detected_licenses: [] | license_type: no_license | repo_name: giurgiumatei/Fundamentals-of-Programming | snapshot_id: 6338ebb616b219ae927af2dd9b42145911efe138 | revision_id: 92d33def4ed49b86145caf2d28a6340d89709133 | branch_name: refs/heads/main
visit_date: 2023-03-12T07:44:25.041872 | revision_date: 2021-03-01T23:38:01 | committer_date: 2021-03-01T23:38:01 | github_id: 343,584,650 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,778 | extension: py
content:
numere = [ ]#this list contains the complex numbers
def citire(): #this function reads the complex numbers and stores them as a list of lists
n=int(input("Give the number of complex elements: "))
for i in range(0,n):
print("Give the values for element number "+str(i+1)+" : ")
ele=[int(input("Give the real part: ")),int(input("Give the imaginary part: "))]
numere.append(ele)
return
def afisare():#this function prints the list to the console
n=len(numere)
for i in range(0,n):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer1():#this function solves req. 1
    l=0
    maxl=0
    n=len(numere)
    b=-1
    e=-1
    for i in range(0,n-1):
        l=0
        start=i
        if i>0 and numere[i][0]>numere[i-1][0]:#guard i>0: for i==0, numere[i-1] would wrap around to the last element
            l+=1
            start=i-1#the increasing run also includes the previous element
        for j in range(i+1,n):
            if numere[j][0]>numere[j-1][0]:
                l+=1
            else: break
        if l > maxl:
            maxl=l
            b=start
            e=j
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer5():#this function solves req 5
    l=0
    maxl=0
    n=len(numere)
    b=-1
    e=-1
for i in range(0,n-1):
l=0
if numere[i][1]==0:
l+=1
for j in range(i+1,n):
if numere[j][1] == 0:
l+=1
else: break
if l > maxl:
maxl=l
b=i
e=j
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer10():#this function solves req.10
    l=0
    maxl=0
    n=len(numere)
    b=-1
    e=-1
    s1=0
    s2=0
    inceput=0
    i=0
    while i < n:#a while loop is needed here: rebinding the index of a for loop has no effect on the iteration
        s1+=numere[i][0]
        s2+=numere[i][1]
        if s1>10 or s2>10:
            s1=0
            s2=0
            l=0
            inceput+=1
            i=inceput-1#restart the scan just before the new start position (the i += 1 below steps onto it)
        elif s1==10 and s2==10:
            if l > maxl:
                maxl=l
                b=inceput
                e=i
        else: l+=1
        i += 1
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def optiunea3():#when this function is called it asks the user which req. should be solved
n=int(input("Which requirement do you want to solve: "))
if n==5:
cer5()
elif n==10:
cer10()
elif n==1:
cer1()
else: print("Invalid number")
def menu():#this function acts like a main function which calls the others
citire()
    while True:
option=int(input("Give a number corresponding to the option:"))
if option==4:
return
elif option==1:
citire()
elif option==2:
afisare()
elif option==3:
optiunea3()
elif option==0:
break
menu()#program starts here
authors: ["noreply@github.com"] | author_id: giurgiumatei.noreply@github.com
blob_id: de3acc9720419a15a1a42835f76a34d6293154c3 | directory_id: 16c77266859989d156fe3f4d0ce3a37a1898ad38 | path: /dacc/xls/write.py | content_id: 1fad19e6f9792761fed509ba748792ebd263a457
detected_licenses: ["MIT"] | license_type: permissive | repo_name: SRHerzog/ut | snapshot_id: 92620e66be2ea9707d9cd3cf390179326ed2eefe | revision_id: 894bd5607eb76676aaea7a37ed8a91b5fb5e805e | branch_name: refs/heads/master
visit_date: 2021-06-30T19:15:46.131299 | revision_date: 2017-09-15T20:47:35 | committer_date: 2017-09-15T20:47:35 | github_id: 103,696,926 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2017-09-15T20:08:10 | gha_created_at: 2017-09-15T20:08:10 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,367 | extension: py
content:
__author__ = 'thor'
import os
import pandas as pd
from pandas import ExcelWriter
from openpyxl import load_workbook
from openpyxl.reader.excel import InvalidFileException
try:
from xlwings import Workbook, Sheet
except ImportError as e:
print(e)
def multiple_dfs_to_multiple_sheets(df_list, xls_filepath, sheet_list=None, **kwargs):
"""
Writes multiple dataframes in different excel sheets.
Input:
* xls_filepath: The excel file to write into
* And then there's several choices:
* df_list (a list of dataframes) and sheet_list (a list of corresponding names)
* df_list = a list of {sheet_name: dataframe}
* df_list = a list of (sheet_name, dataframe) tuples, when the order of the sheets matters)
--> If no sheet names are given, the function either gives the name of the dataframe (if any), or
simply iterates over sheet numbers...
"""
if sheet_list is None:
if isinstance(df_list, dict):
# df_list, sheet_list = zip(df_list.values(), df_list.keys())
df_list, sheet_list = df_list.values(), df_list.keys()
elif isinstance(df_list[0], tuple):
sheet_list = map(lambda x: x[0], df_list)
df_list = map(lambda x: x[1], df_list)
else:
sheet_list = []
for i, df in enumerate(df_list):
name = df.name
if not name:
name = "sheet {}".format(i)
sheet_list.append(name)
writer = ExcelWriter(xls_filepath)
for df, sheet_name in zip(df_list, sheet_list):
df.to_excel(writer, sheet_name, **kwargs)
writer.save()
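# Illustrative usage sketch (not part of the original module; df_a, df_b and
# 'out.xls' are hypothetical). The three accepted forms of df_list described in
# the docstring above:
#   multiple_dfs_to_multiple_sheets([df_a, df_b], 'out.xls', sheet_list=['a', 'b'])
#   multiple_dfs_to_multiple_sheets({'a': df_a, 'b': df_b}, 'out.xls')
#   multiple_dfs_to_multiple_sheets([('a', df_a), ('b', df_b)], 'out.xls')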
def df_to_excel_without_overwriting_it(df, xls_filepath, sheet_name, **kwargs):
"""
write df to an excel sheet without overwriting the whole excel file if it exists
(may need to create the excel with some data in it already for this to work)
"""
try:
book = load_workbook(xls_filepath)
writer = pd.ExcelWriter(xls_filepath, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
try:
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
writer.save()
except InvalidFileException:
try:
df.to_excel(excel_writer=xls_filepath, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
            df.to_excel(excel_writer=xls_filepath, sheet_name=sheet_name, **kwargs)  # 'writer' does not exist in this branch; write via the file path
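# Hypothetical example: assuming 'report.xlsx' already exists, this appends a
# 'summary' sheet without overwriting the workbook's other sheets:
#   df_to_excel_without_overwriting_it(df, 'report.xlsx', 'summary', index=False)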
def clear_sheet_contents_without_changing_formatting(xls_filepath, sheet_name):
    if os.path.exists(xls_filepath): # else do nothing
with Workbook(fullname=xls_filepath, app_visible=False) as wkb:
Sheet(sheet=sheet_name, wkb=wkb).clear_contents()
def _replace_non_numeric_non_strings_with_strings(df):
index_names = df.index.names
df = df.reset_index(drop=False, inplace=False)
for c in df.columns:
if df[c].dtype.name == 'object':
if not isinstance(df[c].iloc[0], basestring):
df[c] = df[c].apply(str)
df = df.set_index(index_names)
return df
authors: ["thorwhalen1@gmail.com"] | author_id: thorwhalen1@gmail.com
blob_id: a452d16e2734c12010a79f5764930241bc083ee4 | directory_id: 5407a3a228fca978c49b8fd7230c6c3482cdc839 | path: /src/utils/namespace_utils.py | content_id: c39075670032f89382eb6975fcba2008d8b4f62e
detected_licenses: [] | license_type: no_license | repo_name: Seeincup-Ming/GmToolKit | snapshot_id: bb6e49dde8b1ce714c044064d971988321db9c62 | revision_id: 2a938424a2e32164bf5c7890b896bbff00a2d26f | branch_name: refs/heads/master
visit_date: 2021-01-01T04:11:21.553149 | revision_date: 2016-04-18T09:54:45 | committer_date: 2016-04-18T09:54:45 | github_id: 56,496,563 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,037 | extension: py
content:
# -*- coding: utf-8 -*-
__author__ = 'Po'
import sys
import atexit
import socket
import weakref
import importlib
import rpyc
import rpyc.core.service as Service
import rpyc.core.protocol as Protocol
import rpyc.utils.factory as Factory
namespace_registry = dict() # <key : value> == <namespace : NamespaceManager instance>
def _clear_all():
try:
for namespace, manager in namespace_registry.items():
assert isinstance(manager, NamespaceManager)
manager.close()
except:
pass
# Auto close all connections and stop background threads.
atexit.register(_clear_all)
class _ModifiedModuleNamespace(Service.ModuleNamespace):
"""
Enable setattr to ModuleNamespace which is necessary to make
ModuleNamespace as a virtual module.
"""
pass
Service.ModuleNamespace = _ModifiedModuleNamespace
class _ModifiedConnection(Protocol.Connection):
"""
Add error_handler to Connection.
"""
def _send(self, msg, seq, args):
try:
return super(_ModifiedConnection, self)._send(msg, seq, args)
except Exception as ex:
error_handler = getattr(self, 'error_handler', None)
if callable(error_handler) and error_handler(self, ex):
return
else:
raise ex
def _recv(self, timeout, wait_for_lock):
try:
return super(_ModifiedConnection, self)._recv(timeout, wait_for_lock)
except Exception as ex:
error_handler = getattr(self, 'error_handler', None)
if callable(error_handler) and error_handler(self, ex):
return
else:
raise ex
Factory.Connection = _ModifiedConnection
class RemoteImporter(object):
"""
A meta_path hooker to import remote module from conn in namespace.
"""
def __init__(self, conn, namespace):
self.conn = conn
self.namespace = namespace
modules = conn.modules
modules.__path__ = [namespace] # module should have __path__ attribute.
sys.modules[namespace] = modules
def find_module(self, fullname, path=None):
if fullname.partition('.')[0] == self.namespace:
# If import module in namespace, use self as module loader.
return self
return None
def load_module(self, fullname):
name = fullname.partition('.')[2]
modules = self.conn.modules
module = modules[name]
if not hasattr(module, '__path__'):
            # This is necessary to make ModuleNamespace act as a package, but
# will make all remote modules as packages.
module.__path__ = ''
if '.' not in name:
            # This is necessary to make ModuleNamespace act as a virtual module.
setattr(modules, name, module)
# Avoid reimport the same module, builtin importer will search sys.modules automatically.
sys.modules[fullname] = module
return module
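# Illustrative sketch of how the hook is registered (NamespaceManager.connect
# below does exactly this, with 'x9' as the default namespace):
#   conn = rpyc.classic.connect('localhost', 18812)
#   sys.meta_path.append(RemoteImporter(conn, 'x9'))
#   import x9.sys              # resolved through the remote connection
#   print(x9.sys.platform)     # the *server's* platform, not the local one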
class NamespaceManager(object):
"""
A namespace manager to hold connection, background thread and remote importer.
"""
def __init__(self, conn, remote_importer):
conn.error_handler = self.error_handler
self.conn = conn
self.bgsrv = rpyc.BgServingThread(conn)
self.remote_importer = remote_importer
namespace_registry[remote_importer.namespace] = self
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
will:
1, remove self from namespace registry;
2, remove self.remote_importer from sys.meta_path, and set self.remote_importer to None;
3, stop background thread self.bgsrv, and set self.bgserv to None;
4, close connection self.conn, and set self.conn to None;
won't:
clear modules of namespace.
        This does not even come close to resetting Python to the state it was in before
        the import: the modules that were imported recursively still hang in sys.modules.
Please accept that Python indeed does not support unloading modules for severe,
fundamental, insurmountable, technical problems, in 2.x.
In 3.x, chances are slightly higher. In principle, unloading could be supported -
but no module actually adds the necessary code, and the necessary code in the import
machinery isn't implemented in 3.2 and earlier.
Supporting unloading will be (and was) a multi-year project. Don't expect any
results in the next five years.
"""
if self.remote_importer:
namespace = self.remote_importer.namespace
# Remove self from namespace_registry.
if namespace in namespace_registry:
del namespace_registry[namespace]
try:
# Remove old RemoteImporter of namespace in sys.meta_path
sys.meta_path.remove(self.remote_importer)
except:
pass
finally:
self.remote_importer = None
if self.bgsrv:
try:
self.bgsrv.stop()
except AssertionError:
pass
finally:
self.bgsrv = None
if self.conn:
self.conn.close()
self.conn = None
@classmethod
def connect(cls, host='localhost', port=18812, namespace='x9'):
"""
Launch a classic connect to host: port as namespace,
import <namespace> as ns
=> ns will be connect.modules
from <namespace>.debug import draw
=> will import connect.modules['debug.draw'] as draw
If connect twice to the same namespace,
1, all imported modules will alive and use the latest connection;
2, all old remote objects will die;
3, old NamespaceManager will be closed automatically.
"""
try:
conn = rpyc.classic.connect(host, port)
except socket.error:
return None
remote_importer = RemoteImporter(conn, namespace)
# Update all imported remote modules.
manager = namespace_registry.get(namespace)
if manager:
manager.close()
sys.meta_path.append(remote_importer)
sys.modules[namespace] = conn.modules
cls.update_modules(namespace)
return cls(conn, remote_importer)
@classmethod
def update_modules(cls, namespace):
"""
Update imported modules by namespace.
"""
module_backup = dict()
match_key = namespace + '.'
for name, module in sys.modules.iteritems():
if name.startswith(match_key):
module_backup[name] = module
for name in module_backup.iterkeys():
del sys.modules[name]
for name, module in module_backup.iteritems():
new_module = importlib.import_module(name)
conn = object.__getattribute__(new_module, "____conn__")()
oid = object.__getattribute__(new_module, "____oid__")
object.__setattr__(module, "____conn__", weakref.ref(conn))
object.__setattr__(module, "____oid__", oid)
for name, module in module_backup.iteritems():
sys.modules[name] = module
def error_handler(self, sender, ex):
pass
class AutoCloseNM(NamespaceManager):
def error_handler(self, sender, ex):
self.close()
return True
# ------------------------------------------------------------------
# Define global connections here, or use the pattern below when you need it:
# with NamespaceManager.connect('localhost', 18813, 'tmp') as ns:
# # This module name should be different from global modules.
# from tmp import BigWorld as tmp_bw
# # Make sure your remote call is a sync operation! For example,
# # tmp_bw.callback will be failed after this NamespaceManager exit.
# tmp_bw.dcursor().yaw = 1
authors: ["xiaoming8185620@126.com"] | author_id: xiaoming8185620@126.com
blob_id: e0b1c3532fe2b23bdab9904863e111ebf2996564 | directory_id: f3fb4195090589a19788df12f6f42ab429193cdd | path: /D3_animate/interactive_canvas/interactive_canvas.py | content_id: 11b067052f8551d6c3bea475217408b869d7fdaf
detected_licenses: [] | license_type: no_license | repo_name: amlawson98/Spring_Lattices | snapshot_id: e3701e2b54aab548eed27a63021be4a5ae2865c2 | revision_id: 16c0abb5a7ab7e1ad508a664cfe15b4a57f919d5 | branch_name: refs/heads/master
visit_date: 2020-09-08T12:14:06.146502 | revision_date: 2019-11-12T04:44:46 | committer_date: 2019-11-12T04:44:46 | github_id: 221,130,464 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,085 | extension: py
content:
from tkinter import *
window_size = 400
top_space = 40
root = Tk()
#root.geometry(height= window_size, width= window_size)
f1 = Frame(root, width =window_size, height = window_size - top_space)
f2 = Frame(root, width =window_size, height = top_space)
canvas = Canvas(f1, width = window_size, height= window_size-top_space)
def set_with_temp_scale(scale_val):
global scale_temp
scale_temp = float(scale_val)
temp_scale = Scale(f2, from_=0., to=10.,resolution= 0.00001, orient= HORIZONTAL, showvalue=1, command= set_with_temp_scale, label = "Temp")
temp_scale.place()#rely = 30, relx = 80)
temp_scale.pack(fill=BOTH, side=LEFT)
def set_with_H_scale(scale_val):
global scale_H
scale_H = float(scale_val)
H_scale = Scale(f2, from_=5, to=0,resolution= 0.01, orient= HORIZONTAL, showvalue=1, command= set_with_H_scale, label = "H (field ext. eng.)")
H_scale.place()#rely = 30, relx = 80)
H_scale.pack(fill=BOTH, side=LEFT)
def set_with_J_scale(scale_val):
global scale_J
scale_J = float(scale_val)
J_scale = Scale(f2, from_=1, to=-1,resolution= 1, orient= HORIZONTAL, showvalue=1, command= set_with_J_scale, label = "J (interaction eng.)")
J_scale.place()#rely = 30, relx = 80)
J_scale.pack(fill=BOTH, side=LEFT)
running = False
txt_var = StringVar()
txt_var.set("Play")
def pause_play():
global running
if running:
txt_var.set("Play")
if not running:
txt_var.set("Pause")
running = not running
pause_button = Button(f2,textvariable=txt_var, command= pause_play)
pause_button.pack()
quit_button = Button(f2, text="Quit", command = root.destroy)
quit_button.pack()
annealing = BooleanVar()
annealing_button = Checkbutton(f2, text="Annealing", variable=annealing)
annealing_button.pack()
temp=0
def update_temp():
global temp
decrease_rate = -0.0004
temp = temp*(1 + decrease_rate)
f1.pack(side=BOTTOM)
f2.pack(side=TOP)
while True:
circ = canvas.create_oval(10 + temp, 10+temp, 15+temp, 15+temp)
canvas.pack()
canvas.update()
if running:
temp = temp_scale.get()
if annealing.get() and running:
update_temp()
temp_scale.set(temp)
canvas.delete(circ)
authors: ["32847387+amlawson98@users.noreply.github.com"] | author_id: 32847387+amlawson98@users.noreply.github.com
blob_id: bef9e1ccc1ba3a5c2455304136671ad3f506acca | directory_id: 93a7bba2821aa34465c90f50dfe20b08a9b78d67 | path: /spellcheck.py | content_id: 95f73478a7e8d14748a5db42d7548aca5395a759
detected_licenses: [] | license_type: no_license | repo_name: arvind1609/Handwritten-Text-Recognition-System | snapshot_id: 85add335d33f3b29b4a4ac730c5f3986f1c32e40 | revision_id: e07dd5772fab5283d2dfb3234ae0eef8cbdad874 | branch_name: refs/heads/master
visit_date: 2020-04-15T13:45:09.732747 | revision_date: 2019-01-08T20:38:10 | committer_date: 2019-01-08T20:38:10 | github_id: 164,728,277 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 631 | extension: py
content:
from spellchecker import SpellChecker
spell = SpellChecker()
def correct_spelling(word):
original_word = word
# If word is all caps like an acronym
if not word.isupper():
original_word = word.lower()
corrected_word = spell.correction(original_word)
    # Keep the correction only when it has the same length as the original;
    # otherwise fall back to the original word
if len(original_word) == len(corrected_word):
final_word = corrected_word
else:
final_word = original_word
if word[0].isupper() and not word.isupper():
final_word = final_word.capitalize()
return final_word
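# Illustrative behaviour (actual suggestions depend on pyspellchecker's dictionary):
# if spell.correction('teh') returned 'the' (same length), correct_spelling('teh')
# would yield 'the'; a suggestion of a different length is discarded and the
# original word is kept.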
authors: ["noreply@github.com"] | author_id: arvind1609.noreply@github.com
blob_id: ca2951f89c8fcf239e756f26b15ef01148feb032 | directory_id: 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | path: /python/paddle/fluid/tests/custom_op/test_multi_out_jit.py | content_id: 7e252e048b64c9b158fabe21b818fbccaf71a26c
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: Superjomn/Paddle | snapshot_id: f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | revision_id: 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | branch_name: refs/heads/develop
visit_date: 2023-02-04T20:27:54.244843 | revision_date: 2023-01-26T15:31:14 | committer_date: 2023-01-26T15:31:14 | github_id: 66,896,049 | star_events_count: 4 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-04-14T02:29:52 | gha_created_at: 2016-08-30T01:45:54 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,680 | extension: py
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
from utils import extra_cc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows doesn't use docker, the shared lib may already exist in the
# cache dir; it will not be compiled again unless the shared lib is removed.
file = '{}\\multi_out_jit\\multi_out_jit.pyd'.format(get_build_directory())
if os.name == 'nt' and os.path.isfile(file):
cmd = 'del {}'.format(file)
run_cmd(cmd, True)
# Compile and load custom op Just-In-Time.
multi_out_module = load(
name='multi_out_jit',
sources=['multi_out_test_op.cc'],
extra_include_paths=paddle_includes, # add for Coverage CI
extra_cxx_cflags=extra_cc_args, # test for cflags
verbose=True,
)
class TestMultiOutputDtypes(unittest.TestCase):
def setUp(self):
self.custom_op = multi_out_module.multi_out
self.dtypes = ['float32', 'float64']
self.devices = ['cpu']
def run_static(self, device, dtype):
paddle.set_device(device)
x_data = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
with paddle.static.scope_guard(paddle.static.Scope()):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(name='X', shape=[None, 8], dtype=dtype)
outs = self.custom_op(x)
exe = paddle.static.Executor()
exe.run(paddle.static.default_startup_program())
res = exe.run(
paddle.static.default_main_program(),
feed={'X': x_data},
fetch_list=outs,
)
return res
def check_multi_outputs(self, outs, is_dynamic=False):
out, zero_float64, one_int32 = outs
if is_dynamic:
zero_float64 = zero_float64.numpy()
one_int32 = one_int32.numpy()
# Fake_float64
self.assertTrue('float64' in str(zero_float64.dtype))
np.testing.assert_array_equal(
zero_float64, np.zeros([4, 8]).astype('float64')
)
# ZFake_int32
self.assertTrue('int32' in str(one_int32.dtype))
np.testing.assert_array_equal(
one_int32, np.ones([4, 8]).astype('int32')
)
def test_static(self):
paddle.enable_static()
for device in self.devices:
for dtype in self.dtypes:
res = self.run_static(device, dtype)
self.check_multi_outputs(res)
paddle.disable_static()
def test_dynamic(self):
for device in self.devices:
for dtype in self.dtypes:
paddle.set_device(device)
x_data = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
x = paddle.to_tensor(x_data)
outs = self.custom_op(x)
self.assertTrue(len(outs) == 3)
self.check_multi_outputs(outs, True)
if __name__ == '__main__':
unittest.main()
authors: ["noreply@github.com"] | author_id: Superjomn.noreply@github.com
blob_id: ff3e94ef7879ecc2b3a10a2b3dd6073f3cf2643a | directory_id: 1d0c057b44faf95811c0966c57e31a1c7d37f783 | path: /eastman2/urls.py | content_id: 5dade3d36433c24189a133f1a09d1f50315996c6
detected_licenses: [] | license_type: no_license | repo_name: davidnjakai/eastman2 | snapshot_id: c817cf7af72d79a06c4d016f72bb539cc49f1eb8 | revision_id: 5df8271834baec403b42b56d27d958a48653bfbf | branch_name: refs/heads/master
visit_date: 2020-12-25T07:30:02.870419 | revision_date: 2016-07-07T21:25:19 | committer_date: 2016-07-07T21:25:19 | github_id: 60,179,287 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 819 | extension: py
content:
"""eastman2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^clinic/', include('clinic.urls')),
url(r'^admin/', admin.site.urls),
]
authors: ["dvdseroney@gmail.com"] | author_id: dvdseroney@gmail.com
blob_id: 3a7cf247650cce99e5be3bda5ca00bcccf38972f | directory_id: 35d0c90beda35b277474a4800415ccbe63a1d04a | path: /inquisitor/management/commands/count_agencies_with_active_address.py | content_id: cbd3bfd95b24cb4d1d40701098453a53bee2d35f
detected_licenses: [] | license_type: no_license | repo_name: ArtieCode/Projekt_Koncowy | snapshot_id: 6cec4b346b361293f28ad5682a0f92bda90b83a5 | revision_id: fe06cfa09af0762919260b25f0052a6d5d2f5456 | branch_name: refs/heads/master
visit_date: 2020-05-20T23:02:42.818788 | revision_date: 2019-05-17T09:56:51 | committer_date: 2019-05-17T09:56:51 | github_id: 185,794,106 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 850 | extension: py
content:
from django.core.management.base import BaseCommand, CommandError
from inquisitor.models import AgencyAddress, DetectiveAgency
from tqdm import tqdm
import re
class Command(BaseCommand):
    help = 'Count agencies with an active address'
def handle(self, *args, **options):
all_agencies = DetectiveAgency.objects.all()
active_count = 0
def has_active_address(db_object):
addresses = db_object.agencyaddress_set.all()
for address in addresses:
if address.address_type == 2:
return True
return False
for agency in tqdm(all_agencies):
has_active = has_active_address(agency)
if has_active:
active_count += 1
print(f'{active_count}/{len(all_agencies)} agencies with an active address')
authors: ["artur.placha@gmail.com"] | author_id: artur.placha@gmail.com
blob_id: efbfb275ecf4ddfacfe040a07abe20a304942382 | directory_id: 866dee1b3d01b863c31332ec81330d1b5ef5c6fa | path: /openquake.hazardlib/openquake/hazardlib/gsim/campbell_bozorgnia_2003.py | content_id: d16f63883e1925478903cecb7406eb1dbb6030d7
detected_licenses: ["MIT", "AGPL-3.0-only"] | license_type: permissive | repo_name: rainzhop/ConvNetQuake | snapshot_id: 3e2e1a040952bd5d6346905b83f39889c6a2e51a | revision_id: a3e6de3f7992eac72f1b9883fec36b8c7fdefd48 | branch_name: refs/heads/master
visit_date: 2020-08-07T16:41:03.778293 | revision_date: 2019-11-01T01:49:00 | committer_date: 2019-11-01T01:49:00 | github_id: 213,527,701 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-10-08T02:08:00 | gha_created_at: 2019-10-08T02:08:00 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,419 | extension: py
content:
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`CampbellBozorgnia2003NSHMP2007`.
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class CampbellBozorgnia2003NSHMP2007(GMPE):
"""
Implements GMPE developed by Kenneth W. Campbell and Yousef Bozorgnia and
published as "Updated Near-Source Ground-Motion (Attenuation) Relations for
the Horizontal and Vertical Components of Peak Ground Acceleration and
    Acceleration Response Spectra", Bulletin of the Seismological Society of
America, Vol. 93, No. 1, pp. 314-331, 2003.
    The class implements the equation as modified by the United States
Geological Survey - National Seismic Hazard Mapping Project (USGS-NSHMP)
for the 2007 Alaska model
(http://earthquake.usgs.gov/hazards/products/ak/2007/).
The class replicates the equation as coded in ``subroutine getCamp2000``
in ``hazFXv7.f`` available from
http://earthquake.usgs.gov/hazards/products/ak/2007/software/.
    The equation computes the mean value for the 'firm rock' condition.
"""
#: Supported tectonic region type is 'active shallow crust' (see Abstract)
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are PGA and SA (see Abstract)
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
SA
])
#: Supported intensity measure component is the geometric mean of two
#: horizontal components (see paragraph 'Strong-Motion Database', page 316)
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation type is Total (see equations 11, 12 pp. 319
#: 320)
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: No sites parameters are required. Mean value is computed for
#: 'firm rock'.
REQUIRES_SITES_PARAMETERS = set(())
#: Required rupture parameters are magnitude, rake and dip (eq. 1 and
#: following, page 319).
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake', 'dip'))
#: Required distance measure are RRup and Rjb (eq. 1 and following,
#: page 319).
REQUIRES_DISTANCES = set(('rrup', 'rjb'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
C = self.COEFFS[imt]
mean = self._get_mean(
C, rup.mag, rup.rake, rup.dip, dists.rrup, dists.rjb
)
stddevs = self._get_stddevs(C, rup.mag, stddev_types, dists.rrup.size)
return mean, stddevs
def _get_mean(self, C, mag, rake, dip, rrup, rjb):
"""
Return mean value (eq. 1, page 319).
"""
f1 = self._compute_magnitude_scaling(C, mag)
f2 = self._compute_distance_scaling(C, mag, rrup)
f3 = self._compute_faulting_mechanism(C, rake, dip)
f4 = self._compute_far_source_soil_effect(C)
f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag)
mean = (
C['c1'] + f1 + C['c4'] * np.log(np.sqrt(f2)) + f3 + f4 + f5
)
return mean
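    # Eq. 1 as composed above, with f1..f5 computed by the helper methods below:
    #   mean = c1 + f1(mag) + c4 * ln(sqrt(f2(mag, rrup))) + f3(rake, dip) + f4 + f5(rjb, rrup, dip, mag)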
def _get_stddevs(self, C, mag, stddev_types, num_sites):
"""
Return standard deviation as defined in eq.11 page 319.
"""
std = C['c16'] + np.zeros(num_sites)
if mag < 7.4:
std -= 0.07 * mag
else:
std -= 0.518
# only the 'total' standard deviation is supported, therefore the
# std is always the same for all types
stddevs = [std for _ in stddev_types]
return stddevs
def _compute_magnitude_scaling(self, C, mag):
"""
Compute and return magnitude scaling term (eq.2, page 319)
"""
return C['c2'] * mag + C['c3'] * (8.5 - mag) ** 2
def _compute_distance_scaling(self, C, mag, rrup):
"""
Compute distance scaling term (eq.3, page 319).
The distance scaling assumes the near-source effect of local site
conditions due to 50% very firm soil and soft rock and 50% firm rock.
"""
g = C['c5'] + C['c6'] * 0.5 + C['c7'] * 0.5
return (
rrup ** 2 +
(np.exp(C['c8'] * mag + C['c9'] * (8.5 - mag) ** 2) * g) ** 2
)
def _compute_faulting_mechanism(self, C, rake, dip):
"""
Compute faulting mechanism term (see eq. 5, page 319).
Reverse faulting is defined as occurring on steep faults (dip > 45)
and rake in (22.5, 157.5).
Thrust faulting is defined as occurring on shallow dipping faults
(dip <=45) and rake in (22.5, 157.5)
"""
# flag for reverse faulting
frv = float((dip > 45) and (22.5 <= rake <= 157.5))
# flag for thrust faulting
fth = float((dip <= 45) and (22.5 <= rake <= 157.5))
return C['c10'] * frv + C['c11'] * fth
def _compute_far_source_soil_effect(self, C):
"""
Compute far-source effect of local site conditions (see eq. 6,
page 319) assuming 'firm rock' conditions.
"""
return C['c14']
def _compute_hanging_wall_effect(self, C, rjb, rrup, dip, mag):
"""
Compute hanging-wall effect (see eq. 7, 8, 9 and 10 page 319).
Considers correct version of equation 8 as given in the erratum and not
in the original paper.
"""
# eq. 8 (to be noticed that the USGS-NSHMP implementation defines
# the hanging-wall term for all rjb distances, while in the original
# manuscript, hw is computed only for rjb < 5). Again the 'firm rock'
# is considered
hw = np.zeros_like(rjb)
if dip <= 70.:
hw = (5. - rjb) / 5.
# eq. 9
f_m = 1 if mag > 6.5 else mag - 5.5
# # eq. 10
f_rrup = C['c15'] + np.zeros_like(rrup)
idx = rrup < 8
f_rrup[idx] *= rrup[idx] / 8
# eq. 7 (to be noticed that the f3 factor is not included
# while this is defined in the original manuscript)
f_hw = hw * f_m * f_rrup
return f_hw
#: Coefficient table (table 4, page 321. Coefficients for horizontal
#: component and for corrected PGA)
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16
pga -4.033 0.812 0.036 -1.061 0.041 -0.005 -0.018 0.766 0.034 0.343 0.351 -0.123 -0.138 -0.289 0.370 0.920
0.10 -2.661 0.812 0.060 -1.308 0.166 -0.009 -0.068 0.621 0.046 0.224 0.313 -0.146 -0.253 -0.299 0.370 0.958
0.20 -2.771 0.812 0.030 -1.153 0.098 -0.014 -0.038 0.704 0.026 0.296 0.342 -0.148 -0.183 -0.330 0.370 0.981
0.30 -2.999 0.812 0.007 -1.080 0.059 -0.007 -0.022 0.752 0.007 0.359 0.385 -0.162 -0.157 -0.453 0.370 0.984
0.50 -3.556 0.812 -0.035 -0.964 0.023 -0.002 -0.004 0.842 -0.036 0.406 0.479 -0.122 -0.130 -0.528 0.370 0.990
1.0 -3.867 0.812 -0.101 -0.964 0.019 0 0 0.842 -0.105 0.329 0.338 -0.073 -0.072 -0.607 0.281 1.021
2.0 -4.311 0.812 -0.180 -0.964 0.019 0 0 0.842 -0.187 0.060 0.064 -0.124 -0.116 -0.649 0.160 1.021
""")
authors: ["rainzhop@gmail.com"] | author_id: rainzhop@gmail.com
blob_id: 665a296262fe97164ada5fc3e0db919390d90e00 | directory_id: e45d2faad9389886a82ff5176853b1ff6e37caae | path: /simplecv/017_face_detect.py | content_id: e93e398dd543658092ca32de34f80eb4096d57e8
detected_licenses: [] | license_type: no_license | repo_name: allenmo/python_study | snapshot_id: 6320aa4cd80fe46ccf73076015c67bdcb6338d30 | revision_id: 7aff5d810ca6e791d62235d57c072a8dc14457ca | branch_name: refs/heads/master
visit_date: 2021-03-24T12:00:33.079530 | revision_date: 2016-11-22T23:35:58 | committer_date: 2016-11-22T23:35:58 | github_id: 55,770,379 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 678 | extension: py
content:
from SimpleCV import *
cam = Camera()
disp = Display()
size = cam.getImage().size()
segment = HaarCascade("face.xml")
while disp.isNotDone():
img = cam.getImage()
autoface = img.findHaarFeatures(segment)
    lenFace = len(autoface) if autoface else 0 # findHaarFeatures returns None when no face is found
    if lenFace > 0:
for i in range(0,lenFace):
face = autoface[i]
x = face.x
y = face.y
width = face.width()
height = face.height()
img.dl().centeredRectangle((x,y),(width,height),Color.LIME)
img.applyLayers()
img.drawText("Num of Face: " + str(lenFace), x = size[0]-150, y = size[1]-30, color = Color.LIME, fontsize = 24)
img.show()
authors: ["allen02403@gmail.com"] | author_id: allen02403@gmail.com
blob_id: 1109161a39f73fe01e4a6f4099ad4dad4a0939bc | directory_id: abdb582b9ab76eaf6df1fdb5843c24fa6fa1ede0 | path: /flendz_test/urls.py | content_id: 80bc3d35b33735c54f511c2ea63a1065e235799b
detected_licenses: [] | license_type: no_license | repo_name: jabykuniyil/flendz | snapshot_id: 1375341ee97986842d962702e0f1ac7f6d48cae7 | revision_id: ef952f9e14320b9c512b4047c6726ab9ff776120 | branch_name: refs/heads/main
visit_date: 2023-05-27T20:12:36.774259 | revision_date: 2021-06-05T04:38:47 | committer_date: 2021-06-05T04:38:47 | github_id: 372,798,247 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 170 | extension: py
content:
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('test_app.url')),
]
authors: ["mohdjabiran112@gmail.com"] | author_id: mohdjabiran112@gmail.com
blob_id: ef4232d6318f6c09b7ce5f6cb4de67654392c61e | directory_id: 5de91e63d99ba96db2aa69bc7efaf93dbe7fcbe3 | path: /compute_dp_sgd_privacy.py | content_id: aa0d1ba586dcd1545019cc3192c5d75dfea3d85b
detected_licenses: [] | license_type: no_license | repo_name: cuongtran-syr/DP_Fair | snapshot_id: a44168eef05e06de427f5b09dbe5c5c9516a1864 | revision_id: d5f7d59a2163013c1c119a956e9e87bd1127e0f4 | branch_name: refs/heads/master
visit_date: 2022-11-16T03:01:15.326027 | revision_date: 2020-07-10T18:39:31 | committer_date: 2020-07-10T18:39:31 | github_id: 260,422,813 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,576 | extension: py
content:
# clone from https://github.com/ebagdasa/differential-privacy-vs-fairness/blob/master/compute_dp_sgd_privacy.py
r"""Command-line script for computing privacy of a model trained with DP-SGD.
The script applies the RDP accountant to estimate privacy budget of an iterated
Sampled Gaussian Mechanism. The mechanism's parameters are controlled by flags.
Example:
compute_dp_sgd_privacy
--N=60000 \
--batch_size=256 \
--noise_multiplier=1.12 \
--epochs=60 \
--delta=1e-5
The output states that DP-SGD with these parameters satisfies (2.92, 1e-5)-DP.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
from tfcode.rdp_accountant import compute_rdp
from tfcode.rdp_accountant import get_privacy_spent
FLAGS = flags.FLAGS
flags.DEFINE_integer('N', None, 'Total number of examples')
flags.DEFINE_integer('batch_size', None, 'Batch size')
flags.DEFINE_float('noise_multiplier', None, 'Noise multiplier for DP-SGD')
flags.DEFINE_float('epochs', None, 'Number of epochs (may be fractional)')
flags.DEFINE_float('delta', 1e-6, 'Target delta')
flags.mark_flag_as_required('N')
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('noise_multiplier')
flags.mark_flag_as_required('epochs')
def apply_dp_sgd_analysis(q, sigma, steps, orders, delta):
"""Compute and print results of DP-SGD analysis."""
rdp = compute_rdp(q, sigma, steps, orders)
eps, _, opt_order = get_privacy_spent(orders, rdp, target_delta=delta)
# print('DP-SGD with sampling rate = {:.3g}% and noise_multiplier = {} iterated'
# ' over {} steps satisfies'.format(100 * q, sigma, steps), end=' ')
# print('differential privacy with eps = {:.3g} and delta = {}.'.format(
# eps, delta))
# print('The optimal RDP order is {}.'.format(opt_order))
#
# if opt_order == max(orders) or opt_order == min(orders):
# print('The privacy estimate is likely to be improved by expanding '
# 'the set of orders.')
return eps
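# Worked numbers for the module docstring's example (illustrative):
#   q = 256 / 60000 ~= 0.00427 and steps = ceil(60 * 60000 / 256) = 14063,
#   so apply_dp_sgd_analysis(q, 1.12, 14063, orders, 1e-5) reports the
#   (2.92, 1e-5)-DP guarantee quoted above.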
def main(argv):
del argv # argv is not used.
q = FLAGS.batch_size / FLAGS.N # q - the sampling ratio.
if q > 1:
raise app.UsageError('N must be larger than the batch size.')
orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
list(range(5, 64)) + [128, 256, 512])
steps = int(math.ceil(FLAGS.epochs * FLAGS.N / FLAGS.batch_size))
apply_dp_sgd_analysis(q, FLAGS.noise_multiplier, steps, orders, FLAGS.delta)
if __name__ == '__main__':
app.run(main)
authors: ["noreply@github.com"] | author_id: cuongtran-syr.noreply@github.com
blob_id: 74265b4401c3e94e0487b4755dd8a0b7a8dd4660 | directory_id: bf3b729b635c2f0505e1adeed88cf583d8923367 | path: /devscripts/createSampleLinks.py | content_id: af5e719511b1e15bc906f8a16f86458ce2edb257
detected_licenses: [] | license_type: no_license | repo_name: LucBerge/GitHub-map | snapshot_id: 0e9e3c4d96530e376e531640af2f950cf0c6c69a | revision_id: 66c73079bf8e4a27a0e057d94cdb9e7ac8aa341f | branch_name: refs/heads/master
visit_date: 2020-04-28T04:56:03.346772 | revision_date: 2020-01-06T18:45:04 | committer_date: 2020-01-06T18:45:04 | github_id: 175,000,271 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 347 | extension: py
content:
#!/usr/bin/python
import sys
try:
input = open(sys.argv[1],"rU")
output = open(sys.argv[2],"w")
limit = int(sys.argv[3])
for line in input:
line_split = line.split('\t')
if(len(line_split) == 3):
if(int(line_split[2]) >= limit):
output.write(line)
input.close()
output.close()
except:
print("Usage <input> <output> <limit>")
authors: ["lucas.bergeron@outlook.fr"] | author_id: lucas.bergeron@outlook.fr
blob_id: 8f47de3208833543e640537cd04d8d34420ac5c3 | directory_id: 454318a732e59051bb8bb9275b5814ad96c42b4d | path: /Projet/connect4/player.py | content_id: 77efe566f6cac0ff538b4426ace2935111c3f704
detected_licenses: [] | license_type: no_license | repo_name: AnthonyBonfils3/Simplon_Brief_faur_in_rows | snapshot_id: d46f5b8d5c3ae64e340e31a6cc5ef9cc04a9fb6c | revision_id: db2b60e1892ca87cf19d9b7e8bbbdea23a3bea0f | branch_name: refs/heads/main
visit_date: 2023-04-11T23:17:06.610974 | revision_date: 2021-04-12T23:57:08 | committer_date: 2021-04-12T23:57:08 | github_id: 357,131,064 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,096 | extension: py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 10:25:37 2021
@author: bonfils
"""
from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
Qlearning_model_path = './Qlearning/models/'
class Player(ABC): ## the class must be abstract so that it can use
                   ## an abstract method (hence the class inherits from ABC)
def __init__(self, board, name:str='player 1', couleur="Red"):
"""
-------------
DESCRIPTION :
-------------
INPUT : self :
        OUTPUT :
-------------
"""
self.board = board
self.name = name
self.couleur = couleur
if self.couleur=="Red":
self.value = 1
elif self.couleur=="Yellow":
self.value = -1
    @abstractmethod ## an abstract method must be overridden in the child classes
def play(self):
"""
-------------
DESCRIPTION : Choose a column to put a pawn
-------------
"""
pass
class Human(Player):
def __init__(self, board, name='Ia1', couleur="Red"):
"""
-------------
DESCRIPTION :
-------------
INPUT : self :
        OUTPUT :
-------------
"""
Player.__init__(self, board, name=name, couleur=couleur)
print('---> Human initialized <---')
def play(self):
"""
-------------
DESCRIPTION :
-------------
INPUT : self :
        OUTPUT :
-------------
"""
        column = int(input(f"Enter a column position (integer between 1 and {self.board.n_columns}): "))-1
return column
class Ia(Player):
def __init__(self, board, name='Ia1', couleur="Red", strategy='random', mode='1'):
"""
-------------
DESCRIPTION :
-------------
INPUT : self :
        OUTPUT :
-------------
"""
Player.__init__(self, board, name=name, couleur=couleur)
        self.strategy = strategy ## AI type -> random or Q_learning
self.mode = mode
if self.strategy=='Q_learning':
if self.mode==1:
filepath = Qlearning_model_path + 'model_test.h5' # 'P4_model_train_rand_2000_step.h5'
elif self.mode==2:
filepath = Qlearning_model_path + 'model_qlearning195.h5' # 'model_qlearning100.h5'
elif self.mode==3:
filepath = Qlearning_model_path + 'model_qlearning500.h5' ## 'model_qlearning1.h5'
elif self.mode==4:
filepath = Qlearning_model_path + 'model_CNNqlearning40.h5'
else:
print(f"Error : strategy {self.strategy} with mode {self.mode} is not available yet.")
self.model = tf.keras.models.load_model(filepath, custom_objects=None, compile=True, options=None)
if (self.strategy=='random'):
print('---> Random IA initialized <---')
elif (self.strategy=='Q_learning'):
print('---> Q_learning IA initialized <---')
def play(self):
"""
-------------
DESCRIPTION :
-------------
INPUT : self :
        OUTPUT :
-------------
"""
if (self.strategy=='random'):
column = np.random.randint(0, self.board.n_columns)
elif (self.strategy=='heuristics'):
print(f"Error : strategy {self.strategy} is not available yet.")
elif (self.strategy=='Q_learning'):
if (self.mode==1):
current_state_flat = self.board.grid.reshape(1,-1)
column = np.argmax(self.model.predict(current_state_flat)[0])
print("Qlearning model choice of column", column)
elif (self.mode==2):
current_state_flat = self.board.grid.reshape(1,-1)
column = np.argmax(self.model.predict(current_state_flat)[0])
print("Qlearning model choice of column", column)
elif (self.mode==3):
current_state_flat = self.board.grid.reshape(1,-1)
column = np.argmax(self.model.predict(current_state_flat)[0])
print("Qlearning model choice of column", column)
elif (self.mode==4):
current_state_reshape = np.reshape(self.board.grid, (1, self.board.n_rows, self.board.n_columns, 1))
column = np.argmax(self.model.predict(current_state_reshape)[0])
print("Qlearning model choice of column", column)
else:
print(f"Error : strategy {self.strategy} with mode {self.mode} is not available yet.")
else:
print(f"Error : strategy {self.strategy} is not available yet.")
return column
authors: ["anthonybonfils3@gmail.com"] | author_id: anthonybonfils3@gmail.com
blob_id: 9a85e70ab62265d785e5f011cfd8ac042b203745 | directory_id: 9795451f7b059e79a6a96a05752f753113830091 | path: /EMCUnity/UnityClasses.py | content_id: 81abb27adccbc2c8ebb40a7d3a8611870c2900bf
detected_licenses: ["MIT"] | license_type: permissive | repo_name: fiveout/EMCUnity | snapshot_id: a0413544178a5869ec876d88a226a4c8bd9a6859 | revision_id: 76ba23d54e6409c051e92506a9283cbe722fd4a6 | branch_name: refs/heads/master
visit_date: 2021-04-06T09:43:07.276010 | revision_date: 2018-03-24T06:21:47 | committer_date: 2018-03-24T06:21:47 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 115,502 | extension: py
content:
import collections
from collections import namedtuple
def namedtuple_defaults(typename, field_names, default_values=()):
    ''' This makes our objects simple named tuples whose fields default to None;
    standard namedtuples require all values '''
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
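# Illustrative sketch (the 'Point' type below is hypothetical): every field
# defaults to None unless a default is supplied via default_values.
#   Point = namedtuple_defaults('Point', ['x', 'y'], default_values={'y': 0})
#   Point(x=1)   # -> Point(x=1, y=0)
#   Unitylun()   # -> a Unitylun with every field set to None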
UnityaclUser = namedtuple_defaults('UnityaclUser', [
'id',
'sid',
'domainName',
'userName',
])
Unityalert = namedtuple_defaults('Unityalert', [
'id',
'timestamp',
'severity',
'component',
'messageId',
'message',
'descriptionId',
'description',
'resolutionId',
'resolution',
'isAcknowledged',
])
UnityalertConfig = namedtuple_defaults('UnityalertConfig', [
'id',
'locale',
'isThresholdAlertsEnabled',
'minEmailNotificationSeverity',
'destinationEmails',
'minSNMPTrapNotificationSeverity',
])
UnityalertConfigSNMPTarget = namedtuple_defaults('UnityalertConfigSNMPTarget', [
'id',
'address',
'version',
'username',
'authProto',
'privacyProto',
])
UnitybaseRequest = namedtuple_defaults('UnitybaseRequest', [
])
UnitybaseResponse = namedtuple_defaults('UnitybaseResponse', [
])
UnitybasicSystemInfo = namedtuple_defaults('UnitybasicSystemInfo', [
'id',
'model',
'name',
'softwareVersion',
'apiVersion',
'earliestApiVersion',
])
Unitybattery = namedtuple_defaults('Unitybattery', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'firmwareVersion',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'parentStorageProcessor',
])
UnityblockHostAccess = namedtuple_defaults('UnityblockHostAccess', [
'host',
'accessMask',
])
UnitycandidateSoftwareVersion = namedtuple_defaults('UnitycandidateSoftwareVersion', [
'id',
'version',
'revision',
'releaseDate',
'type',
])
UnitycapabilityProfile = namedtuple_defaults('UnitycapabilityProfile', [
'id',
'vmwareUUID',
'name',
'description',
'pool',
'driveTypes',
'fastCacheStates',
'raidTypes',
'spaceEfficiencies',
'tieringPolicies',
'serviceLevels',
'usageTags',
'inUse',
'health',
'virtualVolumes',
])
UnitycertificateScope = namedtuple_defaults('UnitycertificateScope', [
'nasServer',
])
UnitycifsServer = namedtuple_defaults('UnitycifsServer', [
'id',
'name',
'description',
'netbiosName',
'domain',
'lastUsedOrganizationalUnit',
'workgroup',
'isStandalone',
'health',
'nasServer',
'fileInterfaces',
'smbcaSupported',
'smbMultiChannelSupported',
'smbProtocolVersions',
])
UnitycifsShare = namedtuple_defaults('UnitycifsShare', [
'id',
'type',
'filesystem',
'snap',
'isReadOnly',
'name',
'path',
'exportPaths',
'description',
'creationTime',
'modifiedTime',
'isContinuousAvailabilityEnabled',
'isEncryptionEnabled',
'isACEEnabled',
'isABEEnabled',
'isBranchCacheEnabled',
'isDFSEnabled',
'offlineAvailability',
'umask',
])
UnitycifsShareACE = namedtuple_defaults('UnitycifsShareACE', [
'sid',
'accessType',
'accessLevel',
])
UnityconfigCaptureResult = namedtuple_defaults('UnityconfigCaptureResult', [
'id',
'name',
'creationTime',
])
Unitycrl = namedtuple_defaults('Unitycrl', [
'id',
'service',
'scope',
'version',
'crlNumber',
'signatureAlgorithm',
'issuer',
'thisUpdate',
'nextUpdate',
'certificates',
'deltaCRLIndicator',
])
Unitydae = namedtuple_defaults('Unitydae', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'enclosureType',
'busId',
'driveTypes',
'currentPower',
'avgPower',
'maxPower',
'currentTemperature',
'avgTemperature',
'maxTemperature',
'currentSpeed',
'maxSpeed',
'parentSystem',
])
UnitydataCollectionResult = namedtuple_defaults('UnitydataCollectionResult', [
'id',
'name',
'creationTime',
])
Unitydatastore = namedtuple_defaults('Unitydatastore', [
'id',
'storageResource',
'name',
'format',
'host',
'sizeTotal',
'sizeUsed',
'vmDisks',
'vms',
])
UnitydhsmServer = namedtuple_defaults('UnitydhsmServer', [
'id',
'nasServer',
'username',
])
Unitydisk = namedtuple_defaults('Unitydisk', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'busId',
'name',
'manufacturer',
'model',
'version',
'emcPartNumber',
'emcSerialNumber',
'tierType',
'diskGroup',
'rpm',
'isSED',
'currentSpeed',
'maxSpeed',
'pool',
'isInUse',
'isFastCacheInUse',
'size',
'rawSize',
'vendorSize',
'wwn',
'diskTechnology',
'parentDae',
'parentDpe',
'bank',
'bankSlotNumber',
'bankSlot',
])
UnitydiskGroup = namedtuple_defaults('UnitydiskGroup', [
'id',
'name',
'emcPartNumber',
'tierType',
'diskTechnology',
'isFASTCacheAllowable',
'diskSize',
'advertisedSize',
'rpm',
'speed',
'totalDisks',
'minHotSpareCandidates',
'hotSparePolicyStatus',
'unconfiguredDisks',
])
UnitydnsServer = namedtuple_defaults('UnitydnsServer', [
'id',
'domain',
'addresses',
'origin',
])
Unitydpe = namedtuple_defaults('Unitydpe', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'enclosureType',
'busId',
'driveTypes',
'currentPower',
'avgPower',
'maxPower',
'currentTemperature',
'avgTemperature',
'maxTemperature',
'currentSpeed',
'maxSpeed',
'parentSystem',
])
Unityencryption = namedtuple_defaults('Unityencryption', [
'id',
'encryptionMode',
'encryptionStatus',
'encryptionPercentage',
'keyManagerBackupKeyStatus',
])
UnityesrsParam = namedtuple_defaults('UnityesrsParam', [
'id',
'enabled',
'isCentralized',
'status',
'proxyIsEnabled',
'proxyAddress',
'proxyIsHTTP',
'proxyUserName',
'esrsVeAddress',
'siteId',
'esrsConfigStatus',
'isEsrsVeEulaAccepted',
])
UnityesrsPolicyManager = namedtuple_defaults('UnityesrsPolicyManager', [
'id',
'isEnabled',
'address',
'useHTTPS',
'sslStrength',
'proxyIsEnabled',
'proxyAddress',
'proxyUseSocks',
'proxyUserName',
])
UnityethernetPort = namedtuple_defaults('UnityethernetPort', [
'id',
'health',
'storageProcessor',
'needsReplacement',
'name',
'portNumber',
'speed',
'mtu',
'connectorType',
'bond',
'isLinkUp',
'macAddress',
'isRSSCapable',
'isRDMACapable',
'requestedSpeed',
'parentIOModule',
'parentStorageProcessor',
'supportedSpeeds',
'requestedMtu',
'supportedMtus',
'parent',
'sfpSupportedSpeeds',
'sfpSupportedProtocols',
])
Unityevent = namedtuple_defaults('Unityevent', [
'id',
'node',
'creationTime',
'severity',
'messageId',
'arguments',
'message',
'username',
'category',
'source',
])
Unityfan = namedtuple_defaults('Unityfan', [
'id',
'health',
'parent',
'slotNumber',
'name',
'emcPartNumber',
'emcSerialNumber',
'manufacturer',
'model',
'vendorPartNumber',
'vendorSerialNumber',
'needsReplacement',
'parentDpe',
'parentDae',
])
UnityfastCache = namedtuple_defaults('UnityfastCache', [
'id',
'health',
'sizeTotal',
'sizeFree',
'numberOfDisks',
'raidLevel',
'raidGroups',
])
UnityfastVP = namedtuple_defaults('UnityfastVP', [
'id',
'status',
'relocationRate',
'isScheduleEnabled',
'scheduleDays',
'scheduleStartTime',
'scheduleEndTime',
'sizeMovingDown',
'sizeMovingUp',
'sizeMovingWithin',
'relocationDurationEstimate',
])
UnityfcPort = namedtuple_defaults('UnityfcPort', [
'id',
'health',
'parent',
'slotNumber',
'wwn',
'availableSpeeds',
'currentSpeed',
'requestedSpeed',
'sfpSupportedSpeeds',
'sfpSupportedProtocols',
'connectorType',
'storageProcessor',
'needsReplacement',
'nPortId',
'name',
'parentIOModule',
'parentStorageProcessor',
])
Unityfeature = namedtuple_defaults('Unityfeature', [
'id',
'name',
'state',
'reason',
'license',
])
UnityfileDNSServer = namedtuple_defaults('UnityfileDNSServer', [
'id',
'nasServer',
'addresses',
'domain',
'replicationPolicy',
'sourceParameters',
])
UnityfileInterface = namedtuple_defaults('UnityfileInterface', [
'id',
'nasServer',
'ipPort',
'health',
'ipAddress',
'ipProtocolVersion',
'netmask',
'v6PrefixLength',
'gateway',
'vlanId',
'macAddress',
'name',
'role',
'isPreferred',
'replicationPolicy',
'sourceParameters',
'isDisabled',
])
UnityfileKerberosServer = namedtuple_defaults('UnityfileKerberosServer', [
'id',
'nasServer',
'realm',
'addresses',
'portNumber',
])
UnityfileLDAPServer = namedtuple_defaults('UnityfileLDAPServer', [
'id',
'nasServer',
'authority',
'profileDN',
'serverAddresses',
'portNumber',
'authenticationType',
'protocol',
'verifyServerCertificate',
'bindDN',
'isCifsAccountUsed',
'principal',
'realm',
'schemeType',
'replicationPolicy',
'sourceParameters',
])
UnityfileNDMPServer = namedtuple_defaults('UnityfileNDMPServer', [
'id',
'nasServer',
'username',
])
UnityfileNISServer = namedtuple_defaults('UnityfileNISServer', [
'id',
'nasServer',
'addresses',
'domain',
'replicationPolicy',
'sourceParameters',
])
Unityfilesystem = namedtuple_defaults('Unityfilesystem', [
'id',
'health',
'name',
'description',
'type',
'sizeTotal',
'sizeUsed',
'sizeAllocated',
'isReadOnly',
'isThinEnabled',
'storageResource',
'isCIFSSyncWritesEnabled',
'pool',
'isCIFSOpLocksEnabled',
'nasServer',
'isCIFSNotifyOnWriteEnabled',
'isCIFSNotifyOnAccessEnabled',
'cifsNotifyOnChangeDirDepth',
'tieringPolicy',
'supportedProtocols',
'metadataSize',
'metadataSizeAllocated',
'perTierSizeUsed',
'snapsSize',
'snapsSizeAllocated',
'snapCount',
'isSMBCA',
'accessPolicy',
'format',
'hostIOSize',
'poolFullPolicy',
'cifsShare',
'nfsShare',
])
UnityftpServer = namedtuple_defaults('UnityftpServer', [
'id',
'nasServer',
'isFtpEnabled',
'isSftpEnabled',
'isCifsUserEnabled',
'isUnixUserEnabled',
'isAnonymousUserEnabled',
'isHomedirLimitEnabled',
'defaultHomedir',
'welcomeMsg',
'motd',
'isAuditEnabled',
'auditDir',
'auditMaxSize',
'hostsList',
'usersList',
'groupsList',
'isAllowHost',
'isAllowUser',
'isAllowGroup',
])
Unityhealth = namedtuple_defaults('Unityhealth', [
'value',
'descriptionIds',
'descriptions',
'resolutionIds',
'resolutions',
])
Unityhost = namedtuple_defaults('Unityhost', [
'id',
'health',
'name',
'description',
'type',
'osType',
'hostUUID',
'hostPushedUUID',
'hostPolledUUID',
'lastPollTime',
'autoManageType',
'registrationType',
'hostContainer',
'fcHostInitiators',
'iscsiHostInitiators',
'hostIPPorts',
'storageResources',
'hostLUNs',
'datastores',
'nfsShareAccesses',
'hostVVolDatastore',
'vms',
])
UnityhostContainer = namedtuple_defaults('UnityhostContainer', [
'id',
'lastPollTime',
'port',
'name',
'type',
'address',
'description',
'productName',
'productVersion',
'health',
'hosts',
])
UnityhostInitiator = namedtuple_defaults('UnityhostInitiator', [
'id',
'health',
'type',
'initiatorId',
'parentHost',
'isIgnored',
'nodeWWN',
'portWWN',
'chapUserName',
'isChapSecretEnabled',
'paths',
'iscsiType',
'isBound',
'sourceType',
])
UnityhostInitiatorPath = namedtuple_defaults('UnityhostInitiatorPath', [
'id',
'registrationType',
'isLoggedIn',
'hostPushName',
'sessionIds',
'initiator',
'fcPort',
'iscsiPortal',
])
UnityhostIPPort = namedtuple_defaults('UnityhostIPPort', [
'id',
'name',
'type',
'address',
'netmask',
'v6PrefixLength',
'isIgnored',
'host',
])
UnityhostLUN = namedtuple_defaults('UnityhostLUN', [
'id',
'host',
'type',
'hlu',
'lun',
'snap',
'isReadOnly',
])
UnityhostVVolDatastore = namedtuple_defaults('UnityhostVVolDatastore', [
'id',
'storageResource',
'host',
])
UnityinstalledSoftwareVersion = namedtuple_defaults('UnityinstalledSoftwareVersion', [
'id',
'version',
'revision',
'releaseDate',
'languages',
'hotFixes',
'packageVersions',
])
UnityioLimitParameters = namedtuple_defaults('UnityioLimitParameters', [
'ioLimitPolicy',
])
UnityioLimitPolicy = namedtuple_defaults('UnityioLimitPolicy', [
'id',
'name',
'description',
'isShared',
'ioLimitRules',
'luns',
'snaps',
])
UnityioLimitRule = namedtuple_defaults('UnityioLimitRule', [
'id',
'name',
'description',
'maxIOPS',
'maxKBPS',
'ioLimitpolicy',
])
UnityioLimitSetting = namedtuple_defaults('UnityioLimitSetting', [
'id',
'isPaused',
])
UnityioModule = namedtuple_defaults('UnityioModule', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'systemName',
'parentStorageProcessor',
])
UnityipInterface = namedtuple_defaults('UnityipInterface', [
'id',
'ipPort',
'ipProtocolVersion',
'ipAddress',
'netmask',
'v6PrefixLength',
'gateway',
'vlanId',
'type',
])
UnityipPort = namedtuple_defaults('UnityipPort', [
'id',
'name',
'shortName',
'macAddress',
'isLinkUp',
'storageProcessor',
])
UnityiscsiNode = namedtuple_defaults('UnityiscsiNode', [
'id',
'name',
'ethernetPort',
'alias',
])
UnityiscsiPortal = namedtuple_defaults('UnityiscsiPortal', [
'id',
'ethernetPort',
'iscsiNode',
'ipAddress',
'netmask',
'v6PrefixLength',
'gateway',
'vlanId',
'ipProtocolVersion',
])
UnityiscsiSettings = namedtuple_defaults('UnityiscsiSettings', [
'id',
'isForwardCHAPRequired',
'reverseCHAPUserName',
'forwardGlobalCHAPUserName',
'iSNSServer',
])
Unityjob = namedtuple_defaults('Unityjob', [
'id',
'description',
'state',
'stateChangeTime',
'submitTime',
'startTime',
'endTime',
'elapsedTime',
'estRemainTime',
'progressPct',
'tasks',
'parametersOut',
'messageOut',
'isJobCancelable',
'isJobCancelled',
'clientData',
])
Unitylcc = namedtuple_defaults('Unitylcc', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'sasExpanderVersions',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'currentSpeed',
'maxSpeed',
'parentDae',
])
UnityldapServer = namedtuple_defaults('UnityldapServer', [
'id',
'authority',
'serverAddress',
'bindDN',
'protocol',
'userSearchPath',
'groupSearchPath',
'userIdAttribute',
'groupNameAttribute',
'userObjectClass',
'groupObjectClass',
'groupMemberAttribute',
'timeout',
])
Unitylicense = namedtuple_defaults('Unitylicense', [
'id',
'name',
'isInstalled',
'version',
'isValid',
'issued',
'expires',
'isPermanent',
'feature',
])
UnitylinkAggregation = namedtuple_defaults('UnitylinkAggregation', [
'id',
'name',
'shortName',
'masterPort',
'ports',
'mtuSize',
'supportedMtus',
'macAddress',
'isLinkUp',
'parent',
'parentStorageProcessor',
])
UnitylocalizedMessage = namedtuple_defaults('UnitylocalizedMessage', [
'locale',
'message',
])
UnityloginSessionInfo = namedtuple_defaults('UnityloginSessionInfo', [
'id',
'user',
'roles',
'idleTimeout',
'isPasswordChangeRequired',
])
Unitylun = namedtuple_defaults('Unitylun', [
'id',
'health',
'name',
'description',
'type',
'sizeTotal',
'sizeUsed',
'sizeAllocated',
'perTierSizeUsed',
'isThinEnabled',
'storageResource',
'pool',
'wwn',
'tieringPolicy',
'defaultNode',
'isReplicationDestination',
'currentNode',
'snapSchedule',
'isSnapSchedulePaused',
'ioLimitPolicy',
'metadataSize',
'metadataSizeAllocated',
'snapWwn',
'snapsSize',
'snapsSizeAllocated',
'hostAccess',
'snapCount',
])
UnitylunMemberReplication = namedtuple_defaults('UnitylunMemberReplication', [
'srcStatus',
'networkStatus',
'dstStatus',
'srcLunId',
'dstLunId',
])
UnitymemoryModule = namedtuple_defaults('UnitymemoryModule', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'firmwareVersion',
'size',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'parentStorageProcessor',
'isInserted',
])
Unitymessage = namedtuple_defaults('Unitymessage', [
'severity',
'errorCode',
'created',
'httpStatusCode',
'messages',
])
Unitymetric = namedtuple_defaults('Unitymetric', [
'id',
'name',
'path',
'type',
'description',
'isHistoricalAvailable',
'isRealtimeAvailable',
'unitDisplayString',
])
UnitymetricCollection = namedtuple_defaults('UnitymetricCollection', [
'id',
'interval',
'oldest',
'retention',
])
UnitymetricQueryResult = namedtuple_defaults('UnitymetricQueryResult', [
'queryId',
'path',
'timestamp',
'values',
])
UnitymetricRealTimeQuery = namedtuple_defaults('UnitymetricRealTimeQuery', [
'id',
'paths',
'interval',
'expiration',
])
UnitymetricService = namedtuple_defaults('UnitymetricService', [
'id',
'isHistoricalEnabled',
])
UnitymetricValue = namedtuple_defaults('UnitymetricValue', [
'path',
'timestamp',
'interval',
'values',
])
UnitymgmtInterface = namedtuple_defaults('UnitymgmtInterface', [
'id',
'configMode',
'ethernetPort',
'protocolVersion',
'ipAddress',
'netmask',
'v6PrefixLength',
'gateway',
])
UnitymgmtInterfaceSettings = namedtuple_defaults('UnitymgmtInterfaceSettings', [
'id',
'v4ConfigMode',
'v6ConfigMode',
])
UnitynasServer = namedtuple_defaults('UnitynasServer', [
'id',
'name',
'health',
'homeSP',
'currentSP',
'pool',
'sizeAllocated',
'isReplicationEnabled',
'isReplicationDestination',
'replicationType',
'defaultUnixUser',
'defaultWindowsUser',
'currentUnixDirectoryService',
'isMultiProtocolEnabled',
'isWindowsToUnixUsernameMappingEnabled',
'allowUnmappedUser',
'cifsServer',
'preferredInterfaceSettings',
'fileDNSServer',
'fileInterface',
'virusChecker',
])
UnitynfsServer = namedtuple_defaults('UnitynfsServer', [
'id',
'hostName',
'nasServer',
'fileInterfaces',
'nfsv4Enabled',
'isSecureEnabled',
'kdcType',
'servicePrincipalName',
'isExtendedCredentialsEnabled',
'credentialsCacheTTL',
])
UnitynfsShare = namedtuple_defaults('UnitynfsShare', [
'id',
'type',
'role',
'filesystem',
'snap',
'name',
'path',
'exportPaths',
'description',
'isReadOnly',
'creationTime',
'modificationTime',
'defaultAccess',
'minSecurity',
'noAccessHosts',
'readOnlyHosts',
'readWriteHosts',
'rootAccessHosts',
'hostAccesses',
])
UnityntpServer = namedtuple_defaults('UnityntpServer', [
'id',
'addresses',
])
Unitypool = namedtuple_defaults('Unitypool', [
'id',
'health',
'name',
'description',
'storageResourceType',
'raidType',
'sizeFree',
'sizeTotal',
'sizeUsed',
'sizeSubscribed',
'alertThreshold',
'isFASTCacheEnabled',
'tiers',
'creationTime',
'isEmpty',
'poolFastVP',
'isHarvestEnabled',
'harvestState',
'isSnapHarvestEnabled',
'poolSpaceHarvestHighThreshold',
'poolSpaceHarvestLowThreshold',
'snapSpaceHarvestHighThreshold',
'snapSpaceHarvestLowThreshold',
'metadataSizeSubscribed',
'snapSizeSubscribed',
'metadataSizeUsed',
'snapSizeUsed',
'rebalanceProgress',
])
UnitypoolConsumer = namedtuple_defaults('UnitypoolConsumer', [
'id',
])
UnitypoolConsumerAllocation = namedtuple_defaults('UnitypoolConsumerAllocation', [
'id',
'pool',
'consumer',
'consumerType',
'sizeAllocatedTotal',
'snapsSizeAllocated',
])
UnitypoolUnit = namedtuple_defaults('UnitypoolUnit', [
'id',
'type',
'health',
'name',
'description',
'wwn',
'sizeTotal',
'tierType',
'pool',
])
UnitypowerSupply = namedtuple_defaults('UnitypowerSupply', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'firmwareVersion',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'emcPartNumber',
'parentDae',
'parentDpe',
])
UnitypreferredInterfaceSettings = namedtuple_defaults('UnitypreferredInterfaceSettings', [
'id',
'nasServer',
'productionIpV4',
'productionIpV6',
'backupIpV4',
'backupIpV6',
'sourceParameters',
'replicationPolicy',
])
UnityquotaConfig = namedtuple_defaults('UnityquotaConfig', [
'id',
'filesystem',
'treeQuota',
'quotaPolicy',
'isUserQuotaEnabled',
'isAccessDenyEnabled',
'gracePeriod',
'defaultHardLimit',
'defaultSoftLimit',
'lastUpdateTimeOfTreeQuotas',
'lastUpdateTimeOfUserQuotas',
])
UnityraidGroup = namedtuple_defaults('UnityraidGroup', [
'id',
'type',
'health',
'name',
'description',
'wwn',
'sizeTotal',
'tierType',
'pool',
'diskGroup',
'raidType',
'stripeWidth',
'parityDisks',
'disks',
])
UnityremoteInterface = namedtuple_defaults('UnityremoteInterface', [
'id',
'remoteId',
'name',
'address',
'remoteSystem',
'node',
'capability',
])
UnityremoteSyslog = namedtuple_defaults('UnityremoteSyslog', [
'id',
'address',
'protocol',
'facility',
'enabled',
])
UnityremoteSystem = namedtuple_defaults('UnityremoteSystem', [
'id',
'name',
'model',
'serialNumber',
'health',
'managementAddress',
'connectionType',
'syncFcPorts',
'username',
'localSPAInterfaces',
'localSPBInterfaces',
'remoteSPAInterfaces',
'remoteSPBInterfaces',
])
UnityreplicationInterface = namedtuple_defaults('UnityreplicationInterface', [
'id',
'ipPort',
'health',
'ipAddress',
'ipProtocolVersion',
'netmask',
'v6PrefixLength',
'gateway',
'vlanId',
'macAddress',
'name',
])
UnityreplicationSession = namedtuple_defaults('UnityreplicationSession', [
'id',
'name',
'replicationResourceType',
'status',
'health',
'maxTimeOutOfSync',
'srcStatus',
'networkStatus',
'dstStatus',
'lastSyncTime',
'syncState',
'remoteSystem',
'localRole',
'srcResourceId',
'srcSPAInterface',
'srcSPBInterface',
'dstResourceId',
'dstSPAInterface',
'dstSPBInterface',
'members',
'syncProgress',
'currentTransferEstRemainTime',
])
UnityresourceRef = namedtuple_defaults('UnityresourceRef', [
'resource',
'id',
])
Unityrole = namedtuple_defaults('Unityrole', [
'id',
'name',
'description',
])
UnityroleMapping = namedtuple_defaults('UnityroleMapping', [
'id',
'authorityName',
'roleName',
'entityName',
'mappingType',
])
Unityroute = namedtuple_defaults('Unityroute', [
'id',
'ipInterface',
'destination',
'netmask',
'v6PrefixLength',
'gateway',
])
UnityrpChapSettings = namedtuple_defaults('UnityrpChapSettings', [
'id',
'outgoingForwardChapUsername',
])
UnitysasPort = namedtuple_defaults('UnitysasPort', [
'id',
'health',
'needsReplacement',
'parent',
'name',
'port',
'currentSpeed',
'connectorType',
'parentStorageProcessor',
])
UnitysecuritySettings = namedtuple_defaults('UnitysecuritySettings', [
'id',
'isFIPSEnabled',
'isSSOEnabled',
'isTLS1Enabled',
])
UnityserviceAction = namedtuple_defaults('UnityserviceAction', [
'id',
'scope',
'name',
'description',
'isApplicable',
'applyCondition',
])
UnityserviceContract = namedtuple_defaults('UnityserviceContract', [
'id',
'contractId',
'contractNumber',
'contractStatus',
'levelOfService',
'serviceLineId',
'lastUpdated',
'productStartDate',
'productEndDate',
])
UnityserviceInfo = namedtuple_defaults('UnityserviceInfo', [
'id',
'productName',
'productSerialNumber',
'systemUUID',
'isSSHEnabled',
'esrsStatus',
'sps',
])
UnitysmtpServer = namedtuple_defaults('UnitysmtpServer', [
'id',
'address',
'type',
])
Unitysnap = namedtuple_defaults('Unitysnap', [
'id',
'name',
'description',
'storageResource',
'lun',
'snapGroup',
'parentSnap',
'creationTime',
'expirationTime',
'creatorType',
'creatorUser',
'creatorSchedule',
'isSystemSnap',
'isModifiable',
'attachedWWN',
'accessType',
'isReadOnly',
'lastWritableTime',
'isModified',
'isAutoDelete',
'state',
'size',
'ioLimitPolicy',
])
UnitysnapSchedule = namedtuple_defaults('UnitysnapSchedule', [
'id',
'name',
'isDefault',
'isModified',
'version',
'rules',
'storageResources',
])
UnitysoftwareUpgradeSession = namedtuple_defaults('UnitysoftwareUpgradeSession', [
'id',
'type',
'candidate',
'caption',
'status',
'messages',
'creationTime',
'elapsedTime',
'percentComplete',
'tasks',
])
Unityssc = namedtuple_defaults('Unityssc', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'parentDae',
])
Unityssd = namedtuple_defaults('Unityssd', [
'id',
'health',
'needsReplacement',
'parent',
'slotNumber',
'name',
'manufacturer',
'model',
'firmwareVersion',
'emcPartNumber',
'emcSerialNumber',
'vendorPartNumber',
'vendorSerialNumber',
'parentStorageProcessor',
])
UnitystorageProcessor = namedtuple_defaults('UnitystorageProcessor', [
'id',
'parent',
'health',
'needsReplacement',
'isRescueMode',
'model',
'slotNumber',
'name',
'emcPartNumber',
'emcSerialNumber',
'manufacturer',
'vendorPartNumber',
'vendorSerialNumber',
'sasExpanderVersion',
'biosFirmwareRevision',
'postFirmwareRevision',
'memorySize',
'parentDpe',
])
UnitystorageResource = namedtuple_defaults('UnitystorageResource', [
'id',
'health',
'name',
'description',
'type',
'isReplicationDestination',
'replicationType',
'sizeTotal',
'sizeUsed',
'sizeAllocated',
'thinStatus',
'esxFilesystemMajorVersion',
'esxFilesystemBlockSize',
'snapSchedule',
'isSnapSchedulePaused',
'relocationPolicy',
'perTierSizeUsed',
'blockHostAccess',
'metadataSize',
'metadataSizeAllocated',
'snapsSizeTotal',
'snapsSizeAllocated',
'snapCount',
'vmwareUUID',
'pools',
'datastores',
'filesystem',
'hostVVolDatastore',
'luns',
'virtualVolumes',
])
UnitystorageResourceCapabilityProfile = namedtuple_defaults('UnitystorageResourceCapabilityProfile', [
'id',
'storageResource',
'capabilityProfile',
'isInUse',
'sizeUsed',
'sizeAllocated',
'sizeTotal',
'logicalSizeUsed',
])
UnitystorageTier = namedtuple_defaults('UnitystorageTier', [
'id',
'tierType',
'raidConfigurations',
'disksTotal',
'disksUnused',
'virtualDisksTotal',
'virtualDisksUnused',
'sizeTotal',
'sizeFree',
])
UnitystorageTierConfiguration = namedtuple_defaults('UnitystorageTierConfiguration', [
'storageTier',
'raidType',
'stripeWidth',
'disksTotal',
'sizeTotal',
'diskGroupConfigurations',
])
UnitysupportAsset = namedtuple_defaults('UnitysupportAsset', [
'id',
'name',
'description',
])
UnitysupportService = namedtuple_defaults('UnitysupportService', [
'id',
'supportUsername',
'supportCredentialStatus',
'isEMCServiced',
'isContractReportEnabled',
])
Unitysystem = namedtuple_defaults('Unitysystem', [
'id',
'health',
'name',
'model',
'serialNumber',
'internalModel',
'platform',
'macAddress',
'isEULAAccepted',
'isUpgradeComplete',
'isAutoFailbackEnabled',
'currentPower',
'avgPower',
])
UnitysystemInformation = namedtuple_defaults('UnitysystemInformation', [
'id',
'contactFirstName',
'contactLastName',
'contactCompany',
'contactPhone',
'contactEmail',
'locationName',
'streetAddress',
'city',
'state',
'zipcode',
'country',
'siteId',
'contactMobilePhone',
])
UnitysystemLimit = namedtuple_defaults('UnitysystemLimit', [
'id',
'name',
'description',
'unit',
'limitValue',
'thresholdValue',
'resources',
'license',
])
UnitysystemTime = namedtuple_defaults('UnitysystemTime', [
'id',
'time',
])
UnitytechnicalAdvisory = namedtuple_defaults('UnitytechnicalAdvisory', [
'id',
'knowledgeBaseId',
'description',
'modificationTime',
])
UnitytreeQuota = namedtuple_defaults('UnitytreeQuota', [
'id',
'filesystem',
'quotaConfig',
'path',
'description',
'state',
'hardLimit',
'softLimit',
'remainingGracePeriod',
'sizeUsed',
])
UnityuncommittedPort = namedtuple_defaults('UnityuncommittedPort', [
'id',
'health',
'name',
'portNumber',
'connectorType',
'sfpSupportedSpeeds',
'sfpSupportedProtocols',
'needsReplacement',
'storageProcessor',
'parentIOModule',
'parentStorageProcessor',
'parent',
])
UnityurServer = namedtuple_defaults('UnityurServer', [
'address',
'id',
])
Unityuser = namedtuple_defaults('Unityuser', [
'id',
'name',
'role',
])
UnityuserQuota = namedtuple_defaults('UnityuserQuota', [
'id',
'filesystem',
'treeQuota',
'uid',
'state',
'hardLimit',
'softLimit',
'remainingGracePeriod',
'sizeUsed',
])
UnityvirtualVolume = namedtuple_defaults('UnityvirtualVolume', [
'id',
'health',
'name',
'vvolType',
'replicaType',
'parent',
'storageResource',
'pool',
'capabilityProfile',
'policyProfileName',
'isCompliant',
'isThinEnabled',
'sizeTotal',
'sizeUsed',
'bindings',
'vmUUID',
'vm',
'vmDisk',
])
UnityvirusChecker = namedtuple_defaults('UnityvirusChecker', [
'id',
'nasServer',
'isEnabled',
])
Unityvm = namedtuple_defaults('Unityvm', [
'id',
'datastore',
'name',
'guestAddresses',
'guestHostName',
'notes',
'osType',
'host',
'state',
'vmDisks',
'virtualVolumes',
])
UnityvmDisk = namedtuple_defaults('UnityvmDisk', [
'datastore',
'id',
'vm',
'name',
'spaceTotal',
'type',
'virtualVolumes',
])
UnityvmwareNasPEServer = namedtuple_defaults('UnityvmwareNasPEServer', [
'id',
'nasServer',
'fileInterfaces',
'boundVVolCount',
])
UnityvmwarePE = namedtuple_defaults('UnityvmwarePE', [
'id',
'vmwareNasPEServer',
'name',
'type',
'vmwareUUID',
'exportPath',
'ipAddress',
'defaultNode',
'currentNode',
'wwn',
'naa',
'vvolds',
'host',
'boundVVolCount',
'health',
])
Unityx509Certificate = namedtuple_defaults('Unityx509Certificate', [
'id',
'type',
'service',
'scope',
'isTrustAnchor',
'version',
'serialNumber',
'signatureAlgorithm',
'issuer',
'validFrom',
'validTo',
'subject',
'subjectAlternativeName',
'publicKeyAlgorithm',
'keyLength',
'thumbprintAlgorithm',
'thumbprint',
'hasPrivateKey',
])
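# --- Usage sketch (illustrative, not part of the generated mapping above) ---
# Assumption: namedtuple_defaults, defined earlier in this module, builds a
# namedtuple whose fields all default to None, roughly:
#
#     from collections import namedtuple
#     def namedtuple_defaults(typename, field_names):
#         nt = namedtuple(typename, field_names)
#         nt.__new__.__defaults__ = (None,) * len(nt._fields)
#         return nt
#
# Under that assumption, sparse Unity REST responses map directly onto these
# records:
#
#     lun = Unitylun(id='sv_1', name='datastore01', sizeTotal=1099511627776)
#     assert lun.health is None  # unqueried fields fall back to the default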
|
[
"ktelep@gmail.com"
] |
ktelep@gmail.com
|
39cebbc5e363c770167687468337da3c136adef7
|
5ce85f7f72bb48407664ecfaf6d7d963fdd90e83
|
/signloginout/asgi.py
|
feec2c7d9f102d1d181f6be5aa8110cd77e99607
|
[] |
no_license
|
monali-warghane/signinloginout
|
632e994e3aa6d9824f0b020f5978d49855996a1e
|
f495628591071db63b7b998f67127d1f80b211cb
|
refs/heads/main
| 2023-09-03T18:56:28.760641
| 2021-10-02T11:22:46
| 2021-10-02T11:22:46
| 412,774,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for signloginout project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'signloginout.settings')
application = get_asgi_application()
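# Serving sketch (an assumption; this project does not pin an ASGI server):
# any ASGI server can host the callable above, e.g.
#
#     uvicorn signloginout.asgi:application
#
# or the equivalent daphne/hypercorn invocation.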
|
[
"monalibawankar97@gmail.com"
] |
monalibawankar97@gmail.com
|
f6cc20c827f453c325a7d6abd1a137191b4f3eb1
|
76356eb3f3963051a15f7dfe6867586293bd7534
|
/models/pruned/random_pruning/imagenet/resnet50_5.py
|
49f3cdf8fcafef99f746b8dc1da08175cd959536
|
[] |
no_license
|
ICIdsl/performance_modelling
|
f59c74c0c6b2e60457694978f9a6d2251f3a70c2
|
c48cf66db8e530797d0106a737c5c7da0852423c
|
refs/heads/master
| 2023-07-07T22:32:30.718833
| 2021-08-13T12:48:37
| 2021-08-13T12:48:37
| 394,321,871
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,795
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResNet50(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(3, 62, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.bn1 = nn.BatchNorm2d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
self.layer1_0_conv1 = nn.Conv2d(62, 63, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_0_bn1 = nn.BatchNorm2d(63, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_0_conv2 = nn.Conv2d(63, 63, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer1_0_bn2 = nn.BatchNorm2d(63, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_0_conv3 = nn.Conv2d(63, 251, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_0_bn3 = nn.BatchNorm2d(251, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_0_downsample_0 = nn.Conv2d(62, 251, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_0_downsample_1 = nn.BatchNorm2d(251, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_1_conv1 = nn.Conv2d(251, 63, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_1_bn1 = nn.BatchNorm2d(63, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_1_conv2 = nn.Conv2d(63, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer1_1_bn2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_1_conv3 = nn.Conv2d(64, 251, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_1_bn3 = nn.BatchNorm2d(251, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_2_conv1 = nn.Conv2d(251, 63, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_2_bn1 = nn.BatchNorm2d(63, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_2_conv2 = nn.Conv2d(63, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer1_2_bn2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1_2_conv3 = nn.Conv2d(64, 251, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer1_2_bn3 = nn.BatchNorm2d(251, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_0_conv1 = nn.Conv2d(251, 127, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_0_bn1 = nn.BatchNorm2d(127, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_0_conv2 = nn.Conv2d(127, 125, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.layer2_0_bn2 = nn.BatchNorm2d(125, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_0_conv3 = nn.Conv2d(125, 482, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_0_bn3 = nn.BatchNorm2d(482, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_0_downsample_0 = nn.Conv2d(251, 482, kernel_size=(1, 1), stride=(2, 2), bias=False)
self.layer2_0_downsample_1 = nn.BatchNorm2d(482, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_1_conv1 = nn.Conv2d(482, 127, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_1_bn1 = nn.BatchNorm2d(127, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_1_conv2 = nn.Conv2d(127, 127, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer2_1_bn2 = nn.BatchNorm2d(127, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_1_conv3 = nn.Conv2d(127, 482, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_1_bn3 = nn.BatchNorm2d(482, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_2_conv1 = nn.Conv2d(482, 127, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_2_bn1 = nn.BatchNorm2d(127, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_2_conv2 = nn.Conv2d(127, 126, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer2_2_bn2 = nn.BatchNorm2d(126, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_2_conv3 = nn.Conv2d(126, 482, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_2_bn3 = nn.BatchNorm2d(482, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_3_conv1 = nn.Conv2d(482, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_3_bn1 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_3_conv2 = nn.Conv2d(128, 127, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer2_3_bn2 = nn.BatchNorm2d(127, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2_3_conv3 = nn.Conv2d(127, 482, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer2_3_bn3 = nn.BatchNorm2d(482, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_0_conv1 = nn.Conv2d(482, 255, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_0_bn1 = nn.BatchNorm2d(255, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_0_conv2 = nn.Conv2d(255, 252, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.layer3_0_bn2 = nn.BatchNorm2d(252, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_0_conv3 = nn.Conv2d(252, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_0_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_0_downsample_0 = nn.Conv2d(482, 939, kernel_size=(1, 1), stride=(2, 2), bias=False)
self.layer3_0_downsample_1 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_1_conv1 = nn.Conv2d(939, 253, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_1_bn1 = nn.BatchNorm2d(253, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_1_conv2 = nn.Conv2d(253, 253, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3_1_bn2 = nn.BatchNorm2d(253, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_1_conv3 = nn.Conv2d(253, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_1_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_2_conv1 = nn.Conv2d(939, 255, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_2_bn1 = nn.BatchNorm2d(255, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_2_conv2 = nn.Conv2d(255, 254, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3_2_bn2 = nn.BatchNorm2d(254, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_2_conv3 = nn.Conv2d(254, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_2_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_3_conv1 = nn.Conv2d(939, 253, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_3_bn1 = nn.BatchNorm2d(253, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_3_conv2 = nn.Conv2d(253, 254, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3_3_bn2 = nn.BatchNorm2d(254, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_3_conv3 = nn.Conv2d(254, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_3_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_4_conv1 = nn.Conv2d(939, 250, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_4_bn1 = nn.BatchNorm2d(250, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_4_conv2 = nn.Conv2d(250, 251, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3_4_bn2 = nn.BatchNorm2d(251, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_4_conv3 = nn.Conv2d(251, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_4_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_5_conv1 = nn.Conv2d(939, 252, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_5_bn1 = nn.BatchNorm2d(252, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_5_conv2 = nn.Conv2d(252, 253, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer3_5_bn2 = nn.BatchNorm2d(253, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3_5_conv3 = nn.Conv2d(253, 939, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer3_5_bn3 = nn.BatchNorm2d(939, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_0_conv1 = nn.Conv2d(939, 503, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_0_bn1 = nn.BatchNorm2d(503, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_0_conv2 = nn.Conv2d(503, 502, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.layer4_0_bn2 = nn.BatchNorm2d(502, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_0_conv3 = nn.Conv2d(502, 1965, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_0_bn3 = nn.BatchNorm2d(1965, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_0_downsample_0 = nn.Conv2d(939, 1965, kernel_size=(1, 1), stride=(2, 2), bias=False)
self.layer4_0_downsample_1 = nn.BatchNorm2d(1965, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_1_conv1 = nn.Conv2d(1965, 505, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_1_bn1 = nn.BatchNorm2d(505, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_1_conv2 = nn.Conv2d(505, 503, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer4_1_bn2 = nn.BatchNorm2d(503, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_1_conv3 = nn.Conv2d(503, 1965, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_1_bn3 = nn.BatchNorm2d(1965, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_2_conv1 = nn.Conv2d(1965, 504, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_2_bn1 = nn.BatchNorm2d(504, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_2_conv2 = nn.Conv2d(504, 505, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.layer4_2_bn2 = nn.BatchNorm2d(505, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4_2_conv3 = nn.Conv2d(505, 1965, kernel_size=(1, 1), stride=(1, 1), bias=False)
self.layer4_2_bn3 = nn.BatchNorm2d(1965, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.fc = nn.Linear(in_features=1965, out_features=1000, bias=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.maxpool(x)
x_main = x
x_main = self.layer1_0_conv1(x_main)
x_main = self.layer1_0_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_0_conv2(x_main)
x_main = self.layer1_0_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_0_conv3(x_main)
x_main = self.layer1_0_bn3(x_main)
x_residual = x
x_residual = self.layer1_0_downsample_0(x_residual)
x_residual = self.layer1_0_downsample_1(x_residual)
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer1_1_conv1(x_main)
x_main = self.layer1_1_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_1_conv2(x_main)
x_main = self.layer1_1_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_1_conv3(x_main)
x_main = self.layer1_1_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer1_2_conv1(x_main)
x_main = self.layer1_2_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_2_conv2(x_main)
x_main = self.layer1_2_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer1_2_conv3(x_main)
x_main = self.layer1_2_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer2_0_conv1(x_main)
x_main = self.layer2_0_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_0_conv2(x_main)
x_main = self.layer2_0_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_0_conv3(x_main)
x_main = self.layer2_0_bn3(x_main)
x_residual = x
x_residual = self.layer2_0_downsample_0(x_residual)
x_residual = self.layer2_0_downsample_1(x_residual)
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer2_1_conv1(x_main)
x_main = self.layer2_1_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_1_conv2(x_main)
x_main = self.layer2_1_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_1_conv3(x_main)
x_main = self.layer2_1_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer2_2_conv1(x_main)
x_main = self.layer2_2_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_2_conv2(x_main)
x_main = self.layer2_2_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_2_conv3(x_main)
x_main = self.layer2_2_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer2_3_conv1(x_main)
x_main = self.layer2_3_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_3_conv2(x_main)
x_main = self.layer2_3_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer2_3_conv3(x_main)
x_main = self.layer2_3_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_0_conv1(x_main)
x_main = self.layer3_0_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_0_conv2(x_main)
x_main = self.layer3_0_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_0_conv3(x_main)
x_main = self.layer3_0_bn3(x_main)
x_residual = x
x_residual = self.layer3_0_downsample_0(x_residual)
x_residual = self.layer3_0_downsample_1(x_residual)
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_1_conv1(x_main)
x_main = self.layer3_1_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_1_conv2(x_main)
x_main = self.layer3_1_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_1_conv3(x_main)
x_main = self.layer3_1_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_2_conv1(x_main)
x_main = self.layer3_2_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_2_conv2(x_main)
x_main = self.layer3_2_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_2_conv3(x_main)
x_main = self.layer3_2_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_3_conv1(x_main)
x_main = self.layer3_3_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_3_conv2(x_main)
x_main = self.layer3_3_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_3_conv3(x_main)
x_main = self.layer3_3_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_4_conv1(x_main)
x_main = self.layer3_4_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_4_conv2(x_main)
x_main = self.layer3_4_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_4_conv3(x_main)
x_main = self.layer3_4_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer3_5_conv1(x_main)
x_main = self.layer3_5_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_5_conv2(x_main)
x_main = self.layer3_5_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer3_5_conv3(x_main)
x_main = self.layer3_5_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer4_0_conv1(x_main)
x_main = self.layer4_0_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_0_conv2(x_main)
x_main = self.layer4_0_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_0_conv3(x_main)
x_main = self.layer4_0_bn3(x_main)
x_residual = x
x_residual = self.layer4_0_downsample_0(x_residual)
x_residual = self.layer4_0_downsample_1(x_residual)
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer4_1_conv1(x_main)
x_main = self.layer4_1_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_1_conv2(x_main)
x_main = self.layer4_1_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_1_conv3(x_main)
x_main = self.layer4_1_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x_main = x
x_main = self.layer4_2_conv1(x_main)
x_main = self.layer4_2_bn1(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_2_conv2(x_main)
x_main = self.layer4_2_bn2(x_main)
x_main = F.relu(x_main, inplace=True)
x_main = self.layer4_2_conv3(x_main)
x_main = self.layer4_2_bn3(x_main)
x_residual = x
x = F.relu(x_main + x_residual, inplace=True)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet50(**kwargs):
return ResNet50(**kwargs)
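# Smoke-test sketch (added for illustration; the 7x7/stride-2 stem and the
# 1000-way classifier imply ImageNet-style 3x224x224 inputs). Note that the
# num_classes argument is accepted but unused: self.fc is hard-coded to
# 1000 output features.
#
#     model = resnet50()
#     model.eval()
#     with torch.no_grad():
#         out = model(torch.randn(1, 3, 224, 224))
#     assert out.shape == (1, 1000)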
|
[
"ar4414@ic.ac.uk"
] |
ar4414@ic.ac.uk
|
3613286aacd6a6b99a5617825f4bd0694d388038
|
df67b93cbe1dbb33a30d0f054ea131ff1ef7e655
|
/examples/channel.py
|
98adcb41203ce54a8a31bbe788a6f68816282d40
|
[] |
no_license
|
colinjcotter/lans
|
95505065264d75e86fa2bba22524a297e60e651e
|
d49dab7aa737164fe45fe5140da31830b4daa03d
|
refs/heads/main
| 2023-06-08T13:15:29.447546
| 2021-06-28T12:39:07
| 2021-06-28T12:39:07
| 323,318,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from lans import *
from firedrake import *
nx = 100
ny = 30
Lx = nx/ny
Ly = 1.0
mesh = RectangleMesh(nx, ny, Lx, Ly)
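# Firedrake RectangleMesh boundary ids: 1 = x=0 (the inflow here), 2 = x=Lx
# (outflow), 3 = y=0 and 4 = y=Ly (channel walls)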
#uv_normal_dirichlet_bdys = {1:as_vector([1.0,0]),
# 3:as_vector([0,0]),
# 4:as_vector([0,0])}
uv_normal_dirichlet_bdys = {1:as_vector([1.0,0])}
u_tangential_dirichlet_bdys = {1:as_vector([1.0,0])}
v_tangential_dirichlet_bdys = {1:as_vector([1.0,0])}
v_inflow_bdys = {1:as_vector([1,0])}
v_not_inflow_bdys = [2,3,4]
stepper = \
LANS_timestepper(mesh=mesh, degree=1, gamma=0.0, alpha=0.0, nu=0.1,
dt=0.01,
uv_normal_dirichlet_bdys=uv_normal_dirichlet_bdys,
u_tangential_dirichlet_bdys=u_tangential_dirichlet_bdys,
v_tangential_dirichlet_bdys=v_tangential_dirichlet_bdys,
v_inflow_bdys=v_inflow_bdys,
v_not_inflow_bdys=v_not_inflow_bdys)
stepper.run(tmax=10., dumpt=0.01,
filename='channel', verbose=True)
|
[
"colin.cotter@imperial.ac.uk"
] |
colin.cotter@imperial.ac.uk
|
153bedd6b9dfdb762195c22a86d2e1d6dddd83c5
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/flask-mail-labs/.venv/lib/python2.7/site-packages/babel/dates.py
|
a8fadc7801e088692658f8de8729ba4a76a892aa
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 67,706
|
py
|
# -*- coding: utf-8 -*-
"""
babel.dates
~~~~~~~~~~~
Locale dependent formatting and parsing of dates and times.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_TIME``,
* ``LC_ALL``, and
* ``LANG``
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import re
import warnings
import pytz as _pytz
from datetime import date, datetime, time, timedelta
from bisect import bisect_right
from babel.core import default_locale, get_global, Locale
from babel.util import UTC, LOCALTZ
from babel._compat import string_types, integer_types, number_types
LC_TIME = default_locale('LC_TIME')
# Aliases for use in scopes where the modules are shadowed by local variables
date_ = date
datetime_ = datetime
time_ = time
def _get_dt_and_tzinfo(dt_or_tzinfo):
"""
Parse a `dt_or_tzinfo` value into a datetime and a tzinfo.
See the docs for this function's callers for semantics.
:rtype: tuple[datetime, tzinfo]
"""
if dt_or_tzinfo is None:
dt = datetime.now()
tzinfo = LOCALTZ
elif isinstance(dt_or_tzinfo, string_types):
dt = None
tzinfo = get_timezone(dt_or_tzinfo)
elif isinstance(dt_or_tzinfo, integer_types):
dt = None
tzinfo = UTC
elif isinstance(dt_or_tzinfo, (datetime, time)):
dt = _get_datetime(dt_or_tzinfo)
if dt.tzinfo is not None:
tzinfo = dt.tzinfo
else:
tzinfo = UTC
else:
dt = None
tzinfo = dt_or_tzinfo
return dt, tzinfo
def _get_datetime(instant):
"""
Get a datetime out of an "instant" (date, time, datetime, number).
.. warning:: The return values of this function may depend on the system clock.
If the instant is None, the current moment is used.
If the instant is a time, it's augmented with today's date.
Dates are converted to naive datetimes with midnight as the time component.
>>> _get_datetime(date(2015, 1, 1))
datetime.datetime(2015, 1, 1, 0, 0)
UNIX timestamps are converted to datetimes.
>>> _get_datetime(1400000000)
datetime.datetime(2014, 5, 13, 16, 53, 20)
Other values are passed through as-is.
>>> x = datetime(2015, 1, 1)
>>> _get_datetime(x) is x
True
:param instant: date, time, datetime, integer, float or None
:type instant: date|time|datetime|int|float|None
:return: a datetime
:rtype: datetime
"""
if instant is None:
return datetime_.utcnow()
elif isinstance(instant, integer_types) or isinstance(instant, float):
return datetime_.utcfromtimestamp(instant)
elif isinstance(instant, time):
return datetime_.combine(date.today(), instant)
elif isinstance(instant, date) and not isinstance(instant, datetime):
return datetime_.combine(instant, time())
# TODO (3.x): Add an assertion/type check for this fallthrough branch:
return instant
def _ensure_datetime_tzinfo(datetime, tzinfo=None):
"""
Ensure the datetime passed has an attached tzinfo.
If the datetime is tz-naive to begin with, UTC is attached.
If a tzinfo is passed in, the datetime is normalized to that timezone.
>>> _ensure_datetime_tzinfo(datetime(2015, 1, 1)).tzinfo.zone
'UTC'
>>> tz = get_timezone("Europe/Stockholm")
>>> _ensure_datetime_tzinfo(datetime(2015, 1, 1, 13, 15, tzinfo=UTC), tzinfo=tz).hour
14
:param datetime: Datetime to augment.
:param tzinfo: Optional tzinfo.
:return: datetime with tzinfo
:rtype: datetime
"""
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
if tzinfo is not None:
datetime = datetime.astimezone(get_timezone(tzinfo))
if hasattr(tzinfo, 'normalize'): # pytz
datetime = tzinfo.normalize(datetime)
return datetime
def _get_time(time, tzinfo=None):
"""
Get a timezoned time from a given instant.
.. warning:: The return values of this function may depend on the system clock.
:param time: time, datetime or None
:rtype: time
"""
if time is None:
time = datetime.utcnow()
elif isinstance(time, number_types):
time = datetime.utcfromtimestamp(time)
if time.tzinfo is None:
time = time.replace(tzinfo=UTC)
if isinstance(time, datetime):
if tzinfo is not None:
time = time.astimezone(tzinfo)
if hasattr(tzinfo, 'normalize'): # pytz
time = tzinfo.normalize(time)
time = time.timetz()
elif tzinfo is not None:
time = time.replace(tzinfo=tzinfo)
return time
def get_timezone(zone=None):
"""Looks up a timezone by name and returns it. The timezone object
returned comes from ``pytz`` and corresponds to the `tzinfo` interface and
can be used with all of the functions of Babel that operate with dates.
If a timezone is not known a :exc:`LookupError` is raised. If `zone`
is ``None`` a local zone object is returned.
:param zone: the name of the timezone to look up. If a timezone object
itself is passed in, it's returned unchanged.
"""
if zone is None:
return LOCALTZ
if not isinstance(zone, string_types):
return zone
try:
return _pytz.timezone(zone)
except _pytz.UnknownTimeZoneError:
raise LookupError('Unknown timezone %s' % zone)
def get_next_timezone_transition(zone=None, dt=None):
"""Given a timezone it will return a :class:`TimezoneTransition` object
that holds the information about the next timezone transition that's going
to happen. For instance this can be used to detect when the next DST
change is going to happen and what it looks like.
The transition is calculated relative to the given datetime object. The
next transition that follows the date is used. If a transition cannot
be found the return value will be `None`.
Transition information can only be provided for timezones returned by
the :func:`get_timezone` function.
:param zone: the timezone for which the transition should be looked up.
If not provided the local timezone is used.
:param dt: the date after which the next transition should be found.
If not given the current time is assumed.
"""
zone = get_timezone(zone)
dt = _get_datetime(dt).replace(tzinfo=None)
if not hasattr(zone, '_utc_transition_times'):
raise TypeError('Given timezone does not have UTC transition '
'times. This can happen because the operating '
'system fallback local timezone is used or a '
'custom timezone object')
try:
idx = max(0, bisect_right(zone._utc_transition_times, dt))
old_trans = zone._transition_info[idx - 1]
new_trans = zone._transition_info[idx]
old_tz = zone._tzinfos[old_trans]
new_tz = zone._tzinfos[new_trans]
except (LookupError, ValueError):
return None
return TimezoneTransition(
activates=zone._utc_transition_times[idx],
from_tzinfo=old_tz,
to_tzinfo=new_tz,
reference_date=dt
)
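# Usage sketch (illustrative; exact transition instants depend on the
# installed pytz tz database):
#
#     trans = get_next_timezone_transition('Europe/Berlin',
#                                          datetime(2020, 1, 1))
#     trans.from_tz, trans.to_tz   # -> ('CET', 'CEST')
#     trans.activates              # the UTC instant of the spring DST switch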
class TimezoneTransition(object):
"""A helper object that represents the return value from
:func:`get_next_timezone_transition`.
"""
def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None):
#: the time of the activation of the timezone transition in UTC.
self.activates = activates
#: the timezone from where the transition starts.
self.from_tzinfo = from_tzinfo
#: the timezone for after the transition.
self.to_tzinfo = to_tzinfo
#: the reference date that was provided. This is the `dt` parameter
#: to the :func:`get_next_timezone_transition`.
self.reference_date = reference_date
@property
def from_tz(self):
"""The name of the timezone before the transition."""
return self.from_tzinfo._tzname
@property
def to_tz(self):
"""The name of the timezone after the transition."""
return self.to_tzinfo._tzname
@property
def from_offset(self):
"""The UTC offset in seconds before the transition."""
return int(self.from_tzinfo._utcoffset.total_seconds())
@property
def to_offset(self):
"""The UTC offset in seconds after the transition."""
return int(self.to_tzinfo._utcoffset.total_seconds())
def __repr__(self):
return '<TimezoneTransition %s -> %s (%s)>' % (
self.from_tz,
self.to_tz,
self.activates,
)
def get_period_names(width='wide', context='stand-alone', locale=LC_TIME):
"""Return the names for day periods (AM/PM) used by the locale.
>>> get_period_names(locale='en_US')['am']
u'AM'
:param width: the width to use, one of "abbreviated", "narrow", or "wide"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).day_periods[context][width]
def get_day_names(width='wide', context='format', locale=LC_TIME):
"""Return the day names used by the locale for the specified format.
>>> get_day_names('wide', locale='en_US')[1]
u'Tuesday'
>>> get_day_names('short', locale='en_US')[1]
u'Tu'
>>> get_day_names('abbreviated', locale='es')[1]
u'mar.'
>>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
u'D'
:param width: the width to use, one of "wide", "abbreviated", "short" or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).days[context][width]
def get_month_names(width='wide', context='format', locale=LC_TIME):
"""Return the month names used by the locale for the specified format.
>>> get_month_names('wide', locale='en_US')[1]
u'January'
>>> get_month_names('abbreviated', locale='es')[1]
u'ene.'
>>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
u'J'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).months[context][width]
def get_quarter_names(width='wide', context='format', locale=LC_TIME):
"""Return the quarter names used by the locale for the specified format.
>>> get_quarter_names('wide', locale='en_US')[1]
u'1st quarter'
>>> get_quarter_names('abbreviated', locale='de_DE')[1]
u'Q1'
>>> get_quarter_names('narrow', locale='de_DE')[1]
u'1'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).quarters[context][width]
def get_era_names(width='wide', locale=LC_TIME):
"""Return the era names used by the locale for the specified format.
>>> get_era_names('wide', locale='en_US')[1]
u'Anno Domini'
>>> get_era_names('abbreviated', locale='de_DE')[1]
u'n. Chr.'
:param width: the width to use, either "wide", "abbreviated", or "narrow"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).eras[width]
def get_date_format(format='medium', locale=LC_TIME):
"""Return the date formatting patterns used by the locale for the specified
format.
>>> get_date_format(locale='en_US')
<DateTimePattern u'MMM d, y'>
>>> get_date_format('full', locale='de_DE')
<DateTimePattern u'EEEE, d. MMMM y'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).date_formats[format]
def get_datetime_format(format='medium', locale=LC_TIME):
"""Return the datetime formatting patterns used by the locale for the
specified format.
>>> get_datetime_format(locale='en_US')
u'{1}, {0}'
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
patterns = Locale.parse(locale).datetime_formats
if format not in patterns:
format = None
return patterns[format]
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).time_formats[format]
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME, return_z=False):
"""Return the timezone associated with the given `datetime` object formatted
as a string indicating the offset from GMT.
>>> dt = datetime(2007, 4, 1, 15, 30)
>>> get_timezone_gmt(dt, locale='en')
u'GMT+00:00'
>>> get_timezone_gmt(dt, locale='en', return_z=True)
'Z'
>>> get_timezone_gmt(dt, locale='en', width='iso8601_short')
u'+00'
>>> tz = get_timezone('America/Los_Angeles')
>>> dt = tz.localize(datetime(2007, 4, 1, 15, 30))
>>> get_timezone_gmt(dt, locale='en')
u'GMT-07:00'
>>> get_timezone_gmt(dt, 'short', locale='en')
u'-0700'
>>> get_timezone_gmt(dt, locale='en', width='iso8601_short')
u'-07'
The long format depends on the locale; for example, in France the acronym
UTC is used instead of GMT:
>>> get_timezone_gmt(dt, 'long', locale='fr_FR')
u'UTC-07:00'
.. versionadded:: 0.9
:param datetime: the ``datetime`` object; if `None`, the current date and
time in UTC is used
:param width: either "long" or "short" or "iso8601" or "iso8601_short"
:param locale: the `Locale` object, or a locale string
:param return_z: True or False; Function returns indicator "Z"
when local time offset is 0
"""
datetime = _ensure_datetime_tzinfo(_get_datetime(datetime))
locale = Locale.parse(locale)
offset = datetime.tzinfo.utcoffset(datetime)
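    # Flatten the offset to signed seconds: negative offsets arrive as e.g.
    # timedelta(days=-1, seconds=61200) for UTC-7, so days must be folded in.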
seconds = offset.days * 24 * 60 * 60 + offset.seconds
hours, seconds = divmod(seconds, 3600)
if return_z and hours == 0 and seconds == 0:
return 'Z'
elif seconds == 0 and width == 'iso8601_short':
return u'%+03d' % hours
elif width == 'short' or width == 'iso8601_short':
pattern = u'%+03d%02d'
elif width == 'iso8601':
pattern = u'%+03d:%02d'
else:
pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
return pattern % (hours, seconds // 60)
def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME, return_city=False):
u"""Return a representation of the given timezone using "location format".
The result depends on both the local display name of the country and the
city associated with the time zone:
>>> tz = get_timezone('America/St_Johns')
>>> print(get_timezone_location(tz, locale='de_DE'))
Kanada (St. John’s) Zeit
>>> print(get_timezone_location(tz, locale='en'))
Canada (St. John’s) Time
>>> print(get_timezone_location(tz, locale='en', return_city=True))
St. John’s
>>> tz = get_timezone('America/Mexico_City')
>>> get_timezone_location(tz, locale='de_DE')
u'Mexiko (Mexiko-Stadt) Zeit'
If the timezone is associated with a country that uses only a single
timezone, just the localized country name is returned:
>>> tz = get_timezone('Europe/Berlin')
>>> get_timezone_name(tz, locale='de_DE')
u'Mitteleurop\\xe4ische Zeit'
.. versionadded:: 0.9
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if `None`, the current date and time in
UTC is assumed
:param locale: the `Locale` object, or a locale string
:param return_city: True or False, if True then return exemplar city (location)
for the time zone
:return: the localized timezone name using location format
"""
dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt or datetime.utcnow())
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
info = locale.time_zones.get(zone, {})
# Otherwise, if there is only one timezone for the country, return the
# localized country name
region_format = locale.zone_formats['region']
territory = get_global('zone_territories').get(zone)
if territory not in locale.territories:
territory = 'ZZ' # invalid/unknown
territory_name = locale.territories[territory]
if not return_city and territory and len(get_global('territory_zones').get(territory, [])) == 1:
return region_format % (territory_name)
# Otherwise, include the city in the output
fallback_format = locale.zone_formats['fallback']
if 'city' in info:
city_name = info['city']
else:
metazone = get_global('meta_zones').get(zone)
metazone_info = locale.meta_zones.get(metazone, {})
if 'city' in metazone_info:
city_name = metazone_info['city']
elif '/' in zone:
city_name = zone.split('/', 1)[1].replace('_', ' ')
else:
city_name = zone.replace('_', ' ')
if return_city:
return city_name
return region_format % (fallback_format % {
'0': city_name,
'1': territory_name
})
def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
locale=LC_TIME, zone_variant=None, return_zone=False):
r"""Return the localized display name for the given timezone. The timezone
may be specified using a ``datetime`` or `tzinfo` object.
>>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles'))
>>> get_timezone_name(dt, locale='en_US')
u'Pacific Standard Time'
>>> get_timezone_name(dt, locale='en_US', return_zone=True)
'America/Los_Angeles'
>>> get_timezone_name(dt, width='short', locale='en_US')
u'PST'
If this function gets passed only a `tzinfo` object and no concrete
`datetime`, the returned display name is independent of daylight savings
time. This can be used for example for selecting timezones, or to set the
time of events that recur across DST changes:
>>> tz = get_timezone('America/Los_Angeles')
>>> get_timezone_name(tz, locale='en_US')
u'Pacific Time'
>>> get_timezone_name(tz, 'short', locale='en_US')
u'PT'
If no localized display name for the timezone is available, and the timezone
is associated with a country that uses only a single timezone, the name of
that country is returned, formatted according to the locale:
>>> tz = get_timezone('Europe/Berlin')
>>> get_timezone_name(tz, locale='de_DE')
u'Mitteleurop\xe4ische Zeit'
>>> get_timezone_name(tz, locale='pt_BR')
u'Hor\xe1rio da Europa Central'
On the other hand, if the country uses multiple timezones, the city is also
included in the representation:
>>> tz = get_timezone('America/St_Johns')
>>> get_timezone_name(tz, locale='de_DE')
u'Neufundland-Zeit'
Note that short format is currently not supported for all timezones and
all locales. This is partially because not every timezone has a short
code in every locale. In that case it currently falls back to the long
format.
For more information see `LDML Appendix J: Time Zone Display Names
<http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
.. versionadded:: 0.9
.. versionchanged:: 1.0
Added `zone_variant` support.
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if a ``tzinfo`` object is used, the
resulting display name will be generic, i.e.
independent of daylight savings time; if `None`, the
current date in UTC is assumed
:param width: either "long" or "short"
:param uncommon: deprecated and ignored
:param zone_variant: defines the zone variation to return. By default the
variation is defined from the datetime object
passed in. If no datetime object is passed in, the
``'generic'`` variation is assumed. The following
values are valid: ``'generic'``, ``'daylight'`` and
``'standard'``.
:param locale: the `Locale` object, or a locale string
:param return_zone: True or False. If true then function
returns long time zone ID
"""
dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt)
if zone_variant is None:
if dt is None:
zone_variant = 'generic'
else:
dst = tzinfo.dst(dt)
if dst:
zone_variant = 'daylight'
else:
zone_variant = 'standard'
else:
if zone_variant not in ('generic', 'standard', 'daylight'):
raise ValueError('Invalid zone variation')
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
if return_zone:
return zone
info = locale.time_zones.get(zone, {})
# Try explicitly translated zone names first
if width in info:
if zone_variant in info[width]:
return info[width][zone_variant]
metazone = get_global('meta_zones').get(zone)
if metazone:
metazone_info = locale.meta_zones.get(metazone, {})
if width in metazone_info:
if zone_variant in metazone_info[width]:
return metazone_info[width][zone_variant]
# If we have a concrete datetime, we assume that the result can't be
# independent of daylight savings time, so we return the GMT offset
if dt is not None:
return get_timezone_gmt(dt, width=width, locale=locale)
return get_timezone_location(dt_or_tzinfo, locale=locale)
def format_date(date=None, format='medium', locale=LC_TIME):
"""Return a date formatted according to the given pattern.
>>> d = date(2007, 4, 1)
>>> format_date(d, locale='en_US')
u'Apr 1, 2007'
>>> format_date(d, format='full', locale='de_DE')
u'Sonntag, 1. April 2007'
If you don't want to use the locale default formats, you can specify a
custom date pattern:
>>> format_date(d, "EEE, MMM d, ''yy", locale='en')
u"Sun, Apr 1, '07"
:param date: the ``date`` or ``datetime`` object; if `None`, the current
date is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param locale: a `Locale` object or a locale identifier
"""
if date is None:
date = date_.today()
elif isinstance(date, datetime):
date = date.date()
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_date_format(format, locale=locale)
pattern = parse_pattern(format)
return pattern.apply(date, locale)
def format_datetime(datetime=None, format='medium', tzinfo=None,
locale=LC_TIME):
r"""Return a date formatted according to the given pattern.
>>> dt = datetime(2007, 4, 1, 15, 30)
>>> format_datetime(dt, locale='en_US')
u'Apr 1, 2007, 3:30:00 PM'
For any pattern requiring the display of the time-zone, the third-party
``pytz`` package is needed to explicitly specify the time-zone:
>>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'),
... locale='fr_FR')
u'dimanche 1 avril 2007 \xe0 17:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
>>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
... tzinfo=get_timezone('US/Eastern'), locale='en')
u'2007.04.01 AD at 11:30:00 EDT'
:param datetime: the `datetime` object; if `None`, the current date and
time is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the timezone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
"""
datetime = _ensure_datetime_tzinfo(_get_datetime(datetime), tzinfo)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
return get_datetime_format(format, locale=locale) \
.replace("'", "") \
.replace('{0}', format_time(datetime, format, tzinfo=None,
locale=locale)) \
.replace('{1}', format_date(datetime, format, locale=locale))
else:
return parse_pattern(format).apply(datetime, locale)
def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
r"""Return a time formatted according to the given pattern.
>>> t = time(15, 30)
>>> format_time(t, locale='en_US')
u'3:30:00 PM'
>>> format_time(t, format='short', locale='de_DE')
u'15:30'
If you don't want to use the locale default formats, you can specify a
custom time pattern:
>>> format_time(t, "hh 'o''clock' a", locale='en')
u"03 o'clock PM"
For any pattern requiring the display of the time-zone a
timezone has to be specified explicitly:
>>> t = datetime(2007, 4, 1, 15, 30)
>>> tzinfo = get_timezone('Europe/Paris')
>>> t = tzinfo.localize(t)
>>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
u'15:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
>>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'),
... locale='en')
u"09 o'clock AM, Eastern Daylight Time"
As that example shows, when this function gets passed a
``datetime.datetime`` value, the actual time in the formatted string is
adjusted to the timezone specified by the `tzinfo` parameter. If the
``datetime`` is "naive" (i.e. it has no associated timezone information),
it is assumed to be in UTC.
These timezone calculations are **not** performed if the value is of type
``datetime.time``, as without date information there's no way to determine
what a given time would translate to in a different timezone without
information about whether daylight savings time is in effect or not. This
means that time values are left as-is, and the value of the `tzinfo`
parameter is only used to display the timezone name if needed:
>>> t = time(15, 30)
>>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'),
... locale='fr_FR')
u'15:30:00 heure normale d\u2019Europe centrale'
>>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'),
... locale='en_US')
u'3:30:00 PM Eastern Standard Time'
:param time: the ``time`` or ``datetime`` object; if `None`, the current
time in UTC is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the time-zone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
"""
time = _get_time(time, tzinfo)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_time_format(format, locale=locale)
return parse_pattern(format).apply(time, locale)
def format_skeleton(skeleton, datetime=None, tzinfo=None, fuzzy=True, locale=LC_TIME):
r"""Return a time and/or date formatted according to the given pattern.
The skeletons are defined in the CLDR data and provide more flexibility
than the simple short/long/medium formats, but are a bit harder to use.
    They are defined using the date/time symbols without order or punctuation
and map to a suitable format for the given locale.
>>> t = datetime(2007, 4, 1, 15, 30)
>>> format_skeleton('MMMEd', t, locale='fr')
u'dim. 1 avr.'
>>> format_skeleton('MMMEd', t, locale='en')
u'Sun, Apr 1'
>>> format_skeleton('yMMd', t, locale='fi') # yMMd is not in the Finnish locale; yMd gets used
u'1.4.2007'
>>> format_skeleton('yMMd', t, fuzzy=False, locale='fi') # yMMd is not in the Finnish locale, an error is thrown
Traceback (most recent call last):
...
KeyError: yMMd
After the skeleton is resolved to a pattern `format_datetime` is called so
all timezone processing etc is the same as for that.
:param skeleton: A date time skeleton as defined in the cldr data.
:param datetime: the ``time`` or ``datetime`` object; if `None`, the current
time in UTC is used
:param tzinfo: the time-zone to apply to the time for display
:param fuzzy: If the skeleton is not found, allow choosing a skeleton that's
close enough to it.
:param locale: a `Locale` object or a locale identifier
"""
locale = Locale.parse(locale)
if fuzzy and skeleton not in locale.datetime_skeletons:
skeleton = match_skeleton(skeleton, locale.datetime_skeletons)
format = locale.datetime_skeletons[skeleton]
return format_datetime(datetime, format, tzinfo, locale)
TIMEDELTA_UNITS = (
('year', 3600 * 24 * 365),
('month', 3600 * 24 * 30),
('week', 3600 * 24 * 7),
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
('second', 1)
)
def format_timedelta(delta, granularity='second', threshold=.85,
add_direction=False, format='long',
locale=LC_TIME):
"""Return a time delta according to the rules of the given locale.
>>> format_timedelta(timedelta(weeks=12), locale='en_US')
u'3 months'
>>> format_timedelta(timedelta(seconds=1), locale='es')
u'1 segundo'
The granularity parameter can be provided to alter the lowest unit
presented, which defaults to a second.
>>> format_timedelta(timedelta(hours=3), granularity='day',
... locale='en_US')
u'1 day'
The threshold parameter can be used to determine at which value the
presentation switches to the next higher unit. A higher threshold factor
means the presentation will switch later. For example:
>>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US')
u'1 day'
>>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US')
u'23 hours'
In addition directional information can be provided that informs
the user if the date is in the past or in the future:
>>> format_timedelta(timedelta(hours=1), add_direction=True, locale='en')
u'in 1 hour'
>>> format_timedelta(timedelta(hours=-1), add_direction=True, locale='en')
u'1 hour ago'
The format parameter controls how compact or wide the presentation is:
>>> format_timedelta(timedelta(hours=3), format='short', locale='en')
u'3 hr'
>>> format_timedelta(timedelta(hours=3), format='narrow', locale='en')
u'3h'
:param delta: a ``timedelta`` object representing the time difference to
format, or the delta in seconds as an `int` value
:param granularity: determines the smallest unit that should be displayed,
the value can be one of "year", "month", "week", "day",
"hour", "minute" or "second"
:param threshold: factor that determines at which point the presentation
switches to the next higher unit
:param add_direction: if this flag is set to `True` the return value will
include directional information. For instance a
positive timedelta will include the information about
it being in the future, a negative will be information
about the value being in the past.
:param format: the format, can be "narrow", "short" or "long". (
"medium" is deprecated, currently converted to "long" to
maintain compatibility)
:param locale: a `Locale` object or a locale identifier
"""
if format not in ('narrow', 'short', 'medium', 'long'):
raise TypeError('Format must be one of "narrow", "short" or "long"')
if format == 'medium':
warnings.warn('"medium" value for format param of format_timedelta'
' is deprecated. Use "long" instead',
category=DeprecationWarning)
format = 'long'
if isinstance(delta, timedelta):
seconds = int((delta.days * 86400) + delta.seconds)
else:
seconds = delta
locale = Locale.parse(locale)
def _iter_patterns(a_unit):
if add_direction:
unit_rel_patterns = locale._data['date_fields'][a_unit]
if seconds >= 0:
yield unit_rel_patterns['future']
else:
yield unit_rel_patterns['past']
a_unit = 'duration-' + a_unit
yield locale._data['unit_patterns'].get(a_unit, {}).get(format)
for unit, secs_per_unit in TIMEDELTA_UNITS:
value = abs(seconds) / secs_per_unit
if value >= threshold or unit == granularity:
if unit == granularity and value > 0:
value = max(1, value)
value = int(round(value))
plural_form = locale.plural_form(value)
pattern = None
for patterns in _iter_patterns(unit):
if patterns is not None:
pattern = patterns[plural_form]
break
# This really should not happen
if pattern is None:
return u''
return pattern.replace('{0}', str(value))
return u''
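# Worked example of the threshold rule above (a sketch, not from the original
# source): timedelta(hours=23) is 82800 seconds, and 82800 / 86400 (the "day"
# divisor) is about 0.958, which meets the default threshold of 0.85, so the
# loop stops at "day" and the rounded value 1 yields "1 day", not "23 hours".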
def _format_fallback_interval(start, end, skeleton, tzinfo, locale):
if skeleton in locale.datetime_skeletons: # Use the given skeleton
format = lambda dt: format_skeleton(skeleton, dt, tzinfo, locale=locale)
elif all((isinstance(d, date) and not isinstance(d, datetime)) for d in (start, end)): # Both are just dates
format = lambda dt: format_date(dt, locale=locale)
elif all((isinstance(d, time) and not isinstance(d, date)) for d in (start, end)): # Both are times
format = lambda dt: format_time(dt, tzinfo=tzinfo, locale=locale)
else:
format = lambda dt: format_datetime(dt, tzinfo=tzinfo, locale=locale)
formatted_start = format(start)
formatted_end = format(end)
if formatted_start == formatted_end:
return format(start)
return (
locale.interval_formats.get(None, "{0}-{1}").
replace("{0}", formatted_start).
replace("{1}", formatted_end)
)
def format_interval(start, end, skeleton=None, tzinfo=None, fuzzy=True, locale=LC_TIME):
"""
Format an interval between two instants according to the locale's rules.
>>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "yMd", locale="fi")
u'15.\u201317.1.2016'
>>> format_interval(time(12, 12), time(16, 16), "Hm", locale="en_GB")
'12:12 \u2013 16:16'
>>> format_interval(time(5, 12), time(16, 16), "hm", locale="en_US")
'5:12 AM \u2013 4:16 PM'
>>> format_interval(time(16, 18), time(16, 24), "Hm", locale="it")
'16:18\u201316:24'
If the start instant equals the end instant, the interval is formatted like the instant.
>>> format_interval(time(16, 18), time(16, 18), "Hm", locale="it")
'16:18'
Unknown skeletons fall back to "default" formatting.
>>> format_interval(date(2015, 1, 1), date(2017, 1, 1), "wzq", locale="ja")
'2015/01/01\uff5e2017/01/01'
>>> format_interval(time(16, 18), time(16, 24), "xxx", locale="ja")
'16:18:00\uff5e16:24:00'
>>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "xxx", locale="de")
'15.01.2016 \u2013 17.01.2016'
:param start: First instant (datetime/date/time)
:param end: Second instant (datetime/date/time)
:param skeleton: The "skeleton format" to use for formatting.
:param tzinfo: tzinfo to use (if none is already attached)
:param fuzzy: If the skeleton is not found, allow choosing a skeleton that's
close enough to it.
:param locale: A locale object or identifier.
:return: Formatted interval
"""
locale = Locale.parse(locale)
# NB: The quote comments below are from the algorithm description in
# http://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats
# > Look for the intervalFormatItem element that matches the "skeleton",
# > starting in the current locale and then following the locale fallback
# > chain up to, but not including root.
interval_formats = locale.interval_formats
if skeleton not in interval_formats or not skeleton:
# > If no match was found from the previous step, check what the closest
# > match is in the fallback locale chain, as in availableFormats. That
# > is, this allows for adjusting the string value field's width,
# > including adjusting between "MMM" and "MMMM", and using different
# > variants of the same field, such as 'v' and 'z'.
if skeleton and fuzzy:
skeleton = match_skeleton(skeleton, interval_formats)
else:
skeleton = None
if not skeleton: # Still no match whatsoever?
# > Otherwise, format the start and end datetime using the fallback pattern.
return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
skel_formats = interval_formats[skeleton]
if start == end:
return format_skeleton(skeleton, start, tzinfo, fuzzy=fuzzy, locale=locale)
start = _ensure_datetime_tzinfo(_get_datetime(start), tzinfo=tzinfo)
end = _ensure_datetime_tzinfo(_get_datetime(end), tzinfo=tzinfo)
start_fmt = DateTimeFormat(start, locale=locale)
end_fmt = DateTimeFormat(end, locale=locale)
# > If a match is found from previous steps, compute the calendar field
# > with the greatest difference between start and end datetime. If there
# > is no difference among any of the fields in the pattern, format as a
# > single date using availableFormats, and return.
for field in PATTERN_CHAR_ORDER: # These are in largest-to-smallest order
if field in skel_formats:
if start_fmt.extract(field) != end_fmt.extract(field):
# > If there is a match, use the pieces of the corresponding pattern to
# > format the start and end datetime, as above.
return "".join(
parse_pattern(pattern).apply(instant, locale)
for pattern, instant
in zip(skel_formats[field], (start, end))
)
# > Otherwise, format the start and end datetime using the fallback pattern.
return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
def get_period_id(time, tzinfo=None, type=None, locale=LC_TIME):
"""
Get the day period ID for a given time.
This ID can be used as a key for the period name dictionary.
>>> get_period_names(locale="de")[get_period_id(time(7, 42), locale="de")]
u'Morgen'
:param time: The time to inspect.
:param tzinfo: The timezone for the time. See ``format_time``.
:param type: The period type to use. Either "selection" or None.
The selection type is used for selecting among phrases such as
“Your email arrived yesterday evening” or “Your email arrived last night”.
:param locale: the `Locale` object, or a locale string
:return: period ID. Something is always returned -- even if it's just "am" or "pm".
"""
time = _get_time(time, tzinfo)
seconds_past_midnight = int(time.hour * 60 * 60 + time.minute * 60 + time.second)
locale = Locale.parse(locale)
# The LDML rules state that the rules may not overlap, so iterating in arbitrary
# order should be alright.
for rule_id, rules in locale.day_period_rules.get(type, {}).items():
for rule in rules:
if "at" in rule and rule["at"] == seconds_past_midnight:
return rule_id
start_ok = end_ok = False
if "from" in rule and seconds_past_midnight >= rule["from"]:
start_ok = True
if "to" in rule and seconds_past_midnight <= rule["to"]:
# This rule type does not exist in the present CLDR data;
# excuse the lack of test coverage.
end_ok = True
if "before" in rule and seconds_past_midnight < rule["before"]:
end_ok = True
if "after" in rule and seconds_past_midnight > rule["after"]:
start_ok = True
if start_ok and end_ok:
return rule_id
if seconds_past_midnight < 43200:
return "am"
else:
return "pm"
def parse_date(string, locale=LC_TIME):
"""Parse a date from a string.
This function uses the date format for the locale as a hint to determine
the order in which the date fields appear in the string.
>>> parse_date('4/1/04', locale='en_US')
datetime.date(2004, 4, 1)
>>> parse_date('01.04.2004', locale='de_DE')
datetime.date(2004, 4, 1)
:param string: the string containing the date
:param locale: a `Locale` object or a locale identifier
"""
# TODO: try ISO format first?
format = get_date_format(locale=locale).pattern.lower()
year_idx = format.index('y')
    month_idx = format.find('m')  # find() returns -1, so the 'l' fallback below is reachable
if month_idx < 0:
month_idx = format.index('l')
day_idx = format.index('d')
indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
# FIXME: this currently only supports numbers, but should also support month
# names, both in the requested locale, and english
    numbers = re.findall(r'(\d+)', string)
year = numbers[indexes['Y']]
if len(year) == 2:
year = 2000 + int(year)
else:
year = int(year)
month = int(numbers[indexes['M']])
day = int(numbers[indexes['D']])
if month > 12:
month, day = day, month
return date(year, month, day)
def parse_time(string, locale=LC_TIME):
"""Parse a time from a string.
This function uses the time format for the locale as a hint to determine
the order in which the time fields appear in the string.
>>> parse_time('15:30:00', locale='en_US')
datetime.time(15, 30)
:param string: the string containing the time
:param locale: a `Locale` object or a locale identifier
:return: the parsed time
:rtype: `time`
"""
# TODO: try ISO format first?
format = get_time_format(locale=locale).pattern.lower()
    hour_idx = format.find('h')  # find() returns -1, so the 'k' fallback below is reachable
if hour_idx < 0:
hour_idx = format.index('k')
min_idx = format.index('m')
sec_idx = format.index('s')
indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
# FIXME: support 12 hour clock, and 0-based hour specification
# and seconds should be optional, maybe minutes too
# oh, and time-zones, of course
    numbers = re.findall(r'(\d+)', string)
hour = int(numbers[indexes['H']])
minute = int(numbers[indexes['M']])
second = int(numbers[indexes['S']])
return time(hour, minute, second)
class DateTimePattern(object):
def __init__(self, pattern, format):
self.pattern = pattern
self.format = format
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.pattern)
def __unicode__(self):
return self.pattern
def __mod__(self, other):
if type(other) is not DateTimeFormat:
return NotImplemented
return self.format % other
def apply(self, datetime, locale):
return self % DateTimeFormat(datetime, locale)
class DateTimeFormat(object):
def __init__(self, value, locale):
assert isinstance(value, (date, datetime, time))
if isinstance(value, (datetime, time)) and value.tzinfo is None:
value = value.replace(tzinfo=UTC)
self.value = value
self.locale = Locale.parse(locale)
def __getitem__(self, name):
char = name[0]
num = len(name)
if char == 'G':
return self.format_era(char, num)
elif char in ('y', 'Y', 'u'):
return self.format_year(char, num)
elif char in ('Q', 'q'):
return self.format_quarter(char, num)
elif char in ('M', 'L'):
return self.format_month(char, num)
elif char in ('w', 'W'):
return self.format_week(char, num)
elif char == 'd':
return self.format(self.value.day, num)
elif char == 'D':
return self.format_day_of_year(num)
elif char == 'F':
return self.format_day_of_week_in_month()
elif char in ('E', 'e', 'c'):
return self.format_weekday(char, num)
elif char == 'a':
# TODO: Add support for the rest of the period formats (a*, b*, B*)
return self.format_period(char)
elif char == 'h':
if self.value.hour % 12 == 0:
return self.format(12, num)
else:
return self.format(self.value.hour % 12, num)
elif char == 'H':
return self.format(self.value.hour, num)
elif char == 'K':
return self.format(self.value.hour % 12, num)
elif char == 'k':
if self.value.hour == 0:
return self.format(24, num)
else:
return self.format(self.value.hour, num)
elif char == 'm':
return self.format(self.value.minute, num)
elif char == 's':
return self.format(self.value.second, num)
elif char == 'S':
return self.format_frac_seconds(num)
elif char == 'A':
return self.format_milliseconds_in_day(num)
elif char in ('z', 'Z', 'v', 'V', 'x', 'X', 'O'):
return self.format_timezone(char, num)
else:
raise KeyError('Unsupported date/time field %r' % char)
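    # Sketch of the dispatch above (assumed inputs): with
    # value = datetime(2007, 4, 1, 15, 30), self['hh'] takes the 'h' branch
    # and zero-pads 15 % 12 to the field width, giving '03'; self['H'] -> '15'.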
def extract(self, char):
char = str(char)[0]
if char == 'y':
return self.value.year
elif char == 'M':
return self.value.month
elif char == 'd':
return self.value.day
elif char == 'H':
return self.value.hour
elif char == 'h':
return (self.value.hour % 12 or 12)
elif char == 'm':
return self.value.minute
elif char == 'a':
return int(self.value.hour >= 12) # 0 for am, 1 for pm
else:
raise NotImplementedError("Not implemented: extracting %r from %r" % (char, self.value))
def format_era(self, char, num):
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
era = int(self.value.year >= 0)
return get_era_names(width, self.locale)[era]
def format_year(self, char, num):
value = self.value.year
if char.isupper():
week = self.get_week_number(self.get_day_of_year())
if week == 0:
value -= 1
year = self.format(value, num)
if num == 2:
year = year[-2:]
return year
def format_quarter(self, char, num):
quarter = (self.value.month - 1) // 3 + 1
if num <= 2:
return ('%%0%dd' % num) % quarter
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'Q': 'format', 'q': 'stand-alone'}[char]
return get_quarter_names(width, context, self.locale)[quarter]
def format_month(self, char, num):
if num <= 2:
return ('%%0%dd' % num) % self.value.month
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'M': 'format', 'L': 'stand-alone'}[char]
return get_month_names(width, context, self.locale)[self.value.month]
def format_week(self, char, num):
if char.islower(): # week of year
day_of_year = self.get_day_of_year()
week = self.get_week_number(day_of_year)
if week == 0:
date = self.value - timedelta(days=day_of_year)
week = self.get_week_number(self.get_day_of_year(date),
date.weekday())
return self.format(week, num)
else: # week of month
week = self.get_week_number(self.value.day)
if week == 0:
date = self.value - timedelta(days=self.value.day)
week = self.get_week_number(date.day, date.weekday())
return '%d' % week
def format_weekday(self, char='E', num=4):
"""
Return weekday from parsed datetime according to format pattern.
>>> format = DateTimeFormat(date(2016, 2, 28), Locale.parse('en_US'))
>>> format.format_weekday()
u'Sunday'
'E': Day of week - Use one through three letters for the abbreviated day name, four for the full (wide) name,
five for the narrow name, or six for the short name.
>>> format.format_weekday('E',2)
u'Sun'
'e': Local day of week. Same as E except adds a numeric value that will depend on the local starting day of the
week, using one or two letters. For this example, Monday is the first day of the week.
>>> format.format_weekday('e',2)
'01'
'c': Stand-Alone local day of week - Use one letter for the local numeric value (same as 'e'), three for the
abbreviated day name, four for the full (wide) name, five for the narrow name, or six for the short name.
>>> format.format_weekday('c',1)
'1'
:param char: pattern format character ('e','E','c')
:param num: count of format character
"""
if num < 3:
if char.islower():
value = 7 - self.locale.first_week_day + self.value.weekday()
return self.format(value % 7 + 1, num)
num = 3
weekday = self.value.weekday()
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow', 6: 'short'}[num]
if char == 'c':
context = 'stand-alone'
else:
context = 'format'
return get_day_names(width, context, self.locale)[weekday]
def format_day_of_year(self, num):
return self.format(self.get_day_of_year(), num)
def format_day_of_week_in_month(self):
return '%d' % ((self.value.day - 1) // 7 + 1)
def format_period(self, char):
period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
for width in ('wide', 'narrow', 'abbreviated'):
period_names = get_period_names(context='format', width=width, locale=self.locale)
if period in period_names:
return period_names[period]
raise ValueError('Could not format period %s in %s' % (period, self.locale))
def format_frac_seconds(self, num):
""" Return fractional seconds.
Rounds the time's microseconds to the precision given by the number \
of digits passed in.
"""
value = self.value.microsecond / 1000000
return self.format(round(value, num) * 10**num, num)
def format_milliseconds_in_day(self, num):
msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
self.value.minute * 60000 + self.value.hour * 3600000
return self.format(msecs, num)
def format_timezone(self, char, num):
width = {3: 'short', 4: 'long', 5: 'iso8601'}[max(3, num)]
if char == 'z':
return get_timezone_name(self.value, width, locale=self.locale)
elif char == 'Z':
if num == 5:
return get_timezone_gmt(self.value, width, locale=self.locale, return_z=True)
return get_timezone_gmt(self.value, width, locale=self.locale)
elif char == 'O':
if num == 4:
return get_timezone_gmt(self.value, width, locale=self.locale)
        # TODO: add support for 'O' with a field width of 1
elif char == 'v':
return get_timezone_name(self.value.tzinfo, width,
locale=self.locale)
elif char == 'V':
if num == 1:
return get_timezone_name(self.value.tzinfo, width,
uncommon=True, locale=self.locale)
elif num == 2:
return get_timezone_name(self.value.tzinfo, locale=self.locale, return_zone=True)
elif num == 3:
return get_timezone_location(self.value.tzinfo, locale=self.locale, return_city=True)
return get_timezone_location(self.value.tzinfo, locale=self.locale)
        # The following branches add support for the 'X' and 'x' timezone formats
elif char == 'X':
if num == 1:
return get_timezone_gmt(self.value, width='iso8601_short', locale=self.locale,
return_z=True)
elif num in (2, 4):
return get_timezone_gmt(self.value, width='short', locale=self.locale,
return_z=True)
elif num in (3, 5):
return get_timezone_gmt(self.value, width='iso8601', locale=self.locale,
return_z=True)
elif char == 'x':
if num == 1:
return get_timezone_gmt(self.value, width='iso8601_short', locale=self.locale)
elif num in (2, 4):
return get_timezone_gmt(self.value, width='short', locale=self.locale)
elif num in (3, 5):
return get_timezone_gmt(self.value, width='iso8601', locale=self.locale)
def format(self, value, length):
return ('%%0%dd' % length) % value
def get_day_of_year(self, date=None):
if date is None:
date = self.value
return (date - date.replace(month=1, day=1)).days + 1
def get_week_number(self, day_of_period, day_of_week=None):
"""Return the number of the week of a day within a period. This may be
the week number in a year or the week number in a month.
Usually this will return a value equal to or greater than 1, but if the
first week of the period is so short that it actually counts as the last
week of the previous period, this function will return 0.
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
>>> format.get_week_number(6)
1
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
>>> format.get_week_number(6)
2
:param day_of_period: the number of the day in the period (usually
either the day of month or the day of year)
        :param day_of_week: the week day; if omitted, the week day of the
current date is assumed
"""
if day_of_week is None:
day_of_week = self.value.weekday()
first_day = (day_of_week - self.locale.first_week_day -
day_of_period + 1) % 7
if first_day < 0:
first_day += 7
week_number = (day_of_period + first_day - 1) // 7
if 7 - first_day >= self.locale.min_week_days:
week_number += 1
return week_number
PATTERN_CHARS = {
'G': [1, 2, 3, 4, 5], # era
'y': None, 'Y': None, 'u': None, # year
'Q': [1, 2, 3, 4, 5], 'q': [1, 2, 3, 4, 5], # quarter
'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month
'w': [1, 2], 'W': [1], # week
'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day
'E': [1, 2, 3, 4, 5, 6], 'e': [1, 2, 3, 4, 5, 6], 'c': [1, 3, 4, 5, 6], # week day
'a': [1], # period
'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour
'm': [1, 2], # minute
's': [1, 2], 'S': None, 'A': None, # second
'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4, 5], 'O': [1, 4], 'v': [1, 4], # zone
'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5] # zone
}
#: The pattern characters declared in the Date Field Symbol Table
#: (http://www.unicode.org/reports/tr35/tr35-dates.html#Date_Field_Symbol_Table)
#: in order of decreasing magnitude.
PATTERN_CHAR_ORDER = "GyYuUQqMLlwWdDFgEecabBChHKkjJmsSAzZOvVXx"
_pattern_cache = {}
def parse_pattern(pattern):
"""Parse date, time, and datetime format patterns.
>>> parse_pattern("MMMMd").format
u'%(MMMM)s%(d)s'
>>> parse_pattern("MMM d, yyyy").format
u'%(MMM)s %(d)s, %(yyyy)s'
Pattern can contain literal strings in single quotes:
>>> parse_pattern("H:mm' Uhr 'z").format
u'%(H)s:%(mm)s Uhr %(z)s'
An actual single quote can be used by using two adjacent single quote
characters:
>>> parse_pattern("hh' o''clock'").format
u"%(hh)s o'clock"
:param pattern: the formatting pattern to parse
"""
if type(pattern) is DateTimePattern:
return pattern
if pattern in _pattern_cache:
return _pattern_cache[pattern]
result = []
for tok_type, tok_value in tokenize_pattern(pattern):
if tok_type == "chars":
result.append(tok_value.replace('%', '%%'))
elif tok_type == "field":
fieldchar, fieldnum = tok_value
limit = PATTERN_CHARS[fieldchar]
if limit and fieldnum not in limit:
raise ValueError('Invalid length for field: %r'
% (fieldchar * fieldnum))
result.append('%%(%s)s' % (fieldchar * fieldnum))
else:
raise NotImplementedError("Unknown token type: %s" % tok_type)
_pattern_cache[pattern] = pat = DateTimePattern(pattern, u''.join(result))
return pat
def tokenize_pattern(pattern):
"""
Tokenize date format patterns.
Returns a list of (token_type, token_value) tuples.
``token_type`` may be either "chars" or "field".
For "chars" tokens, the value is the literal value.
For "field" tokens, the value is a tuple of (field character, repetition count).
:param pattern: Pattern string
:type pattern: str
:rtype: list[tuple]
"""
result = []
quotebuf = None
charbuf = []
fieldchar = ['']
fieldnum = [0]
def append_chars():
result.append(('chars', ''.join(charbuf).replace('\0', "'")))
del charbuf[:]
def append_field():
result.append(('field', (fieldchar[0], fieldnum[0])))
fieldchar[0] = ''
fieldnum[0] = 0
for idx, char in enumerate(pattern.replace("''", '\0')):
if quotebuf is None:
if char == "'": # quote started
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
quotebuf = []
elif char in PATTERN_CHARS:
if charbuf:
append_chars()
if char == fieldchar[0]:
fieldnum[0] += 1
else:
if fieldchar[0]:
append_field()
fieldchar[0] = char
fieldnum[0] = 1
else:
if fieldchar[0]:
append_field()
charbuf.append(char)
elif quotebuf is not None:
if char == "'": # end of quote
charbuf.extend(quotebuf)
quotebuf = None
else: # inside quote
quotebuf.append(char)
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
return result
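# Illustrative sketch (not in the original source): the tokenizer splits a
# pattern into field and literal-character runs, e.g.
#   tokenize_pattern("MMM d, yyyy")
#   # -> [('field', ('M', 3)), ('chars', ' '), ('field', ('d', 1)),
#   #     ('chars', ', '), ('field', ('y', 4))]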
def untokenize_pattern(tokens):
"""
Turn a date format pattern token stream back into a string.
This is the reverse operation of ``tokenize_pattern``.
:type tokens: Iterable[tuple]
:rtype: str
"""
output = []
for tok_type, tok_value in tokens:
if tok_type == "field":
output.append(tok_value[0] * tok_value[1])
elif tok_type == "chars":
if not any(ch in PATTERN_CHARS for ch in tok_value): # No need to quote
output.append(tok_value)
else:
output.append("'%s'" % tok_value.replace("'", "''"))
return "".join(output)
def split_interval_pattern(pattern):
"""
Split an interval-describing datetime pattern into multiple pieces.
> The pattern is then designed to be broken up into two pieces by determining the first repeating field.
- http://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats
>>> split_interval_pattern(u'E d.M. \u2013 E d.M.')
[u'E d.M. \u2013 ', 'E d.M.']
>>> split_interval_pattern("Y 'text' Y 'more text'")
["Y 'text '", "Y 'more text'"]
>>> split_interval_pattern(u"E, MMM d \u2013 E")
[u'E, MMM d \u2013 ', u'E']
>>> split_interval_pattern("MMM d")
['MMM d']
>>> split_interval_pattern("y G")
['y G']
>>> split_interval_pattern(u"MMM d \u2013 d")
[u'MMM d \u2013 ', u'd']
:param pattern: Interval pattern string
:return: list of "subpatterns"
"""
seen_fields = set()
parts = [[]]
for tok_type, tok_value in tokenize_pattern(pattern):
if tok_type == "field":
if tok_value[0] in seen_fields: # Repeated field
parts.append([])
seen_fields.clear()
seen_fields.add(tok_value[0])
parts[-1].append((tok_type, tok_value))
return [untokenize_pattern(tokens) for tokens in parts]
def match_skeleton(skeleton, options, allow_different_fields=False):
"""
Find the closest match for the given datetime skeleton among the options given.
This uses the rules outlined in the TR35 document.
>>> match_skeleton('yMMd', ('yMd', 'yMMMd'))
'yMd'
>>> match_skeleton('yMMd', ('jyMMd',), allow_different_fields=True)
'jyMMd'
>>> match_skeleton('yMMd', ('qyMMd',), allow_different_fields=False)
>>> match_skeleton('hmz', ('hmv',))
'hmv'
:param skeleton: The skeleton to match
:type skeleton: str
:param options: An iterable of other skeletons to match against
:type options: Iterable[str]
:return: The closest skeleton match, or if no match was found, None.
:rtype: str|None
"""
# TODO: maybe implement pattern expansion?
# Based on the implementation in
# http://source.icu-project.org/repos/icu/icu4j/trunk/main/classes/core/src/com/ibm/icu/text/DateIntervalInfo.java
# Filter out falsy values and sort for stability; when `interval_formats` is passed in, there may be a None key.
options = sorted(option for option in options if option)
if 'z' in skeleton and not any('z' in option for option in options):
skeleton = skeleton.replace('z', 'v')
get_input_field_width = dict(t[1] for t in tokenize_pattern(skeleton) if t[0] == "field").get
best_skeleton = None
best_distance = None
for option in options:
get_opt_field_width = dict(t[1] for t in tokenize_pattern(option) if t[0] == "field").get
distance = 0
for field in PATTERN_CHARS:
input_width = get_input_field_width(field, 0)
opt_width = get_opt_field_width(field, 0)
if input_width == opt_width:
continue
if opt_width == 0 or input_width == 0:
if not allow_different_fields: # This one is not okay
option = None
break
distance += 0x1000 # Magic weight constant for "entirely different fields"
elif field == 'M' and ((input_width > 2 and opt_width <= 2) or (input_width <= 2 and opt_width > 2)):
distance += 0x100 # Magic weight for "text turns into a number"
else:
distance += abs(input_width - opt_width)
if not option: # We lost the option along the way (probably due to "allow_different_fields")
continue
if not best_skeleton or distance < best_distance:
best_skeleton = option
best_distance = distance
if distance == 0: # Found a perfect match!
break
return best_skeleton
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
35b0929e7702b7fd1ac6ea4d990530148fa577fc
|
21e371f62170f6baa31a91c6425494cf6f80de85
|
/reactdb/models.py
|
f416cbc21a7b3fc3c8006930aebfed3c0c6c9297
|
[] |
no_license
|
Grant1219/reactdb
|
2c9e574e154332b6dc9b57c69d1656bee647fe3a
|
700c6fb562c6109052292c879d0a487471129a40
|
refs/heads/master
| 2021-05-11T21:50:21.084347
| 2018-01-15T00:22:49
| 2018-01-15T00:22:49
| 117,478,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
from sqlalchemy import (
Table,
Column,
Integer,
BigInteger,
Unicode,
ForeignKey
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship,
backref,
)
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session (sessionmaker (extension=ZopeTransactionExtension () ) )
Base = declarative_base ()
image_keyword_association = Table ('image_keyword_association', Base.metadata,
Column ('image_id', BigInteger, ForeignKey ('image.id', ondelete = 'cascade') ),
Column ('keyword_id', BigInteger, ForeignKey ('keyword.id', ondelete = 'cascade') ),
mysql_engine = 'InnoDB'
)
class InnoDB (object):
__table_args__ = {'mysql_engine': 'InnoDB'}
class Image (Base, InnoDB):
__tablename__ = 'image'
id = Column (BigInteger, primary_key = True)
filename = Column (Unicode (30), unique = True)
keywords = relationship ('Keyword', secondary = image_keyword_association, backref = 'images')
def __init__ (self, filename):
self.filename = filename
class Keyword (Base, InnoDB):
__tablename__ = 'keyword'
id = Column (BigInteger, primary_key = True)
value = Column (Unicode (30), unique = True)
def initialize_db (engine):
DBSession.configure (bind = engine)
Base.metadata.bind = engine
Base.metadata.create_all (engine)
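# Hypothetical usage sketch (the engine URL and sample data are assumptions,
# not part of this project): initialize the schema, then attach keywords to
# an image through the association table declared above.
#
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# initialize_db(engine)
# img = Image(u'sunset.jpg')
# img.keywords.append(Keyword(value=u'sky'))
# DBSession.add(img)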
|
[
"grant@tuxinator.net"
] |
grant@tuxinator.net
|
f0f593803b7a6cff4b061adf96277989a82afbb1
|
d7ad10f190eab923697e2694b6f4f12a2ca53403
|
/plugins/gateway/gateway.py
|
3faf282507ef27bdbe55af27e3f7301e27f32c9d
|
[
"MIT"
] |
permissive
|
jtimon/multi-ln-demo
|
54425d6dd9eafc2526e99b99eee58a1066416428
|
b71ef12ba38b146fca06494bc910b48948e8c74e
|
refs/heads/master
| 2022-12-10T10:35:23.366527
| 2020-04-07T02:42:45
| 2020-04-07T02:43:13
| 189,696,084
| 6
| 0
| null | 2022-12-08T05:26:55
| 2019-06-01T05:54:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
#!/usr/bin/env python3
"""Simple plugin to allow testing while closing of HTLC is delayed.
"""
from requests.exceptions import ConnectionError
import requests
from pyln.client import Plugin
plugin = Plugin()
# Example payment:
# {'label': 'from_regtest_to_liquid-regtest_label_5ad2ba3afc73bb3d3db3e80909c75501913abdf79391be1293d0c14fb43dc5b2', 'preimage': '6888a1e103f87233cf8f575de7f6c0db2b0783cd3d2144c20023b0eaa1995eba', 'msat': '1000msat'}
# Example invoice:
# {'label': 'from_regtest_to_liquid-regtest_label_5ad2ba3afc73bb3d3db3e80909c75501913abdf79391be1293d0c14fb43dc5b2', 'bolt11': 'lnbcrt10n1p0xsfkjpp5tdknedz4t3uhmefjh6dpcgyn55kq6zqr9u8nk0r00rklwx9q7cxsdtzveex7m2lxpnrjvfc8pnrzvmrvgmkyvnrxuckvvnpxven2efnvy6xvcenxguxye34vfjkydpnxccrzvnpve3kzdfexp3rzcf3xy6rvdn9xgerqdjlw3h47wtx8qmk2c348qcxywt9x4nrzvtyvverzvt989nxyd3kv93xyvek8yunjwfexq6rge3cvejnzdpk8qcrzvfkxgenjvenxc6rywpkvvm97cn0d36rzv2lx4skgvnzvyekzenrxuekyc3nvsekgc3nv5urqwfs893nwdf4xqcnjvfnv93xge3h8yenjvtzv5cnywfnvscxxvf5ve3rgvmyvv6kyvjlv3jhxcmjd9c8g6t0dcxqzpucqp2sp52k69yp29ykwxk4al4cg76g32hskuz33llfj7046jna674yle78lq9qy9qsq8j8ll02p7d7qkvc0ns39rlt3hnse8jd5g5d64ehnrdw33wt5up4zwcyfnws28d6ljt3n6hlcfyh4zu9crdnfwkaswfw62379xukj2fqqgullzd', 'payment_hash': '5b6d3cb4555c797de532be9a1c2093a52c0d08032f0f3b3c6f78edf718a0f60d', 'msatoshi': 1000, 'amount_msat': 1000msat, 'status': 'unpaid', 'description': 'from_0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206_to_9f87eb580b9e5f11dc211e9fb66abb3699999044f8fe146801162393364286c6_bolt11_5ad2ba3afc73bb3d3db3e80909c75501913abdf79391be1293d0c14fb43dc5b2_description', 'expires_at': 1583884046}
def has_error(var_name, var_val):
plugin.log('GATEPAY: %s (%s)' % (var_name, var_val))
return not isinstance(var_val, dict) or 'error' in var_val
@plugin.hook('invoice_payment')
def on_payment(payment, plugin, **kwargs):
gateway = plugin.get_option('gateway')
if gateway == '':
plugin.log('GATEWAY: WARNING: All payments are accepted if gateway is not configured')
return {'result': 'continue'}
invoice = plugin.rpc.listinvoices(payment['label'])['invoices'][0]
plugin.log('GATEWAY: receiving a payment:\n%s' % invoice)
payment_hash = invoice['payment_hash']
try:
gateway_confirm_payment_result = requests.post(gateway + "/confirm_src_payment", data={
'payment_hash': payment_hash,
'payment_preimage': payment['preimage'],
}).json()
except ConnectionError as e:
plugin.log('GATEWAY: WARNING: Accepting payment because gateway seems down (%s)' % payment_hash)
plugin.log('GATEWAY: WARNING: Exception: %s' % str(e))
return {'result': 'continue'}
if has_error('gateway_confirm_payment_result', gateway_confirm_payment_result):
if gateway_confirm_payment_result['error'] == 'Unkown payment request %s.' % payment_hash:
            plugin.log('GATEWAY: WARNING: Accepting payment with payment_hash unknown to gateway (%s)' % payment_hash)
return {'result': 'continue'}
plugin.log('GATEWAY: WARNING: Rejecting payment with payment_hash %s' % payment_hash)
plugin.log('GATEWAY: Rejection error: %s' % gateway_confirm_payment_result['error'])
plugin.rpc.delinvoice(payment['label'], 'unpaid')
return {'result': 'reject'}
return {'result': 'continue'}
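# Sketch of the gateway exchange above (endpoint and fields are taken from
# this file; the response format is an assumption): the plugin POSTs
# payment_hash and payment_preimage to {gateway}/confirm_src_payment, and a
# JSON reply without an 'error' key is treated as confirmation.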
plugin.add_option('gateway', '', 'A gateway you control.')
plugin.run()
|
[
"jtimon@jtimon.cc"
] |
jtimon@jtimon.cc
|
eb9f6d220a2e45524d1b0cab6ac5105592c1b351
|
cb66cbf728412a5d07200079076785404d45ad9a
|
/otmods/ot.py
|
80c34765ec074a25540128b8b8fe74333a5ff2ee
|
[
"MIT"
] |
permissive
|
chomes/overtime_counter
|
13e405c5c5a02832098521f6abbc3d35a2d23ac7
|
b3787b950747bd13c664c78c97f1531b19a9af0f
|
refs/heads/master
| 2020-05-18T13:36:51.457807
| 2019-05-02T15:31:35
| 2019-05-02T15:31:35
| 184,445,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,393
|
py
|
"""
Script used to manage overtime; creates a database to save information to and read from when needed.
Author: chomes@
Version: 1.3
built using pyinstaller
"""
from datetime import date, datetime
import shelve
import getpass
import csv
import calendar
from pathlib import Path
import shutil
import sys
# Overtime class for the entire DB
class OverTime:
# Defining static variables
def __init__(self):
self.user = getpass.getuser()
self.db = "otdb-{}.shelve".format(self.user)
# Creating the DB or opening and starting welcome screen
def startup(self):
if Path(self.db).is_file():
print("Loading DB")
db = shelve.open(self.db)
print("Welcome {} !".format(db["User"]["shortname"]))
db.close()
else:
print("DB has not been created yet, lets get some info from you!")
while True:
usn = input("The system detects your Domain user is {} is this correct? y/n: "
"".format(self.user)).lower()
if usn == "n":
new_user = input("Please type your correct username: ")
confirm = input("You have chosen {} is this correct? y/n: ".format(new_user)).lower()
if confirm == "y":
self.user = new_user
break
elif confirm == "n":
print("Lets try again then!")
continue
elif usn == "y":
print("Great! Let's continue")
break
while True:
name = input("What would you like me to call you when using the db? ")
confirm = input("You have chosen {} is this correct? y/n: ".format(name)).lower()
if confirm == "y":
print("Great! Lets continue")
break
elif confirm == "n":
print("Ok lets try again!")
continue
print("Almost there! We just need to ask a few more questions just to save this information for later!")
while True:
otn = float(input("Please tell me your OT rate when working out of your normal hours: "))
otb = float(input("Please tell me your OT rate when working in your hours but on bank holidays: "))
otnb = float(input("Now tell me your OT when covering on bank holidays out of hours: "))
site = input("What is your primary site? For example when working on tickets your site is LHR54: "
"").upper()
print("You have stated that your normal OT is {}, you're working bank holiday OT is {},".format(otn,
otb))
print("your OT when covering on bank holiday is {} and your site is {}.".format(otnb, site))
confirm = input("Is this correct? y/n: ").lower()
if confirm == "y":
print("Great! one last question and we're done!")
break
elif confirm == "n":
print("Ok lets check those figures again")
continue
while True:
manager = input("Finally, tell me the username of your manager: ").lower()
confirm = input("You've said that your manager is {} is this correct? y/n: ".format(manager))
if confirm == "y":
print("OK! Let's make this database!")
break
if confirm == "n":
print("Ok, lets try this again")
continue
with shelve.open(self.db) as db:
create_user = {"username": self.user, "shortname": name, "OT": {"normal": otn,
"normal_bankhol": otb,
"extra_bankhol": otnb},
"site": site,
"manager": manager}
db["User"] = create_user
db["OT"] = {}
db.close()
print("DB is created!")
endscript = input("Do you want to exit now? y/n: ").lower()
if endscript == "y":
print("Good bye!")
sys.exit()
elif endscript == "n":
print("Ok lets start doing some OT!")
self.monthly_check()
    # Will check if you've gone past a month; if so, it compiles the OT into a
    # csv file to attach to a ticket, deletes OT older than last month, and
    # starts counting again
def checker(self):
db = shelve.open(self.db)
if date.today().isoformat() in db["OT"]:
if db["OT"][date.today().isoformat()]["status"] == "complete":
print("Looks like you have OT already for today!")
db.close()
self.hot_options()
elif db["OT"][date.today().isoformat()]["status"] == "pending":
print("You haven't finished your OT, lets go!")
cot = "pending"
cday = date.today().isoformat()
db.close()
self.not_options(cot, cday)
else:
print("You haven't started your OT yet, lets go!")
cot = "none"
cday = date.today().isoformat()
db.close()
self.not_options(cot, cday)
# Method for exporting OT into a csv file and purging it.
def export_tocsv(self):
total_hours = self.calculate_hours()
        # Wrap around the year so a January export is labelled December.
        month = calendar.month_name[(date.today().month - 2) % 12 + 1]
check_otrate = self.calculate_otrate()
if isinstance(check_otrate, list):
ratehour = self.cal_multi_ratehour(check_otrate)
ottype = "Rates & hours \n"
for key, value in ratehour["rates"].items():
ottype += f"rate: {key} hours: {value} \n"
else:
ottype = f"Total hours {total_hours} \n"
ottype += f"Rate {check_otrate}"
db = shelve.open(self.db)
with open("last_month_OT.csv", "w") as csvfile:
fieldnames = ["date", "hours", "reason", "ot type"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({"date": month, "hours": ottype})
writer.writeheader()
for key, value in db["OT"].items():
writer.writerow({"date": key, "hours": value["hours"],
"reason": value["purpose"], "ot type": value["ot_type"]})
csvfile.close()
db.close()
print("Purging DB of previous OT ")
self.purge_db()
sys.exit()
# Method for saving the current OT to a csv file to view.
def cot_tocsv(self):
print("Creating CSV")
total_hours = self.calculate_hours()
        # This export covers the current month's OT, so label it accordingly.
        month = calendar.month_name[date.today().month]
check_otrate = self.calculate_otrate()
if isinstance(check_otrate, list):
ratehour = self.cal_multi_ratehour(check_otrate)
ottype = "Rates and hours \n"
for key, value in ratehour["rates"].items():
ottype += f"rate: {key} hours: {value} \n"
else:
ottype = f"Total hours {total_hours} \n"
ottype += f"Rate {check_otrate}"
db = shelve.open(self.db)
with open("last_month_OT.csv", "w") as csvfile:
fieldnames = ["date", "hours", "reason", "ot type"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({"date": month, "hours": ottype})
writer.writeheader()
for key, value in db["OT"].items():
writer.writerow({"date": key, "hours": value["hours"],
"reason": value["purpose"], "ot type": value["ot_type"]})
csvfile.close()
db.close()
print("Your OT will be in a file called current_OT.csv for you to view good bye!")
sys.exit()
# Temporary until fix bug to post on encrypted tickets
def temp_csv(self):
print("Creating csv")
db = shelve.open(self.db)
with open("current_OT.csv", "w") as csvfile:
fieldnames = ["date", "hours", "reason", "ot type"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key, value in db["OT"].items():
writer.writerow({"date": key, "hours": value["hours"], "reason": value["purpose"],
"ot type": value["ot_type"]})
csvfile.close()
db.close()
# Method for calculating total hours.
def calculate_hours(self):
db = shelve.open(self.db)
hour_counter = 0
for day, info in db["OT"].items():
hour_counter += info["hours"]
db.close()
return hour_counter
# Method for calculating multiple rates
def calculate_otrate(self):
db = shelve.open(self.db)
multi = []
adding = []
for key, value in db["OT"].items():
if value["ot_type"] not in adding:
adding.append(value["ot_type"])
single = adding[0]
poten = adding[0]
for counter in adding:
if counter != single:
single = counter
multi.append(counter)
else:
single = counter
db.close()
if not multi:
return single
else:
if poten not in multi:
multi.append(poten)
return multi
else:
return multi
# Method for calculating multi rates and hours
def cal_multi_ratehour(self, otrates):
db = shelve.open(self.db)
rate_n_hours = {"rates": {}}
for rates in otrates:
rate_n_hours["rates"].update({rates: 0})
for key, value in db["OT"].items():
for rate, hours in rate_n_hours["rates"].items():
if value["ot_type"] == rate:
rate_n_hours["rates"][rate] += value["hours"]
return rate_n_hours
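    # Illustrative sketch (assumed data, not from a real DB): with two entries
    # at rate 1.5 (4 and 2 hours) and one at rate 2.0 (8 hours),
    # cal_multi_ratehour([1.5, 2.0]) returns {"rates": {1.5: 6, 2.0: 8}},
    # i.e. the hours summed per distinct OT rate.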
# Method for purging dict of previous months OT
def purge_db(self):
print("Backing up database if something goes wrong please use that instead")
shutil.copyfile(self.db, "{}.bk".format(self.db))
print("Commencing the mighty purge!")
db = shelve.open(self.db, writeback=True)
remove = []
for day in db["OT"].keys():
            temp = datetime.strptime(day, "%Y-%m-%d")
            # Compare (year, month) so a January run correctly purges December.
            if (date.today().year, date.today().month) > (temp.year, temp.month):
remove.append(day)
for key in remove:
del db["OT"][key]
db.close()
print("DB has been purged of all previous months items.")
# Method for checking if the OT has gone over the month, will be done every time the db is opened.
    def monthly_check(self):
        print("Checking DB for a previous month's OT; if you haven't gone over last month,"
              " we'll take you to the next menu")
        db = shelve.open(self.db)
        # Compare (year, month) tuples so the check also works across a year
        # boundary, and scan every recorded day rather than only the first one.
        today = (date.today().year, date.today().month)
        has_old_ot = any(
            today > (parsed.year, parsed.month)
            for parsed in (datetime.strptime(day, "%Y-%m-%d") for day in db["OT"])
        )
        db.close()
        if has_old_ot:
            print("Looks like you're on a new month, we'll get your OT ready for you")
            print("Pulling all previous OT from the database")
            self.export_tocsv()
        else:
            return "Still on the current month, continuing programme"
# Method for options for people who have OT already
def hot_options(self):
print("So you already have OT for the day so here's your list of available options: ")
choose = input("""
1) Print out your current OT to csv (THIS WILL NOT PURGE THE DB IT'S FOR VIEWING ONLY!)
2) Change your user settings
3) Quit
""")
if choose == "1":
self.cot_tocsv()
sys.exit()
elif choose == "2":
self.settings()
elif choose == "3":
print("Take care!")
sys.exit()
# Method for pre-selecting hours for OT
def preselect(self, time, current_day):
ot_desc = input("Please give a brief reason for the OT, this can be a ticket or for example covering someone"
"this will go in the correspondence of the ticket when the month is over: ")
ot_type = input("""
Choose from the following:
1) Normal OT
2) Bank holiday OT while on shift
3) Out of hours bank holiday OT (Working on bank holiday out of your shift pattern)
""")
print("Creating OT")
db = shelve.open(self.db, writeback=True)
if ot_type == "1":
ot_type = db["User"]["OT"]["normal"]
elif ot_type == "2":
ot_type = db["User"]["OT"]["normal_bankhol"]
elif ot_type == "3":
ot_type = db["User"]["OT"]["extra_bankhol"]
db["OT"].update({current_day: {"hours": time, "purpose": ot_desc, "status": "complete", "ot_type": ot_type}})
print("Your OT for {} has been recorded in the db have a good day {}!".format(current_day,
db["User"]["shortname"]))
db.close()
sys.exit()
# Method for creating calculator for manual OT
def calculator(self, action, day):
if action == "start":
ot_desc = input(
"Please give a brief reason for the OT, this can be a ticket or for example covering someone"
"this will go in the correspondence of the ticket when the month is over: ")
ot_type = input("""
Choose from the following:
1) Normal OT
2) Bank holiday OT while on shift
3) Out of hours bank holiday OT (Working on bank holiday out of your shift pattern)
""")
db = shelve.open(self.db, writeback=True)
start_time = datetime.now()
if ot_type == "1":
ot_type = db["User"]["OT"]["normal"]
elif ot_type == "2":
ot_type = db["User"]["OT"]["normal_bankhol"]
elif ot_type == "3":
ot_type = db["User"]["OT"]["extra_bankhol"]
db["OT"].update({day: {"hours": start_time, "purpose": ot_desc, "status": "pending", "ot_type": ot_type}})
print("Ok your start time has been recorded {}, start the app again"
" when you're ready to stop".format(db["User"]["shortname"]))
db.close()
sys.exit()
elif action == "stop":
print("Calculating your time")
end_time = datetime.now()
db = shelve.open(self.db, writeback=True)
start_time = db["OT"][day]["hours"]
ttime = end_time - start_time
ttime = ttime.seconds / 60
print("Calculating time in hours and minutes")
hours = 0
while True:
if ttime > 60:
hours += 1
ttime -= 60
continue
else:
break
ttime = round(ttime)
final = f"{hours}.{ttime}"
final = float(final)
print("Updating database with todays time")
db["OT"][day]["hours"] = final
db["OT"][day]["status"] = "complete"
print("Ok {} your OT has been recorded for the day, your total time is {} hours"
"closing the DB have a nice day!".format(db["User"]["shortname"], final))
db.close()
sys.exit()
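    # Worked sketch of the arithmetic above (assumed times): starting at 09:00
    # and stopping at 10:37 gives ttime = 97 minutes, which the loop splits
    # into hours = 1 and ttime = 37, stored as the float 1.37 -- an "h.mm"
    # encoding of the duration rather than decimal hours.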
# Method for changing names
def quick_sn(self, change_name):
db = shelve.open(self.db, writeback=True)
db["User"]["shortname"] = change_name
print("By the power of Grayskull your new name is.. {} !".format(change_name))
db.close()
sys.exit()
# Method for changing managers username
def quick_manman(self, change_name):
db = shelve.open(self.db, writeback=True)
db["User"]["manager"] = change_name
print("Your new managers username is {} !".format(change_name))
db.close()
sys.exit()
# Method for changing your site location
def quick_site(self, change_site):
db = shelve.open(self.db, writeback=True)
db["User"]["site"] = change_site
print("You're now the fresh prince of a dc called {} !".format(change_site))
db.close()
sys.exit()
# Method for settings menu
def settings(self):
print("Here's the settings menu")
print("Please note most of these settings can be changed using arguments with the programme")
print("After you've changed the settings we'll provide you the argument you need to run in the future"
"just incase you want to do it the quick and lazy way cause who doesn't love being lazy :)")
while True:
picker = input("""Pick a setting to change:
1) Change your shortname
2) Change your managers username
3) Change your site
4) Export to CSV file
5) Quit
""")
if picker == "1":
db = shelve.open(self.db, writeback=True)
change_name = input("Ok {} what do you want to change your name to? ".format(db["User"]["shortname"]))
conf = input("You've chosen {} is this correct? y/n ".format(change_name)).lower()
if conf == "y":
db["User"]["shortname"] = change_name
print("By the power of Grayskull your new name is.. {} !".format(change_name))
db.close()
print("If you want to change this automatically simply run the programme with the "
"following argument: ot --changename NEW_NAME please make sure they're no spaces!")
break
elif conf == "n":
print("Ok lets try again")
continue
elif picker == "2":
db = shelve.open(self.db, writeback=True)
change_name = input("Ok {} what do you want to "
"change your managers username to to? ".format(db["User"]["shortname"])).lower()
conf = input("You've chosen {} is this correct? y/n ".format(change_name)).lower()
if conf == "y":
db["User"]["manager"] = change_name
print("Your new managers username is {} !".format(change_name))
db.close()
print("If you want to change this automatically simply run the programme with the "
"following argument: ot --changemanager USERNAME please make sure they're no spaces!")
break
elif conf == "n":
print("Ok lets try again")
continue
elif picker == "3":
db = shelve.open(self.db, writeback=True)
change_site = input("Ok {} what do you want to "
"change your new site to? ".format(db["User"]["shortname"])).upper()
conf = input("You've chosen {} is this correct? y/n ".format(change_site)).lower()
if conf == "y":
db["User"]["site"] = change_site
print("You're now the fresh prince of a dc called {} !".format(change_site))
db.close()
print("If you want to change this automatically simply run the programme with the "
"following argument: ot --changesite SITE please make sure they're no spaces!")
break
elif conf == "n":
print("Ok lets try again")
continue
elif picker == "4":
self.cot_tocsv()
elif picker == "5":
print("Take care!")
sys.exit()
# Method for no OT options
def not_options(self, confirm, current_day):
if confirm == "pending":
print("So it seems like you already are calculating OT, so here's your current options: ")
choose = input("""
1) Finish calculating OT for the day.
2) Change your user settings.
3) Quit
""")
if choose == "1":
self.calculator("stop", current_day)
elif choose == "2":
self.settings()
elif choose == "3":
print("Take care!")
sys.exit()
else:
print("Right lets get your OT sorted out for you")
print("Now you can either choose to calculate it via stop watch or choose from a pre-selected time")
print("Just so you know the stop watch rounds up to the closest minute")
print("So here are your options!")
choose = input("""
1) 12 hours OT
2) 8 hours OT
3) 4 hours OT
4) Custom hours
5) Calculate your OT by timer
6) Change your user settings
7) Quit
""")
if choose == "1":
self.preselect(12, current_day)
elif choose == "2":
self.preselect(8, current_day)
elif choose == "3":
self.preselect(4, current_day)
elif choose == "4":
hours = float(input("How many hours do you want to OT? "))
self.preselect(hours, current_day)
elif choose == "5":
self.calculator("start", current_day)
elif choose == "6":
self.settings()
elif choose == "7":
print("Take care!")
sys.exit()
|
[
"josejam@amazon.co.uk"
] |
josejam@amazon.co.uk
|
cc3df1f2d798d105c86966677a61715ccea7c4e3
|
60f2834c463c47e0dec1ae02c0afc8ff401b383e
|
/pythonbegin/learn_python/magicians8_9.py
|
024fe73f2f1fe8daa926d23bd8c061f1281bc73e
|
[] |
no_license
|
codenewac/python
|
9aed0987f7d00c4d1a13850a5535c73ababb290f
|
a925362c84934284cb69a84185051bdb213a6a20
|
refs/heads/master
| 2021-07-01T08:04:00.009483
| 2020-02-14T08:47:11
| 2020-02-14T08:47:11
| 240,200,618
| 0
| 0
| null | 2021-06-10T22:34:24
| 2020-02-13T07:21:58
|
HTML
|
UTF-8
|
Python
| false
| false
| 486
|
py
|
magicians_name=['cady','bob','mack','penny','tiara']
def show_magicians(magicians):
"""打印魔术师的名字"""
for magician in magicians:
print("Hello! " + magician.title())
def make_magicians(magicians):
"""对魔术师列表修改"""
for number in range(5):
magicians[number]='the Great '+magicians[number]
return magicians
magicians_name_copy=make_magicians(magicians_name[:])
show_magicians(magicians_name)
show_magicians(magicians_name_copy)
|
[
"2018307070118@cau.edu.cn"
] |
2018307070118@cau.edu.cn
|
9f51398f8e1742b2e1d28b128a47717e3ca7e4ca
|
cf1e72bf5120ee8f89f4c152cb83b962f6355f6e
|
/kernel_benchmarks/popart/test_lstm_popart.py
|
3349d0135a33ca874dd1036331c83ddce19c43b6
|
[
"MIT"
] |
permissive
|
Adamage/tutorials
|
0c57a4b02c7adbdb542fd87a8a46b2b8dbb63949
|
b6600c052613909dbec378fea4a69deff46004dc
|
refs/heads/main
| 2023-08-25T18:09:03.783734
| 2021-10-29T13:29:33
| 2021-10-29T13:29:33
| 407,079,255
| 0
| 0
|
NOASSERTION
| 2021-10-21T07:10:39
| 2021-09-16T08:10:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import subprocess
import sys
import unittest
import pytest
def run_lstm(batch_size, timesteps, hidden_size, extra_args=None):
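    """Launch the lstm.py benchmark in a subprocess with the given shape args and return its decoded stdout."""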
py_version = "python" + str(sys.version_info[0])
cmd = [py_version, "lstm.py",
"--batch-size", str(batch_size),
"--timesteps", str(timesteps),
"--hidden-size", str(hidden_size)]
# Accommodate framework-specific args
if extra_args:
cmd.extend(extra_args)
cwd = os.path.dirname(__file__)
try:
out = subprocess.check_output(
cmd, cwd=cwd, stderr=subprocess.PIPE).decode("utf-8")
except subprocess.CalledProcessError as e:
print(f"TEST FAILED")
print(f"stdout={e.stdout.decode('utf-8',errors='ignore')}")
print(f"stderr={e.stderr.decode('utf-8',errors='ignore')}")
raise
return out
class TestPopARTLSTMSyntheticBenchmarks(unittest.TestCase):
"""Tests for the popART LSTM synthetic benchmarks"""
@classmethod
def setUpClass(cls):
pass
# LSTM inference
@pytest.mark.ipus(1)
@pytest.mark.category1
def test_lstm_inference_b256_s25_h1024(self):
out = run_lstm(batch_size=256, timesteps=25, hidden_size=1024)
@pytest.mark.ipus(1)
@pytest.mark.category1
def test_lstm_inference_b128_s50_h1536(self):
out = run_lstm(
batch_size=128,
timesteps=50,
hidden_size=1536,
extra_args=['--lstm-options={\"availableMemoryProportion\":\"0.55\"}']
)
@pytest.mark.ipus(1)
@pytest.mark.category1
def test_lstm_inference_b64_s25_h2048(self):
out = run_lstm(batch_size=64, timesteps=25, hidden_size=2048)
@pytest.mark.ipus(1)
@pytest.mark.category1
def test_lstm_inference_b1024_s150_h256(self):
out = run_lstm(batch_size=1024, timesteps=150, hidden_size=256)
@pytest.mark.ipus(1)
@pytest.mark.category1
def test_lstm_inference_b1024_s25_h512(self):
out = run_lstm(batch_size=1024, timesteps=25, hidden_size=512)
|
[
"louisev@graphcore.ai"
] |
louisev@graphcore.ai
|
c65a2c32b78315ed3209d336aa5b4220babed4df
|
82b194b063eadfb57d3e3e83b41279c122a33b53
|
/movies/migrations/0008_auto_20160119_0904.py
|
f25c3949a69e05d617e651a5b58cc1f9a434f7cb
|
[] |
no_license
|
AnupKandalkar/ssdb_django_angular_heroku
|
8c02a3a8751ffaf5957895bf4a27add2fe7d004a
|
91619f128728d42f15e26dd0c57ad36fab1fd79c
|
refs/heads/master
| 2021-01-21T21:40:16.190289
| 2019-01-16T17:36:08
| 2019-01-16T17:36:08
| 50,094,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('movies', '0007_auto_20160119_0902'),
]
operations = [
migrations.AlterField(
model_name='moviesdata',
name='popularity',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='moviesdata',
name='ssdb_score',
field=models.IntegerField(default=0),
),
]
|
[
"kandalkar.a87@gmail.com"
] |
kandalkar.a87@gmail.com
|
f340d988bb9d2451fa121b0517affaf95fdea34d
|
3e2f9d3ab9303ab54ff26ca17a30275e5b666003
|
/payment/urls.py
|
cf53a280132534d60ff508808f331c77234bf297
|
[] |
no_license
|
Alisher007/e-shop
|
6721c156e5676514d7567d3da6c29e20eabaa417
|
ea45849b4711f8ca7146dab120e1991e7625f04f
|
refs/heads/master
| 2022-12-14T00:16:59.277865
| 2020-09-10T07:44:33
| 2020-09-10T07:44:33
| 293,895,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
from django.urls import path
from . import views
app_name='payment'
urlpatterns = [
path('', views.index, name="index"),
path('charge/', views.charge, name="charge"),
path('success/', views.successMsg, name="success"),
]
|
[
"alisher.khalikulov@jaresorts.com"
] |
alisher.khalikulov@jaresorts.com
|
07b886621333aed549ac0ed9f78af7cb294e0f0f
|
c2370387878107e67381400ff7ca5fc7c8d21050
|
/apirest/apirest/wsgi.py
|
4fa9212721a86faff300620cef2f84266ecc8f85
|
[] |
no_license
|
Linaerith/Projet_PfDA_2020
|
e5e369fa28d9dafcaaa65aac6027315f1f4bc7a3
|
a6e1b19cb87dca57c42e80b899233d56d734d3d8
|
refs/heads/master
| 2020-12-26T12:27:32.255173
| 2020-01-31T21:00:28
| 2020-01-31T21:00:28
| 237,510,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for apirest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apirest.settings')
application = get_wsgi_application()
|
[
"noreply@github.com"
] |
Linaerith.noreply@github.com
|
f6227272c3c000017f7eb895200542e0aa7c4100
|
57cfc4dfa84fae4dbe4e83e8b5bcfd36e985694f
|
/bom_scripts/bom_csv_grouped_by_value_with_fp.py
|
84298a254a90e3d0473fd30044a0d42f839850f9
|
[
"MIT"
] |
permissive
|
wkevina/micronome-pcb
|
2fe1f6f2dc898fb3d313f1cce0ee3c66843c0c3c
|
dc3d86c2723a2c7115eb68436a7f903cc8f0a9b0
|
refs/heads/master
| 2021-01-01T05:27:50.602139
| 2016-05-11T21:39:41
| 2016-05-11T21:39:41
| 58,574,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
#
# Example python script to generate a BOM from a KiCad generic netlist
#
# Example: Sorted and Grouped CSV BOM
#
"""
@package
Generate a Tab delimited list (csv file type).
Components are sorted by ref and grouped by value with same footprint
Fields are (if exist)
'Ref', 'Qnty', 'Value', 'Cmp name', 'Footprint', 'Description', 'Vendor'
"""
# Import the KiCad python helper module and the csv formatter
import kicad_netlist_reader
import csv
import sys
# Generate an instance of a generic netlist, and load the netlist tree from
# the command line option. If the file doesn't exist, execution will stop
net = kicad_netlist_reader.netlist(sys.argv[1])
# Open a file to write to, if the file cannot be opened output to stdout
# instead
try:
f = open(sys.argv[2], 'w')
except IOError:
e = "Can't open output file for writing: " + sys.argv[2]
print(__file__, ":", e, sys.stderr)
f = sys.stdout
# Create a new csv writer object to use as the output formatter
out = csv.writer(f, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
# Output a set of rows for a header providing general information
# out.writerow(['Source:', net.getSource()])
# out.writerow(['Date:', net.getDate()])
# out.writerow(['Tool:', net.getTool()])
# out.writerow( ['Generator:', sys.argv[0]] )
# out.writerow(['Component Count:', len(net.components)])
out.writerow(['Ref', 'Qty', 'Value', 'Cmp name', 'Footprint', 'Description', 'Manufacturer', 'Manufacturer\'s Part #'])
# Get all of the components in groups of matching parts + values
# (see ky_generic_netlist_reader.py)
grouped = net.groupComponents()
# Output all of the component information
for group in grouped:
refs = ""
# Add the reference of every component in the group and keep a reference
# to the component so that the other data can be filled in once per group
for component in group:
refs += component.getRef() + ", "
c = component
# Fill in the component groups common data
out.writerow([refs, len(group), c.getValue(), c.getPartName(), c.getFootprint(),
c.getDescription(), c.getField("MFN"), c.getField("MFP")])
|
[
"antiomiae@gmail.com"
] |
antiomiae@gmail.com
|
827c7b9b76801ff6a9ebbc2f8342fe133931ca45
|
f17de2f1a2804033a7b7fc74a0d09f964fe1d876
|
/hungerExpress/food/migrations/0003_auto_20180331_1736.py
|
a285d1dd32068594eea223b405926bad96304f74
|
[] |
no_license
|
udwivedi394/djangoProjects
|
60d6eb275ce75dab3884f1a9c68e01226625c4e2
|
22075b7f850d796afe5a0c06411eb5ff762357b7
|
refs/heads/master
| 2021-09-10T21:54:44.363710
| 2018-04-03T01:58:27
| 2018-04-03T01:58:27
| 126,106,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Generated by Django 2.0.3 on 2018-03-31 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('food', '0002_auto_20180331_1725'),
]
operations = [
migrations.AlterField(
model_name='restaurant',
name='contact_no',
field=models.CharField(max_length=20),
),
]
|
[
"utkarshdwivedi394@gmail.com"
] |
utkarshdwivedi394@gmail.com
|
c9585d56b0fe94af3866093cae1b512d95ca70cb
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/tests/components/fritz/test_button.py
|
36af1c27f5e0bcf2f1852749964ed9cdf872c95c
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821
| 2023-02-21T22:38:50
| 2023-02-21T22:38:50
| 218,684,806
| 13
| 7
|
Apache-2.0
| 2023-02-21T23:40:57
| 2019-10-31T04:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,402
|
py
|
"""Tests for Fritz!Tools button platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .const import MOCK_USER_DATA
from tests.common import MockConfigEntry
async def test_button_setup(hass: HomeAssistant, fc_class_mock, fh_class_mock) -> None:
"""Test setup of Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
buttons = hass.states.async_all(BUTTON_DOMAIN)
assert len(buttons) == 4
for button in buttons:
assert button.state == STATE_UNKNOWN
@pytest.mark.parametrize(
("entity_id", "wrapper_method"),
[
("button.mock_title_firmware_update", "async_trigger_firmware_update"),
("button.mock_title_reboot", "async_trigger_reboot"),
("button.mock_title_reconnect", "async_trigger_reconnect"),
("button.mock_title_cleanup", "async_trigger_cleanup"),
],
)
async def test_buttons(
hass: HomeAssistant,
entity_id: str,
wrapper_method: str,
fc_class_mock,
fh_class_mock,
) -> None:
"""Test Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
button = hass.states.get(entity_id)
assert button
assert button.state == STATE_UNKNOWN
with patch(
f"homeassistant.components.fritz.common.AvmWrapper.{wrapper_method}"
) as mock_press_action:
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
mock_press_action.assert_called_once()
button = hass.states.get(entity_id)
assert button.state != STATE_UNKNOWN
|
[
"noreply@github.com"
] |
bdraco.noreply@github.com
|
f90334a1939d9b22c35a1f046ae87e4ce66693cb
|
ac305c6739541e84857e297f8eb1b19417978548
|
/module_128.py
|
b9ba541614d3ccd041e0fe0728a597cc18a34050
|
[] |
no_license
|
imhardikj/git_test
|
d6608d6c02e0bc454f9dd31ffbbc5704a7046a61
|
43f0de2e9ac09ecd4fdfee27879fd8ae354a0685
|
refs/heads/master
| 2020-03-27T21:56:46.394739
| 2018-09-03T11:27:58
| 2018-09-03T11:27:58
| 147,189,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
"""A set of classes used to represent electric cars."""
from module_121 import Car
class Battery():
"""A simple attempt to model a battery for an electric car."""
def __init__(self, battery_size=70):
"""Initialize the batteery's attributes."""
self.battery_size = battery_size
def describe_battery(self):
"""Print a statement describing the battery size."""
print("This car has a " + str(self.battery_size) + "-kWh battery.")
def get_range(self):
"""Print a statement about the range this battery provides."""
if self.battery_size == 70:
range = 240
elif self.battery_size == 85:
range = 270
message = "This car can go approximately " + str(range)
message += " miles on a full charge."
print(message)
class ElectricCar(Car):
"""Models aspects of a car, specific to electric vehicles."""
def __init__(self, make, model, year):
"""
Initialize attributes of the parent class.
Then initialize attributes specific to an electric car.
"""
super().__init__(make, model, year)
self.battery = Battery()
|
[
"noreply@github.com"
] |
imhardikj.noreply@github.com
|
1d8710b301f679be196cfeb930905c255e5fc17d
|
7f015891893bfd60ab2eb9b8f2ca70c1eca062c3
|
/onyxlog/core/util.py
|
e19265187f0864d15ad70430af220acab9d61aa8
|
[] |
no_license
|
MauricioAlmeida/onyxlog
|
b9bad081697de563a559e2d0ca51d0ab4aa8d2a0
|
737816cbd5248a0ad297fc7f6355cf9a22553a63
|
refs/heads/master
| 2021-01-13T03:27:09.175104
| 2015-11-23T14:53:52
| 2015-11-23T14:53:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
# -*- coding: ISO-8859-1 -*-
"""
Utility functions used across the whole application
"""
def getParam(param):
"""
Return the value of a parameter
"""
from onyxlog.core.models.parametro import Parametro
if not param:
return None
parametro = Parametro.objects.filter(nome=param)
if parametro:
return parametro[0].valor
else:
return None
def getParamByName(param, user_id):
"""
Return the value of a parameter, filtering by branch
"""
from django.db.models import Q
from onyxlog.core.models.parametro import Parametro
if not param:
return None
parametro = Parametro.objects.filter(nome=param)
if parametro:
return parametro[0].valor
else:
return None
def format_number(numero):
"""
Format the number to the Brazilian standard
"""
try:
contador = 0
valor_str = ''
num = numero.__str__()
if '.' in num:
valor, decimal = num.split('.')
else:
valor = num
decimal = '00'
if len(decimal) < 2:
decimal = decimal + '0'
tamanho = len(valor)
while tamanho > 0:
valor_str = valor_str + valor[tamanho-1]
contador += 1
if contador == 3 and tamanho > 1:
valor_str = valor_str + ','
contador = 0
tamanho -= 1
tamanho = len(valor_str)
str_valor = ''
while tamanho > 0:
str_valor = str_valor + valor_str[tamanho-1]
tamanho -= 1
return "%s.%s" % (str_valor,decimal)
except:
return "Erro. Nao foi possivel converter o valor enviado."
|
[
"jairvercosa@gmail.com"
] |
jairvercosa@gmail.com
|
d380e27ac680f5df02f84e51c854721635cbf2ba
|
83d0abf6f05ed6694797872c45c6f1b99f9ceeec
|
/src/const.py
|
9ea80df69bec5e1cafe1a8d935e7bcc73a974a60
|
[] |
no_license
|
Ruthvik2127/IEEE-CIS-Fraud-Detection-1
|
be01ed9eb95f11b31e341424c0b1fc1ab2171f82
|
f83d717e7ba604e64b517c480a99325fe0579976
|
refs/heads/master
| 2023-03-15T15:28:55.374749
| 2019-10-02T13:17:44
| 2019-10-02T13:17:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
import pandas as pd
train_identity = pd.read_csv("../data/train_identity.csv", )
train_transaction = pd.read_csv("../data/train_transaction.csv", )
test_identity = pd.read_csv("../data/test_identity.csv", )
test_transaction = pd.read_csv("../data/test_transaction.csv",)
BASE_COLUMNS = list(train_transaction.columns) + list(train_identity.columns)
COLS_TO_DROP = ['V112', 'V315', 'V293', 'id_25', 'V135', 'V136', 'V284', 'V298', 'V300', 'V316',
'V111', 'dist2', 'V105', 'V113', 'V104', 'id_24', 'id_22', 'V117', 'V121', 'V125',
'V320', 'V103', 'V109', 'V118', 'V295', 'V303', 'V119', 'V134', 'V106', 'V281',
'V120', 'V290', 'V98', 'V102', 'V115', 'V137', 'V123', 'id_08', 'V309', 'id_18',
'V114', 'V321', 'V116', 'V133', 'V108', 'V301', 'V124', 'C3', 'V296', 'id_23',
'V122', 'V129', 'id_26', 'V304', 'V110', 'V107', 'id_21', 'V286', 'id_27',
'V297', 'V299', 'V311', 'V319', 'V305', 'V101', 'V289', 'id_07', 'V132', 'V318', 'D7']
CATEGORY_COLUMNS = ['id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22',
'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_30', 'id_31', 'id_32', 'id_33',
'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'ProductCD', 'card4',
'card6', 'M4','P_emaildomain', 'R_emaildomain', 'card1', 'card2', 'card3', 'card5', 'addr1',
'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9', 'P_emaildomain_bin', 'DeviceInfo_c',
'id_30_c', 'P_emaildomain_suffix', 'R_emaildomain_bin', 'R_emaildomain_suffix']
|
[
"andrewsiu1029@gmail.com"
] |
andrewsiu1029@gmail.com
|
fa5914ae5d699606bf8f903862b95dc7fc78ceab
|
d76afab167e163f8f54fb0799bf2c5c1e0868a05
|
/src/openzwave_compat/scene.py
|
915f9fb2ab849cb653348ed07758de866f776f04
|
[] |
no_license
|
magicus/babyzwip
|
e61b4886fafb582723bc09a2128b5d7ae2b1256d
|
38ad83598f9e54ee667ec342a0478206daf3cfee
|
refs/heads/master
| 2021-01-12T07:45:49.928391
| 2018-01-15T09:42:46
| 2018-01-15T09:42:46
| 77,001,731
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
"""
Compatibility layer with python-openzwave
"""
from openzwave_compat import ZWaveObject
class ZWaveScene(ZWaveObject):
def __init__(self, scene_id, network=None):
ZWaveObject.__init__(self, scene_id, network)
@property
def scene_id(self):
return None
@property
def label(self):
return None
@label.setter
def label(self, value):
pass
def create(self, label=None):
return None
def add_value(self, value_id, value_data):
return None
def set_value(self, value_id, value_data):
return None
def get_values(self):
return None
def get_values_by_node(self):
return None
def remove_value(self, value_id):
return None
def activate(self):
return None
def to_dict(self, extras=['kvals']):
return None
|
[
"mag@icus.se"
] |
mag@icus.se
|
3c840954bad45d6884f9cadc51628038511b55ba
|
d6475dda9db9ea6e447db2b4d75d2ebdf454e9d8
|
/polls/models.py
|
fefdac850f120944eee69c1278d883e9925f2e2d
|
[] |
no_license
|
yoophi/django_polls
|
3d92b01f239ed6933b7593408b788f7adf2e6c31
|
f94c0ff6307cbdd2d3c65a6b5131a515b6fe67af
|
refs/heads/master
| 2021-01-10T00:57:18.706884
| 2016-03-24T14:50:38
| 2016-03-24T14:50:38
| 54,241,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
@python_2_unicode_compatible
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
[
"yoophi@gmail.com"
] |
yoophi@gmail.com
|
82b80a1803095f37024f7db48d75ec4c61894f66
|
50850883f2a38c50f5c77889ab300bfd81bd5009
|
/setup.py
|
4f8bfa00256ffb337b8827a49855274293bb340e
|
[] |
no_license
|
Brant/django-triggeredMenu
|
b015ffe0b8007b3c5d05ed9cf93d78815dfb50b0
|
218bd9d7696d8fa147f682efd45bc79db7a1d549
|
refs/heads/master
| 2021-01-01T18:07:32.142089
| 2013-01-04T21:14:06
| 2013-01-04T21:14:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
from setuptools import setup, find_packages
setup(
name = "triggeredmenu",
version = "0.1",
url = 'https://github.com/Brant/django-triggeredMenu',
license = 'GPL',
description = "Adds triggeredMenu static files to a django project",
long_description = open('README.md').read(),
author = 'Brant Steen',
author_email = 'brant.steen@gmail.com',
packages = find_packages(exclude=('tests', )),
include_package_data = True,
zip_safe = False,
install_requires = [],
# scripts = ["scripts/project_dj", ],
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPL License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
]
)
|
[
"brant.steen@gmail.com"
] |
brant.steen@gmail.com
|
1a9a1c804acf382b9815037e5ac1e6d8ff33b443
|
a4008766df10c009591a3aed79c96b04de58ed33
|
/dateFind.py
|
6a41772a247cf6b50b0a309538e34f7d7fdddf74
|
[] |
no_license
|
JoneWangwz/ZHQH
|
2bb1cbc087023159e22e14ad3bb38693827122c6
|
8e13a9c47d383ffbd617f7b72150d37711bb9e37
|
refs/heads/master
| 2020-03-21T04:17:58.718832
| 2018-07-30T14:14:22
| 2018-07-30T14:14:31
| 138,101,460
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,599
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 29 22:03:51 2018
@author: 王钊
"""
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import csv
from sklearn.linear_model import LogisticRegression
import numpy as np
'''
dateTable: the trading-date column, taken from any instrument table, e.g. testingStand/A.csv
date: a single day to compute, e.g. 20180101
trendSort: the per-variety direction files, TrainingResult/sort
training2Model: folder holding the training samples
testing2Model: folder holding the testing samples
varietyValue: the file with each variety's contract value, 'stock.csv'
oneResult: output path for single-day results, TrainingResult
manyDaysResult: output path for date-range results
'''
def filtrate(dateTable,date,dateBegin,dateEnd,trendSort,training2Model,testing2Model,varietyValue,
oneResult,manyDaysResult):
#fd=pd.read_csv(variety+'.txt',encoding='gbk')
df=pd.read_csv(dateTable,encoding='gbk')
#dd=pd.read_csv('testingStand/A.csv',encoding='gbk')
if (date>20180101):
if (date<=df.loc[df.shape[0]-1,'日期']):
for xx in range(df.shape[0]):
if str(df.loc[xx,'日期']) == str(date):
conNum=[]
contract=[]
closingPrice=[]
variety=[]
date=[]
predic=[]
trend=[]
value=[]
result = {}
aLL=[0,1]
for al in aLL:
num=0
dff = pd.read_csv(trendSort+'/sort%s.csv'%(str(al)), parse_dates=True,encoding='gbk')
dff.sort_index(inplace=True)
for f in range(len(dff.iloc[1:, 0])):
vv=pd.read_csv(training2Model+'/%s.csv' % dff.iloc[f, 0], encoding='gbk', parse_dates=True)
dd=pd.read_csv(testing2Model+'/%s.csv' % dff.iloc[f, 0], encoding='gbk')
#print('dd.shape[0]',dd.shape[0])
for cc in range(dd.shape[0]):
if dd.loc[cc,'日期']==df.loc[xx,'日期']:
with open(training2Model+'/%s.csv' % dff.iloc[f, 0],encoding='gbk') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
X = []
y = []
for row in readCSV:
X.append(np.array(row[3:len(row[:]) - 1]))
y.append(float(row[-1]))
X.append(dd.iloc[cc,3:-1])
y.append(float(dd.iloc[cc,-1]))
X = np.array(X)
print('X.shape[1]',X.shape[1])
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=(X.shape[0]-1),shuffle=False)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
x_test = []
x_test = np.array(x_test)
print("\n")
print("开始训练品类%s" % dff.iloc[f, 0])
ml=[]
print("dff.loc[f,'combination']",dff.loc[f,'combination'])
for i in dff.loc[f,'combination']:
if i=='(':
(dff.loc[f,'combination'])
elif i==')':
(dff.loc[f,'combination'])
elif i==',':
(dff.loc[f,'combination'])
elif i==' ':
(dff.loc[f,'combination'])
else:
ml.append(int(i))
print('ml',len(ml))
x_train = np.zeros(shape=(X_train.shape[0], len(ml)))
# initialize the test set
x_test = np.zeros(shape=(X_test.shape[0], len(ml)))
for m in range(len(ml)):
print('m',m)
ll=ml[m]
x_train[:,m]=X_train[:,ll]
x_test[:,m]=X_test[:,ll]
clf = LogisticRegression().fit(x_train, y_train)
print('x_test',x_test.shape)
a = clf.predict(x_test)
if a == dff.loc[f,'trend']:
shou=pd.read_csv(varietyValue, parse_dates=True,encoding='gbk')
print('shou',shou.shape[0])
for s in range(shou.shape[0]):
if shou.loc[s,'合约代码']==dff.loc[f,'category'].lower():
print("shou.loc[s,'合约乘数']",shou.loc[s,'合约乘数'])
com=shou.loc[s,'合约乘数']
if str(com).isdigit():
contract.append(shou.loc[s,'合约乘数'])
value.append(shou.loc[s,'合约乘数']*dd.loc[cc,'收盘价1'])
conNum.append(int(3000000/(shou.loc[s,'合约乘数']*vv.loc[vv.shape[0]-1,'收盘价1'])))
else:
contract.append(0)
value.append(0)
conNum.append(0)
predic.append(a)
trend.append(dff.loc[f,'trend'])
date.append(dd.loc[cc,'日期'])
variety.append(dff.iloc[f, 0])
closingPrice.append(dd.loc[cc,'收盘价1'])
num+=1
if num>=5:
break
if num>=5:
break
vv = vv.drop([vv.shape[0]-1])
print('variety',len(variety))
print('date',len(date))
print('trend',len(trend))
print('predic',len(predic))
print('closingPrice',len(closingPrice))
print('contract',len(contract))
print('value',len(value))
print('conNum',len(conNum))
result = {'合约':variety,'日期':date,'方向':trend,'预测':predic,'收盘价1':closingPrice,
'合约乘数':contract,'价值':value,'数量':conNum}
re=pd.DataFrame(data=result,columns=['合约','日期','方向','预测','收盘价1','合约乘数','价值','数量'])
re.to_csv(oneResult+'/1%s.csv'%(str(date[0])),encoding='gbk',index=False)
print(date)
print('%s training finished'%(str(date)))
#dd=pd.read_csv('TrainingResult/%s.csv'%(date),encoding='gbk')
#dd.to_csv('%s.txt'%(date),encoding='gbk',index=False)
else:
print("输入日期可能不为交易日")
else:
print("输入日期开始日期必须是2018年以后")
#sumBegin=0
#sumEnd=0
if dateBegin>20180101:
if dateBegin<dateEnd:
if dateBegin>20180101:
if dateBegin<=df.loc[df.shape[0]-2,'日期']:
if dateEnd>20180101:
if dateEnd<df.loc[df.shape[0]-1,'日期']:
for i in range(df.shape[0]):
if df.loc[i,'日期']==dateBegin:
sumBegin=i
if df.loc[i,'日期']==dateEnd:
sumEnd=i
conNum=[]
contract=[]
closingPrice=[]
variety=[]
date=[]
predic=[]
trend=[]
value=[]
result = {}
for xx in range(df.shape[0]):
if df.loc[xx,'日期'] >= dateBegin:
if df.loc[xx,'日期'] <= dateEnd:
aLL=[0,1]
for al in aLL:
nu=0
dff = pd.read_csv(trendSort+'/sort%s.csv'%(str(al)), parse_dates=True,encoding='gbk')
dff.sort_index(inplace=True)
for f in range(len(dff.iloc[1:, 0])):
vv=pd.read_csv(training2Model+'/%s.csv' % dff.iloc[f, 0], encoding='gbk', parse_dates=True)
dd=pd.read_csv(testing2Model+'/%s.csv' % dff.iloc[f, 0], encoding='gbk')
#print('dd.shape[0]',dd.shape[0])
for cc in range(dd.shape[0]):
if dd.loc[cc,'日期']==df.loc[xx,'日期']:
#if dd.loc[cc,'日期']<=float(dateEnd):
#num=0
with open(training2Model+'/%s.csv' % dff.iloc[f, 0],encoding='gbk') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
X = []
y = []
for row in readCSV:
X.append(np.array(row[3:len(row[:]) - 1]))
y.append(float(row[-1]))
X.append(dd.iloc[cc,3:-1])
y.append(float(dd.iloc[cc,-1]))
X = np.array(X)
print('X.shape[1]',X.shape[1])
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=(X.shape[0]-1),shuffle=False)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
x_test = []
x_test = np.array(x_test)
print("\n")
print("开始训练品类%s" % dff.iloc[f, 0])
ml=[]
print("dff.loc[f,'combination']",dff.loc[f,'combination'])
for i in dff.loc[f,'combination']:
if i=='(':
(dff.loc[f,'combination'])
elif i==')':
(dff.loc[f,'combination'])
elif i==',':
(dff.loc[f,'combination'])
elif i==' ':
(dff.loc[f,'combination'])
else:
ml.append(int(i))
print('ml',len(ml))
x_train = np.zeros(shape=(X_train.shape[0], len(ml)))
# initialize the test set
x_test = np.zeros(shape=(X_test.shape[0], len(ml)))
for m in range(len(ml)):
print('m',m)
ll=ml[m]
x_train[:,m]=X_train[:,ll]
x_test[:,m]=X_test[:,ll]
clf = LogisticRegression().fit(x_train, y_train)
print('x_test',x_test.shape)
a = clf.predict(x_test)
if a == dff.loc[f,'trend']:
shou=pd.read_csv(varietyValue, parse_dates=True,encoding='gbk')
print('shou',shou.shape[0])
for s in range(shou.shape[0]):
if shou.loc[s,'合约代码']==dff.loc[f,'category'].lower():
print("shou.loc[s,'合约乘数']",shou.loc[s,'合约乘数'])
com=shou.loc[s,'合约乘数']
if str(com).isdigit():
contract.append(shou.loc[s,'合约乘数'])
value.append(shou.loc[s,'合约乘数']*dd.loc[cc,'收盘价1'])
conNum.append(int(3000000/(shou.loc[s,'合约乘数']*vv.loc[vv.shape[0]-1,'收盘价1'])))
else:
contract.append(0)
value.append(0)
conNum.append(0)
predic.append(a)
trend.append(dff.loc[f,'trend'])
date.append(dd.loc[cc,'日期'])
variety.append(dff.iloc[f, 0])
closingPrice.append(dd.loc[cc,'收盘价1'])
nu+=1
if nu>=5:
break
#if nu>=5:
# break
if nu>=5:
break
vv = vv.drop([vv.shape[0]-1])
#if num>=5:
# break
print('variety',len(variety))
print('date',len(date))
print('trend',len(trend))
print('predic',len(predic))
print('closingPrice',len(closingPrice))
print('contract',len(contract))
print('value',len(value))
print('conNum',len(conNum))
result = {'合约':variety,'日期':date,'方向':trend,'预测':predic,'收盘价1':closingPrice,
'合约乘数':contract,'价值':value,'数量':conNum}
re=pd.DataFrame(data=result,columns=['合约','日期','方向','预测','收盘价1','合约乘数','价值','数量'])
re=re.sort_values(by=['日期','方向'])
re.to_csv(manyDaysResult+'/%sAnd%s.csv'%(str(dateBegin),str(dateEnd)),encoding='gbk',index=False)
print("%s-%s训练完毕"%(str(dateBegin),str(dateEnd)))
#for j in range(sumBegin,sumEnd+1):
else:
print('The end date must exist in the current date table')
else:
print("结束日期必须是存在于当前列表中")
else:
print("输入日期必须是2018年以后1")
else:
print('The start date must be in 2018 or later')
if __name__=='__main__':
date=input("请输入一天的预测:")
date=float(date)
#print(date)
yes=input("请输入是否需要训练时间段 yes/no:")
#no=input("不与要训练输入no:")
if yes=='yes':
dateBegin=input("请输入开始的时间:")
dateEnd=input("请输入结束的时间:")
dateBegin = float(dateBegin)
dateEnd = float(dateEnd)
elif yes=='no':
dateBegin=0
dateEnd=0
filtrate('testingStand/AG.csv',date,dateBegin,dateEnd, 'TrainingResult/sort', 'training2Model', 'testing2Model', 'stock.csv',
'TrainingResult', 'TrainingResult')
|
[
"32155950+Wangzhaozz@users.noreply.github.com"
] |
32155950+Wangzhaozz@users.noreply.github.com
|
0355be01c5e553aed9870e5fe28e838bab995917
|
d0dcc77793433e31adda34b0a9989da694b61f19
|
/divideTrainTestSet.py
|
9b91ab7d5bdffe2b9f85f231da505ed662e38d1e
|
[] |
no_license
|
MagicSen/python_tools
|
0bac702a31ad5b8b742fe809e63f027891783d4c
|
296b375cb3807f8436ce6b7c8a661daeecd975c1
|
refs/heads/master
| 2021-09-02T15:57:59.758341
| 2018-01-03T14:10:17
| 2018-01-03T14:10:17
| 116,134,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
##
# @file divideTrainTestSet.py
# @brief Given ratio, divid data set to train and test set.
# @author Yang Sen, magicys@qq.com
# @version 1.0.0
# @date 2017-09-09
import os,sys
##
# @brief getListFromFile
# Get a list from a file
# @param file_name
#
# @return
def getListFromFile(file_name):
if not os.path.exists(file_name):
print "Error: File is not exists."
return []
list_file = open(file_name, 'r')
list_all = []
for line in list_file:
item = line.strip("\n").strip(' ')
if len(item) != 0:
list_all.append(item)
list_file.close()
return list_all
if __name__ == "__main__":
if len(sys.argv) < 5:
print "<fin_total_list> <fin_ratio> <fout_train_list> <fout_test_list>"
sys.exit()
file_list = getListFromFile(sys.argv[1])
ratio = float(sys.argv[2])
fout_train_list = open(sys.argv[3], 'w')
fout_test_list = open(sys.argv[4], 'w')
total_number = len(file_list)
train_number = total_number * ratio
for i in range(total_number):
if i < train_number:
fout_train_list.writelines(file_list[i] + "\n")
else:
fout_test_list.writelines(file_list[i] + "\n")
fout_train_list.close()
fout_test_list.close()
|
[
"syang@usens.com"
] |
syang@usens.com
|
a53e77f1ed8e5db0c92f17c14085432ed68a4c61
|
2624f3e7b657defbda604c68902f63242ae52141
|
/Patterns/pattern_5.py
|
ebe0be5b2f8a530dcd00d0d30f768124671f1c7a
|
[
"Apache-2.0"
] |
permissive
|
jarvis-1805/DSAwithPYTHON
|
a451c40571ef90a979832b33a4b4fb6c0235b209
|
872073d1b8d0001ea8b1a54b5e327dd0c1c406f2
|
refs/heads/main
| 2023-03-28T23:46:14.672310
| 2021-03-27T10:57:16
| 2021-03-27T10:57:16
| 346,755,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
'''
*
* *
* * *
* * * *
* * *
* *
*
'''
n = int(input())
i = 0
while i <= (n//2)+1:
k = i
while k > 1:
print(' ', end='')
k -= 1
j = 0
while j < i:
print('*', end=' ')
j += 1
print()
i += 1
i = 0
while i < n//2:
k = i
while k < (n//2) - 1:
print(' ', end='')
k += 1
j = 0
while j < (n//2)-i:
print('*', end=' ')
j += 1
print()
i += 1
|
[
"shubhsahu1805@gmail.com"
] |
shubhsahu1805@gmail.com
|
ff01db056009a80fa1000e2954fbb76c769b6e7e
|
a3d2620bbf25002c7b182600c2e40f8f06555e91
|
/exc/exc/wsgi.py
|
8d7d6db299d15b0077bd2774bf300955b5612354
|
[] |
no_license
|
alejo8591/backend-lab
|
782736a82933f705f825a1194369bfe13e86c0ec
|
4a02a9552083a7c877e91b0f8b81e37a8650cf54
|
refs/heads/master
| 2016-09-03T03:53:43.878240
| 2015-11-26T06:35:38
| 2015-11-26T06:35:38
| 3,911,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
"""
WSGI config for exc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exc.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"alejo8591@gmail.com"
] |
alejo8591@gmail.com
|
9d7d2d581d50ca04cf1b4329b5b87bf803707862
|
c2e6b6119a1d03bc293572d568d21a6b76762a1f
|
/ex.py
|
30c1077d8fe6fac7ee1c285147c7a62bef2ee59a
|
[] |
no_license
|
kafura-kafiri/Fesss
|
24a92e5185881066b0d2f61d1649ab0e43a0f479
|
7b660723237dfbdbd3ba9772a9d2a9c771807bb7
|
refs/heads/master
| 2021-05-03T17:17:54.799918
| 2018-02-06T16:06:40
| 2018-02-06T16:06:40
| 120,443,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
# LSTM regression on the fesss.csv event series (adapted from the airline passengers example)
import numpy
from pandas import read_csv
import datetime
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
def parse(x):
return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
dataframe = read_csv('fesss.csv', parse_dates=['Date'], engine='python', date_parser=parse)
dataset = dataframe.values
start = dataset[0, 0]
for i in range(len(dataset)):
_start = dataset[i, 0]
dataset[i, 0] = (dataset[i, 0] - start).total_seconds()
start = _start
dataset = dataset.astype('float32')
# normalize the dataset
delta_scaler = MinMaxScaler(feature_range=(0, 1))
delay_scaler = MinMaxScaler(feature_range=(0, 1))
# print(dataset)
def scale(scaler, dataset, i):
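    """Fit-transform column i of the dataset in place with the given scaler and return the dataset."""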
data = dataset[:, i]
data = data.reshape(data.shape[0], 1)
data = scaler.fit_transform(data)
dataset[:, i] = data.reshape(data.shape[0])
return dataset
dataset = scale(delta_scaler, dataset, 0)
dataset = scale(delay_scaler, dataset, 1)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
l = [dataset[i + 1][0]]
l.extend(dataset[i:(i + look_back), 1])
l.append(dataset[i + 1][2])
dataX.append(l)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
look_back = 1
dataX, dataY = create_dataset(dataset, look_back)
# reshape input to be [samples, time steps, features]
dataX = numpy.reshape(dataX, (dataX.shape[0], 1, dataX.shape[1]))
print(dataset)
print(dataX)
print(dataY)
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back + 2)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(dataX, dataY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(dataX)
from math import sqrt
rmse = sqrt(mean_squared_error(dataY, trainPredict))
print('RMSE: %.3f' % rmse)
|
[
"kafura.kafiri@gmail.com"
] |
kafura.kafiri@gmail.com
|
968290c1917596dac408fca7d1a91f4c18315524
|
3024cafafbfc75193105af7f225d3b12eb2aea46
|
/DjangoProjects/project24/iplapp/models.py
|
b6932bc062b857864ce7ec33dc7f0cac6088b6d7
|
[] |
no_license
|
jaishankarg24/Django-Rest-Framework
|
33266f6825d51abb8a512426baedf59f2ee957c8
|
809ee9208ffbef4202a8f4058a84f5322793af52
|
refs/heads/master
| 2023-03-02T20:56:38.051060
| 2021-02-12T05:37:48
| 2021-02-12T05:37:48
| 338,233,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
from django.db import models
# Create your models here.
class IplTable(models.Model):
name = models.CharField( max_length=50)
age = models.IntegerField()
country = models.CharField( max_length=50)
|
[
"jaishankarg24@gmail.com"
] |
jaishankarg24@gmail.com
|
603b9965d66ae58e80bf9f48f51c6c5dbebca190
|
d44b37644b1674eba76030c581f3e7c9d02e4127
|
/ishelf/content/admin.py
|
15d678f71771b0e399603fca59ae7f4940899635
|
[] |
no_license
|
tommus/android-05-payu-integration-api
|
3d0cae4129d19a6bb8338cc1424fce97b6a5d602
|
8155114ded6abcccde909555e7e40f12b0c49376
|
refs/heads/master
| 2020-03-23T11:12:36.755036
| 2018-07-19T21:30:49
| 2018-07-19T21:30:49
| 141,490,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
from django.contrib.admin import (
ModelAdmin,
register,
)
from ishelf.content.models import (
Author,
Book,
)
# region Author
@register(Author)
class AuthorAdmin(ModelAdmin):
list_filter = ["active"]
list_display = ["__str__", "active"]
list_editable = ["active"]
# endregion
# region Book
@register(Book)
class BookAdmin(ModelAdmin):
list_filter = ["author", "active"]
list_display = ["__str__", "active"]
list_editable = ["active"]
# endregion
|
[
"tomasz.dzieniak@windly.co"
] |
tomasz.dzieniak@windly.co
|
5c5ff093f8e4848fe2435494f5abccda014f4507
|
84a1f9d626828b6ecaee4ef037081f4d8750a990
|
/编程/9月/9.12/习题答案.py
|
df9a5234a978fced165131300f75ac2e75628528
|
[] |
no_license
|
dujiaojingyu/Personal-programming-exercises
|
5a8f001efa038a0cb3b6d0aa10e06ad2f933fe04
|
72a432c22b52cae3749e2c18cc4244bd5e831f64
|
refs/heads/master
| 2020-03-25T17:36:40.734446
| 2018-10-01T01:47:36
| 2018-10-01T01:47:36
| 143,986,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,921
|
py
|
#coding=utf-8
import linecache
import time
now = time.time() # code start time
# Preliminary step: organize the data
data_keys = ('bid', 'uid', 'username', 'v_class', 'content', 'img', 'created_at', 'source', 'rt_num', 'cm_num', 'rt_uid', 'rt_username', 'rt_v_class', 'rt_content', 'rt_img', 'src_rt_num', 'src_cm_num', 'gender', 'rt_bid', 'location', 'rt_mid', 'mid', 'lat', 'lon', 'lbs_type', 'lbs_title', 'poiid', 'links', 'hashtags', 'ats', 'rt_links', 'rt_hashtags', 'rt_ats', 'v_url', 'rt_v_url')
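# map each column name to its index so fields can be looked up by name below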
keys = {data_keys[k]:k for k in xrange(0,len(data_keys))}
f = linecache.getlines('t.txt')
lines = [x[1:-1].split('","') for x in f] # split each line into fields
#1 Output the total number of users
users = set([line[keys['username']] for line in lines])
user_total = len(set(users))
assert type(user_total) == int
#2 The name of every user, as a list
users = list(users)
assert type(users) == list
#3 How many tweets were posted in November 2012
lines_from_2012_11 = filter(lambda line:line[keys['created_at']].startswith('2012-11'),lines)
lines_total_from_2012_11 = len(lines_from_2012_11)
assert type(lines_total_from_2012_11) == int
#
# #4 Which days of data does this file contain?
#
# users_by_date = [line[keys['created_at']].split(' ')[0] for line in lines]
#
# lines_by_created = list(set(users_by_date))
#
# lines_by_created.sort()
#
# assert type(lines_by_created) == list
#
#
# #5 In which hour were the most tweets posted?
# # todo: using the time module for the conversion would be best here; the example below only demonstrates string splitting
#
# hours = [int(line[keys['created_at']][11:13]) for line in lines]
#
# total_by_hour = [(h,hours.count(h)) for h in xrange(0,24) ]
#
# total_by_hour.sort(key=lambda k:k[1],reverse=True)
#
# max_hour = total_by_hour[0][0]
#
# assert type(max_hour) == int
#
#
# #6 For each day, output the user who posted the most tweets
#
# dateline_by_user = {k:dict() for k in lines_by_created}
#
# for line in lines:
# dateline = line[keys['created_at']].split(' ')[0]
# username = line[keys['username']]
# if dateline_by_user[dateline].has_key(username):
# dateline_by_user[dateline][username] += 1
# else:
# dateline_by_user[dateline][username] = 1
#
# for k,v in dateline_by_user.items():
# us = v.items()
# us.sort(key=lambda k:k[1],reverse=True)
# dateline_by_user[k] = {us[0][0]:us[0][1]}
#
# assert type(dateline_by_user) == dict
#
#
# #7 Output, in chronological order, the hourly tweet frequency on 2012-11-03
#
# lines_from_2012_11_03 = filter(lambda line:line[keys['created_at']].startswith('2012-11-03'),lines)
#
# hourlines_from_2012_11_03 = {str(i):0 for i in xrange(0,24)}
#
# for line in lines_from_2012_11_03:
# hour = line[keys['created_at']][11:13]
# hourlines_from_2012_11_03[str(int(hour))] += 1
#
# hour_timeline_from_2012_11_03 = [(k,v) for k,v in hourlines_from_2012_11_03.items()]
# hour_timeline_from_2012_11_03.sort(key=lambda k:int(k[0]))
#
# assert type(hour_timeline_from_2012_11_03) == list
#
#
# #8 Count each tweet source and how often it appears
#
# source = set([k[keys['source']] for k in lines])
# source_dict = {s:0 for s in source}
# for line in lines:
# source_name = line[keys['source']]
# source_dict[source_name] += 1
# source_list = [(k,v) for k,v in source_dict.items()]
# source_list.sort(key=lambda k:k[1],reverse=True)
# assert type(source_list) == list
#
#
# #9 Count how many retweeted URLs start with "https://twitter.com/umiushi_no_uta"
#
# umi_total = 0
# for line in lines:
# if line[keys['rt_v_url']].startswith('https://twitter.com/umiushi_no_uta'):
# umi_total += 1
# assert type(umi_total) == int
#
#
# #10 How many tweets did the user with UID 573638104 post
#
# tweets_total_from_573638104 = 0
# for line in lines:
# if line[keys['uid']] == '573638104' :
# tweets_total_from_573638104 += 1
# assert type(tweets_total_from_573638104) == int
#
#
# #11 Define a function that accepts any number of user uid arguments (returning "null" if none exist) and returns the uid of the user who posted the most tweets.
#
# def get_user_by_max_tweets(*uids):
#
# '''
# @deprecated: arguments may be strings or numbers
# '''
#
# if len(uids) > 0:
# uids = filter(lambda u:type(u) == int or u.isdigit(),uids)
# uids = map(str,uids)
# if len(uids) > 0:
# uids_dict = {x:0 for x in uids}
# for line in lines:
# uid = line[keys['uid']]
# if uid in uids:
# uids_dict[uid] += 1
# uids_and_tweets_total = [(x,y) for x,y in uids_dict.items()]
# uids_and_tweets_total.sort(key=lambda k:k[1],reverse=True)
# return uids_and_tweets_total[0][0]
# return "null"
#
#
# assert get_user_by_max_tweets() == 'null'
# assert get_user_by_max_tweets('ab','cds') == 'null'
# assert get_user_by_max_tweets('ab','cds','123b') == 'null'
# assert get_user_by_max_tweets('12342','cd') == '12342'
# assert get_user_by_max_tweets('28803555',28803555) == '28803555'
# assert get_user_by_max_tweets('28803555',28803555,'96165754') == '28803555'
#
#
# #12 Whose tweet content is the longest
#
# lines_by_content_length = [(line[keys['username']],len(line[keys['content']])) for line in lines]
# lines_by_content_length.sort(key=lambda k:k[1],reverse=True)
# user_by_max_content = lines_by_content_length[0][0]
# # todo: what if several users tie for the maximum?
# assert type(user_by_max_content) == str
#
#
# #13 Who retweeted the most URLs
#
# lines_by_rt = [(line[keys['uid']],int(line[keys['rt_num']])) for line in lines if line[keys['rt_num']] != '']
# lines_by_rt.sort(key=lambda k:k[1],reverse=True)
# user_by_max_rt = lines_by_rt[0][0]
# assert type(user_by_max_rt) == str
#
#
# #14 Who posted the most tweets during the 11 o'clock hour
#
# lines_on_hour11 = filter(lambda line:line[keys['created_at']].startswith('11',11,13),lines)
# lines_by_uid_on_hour11 = {k[keys['uid']]:0 for k in lines_on_hour11}
# for line in lines_on_hour11:
# uid = line[keys['uid']]
# lines_by_uid_on_hour11[uid] += 1
# d = [(k,v) for k,v in lines_by_uid_on_hour11.items()]
# d.sort(key=lambda k:k[1],reverse=True)
# uid_by_max_tweets_on_hour11 = d[0][0]
# # todo: what if several users tie for the maximum?
# assert type(uid_by_max_tweets_on_hour11) == str
#
#
# #15 Which user has the most source tweet URLs? (Output the uid as a string.)
#
# uid_by_v_url = {k[keys['uid']]:0 for k in lines}
# for line in lines:
# uid = line[keys['uid']]
# if lines[keys['v_url']] != '':
# uid_by_v_url[uid] += 1
# uid_sort_by_v_url = [(k,v) for k,v in uid_by_v_url.items()]
# uid_sort_by_v_url.sort(key=lambda k:k[1],reverse=True)
# uid_by_max_v_url = uid_sort_by_v_url[0][0]
# # todo: what if several users tie for the maximum?
# assert type(uid_by_max_v_url) == str
#
# print 'Elapsed time: %s'%(time.time() - now) # total running time
|
[
"34296128+dujiaojingyu@users.noreply.github.com"
] |
34296128+dujiaojingyu@users.noreply.github.com
|
f588ea12e923ea4f213f05f6c9644792b914cfeb
|
5e288e450271b3a395e6e2e4d340370d9b2380eb
|
/CarData/src/Cars/Ford/Car_data_DownTownFord.py
|
57addc566fe99503f6c1fa427f4b82e98b81baff
|
[] |
no_license
|
dnp987/GTA-Cars-old
|
76c8713516c8cf5e2fe0ee8a2b4bd5b429657e21
|
a7e72db18a5415f4b0ccf9c95b762e1bf801aded
|
refs/heads/master
| 2023-09-02T18:48:26.116925
| 2021-10-17T14:42:14
| 2021-10-17T14:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,288
|
py
|
'''
Created on June 29, 2020
@author: DNP Enterprises Inc.
'''
from datetime import datetime
from time import sleep
import re
#from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from Quotes.Excel_utils2 import Excel_utils2
from Cars.CreateDealerSheet2 import CreateDealerSheet
from Cars.browser_start import browser_start
if __name__ == '__main__':
file_in = 'C:/Users/Home/Desktop/Cars/CarData.xlsx'
data_in = Excel_utils2(file_in, 'Ford', 'in')
file_out = data_in.sht.cell(4,7).value
dealer = data_in.sht.cell(4,1).value
url = data_in.sht.cell(4,2).value
dealer_id = (data_in.sht.cell(4,3).value).split() # convert to a list for use later
date_time = datetime.now().strftime('%Y %B %d %I %M %p') # get the date and time
data_out = Excel_utils2(' ', date_time, 'out') # set the spreadsheet tab to the dealer name
driver = browser_start(url, True) # run browser in headless mode
#driver = browser_start(url) # run browser in non-headless, incognito mode
wait = WebDriverWait(driver, 10)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'li.resetButtonItem'))) # wait for Reset button to be displayed
print (driver.title)
num_cars = driver.find_element_by_css_selector('div.count.verticalAlignMid.inlineBlock').text
num_cars = int(re.sub("[^0-9]", "", num_cars)) #remove text, keep the numeric part, and convert to integer for later use
print ("Number of cars found on site: " , num_cars)
count = 0
zero = 0
car_info = []
pages_remaining = True
while pages_remaining:
try:
driver.find_element_by_css_selector('.popCloseButton').click() # close the annoying pop-up if it appears
except:
pass
car_desc = driver.find_elements_by_css_selector(".inventoryListVehicleTitle")
car_prices = driver.find_elements_by_css_selector('.vehiclePriceDisplay' '[itemprop]')
stock = driver.find_elements_by_css_selector('.field' '[itemprop = "sku"]')
details_links = driver.find_elements_by_css_selector('.inventoryListVehicleTitle [href]')
for index, car in enumerate(car_desc):
car_name = (car.text +" ").split()[:4] # keep the year, make, and model, remove the rest
year = car_name[0].split() # convert the year to a list
make = car_name[1].split() # convert make to a list
model = car_name[2:] # model is already a list
model = [' '.join(model)] # merge the model into one list element
car_desc = year + make + model
price = re.sub("[$,]", "", car_prices[index].text) # remove $ and ',' from the price
if (not price.isdigit()): # if the price is "Please call" or something non-numeric, set the price to 0
price = '0'
zero += 1
price = price.split() # convert to a list
stock_num = (stock[index].text).split() # convert to a list
link = (details_links[index].get_attribute('href')).split()
print (index,":", car_desc, price, stock_num, link)
car_info.append(dealer_id + car_desc + price + stock_num + link)
count = count + index +1
print ("Running count: ", count)
try:
print (driver.find_element_by_link_text("Next").get_attribute('href'))
driver.find_element_by_link_text("Next").click() # click on Next link
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'li.resetButtonItem'))) # wait for Reset button to be displayed
sleep (4)
except:
print ("Total cars processed: ", count, " Total unpriced cars: ", zero)
pages_remaining = False
car_info = sorted(car_info)
for index, i in enumerate(car_info):
print (index, ":", i)
print ("Saving data in a spreadsheet....", file_out)
CreateDealerSheet(data_out, car_info, date_time)
print (dealer, "Total cars: " , count)
data_out.save_file(file_out)
driver.quit() # Close the browser and end the session
|
[
"dnp987@gmail.com"
] |
dnp987@gmail.com
|
95aa037242063b122b3bd33f7bb1314f54c46850
|
11ad104b0309a2bffd7537d05e2ab3eaf4aed0ca
|
/tests/helpers/test_storage_remove.py
|
9a447771ea630816f159fba84f8ff655f447eb56
|
[
"Apache-2.0"
] |
permissive
|
koying/home-assistant
|
15e5d01a45fd4373b3d286e1b2ca5aba1311786d
|
9fc92ab04e0d1933cc23e89b4095714aee725f8b
|
refs/heads/dev
| 2023-06-24T01:15:12.150720
| 2020-11-01T12:27:33
| 2020-11-01T12:27:33
| 189,232,923
| 2
| 1
|
Apache-2.0
| 2023-01-13T06:04:15
| 2019-05-29T13:39:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
"""Tests for the storage helper with minimal mocking."""
import asyncio
from datetime import timedelta
import os
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_test_home_assistant
async def test_removing_while_delay_in_progress(tmpdir):
"""Test removing while delay in progress."""
loop = asyncio.get_event_loop()
hass = await async_test_home_assistant(loop)
test_dir = await hass.async_add_executor_job(tmpdir.mkdir, "storage")
with patch.object(storage, "STORAGE_DIR", test_dir):
real_store = storage.Store(hass, 1, "remove_me")
await real_store.async_save({"delay": "no"})
assert await hass.async_add_executor_job(os.path.exists, real_store.path)
real_store.async_delay_save(lambda: {"delay": "yes"}, 1)
await real_store.async_remove()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
await hass.async_stop()
|
[
"noreply@github.com"
] |
koying.noreply@github.com
|
8ef2d2abe68d0b5499e760395b40896a467518c4
|
2e9193625039cbd93a76a1ac1115e84599c6afcd
|
/HashTable/hashtableImp.py
|
1f19d4d3fcdd4ca486866e38beb7dbb1a273fa65
|
[] |
no_license
|
hieudx149/DatastructAndAlgorithms
|
d54b79c3375dfb17989160a1d2dc74505061eae5
|
a5f147b2f644f2a273c50756c9d297fa8b6bcd08
|
refs/heads/master
| 2023-06-16T13:38:32.039274
| 2021-07-13T10:35:54
| 2021-07-13T10:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
class hash_table:
def __init__(self, size):
self.size = size
self.data = [None]*self.size
def __str__(self): # As in the array implementation, this method is used to print the attributes of the class object in a dictionary format
return str(self.__dict__)
def _hash(self, key):
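        """Hash the key to a bucket index by summing position-weighted character codes modulo the table size."""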
hash = 0
for i in range(len(key)):
hash = (hash + ord(key[i])*i) % self.size
return hash
def set(self, key, value):
address = self._hash(key)
if not self.data[address]:
self.data[address] = [[key, value]]
else:
# overwrite the value if the key already exists in this bucket, otherwise append
for pair in self.data[address]:
if pair[0] == key:
pair[1] = value
break
else:
self.data[address].append([key, value])
print(self.data)
def get(self, key):
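        """Return the value stored under key, or None if the key is not present."""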
address = self._hash(key)
bucket = self.data[address]
if bucket:
for i in range(len(bucket)):
if bucket[i][0] == key:
return bucket[i][1]
return None
def keys(self):
list_key = []
for i in range(self.size):
if self.data[i]:
for j in range(len(self.data[i])):
list_key.append(self.data[i][j][0])
return list_key
def values(self):
list_value = []
for i in range(self.size):
if self.data[i]:
for j in range(len(self.data[i])):
list_value.append(self.data[i][j][1])
return list_value
new_hash = hash_table(5)
new_hash.set('duong', 100)
new_hash.set('xuan', 200)
new_hash.set('hieu', 300)
print(new_hash.keys())
print(new_hash.values())
|
[
"you@example.com"
] |
you@example.com
|
5f751dd40eb4c0085434027a83c7ef7d78945f6c
|
e0708f9ee1adbe9c86d5b333da25ad9b31763b0b
|
/App_Blog/templatetags/custom_filters.py
|
88d002df6794e06beb9d787c1df7ed2d4084618e
|
[] |
no_license
|
kazi-akib-abdullah/blog_project_django
|
ad766acd7809876c02791631de96ca31304efef3
|
ea6a534e1a9a24b2df055a0757f17e9f6cc09e16
|
refs/heads/master
| 2023-03-09T21:03:26.977160
| 2021-02-27T08:02:13
| 2021-02-27T08:02:13
| 340,184,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django import template
register = template.Library()
def range_filter(value):
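    # truncate to the first 500 characters and append an ellipsis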
return value[0:500] + "......"
register.filter('range_filter', range_filter)
|
[
"45953236+kazi-akib-abdullah@users.noreply.github.com"
] |
45953236+kazi-akib-abdullah@users.noreply.github.com
|
dcf9913ef2a4ff09c4c40ac79a15797fa47cbec7
|
3bf01ae08ad8e85af0928f8d644e21e3dd3ab504
|
/seance/seance_service.py
|
c702dee1aeac952d9615f8b5eb14ad08be4e0250
|
[] |
no_license
|
AnkoSkl/rsoi
|
374a09079d1451f3e8890bcef0fdcb504eb49acc
|
9c24f193bcea09e12726fd30445227ebf84090ab
|
refs/heads/master
| 2022-12-13T23:16:05.067962
| 2019-11-30T20:39:47
| 2019-11-30T20:39:47
| 153,944,478
| 0
| 0
| null | 2022-12-08T01:19:28
| 2018-10-20T20:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 427
|
py
|
from movie import app
from flask_restful import Api
from seance.rest_api.seance_resource import *
from movie.repository.movie_repository import Movies
api = Api(app)
service_namespace = "/seances"
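# apparently the intended base path for this service; the resources below register absolute paths directly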
api.add_resource(SeanceListResource, "/seances")
api.add_resource(SeanceResource, "/seances/<seance_id>")
api.add_resource(SeanceCreateResource, "/seances/create")
if __name__ == '__main__':
app.run(port=5002, debug=True)
|
[
"anko136660422@gmail.com"
] |
anko136660422@gmail.com
|
2e3a286e0a9ecb9a6cf8662b45d6de113da6d8e2
|
bef90192417ecaf5405a0ce23a7d1e55e797b702
|
/lab3_1.py
|
d07d197b69a34c013ff68ea9792a3733fd92996c
|
[] |
no_license
|
fp-computer-programming/cycle-5-labs-p22anishiyama
|
e712059922d63d1a8084b7f907ad7bc1085d7099
|
65b2f2b9de431013bea14593024a13dcca671c69
|
refs/heads/master
| 2023-08-25T14:24:45.489265
| 2021-10-29T14:44:26
| 2021-10-29T14:44:26
| 419,737,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
# Author: ATN 10/25/21
import time
import math
t1 = time.perf_counter()
math.pow(2, 2)
t2 = time.perf_counter()
speed1 = t2 - t1
print(speed1)
t3 = time.perf_counter()
2 ** 2
t4 = time.perf_counter()
speed2 = t4 - t3
print(speed2)
|
[
"p22anishiyama@fairfieldprep.org"
] |
p22anishiyama@fairfieldprep.org
|
a890dc792470d462bc393e0a9032f07d41b0bb38
|
4faca7dbc379bd48d0c49de3c5443f432f75651f
|
/withGUI.py
|
e5dc997d8dde5fb6be0f9ba372506ff1947b4319
|
[] |
no_license
|
adityadroid/Alphabeter
|
aa08c767c97b859158a4fbc05ca061508ec1cc62
|
c0012fc2c41d0a23eefbb89554f3e9100c2d51bc
|
refs/heads/master
| 2021-06-11T01:02:14.661708
| 2017-02-07T19:17:47
| 2017-02-07T19:17:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,791
|
py
|
import turtle
from tkinter import *
from tkinter import ttk
def printBlank():
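    # draw a gap in the background colour so consecutive letters don't touch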
turtle.color(str(bg.get()))
turtle.forward(30)
def printA(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
def printB(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
def printC(color):
turtle.color(color)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.color(str(bg.get()))
turtle.forward(100)
turtle.left(90)
def printD(color):
turtle.color(color)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.left(90)
def printE(color):
turtle.color(color)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.color(str(bg.get()))
turtle.forward(100)
turtle.left(90)
def printF(color):
turtle.color(color)
turtle.left(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.color(str(bg.get()))
turtle.forward(100)
turtle.left(90)
def printG(color):
turtle.color(color)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.color(str(bg.get()))
turtle.forward(50)
turtle.color(color)
turtle.forward(50)
turtle.left(90)
def printH(color):
turtle.color(color)
turtle.left(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(180)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.color(str(bg.get()))
turtle.forward(50)
turtle.right(90)
turtle.color(color)
turtle.forward(100)
turtle.left(90)
def printI(color):
turtle.color(color)
turtle.forward(50)
turtle.left(180)
turtle.forward(25)
turtle.right(90)
turtle.forward(100)
turtle.left(90)
turtle.forward(25)
turtle.left(180)
turtle.forward(50)
turtle.color(str(bg.get()))
turtle.right(90)
turtle.forward(100)
turtle.left(90)
def printJ(color):
turtle.left(90)
turtle.forward(50)
turtle.left(180)
turtle.color(color)
turtle.forward(50)
turtle.left(90)
turtle.forward(25)
turtle.left(90)
turtle.forward(100)
turtle.left(90)
turtle.forward(25)
turtle.left(180)
turtle.forward(50)
turtle.color(str(bg.get()))
turtle.right(90)
turtle.forward(100)
turtle.left(90)
def printK(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.left(180)
turtle.forward(50)
turtle.left(135)
turtle.forward(70.71)
turtle.left(180)
turtle.forward(70.71)
turtle.left(90)
turtle.forward(70.71)
turtle.left(45)
def printL(color):
turtle.left(90)
turtle.forward(100)
turtle.color(color)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
turtle.forward(50)
def printM(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(135)
turtle.forward(55.90)
turtle.left(90)
turtle.forward(55.90)
turtle.right(135)
turtle.forward(100)
turtle.left(90)
def printN(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(150)
turtle.forward(115.70)
turtle.left(150)
turtle.forward(100)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
def printO(color):
turtle.left(90)
turtle.forward(5)
turtle.color(color)
turtle.forward(90)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(90)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(180)
turtle.forward(7.07)
turtle.left(45)
turtle.forward(40)
turtle.color(str(bg.get()))
turtle.forward(5)
def printP(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(45)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(45)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.color(str(bg.get()))
turtle.forward(50)
def printQ(color):
turtle.left(90)
turtle.forward(10)
turtle.color(color)
turtle.forward(80)
turtle.right(45)
turtle.forward(14.14)
turtle.right(45)
turtle.forward(30)
turtle.right(45)
turtle.forward(14.14)
turtle.right(45)
turtle.forward(80)
turtle.right(45)
turtle.forward(14.14)
turtle.right(45)
turtle.forward(30)
turtle.right(45)
turtle.forward(14.14)
turtle.right(180)
turtle.forward(14.14)
turtle.left(45)
turtle.forward(30)
turtle.left(45)
turtle.forward(7.07)
turtle.left(90)
turtle.forward(7.07)
turtle.left(180)
turtle.forward(14.14)
turtle.left(180)
turtle.forward(7.07)
turtle.left(90)
turtle.forward(7.07)
turtle.left(135)
turtle.color(str(bg.get()))
turtle.forward(5)
def printR(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(45)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(45)
turtle.left(180)
turtle.forward(20)
turtle.right(60)
turtle.forward(58.30)
turtle.left(60)
def printS(color):
turtle.color(color)
turtle.forward(45)
turtle.left(45)
turtle.forward(7.07)
turtle.left(45)
turtle.forward(40)
turtle.left(45)
turtle.forward(7.07)
turtle.left(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(40)
turtle.right(45)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(45)
turtle.color(str(bg.get()))
turtle.forward(5)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(5)
turtle.left(180)
def printT(color):
turtle.forward(30)
turtle.left(90)
turtle.color(color)
turtle.forward(100)
turtle.left(90)
turtle.forward(30)
turtle.left(180)
turtle.forward(60)
turtle.color(str(bg.get()))
turtle.right(90)
turtle.forward(100)
turtle.left(90)
def printU(color):
turtle.forward(5)
turtle.color(color)
turtle.left(135)
turtle.forward(7.07)
turtle.right(45)
turtle.forward(95)
turtle.left(180)
turtle.forward(95)
turtle.left(45)
turtle.forward(7.07)
turtle.left(45)
turtle.forward(40)
turtle.left(45)
turtle.forward(7.07)
turtle.left(45)
turtle.forward(95)
turtle.left(180)
turtle.forward(95)
turtle.right(45)
turtle.forward(7.07)
turtle.left(135)
turtle.color(str(bg.get()))
turtle.forward(5)
def printV(color):
turtle.forward(25)
turtle.color(color)
turtle.left(135)
turtle.forward(55.90)
turtle.right(45)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.left(45)
turtle.forward(55.90)
turtle.left(90)
turtle.forward(55.90)
turtle.left(45)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.color(str(bg.get()))
turtle.forward(50)
turtle.left(90)
def printW(color):
turtle.color(color)
turtle.left(90)
turtle.forward(100)
turtle.left(180)
turtle.forward(100)
turtle.left(135)
turtle.forward(55.90)
turtle.right(90)
turtle.forward(55.90)
turtle.left(135)
turtle.forward(100)
turtle.left(180)
turtle.forward(100)
turtle.left(90)
def printX(color):
turtle.forward(50)
turtle.color(color)
turtle.left(116.551)
turtle.forward(111.80)
turtle.left(180)
turtle.forward(55.90)
turtle.right(53.102)
turtle.forward(55.90)
turtle.left(180)
turtle.forward(111.80)
turtle.right(153.449)
turtle.color(str(bg.get()))
turtle.forward(100)
turtle.left(90)
def printY(color):
turtle.forward(25)
turtle.color(color)
turtle.left(90)
turtle.forward(50)
turtle.left(40)
turtle.forward(65)
turtle.left(180)
turtle.forward(65)
turtle.left(100)
turtle.forward(65)
turtle.left(180)
turtle.forward(65)
turtle.left(40)
turtle.forward(50)
turtle.left(90)
turtle.color(str(bg.get()))
turtle.forward(30)
def printZ(color):
turtle.color(color)
turtle.left(63.45)
turtle.forward(111.80)
turtle.left(116.55)
turtle.forward(50)
turtle.left(180)
turtle.forward(50)
turtle.right(116.551)
turtle.forward(111.80)
turtle.left(116.55)
turtle.forward(50)
def printNewLine():
turtle.color(str(bg.get()))
turtle.right(90)
turtle.forward(10)
turtle.left(90)
turtle.setx(-200)
turtle.sety(turtle.ycor()-150)
def printData():
# switcher = {
# 'A': printA(str(fg.get())),
# 'B': printB(str(fg.get())),
# 'C': printC(str(fg.get())),
# 'D': printD(str(fg.get())),
# 'E': printE(str(fg.get())),
# 'F': printF(str(fg.get())),
# 'G': printG(str(fg.get())),
# 'H': printH(str(fg.get())),
# 'I': printI(str(fg.get())),
# 'J': printJ(str(fg.get())),
# 'K': printK(str(fg.get())),
# 'L': printL(str(fg.get())),
# 'M': printM(str(fg.get())),
# 'N': printN(str(fg.get())),
# 'O': printO(str(fg.get())),
# 'P': printP(str(fg.get())),
# 'Q': printQ(str(fg.get())),
# 'R': printR(str(fg.get())),
# 'S': printS(str(fg.get())),
# 'T': printT(str(fg.get())),
# 'U': printU(str(fg.get())),
# 'V': printV(str(fg.get())),
# 'W': printW(str(fg.get())),
# 'X': printX(str(fg.get())),
# 'Y': printY(str(fg.get())),
# 'Z': printZ(str(fg.get())),
#
# }
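    # Note (added): the dict above was likely abandoned because a dict
    # literal evaluates its values eagerly -- building it would call every
    # printX(...) immediately instead of dispatching on the key. Wrapping
    # each value in a lambda (e.g. 'A': lambda: printA(str(fg.get())))
    # would defer the calls and make the lookup-table approach work.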
turtle.color(str(bg.get()))
turtle.setx(-300)
turtle.sety(200)
turtle.speed(3)
turtle.width(int(font.get()))
turtle.bgcolor(str(bg.get()))
print(str(bg.get()))
print(str(fg.get()))
print(str(txt.get()))
txtnew = str(txt.get()).upper()
print(txtnew)
# printA(str(fg.get()))
# printBlank()
# printBlank()
# printZ(str(fg.get()))
for i in txtnew:
if turtle.xcor() > 500:
printNewLine()
if i == 'A':
print("inside")
printA(str(fg.get()))
elif i == 'B':
printB(str(fg.get()))
elif i == 'C':
printC(str(fg.get()))
elif i == 'D':
printD(str(fg.get()))
elif i == 'E':
printE(str(fg.get()))
elif i == 'F':
printF(str(fg.get()))
elif i == 'G':
printG(str(fg.get()))
elif i == 'H':
printH(str(fg.get()))
elif i == 'I':
printI(str(fg.get()))
elif i == 'J':
printJ(str(fg.get()))
elif i == 'K':
printK(str(fg.get()))
elif i == 'L':
printL(str(fg.get()))
elif i == 'M':
printM(str(fg.get()))
elif i == 'N':
printN(str(fg.get()))
elif i == 'O':
printO(str(fg.get()))
elif i == 'P':
printP(str(fg.get()))
elif i == 'Q':
printQ(str(fg.get()))
elif i == 'R':
printR(str(fg.get()))
elif i == 'S':
printS(str(fg.get()))
elif i == 'T':
printT(str(fg.get()))
elif i == 'U':
printU(str(fg.get()))
elif i == 'V':
printV(str(fg.get()))
elif i == 'W':
printW(str(fg.get()))
elif i == 'X':
printX(str(fg.get()))
elif i == 'Y':
printY(str(fg.get()))
elif i == 'Z':
printZ(str(fg.get()))
elif i == ' ':
printBlank()
printBlank()
turtle.done()
root = Tk()
root.title("I'll do what you ask me to!")
mainframe = ttk.Frame(root,padding = "25 25 25 25")
mainframe.grid(column=0,row=0, sticky = (N,W,E,S))
mainframe.columnconfigure(0,weight = 1)
mainframe.rowconfigure(0, weight= 1)
txt = StringVar()
bg = StringVar()
fg = StringVar()
font = StringVar()
font.set(10)
bg.set("white")
ttk.Label(mainframe, text= "Text:").grid(column = 1, row = 1, sticky = (W,E))
textEntry = ttk.Entry(mainframe, width = 7,textvariable = txt)
textEntry.grid(column =1, row = 2, sticky = (W,E))
ttk.Label(mainframe, text = "ForeGround Color:").grid(column=1,row =3,sticky = (W,E))
fgEntry = ttk.Entry(mainframe, width = 7,textvariable = fg)
fgEntry.grid(column =1, row = 4, sticky = (W,E))
ttk.Label(mainframe, text = "BackGround Color:").grid(column=1,row =5,sticky = (W,E))
bgEntry = ttk.Entry(mainframe, width = 7,textvariable = bg)
bgEntry.grid(column =1, row = 6, sticky = (W,E))
ttk.Label(mainframe, text = "Font Size:").grid(column=1,row =7,sticky = (W,E))
fontEntry = ttk.Entry(mainframe, width = 7,textvariable = font)
fontEntry.grid(column =1, row = 8, sticky = (W,E))
ttk.Button(mainframe,text="Okay?", command = printData).grid(column= 1, row = 9, sticky = (W,E))
for child in mainframe.winfo_children():
child.grid_configure(padx=5,pady=5)
textEntry.focus()
root.mainloop()
|
[
"adityaadi1467@gmail.com"
] |
adityaadi1467@gmail.com
|
0ac9d28c7e8325cfbf311caef311479a4339cfd7
|
438703d149788b6dec63f9d5e50acc5db7cab73c
|
/flask_application/controllers/admin.py
|
01df5be6f21d20282a8ed66cccfdf2553c6e19a6
|
[] |
no_license
|
kozdowy/picgridder
|
a190f4e9fdeafa72af84f7cb6e0f76d397da78c3
|
9bb73deca5f7db0a265efb8b1ee4a0e77858f32e
|
refs/heads/master
| 2016-09-05T21:08:45.721794
| 2014-09-07T05:43:45
| 2014-09-07T05:43:45
| 23,750,728
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from flask import Blueprint
from flask.ext.security import roles_required
from flask_application.controllers import TemplateView
admin = Blueprint('admin', __name__, url_prefix='/admin')
class AdminView(TemplateView):
route = '/admin'
template_name = 'security/index.html'
decorators = [roles_required('admin')]
def get_context_data(self, *args, **kwargs):
return {
'content': 'This is the Admin Page'
}
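# Added note (assumption, not shown in this file): the blueprint would be
# registered on the app elsewhere via app.register_blueprint(admin); how
# AdminView binds to its `route` depends on the project's TemplateView base.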
|
[
"hanooter@gmail.com"
] |
hanooter@gmail.com
|
3dd7a6c1cc0e7b493acc79ecedfa610981f4a0c2
|
6e8d58340f2be5f00d55e2629052c0bbc9dcf390
|
/eggs/numpy-1.6.0-py2.7-linux-x86_64-ucs4.egg/numpy/f2py/auxfuncs.py
|
a12d92b7ea6a8df62af61ded3a2fcb333b26d37c
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
JCVI-Cloud/galaxy-tools-prok
|
e57389750d33ac766e1658838cdb0aaf9a59c106
|
3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c
|
refs/heads/master
| 2021-05-02T06:23:05.414371
| 2014-03-21T18:12:43
| 2014-03-21T18:12:43
| 6,092,693
| 0
| 2
|
NOASSERTION
| 2020-07-25T20:38:17
| 2012-10-05T15:57:38
|
Python
|
UTF-8
|
Python
| false
| false
| 19,936
|
py
|
#!/usr/bin/env python
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
__version__ = "$Revision: 1.65 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
import types
import cfuncs
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
options={}
debugoptions=[]
wrapfuncs = 1
if sys.version_info[0] >= 3:
from functools import reduce
def outmess(t):
if options.get('verbose',1):
sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec']=='character' and (not isexternal(var))
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that
# `character*(*) a(m)` and `character a(m,*)`
# are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1]=='(*)'
def isarray(var):
return 'dimension' in var and (not isexternal(var))
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and var.get('typespec') in ['complex','double complex']
def islogical(var):
return isscalar(var) and var.get('typespec')=='logical'
def isinteger(var):
return isscalar(var) and var.get('typespec')=='integer'
def isreal(var):
return isscalar(var) and var.get('typespec')=='real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer','logical']:
return 0
return get_kind(var)=='8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var)=='32'
def iscomplexarray(var):
return isarray(var) and var.get('typespec') in ['complex','double complex']
def isint1array(var):
return isarray(var) and var.get('typespec')=='integer' \
and get_kind(var)=='1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='8'
def isallocatable(var):
return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
return not (not 'dimension' in var or isstring(var))
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
return ('block' in rout and 'module'==rout['block'])
def isfunction(rout):
return ('block' in rout and 'function'==rout['block'])
#def isfunction_wrap(rout):
# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
def isfunction_wrap(rout):
if isintent_c(rout):
return 0
return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return ('block' in rout and 'subroutine'==rout['block'])
def issubroutine_wrap(rout):
if isintent_c(rout):
return 0
return issubroutine(rout) and hasassumedshape(rout)
def hasassumedshape(rout):
if rout.get('hasassumedshape'):
return True
for a in rout['args']:
for d in rout['vars'].get(a,{}).get('dimension',[]):
if d==':':
rout['hasassumedshape'] = True
return True
return False
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
Run the following test before using it in your applications:
$(f2py install dir)/test-site/{b/runme_scalar,e/runme}
When using GNU gcc/g77 compilers, codes should work correctly.
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
return ('attrspec' in var and 'external' in var['attrspec'])
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
if 'intent' not in var:
return 1
if 'hide' in var['intent']:
return 0
if 'inplace' in var['intent']:
return 0
if 'in' in var['intent']:
return 1
if 'out' in var['intent']:
return 0
if 'inout' in var['intent']:
return 0
if 'outin' in var['intent']:
return 0
return 1
def isintent_inout(var):
return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent']
def isintent_out(var):
return 'out' in var.get('intent',[])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent',[])
# def isintent_f(var):
# return not isintent_c(var)
def isintent_cache(var):
return 'cache' in var.get('intent',[])
def isintent_copy(var):
return 'copy' in var.get('intent',[])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent',[])
def isintent_callback(var):
return 'callback' in var.get('intent',[])
def isintent_inplace(var):
return 'inplace' in var.get('intent',[])
def isintent_aux(var):
return 'aux' in var.get('intent',[])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent',[])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent',[])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent',[])
isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT',
isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE',
isintent_cache:'INTENT_CACHE',
isintent_c:'INTENT_C',isoptional:'OPTIONAL',
isintent_inplace:'INTENT_INPLACE',
isintent_aligned4:'INTENT_ALIGNED4',
isintent_aligned8:'INTENT_ALIGNED8',
isintent_aligned16:'INTENT_ALIGNED16',
}
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"',"'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
pass
class throw_error:
def __init__(self,mess):
self.mess = mess
def __call__(self,var):
mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess)
raise F2PYError,mess
def l_and(*f):
l,l2='lambda v',[]
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l,i,i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l,' and '.join(l2)))
def l_or(*f):
l,l2='lambda v',[]
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l,i,i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l,' or '.join(l2)))
def l_not(f):
return eval('lambda v,f=f:not f(v)')
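# Illustrative usage (added comment): these combinators build composite
# predicates over variable dicts, e.g.
#     l_or(isintent_inout, isintent_inplace)(var)
# is true when `var` is declared intent(inout) or intent(inplace); see the
# isintent_hide check above for an in-tree use of exactly this pattern.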
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname']==''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name=='':
raise KeyError
if not name:
errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout,blockname,comment=1,counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r: return
if counter>0 and type(r) is type(''):
return
if type(r) is type([]):
if counter>=len(r): return
r = r[counter]
if r[:3]=="'''":
if comment:
r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:]
else:
r = r[3:]
if r[-3:]=="'''":
if comment:
r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/'
else:
r = r[:-3]
else:
errmess("%s multiline block should end with `'''`: %s\n" \
% (blockname,repr(r)))
return r
def getcallstatement(rout):
return getmultilineblock(rout,'callstatement')
def getcallprotoargument(rout,cb_map={}):
r = getmultilineblock(rout,'callprotoargument',comment=0)
if r: return r
if hascallstatement(rout):
outmess('warning: callstatement is defined without callprotoargument\n')
return
from capi_maps import getctype
arg_types,arg_types2 = [],[]
if l_and(isstringfunction,l_not(isfunction_wrap))(rout):
arg_types.extend(['char*','size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n]+'_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c,l_or(isscalar,iscomplex))(var):
pass
elif isstring(var):
pass
#ctype = 'void*'
else:
ctype = ctype+'*'
if isstring(var) or isarrayofstrings(var):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types+arg_types2)
if not proto_args:
proto_args = 'void'
#print proto_args
return proto_args
def getusercode(rout):
return getmultilineblock(rout,'usercode')
def getusercode1(rout):
return getmultilineblock(rout,'usercode',counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout,'pymethoddef')
def getargs(rout):
sortargs,args=[],[]
if 'args' in rout:
args=rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=rout['args']
return args,sortargs
def getargs2(rout):
sortargs,args=[],rout.get('args',[])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=auxvars + rout['args']
return args,sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block']=='python module':
k = rout['block'],rout['name']
return rout['f2pymultilines'].get(k,None)
def gentitle(name):
l=(80-len(name)-6)//2
return '/*%s %s %s*/'%(l*'*',name,l*'*')
def flatlist(l):
if type(l)==types.ListType:
return reduce(lambda x,y,f=flatlist:x+f(y),l,[])
return [l]
def stripcomma(s):
if s and s[-1]==',': return s[:-1]
return s
def replace(str,d,defaultsep=''):
if type(d)==types.ListType:
return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),d)
if type(str)==types.ListType:
return map(lambda s,f=replace,sep=defaultsep,d=d:f(s,d,sep),str)
for k in 2*d.keys():
if k=='separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep=d['separatorsfor'][k]
else:
sep=defaultsep
if type(d[k])==types.ListType:
str=str.replace('#%s#'%(k),sep.join(flatlist(d[k])))
else:
str=str.replace('#%s#'%(k),d[k])
return str
def dictappend(rd,ar):
if type(ar)==types.ListType:
for a in ar:
rd=dictappend(rd,a)
return rd
for k in ar.keys():
if k[0]=='_':
continue
if k in rd:
if type(rd[k])==str:
rd[k]=[rd[k]]
if type(rd[k])==types.ListType:
if type(ar[k])==types.ListType:
rd[k]=rd[k]+ar[k]
else:
rd[k].append(ar[k])
elif type(rd[k])==types.DictType:
if type(ar[k])==types.DictType:
if k=='separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1]=ar[k][k1]
else:
rd[k]=dictappend(rd[k],ar[k])
else:
rd[k]=ar[k]
return rd
def applyrules(rules,d,var={}):
ret={}
if type(rules)==types.ListType:
for r in rules:
rr=applyrules(r,d,var)
ret=dictappend(ret,rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs':rules['need']},d,var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k=='separatorsfor':
ret[k]=rules[k]; continue
if type(rules[k])==str:
ret[k]=replace(rules[k],d)
elif type(rules[k])==types.ListType:
ret[k]=[]
for i in rules[k]:
ar=applyrules({k:i},d,var)
if k in ar:
ret[k].append(ar[k])
elif k[0]=='_':
continue
elif type(rules[k])==types.DictType:
ret[k]=[]
for k1 in rules[k].keys():
if type(k1)==types.FunctionType and k1(var):
if type(rules[k][k1])==types.ListType:
for i in rules[k][k1]:
if type(i)==types.DictType:
res=applyrules({'supertext':i},d,var)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i,d))
else:
i=rules[k][k1]
if type(i)==types.DictType:
res=applyrules({'supertext':i},d)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i,d))
else:
errmess('applyrules: ignoring rule %s.\n'%`rules[k]`)
if type(ret[k])==types.ListType:
if len(ret[k])==1:
ret[k]=ret[k][0]
if ret[k]==[]:
del ret[k]
return ret
|
[
"root@ip-10-118-137-129.ec2.internal"
] |
root@ip-10-118-137-129.ec2.internal
|
0188b4ca6eafecea318ab321afff936f95e18e40
|
41d6abf94adc5699aff40d862aa32adb03b952b1
|
/attendance/attendance/prof/migrations/0004_auto_20170311_1924.py
|
427c7077ed93f981b768b219a87d1b3fd1c2087e
|
[] |
no_license
|
AndreiTich/TopAttendance
|
ec134252f76bbdbe23664277291b05e207737ee6
|
44c4091bcaf7637eea51382548b5a2465be59e91
|
refs/heads/master
| 2020-05-21T06:01:47.109394
| 2017-03-11T22:29:45
| 2017-03-11T22:29:45
| 84,583,020
| 2
| 2
| null | 2017-03-10T19:19:59
| 2017-03-10T17:11:08
|
Python
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 19:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prof', '0003_merge_20170311_1550'),
]
operations = [
migrations.AlterField(
model_name='attendance',
name='class_code',
field=models.CharField(max_length=4, unique=True),
),
]
|
[
"menglingsilence@gmail.com"
] |
menglingsilence@gmail.com
|
e05063621f0c473ff8ad2a30c1e044d911e4b514
|
ef91c9eea0f4c2b67111e48f7dace22307fbc584
|
/three.py/TestRenderTarget.py
|
f3094ac47d9baaea1216f29ad445a6f842e7d847
|
[
"MIT"
] |
permissive
|
lukestanley/three.py
|
7cca1f1688419fa98edaa8157c693d475f9f6bfb
|
a3fa99cb3553aca8c74ceabb8203edeb55450803
|
refs/heads/master
| 2020-05-22T05:46:09.248617
| 2019-05-12T10:42:55
| 2019-05-12T10:57:07
| 186,241,212
| 0
| 0
|
MIT
| 2019-05-12T10:33:48
| 2019-05-12T10:33:48
| null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
from core import *
from cameras import *
from geometry import *
from material import *
from helpers import *
class TestTemplate(Base):
def initialize(self):
self.setWindowTitle('Test')
self.setWindowSize(800,800)
self.renderer = Renderer()
self.renderer.setViewportSize(800,800)
self.renderer.setClearColor(0.25, 0.25, 0.25)
self.scene = Scene()
self.camera = PerspectiveCamera()
self.camera.transform.setPosition(0, 1, 5)
self.camera.transform.lookAt(0, 0, 0)
self.cameraControls = FirstPersonController(self.input, self.camera)
skyTexture = OpenGLUtils.initializeTexture("images/skysphere.jpg")
sky = Mesh( SphereGeometry(200, 64,64), SurfaceBasicMaterial(texture=skyTexture) )
self.scene.add(sky)
gridTexture = OpenGLUtils.initializeTexture("images/color-grid.png")
sphere = Mesh( SphereGeometry(), SurfaceBasicMaterial(texture=gridTexture) )
self.scene.add(sphere)
floorMesh = GridHelper(size=10, divisions=10, gridColor=[0,0,0], centerColor=[1,0,0])
floorMesh.transform.rotateX(-3.14/2, Matrix.LOCAL)
self.scene.add(floorMesh)
self.skycam = PerspectiveCamera()
self.skycam.transform.setPosition(0, 5, 1)
self.skycam.transform.lookAt(0,0,0)
self.renderTarget = RenderTarget(1024,1024)
self.quad = Mesh( QuadGeometry(), SurfaceBasicMaterial(texture=self.renderTarget.textureID) )
self.quad.transform.setPosition(2,0,0)
self.scene.add( self.quad )
def update(self):
self.cameraControls.update()
if self.input.resize():
size = self.input.getWindowSize()
self.camera.setAspectRatio( size["width"]/size["height"] )
self.renderer.setViewportSize(size["width"], size["height"])
self.renderer.render(self.scene, self.skycam, self.renderTarget)
self.renderer.render(self.scene, self.camera)
# instantiate and run the program
TestTemplate().run()
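# Added note: update() renders the scene twice per frame -- first from skycam
# into the offscreen RenderTarget whose texture the quad displays, then from
# the main camera to the window -- the standard render-to-texture pattern.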
|
[
"stemkoski@gmail.com"
] |
stemkoski@gmail.com
|
5cbcaaa43ef258823c6c27044d41b401cda0c79d
|
6b301b0b0d5fea69e6ab6d3fcfd0a9741143a9b7
|
/config/jupyter/.ipython/profile_default/startup/00-setup-spark.py
|
0219daccbe9e74cbcbd99ab8d59a1f0b6a772a72
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
frankiegu/pipeline
|
c7a166e80ccc6a351c32fb1918a41268f2380140
|
3526f58cc9b4d824a23300cd60c647a753902774
|
refs/heads/master
| 2021-01-11T06:09:36.914324
| 2016-10-03T05:33:41
| 2016-10-03T05:33:41
| 69,836,618
| 1
| 0
| null | 2016-10-03T02:56:09
| 2016-10-03T02:56:09
| null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import glob
import os
import sys
# Setup SPARK_HOME
spark_home = os.getenv('SPARK_HOME', None)
if not spark_home:
raise ValueError('SPARK_HOME environment variable is not set')
# Add Spark's Python libraries to sys.path
sys.path.insert(0, os.path.join(spark_home, 'python'))
for lib in glob.glob(os.path.join(spark_home, 'python/lib/py4j-*-src.zip')):
sys.path.insert(0, lib)
os.environ['PYSPARK_SUBMIT_ARGS']='--master %s %s pyspark-shell' % (os.getenv('SPARK_MASTER'), os.getenv('SPARK_SUBMIT_ARGS'))
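# Illustrative follow-up (assumption, not part of this startup file): with
# sys.path and PYSPARK_SUBMIT_ARGS prepared above, a notebook cell can start
# Spark the usual way:
#     from pyspark import SparkContext
#     sc = SparkContext(appName="notebook")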
|
[
"chris@fregly.com"
] |
chris@fregly.com
|
34906a49299704ce8c70279a90752f8f06fab619
|
7c8bd2e26fdabf1555e0150272ecf035f6c21bbd
|
/ps프로젝트/BS/숫자카드2.py
|
8734a278232da1fa846614d424d7f3945e467c48
|
[] |
no_license
|
hyeokjinson/algorithm
|
44090c2895763a0c53d48ff4084a96bdfc77f953
|
46c04e0f583d4c6ec4f51a24f19a373b173b3d5c
|
refs/heads/master
| 2021-07-21T10:18:43.918149
| 2021-03-27T12:27:56
| 2021-03-27T12:27:56
| 245,392,582
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
from collections import Counter
def check(v):
lt=0
rt=n-1
cnt=0
while lt<=rt:
mid=(lt+rt)//2
if arr[mid]==v:
return 1
elif arr[mid]>v:
rt=mid-1
else:
lt=mid+1
return 0
if __name__ == '__main__':
n=int(input())
arr=list(map(int,input().split()))
arr.sort()
m=int(input())
arr1=list(map(int,input().split()))
c=Counter(arr)
res=[]
for i in range(m):
if check(arr1[i]):
res.append(c[arr1[i]])
else:
res.append(0)
for x in res:
print(x,end=' ')
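# Note (added): Counter already returns 0 for keys it has never seen, so the
# binary-search check() is redundant here -- res.append(c[arr1[i]]) alone
# would produce the same output. The search is kept as originally written.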
|
[
"hjson817@gmail.com"
] |
hjson817@gmail.com
|
730e2ad889abd7253a50a6192e8170560f90da0e
|
25835efb6081aa857c347c686f1c776060f9548b
|
/numpy/lib/_version.py
|
4eaabd0ff5ef0527ededaf88b0ebfa69bdfedbea
|
[] |
no_license
|
hildensia/numpy
|
43e085cccc138ac666c8e13fe54c79cbe2d967ef
|
9dd46ee2ed9fc55942d9ec13532d817c3b36d322
|
refs/heads/master
| 2021-01-16T20:55:46.797588
| 2014-05-19T13:35:52
| 2014-05-19T13:35:52
| 20,024,779
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,866
|
py
|
"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
from __future__ import division, absolute_import, print_function
import re
from numpy.compat import basestring
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
    can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance. Note that all development versions of the same
(pre-)release compare equal.
.. versionadded:: 1.9.0
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Examples
--------
>>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (basestring, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, basestring):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
    def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
|
[
"charlesr.harris@gmail.com"
] |
charlesr.harris@gmail.com
|
a53ec68b0e8ce40a7cda19097562ab614461ffc1
|
b05761d771bb5a85d39d370c649567c1ff3eb089
|
/venv/lib/python3.10/site-packages/pip/_internal/operations/install/wheel.py
|
6a5fa1d94277dc9ee39c696ded14e23b17d938e6
|
[] |
no_license
|
JawshyJ/Coding_Practice
|
88c49cab955eab04609ec1003b6b8c20f103fc06
|
eb6b229d41aa49b1545af2120e6bee8e982adb41
|
refs/heads/master
| 2023-02-19T10:18:04.818542
| 2023-02-06T21:22:58
| 2023-02-06T21:22:58
| 247,788,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,185
|
py
|
"""Support for installing and building the "wheel" binary package format.
"""
import collections
import compileall
import contextlib
import csv
import importlib
import logging
import os.path
import re
import shutil
import sys
import warnings
from base64 import urlsafe_b64decode, urlsafe_b64encode
from email.message import Message
from itertools import chain, filterfalse, starmap
from typing import (
IO,
TYPE_CHECKING,
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
NewType,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
from zipfile import ZipFile, ZipInfo
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.distlib.util import get_export_entry
from pip._vendor.pkg_resources import Distribution
from pip._vendor.six import ensure_str, ensure_text, reraise
from pip._internal.exceptions import InstallationError
from pip._internal.locations import get_major_minor_version
from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
from pip._internal.models.scheme import SCHEME_KEYS, Scheme
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
from pip._internal.utils.unpacking import (
current_umask,
is_within_directory,
set_extracted_file_to_default_mode_plus_executable,
zip_item_is_executable,
)
from pip._internal.utils.wheel import parse_wheel, pkg_resources_distribution_for_wheel
if TYPE_CHECKING:
from typing import Protocol
class File(Protocol):
src_record_path = None # type: RecordPath
dest_path = None # type: str
changed = None # type: bool
def save(self):
# type: () -> None
pass
logger = logging.getLogger(__name__)
RecordPath = NewType('RecordPath', str)
InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
def rehash(path, blocksize=1 << 20):
# type: (str, int) -> Tuple[str, str]
"""Return (encoded_digest, length) for path using hashlib.sha256()"""
h, length = hash_file(path, blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, str(length))
def csv_io_kwargs(mode):
# type: (str) -> Dict[str, Any]
"""Return keyword arguments to properly open a CSV file
in the given mode.
"""
return {'mode': mode, 'newline': '', 'encoding': 'utf-8'}
def fix_script(path):
# type: (str) -> bool
"""Replace #!python with #!/path/to/python
Return True if file was changed.
"""
# XXX RECORD hashes will need to be updated
assert os.path.isfile(path)
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
        exename = firstline[2:].strip()  # drop the newline kept by readline()
firstline = b'#!/usr/bin/env ' + exename + os.linesep.encode("ascii")
rest = script.read()
# If the file is installed from the pool, let's unlink it before
# writing the new version.
if not os.access(path, os.W_OK):
os.unlink(path)
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
def wheel_root_is_purelib(metadata):
# type: (Message) -> bool
return metadata.get("Root-Is-Purelib", "").lower() == "true"
def get_entrypoints(distribution):
# type: (Distribution) -> Tuple[Dict[str, str], Dict[str, str]]
# get the entry points and then the script names
try:
console = distribution.get_entry_map('console_scripts')
gui = distribution.get_entry_map('gui_scripts')
except KeyError:
# Our dict-based Distribution raises KeyError if entry_points.txt
# doesn't exist.
return {}, {}
def _split_ep(s):
# type: (pkg_resources.EntryPoint) -> Tuple[str, str]
"""get the string representation of EntryPoint,
remove space and split on '='
"""
split_parts = str(s).replace(" ", "").split("=")
return split_parts[0], split_parts[1]
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
def message_about_scripts_not_on_PATH(scripts):
# type: (Sequence[str]) -> Optional[str]
"""Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None.
"""
if not scripts:
return None
# Group scripts by the path they were installed in
grouped_by_dir = collections.defaultdict(set) # type: Dict[str, Set[str]]
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
# We don't want to warn for directories that are on PATH.
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ.get("PATH", "").split(os.pathsep)
]
# If an executable sits with sys.executable, we don't warn for it.
# This covers the case of venv invocations without activating the venv.
not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
} # type: Dict[str, Set[str]]
if not warn_for:
return None
# Format a message
msg_lines = []
for parent_dir, dir_scripts in warn_for.items():
sorted_scripts = sorted(dir_scripts) # type: List[str]
if len(sorted_scripts) == 1:
start_text = "script {} is".format(sorted_scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
# Add a note if any directory starts with ~
warn_for_tilde = any(
i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
)
if warn_for_tilde:
tilde_warning_msg = (
"NOTE: The current PATH contains path(s) starting with `~`, "
"which may not be expanded by all applications."
)
msg_lines.append(tilde_warning_msg)
# Returns the formatted multiline message
return "\n".join(msg_lines)
def _normalized_outrows(outrows):
# type: (Iterable[InstalledCSVRow]) -> List[Tuple[str, str, str]]
"""Normalize the given rows of a RECORD file.
Items in each row are converted into str. Rows are then sorted to make
the value more predictable for tests.
Each row is a 3-tuple (path, hash, size) and corresponds to a record of
a RECORD file (see PEP 376 and PEP 427 for details). For the rows
passed to this function, the size can be an integer as an int or string,
or the empty string.
"""
# Normally, there should only be one row per path, in which case the
# second and third elements don't come into play when sorting.
# However, in cases in the wild where a path might happen to occur twice,
# we don't want the sort operation to trigger an error (but still want
# determinism). Since the third element can be an int or string, we
# coerce each element to a string to avoid a TypeError in this case.
# For additional background, see--
# https://github.com/pypa/pip/issues/5868
return sorted(
(ensure_str(record_path, encoding='utf-8'), hash_, str(size))
for record_path, hash_, size in outrows
)
def _record_to_fs_path(record_path):
# type: (RecordPath) -> str
return record_path
def _fs_to_record_path(path, relative_to=None):
# type: (str, Optional[str]) -> RecordPath
if relative_to is not None:
# On Windows, do not handle relative paths if they belong to different
# logical disks
if os.path.splitdrive(path)[0].lower() == \
os.path.splitdrive(relative_to)[0].lower():
path = os.path.relpath(path, relative_to)
path = path.replace(os.path.sep, '/')
return cast('RecordPath', path)
def _parse_record_path(record_column):
# type: (str) -> RecordPath
p = ensure_text(record_column, encoding='utf-8')
return cast('RecordPath', p)
def get_csv_rows_for_installed(
old_csv_rows, # type: List[List[str]]
installed, # type: Dict[RecordPath, RecordPath]
changed, # type: Set[RecordPath]
generated, # type: List[str]
lib_dir, # type: str
):
# type: (...) -> List[InstalledCSVRow]
"""
:param installed: A map from archive RECORD path to installation RECORD
path.
"""
installed_rows = [] # type: List[InstalledCSVRow]
for row in old_csv_rows:
if len(row) > 3:
logger.warning('RECORD line has more than three elements: %s', row)
old_record_path = _parse_record_path(row[0])
new_record_path = installed.pop(old_record_path, old_record_path)
if new_record_path in changed:
digest, length = rehash(_record_to_fs_path(new_record_path))
else:
digest = row[1] if len(row) > 1 else ''
length = row[2] if len(row) > 2 else ''
installed_rows.append((new_record_path, digest, length))
for f in generated:
path = _fs_to_record_path(f, lib_dir)
digest, length = rehash(f)
installed_rows.append((path, digest, length))
for installed_record_path in installed.values():
installed_rows.append((installed_record_path, '', ''))
return installed_rows
def get_console_script_specs(console):
# type: (Dict[str, str]) -> List[str]
"""
Given the mapping from entrypoint name to callable, return the relevant
console script specs.
"""
# Don't mutate caller's version
console = console.copy()
scripts_to_generate = []
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append('pip = ' + pip_script)
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
scripts_to_generate.append(
'pip{} = {}'.format(sys.version_info[0], pip_script)
)
scripts_to_generate.append(
f'pip{get_major_minor_version()} = {pip_script}'
)
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append(
'easy_install = ' + easy_install_script
)
scripts_to_generate.append(
'easy_install-{} = {}'.format(
get_major_minor_version(), easy_install_script
)
)
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console entry points specified in the wheel
scripts_to_generate.extend(starmap('{} = {}'.format, console.items()))
return scripts_to_generate
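# Illustrative (added): with no ENSUREPIP_OPTIONS set, installing pip itself
# on Python 3.9 yields specs along the lines of
#     'pip = pip._internal.cli.main:main'
#     'pip3 = pip._internal.cli.main:main'
#     'pip3.9 = pip._internal.cli.main:main'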
class ContentAddressablePool:
def __init__(self, cache_dir, save, symlink):
# type: (str, bool, bool) -> None
self.cache_dir = cache_dir
self.save = save
self.symlink = symlink
def path_for_digest(self, digest):
# type: (str) -> str
return os.path.join(
self.cache_dir,
'pool',
digest[:2],
digest[2:4],
digest[4:6],
digest[6:]
)
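        # Example (added comment): a digest 'deadbeef...' maps to
        # <cache_dir>/pool/de/ad/be/ef..., sharding cached files across
        # three directory levels so no single directory grows too large.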
class ZipBackedFile:
def __init__(
self,
src_record_path, # type: RecordPath
dest_path, # type: str
zip_file, # type: ZipFile
sha256_hash, # type: Optional[str]
pool, # type: Optional[ContentAddressablePool]
):
# type: (...) -> None
self.src_record_path = src_record_path
self.dest_path = dest_path
self._zip_file = zip_file
self.changed = False
self.sha256_hash = sha256_hash
self.pool = pool
def _getinfo(self):
# type: () -> ZipInfo
return self._zip_file.getinfo(self.src_record_path)
def save(self):
# type: () -> None
# When we open the output file below, any existing file is truncated
# before we start writing the new contents. This is fine in most
# cases, but can cause a segfault if pip has loaded a shared
# object (e.g. from pyopenssl through its vendored urllib3)
# Since the shared object is mmap'd an attempt to call a
# symbol in it will then cause a segfault. Unlinking the file
# allows writing of new contents while allowing the process to
# continue to use the old copy.
if os.path.exists(self.dest_path):
os.unlink(self.dest_path)
def _save(dest_path, writable=True):
# type: (str, bool) -> None
# directory creation is lazy and after file filtering
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
parent_dir = os.path.dirname(dest_path)
ensure_dir(parent_dir)
zipinfo = self._getinfo()
with self._zip_file.open(zipinfo) as f:
with open(dest_path, "wb") as dest:
shutil.copyfileobj(f, dest)
if zip_item_is_executable(zipinfo):
set_extracted_file_to_default_mode_plus_executable(
dest_path,
writable=writable
)
if self.sha256_hash is not None and self.pool is not None:
cached_path = self.pool.path_for_digest(self.sha256_hash)
if not os.path.isfile(cached_path):
if not self.pool.save:
# We're not going to use the pool.
_save(self.dest_path, writable=True)
return
# Save to cache and symlink from there.
_save(cached_path, writable=False)
parent_dir = os.path.dirname(self.dest_path)
ensure_dir(parent_dir)
if self.pool.symlink:
os.symlink(cached_path, self.dest_path)
return
# Fall back to a hard link. This might not work in all
# platforms and situations, so fall back to regular
# copying if this fails.
try:
os.link(cached_path, self.dest_path)
return
except OSError:
# This is moderately expected. Fall back to copy.
pass
_save(self.dest_path, writable=True)
class ScriptFile:
def __init__(self, file):
# type: (File) -> None
self._file = file
self.src_record_path = self._file.src_record_path
self.dest_path = self._file.dest_path
self.changed = False
def save(self):
# type: () -> None
self._file.save()
self.changed = fix_script(self.dest_path)
class MissingCallableSuffix(InstallationError):
def __init__(self, entry_point):
# type: (str) -> None
super().__init__(
"Invalid script entry point: {} - A callable "
"suffix is required. Cf https://packaging.python.org/"
"specifications/entry-points/#use-for-scripts for more "
"information.".format(entry_point)
)
def _raise_for_invalid_entrypoint(specification):
# type: (str) -> None
entry = get_export_entry(specification)
if entry is not None and entry.suffix is None:
raise MissingCallableSuffix(str(entry))
class PipScriptMaker(ScriptMaker):
def make(self, specification, options=None):
# type: (str, Dict[str, Any]) -> List[str]
_raise_for_invalid_entrypoint(specification)
return super().make(specification, options)
def _install_wheel(
name, # type: str
wheel_zip, # type: ZipFile
wheel_path, # type: str
scheme, # type: Scheme
pycompile=True, # type: bool
noop=False, # type: bool
warn_script_location=True, # type: bool
direct_url=None, # type: Optional[DirectUrl]
requested=False, # type: bool
pool=None, # type: Optional[ContentAddressablePool]
):
# type: (...) -> None
"""Install a wheel.
:param name: Name of the project to install
:param wheel_zip: open ZipFile for wheel being installed
:param scheme: Distutils scheme dictating the install directories
    :param noop: If true, compute the install mapping but write nothing
        to disk
:param pycompile: Whether to byte-compile installed Python files
:param warn_script_location: Whether to check that scripts are installed
into a directory on PATH
:param pool: An optional content-addressable pool cache
:raises UnsupportedWheel:
* when the directory holds an unpacked wheel with incompatible
Wheel-Version
* when the .dist-info dir does not match the wheel
"""
info_dir, metadata = parse_wheel(wheel_zip, name)
if wheel_root_is_purelib(metadata):
lib_dir = scheme.purelib
else:
lib_dir = scheme.platlib
distribution = pkg_resources_distribution_for_wheel(
wheel_zip, name, wheel_path
)
record_text = distribution.get_metadata('RECORD')
record_rows = list(csv.reader(record_text.splitlines()))
digests = {} # type: Dict[RecordPath, str]
if pool is not None:
for row in record_rows:
if len(row) < 3:
continue
parsed_record_path = _parse_record_path(row[0])
if '=' not in row[1]:
continue
digest_name, b64hash = row[1].split('=', 1)
if digest_name != 'sha256':
continue
digests[parsed_record_path] = urlsafe_b64decode(f'{b64hash}=').hex()
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {} # type: Dict[RecordPath, RecordPath]
changed = set() # type: Set[RecordPath]
generated = [] # type: List[str]
def record_installed(srcfile, destfile, modified=False):
# type: (RecordPath, str, bool) -> None
"""Map archive RECORD paths to installation RECORD paths."""
newpath = _fs_to_record_path(destfile, lib_dir)
installed[srcfile] = newpath
if modified:
changed.add(_fs_to_record_path(destfile))
def all_paths():
# type: () -> Iterable[RecordPath]
names = wheel_zip.namelist()
# If a flag is set, names may be unicode in Python 2. We convert to
# text explicitly so these are valid for lookup in RECORD.
decoded_names = map(ensure_text, names)
for name in decoded_names:
yield cast("RecordPath", name)
def is_dir_path(path):
# type: (RecordPath) -> bool
return path.endswith("/")
def assert_no_path_traversal(dest_dir_path, target_path):
# type: (str, str) -> None
if not is_within_directory(dest_dir_path, target_path):
message = (
"The wheel {!r} has a file {!r} trying to install"
" outside the target directory {!r}"
)
raise InstallationError(
message.format(wheel_path, target_path, dest_dir_path)
)
def root_scheme_file_maker(zip_file, dest):
# type: (ZipFile, str) -> Callable[[RecordPath], File]
def make_root_scheme_file(record_path):
# type: (RecordPath) -> File
normed_path = os.path.normpath(record_path)
dest_path = os.path.join(dest, normed_path)
assert_no_path_traversal(dest, dest_path)
return ZipBackedFile(
record_path,
dest_path,
zip_file,
digests.get(record_path),
pool
)
return make_root_scheme_file
def data_scheme_file_maker(zip_file, scheme):
# type: (ZipFile, Scheme) -> Callable[[RecordPath], File]
scheme_paths = {}
for key in SCHEME_KEYS:
encoded_key = ensure_text(key)
scheme_paths[encoded_key] = ensure_text(
getattr(scheme, key), encoding=sys.getfilesystemencoding()
)
def make_data_scheme_file(record_path):
# type: (RecordPath) -> File
normed_path = os.path.normpath(record_path)
try:
_, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
except ValueError:
message = (
"Unexpected file in {}: {!r}. .data directory contents"
" should be named like: '<scheme key>/<path>'."
).format(wheel_path, record_path)
raise InstallationError(message)
try:
scheme_path = scheme_paths[scheme_key]
except KeyError:
valid_scheme_keys = ", ".join(sorted(scheme_paths))
message = (
"Unknown scheme key used in {}: {} (for file {!r}). .data"
" directory contents should be in subdirectories named"
" with a valid scheme key ({})"
).format(
wheel_path, scheme_key, record_path, valid_scheme_keys
)
raise InstallationError(message)
dest_path = os.path.join(scheme_path, dest_subpath)
assert_no_path_traversal(scheme_path, dest_path)
return ZipBackedFile(
record_path,
dest_path,
zip_file,
digests.get(record_path),
pool
)
return make_data_scheme_file
def is_data_scheme_path(path):
# type: (RecordPath) -> bool
return path.split("/", 1)[0].endswith(".data")
paths = all_paths()
file_paths = filterfalse(is_dir_path, paths)
root_scheme_paths, data_scheme_paths = partition(
is_data_scheme_path, file_paths
)
make_root_scheme_file = root_scheme_file_maker(
wheel_zip,
ensure_text(lib_dir, encoding=sys.getfilesystemencoding()),
)
files = map(make_root_scheme_file, root_scheme_paths)
def is_script_scheme_path(path):
# type: (RecordPath) -> bool
parts = path.split("/", 2)
return (
len(parts) > 2 and
parts[0].endswith(".data") and
parts[1] == "scripts"
)
other_scheme_paths, script_scheme_paths = partition(
is_script_scheme_path, data_scheme_paths
)
make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
files = chain(files, other_scheme_files)
# Get the defined entry points
console, gui = get_entrypoints(distribution)
def is_entrypoint_wrapper(file):
# type: (File) -> bool
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
path = file.dest_path
name = os.path.basename(path)
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
script_scheme_files = filterfalse(
is_entrypoint_wrapper, script_scheme_files
)
script_scheme_files = map(ScriptFile, script_scheme_files)
files = chain(files, script_scheme_files)
if noop:
# Nothing to do here.
return
for file in files:
file.save()
record_installed(file.src_record_path, file.dest_path, file.changed)
def pyc_source_file_paths():
# type: () -> Iterator[str]
# We de-duplicate installation paths, since there can be overlap (e.g.
# file in .data maps to same location as file in wheel root).
# Sorting installation paths makes it easier to reproduce and debug
# issues related to permissions on existing files.
for installed_path in sorted(set(installed.values())):
full_installed_path = os.path.join(lib_dir, installed_path)
if not os.path.isfile(full_installed_path):
continue
if not full_installed_path.endswith('.py'):
continue
yield full_installed_path
def pyc_output_path(path):
# type: (str) -> str
"""Return the path the pyc file would have been written to.
"""
return importlib.util.cache_from_source(path)
# Compile all of the pyc files for the installed files
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for path in pyc_source_file_paths():
# Python 2's `compileall.compile_file` requires a str in
# error cases, so we must convert to the native type.
path_arg = ensure_str(
path, encoding=sys.getfilesystemencoding()
)
success = compileall.compile_file(
path_arg, force=True, quiet=True
)
if success:
pyc_path = pyc_output_path(path)
assert os.path.exists(pyc_path)
pyc_record_path = cast(
"RecordPath", pyc_path.replace(os.path.sep, "/")
)
record_installed(pyc_record_path, pyc_path)
logger.debug(stdout.getvalue())
maker = PipScriptMaker(None, scheme.scripts)
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = {''}
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Generate the console and GUI entry points specified in the wheel
scripts_to_generate = get_console_script_specs(console)
gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))
generated_console_scripts = maker.make_multiple(scripts_to_generate)
generated.extend(generated_console_scripts)
generated.extend(
maker.make_multiple(gui_scripts_to_generate, {'gui': True})
)
if warn_script_location:
msg = message_about_scripts_not_on_PATH(generated_console_scripts)
if msg is not None:
logger.warning(msg)
generated_file_mode = 0o666 & ~current_umask()
@contextlib.contextmanager
def _generate_file(path, **kwargs):
# type: (str, **Any) -> Iterator[BinaryIO]
with adjacent_tmp_file(path, **kwargs) as f:
yield f
os.chmod(f.name, generated_file_mode)
replace(f.name, path)
dest_info_dir = os.path.join(lib_dir, info_dir)
# Record pip as the installer
installer_path = os.path.join(dest_info_dir, 'INSTALLER')
with _generate_file(installer_path) as installer_file:
installer_file.write(b'pip\n')
generated.append(installer_path)
# Record the PEP 610 direct URL reference
if direct_url is not None:
direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
with _generate_file(direct_url_path) as direct_url_file:
direct_url_file.write(direct_url.to_json().encode("utf-8"))
generated.append(direct_url_path)
# Record the REQUESTED file
if requested:
requested_path = os.path.join(dest_info_dir, 'REQUESTED')
with open(requested_path, "wb"):
pass
generated.append(requested_path)
rows = get_csv_rows_for_installed(
record_rows,
installed=installed,
changed=changed,
generated=generated,
lib_dir=lib_dir)
# Record details of all files installed
record_path = os.path.join(dest_info_dir, 'RECORD')
with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
# The type mypy infers for record_file is different for Python 3
# (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly
# cast to typing.IO[str] as a workaround.
writer = csv.writer(cast('IO[str]', record_file))
writer.writerows(_normalized_outrows(rows))
@contextlib.contextmanager
def req_error_context(req_description):
# type: (str) -> Iterator[None]
try:
yield
except InstallationError as e:
message = "For req: {}. {}".format(req_description, e.args[0])
reraise(
InstallationError, InstallationError(message), sys.exc_info()[2]
)
def install_wheel(
name, # type: str
wheel_path, # type: str
scheme, # type: Scheme
req_description, # type: str
pycompile=True, # type: bool
noop=False, # type: bool
warn_script_location=True, # type: bool
direct_url=None, # type: Optional[DirectUrl]
requested=False, # type: bool
pool=None, # type: Optional[ContentAddressablePool]
):
# type: (...) -> None
with ZipFile(wheel_path, allowZip64=True) as z:
with req_error_context(req_description):
_install_wheel(
name=name,
wheel_zip=z,
wheel_path=wheel_path,
scheme=scheme,
pycompile=pycompile,
noop=noop,
warn_script_location=warn_script_location,
direct_url=direct_url,
requested=requested,
pool=pool,
)
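# A minimal usage sketch, assuming this module's usual Scheme import
# (pip._internal.models.scheme.Scheme); the paths and wheel filename below are
# hypothetical, not values pip computes itself.
if __name__ == "__main__":
    _scheme = Scheme(
        platlib="/tmp/target/lib",
        purelib="/tmp/target/lib",
        headers="/tmp/target/include",
        scripts="/tmp/target/bin",
        data="/tmp/target",
    )
    install_wheel(
        name="example",
        wheel_path="example-1.0-py3-none-any.whl",  # hypothetical wheel file
        scheme=_scheme,
        req_description="example==1.0",
    )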
|
[
"37465112+JawshyJ@users.noreply.github.com"
] |
37465112+JawshyJ@users.noreply.github.com
|
d0d85506bedc1b3fd956f4a58667a48b4a024fe7
|
889a615a4c503ff7c7452af55965f77b5c086beb
|
/venv/Scripts/easy_install-3.7-script.py
|
d4a1f8247617c92a91022bd1cedeccbb4b65ab19
|
[] |
no_license
|
james98k/tvs
|
1a4e275df35033e1cf7d8116f21a76e739f19baa
|
6aaee124ffe0c1340df9295f0b00867a84f8f125
|
refs/heads/master
| 2022-01-10T12:58:18.900680
| 2019-07-02T21:05:49
| 2019-07-02T21:05:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!C:\Users\jr\PycharmProjects\FYP\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"maricaneston38@gmail.com"
] |
maricaneston38@gmail.com
|
9bd7d4a09546d875c2b8004638a23d7321c41824
|
944913d11bd936616396ce57e79b5c13a4581be3
|
/scripts/spread_list.py
|
6370342e4f31daa4ad4d39d9708e356b3a47786a
|
[
"MIT"
] |
permissive
|
jcodinal/edit_google_spreadsheet
|
67ddae1bf4a275ec13ef9bd1f9949071e06915ed
|
44d21a0444c31170c512a4e43e524afc0b82c1bd
|
refs/heads/master
| 2020-04-06T06:57:33.586237
| 2016-09-03T10:23:48
| 2016-09-03T10:23:48
| 61,422,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Script Name : spread_list.py
# Author : jordi codina
# Created : 20/06/2016
# Last Modified : 20/06/2016
# Version : 1.0.0
# License : MIT
# Description : return a list of dictionaries {row_title: cell content}
import re
import get_service
def list(site_in):
print('INIT_list')
service_in = get_service.get_service()
service = service_in[0]
spreadsheetId = service_in[1]
    rangeName = 'A1:2000'  # this range refers to the first sheet; to target a specific sheet use 'sheet_name!A1:2000'
result = service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=rangeName).execute()
    values = result.get('values')
    nrow = 0
    dictionary = {}
    dictionary_list = []
    if not values:
        print('No data found.')
        return {'cel_list': [], 'titles': []}
    titles = values[0]  # the first row holds the column titles
    if not site_in:
nrow = 1
for row in values[1:]:
nrow = nrow + 1
site = []
dictionary = dict(zip(titles, row))
site.append(nrow)
site.append(dictionary)
dictionary_list.append(site)
else :
for row in values:
nrow = nrow + 1
for s in row:
if re.findall(site_in, s):
site = []
dictionary = dict(zip(titles, row))
site.append(nrow)
site.append(dictionary)
dictionary_list.append(site)
print(len(dictionary_list))
print('end_list')
return {'cel_list':dictionary_list, 'titles':titles}
if __name__ == '__main__':
    list("")
|
[
"jordi@travel"
] |
jordi@travel
|
141c53674db1533ee152ffe3d7fa8435892d9e7c
|
08a80a4862ab321e6d8327d2824910d3d98bf7be
|
/SD-Card-Contents/usr/bin/python2.7-config
|
eb2cf9ab507c312a065c8a450e68e89bb7311a7c
|
[] |
no_license
|
farseeker/pirate-3d-buccaneer
|
2afc779d2b99219fc4594d7453554fc815f78ffd
|
fe8b3ac16d7df09571fcc0c540cfb6476b8a7d71
|
refs/heads/master
| 2022-10-31T14:02:20.851024
| 2018-03-06T01:47:37
| 2018-03-06T01:47:37
| 123,995,114
| 1
| 1
| null | 2022-10-27T19:36:48
| 2018-03-06T00:06:58
|
C++
|
UTF-8
|
Python
| false
| false
| 1,668
|
#!/usr/bin/python2.7
import sys
import os
import getopt
from distutils import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
def exit_with_usage(code=1):
print >>sys.stderr, "Usage: %s [%s]" % (sys.argv[0],
'|'.join('--'+opt for opt in valid_opts))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print sysconfig.PREFIX
elif opt == '--exec-prefix':
print sysconfig.EXEC_PREFIX
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_python_inc(),
'-I' + sysconfig.get_python_inc(plat_specific=True)]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print ' '.join(flags)
elif opt in ('--libs', '--ldflags'):
libs = getvar('LIBS').split() + getvar('SYSLIBS').split()
libs.append('-lpython'+pyver)
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print ' '.join(libs)
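# Example invocations (a usage sketch; output paths depend on the local install):
#   $ python2.7-config --includes
#   $ python2.7-config --cflags --ldflags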
|
[
"markness@gmail.com"
] |
markness@gmail.com
|
|
43b720aa6186d5142bf19c70b95377a6e09392e7
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0_cuda/build/memsic_imu/catkin_generated/generate_cached_setup.py
|
a7913516ce6d717d1e4e1c8927d7d0035dfa6bef
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380
| 2019-12-03T07:12:49
| 2019-12-03T07:12:49
| 225,167,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/memsic_imu/devel/env.sh')
output_filename = '/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/memsic_imu/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"907097904@qq.com"
] |
907097904@qq.com
|
39833c3be9db4f6ed802fb2998b4e637175c6a75
|
d2d906d703e2e43b1a1c42eb6791a30fef604377
|
/real_coded_ga.py
|
fde85314666d5815b8ac01e8a0b9e4c0390ef586
|
[] |
no_license
|
HaRshA10D/mlda
|
d0f087aa343cdb26fa7e218eb25353a9730807cf
|
dfe320382a7f34a821fd7555b823c1f9249a4b9e
|
refs/heads/master
| 2021-01-18T13:16:46.052584
| 2017-09-15T18:16:13
| 2017-09-15T18:16:13
| 100,375,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 17:26:43 2017
@author: Harsha SlimShady
"""
import math
import random
import matplotlib.pyplot as plt
import numpy as np
class ga(object):
def __init__(self,chromosomes):
self.chromosomes = chromosomes
self.length = len(self.chromosomes)
self.fitness = self.evaluate(self.chromosomes)
def evaluate(self,chromosomes):
fitness = []
for x in chromosomes:
fitness.append(math.sin(x))
return fitness
def getValues(self):
self.fitness = self.evaluate(self.chromosomes)
return self.chromosomes,self.fitness
def tournamentSelection(self):
parents = []
for i in range(0,self.length):
rand = np.random.choice(range(0,self.length),2,replace=False)
if(self.fitness[rand[0]]>self.fitness[rand[1]]):
parents.append(self.chromosomes[rand[0]])
else:
parents.append(self.chromosomes[rand[1]])
self.parents = parents
def crossover(self,pc):
nu = 20
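        # Simulated binary crossover (SBX): the spread factor b is sampled so the
        # two children straddle the parents; a larger nu keeps children closer to the parents.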
children = []
for i in range(0,int(pc*self.length)):
rand = np.random.choice(range(0,self.length),2,replace=False)
r = random.random()
if(r>0.5):
b = (1/(2*(1-r)))**(1/(nu+1))
else:
b = (2*r)**(1/(nu+1))
c1 = 1/2*((1+b)*self.parents[rand[0]]+(1-b)*self.parents[rand[1]])
c2 = 1/2*((1-b)*self.parents[rand[0]]+(1+b)*self.parents[rand[1]])
children.append(c1)
children.append(c2)
self.children = children
def mutation(self,pm):
nu = 20
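        # Polynomial mutation: d is a signed perturbation in (-1, 1); a larger nu concentrates it near zero.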
mutants = []
for i in range(0,int(pm*0.8*len(self.children)*2)+1):
r = random.random()
if(r>0.5):
d = 1-(2*(1-r))**(1/(nu+1))
else:
d = (2*r)**(1/(nu+1))-1
mutants.append(self.children[random.randint(0,len(self.children)-1)]+d)
self.mutants = mutants
def selectforNextgen(self):
total = []
total.extend(self.chromosomes)
total.extend(self.children)
total.extend(self.mutants)
totalfitness = np.sin(np.array(total))
totalfitness = totalfitness.tolist()
        # keep the highest-fitness individuals (sin is being maximized, matching tournamentSelection)
        self.chromosomes = [x for (y, x) in sorted(zip(totalfitness, total), reverse=True)][:self.length]
if __name__ == "__main__":
points = np.linspace(0,2*math.pi,100)
sinvals = np.sin(points)
initialch = []
for i in range(0,10):
initialch.append(random.random()*2*math.pi)
for i in range(0,100):
chset = ga(initialch)
chset.tournamentSelection()
chset.crossover(0.8)
chset.mutation(0.2)
chset.selectforNextgen()
initialch, fitness = chset.getValues()
chromosomes, fitness = chset.getValues()
#chset = ga(initialch)
#chromosomes, fitness = chset.getValues()
print(fitness)
plt.plot(points,sinvals)
plt.scatter(chromosomes,fitness)
plt.show()
|
[
"noreply@github.com"
] |
HaRshA10D.noreply@github.com
|
1fa21a996ce86ac5fad72af0eec2946f98dd82bf
|
dc1c83330a8639edefec09dd688dd6830acc43b0
|
/urls.py
|
3f72d3f20e1ab074f0e3000c676307bcac83d594
|
[] |
no_license
|
bradleysp/glbrc-task
|
f96436a498c6090f49bcd356f2ba518526ba669a
|
204a0faddce8d0580a65868be4be77a512907b06
|
refs/heads/master
| 2021-04-15T04:58:24.894625
| 2016-07-24T19:21:44
| 2016-07-24T19:21:44
| 64,081,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth import views as auth_views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'mysite.app.views.home', name='home'),
url(r'^edit_home$', 'mysite.app.views.edit_home', name='edit_home'),
# url(r'^mysite/', include('mysite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^accounts/logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
)
|
[
"bradleysp@vcu.edu"
] |
bradleysp@vcu.edu
|
5c0537d348a3468866db5e6fc02449f939b98820
|
65769cc7508c63d96c85d52d61a1db6ede6444d1
|
/app/admin.py
|
bfbecd6925f18e40270a95c401aa5075375fc4a4
|
[] |
no_license
|
eleyine/SDP1-Tinder
|
bb69859cc02725092364e3db38e4836fa5563a68
|
205e70f2bcf3ec6036d8e84ad04884040f23f833
|
refs/heads/master
| 2021-05-04T11:24:08.703541
| 2017-12-16T18:19:35
| 2017-12-16T18:19:35
| 46,077,871
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
from django.contrib import admin
from app.models.models import UserProfile, Event, SwipeAction
class UserProfileAdmin(admin.ModelAdmin):
list_display = (
'full_name',
'num_votes',
'num_right_swipes',
'num_left_swipes',
'num_views',
'age',
)
class EventAdmin(admin.ModelAdmin):
list_display = (
'name',
'is_active',
'get_num_participants',
)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(SwipeAction)
|
[
"eleyine@gmail.com"
] |
eleyine@gmail.com
|
8407da9d9925caafbab77822da0543ccc6e4cf22
|
21c0caf043f3452257780038c5d087c440d8cdb8
|
/hammerblade/torch/tests/test_profiling.py
|
2255ce94646387a4cb3981f781f09864755c085b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
Kofi-A-Efah/hb-pytorch
|
3f47c43d8dd7f12da3e8b4b0e626819ded1a937c
|
c06e40f210fdb5ee98a980c08976459c0bfa2612
|
refs/heads/master
| 2022-07-16T03:56:25.837046
| 2020-05-18T00:41:24
| 2020-05-18T00:41:24
| 265,396,517
| 0
| 0
|
NOASSERTION
| 2020-05-19T23:50:24
| 2020-05-19T23:50:23
| null |
UTF-8
|
Python
| false
| false
| 4,839
|
py
|
"""
Unit tests for torch.hammerblade.profiler
05/06/2020 Lin Cheng (lc873@cornell.edu)
"""
import json
import torch
import random
torch.manual_seed(42)
random.seed(42)
torch.hammerblade.init()
def test_ROI():
assert not torch.hammerblade.profiler.is_in_ROI()
torch.hammerblade.profiler.enable()
assert torch.hammerblade.profiler.is_in_ROI()
torch.hammerblade.profiler.disable()
assert not torch.hammerblade.profiler.is_in_ROI()
def test_ROI_2():
assert not torch.hammerblade.profiler.is_in_ROI()
torch.hammerblade.profiler.enable()
assert torch.hammerblade.profiler.is_in_ROI()
torch.hammerblade.profiler.disable()
assert not torch.hammerblade.profiler.is_in_ROI()
def test_execution_time_1():
x = torch.ones(100000)
torch.hammerblade.profiler.enable()
x = torch.randn(100000)
y = x + x
torch.hammerblade.profiler.disable()
fancy = torch.hammerblade.profiler.exec_time.fancy_print()
assert fancy.find("aten::randn") != -1
assert fancy.find("aten::add") != -1
assert fancy.find("aten::ones") == -1
def test_execution_time_2():
x = torch.ones(100000)
torch.hammerblade.profiler.enable()
x = torch.randn(100000)
y = x + x
torch.hammerblade.profiler.disable()
stack = torch.hammerblade.profiler.exec_time.raw_stack()
assert stack.find("at::Tensor at::CPUType::{anonymous}::add(const at::Tensor&, const at::Tensor&, c10::Scalar)") != -1
assert stack.find("at::Tensor at::TypeDefault::randn(c10::IntArrayRef, const c10::TensorOptions&)") != -1
assert stack.find("at::Tensor at::TypeDefault::ones(c10::IntArrayRef, const c10::TensorOptions&)") == -1
assert stack.find("at::Tensor& at::native::legacy::cpu::_th_normal_(at::Tensor&, double, double, at::Generator*)") != -1
assert stack.find("at::native::add_stub::add_stub()") != -1
def test_unimpl_1():
x = torch.ones(100000)
torch.hammerblade.profiler.enable()
x = torch.randn(100000)
y = x + x
torch.hammerblade.profiler.disable()
unimpl = torch.hammerblade.profiler.unimpl.fancy_print()
assert unimpl.find("aten::normal_") != -1
def test_unimpl_2():
x = torch.ones(100000)
x = torch.randn(100000)
torch.hammerblade.profiler.enable()
y = x + x
torch.hammerblade.profiler.disable()
unimpl = torch.hammerblade.profiler.unimpl.fancy_print()
assert unimpl.find("aten::normal_") == -1
def test_chart_1():
M = torch.randn(2, 3)
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
torch.hammerblade.profiler.chart.clear()
torch.hammerblade.profiler.chart.add("at::Tensor at::CPUType::{anonymous}::addmm(const at::Tensor&, const at::Tensor&, const at::Tensor&, c10::Scalar, c10::Scalar)")
torch.hammerblade.profiler.enable()
torch.add(M, mat1)
torch.hammerblade.profiler.disable()
torch.hammerblade.profiler.chart.clear()
chart = torch.hammerblade.profiler.chart.json()
assert chart == "[]\n"
def test_chart_2():
M = torch.randn(2, 3)
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
torch.hammerblade.profiler.chart.clear()
torch.hammerblade.profiler.chart.add("at::Tensor at::CPUType::{anonymous}::addmm(const at::Tensor&, const at::Tensor&, const at::Tensor&, c10::Scalar, c10::Scalar)")
torch.hammerblade.profiler.enable()
torch.add(M, mat1)
torch.addmm(M, mat1, mat2)
torch.addmm(M, mat1, mat2)
torch.hammerblade.profiler.disable()
torch.hammerblade.profiler.chart.clear()
chart = torch.hammerblade.profiler.chart.json()
golden = """[
{
"offload": false,
"signature": "at::Tensor at::CPUType::{anonymous}::addmm(const at::Tensor&, const at::Tensor&, const at::Tensor&, c10::Scalar, c10::Scalar)"
},
{
"offload": false,
"signature": "at::Tensor at::CPUType::{anonymous}::addmm(const at::Tensor&, const at::Tensor&, const at::Tensor&, c10::Scalar, c10::Scalar)"
}
]
"""
assert chart == golden
def test_route_1():
M = torch.randn(2, 3)
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
route = """[
{
"offload": true,
"signature": "at::Tensor at::CPUType::{anonymous}::addmm(const at::Tensor&, const at::Tensor&, const at::Tensor&, c10::Scalar, c10::Scalar)"
},
{
"offload": true,
"signature": "at::Tensor at::CPUType::{anonymous}::add(const at::Tensor&, const at::Tensor&, c10::Scalar)"
}
]
"""
data = json.loads(route)
torch.hammerblade.profiler.route.set_route_from_json(data)
_route = torch.hammerblade.profiler.route.json()
assert _route == route
out1 = torch.addmm(M, mat1, mat2)
out1 = out1 + M
torch.hammerblade.profiler.enable()
out2 = torch.addmm(M, mat1, mat2)
out2 = out2 + M
torch.hammerblade.profiler.disable()
assert torch.allclose(out1, out2)
|
[
"lc873@cornell.edu"
] |
lc873@cornell.edu
|
15275b6cd87f6c6b72eb7f27a3933696df37ebb2
|
8faa346ec071b7ffec6042ec3260cfe9237894cd
|
/pythonscripts/WeatherSearchAPI_V1.py
|
1f1b49af707e32c219f35a106fb5e2fdf9a535d9
|
[] |
no_license
|
changdaniel/glocal-data
|
ce1eb5bb471276a4b898099272d9fd6f2b8b6694
|
9b36106fb7ef3da892968883b953713e4988bbff
|
refs/heads/master
| 2020-05-19T05:29:46.395387
| 2019-05-28T06:56:38
| 2019-05-28T06:56:38
| 184,850,314
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import requests
import json
from pprint import pprint
with open('apikey2.txt', 'r') as apikeyfile:
    api_key = apikeyfile.read().strip()  # strip any trailing newline from the key
url = "http://api.openweathermap.org/data/2.5/forecast?id=4887398&APPID=" + str(api_key)
req = requests.get(url)
data = json.loads(req.text)["list"]
pprint(data[0])
count = 0
for day in data:
if day["weather"][0]["main"] == "Rain":
count += 1
print(count)
with open('WeatherTestDataV1.json', 'w') as outfile:
json.dump(data, outfile, indent = 4)
|
[
"andrewacomb@dhcp-10-105-173-92.wireless.northwestern.private"
] |
andrewacomb@dhcp-10-105-173-92.wireless.northwestern.private
|
348eb95db9fc5e986182b6cff1ec5152971d3c32
|
ffa6bf9120a279e848e71c7ac3fc34133d3b6e1a
|
/scripts/gan/cycle_gan/prepare_data_for_cyclegan_from_dataset.py
|
0d1c1dcdbf960ba05211c6f81e8826740d51dfea
|
[
"MIT"
] |
permissive
|
hiroyasuakada/ros_start
|
8f820f1cbed667706371231139e644e4d4417bcb
|
10221ad2bcaefa4aaadc6c90424a3751126ac256
|
refs/heads/master
| 2022-11-18T02:03:34.357524
| 2020-07-16T01:30:27
| 2020-07-16T01:30:27
| 219,941,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,460
|
py
|
# generate data from bag images
from PIL import Image
from pathlib import Path
import os, glob # manipulate file or directory
import numpy as np
import cv2
import matplotlib.pyplot as plt
class DataArrangement(object):
def __init__(self, height, width):
self.height = height
self.width = width
self.prepared_data_directories = ['train_not_tracking',
'train_tracking',
'train_tracking_with_binary_mask',
]
# 'test_quantitative'
# 'test_quantitative_as_gt'
# 'test_quantitative_by_hand'
# 'test_quantitative_with_binary_mask_by_hand'
self.resize_data_directories = ['trainA', 'trainB'] # trainA: tracking (mask or not), trainB: not_tracking
self.target_dir = ['double', 'single', 'out_to_in', 'in_to_out']
self.dict_for_mask_or_not = None
self.X_not_tracking = []
self.X_not_tracking_i = []
self.Y_tracking = []
self.Y_tracking_i = []
self.iteration = 1
self.directory_name = None
self.new_csv_name = None
def resize_data(self):
for prepared_data in self.prepared_data_directories:
print(prepared_data) # not_tracking or tracking
path = Path(__file__).parent
path /= '../../dataset_for_gan/{}'.format(prepared_data)
directories = os.listdir(path)
for directory in directories:
if directory.find(self.target_dir[0]) != -1\
or directory.find(self.target_dir[1]) != -1\
or directory.find(self.target_dir[2]) != -1\
or directory.find(self.target_dir[3]) != -1:
files = glob.glob(str(path.resolve()) + '/{}/*.jpg'.format(directory))
for file in files:
get_file_name = os.path.basename(file)
output_name = '{}_{}'.format(directory, get_file_name)
image = Image.open(file)
image = image.convert('RGB')
image = image.resize((self.width, self.height)) # 360, 640 or 180, 320
data = np.asarray(image)
print(output_name)
bgr_image = cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
# self.directory_name = './{}_{}/dataset_for_cyclegan_4_situation/{}/{}' \
# .format(self.height, self.width, prepared_data, directory)
#
# if not os.path.exists(self.directory_name):
# os.mkdir(self.directory_name)
# cv2.imwrite(self.directory_name + '/' + get_file_name, bgr_image)
########################################################################################################################
if prepared_data == 'train_not_tracking':
cv2.imwrite('./{}_{}/with_binary_mask_4_situation/trainB'.format(self.height, self.width) +
'/' + output_name, bgr_image)
cv2.imwrite('./{}_{}/without_mask_4_situation/trainB'.format(self.height, self.width) +
'/' + output_name, bgr_image)
elif prepared_data == 'train_tracking':
cv2.imwrite('./{}_{}/without_mask_4_situation/trainA'.format(self.height, self.width) +
'/' + output_name, bgr_image)
elif prepared_data == 'train_tracking_with_binary_mask':
cv2.imwrite('./{}_{}/with_binary_mask_4_situation/trainA'.format(self.height, self.width) +
'/' + output_name, bgr_image)
# elif prepared_data == 'test_qualitative':
# cv2.imwrite('./{}_{}/without_mask_4_situation/test_qualitative'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
#
# elif prepared_data == 'test_qualitative_with_binary_mask':
# cv2.imwrite('./{}_{}/with_binary_mask_4_situation/test_qualitative_with_binary_mask'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
#
# elif prepared_data == 'test_quantitative_as_gt':
# cv2.imwrite('./{}_{}/with_binary_mask_4_situation/test_quantitative_as_gt'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
# cv2.imwrite('./{}_{}/without_mask_4_situation/test_quantitative_as_gt'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
#
# elif prepared_data == 'test_quantitative_by_hand':
# cv2.imwrite('./{}_{}/without_mask_4_situation/test_quantitative_by_hand'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
#
# elif prepared_data == 'test_quantitative_with_binary_mask_by_hand':
# cv2.imwrite('./{}_{}/with_binary_mask_4_situation/test_quantitative_with_binary_mask_by_hand'.format(self.height, self.width) +
# '/' + output_name, bgr_image)
def load_resized_data_for_gan(self, mask=True):
if mask is True:
self.dict_for_mask_or_not = 'with_mask'
else:
self.dict_for_mask_or_not = 'without_mask'
for i, directory in enumerate(self.resize_data_directories):
file_path = './{}_{}/{}/{}'.format(self.height, self.width, self.dict_for_mask_or_not, directory)
files = glob.glob(file_path + '/*.jpg')
for j, file in enumerate(files):
data = cv2.imread(file)
if directory == 'trainA': # trainA: tracking (mask or not)
self.Y_tracking.append(data)
elif directory == 'trainB': # trainB: not_tracking
self.X_not_tracking.append(data)
if int(j + 1) % 5000 == 0:
print('finished image processing for ' + str(5000 * self.iteration))
self.iteration += 1
return np.array(self.X_not_tracking), np.array(self.Y_tracking)
# def load_resized_data_for_lstm_gan(self):
#
# return np.array(self.X_not_tracking), np.array(self.Y_tracking), \
# np.array(self.X_not_tracking_i), np.array(self.Y_tracking_i)
# pass
if __name__ == '__main__':
    DA = DataArrangement(180, 320)  # (height, width) = (180, 320) here; e.g. (360, 640) or (80, 160) also work
DA.resize_data()
print("finished_resize_data")
# X_not_tracking, Y_tracking = DA.load_resized_data_for_gan(mask=True)
# print('X_not_tracking_raw: {}, Y_tracking_raw: {}'.format(X_not_tracking, Y_tracking))
# print('shape of X_not_tracking_raw: {}, shape of Y_tracking_raw: {}'.format(X_not_tracking.shape, Y_tracking.shape))
|
[
"hiroyasu071213@gmail.com"
] |
hiroyasu071213@gmail.com
|
4c0e5007846ba8c267aa7f7fee3d08e596137394
|
3953ee8faa83677c4470242adc45315acd353eff
|
/src/core/matpy.py
|
86cda02cd0b1b22295f1f42a69eb4f542bacc132
|
[
"BSD-2-Clause"
] |
permissive
|
zutshi/S3CAMR
|
83be6ba820207273e6f4b81c55d737b48b097e2a
|
e7a354137729fcc1f87e647efc8d91e5cd40c83d
|
refs/heads/master
| 2021-05-04T11:52:09.707263
| 2019-06-25T00:37:11
| 2019-06-25T00:37:11
| 51,499,986
| 4
| 0
| null | 2016-03-29T22:01:38
| 2016-02-11T07:22:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
# File name: matpy.py
# Author: Aditya
# Python Version: 2.7
#
# #### Description ####
# Provides an interface between Python and their counterpart Matlab functions.
# The primary reason for such a layer is the lack of Matlab R2015a support for
# passing double array of more than 1 dim. This layer takes care of
# serialization and de-serialization. This is done with its matlab counterpart
# simulate_system_external.m
###############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import array
import numpy as np
import err
import external_interface as exifc
# Reachability property structure translated to Matlab
class MatProp(object):
def __init__(self, T, init_cons, final_cons, ci, num_segments, delta_t):
self.T = T
self.init_cons = init_cons
self.final_cons = final_cons
self.w = ci
# self.pi = pi
self.num_segments = num_segments
self.delta_t = delta_t
# TODO: init_cons_list is not handled!
def load_system(file_path):
one_shot_sim, prop = exifc.load_system(file_path)
#T = prop.T
init_cons = serialize_array(prop.init_cons.to_numpy_array())
final_cons = serialize_array(prop.final_cons.to_numpy_array())
ci = serialize_array(prop.ci.to_numpy_array())
print(init_cons, final_cons, ci)
#num_segments = prop.num_segments
mat_prop = MatProp(prop.T,
init_cons,
final_cons,
ci,
prop.num_segments,
prop.delta_t)
def mat_one_shot_sim(x, t0, tf, w):
x = deserialize_array(x)
w = deserialize_array(w)
#print '#'*20
#print x
#print w
trace = one_shot_sim(x, t0, tf, w)
#print trace
T_ser = serialize_array(trace.t_array)
X_ser = serialize_array(trace.x_array)
#print trace.x_array
return (T_ser, X_ser)
return mat_one_shot_sim, mat_prop
# S should be a tuple representing the matrix size.
# This is not trivially generalizable to N-dim arrays; more work needs to be
# done to serialize/deserialize Matlab N-dim arrays.
# At present, use it for matrices only!
# Serialized layout: [shape data]
def serialize_array(x):
if x.ndim > 2:
raise err.Fatal('Interface can only be used for matrices, dim <= 2')
flat_x = x.flatten()
if x.ndim == 1:
s = (1, x.shape[0])
else:
s = x.shape
tmp_x = np.append(s, flat_x)
x_ser = array.array(tmp_x.dtype.char, tmp_x)
return x_ser
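# Illustrative example: serialize_array(np.eye(2)) yields
# array('d', [2.0, 2.0, 1.0, 0.0, 0.0, 1.0]) -- the (2, 2) shape followed by the flat data.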
def deserialize_array(x_ser):
s = x_ser[0:2]
flat_x = x_ser[2:]
# if num rows is 1, interpret it as 1-dim array
if s[0] == 1:
x = np.array(flat_x)
else:
#s.reverse()
s = list(map(int, s))
#x = np.reshape(flat_x, s).T
x = np.reshape(flat_x, s)
#print x
return x
## [ndim shape data]
#def serialize_array(x):
# if x.ndim > 2:
# raise err.Fatal('Interface can only be used for matrices, dim <= 2')
# flat_x = x.flatten()
# tmp_x = np.append(x.shape, flat_x)
# tmp_x = np.append(x.ndim, tmp_x)
# x_ser = array.array(tmp_x.dtype.char, tmp_x)
# return x_ser
#def deserialize_array(x_ser):
# dim = int(x_ser[0])
# s = x_ser[1:1+dim]
# flat_x = x_ser[1+dim:]
# s.reverse()
# s = map(int, s)
# x = np.reshape(flat_x, s).T
# print x
# return x
|
[
"aaditya.zutshi@gmail.com"
] |
aaditya.zutshi@gmail.com
|
6b9ed7d7db3e9970dd955f1765cc3b822c92fcac
|
034d76b114206d8454b2948e370e43e91153fce8
|
/readLabels.py
|
70a2bd2b0d9c14ad9399b0bb7c4b3399539e9f5f
|
[] |
no_license
|
bharadwaj221-zz/TaxonomyAlignment
|
d921fbec8f56defddcd7a4ae20814700bead34c1
|
994737ddf976ed3c2c192e815975822847bcef13
|
refs/heads/master
| 2021-05-29T18:06:34.273743
| 2015-04-26T03:15:01
| 2015-04-26T03:15:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 9 14:31:44 2014
@author: bharadwaj
"""
import sys
import pickle
import string
def getLabel(level, allLabels, category):
if category in allLabels[level]:
return allLabels[level][category]
else:
return '###'
def readLabels(inputFilename):
level = 0
domain=sys.argv[2]
#allLabels = pickle.load(open(domain+'_LABELS.p'))
catMap = {}
labelDict = {}
labels = []
Crumbs=[]
Titles=[]
count=0
currLabel=1
with open(inputFilename) as inputFile:
for line in inputFile:
            words = line.strip().split('\t')
            if len(words) < 3:
                continue  # guard before indexing; this check used to come after words[2]
            url = words[0]
            title = words[1]
            crumb = words[2]
            if "www.amazon" in url and ">" in crumb:
Crumbs.append(crumb)
Titles.append(title)
parts = crumb.split('>')
category=parts[level].strip().lower()
nextCat=parts[level+1].strip().lower()
if len(category)==0 or category.lower() == 'amazon.com' or catMap.has_key(nextCat):
category=nextCat
category=category.translate(None,string.punctuation)
if catMap.has_key(category):
label=catMap[category]
else:
catMap[category]=currLabel
label=currLabel
currLabel+=1
#label = getLabel(level, allLabels, crumb)
if labelDict.has_key(label):
labelDict[label]+=1
else:
labelDict[label]=1
count+=1
if count%1000000 == 0:
print count, ' lines read', '\t', title
pickle.dump(labelDict,open('amazon_LABEL_DICT.p','wb'))
pickle.dump(catMap,open('amazon_CAT_MAP.p','wb'))
pickle.dump(Crumbs,open('Data/'+domain+'_CRUMBS.p','wb'))
pickle.dump(Titles,open('Data/'+domain+'_TITLES.p','wb'))
    return labelDict, catMap  # labels was never populated; return the per-label counts instead
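# Illustrative shapes (values made up): labelDict maps label -> count, e.g. {1: 5032, 2: 118};
# catMap maps category -> label, e.g. {'books': 1, 'electronics': 2}.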
fout = open('LabelMapping.txt', 'wb')
fout.write('Label\tCount\tCategory\n')
labelDict, catMap = readLabels(sys.argv[1])
labelToCat = dict((v, k) for k, v in catMap.items())  # invert: label -> category
for l in labelDict:
    fout.write(str(l) + '\t' + str(labelDict[l]) + '\t' + labelToCat[l] + '\n')
    # print str(l) + '\t' + str(labelDict[l]) + '\t' + labelToCat[l]
fout.close()
|
[
"ec2-user@cam-dev04.production-mr.indix.tv"
] |
ec2-user@cam-dev04.production-mr.indix.tv
|
a891b7dbf6d6642a5556df699356d8e6d45ea81e
|
9eef031728a6cdcd681cad9ba6b0709269383905
|
/examples/test/test_analyzer.py
|
bd467f4878203aa3e45a31a9040cd5ead57b0c12
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
kbeckmann/liteeth
|
906b6f30b5d3be28f2bfac91704c7d5ddf26e85e
|
54acf9fd76c226d7760294ffde86418e52e0951b
|
refs/heads/master
| 2022-12-24T17:02:42.834415
| 2020-08-24T20:14:35
| 2020-08-24T20:14:35
| 300,029,015
| 0
| 0
|
NOASSERTION
| 2020-09-30T19:03:51
| 2020-09-30T19:03:50
| null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
#!/usr/bin/env python3
#
# This file is part of LiteEth.
#
# Copyright (c) 2015-2018 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex import RemoteClient
wb = RemoteClient()
wb.open()
# # #
from litescope.software.driver.analyzer import LiteScopeAnalyzerDriver
analyzer = LiteScopeAnalyzerDriver(wb.regs, "analyzer", debug=True)
analyzer.configure_trigger(cond={})
analyzer.configure_subsampler(1)
analyzer.run(offset=128, length=256)
analyzer.wait_done()
analyzer.upload()
analyzer.save("dump.vcd")
# # #
wb.close()
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
be55b0d5e4c5fd36d7343e43f82667e2fb40726d
|
79ab0f915e2a1820295bebad13def3a74e67da63
|
/are_you_the_one/matchups/migrations/0001_initial.py
|
5e8815f8f88f8696db1df7495eae0aebca70b1ca
|
[] |
no_license
|
iamlostcoast/are-you-the-one
|
d9b9682b293b4a446c2488c3c8c9539ab2d46c4c
|
5d6cc7653790bdad4efd6456e3f1395bdc3a68f3
|
refs/heads/master
| 2020-03-31T06:47:26.201274
| 2018-10-11T05:10:52
| 2018-10-11T05:10:52
| 151,995,273
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Generated by Django 2.1.2 on 2018-10-06 04:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('person_one_first_name', models.CharField(max_length=50)),
('person_one_last_name', models.CharField(max_length=50)),
('person_two_first_name', models.CharField(max_length=50)),
('person_two_last_name', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('queries', models.IntegerField()),
('match_id', models.CharField(max_length=50)),
],
),
]
|
[
"evancasebaker@gmail.com"
] |
evancasebaker@gmail.com
|
bc4d8fdf44a8f6da59b0a8ead9eefac7907e6a29
|
b3455474da0bc27c913ff88908be0d0bddba352d
|
/5.AI/1.Machine Learning/196_mushroom_train2.py
|
0919272787d2e7922608902f2ded949c86259dab
|
[] |
no_license
|
rntva/JumpToPython
|
7286bc94e40b553fa7b9fbca7934f2e35f63b54e
|
090f0ed5bf28ae7832e5edde11936b71b4fb324b
|
refs/heads/master
| 2021-05-01T02:33:44.528975
| 2018-07-18T08:24:07
| 2018-07-18T08:24:07
| 121,182,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
# read in the data
mr = pd.read_csv("mushroom.csv", header=None)
# expand the categorical variables in the data
label = []
data = []
attr_list = []
for row_index, row in mr.iterrows() :
label.append(row.ix[0])
exdata = []
for col, v in enumerate(row.ix[1:]) :
if row_index == 0 :
attr = {"dic" : {}, "cnt" : 0}
attr_list.append(attr)
else :
attr = attr_list[col]
        # represent each mushroom attribute symbol as a one-hot array
d = [0,0,0,0,0,0,0,0,0,0,0,0]
if v in attr["dic"] : idx = attr["dic"][v]
else :
idx = attr["cnt"]
attr["dic"][v] = idx
attr["cnt"] += 1
d[idx] = 1
exdata += d
data.append(exdata)
# split into training and test data
data_train, data_test, label_train, label_test = train_test_split(data, label)
# train the classifier
clf = RandomForestClassifier()
clf.fit(data_train, label_train)
# predict
pre = clf.predict(data_test)
# evaluate the result
ac_score = metrics.accuracy_score(label_test, pre)
print("정답률", ac_score)
|
[
"ltrodl@gmail.com"
] |
ltrodl@gmail.com
|
fedb6ed76a5d7115dd820e753d6a9561b86a1f9e
|
36e27ca74b734994fb2e5cd4e328e7b82202d8cd
|
/nodarb/migrations/0007_nodarb_tips_rada.py
|
23417ec23dc96ae31da304e4df5cc8abde817eeb
|
[] |
no_license
|
svabis/vf
|
5e9513f3a767a9561e2fb8bd3e37bb3c03d113dd
|
d83a4afd177e4f7007a9ce824ae5ed36f18654fc
|
refs/heads/master
| 2020-05-21T21:19:59.952463
| 2018-06-04T11:11:50
| 2018-06-04T11:11:50
| 84,647,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nodarb', '0006_auto_20170311_1644'),
]
operations = [
migrations.AddField(
model_name='nodarb_tips',
name='rada',
field=models.BooleanField(default=True),
),
]
|
[
"fizmats@inbox.lv"
] |
fizmats@inbox.lv
|
b4122b3750f41b40ecd8b04fe0a7bc8cfceb9f07
|
d72b18f827dcde06ec779a14fae94501063f1077
|
/other_not _important/stalkingWhSite/stalkingWhSite/settings.py
|
0dc372459567297ed9d069d6d2e1266e981fb2da
|
[] |
no_license
|
cflowers007/stalkingWh
|
ff5fc11f82d5d4ea653e6d3083b7cad9bdef2f5a
|
d09b30b8757fffb52965c1e648c616af3b2b5f0a
|
refs/heads/master
| 2022-11-06T03:24:26.839884
| 2020-06-17T22:52:15
| 2020-06-17T22:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
"""
Django settings for stalkingWhSite project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aza3&1n$qzb-kx(fphnb7f!w)@hu%1(0_d@rf&$-#o0525&ie-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appWh',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stalkingWhSite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'stalkingWhSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"ro0trexustom@gmail.com"
] |
ro0trexustom@gmail.com
|
6922447130d990d6a1abd8c44c4d5766c07bbb61
|
6d4c0faa73635a7fbd37aa316250996019f069ee
|
/app/models.py
|
713dd92387e77759c6350b02591cf582c5d4cbbb
|
[] |
no_license
|
eddieberklee/django-template
|
d93ad4d9c3145b1cf34348678418f43ec7a353d8
|
7b9bc08937f194e4d4f3a833e1fc46254391c44d
|
refs/heads/master
| 2021-01-01T05:30:22.036116
| 2013-08-03T22:27:19
| 2013-08-03T22:27:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
from django.db import models
# Create your models here.
# Example
# class Note(models.Model):
# title = models.CharField(max_length=255
|
[
"eddieberklee@gmail.com"
] |
eddieberklee@gmail.com
|
43951d7f5587527a42af16d40b357ac2af3970b6
|
c46d15e64c588c424db64a0877a7ab69b8e036e7
|
/users/migrations/0006_remove_profile_image.py
|
b233a8a47db9bbcdc01f4042c3a2805aa25ef914
|
[
"MIT"
] |
permissive
|
mhdSharuk/CodePanda
|
2275e0502a6824593b582ea37956106a7c30f233
|
55ce8e271378df5d27ee7b3d681cca69e0c2ca83
|
refs/heads/main
| 2023-04-12T03:53:02.249836
| 2021-04-27T16:27:50
| 2021-04-27T16:27:50
| 358,644,908
| 0
| 0
| null | 2021-04-16T15:36:04
| 2021-04-16T15:36:03
| null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# Generated by Django 3.2 on 2021-04-18 08:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0005_alter_profile_image'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='image',
),
]
|
[
"msharuk589@gmail.com"
] |
msharuk589@gmail.com
|
5a5b030a3e1b921dcd9ff11aa64c481c8b75bfb7
|
367f6473dfec2768511c559c5d0ac9b77efcc9ba
|
/mapsite/asgi.py
|
0a6c89142de5ffa20a6222b7cd2f612432b7a887
|
[] |
no_license
|
nancyhoangd/uvadirect
|
7028f9dc41a44d2469986c6f2f6b9aea214caa65
|
671a531cf817736610cccf811c2b70ae8bea4de3
|
refs/heads/main
| 2023-04-25T15:47:25.373055
| 2021-05-06T19:27:41
| 2021-05-06T19:27:41
| 368,327,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
ASGI config for mapsite project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mapsite.settings')
application = get_asgi_application()
|
[
"aam8mf@virginia.edu"
] |
aam8mf@virginia.edu
|
326d73ecb950b3552e26d785ac5d8f60dcbe908b
|
a56e5614d0103eb7bd33247416f21e009953dd64
|
/a3_2_3.py
|
ead3a6847fcad12183d63cd52daf2b9f16f07907
|
[] |
no_license
|
vikrambhatt1405/aircraftdata_spark
|
f4ae3546df4ef68da53284bf05af25ba3a75e117
|
578980eae66b99d8298ce884230590ec62cb6369
|
refs/heads/master
| 2021-01-24T04:34:37.774126
| 2018-02-26T09:15:57
| 2018-02-26T09:15:57
| 122,943,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("FreqCauseofDelay")
sc = SparkContext(conf=conf)
rdd1 = sc.textFile('hdfs:///user/simmhan/faa/2007.csv')
rdd2 = rdd1.map(lambda l:l.split(",")).filter(lambda c: c[3] != "DayOfWeek")
rdd2 = rdd2.filter(lambda x: x[14] != 'NA')
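# Column indices below assume the standard FAA on-time CSV layout (0-indexed):
# 14 = ArrDelay, 16 = Origin, 17 = Dest, and 24-28 = CarrierDelay, WeatherDelay,
# NASDelay, SecurityDelay, LateAircraftDelay.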
CarrierDelayFrequency = rdd2.filter(lambda x:x[24] != '0').map(lambda x:(x[16]+"-"+x[17],int(x[24])))
WeatherDelayFrequency = rdd2.filter(lambda x:x[25] != '0').map(lambda x:(x[16]+"-"+x[17],int(x[25])))
NASDelayFrequency = rdd2.filter(lambda x:x[26] != '0').map(lambda x:(x[16]+"-"+x[17],int(x[26])))
SecurityDelayFrequency = rdd2.filter(lambda x:x[27] != '0').map(lambda x:(x[16]+"-"+x[17],int(x[27])))
LateAircraftDelayFrequency = rdd2.filter(lambda x:x[28] != '0').map(lambda x:(x[16]+"-"+x[17],int(x[28])))
lst = [
    ('CarrierDelayFrequency', CarrierDelayFrequency.count()),
    ('WeatherDelayFrequency', WeatherDelayFrequency.count()),
    ('NASDelayFrequency', NASDelayFrequency.count()),
    ('SecurityDelayFrequency', SecurityDelayFrequency.count()),
    ('LateAircraftDelayFrequency', LateAircraftDelayFrequency.count()),
]
rdd3 = sc.parallelize(lst)
rdd3.saveAsTextFile('hdfs:///user/vikrambhatt/a3_2_3.out')
|
[
"noreply@github.com"
] |
vikrambhatt1405.noreply@github.com
|
ffb723bce5647ba3b185cf4e227e25b2ff78a4d7
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/frdyon001/question2.py
|
26dadb99dfec05c266eb818b46161070e84fcf6d
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
# Student Number: FRDYON001
# Name: Yonela Ford
# 30 Second Rule Expert
# Date: 08 March 2014
def rule():
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
ans=input("Did anyone see you? (yes/no)\n")
if (ans=="yes"):
ans=input("Was it a boss/lover/parent? (yes/no)\n")
if (ans=="yes"):
ans=input("Was it expensive? (yes/no)\n")
if (ans=="yes"):
ans=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
ans=input("Is it chocolate? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
print("Decision: Eat it.")
elif (ans=="no"):
ans=input("Was it sticky? (yes/no)\n")
if (ans=="yes"):
ans=input("Is it a raw steak? (yes/no)\n")
if (ans=="yes"):
ans=input("Are you a puma? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
ans=input("Did the cat lick it? (yes/no)\n")
if (ans=="yes"):
ans=input("Is your cat healthy? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
print( "Decision: Eat it.")
elif (ans=="no"):
ans=input("Is it an Emausaurus? (yes/no)\n")
if (ans=="yes"):
ans=input("Are you a Megalosaurus? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Don't eat it.")
elif (ans=="no"):
ans=input("Did the cat lick it? (yes/no)\n")
if (ans=="yes"):
ans=input("Is your cat healthy? (yes/no)\n")
if (ans=="yes"):
print("Decision: Eat it.")
elif (ans=="no"):
print("Decision: Your call.")
elif (ans=="no"):
print("Decision: Eat it.")
rule()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
7b6b6a70301e291fa4b76eaefb53ca00181d8cf0
|
5806d9dcae364e980baebbd223d942f8cfb2aac4
|
/FreshApp/apps.py
|
6c1c84f30abb52b504b280c280d95b1b16a06d9a
|
[] |
no_license
|
Alec-Thoman/DjangoApp
|
c8c6c65d36f91b31fc01218e059fad32e028dfd4
|
43a6d0df03cc24f0c258ae07e0ce18dc070a8cea
|
refs/heads/main
| 2023-02-25T09:08:03.530584
| 2021-01-16T19:29:39
| 2021-01-16T19:29:39
| 330,227,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
from django.apps import AppConfig
class FreshappConfig(AppConfig):
name = 'FreshApp'
|
[
"noreply@github.com"
] |
Alec-Thoman.noreply@github.com
|
7b5aa9aaf1793ecd9679085c283898340b11608c
|
4b1d3e1db447f178499665e1284a3cf201da7c76
|
/tools/drug_strength.py
|
c2d6430f18ce0cf79debb04756aa82c07be79680
|
[] |
no_license
|
yajatvishwak/html-scraper
|
97c151296ab3ffd8db42b6879f544a2d3ab12b61
|
c57c12ffbcdc6437dba858ce37658ec19536dc76
|
refs/heads/master
| 2023-05-11T21:41:47.691489
| 2021-05-31T08:39:32
| 2021-05-31T08:39:32
| 273,669,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
from bs4 import BeautifulSoup
import os
if os.path.exists("./testfile_drug_strength.txt"):
os.remove("./testfile_drug_strength.txt")
if os.path.exists("../processed/drug_strength.txt"):
os.remove("../processed/drug_strength.txt")
with open("../htmlFile/index.html", "r") as html:
file = open("testfile_drug_strength.txt", "a")
soup = BeautifulSoup(html, 'html.parser')
for div in soup.findAll(text=lambda t: "Strength:" in t):
file.write(div)
with open("testfile_drug_strength.txt", "r") as name:
file = open("../processed/drug_strength.txt", "a")
for line in name:
if line.strip().isalnum:
file.write(line.strip() + "\n")
|
[
"yajat472001@gmail.com"
] |
yajat472001@gmail.com
|
9d6f04b80f6977ff3a51d5c3eeb0354b37a1d374
|
6c38257dae2d5550dc04468eb99613128929bb45
|
/uproot4/containers.py
|
65416c7c6dd643551b7cc3203d208fb70a5b2711
|
[
"BSD-3-Clause"
] |
permissive
|
bendavid/uproot4
|
4360b4a8cc5c975ca69c8bdd364d83c8ff2ed81a
|
d67d431483e7b2b38d01305311438b2a5685566d
|
refs/heads/master
| 2022-11-27T13:50:20.366768
| 2020-07-28T13:50:29
| 2020-07-28T13:50:29
| 284,759,319
| 0
| 0
| null | 2020-08-03T17:08:16
| 2020-08-03T17:08:15
| null |
UTF-8
|
Python
| false
| false
| 30,654
|
py
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
from __future__ import absolute_import
import types
import struct
try:
from collections.abc import Sequence
from collections.abc import Set
from collections.abc import Mapping
from collections.abc import KeysView
from collections.abc import ValuesView
except ImportError:
from collections import Sequence
from collections import Set
from collections import Mapping
KeysView = None
ValuesView = None
import numpy
import uproot4._util
import uproot4.model
import uproot4.interpretation.numerical
import uproot4.deserialization
_stl_container_size = struct.Struct(">I")
_stl_object_type = numpy.dtype(numpy.object)
def _content_typename(content):
if isinstance(content, numpy.dtype):
return uproot4.interpretation.numerical._dtype_kind_itemsize_to_typename[
content.kind, content.itemsize
]
elif isinstance(content, type):
return uproot4.model.classname_decode(content.__name__)[0]
else:
return content.typename
def _content_cache_key(content):
if isinstance(content, numpy.dtype):
bo = uproot4.interpretation.numerical._numpy_byteorder_to_cache_key[
content.byteorder
]
return "{0}{1}{2}".format(bo, content.kind, content.itemsize)
elif isinstance(content, type):
return content.__name__
else:
return content.cache_key
def _read_nested(
model, length, chunk, cursor, context, file, selffile, parent, header=True
):
if isinstance(model, numpy.dtype):
return cursor.array(chunk, length, model, context)
else:
values = numpy.empty(length, dtype=_stl_object_type)
if isinstance(model, AsContainer):
for i in range(length):
values[i] = model.read(
chunk, cursor, context, file, selffile, parent, header=header
)
else:
for i in range(length):
values[i] = model.read(chunk, cursor, context, file, selffile, parent)
return values
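# Illustrative dispatch: if `model` is a numpy dtype (e.g. numpy.dtype(">f8")),
# the data are read as one flat array; otherwise `model.read` deserializes
# `length` objects one by one (the `header` flag is forwarded only to AsContainer models).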
def _tostring(value):
if uproot4._util.isstr(value):
return repr(value)
else:
return str(value)
def _str_with_ellipsis(tostring, length, lbracket, rbracket, limit):
leftlen = len(lbracket)
rightlen = len(rbracket)
left, right, i, j, done = [], [], 0, length - 1, False
while True:
if i > j:
done = True
break
x = tostring(i) + ("" if i == length - 1 else ", ")
i += 1
dotslen = 0 if i > j else 5
if leftlen + rightlen + len(x) + dotslen > limit:
break
left.append(x)
leftlen += len(x)
if i > j:
done = True
break
y = tostring(j) + ("" if j == length - 1 else ", ")
j -= 1
dotslen = 0 if i > j else 5
if leftlen + rightlen + len(y) + dotslen > limit:
break
right.insert(0, y)
rightlen += len(y)
if length == 0:
return lbracket + rbracket
elif done:
return lbracket + "".join(left) + "".join(right) + rbracket
elif len(left) == 0 and len(right) == 0:
return lbracket + "{0}, ...".format(tostring(0)) + rbracket
elif len(right) == 0:
return lbracket + "".join(left) + "..." + rbracket
else:
return lbracket + "".join(left) + "..., " + "".join(right) + rbracket
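# Abstract base class for all STL-container interpretations; subclasses describe
# how to deserialize one C++ container type from a chunk of raw bytes.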
class AsContainer(object):
@property
def header(self):
return self._header
@header.setter
def header(self, value):
if value is True or value is False:
self._header = value
else:
raise TypeError(
"{0}.header must be True or False".format(type(self).__name__)
)
def strided_interpretation(
self, file, header=False, tobject_header=True, original=None
):
raise uproot4.interpretation.objects.CannotBeStrided(self.typename)
@property
def cache_key(self):
raise AssertionError
@property
def typename(self):
raise AssertionError
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
raise AssertionError
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
raise AssertionError
def __eq__(self, other):
raise AssertionError
def __ne__(self, other):
return not self == other
class Container(object):
def __ne__(self, other):
return not self == other
def tolist(self):
raise AssertionError
class AsFIXME(AsContainer):
def __init__(self, message):
self.message = message
def __hash__(self):
return hash((AsFIXME, self.message))
def __repr__(self):
return "AsFIXME({0})".format(repr(self.message))
@property
def cache_key(self):
return "AsFIXME({0})".format(repr(self.message))
@property
def typename(self):
return "unknown"
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
raise uproot4.interpretation.objects.CannotBeAwkward(self.message)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
raise uproot4.deserialization.DeserializationError(
self.message + "; please file a bug report!", None, None, None, None
)
def __eq__(self, other):
if isinstance(other, AsFIXME):
return self.message == other.message
else:
return False
class AsString(AsContainer):
def __init__(self, header, length_bytes="1-5", typename=None):
self.header = header
if length_bytes in ("1-5", "4"):
self._length_bytes = length_bytes
else:
raise ValueError("length_bytes must be '1-5' or '4'")
self._typename = typename
@property
def length_bytes(self):
return self._length_bytes
def __hash__(self):
return hash((AsString, self._header, self._length_bytes))
def __repr__(self):
args = [repr(self._header)]
if self._length_bytes != "1-5":
args.append("length_bytes={0}".format(repr(self._length_bytes)))
return "AsString({0})".format(", ".join(args))
@property
def cache_key(self):
return "AsString({0},{1})".format(self._header, repr(self._length_bytes))
@property
def typename(self):
if self._typename is None:
return "std::string"
else:
return self._typename
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
return awkward1.forms.ListOffsetForm(
index_format,
awkward1.forms.NumpyForm((), 1, "B", parameters={"__array__": "char"}),
parameters={
"__array__": "string",
"uproot": {
"as": "string",
"header": self._header,
"length_bytes": self._length_bytes,
},
},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
if self._header and header:
start_cursor = cursor.copy()
num_bytes, instance_version = uproot4.deserialization.numbytes_version(
chunk, cursor, context
)
if self._length_bytes == "1-5":
out = cursor.string(chunk, context)
elif self._length_bytes == "4":
length = cursor.field(chunk, _stl_container_size, context)
out = cursor.string_with_length(chunk, context, length)
else:
raise AssertionError(repr(self._length_bytes))
if self._header and header:
uproot4.deserialization.numbytes_check(
chunk,
start_cursor,
cursor,
num_bytes,
self.typename,
context,
file.file_path,
)
return out
def __eq__(self, other):
return (
isinstance(other, AsString)
and self.header == other.header
and self.length_bytes == other.length_bytes
)
class AsPointer(AsContainer):
def __init__(self, pointee):
self._pointee = pointee
@property
def pointee(self):
return self._pointee
def __hash__(self):
return hash((AsPointer, self._pointee))
def __repr__(self):
if isinstance(self._pointee, type):
pointee = self._pointee.__name__
else:
pointee = repr(self._pointee)
return "AsPointer({0})".format(pointee)
@property
def cache_key(self):
return "AsPointer({0})".format(_content_cache_key(self._pointee))
@property
def typename(self):
return _content_typename(self._pointee) + "*"
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
raise uproot4.interpretation.objects.CannotBeAwkward("arbitrary pointer")
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
return uproot4.deserialization.read_object_any(
chunk, cursor, context, file, selffile, parent
)
def __eq__(self, other):
if isinstance(other, AsPointer):
return self._pointee == other._pointee
else:
return False
class AsArray(AsContainer):
def __init__(self, header, values):
self._header = header
self._values = values
@property
def values(self):
return self._values
def __repr__(self):
if isinstance(self._values, type):
values = self._values.__name__
else:
values = repr(self._values)
return "AsArray({0}, {1})".format(self.header, values)
@property
def cache_key(self):
return "AsArray({0},{1})".format(self.header, _content_cache_key(self._values))
@property
def typename(self):
return _content_typename(self._values) + "*"
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
return awkward1.forms.ListOffsetForm(
index_format,
uproot4._util.awkward_form(
self._values, file, index_format, header, tobject_header
),
parameters={"uproot": {"as": "array", "header": self._header}},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
if self._header and header:
cursor.skip(1)
if isinstance(self._values, numpy.dtype):
remainder = chunk.remainder(cursor.index, cursor, context)
return remainder.view(self._values)
else:
out = []
while cursor.index < chunk.stop:
out.append(
self._values.read(chunk, cursor, context, file, selffile, parent)
)
return numpy.array(out, dtype=numpy.dtype(numpy.object))
class AsDynamic(AsContainer):
def __init__(self, model=None):
self._model = model
@property
def model(self):
return self._model
def __repr__(self):
if self._model is None:
model = ""
elif isinstance(self._model, type):
model = "model=" + self._model.__name__
else:
model = "model=" + repr(self._model)
return "AsDynamic({0})".format(model)
@property
def cache_key(self):
if self._model is None:
return "AsDynamic(None)"
else:
return "AsDynamic({0})".format(_content_cache_key(self._model))
@property
def typename(self):
if self._model is None:
return "void*"
else:
            return _content_typename(self._model) + "*"
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
if self._model is None:
raise uproot4.interpretation.objects.CannotBeAwkward("dynamic type")
else:
return awkward1.forms.ListOffsetForm(
index_format,
uproot4._util.awkward_form(
self._model, file, index_format, header, tobject_header
),
parameters={"uproot": {"as": "array", "header": self._header}},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
classname = cursor.string(chunk, context)
cursor.skip(1)
cls = file.class_named(classname)
return cls.read(chunk, cursor, context, file, selffile, parent)
class AsVector(AsContainer):
def __init__(self, header, values):
self.header = header
if isinstance(values, AsContainer):
self._values = values
elif isinstance(values, type) and issubclass(
values, (uproot4.model.Model, uproot4.model.DispatchByVersion)
):
self._values = values
else:
self._values = numpy.dtype(values)
def __hash__(self):
return hash((AsVector, self._header, self._values))
@property
def values(self):
return self._values
def __repr__(self):
if isinstance(self._values, type):
values = self._values.__name__
else:
values = repr(self._values)
return "AsVector({0}, {1})".format(self._header, values)
@property
def cache_key(self):
return "AsVector({0},{1})".format(
self._header, _content_cache_key(self._values)
)
@property
def typename(self):
return "std::vector<{0}>".format(_content_typename(self._values))
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
return awkward1.forms.ListOffsetForm(
index_format,
uproot4._util.awkward_form(
self._values, file, index_format, header, tobject_header
),
parameters={"uproot": {"as": "vector", "header": self._header}},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
if self._header and header:
start_cursor = cursor.copy()
num_bytes, instance_version = uproot4.deserialization.numbytes_version(
chunk, cursor, context
)
length = cursor.field(chunk, _stl_container_size, context)
values = _read_nested(
self._values, length, chunk, cursor, context, file, selffile, parent
)
out = STLVector(values)
if self._header and header:
uproot4.deserialization.numbytes_check(
chunk,
start_cursor,
cursor,
num_bytes,
self.typename,
context,
file.file_path,
)
return out
def __eq__(self, other):
if not isinstance(other, AsVector):
return False
if self.header != other.header:
return False
if isinstance(self.values, numpy.dtype) and isinstance(
other.values, numpy.dtype
):
return self.values == other.values
elif not isinstance(self.values, numpy.dtype) and not isinstance(
other.values, numpy.dtype
):
return self.values == other.values
else:
return False
class STLVector(Container, Sequence):
def __init__(self, values):
if isinstance(values, types.GeneratorType):
values = numpy.asarray(list(values))
elif isinstance(values, Set):
values = numpy.asarray(list(values))
elif isinstance(values, (list, tuple)):
values = numpy.asarray(values)
self._values = values
def __str__(self, limit=85):
def tostring(i):
return _tostring(self._values[i])
return _str_with_ellipsis(tostring, len(self), "[", "]", limit)
def __repr__(self, limit=85):
return "<STLVector {0} at 0x{1:012x}>".format(
self.__str__(limit=limit - 30), id(self)
)
def __getitem__(self, where):
return self._values[where]
def __len__(self):
return len(self._values)
def __contains__(self, what):
return what in self._values
def __iter__(self):
return iter(self._values)
def __reversed__(self):
return STLVector(self._values[::-1])
def __eq__(self, other):
if isinstance(other, STLVector):
return self._values == other._values
elif isinstance(other, Sequence):
return self._values == other
else:
return False
def tolist(self):
return [
x.tolist() if isinstance(x, (Container, numpy.ndarray)) else x for x in self
]
class AsSet(AsContainer):
def __init__(self, header, keys):
self.header = header
if isinstance(keys, AsContainer):
self._keys = keys
elif isinstance(keys, type) and issubclass(
keys, (uproot4.model.Model, uproot4.model.DispatchByVersion)
):
self._keys = keys
else:
self._keys = numpy.dtype(keys)
def __hash__(self):
return hash((AsSet, self._header, self._keys))
@property
def keys(self):
return self._keys
def __repr__(self):
if isinstance(self._keys, type):
keys = self._keys.__name__
else:
keys = repr(self._keys)
return "AsSet({0}, {1})".format(self._header, keys)
@property
def cache_key(self):
return "AsSet({0},{1})".format(self._header, _content_cache_key(self._keys))
@property
def typename(self):
return "std::set<{0}>".format(_content_typename(self._keys))
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
return awkward1.forms.ListOffsetForm(
index_format,
uproot4._util.awkward_form(
self._keys, file, index_format, header, tobject_header
),
parameters={
"__array__": "set",
"uproot": {"as": "set", "header": self._header},
},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
if self._header and header:
start_cursor = cursor.copy()
num_bytes, instance_version = uproot4.deserialization.numbytes_version(
chunk, cursor, context
)
length = cursor.field(chunk, _stl_container_size, context)
keys = _read_nested(
self._keys, length, chunk, cursor, context, file, selffile, parent
)
out = STLSet(keys)
if self._header and header:
uproot4.deserialization.numbytes_check(
chunk,
start_cursor,
cursor,
num_bytes,
self.typename,
context,
file.file_path,
)
return out
def __eq__(self, other):
if not isinstance(other, AsSet):
return False
if self.header != other.header:
return False
if isinstance(self.keys, numpy.dtype) and isinstance(other.keys, numpy.dtype):
return self.keys == other.keys
elif not isinstance(self.keys, numpy.dtype) and not isinstance(
other.keys, numpy.dtype
):
return self.keys == other.keys
else:
return False
class STLSet(Container, Set):
def __init__(self, keys):
if isinstance(keys, types.GeneratorType):
keys = numpy.asarray(list(keys))
elif isinstance(keys, Set):
keys = numpy.asarray(list(keys))
else:
keys = numpy.asarray(keys)
self._keys = numpy.sort(keys)
def __str__(self, limit=85):
def tostring(i):
return _tostring(self._keys[i])
return _str_with_ellipsis(tostring, len(self), "{", "}", limit)
def __repr__(self, limit=85):
return "<STLSet {0} at 0x{1:012x}>".format(
self.__str__(limit=limit - 30), id(self)
)
def __len__(self):
return len(self._keys)
def __iter__(self):
return iter(self._keys)
def __contains__(self, where):
where = numpy.asarray(where)
index = numpy.searchsorted(self._keys.astype(where.dtype), where, side="left")
if uproot4._util.isint(index):
if index < len(self._keys) and self._keys[index] == where:
return True
else:
return False
else:
return False
def __eq__(self, other):
if isinstance(other, Set):
if not isinstance(other, STLSet):
other = STLSet(other)
else:
return False
if len(self._keys) != len(other._keys):
return False
keys_same = self._keys == other._keys
if isinstance(keys_same, bool):
return keys_same
else:
return numpy.all(keys_same)
def tolist(self):
return set(
x.tolist() if isinstance(x, (Container, numpy.ndarray)) else x for x in self
)
def _has_nested_header(obj):
if isinstance(obj, AsContainer):
return obj.header
else:
return False
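# Interpretation of std::map<K, V>: keys and values are deserialized as two
# parallel arrays of equal length and wrapped in an STLMap.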
class AsMap(AsContainer):
def __init__(self, header, keys, values):
self.header = header
if isinstance(keys, AsContainer):
self._keys = keys
else:
self._keys = numpy.dtype(keys)
if isinstance(values, AsContainer):
self._values = values
elif isinstance(values, type) and issubclass(
values, (uproot4.model.Model, uproot4.model.DispatchByVersion)
):
self._values = values
else:
self._values = numpy.dtype(values)
def __hash__(self):
return hash((AsMap, self._header, self._keys, self._values))
@property
def keys(self):
return self._keys
@property
def values(self):
return self._values
def __repr__(self):
if isinstance(self._keys, type):
keys = self._keys.__name__
else:
keys = repr(self._keys)
if isinstance(self._values, type):
values = self._values.__name__
else:
values = repr(self._values)
return "AsMap({0}, {1}, {2})".format(self._header, keys, values)
@property
def cache_key(self):
return "AsMap({0},{1},{2})".format(
self._header,
_content_cache_key(self._keys),
_content_cache_key(self._values),
)
@property
def typename(self):
return "std::map<{0}, {1}>".format(
_content_typename(self._keys), _content_typename(self._values)
)
def awkward_form(self, file, index_format="i64", header=False, tobject_header=True):
import awkward1
return awkward1.forms.ListOffsetForm(
index_format,
awkward1.forms.RecordForm(
(
uproot4._util.awkward_form(
self._keys, file, index_format, header, tobject_header
),
uproot4._util.awkward_form(
self._values, file, index_format, header, tobject_header
),
)
),
parameters={
"__array__": "sorted_map",
"uproot": {"as": "map", "header": self._header},
},
)
def read(self, chunk, cursor, context, file, selffile, parent, header=True):
if self._header and header:
start_cursor = cursor.copy()
num_bytes, instance_version = uproot4.deserialization.numbytes_version(
chunk, cursor, context
)
cursor.skip(6)
length = cursor.field(chunk, _stl_container_size, context)
if _has_nested_header(self._keys) and header:
cursor.skip(6)
keys = _read_nested(
self._keys,
length,
chunk,
cursor,
context,
file,
selffile,
parent,
header=False,
)
if _has_nested_header(self._values) and header:
cursor.skip(6)
values = _read_nested(
self._values,
length,
chunk,
cursor,
context,
file,
selffile,
parent,
header=False,
)
out = STLMap(keys, values)
if self._header and header:
uproot4.deserialization.numbytes_check(
chunk,
start_cursor,
cursor,
num_bytes,
self.typename,
context,
file.file_path,
)
return out
def __eq__(self, other):
if not isinstance(other, AsMap):
return False
if self.header != other.header:
return False
if isinstance(self.keys, numpy.dtype) and isinstance(other.keys, numpy.dtype):
if self.keys != other.keys:
return False
elif not isinstance(self.keys, numpy.dtype) and not isinstance(
other.keys, numpy.dtype
):
if self.keys != other.keys:
return False
else:
return False
if isinstance(self.values, numpy.dtype) and isinstance(
other.values, numpy.dtype
):
return self.values == other.values
elif not isinstance(self.values, numpy.dtype) and not isinstance(
other.values, numpy.dtype
):
return self.values == other.values
else:
return False
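# Mapping view over parallel key/value arrays; keys are kept sorted so lookups
# can use numpy.searchsorted, and array-valued keys are supported in __getitem__.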
class STLMap(Container, Mapping):
@classmethod
def from_mapping(cls, mapping):
return STLMap(mapping.keys(), mapping.values())
def __init__(self, keys, values):
if KeysView is not None and isinstance(keys, KeysView):
keys = numpy.asarray(list(keys))
elif isinstance(keys, types.GeneratorType):
keys = numpy.asarray(list(keys))
elif isinstance(keys, Set):
keys = numpy.asarray(list(keys))
else:
keys = numpy.asarray(keys)
if ValuesView is not None and isinstance(values, ValuesView):
values = numpy.asarray(list(values))
elif isinstance(values, types.GeneratorType):
values = numpy.asarray(list(values))
if len(keys) != len(values):
raise ValueError("number of keys must be equal to the number of values")
index = numpy.argsort(keys)
self._keys = keys[index]
try:
self._values = values[index]
except Exception:
self._values = numpy.asarray(values)[index]
def __str__(self, limit=85):
def tostring(i):
return _tostring(self._keys[i]) + ": " + _tostring(self._values[i])
return _str_with_ellipsis(tostring, len(self), "{", "}", limit)
def __repr__(self, limit=85):
return "<STLMap {0} at 0x{1:012x}>".format(
self.__str__(limit=limit - 30), id(self)
)
def __getitem__(self, where):
where = numpy.asarray(where)
index = numpy.searchsorted(self._keys.astype(where.dtype), where, side="left")
if uproot4._util.isint(index):
if index < len(self._keys) and self._keys[index] == where:
return self._values[index]
else:
raise KeyError(where)
elif len(self._keys) == 0:
values = numpy.empty(len(index))
return numpy.ma.MaskedArray(values, True)
else:
index[index >= len(self._keys)] = 0
mask = self._keys[index] != where
return numpy.ma.MaskedArray(self._values[index], mask)
def get(self, where, default=None):
where = numpy.asarray(where)
index = numpy.searchsorted(self._keys.astype(where.dtype), where, side="left")
if uproot4._util.isint(index):
if index < len(self._keys) and self._keys[index] == where:
return self._values[index]
else:
return default
elif len(self._keys) == 0:
return numpy.array([default])[numpy.zeros(len(index), numpy.int32)]
else:
index[index >= len(self._keys)] = 0
matches = self._keys[index] == where
values = self._values[index]
defaults = numpy.array([default])[numpy.zeros(len(index), numpy.int32)]
return numpy.where(matches, values, defaults)
def __len__(self):
return len(self._keys)
def __iter__(self):
return iter(self._keys)
def __contains__(self, where):
where = numpy.asarray(where)
index = numpy.searchsorted(self._keys.astype(where.dtype), where, side="left")
if uproot4._util.isint(index):
if index < len(self._keys) and self._keys[index] == where:
return True
else:
return False
else:
return False
def keys(self):
return self._keys
def values(self):
return self._values
def items(self):
return numpy.transpose(numpy.vstack([self._keys, self._values]))
def __eq__(self, other):
if isinstance(other, Mapping):
if not isinstance(other, STLMap):
other = STLMap(other.keys(), other.values())
else:
return False
if len(self._keys) != len(other._keys):
return False
keys_same = self._keys == other._keys
values_same = self._values == other._values
if isinstance(keys_same, bool) and isinstance(values_same, bool):
return keys_same and values_same
else:
return numpy.logical_and(keys_same, values_same).all()
def tolist(self):
out = {}
for i in range(len(self)):
x = self._values[i]
if isinstance(x, (Container, numpy.ndarray)):
out[self._keys[i]] = x.tolist()
else:
out[self._keys[i]] = x
return out
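# Minimal usage sketch (illustrative; assumes uproot4 and numpy are importable):
#
#     import numpy
#     from uproot4.containers import AsVector, STLVector
#     interp = AsVector(True, numpy.dtype(">i4"))
#     interp.typename        # 'std::vector<int32_t>'
#     STLVector([1, 2, 3])   # <STLVector [1, 2, 3] at 0x...>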
|
[
"noreply@github.com"
] |
bendavid.noreply@github.com
|
30e9ec7a26838f6419b32d07ae9170643ae83701
|
d93832e698d65bc09858ab3b78594ab1ae87494f
|
/python/chart/feature_tests.py
|
ae92d271c9d94065f1d1cab16e54fab95d1010c8
|
[
"Apache-2.0"
] |
permissive
|
cbrew/chartparse
|
bb3ac69f4185f8f8e82b2d5345101b5c648589a1
|
1b93fc73d73bb2c5ac6ff5d55cc250a5f4565428
|
refs/heads/master
| 2020-05-24T12:55:14.305942
| 2014-08-05T22:45:53
| 2014-08-05T22:45:53
| 6,537,959
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,754
|
py
|
"""
feature tests
=============
We need to be sure that the chart does the right thing
with complex categories. Specifically, if we have a
bare determiner ``det`` in some cell of the chart, and an edge is
proposed adding ``det(num:sing)``, we should not add it, since that
is duplicative. Equally, when we
add ``det(num:sing)``, then propose adding ``det``, we should
replace the less general with the more general.
We also want to make sure that feature percolation works as
expected. The intention is that a feature name mentioned
in more than one place in a rule will result in propagation
of any feature value found, both from a child node to any
labeled later siblings and from child node to the parent, if
the parent is labeled. We never propagate from later siblings
to earlier siblings, because chart parsing is built on the
assumption that nodes will not become more specific after they
are entered into the chart.
"""
import chart
from features import ImmutableCategory as icat
from features import make_feature_grammar
import time
def too_expensive(): # pragma:no cover
from math import log10
sentence = ('the pigeons are punished' + ( ' and they suffer' * 152)).split()
print 'sentence of length',len(sentence)
start = time.clock()
v = chart.parse(sentence,return_chart=True,
print_trees=False,show_chart=False)
end = time.clock()
print "Took",(end - start),"seconds"
print "Counting trees"
start = time.clock()
print "Log of number of trees",log10(v.trace_edges())
end = time.clock()
print "Took",(end - start),"seconds"
print 'By best estimate this is many more trees than there are atoms in the universe'
ts = v.trees(v.solutions(v.topcat)[0])
tree = chart.treestring(ts.next())
print 'First tree has length',len(tree),'characters'
print tree[:40],'...\n',tree[10000:10100],'...\n',tree[-80:]
for i in range(199):
tree = chart.treestring(ts.next())
print '200th tree has length',len(tree),'characters'
print tree[:40],'...\n',tree[10000:10100],'...\n',tree[-80:]
def test_subsumption():
"""
>>> chart.parse(['pigeons'], use_features=True,topcat='Nn(num:pl)')
['pigeons']
Parse 1:
Nn(num:pl)
n(num:pl) pigeons
1 parses
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> len(ch.completes[0])
4
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> ch.completes[0]
set([C(Pn, 0, 1), C(n(num:pl), 0, 1), C(Nn(num:pl), 0, 1), C(pigeons, 0, 1)])
Adding an edge that is already there should be a non-op
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> ch.incorporate(list(ch.completes[0])[0])
>>> len(ch.completes[0])
4
Adding an edge that is more specific than one already there should be a non-op
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> edge = chart.Edge(label=icat.from_string('n(num:pl,case:subj)'),left=0,right=1,needed=tuple([]),constraints=None)
>>> ch.incorporate(edge)
>>> ch.completes[0]
set([C(Pn, 0, 1), C(n(num:pl), 0, 1), C(Nn(num:pl), 0, 1), C(pigeons, 0, 1)])
Adding an edge that is less specific than one already there should result
in the replacement of the previous edge by the new one, leaving the length
of the chart unchanged.
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> edge = chart.Edge(label=icat.from_string('n'),left=0,right=1,needed=tuple([]),constraints=None)
>>> ch.incorporate(edge)
>>> len(ch.completes[0])
4
>>> ch = chart.Chart(['pigeons'], grammar=make_feature_grammar(),using_features=True)
>>> edge = chart.Edge(label=icat.from_string('n'),left=0,right=1,needed=tuple([]),constraints=None)
>>> ch.incorporate(edge)
>>> ch.completes[0]
set([C(Pn, 0, 1), C(Nn(num:pl), 0, 1), C(pigeons, 0, 1), C(n, 0, 1)])
>>> ch = chart.Chart(['the','pigeons','are','punished'], grammar=make_feature_grammar(),using_features=True)
>>> ps = sorted(ch.partials[2])
>>> edge = sorted(ch.partials[2])[-3]
>>> edge
P(S(num:pl), 0, 2,(Vp(num:pl),))
>>> ch.incorporate(edge)
>>> ps == sorted(ch.partials[2])
True
Make sure we can build a partial edge ourselves and that incorporating it is a non-op
>>> ch = chart.Chart(['the','pigeons','are','punished'], grammar=make_feature_grammar(),using_features=True)
>>> ps = sorted(ch.partials[2])
>>> edge=chart.Edge(label=icat.from_string('S(num:pl)'),left=0,right=2,needed=tuple([icat.from_string('Vp(num:pl)')]),constraints=None)
>>> ch.incorporate(edge)
>>> ps == sorted(ch.partials[2])
True
Make sure we can build a partial edge ourselves differing only in needed field, less general
>>> ch = chart.Chart(['the','pigeons','are','punished'], grammar=make_feature_grammar(),using_features=True)
>>> ps = sorted(ch.partials[2])
>>> edge=chart.Edge(label=icat.from_string('S(num:pl)'),left=0,right=2,needed=tuple([icat.from_string('Vp(num:pl,case:subj)')]),constraints=None)
>>> ch.incorporate(edge)
>>> ps == sorted(ch.partials[2])
True
>>> sorted(ch.partials[2])[7]
P(S(num:pl), 0, 2,(Vp(num:pl),))
Make sure we can build a partial edge ourselves differing only in needed field, more general. Changes set.
>>> ch = chart.Chart(['the','pigeons','are','punished'], grammar=make_feature_grammar(),using_features=True)
>>> ps = sorted(ch.partials[2])
>>> edge=chart.Edge(label=icat.from_string('S(num:pl)'),left=0,right=2,needed=tuple([icat.from_string('Vp')]),constraints=None)
>>> ch.incorporate(edge)
>>> ps == sorted(ch.partials[2])
False
>>> sorted(ch.partials[2])[7]
P(S(num:pl), 0, 2,(Vp,))
Next one should have a parse because number agreement is not enforced between different branches of
a conjunction.
>>> v = chart.parse('stuart suffers and they suffer'.split(),return_chart=True,use_features=True,sep='_')
['stuart', 'suffers', 'and', 'they', 'suffer']
Parse 1:
S
_S(num:sing)
__Np(num:sing)
___pn(num:sing) stuart
__Vp(num:sing)
___v(num:sing,tr:intrans) suffers
_conj and
_S(num:pl)
__Np(case:subj,num:pl)
___pn(case:subj,num:pl) they
__Vp(num:pl)
___v(num:pl,tr:intrans) suffer
1 parses
This one should have no number features on the conjoined S.
>>> v = chart.parse('stuart suffers and stuart suffers'.split(),return_chart=True,use_features=True,sep='_')
['stuart', 'suffers', 'and', 'stuart', 'suffers']
Parse 1:
S
_S(num:sing)
__Np(num:sing)
___pn(num:sing) stuart
__Vp(num:sing)
___v(num:sing,tr:intrans) suffers
_conj and
_S(num:sing)
__Np(num:sing)
___pn(num:sing) stuart
__Vp(num:sing)
___v(num:sing,tr:intrans) suffers
1 parses
"""
|
[
"cbrew@acm.org"
] |
cbrew@acm.org
|
6cab1ca10fac326f54d052e11c35b4e107b1fd20
|
e430c2f92a9b984914abf972d2b39c2ec00c8bab
|
/app/main/views.py
|
b2698dc24702d70231e25be9656f19fb1c7a31e9
|
[] |
no_license
|
wstarxing/test-flask-pcincipal
|
72daa0dd664939a24d54b6f8d87f29aaa98492a5
|
3ba8c16eb87f0e03e3d2432f4c5bca6c446563bf
|
refs/heads/master
| 2021-01-14T08:10:29.085230
| 2017-03-09T09:10:49
| 2017-03-09T09:10:49
| 81,930,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,066
|
py
|
# -*- coding: UTF-8 -*-
from flask import *
from flask_login import login_user, logout_user, login_required, current_user
from app.main import main
from app.main.principalsource import *
from app.models.user import *
@main.route('/abc')
@login_required
# @admin_permission.require()
# @user_permission.require()
def do_articles():
# permission = Permission(RoleNeed('admin'))
# if permission.can():
if user_permission.can():
return jsonify({'status': 'ok'})
else:
return jsonify({'status': 'failed'})
@main.route('/abcd')
@login_required
@admin_permission.require()
def do_articles1():
return jsonify({'status': 'admin1'})
@main.route('/vip')
@login_required
@vip_permission.require()
def vip():
return jsonify({'status': 'vip'})
@main.route('/login', methods=['POST'])
def login():
datas = {}
if request.method == 'POST':
data = request.get_json(force=True)
username = data['username']
password = data['password']
user = User.query.filter_by(its_username=username).first()
if user and user.verify_password(password):
login_user(user)
            datas[u'info'] = 'login succeeded!'
datas[u'status'] = 0
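            # Tell Flask-Principal the identity changed so permissions are reloaded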
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.its_id))
else:
            datas[u'info'] = 'login failed!'
datas[u'status'] = 1
return jsonify(datas)
@main.route('/logout', methods=['POST'])
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return jsonify({'status': 'ok'})
@main.route('/userinfo', methods=['GET'])
@login_required
def userinfo():
print session
return jsonify({'id': current_user.its_id,
'name': current_user.its_username,
'role': current_user.bbs_signature
})
@main.route('/userroles', methods=['GET'])
def userroles():
role_list = []
for role in current_user.roles:
role_list.append(role.name)
return jsonify({'role': role_list})
@main.route('/roles', methods=['GET'])
def roles():
role = Role.query.filter_by(id=0).first()
user_list = []
print role.user
for user in role.user:
user_list.append(user.its_username)
return jsonify({'user': user_list})
@main.route('/posts', methods=['GET'])
def edit_post():
post_id = request.args.get('id', '')
permission = EditBlogPostPermission(post_id)
if permission.can():
# Save the edits ...
return jsonify({'posts': 'ok'})
else:
return jsonify({'posts': 'failed'})
@main.route('/getposts', methods=['GET'])
def getpost():
posts_list = []
for post in current_user.posts:
posts_list.append(post.id)
return jsonify({'posts': posts_list})
|
[
"wangxxxing@qq.com"
] |
wangxxxing@qq.com
|
87f1b3a99f14a6c990640b36e039893ecae3e2a3
|
92ae7f3e789e29e28bfcedce0c9de12ed2a031ed
|
/exercicio17.py
|
3eb6e999fa1df71901ef1daa627ea1237ad6c79e
|
[] |
no_license
|
marcosfujimoto/exercicios_udemy
|
5737e91cd20fb1b32bfe0fb1a156de72da79c659
|
0a0dd15072a804e09aabdddba9efdce6881d45a6
|
refs/heads/main
| 2023-08-03T05:37:39.891295
| 2021-09-09T15:43:05
| 2021-09-09T15:43:05
| 404,775,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# This program reads three numbers and prints the largest and the smallest.
a = float(input("Digite um número: "))
b = float(input("Digite um segundo número: "))
c = float(input("Digite um terceiro número: "))
if a > b and a > c:
    print(f"O maior número é: {a}")
elif b > a and b > c:
    print(f"O maior número é: {b}")
else:
    print(f"O maior número é: {c}")
if a < b and a < c:
    print(f"O menor número é: {a}")
elif b < a and b < c:
    print(f"O menor número é: {b}")
else:
    print(f"O menor número é: {c}")
|
[
"marcosacfujimoto@gmail.com"
] |
marcosacfujimoto@gmail.com
|
6da05ce10f3fc2835fb017f6b5c4b41940d294a7
|
8b3e6319a91aaea381ff6dac26ddbe6018b75dd2
|
/test/functional/interface_rest.py
|
1ad80b5ba6d8a76d2bcb29b38f5c676a08e9f98b
|
[
"MIT"
] |
permissive
|
JihanSilbert/Titan
|
9ddd7d57a6fa59501ac748c853ef5ff3a12ba51e
|
4dfd180ca8518c3ba7160c6cf113eb4e1237c42d
|
refs/heads/master
| 2020-08-15T16:07:37.998031
| 2019-10-19T10:45:49
| 2019-10-19T10:45:49
| 215,368,860
| 0
| 0
|
MIT
| 2019-10-15T18:27:27
| 2019-10-15T18:27:27
| null |
UTF-8
|
Python
| false
| false
| 15,081
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Titancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
from test_framework.test_framework import TitancoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (TitancoinTestFramework):
FORMAT_SEPARATOR = "."
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 5000)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
#######################################
# GETUTXOS: query an unspent outpoint #
#######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
#################################################
# GETUTXOS: now query an already spent outpoint #
#################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no utxo is returned because the tx is only in the mempool and we did not check the mempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be one utxo because the tx was just added to the mempool and we checked the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
[
"info@jccoin.co"
] |
info@jccoin.co
|
3cfc9fcf32d42fa790cbc667c7409a210c97a2f2
|
1aaa2a556cb79f423baaa9ea5b98de960deabd4e
|
/nlc.py
|
7a30c8509b6a36f5c760ebb6aff5cb97a361f859
|
[] |
no_license
|
IBMLeo/Watson-Legal
|
3e28d016b08204a028b19fe49a5dd84e2f05c64f
|
ff86a697efe75fe76aae2d7e086adda9ae0c2333
|
refs/heads/master
| 2020-04-23T16:12:07.291942
| 2019-02-18T14:49:47
| 2019-02-18T14:49:47
| 171,290,157
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
from sklearn.naive_bayes import GaussianNB
import pandas, xgboost, numpy, textblob, string
import os
from flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template
app = Flask(__name__)
# load the dataset
data = open('NLC_NEW2.csv').read()
labels, texts = [], []
for i, line in enumerate(data.split("\n")):
    content = line.split(',')
    if len(content) < 2:
        continue  # skip blank or malformed lines
    labels.append(content[1])
    texts.append(content[0])
trainDF = pandas.DataFrame()
trainDF['text'] = texts
trainDF['label'] = labels
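# Vectorize the texts with TF-IDF and fit a multinomial Naive Bayes classifier.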
v = TfidfVectorizer(use_idf = True)
texts = v.fit_transform(texts).toarray()
mnb = naive_bayes.MultinomialNB()
mnb = mnb.fit(texts, labels)
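# In-memory cache of previous predictions keyed by the raw input text.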
cache = {}
@app.route('/processa', methods=['POST'])
def processa():
txt = request.form['valor']
test = v.transform([txt]).toarray()
if txt in cache:
return cache[txt]
else:
prediction = mnb.predict(test)
cache[txt] = str(prediction[0])
return str(prediction[0])
app.run(port=3001)
|
[
"Leonardo@MinideLeonardo.isc.br.ibm.com"
] |
Leonardo@MinideLeonardo.isc.br.ibm.com
|
d278389107f46520c3e1cf7dc30dca6620ea1ed4
|
7f3cdb68feb469e1bfb0f324facb0fa3e773a2e9
|
/tutorial/tests/test_login_2.py
|
073bbbb7da50e23b9fc4c0d7363431ec30d84efd
|
[] |
no_license
|
qdoan1651/OrangeHRM
|
460c840586d6ab444ad069d20e129df636276a98
|
67044eb7aab64a47b9fbfdf846ed43586cadd4ad
|
refs/heads/master
| 2021-01-25T13:06:27.980187
| 2020-04-07T19:30:22
| 2020-04-07T19:30:22
| 64,992,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
'''
@author: Raghav Pal
https://www.youtube.com/watch?v=BURK7wMcCwU&t=1538s
Selenium Python Small Sample Project | Page Object Model POM
Implement Page Object Model [20:00]
'''
import unittest, time, os
from selenium import webdriver
from tutorial.pages.login_page import LoginPage2
from tutorial.pages.home_page import HomePage2
class LoginTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
chrome_path = os.path.join(project_path, 'drivers/chromedriver.v.77.exe')
cls.driver = webdriver.Chrome(chrome_path)
cls.driver.set_window_size(1600, 1000)
def test_admin_login(self):
self.driver.get('https://opensource-demo.orangehrmlive.com')
# login
login_page = LoginPage2(self.driver)
login_page.enter_username('Admin')
login_page.enter_password('admin123')
login_page.click_login()
# log out
home_page = HomePage2(self.driver)
home_page.click_welcome()
time.sleep(1)
home_page.click_logout()
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
print('Test completed.')
if __name__ == '__main__':
unittest.main()
|
[
"qdoan1651@gmail.com"
] |
qdoan1651@gmail.com
|
d8e1132a217349048e8003e0bf7d2a90d7ca14d3
|
763ab1610133ba5b1129181a762ba00094f4755f
|
/ex43.py
|
4eb102a063b85b267cc036ab4b0a867c28c70ac1
|
[] |
no_license
|
siddhartha-chandra/LPTHW
|
3487ba2a7e0babceae28845791589baa5e3aabbb
|
38dcafd008277a4b40984b4ca60faee41893ea66
|
refs/heads/master
| 2021-01-11T00:03:19.095451
| 2016-10-25T19:41:21
| 2016-10-25T19:41:21
| 70,763,783
| 0
| 0
| null | 2016-10-25T19:41:21
| 2016-10-13T03:04:27
| null |
UTF-8
|
Python
| false
| false
| 8,736
|
py
|
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet configured. Subclass it and implement enter()."
exit(1)
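# Drives the game loop: repeatedly enter the current scene and follow the scene
# name it returns, until the 'finished' scene is reached.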
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene('finished')
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
current_scene.enter()
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your mom would be proud...if you were smarter.",
"Such a loser.",
"I have a small puppy that's better at this."
]
def enter(self):
print Death.quips[randint(0, len(self.quips)-1)]
exit(1)
class CentralCorridor(Scene):
def enter(self):
print "The Gothons of Planet Percal #25 have invaded your ship and destroyed"
print "your entire crew. You are the last surviving member and your last"
print "mission is to get the neutron destruct bomb from the Weapons Armory,"
print "put it in the bridge, and blow the ship up after getting into an "
print "escape pod."
print "\n"
print "You're running down the central corridor to the Weapons Armory when"
print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
print "flowing around his hate filled body. He's blocking the door to the"
print "Armory and about to pull a weapon to blast you."
action = raw_input("> ")
if action == "shoot":
print "Quick on the draw you yank out your blaster and fire it at the Gothon."
print "His clown costume is flowing and moving around his body, which throws"
print "off your aim. Your laser hits his costume but misses him entirely. This"
print "completely ruins his brand new costume his mother bought him, which"
print "makes him fly into an insane rage and blast you repeatedly in the face until"
print "you are dead. Then he eats you."
return 'death'
elif action == "dodge":
print "Like a world class boxer you dodge, weave, slip and slide right"
print "as the Gothon's blaster cranks a laser past your head."
print "In the middle of your artful dodge your foot slips and you"
print "bang your head on the metal wall and pass out."
print "You wake up shortly after only to die as the Gothon stomps on"
print "your head and eats you."
return 'death'
elif action == "tell a joke":
print "Lucky for you they made you learn Gothon insults in the academy."
print "You tell the one Gothon joke you know:"
print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
print "While he's laughing you run up and shoot him square in the head"
print "putting him down, then jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "Invalid action"
return 'central_corridor'
class LaserWeaponArmory(Scene):
def enter(self):
print "You do a dive roll into the Weapon Armory, crouch and scan the room"
print "for more Gothons that might be hiding. It's dead quiet, too quiet."
print "You stand up and run to the far side of the room and find the"
print "neutron bomb in its container. There's a keypad lock on the box"
print "and you need the code to get the bomb out. If you get the code"
print "wrong 20 times then the lock closes forever and you can't"
print "get the bomb. The code is 2 digits."
code = "%d%d" % (randint(1,5), randint(1,5))
guess = raw_input("[keypad]> ")
guesses = 0
while guess != code and guesses < 20:
print "BZZZZEDDDD!"
guesses += 1
guess = raw_input("[keypad]> ")
if guess == code or guess == 'hoopla':
print "The container clicks open and the seal breaks, letting gas out."
print "You grab the neutron bomb and run as fast as you can to the"
print "bridge where you must place it in the right spot."
return 'the_bridge'
else:
print "The lock buzzes one last time and then you hear a sickening"
print "melting sound as the mechanism is fused together."
print "You decide to sit there, and finally the Gothons blow up the"
print "ship from their ship and you die."
return 'death'
class TheBridge(Scene):
def enter(self):
print "You burst onto the Bridge with the netron destruct bomb"
print "under your arm and surprise 5 Gothons who are trying to"
print "take control of the ship. Each of them has an even uglier"
print "clown costume than the last. They haven't pulled their"
print "weapons out yet, as they see the active bomb under your"
print "arm and don't want to set it off."
action = raw_input("> ")
if action == "throw the bomb":
print "In a panic you throw the bomb at the group of Gothons"
print "and make a leap for the door. Right as you drop it a"
print "Gothon shoots you right in the back killing you."
print "As you die you see another Gothon frantically try to disarm"
print "the bomb. You die knowing they will probably blow up when"
print "it goes off."
return 'death'
elif action == "slowly place the bomb":
print "You point your blaster at the bomb under your arm"
print "and the Gothons put their hands up and start to sweat."
print "You inch backward to the door, open it, and then carefully"
print "place the bomb on the floor, pointing your blaster at it."
print "You then jump back through the door, punch the close button"
print "and blast the lock so the Gothons can't get out."
print "Now that the bomb is placed you run to the escape pod to"
print "get off this tin can."
return 'escape_pod'
else:
print "Invalid action"
return 'the_bridge'
class EscapePod(Scene):
def enter(self):
print "You rush through the ship desperately trying to make it to"
print "the escape pod before the whole ship explodes. It seems like"
print "hardly any Gothons are on the ship, so your run is clear of"
print "interference. You get to the chamber with the escape pods, and"
print "now need to pick one to take. Some of them could be damaged"
print "but you don't have time to look. There's 5 pods, which one"
print "do you take?"
good_pod = randint(1,3)
guess = raw_input("[pod #]> ")
if int(guess) != good_pod:
print "You jump into pod %s and hit the eject button." % guess
print "The pod escapes out into the void of space, then"
print "implodes as the hull ruptures, crushing your body"
print "into jam jelly."
return 'death'
else:
print "You jump into pod %s and hit the eject button." % guess
print "The pod easily slides out into space heading to"
print "the planet below. As it flies to the planet, you look"
print "back and see your ship implode then explode like a"
print "bright star, taking out the Gothon ship at the same"
print "time. You won!"
return 'finished'
class Finished(Scene):
def enter(self):
print "You won! Good job."
return 'finished'
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished()
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
return val
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
[
"siddhartha.chandra0112@gmail.com"
] |
siddhartha.chandra0112@gmail.com
|
90fb19f58923debadd698c8a9a76793b115af96d
|
dd8fadc12b7c89609031e6f19c336457b2e7f314
|
/test/test_myStrategy.py
|
73bf0735e13ea29147cf69d72574cce2475211a8
|
[] |
no_license
|
Sandy4321/ai_cup_2015_code_race
|
59c86ae5d61f9c2656e327dd818a8e153aec3c5f
|
15cc6d7f79aa7be495fe115c478917f85218fcc1
|
refs/heads/master
| 2020-05-31T15:32:40.612880
| 2016-11-16T14:29:14
| 2016-11-16T14:29:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
from unittest import TestCase
from debug.ObjectLoader import *
from MyStrategy import MyStrategy
from model.Move import Move
class TestMyStrategy(TestCase):
def test_move(self):
me, world, game = load_objects()
strategy = MyStrategy()
move = Move()
strategy.move(me, world, game, move)
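# Hedged addition: a standard entry point so this module can be run directly;
# otherwise the TestCase needs an external runner such as `python -m unittest`.
# The test above is a smoke test -- it passes as long as move() raises nothing.
if __name__ == '__main__':
    import unittest
    unittest.main()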
|
[
"enorone@gmail.com"
] |
enorone@gmail.com
|
8651ed35b2eb2e819fafde9dad2ea2b5c37309a2
|
9354603c34bd8e0e477f8777054dbd775c994ea4
|
/webhr/urls.py
|
b8fc022d061ed07a32c02af440fd47c815a9bd56
|
[] |
no_license
|
Drjimoh/the-hr-project
|
8c661ee2b5052b8e919dfcdb75827246bcf8d8ea
|
fc5d2085c7d4da0326cedc66342b0f9f058136d5
|
refs/heads/master
| 2020-05-20T16:20:02.217053
| 2019-05-08T18:58:44
| 2019-05-08T18:58:44
| 185,664,268
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
"""webhr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from cvranker import views
from django.contrib.admin.views.decorators import staff_member_required
urlpatterns = [
path('admin/', admin.site.urls),
path('api', staff_member_required(views.ApiView.as_view()), name='api view'),
path('', views.add_cv, name= 'Homepage'),
path('ranks', views.get_top_cvs, name='Ranks'),
path('generate', views.GenerateScoredCvView.as_view(), name='generate'),
]
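# Hedged usage sketch (route names taken from the patterns above): once
# registered, URLs can be resolved by name instead of hard-coding paths:
#
#   from django.urls import reverse
#   reverse('Ranks')     # -> '/ranks'
#   reverse('generate')  # -> '/generate'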
|
[
"waliu.jimoh@yahoo.com"
] |
waliu.jimoh@yahoo.com
|
2c8e40527c2055da598d7f70ad8bf4ae9d304e79
|
ba70cd3dcb502fc6f61244c39b1d446c8f43904e
|
/venv/Scripts/pip3-script.py
|
df11badebd96ac6cc1352267dfea865e9b7778bd
|
[] |
no_license
|
MujtabaMohsin/to-read-books
|
fa623cf1e76485e61acc35918164b05f2fc34965
|
b19ac21af0027b3befb753a7d6884e0320483e71
|
refs/heads/master
| 2023-05-30T16:16:01.802228
| 2021-06-14T09:30:41
| 2021-06-14T09:30:41
| 376,756,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
#!C:\Users\User\PycharmProjects\udacity-FSND-activity\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"mmjj1010@gmail.com"
] |
mmjj1010@gmail.com
|
fc04e12dcdbbafa967a840973bd3e33969b3becb
|
54b7bae79af992c149c644c21a8fa09313841449
|
/Demo/SVM/svmMLiA.py
|
1f86b194b77772aa9bc3a153a8814b0e320d8405
|
[
"Apache-2.0"
] |
permissive
|
ViatorSun/GitChat_CNN
|
75f9542364c9a51a467d69625cc219e8f2a6795d
|
d2f16eb2d108afa58ab31e09956424af22d96c47
|
refs/heads/master
| 2021-07-11T15:09:03.579803
| 2019-01-22T16:00:55
| 2019-01-22T16:00:55
| 143,283,383
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,182
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2018.
# @Author : 绿色羽毛
# @Email : lvseyumao@foxmail.com
# @Blog : https://blog.csdn.net/ViatorSun
# @Note    : Implementation
from numpy import *
# from time import sleep
def loadDataSet(fileName):
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split('\t')
dataMat.append([float(lineArr[0]), float(lineArr[1])])
labelMat.append(float(lineArr[2]))
return dataMat,labelMat
def selectJrand(i,m):
j=i #we want to select any J not equal to i
while (j==i):
j = int(random.uniform(0,m))
return j
def clipAlpha(aj,H,L):
if aj > H:
aj = H
if L > aj:
aj = L
return aj
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
b = 0; m,n = shape(dataMatrix)
alphas = mat(zeros((m,1)))
iter = 0
while (iter < maxIter):
alphaPairsChanged = 0
for i in range(m):
fXi = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[i,:].T)) + b
Ei = fXi - float(labelMat[i])#if checks if an example violates KKT conditions
if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
j = selectJrand(i,m)
fXj = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[j,:].T)) + b
Ej = fXj - float(labelMat[j])
alphaIold = alphas[i].copy(); alphaJold = alphas[j].copy()
if (labelMat[i] != labelMat[j]):
L = max(0, alphas[j] - alphas[i])
H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
H = min(C, alphas[j] + alphas[i])
if L==H: print("L==H"); continue
eta = 2.0 * dataMatrix[i,:]*dataMatrix[j,:].T - dataMatrix[i,:]*dataMatrix[i,:].T \
- dataMatrix[j,:]*dataMatrix[j,:].T
if eta >= 0: print("eta>=0"); continue
alphas[j] -= labelMat[j]*(Ei - Ej)/eta
alphas[j] = clipAlpha(alphas[j],H,L)
if (abs(alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); continue
alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])#update i by the same amount as j
                #the update is in the opposite direction
b1 = b - Ei- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[i,:].T \
- labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i,:]*dataMatrix[j,:].T
b2 = b - Ej- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[j,:].T \
- labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j,:]*dataMatrix[j,:].T
if (0 < alphas[i]) and (C > alphas[i]): b = b1
elif (0 < alphas[j]) and (C > alphas[j]): b = b2
else: b = (b1 + b2)/2.0
alphaPairsChanged += 1
print("iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
if (alphaPairsChanged == 0): iter += 1
else: iter = 0
print("iteration number: %d" % iter)
return b,alphas
def kernelTrans(X, A, kTup): #calc the kernel or transform data to a higher dimensional space
m,n = shape(X)
K = mat(zeros((m,1)))
if kTup[0]=='lin': K = X * A.T #linear kernel
elif kTup[0]=='rbf':
for j in range(m):
deltaRow = X[j,:] - A
K[j] = deltaRow*deltaRow.T
K = exp(K/(-1*kTup[1]**2)) #divide in NumPy is element-wise not matrix like Matlab
else: raise NameError('Houston We Have a Problem -- \
That Kernel is not recognized')
return K
class optStruct:
def __init__(self,dataMatIn, classLabels, C, toler, kTup): # Initialize the structure with the parameters
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m,1)))
self.b = 0
self.eCache = mat(zeros((self.m,2))) #first column is valid flag
self.K = mat(zeros((self.m,self.m)))
for i in range(self.m):
self.K[:,i] = kernelTrans(self.X, self.X[i,:], kTup)
def calcEk(oS, k):
fXk = float(multiply(oS.alphas,oS.labelMat).T*oS.K[:,k] + oS.b)
Ek = fXk - float(oS.labelMat[k])
return Ek
def selectJ(i, oS, Ei):         #this is the second-choice heuristic, and calcs Ej
maxK = -1; maxDeltaE = 0; Ej = 0
oS.eCache[i] = [1,Ei] #set valid #choose the alpha that gives the maximum delta E
validEcacheList = nonzero(oS.eCache[:,0].A)[0]
if (len(validEcacheList)) > 1:
for k in validEcacheList: #loop through valid Ecache values and find the one that maximizes delta E
if k == i: continue #don't calc for i, waste of time
Ek = calcEk(oS, k)
deltaE = abs(Ei - Ek)
if (deltaE > maxDeltaE):
maxK = k; maxDeltaE = deltaE; Ej = Ek
return maxK, Ej
else: #in this case (first time around) we don't have any valid eCache values
j = selectJrand(i, oS.m)
Ej = calcEk(oS, j)
return j, Ej
""" after any alpha has changed update the new value in the cache """
def updateEk(oS, k):
Ek = calcEk(oS, k)
oS.eCache[k] = [1,Ek]
def innerL(i, oS):
Ei = calcEk(oS, i)
if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
j,Ej = selectJ(i, oS, Ei) #this has been changed from selectJrand
alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy()
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS.alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
if L==H: print("L==H"); return 0
eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j] #changed for kernel
if eta >= 0: print("eta>=0"); return 0
oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
updateEk(oS, j) #added this for the Ecache
if (abs(oS.alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); return 0
oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j]) #update i by the same amount as j
        updateEk(oS, i) #added this for the Ecache                    #the update is in the opposite direction
b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,i] - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[i,j]
b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,j]- oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[j,j]
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
else: oS.b = (b1 + b2)/2.0
return 1
else: return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter,kTup=('lin', 0)): #full Platt SMO
oS = optStruct(mat(dataMatIn),mat(classLabels).transpose(),C,toler, kTup)
iter = 0
entireSet = True; alphaPairsChanged = 0
while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
alphaPairsChanged = 0
if entireSet: #go over all
for i in range(oS.m):
alphaPairsChanged += innerL(i,oS)
print("fullSet, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
iter += 1
else:#go over non-bound (railed) alphas
nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
for i in nonBoundIs:
alphaPairsChanged += innerL(i,oS)
print("non-bound, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
iter += 1
if entireSet: entireSet = False #toggle entire set loop
elif (alphaPairsChanged == 0): entireSet = True
print("iteration number: %d" % iter)
return oS.b,oS.alphas
def calcWs(alphas,dataArr,classLabels):
X = mat(dataArr); labelMat = mat(classLabels).transpose()
m,n = shape(X)
w = zeros((n,1))
for i in range(m):
w += multiply(alphas[i]*labelMat[i],X[i,:].T)
return w
def testRbf(k1=1.3):
dataArr,labelArr = loadDataSet('testSetRBF.txt')
b,alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1)) #C=200 important
datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
svInd=nonzero(alphas.A>0)[0]
sVs=datMat[svInd] #get matrix of only support vectors
labelSV = labelMat[svInd]
print("there are %d Support Vectors" % shape(sVs)[0])
m,n = shape(datMat)
errorCount = 0
for i in range(m):
kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
if sign(predict)!=sign(labelArr[i]): errorCount += 1
print("the training error rate is: %f" % (float(errorCount)/m))
dataArr,labelArr = loadDataSet('testSetRBF2.txt')
errorCount = 0
datMat=mat(dataArr)
labelMat = mat(labelArr).transpose()
m,n = shape(datMat)
for i in range(m):
kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
if sign(predict)!=sign(labelArr[i]): errorCount += 1
print("the test error rate is: %f" % (float(errorCount)/m))
def img2vector(filename):
returnVect = zeros((1,1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0,32*i+j] = int(lineStr[j])
return returnVect
def loadImages(dirName):
from os import listdir
hwLabels = []
trainingFileList = listdir(dirName) #load the training set
m = len(trainingFileList)
trainingMat = zeros((m,1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0] #take off .txt
classNumStr = int(fileStr.split('_')[0])
if classNumStr == 9: hwLabels.append(-1)
else: hwLabels.append(1)
trainingMat[i,:] = img2vector('%s/%s' % (dirName, fileNameStr))
return trainingMat, hwLabels
def testDigits(kTup=('rbf', 10)):
dataArr,labelArr = loadImages('trainingDigits')
b,alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
svInd=nonzero(alphas.A>0)[0]
sVs=datMat[svInd]
labelSV = labelMat[svInd]
print("there are %d Support Vectors" % shape(sVs)[0])
m,n = shape(datMat)
errorCount = 0
for i in range(m):
kernelEval = kernelTrans(sVs,datMat[i,:],kTup)
predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
if sign(predict)!=sign(labelArr[i]): errorCount += 1
print("the training error rate is: %f" % (float(errorCount)/m))
dataArr,labelArr = loadImages('testDigits')
errorCount = 0
datMat=mat(dataArr);
labelMat = mat(labelArr).transpose()
m,n = shape(datMat)
for i in range(m):
kernelEval = kernelTrans(sVs,datMat[i,:],kTup)
predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
if sign(predict)!=sign(labelArr[i]): errorCount += 1
print("the test error rate is: %f" % (float(errorCount)/m))
""" Non-Kernel VErsions below """
class optStructK:
def __init__(self,dataMatIn, classLabels, C, toler): # Initialize the structure with the parameters
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m,1)))
self.b = 0
self.eCache = mat(zeros((self.m,2))) #first column is valid flag
def calcEkK(oS, k):
fXk = float(multiply(oS.alphas,oS.labelMat).T*(oS.X*oS.X[k,:].T)) + oS.b
Ek = fXk - float(oS.labelMat[k])
return Ek
def selectJK(i, oS, Ei):         #this is the second-choice heuristic, and calcs Ej
maxK = -1; maxDeltaE = 0; Ej = 0
oS.eCache[i] = [1,Ei] #set valid #choose the alpha that gives the maximum delta E
validEcacheList = nonzero(oS.eCache[:,0].A)[0]
if (len(validEcacheList)) > 1:
for k in validEcacheList: #loop through valid Ecache values and find the one that maximizes delta E
if k == i: continue #don't calc for i, waste of time
            Ek = calcEkK(oS, k)
deltaE = abs(Ei - Ek)
if (deltaE > maxDeltaE):
maxK = k; maxDeltaE = deltaE; Ej = Ek
return maxK, Ej
else: #in this case (first time around) we don't have any valid eCache values
j = selectJrand(i, oS.m)
        Ej = calcEkK(oS, j)
return j, Ej
def updateEkK(oS, k):#after any alpha has changed update the new value in the cache
    Ek = calcEkK(oS, k)
oS.eCache[k] = [1,Ek]
def innerLK(i, oS):
    Ei = calcEkK(oS, i)
if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJK(i, oS, Ei) #this has been changed from selectJrand
alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy()
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS.alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
if L==H: print("L==H"); return 0
eta = 2.0 * oS.X[i,:]*oS.X[j,:].T - oS.X[i,:]*oS.X[i,:].T - oS.X[j,:]*oS.X[j,:].T
if eta >= 0: print("eta>=0"); return 0
oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEkK(oS, j) #added this for the Ecache
if (abs(oS.alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); return 0
oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])#update i by the same amount as j
        updateEkK(oS, i) #added this for the Ecache                    #the update is in the opposite direction
b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T \
- oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T \
- oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
else: oS.b = (b1 + b2)/2.0
return 1
else: return 0
# full Platt SMO
def smoPK(dataMatIn, classLabels, C, toler, maxIter):
    oS = optStructK(mat(dataMatIn),mat(classLabels).transpose(),C,toler)
iter = 0
entireSet = True; alphaPairsChanged = 0
while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
alphaPairsChanged = 0
# go over all
if entireSet:
for i in range(oS.m):
                alphaPairsChanged += innerLK(i,oS)
print("fullSet, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
iter += 1
# go over non-bound (railed) alphas
else:
nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
for i in nonBoundIs:
                alphaPairsChanged += innerLK(i,oS)
print("non-bound, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
iter += 1
# toggle entire set loop
if entireSet:
entireSet = False
elif (alphaPairsChanged == 0):
entireSet = True
print("iteration number: %d" % iter)
return oS.b,oS.alphas
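# Hedged usage sketch: 'testSet.txt' is an assumption (book-style tab-separated
# data of x, y, label in {-1, +1}) and is not shipped with this module.
if __name__ == '__main__':
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)  # linear kernel by default
    ws = calcWs(alphas, dataArr, labelArr)               # recover the weight vector
    x0 = mat(dataArr)[0]                                 # first training sample
    pred = sign(float(x0 * ws + b))                      # decision rule: sign(x*w + b)
    print("prediction: %d, label: %d" % (int(pred), int(labelArr[0])))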
|
[
"noreply@github.com"
] |
ViatorSun.noreply@github.com
|
917f801689c9521ae3cd9db9f38c779b8466c316
|
d695adfe54f6019ecaf3e0ad741391654c348666
|
/introduction to requests.py
|
d6ba2c9e05da2890600a46d9baecdb146a18adf1
|
[] |
no_license
|
yangmiaohong/Python-02_codes
|
98a048334479b89c877a5d53a515bd03759cdcf4
|
80be2800acdf0ea9493cd91f9a177bb22daf7d15
|
refs/heads/master
| 2020-06-17T02:11:36.572815
| 2016-11-28T14:58:45
| 2016-11-28T14:58:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,675
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 12 21:21:16 2016
@author: chen
"""
import urllib
import requests
#---------------------------------------------------------------
#Make a request
#get method
r = requests.get('http://esf.xm.fang.com/house/i32/')
#post method
'''
headers_2 = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
# 'cookie':cookie,
'Host': 'www.landchina.com',
'Origin': 'http://www.landchina.com',
'Referer': 'http://www.landchina.com/default.aspx?tabid=263&wmguid=75c72564-ffd9-426a-954b-8ac2df0903b7&p=9f2c3acd-0256-4da2-a659-6949c4671a2a%3A'+str(self.start_time)+'~'+str(self.end_time),
'Upgrade-Insecure-Requests': '1',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.21 Safari/537.36',
#'http':proxy,
}
data = {
'VIEWSTATE': '/wEPDwUJNjkzNzgyNTU4D2QWAmYPZBYIZg9kFgICAQ9kFgJmDxYCHgdWaXNpYmxlaGQCAQ9kFgICAQ8WAh4Fc3R5bGUFIEJBQ0tHUk9VTkQtQ09MT1I6I2YzZjVmNztDT0xPUjo7ZAICD2QWAgIBD2QWAmYPZBYCZg9kFgJmD2QWBGYPZBYCZg9kFgJmD2QWAmYPZBYCZg9kFgJmDxYEHwEFIENPTE9SOiNEM0QzRDM7QkFDS0dST1VORC1DT0xPUjo7HwBoFgJmD2QWAgIBD2QWAmYPDxYCHgRUZXh0ZWRkAgEPZBYCZg9kFgJmD2QWAmYPZBYEZg9kFgJmDxYEHwEFhwFDT0xPUjojRDNEM0QzO0JBQ0tHUk9VTkQtQ09MT1I6O0JBQ0tHUk9VTkQtSU1BR0U6dXJsKGh0dHA6Ly93d3cubGFuZGNoaW5hLmNvbS9Vc2VyL2RlZmF1bHQvVXBsb2FkL3N5c0ZyYW1lSW1nL3hfdGRzY3dfc3lfamhnZ18wMDAuZ2lmKTseBmhlaWdodAUBMxYCZg9kFgICAQ9kFgJmDw8WAh8CZWRkAgIPZBYCZg9kFgJmD2QWAmYPZBYCZg9kFgJmD2QWAmYPZBYEZg9kFgJmDxYEHwEFIENPTE9SOiNEM0QzRDM7QkFDS0dST1VORC1DT0xPUjo7HwBoFgJmD2QWAgIBD2QWAmYPDxYCHwJlZGQCAg9kFgJmD2QWBGYPZBYCZg9kFgJmD2QWAmYPZBYCZg9kFgJmD2QWAmYPFgQfAQUgQ09MT1I6I0QzRDNEMztCQUNLR1JPVU5ELUNPTE9SOjsfAGgWAmYPZBYCAgEPZBYCZg8PFgIfAmVkZAICD2QWBGYPZBYCZg9kFgJmD2QWAmYPZBYCAgEPZBYCZg8WBB8BBYYBQ09MT1I6I0QzRDNEMztCQUNLR1JPVU5ELUNPTE9SOjtCQUNLR1JPVU5ELUlNQUdFOnVybChodHRwOi8vd3d3LmxhbmRjaGluYS5jb20vVXNlci9kZWZhdWx0L1VwbG9hZC9zeXNGcmFtZUltZy94X3Rkc2N3X3p5X2pnZ2dfMDEuZ2lmKTsfAwUCNDYWAmYPZBYCAgEPZBYCZg8PFgIfAmVkZAIBD2QWAmYPZBYCZg9kFgJmD2QWAgIBD2QWAmYPFgQfAQUgQ09MT1I6I0QzRDNEMztCQUNLR1JPVU5ELUNPTE9SOjsfAGgWAmYPZBYCAgEPZBYCZg8PFgIfAmVkZAIDD2QWAgIDDxYEHglpbm5lcmh0bWwFtwY8cCBhbGlnbj0iY2VudGVyIj48c3BhbiBzdHlsZT0iZm9udC1zaXplOiB4LXNtYWxsIj4mbmJzcDs8YnIgLz4NCiZuYnNwOzxhIHRhcmdldD0iX3NlbGYiIGhyZWY9Imh0dHA6Ly93d3cubGFuZGNoaW5hLmNvbS8iPjxpbWcgYm9yZGVyPSIwIiBhbHQ9IiIgd2lkdGg9IjI2MCIgaGVpZ2h0PSI2MSIgc3JjPSIvVXNlci9kZWZhdWx0L1VwbG9hZC9mY2svaW1hZ2UvdGRzY3dfbG9nZS5wbmciIC8+PC9hPiZuYnNwOzxiciAvPg0KJm5ic3A7PHNwYW4gc3R5bGU9ImNvbG9yOiAjZmZmZmZmIj5Db3B5cmlnaHQgMjAwOC0yMDE0IERSQ25ldC4gQWxsIFJpZ2h0cyBSZXNlcnZlZCZuYnNwOyZuYnNwOyZuYnNwOyA8c2NyaXB0IHR5cGU9InRleHQvamF2YXNjcmlwdCI+DQp2YXIgX2JkaG1Qcm90b2NvbCA9ICgoImh0dHBzOiIgPT0gZG9jdW1lbnQubG9jYXRpb24ucHJvdG9jb2wpID8gIiBodHRwczovLyIgOiAiIGh0dHA6Ly8iKTsNCmRvY3VtZW50LndyaXRlKHVuZXNjYXBlKCIlM0NzY3JpcHQgc3JjPSciICsgX2JkaG1Qcm90b2NvbCArICJobS5iYWlkdS5jb20vaC5qcyUzRjgzODUzODU5YzcyNDdjNWIwM2I1Mjc4OTQ2MjJkM2ZhJyB0eXBlPSd0ZXh0L2phdmFzY3JpcHQnJTNFJTNDL3NjcmlwdCUzRSIpKTsNCjwvc2NyaXB0PiZuYnNwOzxiciAvPg0K54mI5p2D5omA5pyJJm5ic3A7IOS4reWbveWcn+WcsOW4guWcuue9kTxiciAvPg0K5aSH5qGI5Y+3OiDkuqxJQ1DlpIcwOTA3NDk5MuWPtyDkuqzlhaznvZHlronlpIcxMTAxMDIwMDA2NjYoMikmbmJzcDs8YnIgLz4NCjwvc3Bhbj4mbmJzcDsmbmJzcDsmbmJzcDs8YnIgLz4NCiZuYnNwOzwvc3Bhbj48L3A+HwEFZEJBQ0tHUk9VTkQtSU1BR0U6dXJsKGh0dHA6Ly93d3cubGFuZGNoaW5hLmNvbS9Vc2VyL2RlZmF1bHQvVXBsb2FkL3N5c0ZyYW1lSW1nL3hfdGRzY3cyMDEzX3l3XzEuanBnKTtkZFgrT4ZXzyk2fvKb+ZQdNgDE7amPUgf1dsAbA0tQEzbS',
'__EVENTVALIDATION': '/wEWAgKNgPHpAgLN3cj/BMeqdKR8EqyZqeFW25/wiD3Dqo+sG7dks/liloBmr6j/',
'hidComName': 'default',
'TAB_QueryConditionItem': '9f2c3acd-0256-4da2-a659-6949c4671a2a',
'TAB_QuerySortItemList': '282:False',
'TAB_QuerySubmitConditionData': '9f2c3acd-0256-4da2-a659-6949c4671a2a:'+str(self.start_time)+'~'+str(self.end_time),
'TAB_RowButtonActionControl': '',
'TAB_QuerySubmitPagerData': str(self.page),
'TAB_QuerySubmitSortData': ''
}
requests.post(self.url, data, headers=headers_2,cookies=self.cookie)
'''
#---------------------------------------------------------------
#Response Content
print len(r.content)
print len(r.text)
print r.encoding
#-----------------------------------------------------------------
#Passing Parameters In URLs
parameters = {'keyword': 'T-shirt', 'enc': 'utf-8', 'cid3': '1349'}
p = requests.get("http://search.jd.com/search", params=parameters)
print (p.url)
#-----------------------------------------------------------------
#parsing JSON
location = '厦门大学经济学院'
url_1 = 'http://apis.map.qq.com/ws/geocoder/v1/?address='
url_2 = urllib.quote(location)  # percent-encode the address; unquote would decode instead
url_3 = '&key='
url_4 = ''
url = ''.join([url_1,url_2,url_3,url_4])
print requests.get(url).content
print requests.get(url).json()
print type(requests.get(url).json())
print requests.get(url).json()['result']['title']
l = eval(requests.get(url).content)  # fragile: eval breaks on JSON true/false/null; prefer .json()
print type(l)
print l['result']['title']
#-----------------------------------------------------------------
#Headers,Proxies,timeout
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
# 'cookie':cookie,
#'Host': '',
#'Origin': '',
#'Referer': ',
'Upgrade-Insecure-Requests': '1',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.21 Safari/537.36',
}
#q = requests.get('http://esf.xm.fang.com/house/i32/',headers=headers,timeout=10)
#q = requests.get('http://esf.xm.fang.com/house/i32/',headers=headers,timeout=10,proxies={})
#-----------------------------------------------------------------
#Session
#s = requests.session()
#login_data={"account":"","password":""}
#res=s.post("http://mail.163.com/",login_data)
#print res.status_code
#print res.content
#print res.headers
#print res.cookies
#print res.json()
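#-----------------------------------------------------------------
#Status codes and error handling (hedged sketch, reusing the URL from above)
try:
    resp = requests.get('http://esf.xm.fang.com/house/i32/', timeout=10)
    print resp.status_code   # 200 on success
    resp.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
except requests.exceptions.RequestException as e:
    print 'request failed:', e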
|
[
"noreply@github.com"
] |
yangmiaohong.noreply@github.com
|
a7c4980bb01592141cea84dc3aad994dd75b58bd
|
b5e7243aa9d0e00e9017aa0324a901d0bde0570e
|
/task.10.py
|
d90a855b49177cdcf4f37ec70db0d8b411908a19
|
[] |
no_license
|
Lekhasreesangeetham/Lekha-python
|
94cea2609b14ee73e53a2f1d5d7d20ea85c5a8b3
|
096dbe4f18e175e59147da250e03cdf616685ba6
|
refs/heads/main
| 2023-08-05T08:19:03.902187
| 2021-09-24T06:30:22
| 2021-09-24T06:30:22
| 403,986,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
a=input("enter a string")
b=a[::-1]
if(a==b):
print("given string is a palindrome")
else:
print("given string is not a palindrome")
|
[
"noreply@github.com"
] |
Lekhasreesangeetham.noreply@github.com
|
4c4409f87b3e88223ff6038724d1ce3c7a819609
|
e4ea05e525feb006f1aa4e50ea8f11379b7ceff4
|
/server.py
|
5c34bbaed2f6b660cb6e5d6b627cc0f957514de2
|
[
"MIT"
] |
permissive
|
datopian/resolver
|
e1d141cf35ef0f588f95754b74810be84df55882
|
a897a90021432691ace81974dd261e48ee34fc95
|
refs/heads/master
| 2021-09-14T16:33:35.775866
| 2018-05-16T05:52:33
| 2018-05-16T05:52:33
| 98,884,698
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
import os
import logging
from flask import Flask
from flask_cors import CORS
import flask
from resolver import make_blueprint
SERVICE_NAME = os.environ.get('DATAHUB_SERVICE_NAME', 'resolver')
# Create application
app = Flask(__name__, static_folder=None)
# CORS support
CORS(app, supports_credentials=True)
@app.errorhandler(404)
def page_not_found(error):
ascii_message = '''
'''
info = "%s service - part of the DataHub platform" % SERVICE_NAME
docs = "http://docs.datahub.io"
return flask.jsonify(info=info, docs=docs), 404
# Register blueprints
app.register_blueprint(make_blueprint(),
url_prefix='/%s/' % SERVICE_NAME)
logging.getLogger().setLevel(logging.INFO)
if __name__=='__main__':
app.run()
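# Hedged, self-contained sketch of the JSON-404 pattern used above, exercised
# with Flask's test client (no real 'resolver' package required):
#
#   from flask import Flask, jsonify
#   demo = Flask(__name__)
#
#   @demo.errorhandler(404)
#   def missing(error):
#       return jsonify(info="demo service", docs="http://docs.datahub.io"), 404
#
#   with demo.test_client() as client:
#       assert client.get('/no-such-route').status_code == 404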
|
[
"irakli.mchedlishvili@datopian.com"
] |
irakli.mchedlishvili@datopian.com
|
29e3ba05a591d264cfda6485ad4b17677323be16
|
4fff448b20b92a929e105448f0ff01ffda5d3073
|
/GrabadoraDeVoz/Python/expandContractions.py
|
6fa43d58201b09bb8440d890b9bcbcb177092256
|
[] |
no_license
|
wsebastiangroves/SampleWork
|
9869b6417f3a2ac5e2d4114fd12d96191e6da9b9
|
b324e88effc35dbbf339c5356505d355d6ae368c
|
refs/heads/master
| 2020-04-28T07:09:42.669086
| 2020-03-10T16:40:59
| 2020-03-10T16:40:59
| 175,082,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Source: http://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python
"""
import re
cList = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there had",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'alls": "you alls",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you you will",
"you'll've": "you you will have",
"you're": "you are",
"you've": "you have"
}
c_re = re.compile('(%s)' % '|'.join(cList.keys()))
def expandContractions(text, c_re=c_re):
def replace(match):
return cList[match.group(0)]
return c_re.sub(replace, text)
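# Hedged usage sketch: both cList and the compiled regex are case-sensitive,
# so input casing must match the keys above (e.g. "I'm" but "don't").
if __name__ == '__main__':
    print(expandContractions("I'm sure we can't expand what isn't there"))
    # -> I am sure we cannot expand what is not there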
|
[
"wsebastiangroves@gmail.com"
] |
wsebastiangroves@gmail.com
|