blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 220
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 257
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
994164e610d278fe042d18fcfb17557acddd8a41
|
47a496e0c7ea9adf35c006d193a88357006a370e
|
/algorithm/TopicB2/TreePagoda.py
|
fcce3df7058ac52a7a5bc94496f5eb20ed821fda
|
[] |
no_license
|
Curious-chen/curriculum-design
|
01ea5aff12c3097f7283571befd7bcfe68149817
|
036f78a62b15ec8e5c8e1013d124f726fd2bebe4
|
refs/heads/master
| 2020-12-06T14:19:29.026158
| 2020-01-08T06:30:50
| 2020-01-08T06:30:50
| 232,483,805
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
"""
将正整数排成等边三角形(也叫数塔),三角形的底边有个数,
下图给出了的一个例子。从三角形顶点出发通过一系列相邻整数(在图中用正方形表示),
如何使得到达底边时的总和最大
"""
import numpy as np
"""
https://www.jianshu.com/p/2a7f5cac0d58
"""
"""
动态规划
dp[i][j] = max(dp[i+1][j],dp[i+1][j+1])+date[i][j]
"""
"""
(1) 初始化距离数组dp,令距离dp的最后一行复制树塔的最后一行的值
(2) 从树塔倒数第二行开始,自底向上计算
(3) 判断x点的左右孩子的大小,对应的距离dp = 左右孩子中的较大值加上树塔对应位置值
(4) 重复2、3步骤,直到计算完树塔顶端
"""
class TreePagoda(object):
    """Maximum path sum in a number triangle (tree pagoda), bottom-up DP.

    Recurrence: dp[i][j] = max(dp[i+1][j], dp[i+1][j+1]) + pagoda[i][j]
    The pagoda is stored as a square matrix whose row i holds layer i,
    right-padded with zeros.
    """

    def __init__(self, pagoda):
        # Triangle data as a 2-D numpy array (zero padding on the right).
        self.pagoda = np.array(pagoda)
        # dp[i][j] = best path sum from cell (i, j) down to the base.
        # Seed only the bottom row; the rest is filled lazily by run().
        dp = self.pagoda.copy()
        dp[:-1, :] = 0
        self.dp = dp
        # Maps (row, col) -> coordinates of the chosen child on the best path.
        self.next = dict()

    def run(self):
        """Generator yielding (flat_index, child_flat_index, best_sum) per cell.

        Bottom-row cells are yielded first with a child index of 0, then the
        remaining layers are relaxed bottom-up.
        """
        bottom = len(self.pagoda) - 1
        for col, cell in enumerate(self.pagoda[-1]):
            yield self.getIndex(bottom, col), 0, cell
        for row in range(bottom - 1, -1, -1):  # relax layers bottom-up
            for col, cell in enumerate(self.pagoda[row]):
                if cell == 0:  # zero padding marks the end of this layer
                    break
                self.find(row, col)
                child = self.next[(row, col)]
                yield self.getIndex(row, col), self.getIndex(*child), self.dp[row, col]

    def getIndex(self, i, j):
        """Flatten triangle coordinates (i, j) to a 0-based breadth-first index."""
        return int(i * (i + 1) / 2 + j)

    def find(self, i, j):
        """Relax cell (i, j) from its two children, recording the better child.

        Ties go to the right child, matching the original comparison.
        """
        left = self.dp[i + 1, j]
        right = self.dp[i + 1, j + 1]
        if left > right:
            best, child = left, (i + 1, j)
        else:
            best, child = right, (i + 1, j + 1)
        self.dp[i, j] = best + self.pagoda[i, j]
        self.next[(i, j)] = child

    def createdPath(self):
        """Yield flat indices along the optimal path, apex to base.

        Requires run() to have been fully consumed so self.next is populated.
        """
        cur = (0, 0)
        yield self.getIndex(*cur)
        while True:
            cur = self.next[cur]
            yield self.getIndex(*cur)
            if cur[0] == len(self.pagoda) - 1:
                break
def Test():
    """Smoke-test TreePagoda on a sample 5-layer triangle, printing each step,
    the final dp table, and the optimal path indices."""
    treePagoda = np.array(((9, 0, 0, 0, 0),
                           (12, 15, 0, 0, 0),
                           (10, 6, 8, 0, 0),
                           (2, 18, 9, 5, 0),
                           (19, 7, 10, 4, 16)))
    t = TreePagoda(treePagoda)
    # Consume the whole generator (previously a brittle hard-coded range(15))
    # so dp and the next-pointers are fully populated before reading them.
    for step in t.run():
        print(step)
    # NOTE: the original also called t.createdPath() here without consuming
    # it — a no-op generator creation — which has been removed.
    print(t.dp)
    for index in t.createdPath():
        print(index)
if __name__ == '__main__':
    Test()
|
[
"noreply@github.com"
] |
Curious-chen.noreply@github.com
|
af0407d686f5be807f2d3d4b938ec56483a3f89e
|
d6b0bc433b260b5d519d73087d5df46aa516fcdd
|
/biobb_adapters/pycompss/biobb_amber/pmemd/pmemd_mdrun.py
|
e94945a6809b7c30cc12c1d92b7e2ea6151423f4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bioexcel/biobb_adapters
|
b5442fe953b90be4e66faf3460b4a88a40e6d448
|
3daa84ba83a7951add017dd0f05dc361aa99dfe5
|
refs/heads/master
| 2023-08-14T08:46:39.323257
| 2023-08-02T09:05:21
| 2023-08-02T09:05:21
| 157,351,268
| 0
| 2
|
Apache-2.0
| 2023-04-01T14:56:43
| 2018-11-13T09:07:36
|
Common Workflow Language
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.multinode import multinode
from pycompss.api.constraint import constraint
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_amber.pmemd.pmemd_mdrun import PmemdMDRun # Importing class instead of module to avoid name collision
# Task tuning knobs, read from the environment once at import time.
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
computing_nodes = str(os.environ.get('TASK_COMPUTING_NODES', "1"))
computing_units = str(os.environ.get('TASK_COMPUTING_UNITS', "1"))
gpu_units = str(os.environ.get('TASK_GPU_UNITS', "0"))
# PyCOMPSs task: the @task decorator declares each file parameter's direction
# (FILE_IN/FILE_OUT) so the runtime can stage files; @multinode/@constraint
# apply the node/CPU/GPU settings above; failures are ignored by the runtime
# (on_failure="IGNORE") rather than aborting the workflow.
@constraint(processors=[{'processorType':'CPU', 'computingUnits':computing_units}, {'processorType':'GPU', 'computingUnits':gpu_units}])
@multinode(computing_nodes=computing_nodes)
@task(input_top_path=FILE_IN, input_crd_path=FILE_IN, output_log_path=FILE_OUT, output_traj_path=FILE_OUT, output_rst_path=FILE_OUT, input_mdin_path=FILE_IN, input_cpin_path=FILE_IN, input_ref_path=FILE_IN, output_cpout_path=FILE_OUT, output_cprst_path=FILE_OUT, output_mdinfo_path=FILE_OUT,
      on_failure="IGNORE", time_out=task_time_out)
def _pmemdmdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs):
    """Run the wrapped PmemdMDRun building block inside a PyCOMPSs task."""
    task_config.config_multinode(properties)
    try:
        PmemdMDRun(input_top_path=input_top_path, input_crd_path=input_crd_path, output_log_path=output_log_path, output_traj_path=output_traj_path, output_rst_path=output_rst_path, input_mdin_path=input_mdin_path, input_cpin_path=input_cpin_path, input_ref_path=input_ref_path, output_cpout_path=output_cpout_path, output_cprst_path=output_cprst_path, output_mdinfo_path=output_mdinfo_path, properties=properties, **kwargs).launch()
    except Exception as e:
        traceback.print_exc()
        raise e
    finally:
        # Flush so task logs are captured even when the runtime redirects stdio.
        sys.stdout.flush()
        sys.stderr.flush()
def pmemd_mdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path=None, input_cpin_path=None, input_ref_path=None, output_cpout_path=None, output_cprst_path=None, output_mdinfo_path=None, properties=None, **kwargs):
    """Submit the PmemdMDRun PyCOMPSs task unless all outputs already exist.

    Idempotency guard: when every requested output is either unset (None) or
    already present as a non-empty file, the task is skipped with a warning;
    otherwise the wrapped _pmemdmdrun task is launched.
    """
    def _already_done(path):
        # An output counts as produced when it is unset, or exists non-empty.
        return path is None or (os.path.exists(path) and os.stat(path).st_size > 0)

    # Replaces the generated condition chain that ended in `and True`.
    outputs = (output_log_path, output_traj_path, output_rst_path,
               output_cpout_path, output_cprst_path, output_mdinfo_path)
    if all(_already_done(p) for p in outputs):
        print("WARN: Task PmemdMDRun already executed.")
    else:
        _pmemdmdrun( input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs)
|
[
"andriopau@gmail.com"
] |
andriopau@gmail.com
|
7d10a0ba89d020ea8778672c530012d3496bb89b
|
0ab5b15d1b97b9d72a9e4218ad6b7377c26e76ec
|
/tkContacts_LAB15.py
|
c4c4c3d8fbfbf064790aa63503f585440122fa65
|
[] |
no_license
|
RagggySu/-Sample-work-from-other-person-Portfolio
|
3beb01e18b5ace8858bb73eb9aad76e67c87d94b
|
8f5b6d2f3f4d82435cd166d6f4c038ae7352e59c
|
refs/heads/main
| 2023-05-05T06:50:13.906847
| 2021-05-28T18:45:05
| 2021-05-28T18:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,481
|
py
|
# Programmer: James Aniciete
# Course No.: CSC 157
# Lab No.: 15
# Date: 5/9/2020
from tkinter import *
from tkinter import messagebox # for exit button's messagebox
import os # for exiting the app
import myDatabasefile as dbf
import sqlite3
# Module-level setup: ensure the contacts table exists in the database.
dbf.createTable()
# Cache all contact rows as (name, phone) pairs; re-read after every change.
contactlist = dbf.selectAll()
# function to check for valid data entries
def validate(s):
    """Return True when string *s* contains non-whitespace content.

    Fix: the original used s.strip("") — stripping the empty character set,
    which removes nothing — so whitespace-only input passed validation.
    A no-argument strip() removes surrounding whitespace as intended.
    """
    return s.strip() != ""
# function to get the selection from the listbox
def selection():
    """Return the index of the currently selected Listbox row.

    Raises IndexError when nothing is selected (curselection() is empty);
    callers rely on that to detect "no selection".
    """
    return int(select.curselection()[0])
# function to add a contact
def addContact():
    """Insert the Name/Phone entry values as a new contact, then refresh.

    Fixes: the original's local `canRoll = True` had no effect (no `global`
    declaration, never read locally) — refresh() already sets the module
    flag — and the final `else` branch was unreachable.
    """
    name_ok = validate(nameVar.get())
    phone_ok = validate(phoneVar.get())
    if name_ok and phone_ok:
        dbf.insert(nameVar.get(), phoneVar.get())
        # refresh() re-reads the table and re-enables rollback.
        refresh()
    elif not name_ok:
        print("Error: Enter a name.")
    else:
        print("Error: Enter a phone number.")
# function to update a contact
def updateContact():
    """Overwrite the previously loaded contact (oName/oPhone) with the entry
    values, then refresh.

    Fixes: removed the no-effect local `canRoll = True` (missing `global`,
    never read) and the unreachable final `else` branch.
    """
    name_ok = validate(nameVar.get())
    phone_ok = validate(phoneVar.get())
    if name_ok and phone_ok:
        # oName/oPhone are set by loadContact(); a NameError here means no
        # contact was loaded before pressing Update (original behaved the same).
        dbf.update(oName, oPhone, nameVar.get(), phoneVar.get())
        refresh()
    elif not name_ok:
        print("Error: Enter a name.")
    else:
        print("Error: Enter a phone number.")
# function to delete a contact
def deleteContact():
    """Delete the selected contact after a confirmation dialog.

    Fixes: the original bare `except:` swallowed every error (including
    database failures); only the no-selection IndexError from selection()
    is expected here. Also removed the no-effect local `canRoll = True`.
    """
    try:
        who = contactlist[selection()][0]
    except IndexError:
        # selection() raises IndexError when nothing is selected.
        print("Error: Select a contact to be deleted.")
        return
    if messagebox.askokcancel(title = "Delete Contact",
        message = f"Are you sure you want to delete {who}'s contact information?") == 1:
        dbf.delete(nameVar.get(), phoneVar.get())
        refresh()
# function to load a contact
def loadContact():
    """Copy the selected contact's name and phone into the entry fields.

    Also records the original values in module globals oName/oPhone so that
    updateContact() can locate the row being edited. The original bare
    `except:` is narrowed to the expected no-selection IndexError.
    """
    global oName, oPhone
    try:
        name, phone = contactlist[selection()]
    except IndexError:
        print("Error: Select a contact from the list.")
        return
    # Remember the pre-edit values for updateContact().
    oName, oPhone = name, phone
    nameVar.set(name)
    phoneVar.set(phone)
# function to rollback a change
def rollback():
    """Offer to undo the most recent committed change, then disable rollback.

    Fix: the module-level `canRoll` flag is only created by refresh()/the
    action callbacks, so clicking Rollback before any add/update/delete
    raised NameError in the original; guard for the flag's existence.
    NOTE(review): the flag is cleared only after an actual rollback —
    confirm this matches the intended one-shot semantics.
    """
    global canRoll
    if "canRoll" in globals() and canRoll:
        if (messagebox.askokcancel(title = "Rollback", message = "Would you like to undo the previous change?") == 1):
            dbf.rollback()
            refresh()
            # refresh() re-enabled the flag; a rollback can only be undone
            # once, so clear it again.
            canRoll = False
# function to exit the program
def exitContact():
    """Confirm, commit and close the database, then terminate the process.

    NOTE(review): os._exit skips interpreter cleanup and exits with status 1
    (nonzero) even on a normal quit — presumably intentional to kill the Tk
    mainloop outright; confirm the exit code choice.
    """
    app_title = "Contacts"
    if messagebox.askokcancel(title = app_title, message = "Do you want to exit, OK or Cancel") == 1:
        # commit and close the database before dying
        dbf.db.commit()
        dbf.db.close()
        os._exit(1)
# function that places all widgets into the frame individually
def buildFrame () :
    """Build the main window: Name/Phone entry rows, the action-button row,
    the scrollable contact Listbox, and the Exit button.

    Returns the Tk root so the caller can size it and start mainloop().
    Sets module globals nameVar, phoneVar and select, which the callback
    functions above read. Widget creation/pack order determines layout.
    """
    # define global variables shared with the callbacks
    global nameVar, phoneVar, select
    # create the main window widget
    root = Tk()
    # add title to the frame
    root.title("My Contact List")
    # create & pack a frame in the root window
    frame1 = Frame(root)
    frame1.pack()
    # on 1st row of frame:
    # create a label for name
    Label(frame1, text="Name:").grid(row=0, column=0, sticky=W)
    # initialize StringVar for name
    nameVar = StringVar()
    # assign entry button value to the name var
    name = Entry(frame1, textvariable=nameVar)
    # position name var in first row, second column, aligned to the west cell border
    name.grid(row=0, column=1, sticky=W)
    # on 2nd row of the frame:
    # create a label for phone no.
    Label(frame1, text="Phone:").grid(row=1, column=0, sticky=W)
    # create string var for phone no.
    phoneVar= StringVar()
    # assign entry button value to phone var
    phone= Entry(frame1, textvariable=phoneVar)
    # position phone var in second row, second column, aligned to the west
    phone.grid(row=1, column=1, sticky=W)
    # NOTE(review): frame1 is rebound to a fresh Frame below (and again for
    # the Listbox); the earlier frames stay packed, only the name is reused.
    frame1 = Frame(root)
    frame1.pack()
    # add a row of buttons to frame1 with respective callback functions
    btn1 = Button(frame1,text=" Add  ",command=addContact)
    btn2 = Button(frame1,text="Update",command=updateContact)
    btn3 = Button(frame1,text="Delete",command=deleteContact)
    btn4 = Button(frame1,text=" Load ",command=loadContact)
    btn5 = Button(frame1,text="Rollback",command=rollback)
    # pack the buttons on the same row to the left
    btn1.pack(side=LEFT)
    btn2.pack(side=LEFT)
    btn3.pack(side=LEFT)
    btn4.pack(side=LEFT)
    btn5.pack(side=LEFT)
    # allow for selection of names from a ListBox with a scrollbar
    frame1 = Frame(root)
    frame1.pack()
    # create a vertical bar widget
    scroll = Scrollbar(frame1, orient=VERTICAL)
    # whichever value from the ListBox is clicked is assigned to select
    # height = # of values visible in the Listbox
    select = Listbox(frame1, yscrollcommand=scroll.set, height=8)
    scroll.config (command=select.yview)
    scroll.pack(side=RIGHT, fill=Y)
    select.pack(side=LEFT,  fill=BOTH)
    # create frame for Exit button at the bottom of the window
    frame2 = Frame(root)
    frame2.pack()
    # create exit button & pack it
    btn6 = Button(frame2, text = " Exit ", command = exitContact)
    btn6.pack()
    # return root object to allow for the frame to be built
    return root
# sorts the contact list & allows for an update to the ListBox
def setList():
    """Sort the cached contact list in place and rebuild the Listbox rows."""
    contactlist.sort()
    # Clear every existing row before repopulating.
    select.delete(0, END)
    # Show only the name column; the phone stays in the cached tuples.
    for entry in contactlist:
        select.insert(END, entry[0])
# refresh function - used add the end of add, update, delete functions
def refresh():
    """Reload contacts from the database, repopulate the Listbox, and mark
    that there is now a change eligible for rollback."""
    global canRoll, contactlist
    contactlist = dbf.selectAll()
    canRoll = True
    setList()
# initialize the application: build the GUI, then show the initial contacts
root = buildFrame()
setList()
# set size of window (width x height)
root.geometry("300x225")
# hand control to Tk's event loop (returns only when the window closes)
root.mainloop()
|
[
"noreply@github.com"
] |
RagggySu.noreply@github.com
|
7be70ac3312c262cb16fc7fdd8dcb45124a48f14
|
d2b2023261ccdcaf560a2e7b0bab13ecdedacfc9
|
/03/fullbackup.py
|
00cb6631683557864d36d5b2b9b06ca824c29799
|
[] |
no_license
|
lilyef2000/lesson
|
a9d96ffc19f68fa3f044f240de6496b6d69394f6
|
2a5abb00b9bbb8bb36602ea6e1e8c464accc0759
|
refs/heads/master
| 2021-01-10T08:41:14.524421
| 2016-01-01T18:04:04
| 2016-01-01T18:04:04
| 46,460,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
#!/usr/bin/python
# NOTE(review): Python 2 source (print statements). `logger` is a sibling
# project module providing record_log(), not the stdlib `logging`.
import sys,os,time,logger
# Usage: fullbackup.py <path> [-X <exclude-list-file>]
source_file = sys.argv[1]
# Base name of the path being backed up (last component).
formated_source_file = source_file.split('/')[-1]
backup_dir = '/home/Administrator/lesson/backup/'
# Destination archive: <backup_dir><basename>_<YYYYmmddHHMMSS>.tgz
backup_to_file = '''%s%s_%s.tgz'''% (backup_dir,formated_source_file,time.strftime("%Y%m%d%H%M%S",time.localtime()))
def run_backup(runtime='now',exclude_file_name='None'):
    # Build and run a tar command; exclude mode when called with 4 argv items.
    # SECURITY NOTE(review): argv values are interpolated into a shell command
    # for os.system() — a crafted path allows shell injection; confirm inputs
    # are trusted before reusing this script.
    if len(sys.argv) == 4:
        print '--------exclude file mode--------'
        if sys.argv[2] == '-X':
            exclude_file_name = sys.argv[3]
        # With combined 'fX' flags, tar takes the archive name first (f),
        # then the exclude-list file (X), then the path to archive.
        backup_cmd = "tar -cvzfX %s %s %s " %(backup_to_file,exclude_file_name,source_file)
    else:
        print '--------Normal mode:--------'
        backup_cmd = "tar -cvzf %s %s |wc -l" %(backup_to_file,source_file)
    run_command = os.system(backup_cmd)
    # os.system() returns 0 on success; log the outcome either way.
    if run_command == 0:
        logger.record_log('Full Backup','Success','N/A','test')
    else:
        logger.record_log('Full Backup','Failure','N/A','test')
run_backup()
|
[
"lilyef2000@gmail.com"
] |
lilyef2000@gmail.com
|
848a890e8baab9228465b85ff2aaf300a3bd3890
|
59835adaceb26614d0aa51cf8dda2be5be79bcfb
|
/run_menu.py
|
91721ab25d52dd5a240b4d7c8ac9c851985b7866
|
[] |
no_license
|
Farah-H/python_menu
|
b438e11d649729611ec4aa8ca3a8c9bd0106c3b6
|
7401eb938a71c03a89da30667ebda4d59f75d4ac
|
refs/heads/master
| 2023-01-07T03:28:06.934944
| 2020-11-08T19:43:54
| 2020-11-08T19:43:54
| 310,585,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from waitstaff_class import Waitstaff
# This part of the program will actually execute taking an order, saving it, and printing it back to the user
# instantiating the waitstaff class
jenny = Waitstaff()
all_orders = [] # a list to store all orders in (could increment by making this csv output)
# prompting the user for which part of the menu they would like to see
category = input('Would you like to see our starters, mains, desserts or drinks? Please enter "nothing" if you do not want to see the menu.').lower()
# if they are done with (or don't want to read) the menu, they can start to place an order
if input('Are you ready to make an order?').lower() == 'yes':
    this_order = jenny.get_order()
    print(this_order)
    print(jenny.print_order(this_order))
else:
    # NOTE(review): reached when the user is not ready to order; `category`
    # may be "nothing" here — confirm display_menu handles that input.
    jenny.display_menu(category)
|
[
"61236001+farahmh@users.noreply.github.com"
] |
61236001+farahmh@users.noreply.github.com
|
2cc9faf3e8e17c9e733a3ce6a37951dfcd9caabb
|
5602c3572852f8574dff7173fd19c32c48520b28
|
/rigify/rigs/basic/raw_copy.py
|
2ebbe13382bfcfe90dd4692ae3038b58086e1ad6
|
[] |
no_license
|
Dancingbubble/blender-addons
|
58be022f1d8f712ca83acdbd765336e74074a14d
|
a6ee5b0e6f6a945c33b6159fd0536d548b23ccb6
|
refs/heads/master
| 2023-02-19T22:19:53.125675
| 2021-01-01T20:54:21
| 2021-01-01T20:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,521
|
py
|
#====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ...utils.naming import strip_org, strip_prefix, choose_derived_bone, is_control_bone
from ...utils.mechanism import copy_custom_properties_with_ui
from ...utils.widgets import layout_widget_dropdown, create_registered_widget
from ...base_rig import BaseRig
from ...base_generate import SubstitutionRig
from itertools import repeat
'''
Due to T80764, bone name handling for 'limbs.raw_copy' was hard-coded in generate.py
class Rig(SubstitutionRig):
""" A raw copy rig, preserving the metarig bone as is, without the ORG prefix. """
def substitute(self):
# Strip the ORG prefix during the rig instantiation phase
new_name = strip_org(self.base_bone)
new_name = self.generator.rename_org_bone(self.base_bone, new_name)
return [ self.instantiate_rig(InstanceRig, new_name) ]
'''
class RelinkConstraintsMixin:
    """ Utilities for constraint relinking.

    Relink specs come from the part of a constraint's name after '@':
    '' keeps the old subtarget, 'CTRL'/'DEF'/'MCH' swaps to the derived bone
    of that type, anything else is a literal bone name.
    """
    def relink_bone_constraints(self, bone_name):
        """Relink every constraint on the given bone (if the option is on)."""
        if self.params.relink_constraints:
            for con in self.get_bone(bone_name).constraints:
                self.relink_single_constraint(con)
    # When True, constraints without an '@' marker are also relinked,
    # using the empty spec (keep old subtarget).
    relink_unmarked_constraints = False
    def relink_single_constraint(self, con):
        """Relink one constraint according to the '@' markers in its name."""
        if self.params.relink_constraints:
            parts = con.name.split('@')
            if len(parts) > 1:
                self.relink_constraint(con, parts[1:])
            elif self.relink_unmarked_constraints:
                self.relink_constraint(con, [''])
    def relink_move_constraints(self, from_bone, to_bone, *, prefix=''):
        """Move constraints whose name starts with prefix onto another bone."""
        if self.params.relink_constraints:
            src = self.get_bone(from_bone).constraints
            dest = self.get_bone(to_bone).constraints
            # Iterate over a copy: src is mutated inside the loop.
            for con in list(src):
                if con.name.startswith(prefix):
                    dest.copy(con)
                    src.remove(con)
    def relink_bone_parent(self, bone_name):
        """Re-parent the bone per params.parent_bone; return the new parent
        (or None when unparented / the option is off)."""
        if self.params.relink_constraints:
            self.generator.disable_auto_parent(bone_name)
            parent_spec = self.params.parent_bone
            if parent_spec:
                old_parent = self.get_bone_parent(bone_name)
                new_parent = self.find_relink_target(parent_spec, old_parent or '') or None
                self.set_bone_parent(bone_name, new_parent)
                return new_parent
    def relink_constraint(self, con, specs):
        """Apply relink specs to the constraint's target(s).

        Armature constraints accept one spec per target (or a single spec
        broadcast to all targets); any other constraint takes exactly one.
        """
        if con.type == 'ARMATURE':
            if len(specs) == 1:
                # Broadcast the single spec to every Armature target.
                specs = repeat(specs[0])
            elif len(specs) != len(con.targets):
                self.raise_error("Constraint {} actually has {} targets", con.name, len(con.targets))
            for tgt, spec in zip(con.targets, specs):
                if tgt.target == self.obj:
                    tgt.subtarget = self.find_relink_target(spec, tgt.subtarget)
        elif hasattr(con, 'subtarget'):
            if len(specs) > 1:
                self.raise_error("Only the Armature constraint can have multiple '@' targets: {}", con.name)
            if con.target == self.obj:
                con.subtarget = self.find_relink_target(specs[0], con.subtarget)
    def find_relink_target(self, spec, old_target):
        """Resolve a relink spec to a bone name (see class docstring)."""
        if spec == '':
            return old_target
        elif spec in {'CTRL', 'DEF', 'MCH'}:
            result = choose_derived_bone(self.generator, old_target, spec.lower())
            if not result:
                # Retry without restricting the search to the owning rig.
                result = choose_derived_bone(self.generator, old_target, spec.lower(), by_owner=False)
            if not result:
                self.raise_error("Cannot find derived {} bone of bone '{}' for relinking", spec, old_target)
            return result
        else:
            if spec not in self.obj.pose.bones:
                self.raise_error("Cannot find bone '{}' for relinking", spec)
            return spec
    @classmethod
    def add_relink_constraints_params(self, params):
        """Register the relink-related rig parameters on the params object."""
        params.relink_constraints = bpy.props.BoolProperty(
            name        = "Relink Constraints",
            default     = False,
            description = "For constraints with names formed like 'base@bonename', use the part after '@' as the new subtarget after all bones are created. Use '@CTRL', '@DEF' or '@MCH' to simply replace the prefix"
        )
        params.parent_bone = bpy.props.StringProperty(
            name        = "Parent",
            default     = "",
            description = "Replace the parent with a different bone after all bones are created. Using simply CTRL, DEF or MCH will replace the prefix instead"
        )
    @classmethod
    def add_relink_constraints_ui(self, layout, params):
        """Draw the relink options in the rig parameters UI."""
        r = layout.row()
        r.prop(params, "relink_constraints")
        if params.relink_constraints:
            r = layout.row()
            r.prop(params, "parent_bone")
            layout.label(text="Constraint names have special meanings.", icon='ERROR')
class Rig(BaseRig, RelinkConstraintsMixin):
    """'basic.raw_copy' rig: keeps the metarig bone as-is (the ORG prefix is
    handled specially for this rig type — see the module note about T80764),
    with optional constraint/parent relinking and an optional widget."""
    def find_org_bones(self, pose_bone):
        # Single-bone rig: the "org" bone is the metarig bone itself.
        return pose_bone.name
    def initialize(self):
        self.relink = self.params.relink_constraints
    def parent_bones(self):
        self.relink_bone_parent(self.bones.org)
    def configure_bones(self):
        org = self.bones.org
        if is_control_bone(org):
            # Control bones keep their custom properties, exposed in the UI.
            copy_custom_properties_with_ui(self, org, org, ui_controls=[org])
    def rig_bones(self):
        self.relink_bone_constraints(self.bones.org)
    def generate_widgets(self):
        org = self.bones.org
        widget = self.params.optional_widget_type
        if widget and is_control_bone(org):
            create_registered_widget(self.obj, org, widget)
    @classmethod
    def add_parameters(self, params):
        self.add_relink_constraints_params(params)
        params.optional_widget_type = bpy.props.StringProperty(
            name        = "Widget Type",
            default     = '',
            description = "Choose the type of the widget to create"
        )
    @classmethod
    def parameters_ui(self, layout, params):
        col = layout.column()
        col.label(text='This rig type does not add the ORG prefix.')
        col.label(text='Manually add ORG, MCH or DEF as needed.')
        self.add_relink_constraints_ui(layout, params)
        pbone = bpy.context.active_pose_bone
        if pbone and is_control_bone(pbone.name):
            layout_widget_dropdown(layout, params, "optional_widget_type")
    #add_parameters = InstanceRig.add_parameters
    #parameters_ui = InstanceRig.parameters_ui
def create_sample(obj):
    """ Create a sample metarig for this rig type.
    """
    # generated by rigify.utils.write_metarig
    bpy.ops.object.mode_set(mode='EDIT')
    arm = obj.data
    bones = {}
    # One deform bone pointing up Z; 'basic.raw_copy' keeps its name verbatim.
    bone = arm.edit_bones.new('DEF-bone')
    bone.head[:] = 0.0000, 0.0000, 0.0000
    bone.tail[:] = 0.0000, 0.0000, 0.2000
    bone.roll = 0.0000
    bone.use_connect = False
    bones['DEF-bone'] = bone.name
    bpy.ops.object.mode_set(mode='OBJECT')
    pbone = obj.pose.bones[bones['DEF-bone']]
    pbone.rigify_type = 'basic.raw_copy'
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    bpy.ops.object.mode_set(mode='EDIT')
    # Deselect everything, then select only the newly created bones and make
    # the last one active.
    for bone in arm.edit_bones:
        bone.select = False
        bone.select_head = False
        bone.select_tail = False
    for b in bones:
        bone = arm.edit_bones[bones[b]]
        bone.select = True
        bone.select_head = True
        bone.select_tail = True
        arm.edit_bones.active = bone
    return bones
|
[
"angavrilov@gmail.com"
] |
angavrilov@gmail.com
|
73b6a55d16f9a0ddb2370537646877ecaa9d332e
|
464b6f3a8e3662ecc357735b17c5fe859aa9f3e3
|
/StanCode-Projects/searching_name_system/babygraphics.py
|
2ee9308af259ad456f5b8b65ffae016860eaec6b
|
[
"MIT"
] |
permissive
|
jennywei1995/sc-projects
|
a840f1fcb6e691999a6b8ac31a53c8a5b0f260b8
|
ec192434a967d68fee4f772ae907e5ef5fa556d2
|
refs/heads/main
| 2022-12-30T13:06:44.186249
| 2020-10-20T07:56:43
| 2020-10-20T07:56:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,451
|
py
|
"""
File: babygraphics.py
Name: Jenny Wei
-----------------
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
-----------------
This file will create a canvas and enable user to
use the program to search for babes' names' ranks
over decades.
Once the user search a name, the corresponding rank in a specific year
will be found and added to the canvas, there will also be lines to connect
each years' rank and draw a run chart.
"""
import tkinter
import babynames
import babygraphicsgui as gui
# Decade snapshot files; index positions line up with YEARS below.
FILENAMES = [
    'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
    'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
    'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
    'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
    'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
    'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20  # pixels of padding around the plot area
COLORS = ['red', 'purple', 'green', 'blue']  # line colors, cycled per name
TEXT_DX = 2  # horizontal offset of labels from their grid line
LINE_WIDTH = 2
MAX_RANK = 1000  # ranks beyond this are plotted at the bottom and labeled '*'
def get_x_coordinate(width, year_index):
    """
    Given the width of the canvas and the index of the current year
    in the YEARS list, returns the x coordinate of the vertical
    line associated with that year.
    Input:
        width (int): The width of the canvas
        year_index (int): The index of the current year in the YEARS list
    Returns:
        x_coordinate (int): The x coordinate of the vertical line associated
                            with the specified year.
    """
    # Usable plot width, excluding the left and right margins.
    plot_width = width - 2 * GRAPH_MARGIN_SIZE
    # Horizontal spacing between consecutive year lines.
    spacing = plot_width / len(YEARS)
    return int(GRAPH_MARGIN_SIZE + spacing * year_index)
def draw_fixed_lines(canvas):
    """
    Erases all existing information on the given canvas and then
    draws the fixed background lines on it.
    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
    Returns:
        This function does not return any value.
    """
    canvas.delete('all')            # delete all existing lines from the canvas
    # draw the top, bottom and left boundary lines of the plot area
    canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE,
                       CANVAS_WIDTH - GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
    canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
                       GRAPH_MARGIN_SIZE)
    canvas.create_line(GRAPH_MARGIN_SIZE, 0, GRAPH_MARGIN_SIZE, CANVAS_HEIGHT)
    # draw one vertical grid line per decade and label it with the year
    for i in range(len(YEARS)):
        line_x = get_x_coordinate(CANVAS_WIDTH, i)
        canvas.create_line(line_x, 0, line_x, CANVAS_HEIGHT)
        canvas.create_text(line_x + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i], anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
    """
    Given a dict of baby name data and a list of names, plots
    the historical trend of those names onto the canvas.
    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
        name_data (dict): Dictionary holding baby name data; each name maps
                          to a dict of {year string: rank string}.
        lookup_names (List[str]): A list of names whose data you want to plot
    Returns:
        This function does not return any value.

    Fixes vs. the original: no longer mutates the caller's name_data
    (the original inserted '1001' entries for missing decades), and the
    O(years^2) inner scan over the dict is replaced with direct lookups.
    """
    draw_fixed_lines(canvas)  # draw the fixed background grid
    # Vertical pixels per rank step; rank 1 sits at the top margin.
    y_unit = (CANVAS_HEIGHT - (GRAPH_MARGIN_SIZE * 2)) / (MAX_RANK - 1)
    for i, name in enumerate(lookup_names):
        # Cycle through the palette when there are more names than colors.
        color = COLORS[i % len(COLORS)]
        baby_dic = name_data[name]
        y_list = []  # one y pixel per decade, for the connecting lines
        for j, year in enumerate(YEARS):
            # Decades with no data count as off-the-chart rank 1001.
            rank = baby_dic.get(str(year), '1001')
            # Ranks beyond MAX_RANK are labeled with '*'.
            new_rank = '*' if int(rank) > MAX_RANK else rank
            line_x = get_x_coordinate(CANVAS_WIDTH, j)
            y = int(y_unit * int(rank) + GRAPH_MARGIN_SIZE)
            canvas.create_text(line_x + TEXT_DX, y,
                               text=f'{name} {new_rank}', anchor=tkinter.SW, fill=color)
            y_list.append(y)
        # Connect consecutive decades with line segments.
        for j in range(len(YEARS) - 1):
            canvas.create_line(get_x_coordinate(CANVAS_WIDTH, j), y_list[j],
                               get_x_coordinate(CANVAS_WIDTH, j + 1), y_list[j + 1],
                               width=LINE_WIDTH, fill=color)
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
    """Load the name data, build the GUI, draw the empty grid, and start Tk."""
    # Load data
    name_data = babynames.read_files(FILENAMES)
    # Create the window and the canvas
    top = tkinter.Tk()
    top.wm_title('Baby Names')
    canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
    # Call draw_fixed_lines() once at startup so we have the lines
    # even before the user types anything.
    draw_fixed_lines(canvas)
    # This line starts the graphical loop that is responsible for
    # processing user interactions and plotting data
    top.mainloop()
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
jennywei1995.noreply@github.com
|
85dbdd459b8e5552ad1d55043b0a1f5779b84c91
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_20926.py
|
194a6671b01c6bb8bdc4a0d1f301faf7b48d8ed5
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
# Modifying sys.path
PYTHONPATH
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
171783a41f6cc03ffad67745ac99b75219895fad
|
c37de1b37ea7f6e5d0e4b6715be6f6da342cba9a
|
/examples/vasp/wallet.py
|
794836a62040bbfc7b35e797ac4dca07f265240e
|
[
"Apache-2.0"
] |
permissive
|
fil-blue/client-sdk-python
|
6389d6b40c1af1587b23ecef96a4db5af66e34dd
|
2105e7362a35e69298de0896e17331006374de57
|
refs/heads/master
| 2023-02-15T02:54:40.512655
| 2021-01-05T23:42:40
| 2021-01-05T23:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,300
|
py
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass, field
from http import server
from diem import (
identifier,
jsonrpc,
diem_types,
stdlib,
testnet,
utils,
LocalAccount,
offchain,
)
import logging, threading, typing
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class User:
    """An end user of the example custodial wallet."""
    name: str
    # Subaddresses allocated to this user (identifier strings); default_factory
    # avoids a shared mutable default across instances.
    subaddresses: typing.List[str] = field(default_factory=lambda: [])
    def kyc_data(self) -> offchain.KycDataObject:
        """Build minimal individual KYC data derived from the user's name."""
        return offchain.individual_kyc_data(
            given_name=self.name,
            surname=f"surname-{self.name}",
            address=offchain.AddressObject(city="San Francisco"),
        )
    def additional_kyc_data(self) -> str:
        # Presumably the extra KYC payload exchanged after a soft match —
        # confirm against the off-chain exchange flow.
        return f"{self.name}'s secret"
class ActionResult(str):
    """String-valued result marker; merge() accumulates non-trivial results."""
    def merge(self, ret: str) -> "ActionResult":
        # A plain successful send adds no information, so keep self unchanged;
        # anything else is appended as a comma-separated list.
        if ret != ActionResult.SEND_REQUEST_SUCCESS:
            return self + ", " + ret
        return self
# the following ActionResult is created for testing purpose to indicate specific task is executed
ActionResult.PASS = ActionResult("pass")
ActionResult.REJECT = ActionResult("reject")
ActionResult.SOFT_MATCH = ActionResult("soft_match")
ActionResult.SENT_ADDITIONAL_KYC_DATA = ActionResult("sent_additional_kyc_data")
ActionResult.TXN_EXECUTED = ActionResult("transaction_executed")
ActionResult.SEND_REQUEST_SUCCESS = ActionResult("send_request_success")
BgResult = typing.Union[ActionResult, typing.Tuple[offchain.Action, ActionResult]]
@dataclass
class WalletApp:
    """WalletApp is an example of custodial wallet application.

    Wires together a JSON-RPC client, a parent VASP account with child VASP
    accounts, an off-chain API client, and a simple in-memory command store
    plus background task queue.
    """

    @staticmethod
    def generate(name: str, client: jsonrpc.Client) -> "WalletApp":
        """generate a WalletApp running on testnet"""
        offchain_service_port = offchain.http_server.get_available_port()
        # The parent VASP account advertises its off-chain service URL on-chain.
        account = testnet.gen_vasp_account(client, f"http://localhost:{offchain_service_port}")
        w = WalletApp(
            name=name,
            jsonrpc_client=client,
            parent_vasp=account,
            offchain_service_port=offchain_service_port,
        )
        w.add_child_vasp()
        return w

    name: str
    jsonrpc_client: jsonrpc.Client
    parent_vasp: LocalAccount
    offchain_service_port: int
    hrp: str = field(default=identifier.TDM)
    # In-memory stores; a production wallet would persist these.
    saved_commands: typing.Dict[str, offchain.Command] = field(default_factory=lambda: {})
    child_vasps: typing.List[LocalAccount] = field(default_factory=lambda: [])
    users: typing.Dict[str, User] = field(default_factory=lambda: {})
    # Test hooks: map a counterparty given_name to the desired KYC outcome.
    evaluate_kyc_data_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
    manual_review_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
    task_queue: typing.List[typing.Callable[["WalletApp"], BgResult]] = field(default_factory=lambda: [])
    locks: typing.Dict[str, threading.Lock] = field(default_factory=lambda: {})

    def __post_init__(self) -> None:
        # The parent VASP's compliance key signs off-chain requests and the
        # travel-rule recipient signature.
        self.compliance_key = self.parent_vasp.compliance_key
        self.offchain_client = offchain.Client(self.parent_vasp.account_address, self.jsonrpc_client, self.hrp)

    # --------------------- end user interaction --------------------------

    def pay(
        self,
        user_name: str,
        intent_id: str,
        desc: typing.Optional[str] = None,
        original_payment_reference_id: typing.Optional[str] = None,
    ) -> str:  # NOTE(review): was annotated Tuple[(str, ActionResult)] but only the reference id is returned
        """make payment from given user account to intent_id"""
        intent = identifier.decode_intent(intent_id, self.hrp)
        command = offchain.PaymentCommand.init(
            self.gen_user_account_id(user_name),
            self.users[user_name].kyc_data(),
            intent.account_id,
            intent.amount,
            intent.currency_code,
            original_payment_reference_id=original_payment_reference_id,
            description=desc,
        )
        # Saving an outbound command also enqueues sending it to the counterparty.
        self.save_command(command)
        return command.reference_id()

    def gen_intent_id(
        self,
        user_name: str,
        amount: int,
        currency: typing.Optional[str] = testnet.TEST_CURRENCY_CODE,
    ) -> str:
        """Create a payment intent identifier for receiving `amount` into a fresh subaddress."""
        account_id = self.gen_user_account_id(user_name)
        return identifier.encode_intent(account_id, currency, amount)

    # --------------------- offchain integration --------------------------

    def process_inbound_request(
        self, x_request_id: str, request_sender_address: str, request_bytes: bytes
    ) -> typing.Tuple[int, bytes]:
        """Handle one inbound off-chain HTTP request.

        Returns (HTTP status code, signed JWS response body).
        """
        inbound_command = None
        try:
            inbound_command = self.offchain_client.process_inbound_request(request_sender_address, request_bytes)
            self.save_command(inbound_command)
            resp = offchain.reply_request(inbound_command.cid)
            code = 200
        except offchain.Error as e:
            logger.exception(e)
            # cid may be unknown when parsing/validation failed before a command was built.
            resp = offchain.reply_request(inbound_command.cid if inbound_command else None, e.obj)
            code = 400
        return (code, offchain.jws.serialize(resp, self.compliance_key.sign))

    def run_once_background_job(
        self,
    ) -> typing.Optional[BgResult]:  # NOTE(review): annotation widened; returns None when the queue is empty
        """Pop and run the oldest queued task; return its result (None if no task queued)."""
        if len(self.task_queue) == 0:
            return None
        task = self.task_queue[0]
        ret = task(self)
        self.task_queue.remove(task)
        return ret

    # --------------------- admin --------------------------

    def start_server(self) -> server.HTTPServer:
        """Start the local off-chain HTTP service on `offchain_service_port`."""
        return offchain.http_server.start_local(self.offchain_service_port, self.process_inbound_request)

    def add_child_vasp(self) -> None:  # NOTE(review): was annotated jsonrpc.Transaction but returns nothing
        """Create a new child VASP account under the parent VASP."""
        self.child_vasps.append(testnet.gen_child_vasp(self.jsonrpc_client, self.parent_vasp))

    def add_user(self, name: str) -> None:
        """Register a wallet user by name."""
        self.users[name] = User(name)

    def vasp_balance(self, currency: str = testnet.TEST_CURRENCY_CODE) -> int:
        """Sum the on-chain balances of the parent VASP and all child VASPs for `currency`."""
        balance = 0
        for vasp in [self.parent_vasp] + self.child_vasps:
            balance += utils.balance(self.jsonrpc_client.get_account(vasp.account_address), currency)
        return balance

    def clear_data(self) -> None:
        """Reset all in-memory state (users, commands, test hooks, tasks, locks)."""
        self.evaluate_kyc_data_result = {}
        self.manual_review_result = {}
        self.users = {}
        self.saved_commands = {}
        self.task_queue = []
        self.locks = {}

    # -------- offchain business actions ---------------

    def _send_additional_kyc_data(
        self, command: offchain.Command
    ) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Answer a soft-match: attach our user's additional KYC data in a new command."""
        command = typing.cast(offchain.PaymentCommand, command)
        account_id = command.my_actor_obj().address
        _, subaddress = identifier.decode_account(account_id, self.hrp)
        user = self._find_user_by_subaddress(subaddress)
        new_cmd = command.new_command(additional_kyc_data=user.additional_kyc_data())
        return (ActionResult.SENT_ADDITIONAL_KYC_DATA, new_cmd)

    def _submit_travel_rule_txn(
        self,
        command: offchain.Command,
    ) -> ActionResult:
        """Sender side: submit the on-chain p2p transfer carrying travel-rule metadata."""
        command = typing.cast(offchain.PaymentCommand, command)
        child_vasp = self._find_child_vasp(command.sender_account_address(self.hrp))
        testnet.exec_txn(
            self.jsonrpc_client,
            child_vasp,
            stdlib.encode_peer_to_peer_with_metadata_script(
                currency=utils.currency_code(command.payment.action.currency),
                payee=command.receiver_account_address(self.hrp),
                amount=command.payment.action.amount,
                metadata=command.travel_rule_metadata(self.hrp),
                metadata_signature=bytes.fromhex(command.payment.recipient_signature),
            ),
        )
        return ActionResult.TXN_EXECUTED

    def _evaluate_kyc_data(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Evaluate the counterparty's KYC data; outcome driven by the test hook map (default PASS)."""
        command = typing.cast(offchain.PaymentCommand, command)
        op_kyc_data = command.opponent_actor_obj().kyc_data
        ret = self.evaluate_kyc_data_result.get(op_kyc_data.given_name, ActionResult.PASS)
        if ret == ActionResult.SOFT_MATCH:
            return (ret, command.new_command(status=offchain.Status.soft_match))
        return (ret, self._kyc_data_result("evaluate key data", ret, command))

    def _manual_review(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Resolve a soft-match after additional KYC data; outcome driven by the test hook map."""
        command = typing.cast(offchain.PaymentCommand, command)
        op_kyc_data = command.opponent_actor_obj().kyc_data
        ret = self.manual_review_result.get(op_kyc_data.given_name, ActionResult.PASS)
        return (ret, self._kyc_data_result("review", ret, command))

    def _kyc_data_result(
        self, action: str, ret: ActionResult, command: offchain.PaymentCommand
    ) -> offchain.PaymentCommand:
        """Turn a pass/reject decision into the next payment command state."""
        if ret == ActionResult.PASS:
            if command.is_receiver():
                return self._send_kyc_data_and_receipient_signature(command)
            return command.new_command(status=offchain.Status.ready_for_settlement)
        # Anything other than PASS aborts the payment.
        return command.new_command(
            status=offchain.Status.abort,
            abort_code=offchain.AbortCode.reject_kyc_data,
            abort_message=f"{action}: {ret}",
        )

    def _send_kyc_data_and_receipient_signature(
        self,
        command: offchain.PaymentCommand,
    ) -> offchain.PaymentCommand:
        """Receiver side: attach our user's KYC data and the travel-rule recipient signature."""
        sig_msg = command.travel_rule_metadata_signature_message(self.hrp)
        subaddress = command.receiver_subaddress(self.hrp)
        user = self._find_user_by_subaddress(subaddress)
        return command.new_command(
            recipient_signature=self.compliance_key.sign(sig_msg).hex(),
            kyc_data=user.kyc_data(),
            status=offchain.Status.ready_for_settlement,
        )

    # ---------------------- offchain Command ---------------------------

    def _send_request(self, command: offchain.PaymentCommand) -> ActionResult:
        """Send an outbound command to the counterparty VASP, then queue any follow-up work."""
        self.offchain_client.send_command(command, self.compliance_key.sign)
        self._enqueue_follow_up_action(command)
        return ActionResult.SEND_REQUEST_SUCCESS

    def _enqueue_follow_up_action(self, command: offchain.PaymentCommand) -> None:
        """Queue the business action (if any) that this command's state requires from us."""
        if command.follow_up_action():
            self.task_queue.append(lambda app: app._offchain_business_action(command.reference_id()))

    def _offchain_business_action(self, ref_id: str) -> BgResult:
        """Execute the follow-up action for a saved command; persist any resulting command."""
        command = self.saved_commands.get(ref_id)
        action = command.follow_up_action()
        # Submitting the travel-rule transaction produces no new off-chain command.
        if action == offchain.Action.SUBMIT_TXN:
            return (action, self._submit_travel_rule_txn(command))
        actions = {
            offchain.Action.EVALUATE_KYC_DATA: self._evaluate_kyc_data,
            offchain.Action.CLEAR_SOFT_MATCH: self._send_additional_kyc_data,
            offchain.Action.REVIEW_KYC_DATA: self._manual_review,
        }
        ret, new_command = actions[action](command)
        self.save_command(new_command)
        # return action and action result for test
        return (action, ret)

    # ---------------------- commands ---------------------------

    def save_command(self, command: offchain.Command) -> None:
        """save command locks prior command by reference id, validate and save new command.

        in a production implementation, the lock should be database / distributed lock to ensure
        atomic process(read and write) command by the reference id.
        """
        lock = self.lock(command.reference_id())
        if not lock.acquire(blocking=False):
            msg = f"command(reference_id={command.reference_id()}) is locked"
            raise offchain.command_error(offchain.ErrorCode.conflict, msg)
        try:
            prior = self.saved_commands.get(command.reference_id())
            if command == prior:
                return  # idempotent: identical command is already stored
            command.validate(prior)
            self.saved_commands[command.reference_id()] = command
            if command.is_inbound():
                self._enqueue_follow_up_action(command)
            else:  # outbound
                self.task_queue.append(lambda app: app._send_request(command))
        finally:
            lock.release()

    def lock(self, ref_id: str) -> threading.Lock:
        """Get (or lazily create) the per-reference-id lock."""
        return self.locks.setdefault(ref_id, threading.Lock())

    # ---------------------- users ---------------------------

    def _find_user_by_subaddress(self, subaddress: bytes) -> User:
        """Look up the user owning `subaddress`; raise ValueError if none matches."""
        for u in self.users.values():
            if subaddress in u.subaddresses:
                return u
        raise ValueError(f"could not find user by subaddress: {subaddress.hex()}, {self.name}")

    def gen_user_account_id(self, user_name: str) -> str:
        """Generate a fresh subaddress for the user and encode it as an account identifier."""
        subaddress = identifier.gen_subaddress()
        self.users[user_name].subaddresses.append(subaddress)
        return identifier.encode_account(self._available_child_vasp().account_address, subaddress, self.hrp)

    # ---------------------- child vasps ---------------------------

    def _available_child_vasp(self) -> LocalAccount:
        """Pick the child VASP used for new subaddresses (always the first one)."""
        return self.child_vasps[0]

    def _find_child_vasp(self, address: diem_types.AccountAddress) -> LocalAccount:
        """Find the child VASP with the given on-chain address; raise ValueError if absent."""
        for vasp in self.child_vasps:
            if vasp.account_address == address:
                return vasp
        raise ValueError(f"could not find child vasp by address: {address.to_hex()}")
|
[
"ilx@fb.com"
] |
ilx@fb.com
|
d047e999cc18d2f81e6f7afc24a22551af5b8e21
|
c96f923cba05f4bfefafa24c02818cc98e8caa14
|
/sum.py
|
86724be39016ee75ac99bd413acdd8c139cca37c
|
[] |
no_license
|
saviorseelf/test
|
0178865ff0fbafe37ee286301669876ecb5e7ae6
|
7438be19b185e16a92a1c3e72cad402b987edc01
|
refs/heads/master
| 2021-05-30T11:11:31.744015
| 2016-01-21T19:15:18
| 2016-01-21T19:15:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
from threading import Thread

# Shared counter mutated by both worker threads. Because `i += 1` is not
# atomic, concurrent updates can interleave, so the final value is generally
# NOT zero — this script demonstrates that race condition.
i = 0


def add():
    """Increment the shared counter one million times."""
    global i
    for _ in range(1000000):
        i += 1


def sub():
    """Decrement the shared counter one million times."""
    global i
    for _ in range(1000000):
        i -= 1


def main():
    """Run both workers concurrently and print the (racy) final counter."""
    thread1 = Thread(target=add)
    thread2 = Thread(target=sub)
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
    # BUG FIX: `print i` is Python 2 statement syntax; use the function call.
    print(i)


if __name__ == "__main__":
    # Guard the entry point so importing this module does not spawn threads.
    main()
|
[
"andershanssen92@gmail.com"
] |
andershanssen92@gmail.com
|
fa4650d4a8f4d6e62f671e455d2f45eaa553ced4
|
d9b0e4be5b29c6bdb806eeb2b6df340aa26d1152
|
/payloads/shop2.py
|
016c15fba07f2ae5dd9a4a3ca6bbe7da515a824f
|
[
"MIT"
] |
permissive
|
opoudel/sculptbf-bot
|
ba5a4fb3550ffd51620d38d5171413cb89fbe136
|
3d9307bc4506844c8a693db68217d37fe2e76130
|
refs/heads/master
| 2020-12-02T11:34:19.539736
| 2017-07-21T15:39:20
| 2017-07-21T15:39:20
| 96,653,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
# -*- coding: utf-8 -*-
import json
def shop(recipient_id):
    """Return a Messenger list-template payload (as a JSON string) for the shop.

    Args:
        recipient_id: PSID of the user the message is addressed to.
    """
    # (title, image file name, subtitle, product URL) for each list entry.
    products = [
        (
            "Lypo - Spheric Vitamin C",
            "lypo.png",
            "Price: $48",
            "http://sculptbf.co.nz/index.php/product/lypo-spheric-vit-c/",
        ),
        (
            "ASAP Moisturizer Sun Screen 50+",
            "asap.png",
            "Price: $65",
            "http://sculptbf.co.nz/index.php/product/asap-moisturizer-sun-screen-50/",
        ),
        (
            "Cosmedix Purity Clean",
            "cosmedix.png",
            "Price: Not in Stock!!",
            "http://sculptbf.co.nz/index.php/product/cosmedix-purity-clean/",
        ),
        (
            "Skin Medica TNS Essential Serum",
            "skin_medica.png",
            "Price: $250",
            "http://sculptbf.co.nz/index.php/product/skin-medica-tns-essential-serum/",
        ),
    ]
    # Build one list element per product; key order matches the original
    # payload so the serialized JSON is byte-identical.
    elements = [
        {
            "title": title,
            "image_url": "https://sculptbf-bot.herokuapp.com/static/" + image,
            "subtitle": subtitle,
            "buttons": [
                {
                    "type": "web_url",
                    "title": "Buy",
                    "url": url,
                    "webview_height_ratio": "tall",
                }
            ],
        }
        for title, image, subtitle, url in products
    ]
    payload = {
        "template_type": "list",
        "top_element_style": "compact",
        "elements": elements,
        "buttons": [
            {
                "title": "View More",
                "type": "postback",
                "payload": "MORE_SHOPPING_3",
            }
        ],
    }
    return json.dumps({
        "recipient": {"id": recipient_id},
        "message": {"attachment": {"type": "template", "payload": payload}},
    })
|
[
"opoudel@me.com"
] |
opoudel@me.com
|
5c6efe87ee9b93f8027bf4a15335244acf89f525
|
ae2f3356ab79b77090f8eb927f692c23ee070278
|
/SMA_SES_DES.py
|
6ae165e143cee8c29eb017cdeffa048c74e8509c
|
[
"MIT"
] |
permissive
|
ImPHX13/Demand-Forecasting
|
5cfdbfdd712dc23834f702e347b39bcdf23d1d3d
|
078e58fed6fdd59e8fbae69e8f54d01e784d4be7
|
refs/heads/master
| 2022-11-25T22:07:23.255490
| 2022-11-18T04:26:40
| 2022-11-18T04:26:40
| 265,248,188
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,881
|
py
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#Import dataset
df = pd.read_csv('data.csv',parse_dates=True, dayfirst=True)
df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%y')
print(df.dtypes)
df.head()
df=df.set_index('Date')
df.index
#Create a timeseseries
ts=df['Quantity']
ts.head()
#Rolling mean and standard deviation calculation to check for stationarity
rolling_mean = ts.rolling(window = 5).mean()
rolling_std = ts.rolling(window = 5).std()
plt.plot(ts, color = 'blue', label = 'Original')
plt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')
plt.plot(rolling_std, color = 'black', label = 'Rolling Std')
plt.legend(loc = 'best')
plt.title('Rolling Mean & Rolling Standard Deviation')
plt.show()
#ADF test for checking stationarity of timeseries
result = adfuller(ts)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
#Timeseries log transformation
ts_log = np.log(ts)
plt.plot(ts_log)
result = adfuller(ts_log)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
def get_stationarity(timeseries):
    """Plot rolling statistics and print the ADF stationarity test for a series.

    Args:
        timeseries: pandas Series to check (presumably indexed by date —
            TODO confirm against caller).

    Side effects: draws a matplotlib figure and prints the augmented
    Dickey-Fuller statistic, p-value and critical values to stdout.
    """
    rolling_mean = timeseries.rolling(window=5).mean()
    rolling_std = timeseries.rolling(window=5).std()
    # Plot the raw series against its rolling statistics; a drifting mean or
    # changing spread is visual evidence of non-stationarity.
    # FIX: dropped the unused `original`/`mean`/`std` bindings.
    plt.plot(timeseries, color='blue', label='Original')
    plt.plot(rolling_mean, color='red', label='Rolling Mean')
    plt.plot(rolling_std, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)
    # Augmented Dickey-Fuller test; result tuple is
    # (adf_stat, pvalue, usedlag, nobs, critical_values, icbest).
    result = adfuller(timeseries)
    print('ADF Statistic: {}'.format(result[0]))
    print('p-value: {}'.format(result[1]))
    print('Critical Values:')
    for key, value in result[4].items():
        print('\t{}: {}'.format(key, value))
rolling_mean = ts_log.rolling(window=5).mean()
ts_log_minus_mean = ts_log - rolling_mean
ts_log_minus_mean.dropna(inplace=True)
get_stationarity(ts_log_minus_mean)
#Exponential Decay
rolling_mean_exp_decay = ts_log.ewm(halflife=5, min_periods=0, adjust=True).mean()
ts_log_exp_decay = ts_log - rolling_mean_exp_decay
ts_log_exp_decay.dropna(inplace=True)
get_stationarity(ts_log_exp_decay)
#Timeseries log shifted to make it stationary
ts_log_shift = ts_log - ts_log.shift()
ts_log_shift.dropna(inplace=True)
get_stationarity(ts_log_shift)
#Timeseries log differenced to make it stationary
ts_log_diff = ts_log - ts_log.shift()
plt.plot(ts_log_diff)
ts_log_diff.dropna(inplace=True)
get_stationarity(ts_log_diff)
#Seasonal Decomposition to check for seasonality and trends
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(ts_log,freq=7)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.subplot(411)
plt.plot(ts_log, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
ts_log_decompose = residual
ts_log_decompose.dropna(inplace=True)
get_stationarity(ts_log_decompose)
#ACF and PACF plots to find p and q values
from statsmodels.tsa.stattools import acf, pacf
lag_acf = acf(ts_log_diff, nlags=20)
lag_pacf = pacf(ts_log_diff, nlags=20, method='ols')
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Autocorrelation Function')
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()
#Gridsearch for ideal p,q parameters based on lowest AIC value
import statsmodels.api as sm
resDiff = sm.tsa.arma_order_select_ic(ts_log, max_ar=7, max_ma=7, ic='aic', trend='c')
print('ARMA(p,q) =',resDiff['aic_min_order'],'is the best.')
#Fitting ARIMA model from the obtained (p,d,q) values
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(ts_log, order=(1, 1, 0))
results_ARIMA = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
print(predictions_ARIMA_diff.head())
# Bring the differenced predictions back to the original (log) scale by
# cumulatively summing them onto the first observed log value.
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
print(predictions_ARIMA_diff_cumsum.head())
# BUG FIX: `.ix` was deprecated in pandas 0.20 and removed in 1.0; use
# positional `.iloc` to fetch the first log value.
predictions_ARIMA_log = pd.Series(ts_log.iloc[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)
predictions_ARIMA_log.head()
#Plot of Actual vs Forecasted values
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(ts)
plt.plot(predictions_ARIMA)
plt.title('ARIMA MAPE: %.4f'% np.mean(np.abs(predictions_ARIMA-ts)/np.abs(ts)))
#RMSE and MAPE calculations
mape = np.mean(np.abs(predictions_ARIMA - ts)/np.abs(ts))
rmse = np.mean((predictions_ARIMA - ts)**2)**.5
print(mape)
print(rmse)
#Summary of ARIMA model
results_ARIMA.summary()
|
[
"noreply@github.com"
] |
ImPHX13.noreply@github.com
|
aa300723ff8030d337ad1c65d8905af0053a9077
|
760578355ed00ce758591b9a0b4929a3105de530
|
/query/protocols/Gamespy.py
|
ed4437c4d9ffc4c0f2cfdf96f9a4022703cf0062
|
[
"MIT"
] |
permissive
|
SanSource/GameQuery
|
6c385e7607d7ad7fca0782ef8eea839f838268a7
|
b10845bffc872e9ce3d3d5d4016fd1905b3b8b0c
|
refs/heads/master
| 2020-12-29T16:07:39.179677
| 2017-08-20T22:59:33
| 2017-08-20T22:59:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
from ..connection import BaseUDP
from ..helpers import async_raise_on_timeout
from ..parser.helpers import QueryBytes
class Gamespy1(BaseUDP):
    """Query client for the Gamespy protocol v1 (UDP `\\info\\` request)."""

    @async_raise_on_timeout
    async def get_info(self):
        """Send an `\\info\\` query and return the parsed key/value pairs."""
        reader, writer = await self._connection.connect()
        query = QueryBytes()
        query.append(b'\\info\\', None)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Split a `\\key\\value\\...` response into (key, value) byte pairs."""
        # FIX: removed the dead `list_info = list()` store; skip the leading
        # backslash, then pair up alternating key/value tokens.
        list_split = response.buffer[1:].split(b'\\')
        return list(zip(list_split[::2], list_split[1::2]))
class Gamespy2(BaseUDP):
    """Query client for the Gamespy protocol v2."""

    @async_raise_on_timeout
    async def get_info(self):
        """Send the v2 status query and return the parsed key/value pairs."""
        reader, writer = await self._connection.connect()
        query = QueryBytes()
        # \xFE\xFD\x00 = query header; \x43\x4F\x52\x59 = "CORY" request id
        # echoed back in the response header.
        query.append(b'\xFE\xFD\x00\x43\x4F\x52\x59\xFF\x00\x00', None)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Parse the NUL-separated key/value section of a v2 response."""
        # FIX: removed the dead `list_info = list()` store and stale
        # commented-out code. Drop the 5-byte header, stop at the
        # \x00\x00\x00 terminator, then pair alternating key/value tokens.
        list_split = response.buffer[5:].split(b'\x00\x00\x00')[0].split(b'\x00')
        return list(zip(list_split[::2], list_split[1::2]))
class Gamespy3(BaseUDP):
    """Query client for the Gamespy protocol v3 (optional challenge handshake)."""

    # Subclasses (v4) set this to require a challenge round-trip first.
    is_challenge = False

    @async_raise_on_timeout
    async def get_info(self):
        """Perform the (optionally challenged) v3 status query and parse it."""
        reader, writer = await self._connection.connect()
        timestamp = b'\x04\x05\x06\x07'  # timestamp / request id echoed by the server
        query = QueryBytes()
        query.append(b'\xFE\xFD\x09', None)
        query.append(timestamp, None)
        if self.is_challenge:
            writer.write(query.buffer)
            response = QueryBytes(await reader.readline())
            if response.buffer[:5] != b'\x09' + timestamp:
                raise Exception()  # fixme: raise a specific protocol error
            # FIX: renamed misspelled local `challange_int`. The challenge
            # arrives as ASCII digits; echo it back as 4 big-endian bytes.
            challenge_int = int(response.buffer[5:-1]).to_bytes(4, 'big', signed=True)
            query.append(challenge_int, None)
        query.append(b'\xFF\x00\x00\x01', None)
        # Rewrite the header type byte (offset 2) from \x09 to \x00 to turn
        # the buffer into the actual status query.
        query.set(b'\x00', QueryBytes.BIG_TYPE_BYTE, 1, offset=2)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Parse the key/value block of a v3 status response."""
        # FIX: removed the dead `list_info = list()` store and stale
        # commented-out code; pair alternating key/value tokens.
        list_split = response.buffer[16:-2].split(b'\x00\x00\x01')[0].split(b'\x00')
        return list(zip(list_split[::2], list_split[1::2]))
class Gamespy4(Gamespy3):
    # v4 speaks the v3 wire format but always performs the challenge handshake.
    is_challenge = True
|
[
"patryk.sondej@gmail.com"
] |
patryk.sondej@gmail.com
|
cdc9c0fe13be7945a2a837c9dfa2b6ee764b8977
|
8b881e5a11a4b69362edf70929570964644aab75
|
/src/ai/AlphaBetaOwnSeeds.py
|
eb7539b7632553ff9285607c9d3a507bb67ba13b
|
[] |
no_license
|
BpGameHackSoc/kalahai
|
18b84bf528c6e5e12e2ac0b0abb3052fec4b81c8
|
abc2ce1aa4c766fd1cadb62bf3bf4d92b9fe5f56
|
refs/heads/master
| 2021-09-02T10:16:27.051245
| 2017-12-23T10:23:52
| 2017-12-23T10:25:30
| 110,739,312
| 2
| 1
| null | 2017-11-19T14:39:11
| 2017-11-14T20:10:11
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
import numpy as np
from . import AlphaBeta
from model import Side
class AlphaBetaOwnSeeds(AlphaBeta.AlphaBeta):
    """Alpha-beta player whose evaluation also rewards seeds kept in own holes."""

    def evaluate(self, state):
        """Score `state` from SOUTH's perspective.

        Store difference dominates; "keepable" seeds in the holes contribute
        with a 0.25 weight (SOUTH minus NORTH).
        """
        south_holes = state.board.get_holes(Side.SOUTH)
        north_holes = state.board.get_holes(Side.NORTH)
        south_store = state.board.get_store(Side.SOUTH)
        north_store = state.board.get_store(Side.NORTH)
        val = (self.keepable_seeds(south_holes) - self.keepable_seeds(north_holes)) * 0.25
        val += south_store - north_store
        return val

    def keepable_seeds(self, buckets):
        """Sum of per-hole seed counts clipped to an upper bound.

        NOTE(review): `range(size, 0, -10)` yields a single value whenever
        size <= 10, which then broadcasts against np.ones(size), producing a
        CONSTANT cap of size-1 for every hole (and a shape error for
        size > 10). A per-hole descending cap (step -1) looks intended —
        confirm before relying on this evaluation.
        """
        size = len(buckets)
        clipper = np.array(range(size, 0, -10)) - np.ones(size)
        return np.sum(np.clip(buckets, None, clipper))

    def move(self, state):
        """Delegate to the generic alpha-beta move search."""
        return super().move(state)
|
[
"gergely.halacsy@gmail.com"
] |
gergely.halacsy@gmail.com
|
28849c5633fc880b6e4043d6ee95027eb192b0fe
|
d77b363dd92fd61ff0f1fc75ffb9836dea201524
|
/main.py
|
f44ecfc93f61f7c047c6480e45d49d15fc1f7556
|
[] |
no_license
|
BalticPinguin/ArgonMD
|
6309ac8cf2aceb115f2615c81b62eaeacb5bf286
|
ddb723e7b34ec8b150acf187aaff3d61df9c0f08
|
refs/heads/master
| 2016-09-03T07:39:16.201770
| 2015-08-10T19:42:32
| 2015-08-10T19:42:32
| 39,498,512
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
#!/usr/bin/python3
import physics as ph
import sys
def frange(start, stop, step): #imitate range, but with floats.
r = start
i=0
while r <= stop:
yield r
i+=1
r =start + i*step
def main(argv):
assert len(argv)==1, "only temperature and alpha are allowed as input-parameter!"
N=256 #number of particles
#N=32 #number of particles
#length in angstroem, integer required
#L=10 #size of box; needs to be cubic for the method to be working
T=float(argv[0])
dt=10 #10 ps per step
t=3e3 #3 fs of simulation time.
alpha=0.02
L=21 #--> density of rho=1.8 g/cm^3
#L=10.5 #--> density of rho=1.8 g/cm^3
#time-step (in ps)
#t=30e3
output="box.dat"
output2="pairDist.dat"
#now, start simulation
#particle,mass=ph.testBox(N,L, T)
#particle,mass=ph.testForce(N,L, T)
#particle,mass=ph.seed_fcc(N,L,T)
particle,mass=ph.seed_small(N,L,T)
#particle,mass=ph.seed(N,L, T)
force=ph.update_force(particle,L) #get forces
#ph.print_conf(particle,output, output2,0, L)
for time in frange(dt,t,dt):
force,particle=ph.propagate(force,particle,L, dt,mass, alpha,T)
if time >2e3: # don't waste time, printing dumb data.
ph.print_conf(particle,output, output2, time, L)
if __name__=="__main__":
main(sys.argv[1:])
|
[
"tobias.moehle@uni-rostock.de"
] |
tobias.moehle@uni-rostock.de
|
82405e9839e46249f460ed4e84143cc38d8ef32b
|
55e31bc59b435ccfb60da178d560dedd6248b593
|
/resources/store.py
|
b1c85be12f356d4d78b91c85e8bebff15149a086
|
[] |
no_license
|
kenHsieh25053/flaskapi
|
a456c2ae28127ba422582693949fcb79bff71977
|
da1130585fc722910db3c503946ee5d3b8d66591
|
refs/heads/master
| 2020-03-10T22:44:36.581563
| 2018-04-22T09:07:09
| 2018-04-22T09:07:09
| 129,625,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
from flask_restful import Resource
from models.store import StoreModel
class Store(Resource):
    """REST resource for a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or a 404 message when it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {'message': 'Store not found'}, 404

    def post(self, name):
        """Create a store named `name`; 400 if it already exists, 500 on DB error."""
        if StoreModel.find_by_name(name):
            # BUG FIX: message typo — "exits" -> "exists".
            return {'message': 'A store with name {} already exists.'.format(name)}, 400
        store = StoreModel(name)
        try:
            store.save_to_db()
        # BUG FIX: narrowed the bare `except:` (it also swallowed
        # SystemExit/KeyboardInterrupt) and fixed "occured" -> "occurred".
        except Exception:
            return {'message': 'An error occurred while creating the store'}, 500
        return store.json(), 201

    def delete(self, name):
        """Delete the store if it exists; always reports success."""
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
        return {'message': 'Store deleted'}
class StoreList(Resource):
    """REST resource listing every store."""

    def get(self):
        """Return all stores as a JSON object keyed by 'stores'."""
        stores = [store.json() for store in StoreModel.query.all()]
        return {'stores': stores}
|
[
"kw1984@livemail.tw"
] |
kw1984@livemail.tw
|
c82afac573bf870007f2a26a2677f45d8e51d99c
|
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
|
/1233/solution.py
|
c47461e1a3ab14eb3051ffb577ac9f8ff8d4de5e
|
[] |
no_license
|
zhangruochi/leetcode
|
6f739fde222c298bae1c68236d980bd29c33b1c6
|
cefa2f08667de4d2973274de3ff29a31a7d25eda
|
refs/heads/master
| 2022-07-16T23:40:20.458105
| 2022-06-02T18:25:35
| 2022-06-02T18:25:35
| 78,989,941
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
class Node():
    """Hashable wrapper around a path-segment string.

    Equality, hashing and repr all delegate to the wrapped string, so Node
    instances behave like their segment when used as trie (dict) keys.
    """

    def __init__(self, str_):
        self.str_ = str_

    def __eq__(self, other):
        return self.str_ == other.str_

    def __repr__(self):
        # BUG FIX: the original defined __repr__ twice; the duplicate is removed.
        return self.str_

    def __hash__(self):
        return hash(self.str_)

    def __call__(self, str_):
        """Allow an instance to act as a factory: node("x") -> Node("x")."""
        return Node(str_)
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Remove every folder that is a sub-folder of another folder in the list.

        Builds a trie of path segments; a '#' marker ends each inserted
        folder, and the DFS stops at the first marker it meets, so deeper
        (sub)paths are never emitted. Output order follows trie insertion
        order, matching the original implementation.
        """
        # BUG FIX: dropped the leftover debug print() and the redundant Node
        # wrapper — plain string segments hash/compare identically as keys.
        trie = {}
        res = []

        for f in folder:
            pointer = trie
            for segment in f.strip("/").split("/"):
                pointer = pointer.setdefault(segment, {})
            pointer["#"] = "#"  # marks the end of an inserted folder

        def dfs(node, path):
            if "#" in node:
                res.append("/" + "/".join(path))
                return
            for segment in node:
                path.append(segment)
                dfs(node[segment], path)
                path.pop()

        dfs(trie, [])
        return res
|
[
"zrc720@gmail.com"
] |
zrc720@gmail.com
|
54a7a8cba0c76261822e8420ebdd9b22a638ba22
|
1ba12eb2be477e7dc99b4f13d1014917e78199aa
|
/usr/lib/solydxk/constructor/solydxk.py
|
89f79749e8211f426ccb25c69f76882e3d7ac50e
|
[] |
no_license
|
KDB2/solydxk-constructor
|
0704f5ce5ef331f45888348804936cfcf4c43f25
|
c05b8c38b873bb36eb3c8d3160600f45d5cd4798
|
refs/heads/master
| 2021-01-17T06:31:41.055358
| 2015-11-03T16:02:32
| 2015-11-03T16:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,011
|
py
|
#! /usr/bin/env python3
import re
import threading
from os import remove, rmdir, makedirs, system, listdir
from shutil import copy, move
from datetime import datetime
from execcmd import ExecCmd
from os.path import join, exists, basename, abspath, dirname, lexists, isdir
class IsoUnpack(threading.Thread):
    """Worker thread that unpacks a live ISO into an editable directory tree.

    Layout produced under `unpackDir`:
      - boot/isolinux : bootloader files from the ISO
      - boot/live     : the directory containing filesystem.squashfs
      - root          : extracted contents of filesystem.squashfs
    The final "DONE"/"ERROR" status message is pushed onto `queue`.

    NOTE(review): mounting and chmod require root privileges — presumably
    the constructor caller guarantees that; confirm.
    """

    def __init__(self, mountDir, unpackIso, unpackDir, queue):
        threading.Thread.__init__(self)
        self.ec = ExecCmd()
        self.mountDir = mountDir    # scratch mount point for ISO/squashfs loop mounts
        self.unpackIso = unpackIso  # path of the ISO image to unpack
        self.unpackDir = unpackDir  # destination directory tree
        self.queue = queue          # receives the final status message
        self.returnMessage = None

    def run(self):
        """Mount the ISO, copy its boot content, then extract the squashfs root."""
        try:
            if not exists(self.mountDir):
                print(("Create mount directory: %s" % self.mountDir))
                makedirs(self.mountDir)
            rootDir = join(self.unpackDir, "root")
            if not exists(rootDir):
                print(("Create root directory: %s" % rootDir))
                makedirs(rootDir)
            isolinuxDir = join(self.unpackDir, "boot/isolinux")
            if not exists(isolinuxDir):
                print(("Create isolinux directory: %s" % isolinuxDir))
                makedirs(isolinuxDir)
            liveDir = join(self.unpackDir, "boot/live")
            if not exists(liveDir):
                print(("Create liveDir directory: %s" % liveDir))
                makedirs(liveDir)
            # Mount the ISO
            system("mount -o loop '%s' '%s'" % (self.unpackIso, self.mountDir))
            # Check isolinux directory
            mountIsolinuxDir = join(self.mountDir, "isolinux")
            if not exists(mountIsolinuxDir):
                self.ec.run("umount --force '%s'" % self.mountDir)
                self.returnMessage = "ERROR: Cannot find isolinux directory in ISO"
            fixCfgCmd = None
            dirs = []
            mountSquashfs = None
            if self.returnMessage is None:
                subdirs = self.getDirectSubDirectories(self.mountDir)
                for subdir in subdirs:
                    if self.hasSquashFs(join(self.mountDir, subdir)):
                        mountSquashfs = join(self.mountDir, subdir)
                        # Squashfs lives in a non-standard directory: rewrite
                        # isolinux.cfg to point at /live instead.
                        if subdir != "live":
                            fixCfgCmd = "sed -i 's/\/%s/\/live/g' %s/isolinux.cfg" % (subdir, isolinuxDir)
                    elif subdir != "isolinux":
                        # Any other directory is copied verbatim into boot/.
                        dirs.append(join(self.mountDir, subdir))
                if mountSquashfs is None:
                    self.ec.run("umount --force '%s'" % self.mountDir)
                    self.returnMessage = "ERROR: Cannot find squashfs directory in ISO"
            if self.returnMessage is None:
                # Copy files from ISO to unpack directory
                for d in dirs:
                    self.ec.run("rsync -at --del '%s' '%s'" % (d, join(self.unpackDir, "boot/")))
                self.ec.run("rsync -at --del '%s/' '%s'" % (mountIsolinuxDir, isolinuxDir))
                self.ec.run("rsync -at --del '%s/' '%s'" % (mountSquashfs, liveDir))
                self.ec.run("umount --force '%s'" % self.mountDir)
                if fixCfgCmd is not None:
                    self.ec.run(fixCfgCmd)
                # copy squashfs root
                squashfs = join(liveDir, "filesystem.squashfs")
                if exists(squashfs):
                    self.ec.run("mount -t squashfs -o loop '%s' '%s'" % (squashfs, self.mountDir))
                    self.ec.run("rsync -at --del '%s/' '%s/'" % (self.mountDir, rootDir))
                    self.ec.run("umount --force '%s'" % self.mountDir)
                # Cleanup
                rmdir(self.mountDir)
                # set proper permissions (setuid sudo, restricted sudoers)
                self.ec.run("chmod 6755 '%s'" % join(rootDir, "usr/bin/sudo"))
                self.ec.run("chmod 0440 '%s'" % join(rootDir, "etc/sudoers"))
                self.returnMessage = "DONE - ISO unpacked to: %s" % self.unpackDir
            self.queue.put(self.returnMessage)
        except Exception as detail:
            # Best-effort cleanup, then report the failure on the queue.
            self.ec.run("umount --force '%s'" % self.mountDir)
            rmdir(self.mountDir)
            self.returnMessage = "ERROR: IsoUnpack: %(detail)s" % {"detail": detail}
            self.queue.put(self.returnMessage)

    def getDirectSubDirectories(self, directory):
        """Return the names of the immediate subdirectories of `directory`."""
        subdirs = []
        names = listdir(directory)
        for name in names:
            if isdir(join(directory, name)):
                subdirs.append(name)
        return subdirs

    def hasSquashFs(self, directory):
        """True if `directory` directly contains a file named filesystem.squashfs."""
        names = listdir(directory)
        for name in names:
            if name == "filesystem.squashfs":
                return True
        return False
class BuildIso(threading.Thread):
    """Build a hybrid live ISO (plus .md5 and .torrent files) from an unpacked
    distribution tree, in a background thread.

    Result strings are reported through *queue*: the final message starts with
    "DONE" on success or "ERROR:" on failure.  Heavy lifting is delegated to
    external tools (mksquashfs, genisoimage, isohybrid, mktorrent) via ExecCmd.
    """

    def __init__(self, distroPath, queue):
        # distroPath: path to the distribution work directory (or its "root"
        #             chroot subdirectory - both are accepted).
        # queue:      queue.Queue the run() thread posts its result message to.
        threading.Thread.__init__(self)
        self.ec = ExecCmd()
        self.dg = DistroGeneral(distroPath)
        self.ed = EditDistro(distroPath)
        self.queue = queue
        self.returnMessage = None
        # Paths: normalize so distroPath is the parent of root/ and boot/.
        distroPath = distroPath.rstrip('/')
        if basename(distroPath) == "root":
            distroPath = dirname(distroPath)
        self.distroPath = distroPath
        self.rootPath = join(distroPath, "root")
        self.bootPath = join(distroPath, "boot")
        self.livePath = join(self.bootPath, "live")
        self.scriptDir = abspath(dirname(__file__))
        # Check for old dir (legacy layout used boot/solydxk instead of boot/live)
        oldDir = join(self.bootPath, "solydxk")
        if exists(oldDir):
            self.ec.run("rm -r %s" % oldDir)
        # Make sure live directory exists
        if not exists(self.livePath):
            self.ec.run("mkdir -p %s" % self.livePath)
        # ISO Name (volume label) comes from the distro description
        self.isoName = self.dg.description
        # ISO distribution file name and full target path
        self.isoBaseName = self.dg.getIsoFileName()
        self.isoFileName = join(self.distroPath, self.isoBaseName)
        # Trackers, and webseeds: one entry per line in files/trackers and
        # files/webseeds, joined comma-separated for mktorrent.
        self.trackers = ""
        self.webseeds = ""
        trackersPath = join(self.scriptDir, "files/trackers")
        webseedsPath = join(self.scriptDir, "files/webseeds")
        if exists(trackersPath):
            with open(trackersPath, "r") as f:
                lines = f.readlines()
            trList = []
            for line in lines:
                trList.append(line.strip())
            self.trackers = ",".join(trList)
        if exists(webseedsPath):
            with open(webseedsPath, "r") as f:
                lines = f.readlines()
            wsList = []
            for line in lines:
                # Each webseed URL is suffixed with the ISO file name.
                #wsList.append("%s/%s" % (line.strip(), webseedIsoName))
                wsList.append("%s/%s" % (line.strip(), self.isoBaseName))
            self.webseeds = ",".join(wsList)

    def run(self):
        """Thread entry point: clean the chroot, refresh boot configuration,
        squash the root file system and produce ISO, md5 and torrent files.

        Each stage only runs while self.returnMessage is still None; the first
        error short-circuits the remaining stages.
        """
        try:
            if not exists(self.rootPath):
                self.returnMessage = "ERROR: Cannot find root directory: %s" % self.rootPath
            if not exists(self.bootPath):
                self.returnMessage = "ERROR: Cannot find boot directory: %s" % self.bootPath
            if self.returnMessage is None:
                print("======================================================")
                print("INFO: Cleanup and prepare ISO build...")
                print("======================================================")
                # Clean-up: copy files/cleanup.sh into the chroot, run it in a
                # chrooted terminal, then remove it again.
                script = "cleanup.sh"
                scriptSource = join(self.scriptDir, "files/{}".format(script))
                scriptTarget = join(self.rootPath, script)
                if exists(scriptSource):
                    self.copy_file(scriptSource, scriptTarget)
                    self.ec.run("chmod a+x %s" % scriptTarget)
                    plymouthTheme = self.dg.getPlymouthTheme()
                    #self.ec.run("chroot '%(rootPath)s' /bin/bash %(cleanup)s %(plymouthTheme)s" % {"rootPath": self.rootPath, "cleanup": cleanup, "plymouthTheme": plymouthTheme})
                    cmd = "/bin/bash %(cleanup)s %(plymouthTheme)s" % {"cleanup": script, "plymouthTheme": plymouthTheme}
                    self.ed.openTerminal(cmd)
                    remove(scriptTarget)
                # Remove shell/editor history from the chroot's /root.
                rootHome = join(self.rootPath, "root")
                nanoHist = join(rootHome, ".nano_history")
                if exists(nanoHist):
                    remove(nanoHist)
                bashHist = join(rootHome, ".bash_history")
                if exists(bashHist):
                    remove(bashHist)
                # Config naming: replace "Solyd... <serial|bit>" strings in the
                # boot loader configs with "<description> <YYYYMM>".
                # NOTE(review): pattern is a non-raw string; "\d" survives only
                # because Python keeps unknown escapes (SyntaxWarning on 3.12+).
                regExp = "solyd.*(\d{6}|-bit)"
                d = datetime.now()
                dateString = d.strftime("%Y%m")
                nameString = "{} {}".format(self.isoName, dateString)
                # write iso name to boot/isolinux/isolinux.cfg
                cfgFile = join(self.bootPath, "isolinux/isolinux.cfg")
                if exists(cfgFile):
                    content = ""
                    with open(cfgFile, 'r') as f:
                        content = f.read()
                    if content != "":
                        content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
                        # Make sure that the paths are correct (correcting very old stuff)
                        # NOTE(review): '.lz' and '/solydxk/' are regexes here;
                        # the unescaped '.' matches any character.
                        content = re.sub('.lz', '.img', content)
                        content = re.sub('/solydxk/', '/live/', content)
                        with open(cfgFile, 'w') as f:
                            f.write(content)
                # Write info for grub (EFI)
                grubFile = join(self.bootPath, "boot/grub/grub.cfg")
                if exists(grubFile):
                    content = ""
                    with open(grubFile, 'r') as f:
                        content = f.read()
                    if content != "":
                        content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
                        with open(grubFile, 'w') as f:
                            f.write(content)
                loopbackFile = join(self.bootPath, "boot/grub/loopback.cfg")
                if exists(loopbackFile):
                    content = ""
                    with open(loopbackFile, 'r') as f:
                        content = f.read()
                    if content != "":
                        content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
                        with open(loopbackFile, 'w') as f:
                            f.write(content)
                # Clean boot/live directory
                #popen("rm -rf %s/live/*" % self.bootPath)
                # Vmlinuz: resolve the file name the root/vmlinuz symlink points to.
                vmlinuzSymLink = join(self.distroPath, "root/vmlinuz")
                if lexists(vmlinuzSymLink):
                    vmlinuzFile = self.ec.run("ls -al %s | cut -d'>' -f2" % vmlinuzSymLink)[0].strip()
                else:
                    self.returnMessage = "ERROR: %s not found" % vmlinuzSymLink
            if self.returnMessage is None:
                vmlinuzPath = join(self.distroPath, "root/%s" % vmlinuzFile)
                if exists(vmlinuzPath):
                    print("Copy vmlinuz")
                    self.copy_file(vmlinuzPath, join(self.livePath, "vmlinuz"))
                else:
                    self.returnMessage = "ERROR: %s not found" % vmlinuzPath
            if self.returnMessage is None:
                # Initrd: same symlink-resolution dance as for vmlinuz.
                initrdSymLink = join(self.distroPath, "root/initrd.img")
                if lexists(initrdSymLink):
                    initrdFile = self.ec.run("ls -al %s | cut -d'>' -f2" % initrdSymLink)[0].strip()
                else:
                    self.returnMessage = "ERROR: %s not found" % initrdSymLink
            if self.returnMessage is None:
                initrdPath = join(self.distroPath, "root/%s" % initrdFile)
                if exists(initrdPath):
                    print("Copy initrd")
                    self.copy_file(initrdPath, join(self.livePath, "initrd.img"))
                else:
                    self.returnMessage = "ERROR: %s not found" % initrdPath
            if self.returnMessage is None:
                # Generate UUID
                #diskDir = join(self.bootPath, ".disk")
                #if not exists(diskDir):
                #makedirs(diskDir)
                #self.ec.run("rm -rf %s/*uuid*" % diskDir)
                #self.ec.run("uuidgen -r > %s/live-uuid-generic" % diskDir)
                #copy_file(join(diskDir, "live-uuid-generic"), join(diskDir, "live-uuid-generic"))
                #Update filesystem.size
                #self.ec.run("du -b %(directory)s/root/ 2> /dev/null | tail -1 | awk {'print $1;'} > %(directory)s/live/filesystem.size" % {"directory": self.bootPath})
                print("======================================================")
                print("INFO: Start building ISO...")
                print("======================================================")
                # build squash root
                print("Creating SquashFS root...")
                print("Updating File lists...")
                # Write the installed-package list (name<TAB>version) for the live system.
                dpkgQuery = ' dpkg -l | awk \'/^ii/ {print $2, $3}\' | sed -e \'s/ /\t/g\' '
                self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.livePath, "filesystem.packages") + '\"' )
                #dpkgQuery = ' dpkg-query -W --showformat=\'${Package} ${Version}\n\' '
                #self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.bootPath, "live/filesystem.manifest") + '\"' )
                #copy_file(join(self.bootPath, "live/filesystem.manifest"), join(self.bootPath, "live/filesystem.manifest-desktop"))
                # check for existing squashfs root
                if exists(join(self.livePath, "filesystem.squashfs")):
                    print("Removing existing SquashFS root...")
                    remove(join(self.livePath, "filesystem.squashfs"))
                print("Building SquashFS root...")
                # check for alternate mksquashfs
                # check for custom mksquashfs (for multi-threading, new features, etc.)
                mksquashfs = self.ec.run(cmd="echo $MKSQUASHFS", returnAsList=False).strip()
                rootPath = join(self.distroPath, "root/")
                squashfsPath = join(self.livePath, "filesystem.squashfs")
                if mksquashfs == '' or mksquashfs == 'mksquashfs':
                    # Default tool: use half the available cores, at least one.
                    try:
                        nrprocessors = int(int(self.ec.run("nproc", False, False))/2)
                        if nrprocessors < 1:
                            nrprocessors = 1
                    except:
                        nrprocessors = 1
                    cmd = "mksquashfs \"{}\" \"{}\" -comp xz -processors {}".format(rootPath, squashfsPath, nrprocessors)
                else:
                    cmd = "{} \"{}\" \"{}\"".format(mksquashfs, rootPath, squashfsPath)
                #print(cmd)
                self.ec.run(cmd)
                # build iso
                print("Creating ISO...")
                # update manifest files
                #self.ec.run("/usr/lib/solydxk/constructor/updateManifest.sh %s" % self.distroPath)
                # update md5
                print("Updating md5 sums...")
                if exists(join(self.bootPath, "md5sum.txt")):
                    remove(join(self.bootPath, "md5sum.txt"))
                if exists(join(self.bootPath, "MD5SUMS")):
                    remove(join(self.bootPath, "MD5SUMS"))
                self.ec.run('cd \"' + self.bootPath + '\"; ' + 'find . -type f -print0 | xargs -0 md5sum > md5sum.txt')
                #Remove md5sum.txt, MD5SUMS, boot.cat and isolinux.bin from md5sum.txt
                self.ec.run("sed -i '/md5sum.txt/d' %s/md5sum.txt" % self.bootPath)
                self.ec.run("sed -i '/MD5SUMS/d' %s/md5sum.txt" % self.bootPath)
                self.ec.run("sed -i '/boot.cat/d' %s/md5sum.txt" % self.bootPath)
                self.ec.run("sed -i '/isolinux.bin/d' %s/md5sum.txt" % self.bootPath)
                #Copy md5sum.txt to MD5SUMS (for Debian compatibility)
                self.copy_file(join(self.bootPath, "md5sum.txt"), join(self.bootPath, "MD5SUMS"))
                # Update isolinux files with the syslinux BIOS modules from the chroot.
                syslinuxPath = join(self.rootPath, "usr/lib/syslinux")
                modulesPath = join(syslinuxPath, "modules/bios")
                isolinuxPath = join(self.bootPath, "isolinux")
                self.ec.run("chmod -R +w {}".format(isolinuxPath))
                cat = join(isolinuxPath, "boot.cat")
                if exists(cat):
                    remove(cat)
                self.copy_file(join(modulesPath, "chain.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "hdt.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "libmenu.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "libgpl.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "reboot.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "vesamenu.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "poweroff.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "ldlinux.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "libcom32.c32"), isolinuxPath)
                self.copy_file(join(modulesPath, "libutil.c32"), isolinuxPath)
                self.copy_file(join(self.rootPath, "boot/memtest86+.bin"), join(isolinuxPath, "memtest86"))
                self.copy_file("/usr/lib/ISOLINUX/isolinux.bin", isolinuxPath)
                # remove existing iso
                if exists(self.isoFileName):
                    print("Removing existing ISO...")
                    remove(self.isoFileName)
                # build iso according to architecture
                print("Building ISO...")
                self.ec.run('genisoimage -input-charset utf-8 -o \"' + self.isoFileName + '\" -b \"isolinux/isolinux.bin\" -c \"isolinux/boot.cat\" -no-emul-boot -boot-load-size 4 -boot-info-table -V \"' + self.isoName + '\" -cache-inodes -r -J -l \"' + self.bootPath + '\"')
                print("Making Hybrid ISO...")
                # Makes the ISO bootable from a USB stick as well.
                self.ec.run("isohybrid %s" % self.isoFileName)
                print("Create ISO md5 file...")
                self.ec.run("echo \"$(md5sum \"%s\" | cut -d' ' -f 1) %s\" > \"%s.md5\"" % (self.isoFileName, self.isoBaseName, self.isoFileName))
                print("Create Torrent file...")
                torrentFile = "%s.torrent" % self.isoFileName
                if exists(torrentFile):
                    remove(torrentFile)
                self.ec.run("mktorrent -a \"%s\" -c \"%s\" -w \"%s\" -o \"%s\" \"%s\"" % (self.trackers, self.isoName, self.webseeds, torrentFile, self.isoFileName))
                print("======================================================")
                self.returnMessage = "DONE - ISO Located at: %s" % self.isoFileName
                print((self.returnMessage))
                print("======================================================")
            # Always report the outcome (success or the first error) to the GUI.
            self.queue.put(self.returnMessage)
        except Exception as detail:
            self.returnMessage = "ERROR: BuildIso: %(detail)s" % {"detail": detail}
            self.queue.put(self.returnMessage)

    def copy_file(self, file_path, destination):
        """Copy *file_path* to *destination*, printing (not raising) on failure."""
        if exists(file_path):
            try:
                copy(file_path, destination)
            except Exception as detail:
                print(("ERROR: BuildIso.copy_file: {}".format(detail)))
        else:
            print(("ERROR: BuildIso.copy_file: cannot find {}".format(file_path)))
# Class to create a chrooted terminal for a given directory
# https://wiki.debian.org/chroot
class EditDistro(object):
    """Open a chrooted terminal in a distribution's root directory.

    Sets up the chroot environment (bind mounts, DNS, wgetrc, policy-rc.d,
    ischroot shim), runs a terminal inside it, and tears everything down
    again afterwards.  See https://wiki.debian.org/chroot
    """

    def __init__(self, distroPath):
        # distroPath: distribution work directory (or its "root" subdirectory).
        self.ec = ExecCmd()
        self.dg = DistroGeneral(distroPath)
        distroPath = distroPath.rstrip('/')
        if basename(distroPath) == "root":
            distroPath = dirname(distroPath)
        self.rootPath = join(distroPath, "root")
        # ISO edition (used as the terminal window title)
        self.edition = self.dg.edition

    def openTerminal(self, command=""):
        """Run *command* (or an interactive shell if empty) chrooted in rootPath.

        Blocks until the terminal exits, then restores the host environment.
        """
        # Set some paths
        resolveCnfHost = "/etc/resolv.conf"
        resolveCnf = join(self.rootPath, "etc/resolv.conf")
        resolveCnfBak = "%s.bak" % resolveCnf
        wgetrc = join(self.rootPath, "etc/wgetrc")
        wgetrcBak = "%s.bak" % wgetrc
        terminal = "/tmp/constructor-terminal.sh"
        lockDir = join(self.rootPath, "run/lock/")
        proc = join(self.rootPath, "proc/")
        dev = join(self.rootPath, "dev/")
        pts = join(self.rootPath, "dev/pts/")
        sys = join(self.rootPath, "sys/")
        policy = join(self.rootPath, "usr/sbin/policy-rc.d")
        ischroot = join(self.rootPath, "usr/bin/ischroot")
        ischrootTmp = join(self.rootPath, "usr/bin/ischroot.tmp")
        try:
            # temporary create /run/lock
            if not exists(lockDir):
                makedirs(lockDir)
            # setup environment
            # copy dns info (so the chroot can resolve names)
            if exists(resolveCnf):
                move(resolveCnf, resolveCnfBak)
            if exists(resolveCnfHost):
                copy(resolveCnfHost, resolveCnf)
            # umount /proc /dev /dev/pts /sys (in case of stale mounts)
            self.unmount([pts, dev, proc, sys])
            # mount /proc /dev /dev/pts /sys /run /sys
            self.ec.run("mount --bind /proc '%s'" % proc)
            self.ec.run("mount --bind /dev '%s'" % dev)
            self.ec.run("mount --bind /dev/pts '%s'" % pts)
            self.ec.run("mount --bind /sys '%s'" % sys)
            # copy apt.conf
            #copy("/etc/apt/apt.conf", join(self.rootPath, "etc/apt/apt.conf"))
            # copy wgetrc (use the host's wget configuration inside the chroot)
            move(wgetrc, wgetrcBak)
            copy("/etc/wgetrc", wgetrc)
            # Let dpkg only start daemons when desired (policy-rc.d exit 101
            # tells invoke-rc.d not to start services inside the chroot)
            scr = "#!/bin/sh\nexit 101\n"
            with open(policy, 'w') as f:
                f.write(scr)
            self.ec.run("chmod a+x %s" % policy)
            # Temporary fix ischroot: replace with /bin/true so debconf
            # detects the chroot correctly (Debian bug workaround)
            if not exists(ischrootTmp):
                self.ec.run("mv %s %s" % (ischroot, ischrootTmp))
            if not exists(ischroot):
                self.ec.run("ln -s /bin/true %s" % ischroot)
            # HACK: create temporary script for chrooting
            if exists(terminal):
                remove(terminal)
            scr = "#!/bin/sh\nchroot '%s' %s\n" % (self.rootPath, command)
            with open(terminal, 'w') as f:
                f.write(scr)
            self.ec.run("chmod a+x %s" % terminal)
            if self.ec.run('which x-terminal-emulator'):
                # use x-terminal-emulator if xterm isn't available
                if exists("/usr/bin/xterm"):
                    self.ec.run('export HOME=/root ; xterm -bg black -fg white -rightbar -title \"%s\" -e %s' % (self.edition, terminal))
                else:
                    self.ec.run('export HOME=/root ; x-terminal-emulator -e %s' % terminal)
            else:
                print('Error: no valid terminal found')
            # restore wgetrc
            move(wgetrcBak, wgetrc)
            # remove apt.conf
            #remove(join(self.rootPath, "root/etc/apt/apt.conf"))
            # move dns info
            if exists(resolveCnfBak):
                move(resolveCnfBak, resolveCnf)
            else:
                remove(resolveCnf)
            # umount /proc /dev /dev/pts /sys
            self.unmount([pts, dev, proc, sys])
            # remove temp script
            if exists(terminal):
                remove(terminal)
            # remove policy script
            if exists(policy):
                remove(policy)
            # replace ischroot
            if exists("%s.tmp" % ischroot):
                self.ec.run("rm %s" % ischroot)
                self.ec.run("mv %s.tmp %s" % (ischroot, ischroot))
            # cleanup /run
            self.ec.run("rm -rf %s/run/*" % self.rootPath)
        except Exception as detail:
            # Best-effort teardown mirroring the success path.
            # NOTE(review): if the exception happened before wgetrc was moved,
            # this restore itself raises and masks the original error - confirm.
            # restore wgetrc
            move(wgetrcBak, wgetrc)
            # remove apt.conf
            #remove(join(self.rootPath, "etc/apt/apt.conf"))
            # move dns info
            if exists(resolveCnfBak):
                move(resolveCnfBak, resolveCnf)
            else:
                remove(resolveCnf)
            # umount /proc /dev /dev/pts /sys
            self.unmount([pts, dev, proc, sys])
            # remove temp script
            if exists(terminal):
                remove(terminal)
            # remove policy script
            if exists(policy):
                remove(policy)
            # replace ischroot
            if exists("%s.tmp" % ischroot):
                self.ec.run("rm %s" % ischroot)
                self.ec.run("mv %s.tmp %s" % (ischroot, ischroot))
            # cleanup /run
            self.ec.run("rm -rf %s/run/*" % self.rootPath)
            errText = 'Error launching terminal: '
            print((errText, detail))

    def unmount(self, mounts=[]):
        """Force-unmount each path in *mounts*, falling back to a lazy unmount."""
        for mount in mounts:
            self.ec.run("umount --force '%s'" % mount)
            self.ec.run("umount -l '%s'" % mount)
class DistroGeneral(object):
    """Read general information (edition, description, ISO file name) from an
    unpacked distribution tree.

    The edition and description come from etc/solydxk/info inside the chroot
    when present; otherwise they fall back to the directory name / "SolydXK".
    """

    def __init__(self, distroPath):
        # distroPath: distribution work directory (or its "root" subdirectory).
        self.ec = ExecCmd()
        distroPath = distroPath.rstrip('/')
        if basename(distroPath) == "root":
            distroPath = dirname(distroPath)
        self.distroPath = distroPath
        self.rootPath = join(distroPath, "root")
        # Defaults when etc/solydxk/info is missing.
        self.edition = basename(distroPath)
        self.description = "SolydXK"
        infoPath = join(self.rootPath, "etc/solydxk/info")
        if exists(infoPath):
            self.edition = self.ec.run(cmd="grep EDITION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"')
            self.description = self.ec.run(cmd="grep DESCRIPTION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"')

    def getPlymouthTheme(self):
        """Return the SolydXK plymouth theme installed in the chroot, or ""."""
        plymouthTheme = ""
        if exists(join(self.rootPath, "usr/share/plymouth/themes/solydk-logo")):
            plymouthTheme = "solydk-logo"
        elif exists(join(self.rootPath, "usr/share/plymouth/themes/solydx-logo")):
            plymouthTheme = "solydx-logo"
        return plymouthTheme

    def getIsoFileName(self):
        """Build the ISO file name: <description>_<YYYYMM>[_<lang>].iso.

        A non-English system locale (from etc/default/locale) appends its
        two-letter language code to the serial.
        """
        # Get the date string
        d = datetime.now()
        serial = d.strftime("%Y%m")
        # Check for a localized system
        localePath = join(self.rootPath, "etc/default/locale")
        if exists(localePath):
            locale = self.ec.run(cmd="grep LANG= {}".format(localePath), returnAsList=False).strip('"').replace(" ", "")
            # Fix: raw string - the previous non-raw "\=\s*..." relied on an
            # invalid escape sequence (SyntaxWarning on Python 3.12+).
            matchObj = re.search(r"\=\s*([a-z]{2})", locale)
            if matchObj:
                language = matchObj.group(1)
                if language != "en":
                    serial += "_{}".format(language)
        isoFileName = "{}_{}.iso".format(self.description.lower().replace(' ', '_').split('-')[0], serial)
        return isoFileName
|
[
"root@solydxk"
] |
root@solydxk
|
1c990786b09382998bcbe64210b2d6960dcbb44f
|
6691d0c71ddb92422fddb5d5994b660ee88a2435
|
/SDP_Assignments/Game_of_life/game_of_life_vishnu/GolLogic.py
|
0bbc877b4cafbc1f8997045ede5e4138c3d71dd9
|
[] |
no_license
|
dadi-vardhan/SDP
|
fb1b2e49c014d769add0e6244ca302e4b6939de5
|
f692837c2cda68d8b16d57727d4b727acf545bf2
|
refs/heads/master
| 2023-03-13T10:28:49.060533
| 2021-03-08T16:45:38
| 2021-03-08T16:45:38
| 310,674,824
| 0
| 1
| null | 2020-11-23T09:06:31
| 2020-11-06T18:23:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import time
import numpy as np
import matplotlib.pyplot as plt
class Logic(object):
    """Update rules for Conway's Game of Life on a padded numpy grid.

    Only the interior cells (grid[1:-1, 1:-1]) evolve; the one-cell border
    acts as a permanently dead frame.
    """

    def __init__(self, console):
        # The console supplies the shared numpy array the rules mutate in place.
        self.state = console.state

    def neighbour_cell_count(self):
        '''
        Return an array holding, for every interior cell, the number of
        live cells among its eight neighbours.

        Parameters: none
        Returns: counts [numpy array, shape (rows-2, cols-2)]
        '''
        grid = self.state
        counts = np.zeros_like(grid[1:-1, 1:-1])
        # Sum the eight shifted views of the grid (skipping the centre).
        for row_shift in (0, 1, 2):
            for col_shift in (0, 1, 2):
                if (row_shift, col_shift) == (1, 1):
                    continue
                counts = counts + grid[row_shift:row_shift - 2 or None,
                                       col_shift:col_shift - 2 or None]
        return counts

    def cell_propogation_rules(self):
        '''
        Apply the birth/survival rules in place and return the updated grid.

        Parameters: none
        Returns : state [numpy array]
        '''
        neighbours = self.neighbour_cell_count()
        grid = self.state
        interior = grid[1:-1, 1:-1]
        # Birth: exactly three neighbours around a dead cell.
        born = (neighbours == 3) & (interior == 0)
        # Survival: a live cell with two or three neighbours stays alive.
        survives = ((neighbours == 2) | (neighbours == 3)) & (interior == 1)
        grid[...] = 0
        grid[1:-1, 1:-1][born | survives] = 1
        # Per-step statistics exposed to the caller.
        self.total_cell_birth = np.sum(born)
        self.total_cell_survived = np.sum(survives)
        return grid
|
[
"vishnu.dadi@smail.h-brs.de"
] |
vishnu.dadi@smail.h-brs.de
|
3259d0615171353e16e44fb0506a5558587028c0
|
d037002f9d2b383ef84686bbb9843dac8ee4bed7
|
/tutorials/Trash/Distributed-DRL/torch/sac_test/utils/environment.py
|
c86069ea34cea9e7eb5b64d4846270b3babd3d96
|
[
"MIT"
] |
permissive
|
ICSL-hanyang/Code_With_RL
|
4edb23ca24c246bb8ec75fcf445d3c68d6c40b6d
|
1378996e6bf6da0a96e9c59f1163a635c20b3c06
|
refs/heads/main
| 2023-08-15T18:37:57.689950
| 2021-10-18T07:31:59
| 2021-10-18T07:31:59
| 392,944,467
| 0
| 0
| null | 2021-08-05T07:20:57
| 2021-08-05T07:20:56
| null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import gym
class Environment:
    """Thin wrapper around a gym environment that tracks episode state.

    `can_run` guards the reset/step protocol: reset() may only be called
    between episodes, step() only inside one.
    """

    def __init__(self, env_name):
        self.env = gym.make(env_name)
        self.state_dim = self.env.observation_space.shape[0]
        self._max_episode_steps = self.env._max_episode_steps
        self.can_run = False
        self.state = None
        # Continuous action spaces are gym Box instances; anything else is
        # treated as discrete.
        action_space = self.env.action_space
        if type(action_space) == gym.spaces.box.Box:
            self.action_dim = action_space.shape[0]
            self.is_discrete = False
        else:
            self.action_dim = action_space.n
            self.is_discrete = True

    def reset(self):
        """Start a new episode and return its initial observation."""
        assert not self.can_run
        self.can_run = True
        self.state = self.env.reset()
        return self.state

    def step(self, action):
        """Advance one step; returns (next_state, reward, done, info)."""
        assert self.can_run
        observation, reward, done, info = self.env.step(action)
        self.state = observation
        if done:
            self.can_run = False
        return observation, reward, done, info
|
[
"nzy1414117007@gmail.com"
] |
nzy1414117007@gmail.com
|
fe5b26c41e27f960c84721814d918ba912d334fe
|
2aee7676daad10456a34fe23ce952966c05718ff
|
/regular_expression/q3.py
|
0b17a5f0733331c189c670e8f860c6394bec5ba8
|
[] |
no_license
|
sharonsabu/pythondjango2021
|
405b45bc08717301315016d7ccb9b4a03c631475
|
1dfb60b92296bc85248bad029a3fd370745623a6
|
refs/heads/master
| 2023-04-18T19:39:40.378956
| 2021-05-02T05:48:56
| 2021-05-02T05:48:56
| 333,471,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
from re import *

# Match runs of at least 2 and at most 3 consecutive "a" characters.
pattern = "a{2,3}"
matcher = finditer(pattern, "aaaacaabbaaab")
for found in matcher:
    print(found.start())
    print(found.group())
|
[
"sharonsabu100@gmail.com"
] |
sharonsabu100@gmail.com
|
b71909c9661e6baf2be15d0e61a3055456d35d1a
|
290b4c7ca63a975b38e55018cc38bd2766e14639
|
/ORC_app/jni-build/jni/include/tensorflow/tensorflow.bzl
|
bb0e46adddd64bf4473131cda060e9cc6eee198f
|
[
"MIT"
] |
permissive
|
luoabd/EMNIST-ORC
|
1233c373abcc3ed237c2ec86491b29c0b9223894
|
8c2d633a9b4d5214e908550812f6a2489ba9eb72
|
refs/heads/master
| 2022-12-27T14:03:55.046933
| 2020-01-16T15:20:04
| 2020-01-16T15:20:04
| 234,325,497
| 0
| 1
|
MIT
| 2022-12-11T13:32:42
| 2020-01-16T13:25:23
|
C++
|
UTF-8
|
Python
| false
| false
| 20,524
|
bzl
|
# -*- Python -*-
# Parse the bazel version string from `native.bazel_version`.
def _parse_bazel_version(bazel_version):
    """Converts a `native.bazel_version` string into a tuple of ints."""
    # Drop a trailing commit hash: "x.y.z abcdef" -> "x.y.z".
    version = bazel_version.split(" ", 1)[0]
    # Keep only the release part of "release-date".
    release = version.split('-', 1)[0]
    # "x.y.z" -> (x, y, z)
    return tuple([int(piece) for piece in release.split('.')])
# Check that a specific bazel version is being used.
def check_version(bazel_version):
    """Fails the build unless the running Bazel is at least *bazel_version*.

    Silently does nothing on (very old) Bazels that don't expose
    `native.bazel_version`.
    """
    if "bazel_version" not in dir(native) or not native.bazel_version:
        return
    current_version = _parse_bazel_version(native.bazel_version)
    minimum_version = _parse_bazel_version(bazel_version)
    if minimum_version > current_version:
        fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
            native.bazel_version, bazel_version))
# Return the options to use for a C++ library or binary build.
# Uses the ":optmode" config_setting to pick the options.
load("//tensorflow/core:platform/default/build_config_root.bzl",
"tf_cuda_tests_tags")
# List of proto files for android builds
def tf_android_core_proto_sources():
    """Returns the Android proto files as full //tensorflow/core labels."""
    labels = []
    for rel_path in tf_android_core_proto_sources_relative():
        labels.append("//tensorflow/core:" + rel_path)
    return labels
# As tf_android_core_proto_sources, but paths relative to
# //third_party/tensorflow/core.
# As tf_android_core_proto_sources, but paths relative to
# //third_party/tensorflow/core.
def tf_android_core_proto_sources_relative():
    """Returns the proto files needed for Android builds, relative to
    //tensorflow/core.  The list is data: keep entries sorted per directory."""
    return [
        # example/
        "example/example.proto",
        "example/feature.proto",
        # framework/
        "framework/allocation_description.proto",
        "framework/attr_value.proto",
        "framework/device_attributes.proto",
        "framework/function.proto",
        "framework/graph.proto",
        "framework/kernel_def.proto",
        "framework/log_memory.proto",
        "framework/op_def.proto",
        "framework/step_stats.proto",
        "framework/summary.proto",
        "framework/tensor.proto",
        "framework/tensor_description.proto",
        "framework/tensor_shape.proto",
        "framework/tensor_slice.proto",
        "framework/types.proto",
        "framework/versions.proto",
        # lib/, protobuf/, util/
        "lib/core/error_codes.proto",
        "protobuf/config.proto",
        "protobuf/saver.proto",
        "util/memmapped_file_system.proto",
        "util/saved_tensor_slice.proto",
        "util/test_log.proto",
    ]
# Returns the list of pb.h headers that are generated for
# tf_android_core_proto_sources().
def tf_android_core_proto_headers():
    """Returns the generated .pb.h labels for tf_android_core_proto_sources()."""
    headers = []
    for rel_path in tf_android_core_proto_sources_relative():
        headers.append("//tensorflow/core/" + rel_path.replace(".proto", ".pb.h"))
    return headers
# Returns `a` when building with CUDA support (--config=cuda), else `b`.
def if_cuda(a, b=[]):
    return select({
        "//third_party/gpus/cuda:cuda_crosstool_condition": a,
        "//conditions:default": b,
    })
# Returns `a` when targeting Android on ARM, else `b`.
def if_android_arm(a, b=[]):
    return select({
        "//tensorflow:android_arm": a,
        "//conditions:default": b,
    })
def tf_copts():
    """Returns the default compile options for TensorFlow C++ targets,
    adjusted for CUDA, Android/ARM and the target platform."""
    return (["-fno-exceptions", "-DEIGEN_AVOID_STL_ARRAY",] +
            if_cuda(["-DGOOGLE_CUDA=1"]) +
            if_android_arm(["-mfpu=neon"]) +
            select({"//tensorflow:android": [
                        # Lean, optimized binary settings for Android.
                        "-std=c++11",
                        "-DMIN_LOG_LEVEL=0",
                        "-DTF_LEAN_BINARY",
                        "-O2",
                    ],
                    "//tensorflow:darwin": [],
                    "//conditions:default": ["-pthread"]}))
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
def tf_gen_op_libs(op_lib_names):
    """Creates one "<op>_op_lib" cc_library per name in *op_lib_names*.

    Splitting the ops into individual libraries lets the per-language
    wrapper generators depend on exactly the ops they need.
    """
    for op_name in op_lib_names:
        native.cc_library(
            name = op_name + "_op_lib",
            copts = tf_copts(),
            srcs = ["ops/" + op_name + ".cc"],
            deps = ["//tensorflow/core:framework"],
            visibility = ["//visibility:public"],
            alwayslink = 1,
            linkstatic = 1,
        )
def tf_gen_op_wrapper_cc(name, out_ops_file, pkg=""):
    """Generates <out_ops_file>.h/.cc C++ wrappers for the ops in
    <pkg>:<name>_op_lib by building and running a one-shot generator binary."""
    # Construct an op generator binary for these ops.
    tool = out_ops_file + "_gen_cc"
    native.cc_binary(
        name = tool,
        copts = tf_copts(),
        linkopts = ["-lm"],
        linkstatic = 1,  # Faster to link this one-time-use binary dynamically
        deps = (["//tensorflow/cc:cc_op_gen_main",
                 pkg + ":" + name + "_op_lib"])
    )
    # Run the op generator.
    # Only sendrecv ops expose internal ops in the generated wrappers.
    if name == "sendrecv_ops":
        include_internal = "1"
    else:
        include_internal = "0"
    native.genrule(
        name=name + "_genrule",
        outs=[out_ops_file + ".h", out_ops_file + ".cc"],
        tools=[":" + tool],
        cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
             "$(location :" + out_ops_file + ".cc) " + include_internal))
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
# files for each of the ops files mentioned, and then generate a
# single cc_library called "name" that combines all the
# generated C++ code.
#
# For example, for:
# tf_gen_op_wrappers_cc("tf_ops_lib", [ "array_ops", "math_ops" ])
#
#
# This will ultimately generate ops/* files and a library like:
#
# cc_library(name = "tf_ops_lib",
# srcs = [ "ops/array_ops.cc",
# "ops/math_ops.cc" ],
# hdrs = [ "ops/array_ops.h",
# "ops/math_ops.h" ],
# deps = [ ... ])
def tf_gen_op_wrappers_cc(name,
                          op_lib_names=[],
                          other_srcs=[],
                          other_hdrs=[],
                          pkg=""):
    """Generates C++ wrappers for the given op libs and bundles them into one
    cc_library called *name*.

    Args:
      name: name of the combined cc_library target.
      op_lib_names: ops files (without .cc extension) to generate wrappers for.
      other_srcs: extra sources compiled into the library.
      other_hdrs: extra headers exposed by the library.
      pkg: package that holds the "<op>_op_lib" targets.
    """
    # Fix: copy the incoming lists.  `subsrcs += [...]` on the alias would
    # extend the caller's list -- and the shared default [] -- in place, so
    # generated entries leaked between calls of this macro.
    subsrcs = [] + other_srcs
    subhdrs = [] + other_hdrs
    for n in op_lib_names:
        tf_gen_op_wrapper_cc(n, "ops/" + n, pkg=pkg)
        subsrcs += ["ops/" + n + ".cc"]
        subhdrs += ["ops/" + n + ".h"]
    native.cc_library(name=name,
                      srcs=subsrcs,
                      hdrs=subhdrs,
                      deps=["//tensorflow/core:core_cpu"],
                      copts=tf_copts(),
                      alwayslink=1,)
# Invoke this rule in .../tensorflow/python to build the wrapper library.
# Invoke this rule in .../tensorflow/python to build the wrapper library.
def tf_gen_op_wrapper_py(name, out=None, hidden=[], visibility=None, deps=[],
                         require_shape_functions=False):
    """Generates the "gen_<name>.py" Python op wrappers and wraps them in a
    py_library called *name*.  `hidden` lists op names to hide from the
    public module."""
    # Construct a cc_binary containing the specified ops.
    tool_name = "gen_" + name + "_py_wrappers_cc"
    if not deps:
        deps = ["//tensorflow/core:" + name + "_op_lib"]
    native.cc_binary(
        name = tool_name,
        linkopts = ["-lm"],
        copts = tf_copts(),
        linkstatic = 1,  # Faster to link this one-time-use binary dynamically
        deps = (["//tensorflow/core:framework",
                 "//tensorflow/python:python_op_gen_main"] + deps),
        visibility = ["//tensorflow:internal"],
    )
    # Invoke the previous cc_binary to generate a python file.
    if not out:
        out = "ops/gen_" + name + ".py"
    native.genrule(
        name=name + "_pygenrule",
        outs=[out],
        tools=[tool_name],
        cmd=("$(location " + tool_name + ") " + ",".join(hidden)
             + " " + ("1" if require_shape_functions else "0") + " > $@"))
    # Make a py_library out of the generated python file.
    native.py_library(name=name,
                      srcs=[out],
                      srcs_version="PY2AND3",
                      visibility=visibility,
                      deps=[
                          "//tensorflow/python:framework_for_generated_wrappers",
                      ],)
# Define a bazel macro that creates cc_test for tensorflow.
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
# Define a bazel macro that creates cc_test for tensorflow.
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
def tf_cc_test(name, deps, linkstatic=0, tags=[], data=[], size="medium",
               suffix="", args=None):
    """Creates a cc_test from a single test source file.

    *name* may be a path with a .cc extension; the target name is the path
    with "/" replaced by "_" plus *suffix* (e.g. "_gpu").
    """
    name = name.replace(".cc", "")
    native.cc_test(name="%s%s" % (name.replace("/", "_"), suffix),
                   size=size,
                   srcs=["%s.cc" % (name)],
                   args=args,
                   copts=tf_copts(),
                   data=data,
                   deps=deps,
                   linkopts=["-lpthread", "-lm"],
                   linkstatic=linkstatic,
                   tags=tags,)
def tf_cuda_cc_test(name, deps, tags=[], data=[], size="medium"):
    """Creates two cc_tests: a manual-only CPU variant and a "_gpu" variant
    linked against the GPU runtime (when building with --config=cuda)."""
    # CPU variant, tagged "manual" so it is not run by default.
    tf_cc_test(name=name,
               deps=deps,
               tags=tags + ["manual"],
               data=data,
               size=size)
    # GPU variant; statically linked under CUDA to work around linking issues.
    tf_cc_test(name=name,
               suffix="_gpu",
               deps=deps + if_cuda(["//tensorflow/core:gpu_runtime"]),
               linkstatic=if_cuda(1, 0),
               tags=tags + tf_cuda_tests_tags(),
               data=data,
               size=size)
# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(tests, deps, linkstatic=0, tags=[], size="medium", args=None):
    """Creates a tf_cc_test target for every test source listed in *tests*."""
    for test_src in tests:
        tf_cc_test(test_src, deps, linkstatic, tags=tags, size=size, args=args)
def tf_cuda_cc_tests(tests, deps, tags=[], size="medium"):
    """Creates a tf_cuda_cc_test pair for every test source listed in *tests*."""
    for test_src in tests:
        tf_cuda_cc_test(test_src, deps, tags=tags, size=size)
# Build defs for TensorFlow kernels
# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
def tf_gpu_kernel_library(srcs, copts=[], cuda_copts=[], deps=[], hdrs=[],
                          **kwargs):
    """Creates a cc_library for GPU kernel sources.  Under --config=cuda the
    sources are compiled as CUDA (-x cuda) with GPU deps linked in; otherwise
    they build as plain C++ with no CUDA flags."""
    # nvcc/gcudacc flags: relaxed constexpr plus flush-to-zero float handling.
    cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1",
                  "-nvcc_options=relaxed-constexpr", "-nvcc_options=ftz=true",
                  "--gcudacc_flag=-ftz=true"] + cuda_copts
    native.cc_library(
        srcs = srcs,
        hdrs = hdrs,
        copts = copts + if_cuda(cuda_copts),
        deps = deps + if_cuda([
            "//tensorflow/core:cuda",
            "//tensorflow/core:gpu_lib",
        ]),
        alwayslink=1,
        **kwargs)
def tf_cuda_library(deps=None, cuda_deps=None, copts=None, **kwargs):
    """Generate a cc_library with a conditional set of CUDA dependencies.

    When the library is built with --config=cuda:
    - both deps and cuda_deps are used as dependencies
    - the gcudacc runtime is added as a dependency (if necessary)
    - the library additionally passes -DGOOGLE_CUDA=1 in its copts

    Args:
      - cuda_deps: BUILD dependencies linked only under '--config=cuda'.
      - deps: dependencies that are always linked.
      - copts: copts always passed to the cc_library.
      - kwargs: any other argument to cc_library.
    """
    # Treat None like an empty list for all three optional arguments.
    deps = deps or []
    cuda_deps = cuda_deps or []
    copts = copts or []
    native.cc_library(
        deps = deps + if_cuda(cuda_deps + ["//tensorflow/core:cuda"]),
        copts = copts + if_cuda(["-DGOOGLE_CUDA=1"]),
        **kwargs)
def tf_kernel_library(name, prefix=None, srcs=None, gpu_srcs=None, hdrs=None,
                      deps=None, alwayslink=1, **kwargs):
    """A rule to build a TensorFlow OpKernel.

    May either specify srcs/hdrs or prefix. Similar to tf_cuda_library,
    but with alwayslink=1 by default. If prefix is specified:
      * prefix*.cc (except *.cu.cc) is added to srcs
      * prefix*.h (except *.cu.h) is added to hdrs
      * prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
    With the exception that test files are excluded.
    For example, with prefix = "cast_op",
      * srcs = ["cast_op.cc"]
      * hdrs = ["cast_op.h"]
      * gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
      * "cast_op_test.cc" is excluded
    With prefix = "cwise_op"
      * srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
      * hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
      * gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
                    "cwise_ops.h", "cwise_ops_common.h",
                    "cwise_ops_gpu_common.cu.h"]
      * "cwise_ops_test.cc" is excluded
    """
    # Normalize the optional list arguments.
    if not srcs:
        srcs = []
    if not hdrs:
        hdrs = []
    if not deps:
        deps = []
    if prefix:
        # Expand prefix into concrete source/header/GPU-source lists via glob.
        if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
            if not gpu_srcs:
                gpu_srcs = []
            gpu_srcs = gpu_srcs + native.glob([prefix + "*.cu.cc", prefix + "*.h"],
                                              exclude = ["*test*"])
        srcs = srcs + native.glob([prefix + "*.cc"],
                                  exclude = ["*test*", "*.cu.cc"])
        hdrs = hdrs + native.glob([prefix + "*.h"], exclude = ["*test*", "*.cu.h"])
    cuda_deps = ["//tensorflow/core:gpu_lib"]
    if gpu_srcs:
        # Build the CUDA kernels into a companion "<name>_gpu" library.
        tf_gpu_kernel_library(
            name = name + "_gpu",
            srcs = gpu_srcs,
            deps = deps,
            **kwargs)
        cuda_deps.extend([":" + name + "_gpu"])
    tf_cuda_library(
        name = name,
        srcs = srcs,
        hdrs = hdrs,
        copts = tf_copts(),
        cuda_deps = cuda_deps,
        linkstatic = 1,  # Needed since alwayslink is broken in bazel b/27630669
        alwayslink = alwayslink,
        deps = deps,
        **kwargs)
def tf_kernel_libraries(name, prefixes, deps=None, **kwargs):
    """Makes one tf_kernel_library per prefix, plus an umbrella cc_library
    *name* that depends on all of them."""
    sub_targets = []
    for prefix in prefixes:
        tf_kernel_library(name=prefix, prefix=prefix, deps=deps, **kwargs)
        sub_targets.append(":" + prefix)
    native.cc_library(name=name, deps=sub_targets)
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
  """Rule implementation: runs SWIG over one .i file.

  Produces a C++ wrapper (cc_out) and a Python module (py_out) via a single
  SWIG action.  Exactly one source label is accepted.
  """
  srcs = ctx.files.srcs
  if len(srcs) != 1:
    fail("Exactly one SWIG source file label must be specified.", "srcs")
  module_name = ctx.attr.module_name
  cc_out = ctx.outputs.cc_out
  py_out = ctx.outputs.py_out
  src = ctx.files.srcs[0]
  # Build the SWIG command line: C++ mode, Python target language.
  args = ["-c++", "-python"]
  args += ["-module", module_name]
  args += ["-l" + f.path for f in ctx.files.swig_includes]
  # `set` and `+=` here are the legacy Bazel set builtin (pre-depset).
  cc_include_dirs = set()
  cc_includes = set()
  for dep in ctx.attr.deps:
    # Make every transitive header of each dep visible to SWIG via -I.
    cc_include_dirs += [h.dirname for h in dep.cc.transitive_headers]
    cc_includes += dep.cc.transitive_headers
  args += ["-I" + x for x in cc_include_dirs]
  args += ["-I" + ctx.label.workspace_root]
  args += ["-o", cc_out.path]
  args += ["-outdir", py_out.dirname]
  args += [src.path]
  outputs = [cc_out, py_out]
  # `ctx.action` is the legacy action API; inputs are sorted for determinism.
  ctx.action(executable=ctx.executable.swig_binary,
             arguments=args,
             mnemonic="PythonSwig",
             inputs=sorted(set([src]) + cc_includes + ctx.files.swig_includes +
                         ctx.attr.swig_deps.files),
             outputs=outputs,
             progress_message="SWIGing {input}".format(input=src.path))
  return struct(files=set(outputs))
# Private rule wrapping _py_wrap_cc_impl: runs SWIG over a single .i file,
# emitting %{module_name}.cc and %{py_module_name}.py.  `deps` must provide
# the `cc` provider so transitive headers can be passed to SWIG.
_py_wrap_cc = rule(attrs={
    "srcs": attr.label_list(mandatory=True,
                            allow_files=True,),
    "swig_includes": attr.label_list(cfg=DATA_CFG,
                                     allow_files=True,),
    "deps": attr.label_list(allow_files=True,
                            providers=["cc"],),
    "swig_deps": attr.label(default=Label(
        "//tensorflow:swig")),  # swig_templates
    "module_name": attr.string(mandatory=True),
    "py_module_name": attr.string(mandatory=True),
    # The SWIG executable itself, built for the host configuration.
    "swig_binary": attr.label(default=Label("//tensorflow:swig"),
                              cfg=HOST_CFG,
                              executable=True,
                              allow_files=True,),
},
                   outputs={
                       "cc_out": "%{module_name}.cc",
                       "py_out": "%{py_module_name}.py",
                   },
                   implementation=_py_wrap_cc_impl,)
# Bazel rule for collecting the header files that a target depends on.
def _transitive_hdrs_impl(ctx):
  """Collects every header file transitively exported by ctx.attr.deps."""
  outputs = set()  # legacy Bazel set builtin (pre-depset)
  for dep in ctx.attr.deps:
    outputs += dep.cc.transitive_headers
  return struct(files=outputs)
# Private rule exposing _transitive_hdrs_impl: its `files` are all headers
# transitively exported by `deps` (which must carry the `cc` provider).
_transitive_hdrs = rule(attrs={
    "deps": attr.label_list(allow_files=True,
                            providers=["cc"]),
},
                        implementation=_transitive_hdrs_impl,)
def transitive_hdrs(name, deps=[], **kwargs):
  """Exposes, as a filegroup `name`, every header transitively exported by deps."""
  gather_name = name + "_gather"
  _transitive_hdrs(name=gather_name, deps=deps)
  native.filegroup(name=name, srcs=[":" + gather_name])
# Create a header only library that includes all the headers exported by
# the libraries in deps.
def cc_header_only_library(name, deps=[], **kwargs):
  """Builds a header-only cc_library from all headers exported by deps."""
  gather_name = name + "_gather"
  _transitive_hdrs(name=gather_name, deps=deps)
  native.cc_library(name=name, hdrs=[":" + gather_name], **kwargs)
def tf_custom_op_library_additional_deps():
  """Returns the deps every custom-op shared library needs beyond its own."""
  return [
      "//google/protobuf",
      "//third_party/eigen3",
      "//tensorflow/core:framework_headers_lib",
  ]
# Helper to build a dynamic library (.so) from the sources containing
# implementations of custom ops and kernels.
def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[]):
  """Builds a dynamic library (.so) with custom op / kernel implementations.

  If gpu_srcs is non-empty, a CUDA-compiled cc_library is built first and
  linked into the final cc_binary (only under the cuda config, via if_cuda).
  """
  cuda_deps = [
      "//tensorflow/core:stream_executor_headers_lib",
      "//third_party/gpus/cuda:cudart_static",
  ]
  deps = deps + tf_custom_op_library_additional_deps()
  if gpu_srcs:
    basename = name.split(".")[0]
    # Compile gpu_srcs as CUDA; the flags are nvcc/gcudacc specific.
    cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1",
                  "-nvcc_options=relaxed-constexpr", "-nvcc_options=ftz=true",
                  "--gcudacc_flag=-ftz=true"]
    native.cc_library(
        name = basename + "_gpu",
        srcs = gpu_srcs,
        copts = if_cuda(cuda_copts),
        deps = deps + if_cuda(cuda_deps))
    # Mutates cuda_deps so the GPU library is picked up below.
    cuda_deps.extend([":" + basename + "_gpu"])
  # linkshared=1 makes this a loadable .so rather than an executable.
  native.cc_binary(name=name,
                   srcs=srcs,
                   deps=deps + if_cuda(cuda_deps),
                   linkshared=1,
                   linkopts = select({
                       "//conditions:default": [
                           "-lm",
                       ],
                       "//tensorflow:darwin": [],
                   }),
  )
def tf_extension_linkopts():
  """Extra linker options for Python extension modules; none by default."""
  return []  # No extension link opts
def tf_extension_copts():
  """Extra compiler options for Python extension modules; none by default."""
  return []  # No extension c opts
def tf_py_wrap_cc(name, srcs, swig_includes=[], deps=[], copts=[], **kwargs):
  """Builds a SWIG-wrapped Python extension: .so + wrapping py_library."""
  module_name = name.split("/")[-1]
  # Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
  # and use that as the name for the rule producing the .so file.
  cc_library_name = "/".join(name.split("/")[:-1] + ["_" + module_name + ".so"])
  extra_deps = []
  # Generate the SWIG wrapper sources (<module>.cc and <name>.py).
  _py_wrap_cc(name=name + "_py_wrap",
              srcs=srcs,
              swig_includes=swig_includes,
              deps=deps + extra_deps,
              module_name=module_name,
              py_module_name=name)
  # Compile the generated wrapper into the shared extension module.
  native.cc_binary(
      name=cc_library_name,
      srcs=[module_name + ".cc"],
      copts=(copts + ["-Wno-self-assign", "-Wno-write-strings"]
             + tf_extension_copts()),
      linkopts=tf_extension_linkopts(),
      linkstatic=1,
      linkshared=1,
      deps=deps + extra_deps)
  # Public target: the Python module plus the .so it loads at runtime.
  native.py_library(name=name,
                    srcs=[":" + name + ".py"],
                    srcs_version="PY2AND3",
                    data=[":" + cc_library_name])
def tf_py_test(name, srcs, size="medium", data=[], main=None, args=[],
               tags=[], shard_count=1, additional_deps=[]):
  """Declares a TensorFlow py_test with the standard internal test deps."""
  native.py_test(
      name=name,
      size=size,
      srcs=srcs,
      main=main,
      args=args,
      tags=tags,
      visibility=["//tensorflow:internal"],
      shard_count=shard_count,
      data=data,
      deps=[
          "//tensorflow/python:extra_py_tests_deps",
          "//tensorflow/python:kernel_tests/gradient_checker",
      ] + additional_deps,
      srcs_version="PY2AND3")
def cuda_py_test(name, srcs, size="medium", data=[], main=None, args=[],
                 shard_count=1, additional_deps=[]):
  """Declares a single Python test tagged to run under the CUDA test config."""
  tf_py_test(name=name, size=size, srcs=srcs, data=data, main=main,
             args=args, tags=tf_cuda_tests_tags(), shard_count=shard_count,
             additional_deps=additional_deps)
def py_tests(name,
             srcs,
             size="medium",
             additional_deps=[],
             data=[],
             tags=[],
             shard_count=1,
             prefix=""):
  """Declares one tf_py_test per source file.

  Each test is named after the file's basename (without extension),
  optionally prefixed with `prefix` and an underscore.
  """
  for src in srcs:
    base = src.split("/")[-1].split(".")[0]
    if prefix:
      base = prefix + "_" + base
    tf_py_test(name=base, size=size, srcs=[src], main=src, tags=tags,
               shard_count=shard_count, data=data,
               additional_deps=additional_deps)
def cuda_py_tests(name, srcs, size="medium", additional_deps=[], data=[], shard_count=1):
  """Declares one CUDA-tagged Python test per source file."""
  py_tests(name=name, size=size, srcs=srcs, data=data,
           additional_deps=additional_deps, tags=tf_cuda_tests_tags(),
           shard_count=shard_count)
|
[
"abdellah.lahnaoui@gmail.com"
] |
abdellah.lahnaoui@gmail.com
|
b55e30d6f12b49a52c2c808328cfba62b35668cb
|
71711bd2c11a3c0cbbc99bcfa78384d005e07828
|
/puct_mcts/datasets.py
|
f2aa99600a387a45d927073b70ec24d3e7ff95c7
|
[
"BSD-3-Clause"
] |
permissive
|
kastnerkyle/exploring_species_counterpoint
|
9365b2485cd227e375521f769ba1bfbd62c7b629
|
dda762463e64036adeba7efd46c51daaaf906019
|
refs/heads/master
| 2021-09-13T10:55:03.096300
| 2018-04-28T19:00:21
| 2018-04-28T19:00:21
| 103,225,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14
|
py
|
../datasets.py
|
[
"kastnerkyle@gmail.com"
] |
kastnerkyle@gmail.com
|
4fcd5f7a94f65e8208038c8f3ad8ad80fbf84495
|
0e531fa04060ca129a1c3323c7c403a373e6c00d
|
/pca2tracks.py
|
144f71e0fac0d9de660813931366a2c86113f2fa
|
[] |
no_license
|
zhipenglu/xist_structure
|
6b71f4f718991d22d00d5b0fc8008b6e97581b62
|
0dfb910d0b303fc94d421c66bb2e484b8e72297e
|
refs/heads/master
| 2020-04-22T23:17:33.781817
| 2019-02-14T18:22:33
| 2019-02-14T18:22:33
| 170,736,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
"""
pca2tracks.py
This script converts the PCA analysis results for RIP/CLIP enrichment to a
minimal number of tracks for display on IGV. This approach provides more useful
information than the heatmap. The input file is *pca_array.pc.txt, and output
are the first few tracks that explain the most variance (e.g. *pc1.bedgraph).
Input format:
Interval NAME MEAN PC1 PC2 ...
hsXIST_0_100 hsXIST_0_100 value value value ...
Example:
cd /Users/lu/Documents/chang/eCLIP/fripsum
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_array.pc.txt 7 \
frip_gap_hsXIST_geometric_100nt_pca_array
For the PCA results from gene level, need to transpose the matrix###############
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_gene.pc.txt 7 array \
frip_gap_hsXIST_geometric_100nt_pca_gene
"""
import sys
if len(sys.argv) < 4:
print "Usage: python pca2tracks.py pca_file track_number dim output_prefix"
print "dim: gene or array"
sys.exit()
pcafile = open(sys.argv[1], 'r')
ntracks = int(sys.argv[2])
dimension = sys.argv[3]
outputprefix = sys.argv[4]
pcadata = pcafile.readlines()[1:] #input as a list, remove the header line
pcamatrix = [line.strip('\n').split() for line in pcadata]
meanbedgraph = open(outputprefix + "_mean.bedgraph", 'w') #output mean bedgraph
meanout = ''
for row in pcamatrix: meanout += ('\t'.join(row[0].split('_') + row[2:3]) +'\n')
meanbedgraph.write(meanout)
meanbedgraph.close()
for i in range(ntracks): #output major principal component tracks
pctrack = open(outputprefix + '_pc' + str(i+1) + '.bedgraph', 'w')
pctrackout = ''
for row in pcamatrix:
pctrackout += ('\t'.join(row[0].split('_') + row[3+i:4+i]) + '\n')
pctrack.write(pctrackout)
pctrack.close()
pcafile.close()
|
[
"noreply@github.com"
] |
zhipenglu.noreply@github.com
|
31b58b74e967def34fcd7730cc4170cb953bf04e
|
d23ddee7237f138d003b44d859d12a9f8385cfce
|
/app.py
|
acc6da3dca2bd6c32be0c58631cb41c1bbe758e2
|
[] |
no_license
|
Kelby-Wilson/sqlalchemy_challenge
|
a6497bde709e8edf838949b75cf1e2a7fa011074
|
3b8ba0e3a1ac237ae319532eba892445b5be4912
|
refs/heads/master
| 2022-12-03T17:29:22.003559
| 2020-08-26T16:52:57
| 2020-08-26T16:52:57
| 262,371,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,189
|
py
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
import datetime as dt
# Relative Date
###
# Database Setup
###
# echo=True logs every SQL statement; check_same_thread is disabled because
# the single module-level session below is shared across Flask request
# threads.  NOTE(review): sharing one Session across threads is not
# thread-safe in general — confirm this is acceptable for this app.
engine = create_engine("sqlite:///hawaii.sqlite", connect_args={'check_same_thread': False}, echo=True)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
###
# Flask Setup
###
app = Flask(__name__)
###
# Flask Routes
###
@app.route("/")
def welcome():
"""List all available api routes."""
return"""<html>
<h1>List of all available Honolulu, HI API routes</h1>
<ul>
<br>
<li>
Return a list of precipitations from last year:
<br>
<a href="/api/v1.0/precipitation">/api/v1.0/precipitation</a>
</li>
<br>
<li>
Return a JSON list of stations from the dataset:
<br>
<a href="/api/v1.0/stations">/api/v1.0/stations</a>
</li>
<br>
<li>
Return a JSON list of Temperature Observations (tobs) for the previous year:
<br>
<a href="/api/v1.0/tobs">/api/v1.0/tobs</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided:
<br>Replace <start> with a date in Year-Month-Day format.
<br>
<a href="/api/v1.0/2017-01-01">/api/v1.0/2017-01-01</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive:
<br>
Replace <start> and <end> with a date in Year-Month-Day format.
<br>
<br>
<a href="/api/v1.0/2017-01-01/2017-01-07">/api/v1.0/2017-01-01/2017-01-07</a>
</li>
<br>
</ul>
</html>
"""
@app.route("/api/v1.0/precipitation")
def precipitation():
# Docstring
"""Return a list of precipitations from last year"""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Get the first element of the tuple
max_date = max_date[0]
# Calculate the date 1 year ago from today
# The days are equal 366 so that the first day of the year is included
year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
# Perform a query to retrieve the data and precipitation scores
results_precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()
# Convert list of tuples into normal list
precipitation_dict = dict(results_precipitation)
return jsonify(precipitation_dict)
@app.route("/api/v1.0/stations")
def stations():
# Docstring
"""Return a JSON list of stations from the dataset."""
# Query stations
results_stations = session.query(Measurement.station).group_by(Measurement.station).all()
# Convert list of tuples into normal list
stations_list = list(np.ravel(results_stations))
return jsonify(stations_list)
@app.route("/api/v1.0/tobs")
def tobs():
# Docstring
"""Return a JSON list of Temperature Observations (tobs) for the previous year."""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Get the first element of the tuple
max_date = max_date[0]
# Calculate the date 1 year ago from today
# The days are equal 366 so that the first day of the year is included
year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
# Query tobs
results_tobs = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_ago).all()
# Convert list of tuples into normal list
tobs_list = list(results_tobs)
return jsonify(tobs_list)
@app.route("/api/v1.0/<start>")
def start(start=None):
# Docstring
"""Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided"""
from_start = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).group_by(Measurement.date).all()
from_start_list=list(from_start)
return jsonify(from_start_list)
@app.route("/api/v1.0/<start>/<end>")
def start_end(start=None, end=None):
# Docstring
"""Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive"""
between_dates = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all()
between_dates_list=list(between_dates)
return jsonify(between_dates_list)
# Start Flask's built-in development server (debug mode; not for production).
if __name__ == '__main__':
    app.run(debug=True)
|
[
"noreply@github.com"
] |
Kelby-Wilson.noreply@github.com
|
d930901a91772e4d664bb3b770867aa984a3e77f
|
08aadcd04337ee45b01e6bd7f5cc9d87cd433bfd
|
/basic_projects/2D lists and nested loops.py
|
3098b812d793bcbe6b1a711c279db30a87fbdf59
|
[] |
no_license
|
AnthonyPerugini/Training_projects
|
eb12acc36f0c2562ea9da6ca76221ea32bd73d38
|
bf4d8027740abedbcce296675a7484fae5e1095f
|
refs/heads/master
| 2021-03-10T04:43:14.473043
| 2020-03-16T18:31:32
| 2020-03-16T18:31:32
| 246,419,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
# A jagged 2-D grid: three full rows of digits plus a single-element last row.
number_grid = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9],
    [0]
]
# Walk the grid row by row and print each cell on its own line.
for current_row in number_grid:
    for cell in current_row:
        print(cell)
|
[
"Anthony.r.perugini@gmail.com"
] |
Anthony.r.perugini@gmail.com
|
e9413bfa3cd627adaf3cf6bb968577c84e905767
|
2b84bd7cdcfe9c921fa60fefa2ee1257df33ce38
|
/utils/email_util.py
|
713ad3955e46c9b1c3cf07d310a5c6f928855407
|
[] |
no_license
|
webclinic017/market_monitor
|
f9cfa4a8443b81830abd9e5900509c7dfdab3e37
|
9a8a9b6181e1ab4f5d3dad32641ac941c5e4fabf
|
refs/heads/main
| 2023-07-28T00:39:34.481170
| 2021-09-15T12:39:38
| 2021-09-15T12:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
# Copyright (c) 2015 Shiye Inc.
# All rights reserved.
#
# Author: zsx <zhaoshouxin@shiyejinrong.com>
# Date: 2019-03-07
import smtplib
from email.mime.text import MIMEText
from docs.config.email_cfg.config import mail_info as m
class SchedulerError(RuntimeError):
    """Raised for scheduler failures; carries the associated time.

    Attributes:
        time: the time value associated with the failure.
    """
    def __init__(self, time):
        # Forward the payload to RuntimeError so str(exc) and exc.args carry
        # it; the original left args empty, producing a blank message.
        super(SchedulerError, self).__init__(time)
        self.time = time
class EmailUtil(object):
    """Thin smtplib wrapper that sends plain-text alert mails.

    Account credentials and the recipient list come from the project's
    e-mail config module (imported above as `m`).
    """
    def __init__(self):
        # Name-mangled attributes hold the configured SMTP account details.
        self.__mail_host = m.mail_host
        self.__mail_user = m.mail_user
        self.__mail_pass = m.mail_pass
        self.__mail_to = m.mail_to
    def send_email(self, email_title, email_content):
        """Send email_content as a gb2312 plain-text mail; no-op if empty."""
        if email_content is None or len(email_content) == 0:
            return
        email_struct = MIMEText(email_content, _subtype="plain",
                                _charset="gb2312")
        email_struct["Subject"] = email_title
        email_struct["From"] = "".join(["市场预警", "<", self.__mail_user, ">"])
        email_struct["To"] = ";".join(self.__mail_to)
        # server = smtplib.SMTP()
        # Linux: implicit-TLS SMTP on port 465.
        server = smtplib.SMTP_SSL(self.__mail_host, 465)
        # NOTE(review): SMTP_SSL(host, 465) already connects; this explicit
        # connect() re-establishes the session — likely redundant, confirm.
        server.connect(self.__mail_host)
        server.login(self.__mail_user, self.__mail_pass)
        server.sendmail(
            email_struct["From"], self.__mail_to, email_struct.as_string())
        server.close()
def send_email(err_info, email_title="市场预警测试邮件"):
    """Module-level convenience wrapper: mail err_info via EmailUtil."""
    mailer = EmailUtil()
    mailer.send_email(email_title, err_info)
# Manual smoke test: actually sends a mail through the configured account.
if __name__ == '__main__':
    send_email("2222")
|
[
"1125191117@qq.com"
] |
1125191117@qq.com
|
fcafef610287029b1a2c87cfaac8bd9b6790c9b0
|
285f136156a925b05b5d51f3a4021813a455b971
|
/backend/handlers/__init__.py
|
73a855f9a4321fcd162797bf6c8a09cc1dbcc598
|
[
"Apache-2.0"
] |
permissive
|
kubikvid/weather-this-day
|
41185aacbbdcf65578576bf6f5974d00a00a3275
|
ada662f191ee122190168265d3d50e925ef26630
|
refs/heads/master
| 2020-05-21T21:24:21.959135
| 2019-05-13T01:42:14
| 2019-05-13T01:42:14
| 186,151,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Copyright (c) 2019. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
# Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
# Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
# Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
# Vestibulum commodo. Ut rhoncus gravida arcu.
from handlers import history
|
[
"moonquiz@ya.ru"
] |
moonquiz@ya.ru
|
7fd9141000ee1b4be8b4f5dd9b969abf33c9eac9
|
d5dbae52bbfded54436a665f614a2793029371ea
|
/models/model2csv.py
|
64ec1c92bead5bf32dd3801e0c4c0df6e534c482
|
[
"Apache-2.0"
] |
permissive
|
bmarggraff/allie
|
88b97acffebe2c1876b379d478b293bfb9edfefb
|
2e2f8780f0a42229b582703455e9ce1d42cf9f96
|
refs/heads/master
| 2022-11-28T02:27:55.100030
| 2020-08-07T19:55:46
| 2020-08-07T19:55:46
| 285,911,411
| 1
| 0
| null | 2020-08-07T20:03:08
| 2020-08-07T20:03:07
| null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Creates an excel sheet of all currently trained models with their model performances;
useful to summarize all modeling sessions quickly; outputs to current directory.
Usage: python3 model2csv.py
'''
import os, json
import pandas as pd
def id_folder():
    """Scan every model directory and collect per-model metrics.

    For each non-dotted subfolder of audio/text/image/video/csv model
    directories, reads ``<folder>/model/<folder>.json`` (when present) and
    records its ``metrics`` entry together with the model name.  Missing or
    malformed directories are silently skipped (best-effort scan).

    Returns:
        (metrics_list, model_names): parallel lists of metrics dicts and
        model names (json filename minus its extension).
    """
    start_dir = os.getcwd()
    model_dirs = ['audio_models', 'text_models', 'image_models',
                  'video_models', 'csv_models']
    all_metrics = list()
    all_names = list()
    for model_dir in model_dirs:
        try:
            os.chdir(start_dir)
            os.chdir(model_dir)
            # Entries without a '.' are treated as model folders.
            subfolders = [entry for entry in os.listdir()
                          if entry.find('.') < 0]
            type_dir = os.getcwd()
            for folder in subfolders:
                os.chdir(type_dir)
                os.chdir(folder)
                os.chdir('model')
                summary_name = folder + '.json'
                for entry in os.listdir():
                    if entry == summary_name:
                        summary = json.load(open(summary_name))
                        all_metrics.append(summary['metrics'])
                        all_names.append(summary_name[0:-5])
        except:
            # Best-effort: skip directories that are missing or lack the
            # expected layout (mirrors the original silent-skip behavior).
            pass
    return all_metrics, all_names
# --- Script body: gather metrics for every trained model, dump a CSV ---
metrics_list, model_names=id_folder()
accuracies=list()
roc_curve=list()
# Pull the two headline metrics out of each model's metrics dict.
for i in range(len(model_names)):
	accuracies.append(metrics_list[i]['accuracy'])
	roc_curve.append(metrics_list[i]['roc_auc'])
data={'model names': model_names,
      'accuracies': accuracies,
      'roc_auc': roc_curve}
print(model_names)
print(accuracies)
print(roc_curve)
# Write the summary table to ./models.csv in the current directory.
df=pd.DataFrame.from_dict(data)
df.to_csv('models.csv')
|
[
"noreply@github.com"
] |
bmarggraff.noreply@github.com
|
23cfee1ada500316d73bc8ad4983d16ddaefb85b
|
c71ad354837830987f17ab93ca3f7ceb6d405311
|
/khajuri/bin/pipeline_test.py
|
772f8726ae18693915e513ab87a0fac87cf3679f
|
[] |
no_license
|
zigvu/samosa
|
f353248a75fe7a83a8a59b375b104abec8d1d855
|
3962b3c7bab9d26bf871d257e15dd39c45ffaddd
|
refs/heads/master
| 2021-03-30T18:12:58.441901
| 2016-02-20T00:12:30
| 2016-02-20T00:12:30
| 50,481,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
#!/usr/bin/env python
import logging
import os
import glob
import argparse
import _init_paths
from khajuri.pipeline.run_pipeline import RunPipeline
from khajuri.multi.clip import Clip
from tools.files.file_utils import FileUtils
def parse_args():
  """Parse and return the command-line arguments (clip folder, output path)."""
  arg_parser = argparse.ArgumentParser(description='Test on zigvu model on clips')
  arg_parser.add_argument('--clip_folder', dest='clip_folder',
                          help='Path to clips', required=True)
  arg_parser.add_argument('--output_path', dest='output_path',
                          help='Output folder path', required=True)
  return arg_parser.parse_args()
if __name__ == '__main__':
  logging.basicConfig(level=logging.DEBUG)
  logging.debug('Start testing.')
  args = parse_args()
  # A single pipeline instance consumes clips from its queue.
  runPipeline = RunPipeline()
  allClipFiles = glob.glob("{}/*.mp4".format(args.clip_folder))
  for clipFile in allClipFiles:
    # Clip id = file basename without extension; results go to a matching
    # per-clip output folder.
    clipNumber = os.path.splitext(os.path.basename(clipFile))[0]
    clipOutPath = os.path.join(args.output_path, clipNumber)
    clip = Clip()
    clip.clip_id = clipNumber
    clip.clip_path = clipFile
    clip.result_path = os.path.join(clipOutPath, 'clip.pkl')
    # Enqueue before starting; the pipeline drains the queue when started.
    runPipeline.clipdbQueue.put(clip)
    logging.debug('RabbitToClip: process clip: {}'.format(clip.clip_id))
  runPipeline.start()
  runPipeline.join()
|
[
"eacharya@gmail.com"
] |
eacharya@gmail.com
|
9c398ed840e6c2bc5aa61edeb589e34f35fb1ef5
|
c36d43dc3ebb5ab987bda1cd7329a6fab58af45b
|
/semnet/interp/evaluator.py
|
5575ddf4edf6448507606f8f00ce119d1381ded7
|
[] |
no_license
|
patgrasso/semnet
|
e37cacfdab0903b0b5aed5ac010e071f24decb65
|
e5fd8912a1768f3f59dee937199feaa2158c925c
|
refs/heads/master
| 2021-01-12T06:38:10.173145
| 2016-12-31T00:04:35
| 2016-12-31T00:04:35
| 77,401,845
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
class Evaluator(object):
    """Evaluates nodes against a variable environment.

    Attributes:
        env: a mapping-like object exposing ``get(key)``.
    """
    def __init__(self, env):
        self.env = env
    def valueof(self, node, node_list):
        """Return the environment value bound to the node's word.

        Args:
            node: mapping with a "word" key naming the binding to look up.
            node_list: sibling nodes; currently unused, kept for API
                compatibility with existing callers.

        Returns:
            The bound value, or None if the word is unbound.
        """
        # Bug fix: the original performed the lookup but discarded the
        # result, so valueof() always returned None.
        return self.env.get(node["word"])
|
[
"pgrasso@stevens.edu"
] |
pgrasso@stevens.edu
|
07bf5e876ec76acc417629cf2befc0a819977d2d
|
4a4d727cab138c5a3bf3bfb05d48084ba06bd5d4
|
/Python master/MODULO 7 - API/primeiro_api.py
|
1bd2268ba3f4021a784fbde76a9f426ed37e5ca4
|
[] |
no_license
|
RoniNunes/python
|
9e9d61e69deab02ee9e9955a5e95c7e6ef610e7a
|
52f6b068f469fc63907b84f67e6005f9b7964442
|
refs/heads/master
| 2023-06-16T11:37:35.046750
| 2021-07-02T11:37:32
| 2021-07-02T11:37:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
from flask import Flask, jsonify, request
app = Flask(__name__)
# In-memory post store; list index doubles as the post id in the routes below.
postagens = [
    {
        'titulo': 'Api com Flask',
        'autor': 'Roni nunes'
    },
    {
        'titulo': 'Voce ja usou o Selenium?',
        'autor': 'Roni nunes'
    },
    {
        'titulo': 'Como instalar o python',
        'autor': 'Roni nunes'
    }
]
# NOTE(review): this name is rebound by the view function `nova_postagem`
# defined further down, so this list is unreachable after import.
nova_postagem = [
    {
        'titulo': 'Nova postagem com Flask',
        'autor': 'Roni nunes'
    }]
@app.route('/postagens', methods=['GET'])
def obter_todas_postagens():
    """GET /postagens: return every stored post as JSON (HTTP 200)."""
    return jsonify(postagens), 200
@app.route('/postagens/<int:postagem_id>', methods=['GET'])
def obter_postagens_por_id(postagem_id): # The id is the list index of the post to fetch.
    """GET /postagens/<id>: return the post at that index.

    NOTE(review): an out-of-range id raises IndexError (HTTP 500).
    """
    return jsonify(postagens[postagem_id]), 200
@app.route('/postagens', methods=['POST'])
def nova_postagem():
    """POST /postagens: append the JSON request body as a new post."""
    # NOTE(review): this function shadows the module-level list also named
    # `nova_postagem` defined above; that list becomes unreachable.
    postagem = request.get_json()
    postagens.append(postagem)
    return jsonify({'mensagem': 'Recurso criado com sucesso'}), 200
@app.route('/postagens/<int:postagem_id>', methods=['PUT'])
def atualizar_postagem(postagem_id):
    """PUT /postagens/<id>: merge the JSON body into the post at that index."""
    alteracoes = request.get_json()
    postagem = postagens[postagem_id]
    postagem.update(alteracoes)
    return jsonify(postagem), 200
@app.route('/postagens/<int:postagem_id>', methods=['DELETE'])
def excluir_postagem(postagem_id):
    """DELETE /postagens/<id>: remove the post at that list index.

    Returns a confirmation message; an out-of-range id raises IndexError
    (HTTP 500), matching the other index-based routes.
    """
    # pop() bounds-checks and removes in one step; the original bound the
    # post to an unused local before `del`-ing it.
    postagens.pop(postagem_id)
    return jsonify({'mensagem': 'A postagem foi excluida com sucesso'}), 200
# Start the development server on http://localhost:5000 (debug mode).
if __name__ == '__main__':
    app.run(port=5000, host='localhost',debug=True)
|
[
"noreply@github.com"
] |
RoniNunes.noreply@github.com
|
e5679a098872822f28be752dec6bb6519196d5b7
|
8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac
|
/PySide/QtCore/QTimer.py
|
5e91243992b9f324a3a089a65f93db3242e8a538
|
[
"Apache-2.0"
] |
permissive
|
sonictk/python-skeletons
|
be09526bf490856bb644fed6bf4e801194089f0d
|
49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d
|
refs/heads/master
| 2020-04-06T04:38:01.918589
| 2016-06-09T20:37:43
| 2016-06-09T20:37:43
| 56,334,503
| 0
| 0
| null | 2016-04-15T16:30:42
| 2016-04-15T16:30:42
| null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# encoding: utf-8
# module PySide.QtCore
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtCore.so
# by generator 1.138
# no doc
# no imports
from QObject import QObject
class QTimer(QObject):
    """Auto-generated API skeleton of PySide's QtCore.QTimer.

    Every method body is a stub (``pass``) and the real signatures are
    unknown (see the generator comments); this file exists only so that
    tools/IDEs can resolve names offline.  Do not call these at runtime.
    """
    def interval(self, *args, **kwargs): # real signature unknown
        pass
    def isActive(self, *args, **kwargs): # real signature unknown
        pass
    def isSingleShot(self, *args, **kwargs): # real signature unknown
        pass
    def killTimer(self, *args, **kwargs): # real signature unknown
        pass
    def setInterval(self, *args, **kwargs): # real signature unknown
        pass
    def setSingleShot(self, *args, **kwargs): # real signature unknown
        pass
    def singleShot(self, *args, **kwargs): # real signature unknown
        pass
    def start(self, *args, **kwargs): # real signature unknown
        pass
    def startTimer(self, *args, **kwargs): # real signature unknown
        pass
    def stop(self, *args, **kwargs): # real signature unknown
        pass
    def timerEvent(self, *args, **kwargs): # real signature unknown
        pass
    def timerId(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *more): # real signature unknown; restored from __doc__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass
    # Class-level placeholders for Qt attributes/signals in the real binding.
    staticMetaObject = None
    timeout = None
    __new__ = None
|
[
"yliangsiew@blizzard.com"
] |
yliangsiew@blizzard.com
|
eb66be29af0e15d10254c571bd6fd7164a88478f
|
3b0a27a6fbaed8a3cba81a70f0142e99b8ce60c7
|
/blender/io_import_sprites/export_scripts.py
|
8a16ddcd39c4a45f817cbd941ce7ef358f390af0
|
[] |
no_license
|
sangohan/flumpkit
|
43b263bdf8076c5e02234b1ccd644370a93ec2d0
|
017a3f94b9363b719a6a502a4c42e66bfc305223
|
refs/heads/master
| 2021-01-16T20:31:51.898801
| 2013-08-07T18:03:58
| 2013-08-07T18:03:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,996
|
py
|
## Author: Daniel Gerson
##GPL 3.0 unless otherwise specified.
import bpy
from bpy.types import Operator
from bpy.types import Menu, Panel
import mathutils
import math
import os
import collections
import json
import re
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
IntProperty,
FloatProperty,
CollectionProperty,
)
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from bpy_extras.image_utils import load_image
from mathutils import Vector
from mathutils import Quaternion
#the from part represents directory and filenames
#the import part represents a class or method name etc
from bl_ui.space_view3d_toolbar import View3DPanel
print("LOADING: import_scripts.py!!!!")
from io_import_sprites.common import (
SpritesFunctions,
FlumpProps
)
class EXPORT_OT_flump_to_json(Operator, SpritesFunctions):
bl_idname = "export_sprites.flump_to_json"
bl_label = "Export Json"
bl_options = {'REGISTER', 'UNDO'}
props = bpy.props.PointerProperty(type=FlumpProps)
def execute(self, context):
## self.props = bpy.context.scene.FlumpProps
self.export_to_json(context)
return {'FINISHED'}
#inverts y axis
def transform_point(self, x, y, width, height):
return (x, height - y)
def transform_location(self, x, y):
return (x, -y)
#take transform of plane and convert into pivot
    def get_pivot(self, arm_name, bone_name, obj, width, height):
        """Derive the Flump pivot point for *obj*'s plane, in flipped-y coords.

        The bone's use_relative_parent flag selects between two offsets;
        `arm_name` is currently unused — the bone is looked up on
        bpy.data.armatures[0] (see TODO below).
        """
        #use relative
        #TODO, find by armature name
        if not bpy.data.armatures[0].bones[bone_name].use_relative_parent:
            # Non-relative parenting: pivot from the plane centre x / base y.
            tx = width /2.0
            ox = -obj.location.x +tx
            oy = -obj.location.y
            return self.transform_point(ox, oy, width, height)
        # Relative parenting: additionally offset y by half the height.
        tx = width /2.0
        ox = -obj.location.x +tx
        oy = -obj.location.y + (height /2.0)
        return self.transform_point(ox, oy, width, height)
    def export_to_json(self, context):
        """Export the armature animation as Flump movie data.

        Reads the Flump library JSON configured in the scene's FlumpProps,
        replaces its 'movies' list with a single movie built from the
        armature's flump_layer bones and their keyframes, then writes the
        JSON back to the same file in place.
        """
        #~ jsonFile = get_json_file();
        #~ print(jsonFile)
        props = bpy.context.scene.FlumpProps
        jsonFile = props.flump_library
        json_data=open(jsonFile)
        data = json.load(json_data)
        json_data.close()
        #we now have the file in data.
        #now create a new movies area
        movies = []
        data['movies'] = movies
        data['frameRate'] = bpy.context.scene.render.fps
        movie = {}
        movies.append(movie)
        movie['id'] = props.movie_id
        movie['layers'] = []
        #get layers
        armature_name = 'Armature'
        bpy.context.scene.objects.active = bpy.context.scene.objects[armature_name] #context now armature
        arm = bpy.context.scene.objects.active
        ob_act = bpy.context.scene.objects.active.animation_data.action
        curves = ob_act.fcurves
        bone_keys = bpy.context.object.pose.bones.keys() #some of these bones are layers
        # Only bones carrying a 'flump_layer' custom property become layers.
        layers = (b for b in bone_keys if 'flump_layer' in bpy.context.object.pose.bones[b])
        #Assumes one symbol per layer
        symbols = {}
        for child in arm.children:
            symbols[child.parent_bone] = child #object, not name
        layer_frames ={}
        #loop through curves, add keyframes to ALL bones that are influenced by this bone
        for curve_id, curve in enumerate(curves) :
            # The bone name is the quoted token inside the fcurve data path.
            obj_name =re.search(r'"(\w+)"', curve.data_path).group(1)
            if obj_name not in layer_frames:
                layer_frames[obj_name] = []
            for key in curve.keyframe_points :
                frame, value = key.co
                #add frame to ALL objects that share obj_name TODO (parents)
                layer_frames[obj_name].append(int(frame))
                # do something with curve_id, frame and value
##                self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(curve_id, frame, value))
            # Deduplicate and order this bone's keyframe numbers.
            layer_frames[obj_name] = sorted(list(set(layer_frames[obj_name])))
        #add parents keyframes to child
        for bone in bpy.data.armatures[0].bones[:]:
            parents = [p.name for p in bone.parent_recursive]
            for parent in parents:
                layer_frames[bone.name].extend(layer_frames[parent])
            layer_frames[bone.name] = sorted(list(set(layer_frames[bone.name])))
        sequence_length = int(bpy.context.scene.frame_end)
        layer_zdict = {}
        #loop through layer_frames
        for bone_name in layers:
            frames = layer_frames[bone_name]
            #add json layer
            json_layer = {}
            json_layer['name'] = bone_name
            json_keyframes = []
            json_layer['keyframes'] = json_keyframes
            zdepth = None
            keyframe_container = {}
            #old way, straight
            # Pass 1: one JSON keyframe per recorded frame.
            for i in range(len(frames)):
                nextframe = sequence_length
                if (i+1 < len(frames)):
                    nextframe = frames[i+1]
                json_frame, loc_z = self.create_keyframe(frames[i], bone_name,
                                                         armature_name, symbols)
                keyframe_container[frames[i]] = json_frame
            #fit to curve
            # Pass 2: insert extra keyframes so linear segments match curves.
            constants = (sequence_length, armature_name, bone_name, symbols)
            for i in range(len(frames)):
                nextframe = sequence_length
                if (i+1 < len(frames)):
                    nextframe = frames[i+1]
                self.fit_to_curve(frames[i], nextframe,
                                  keyframe_container, constants)
            #sort, add duration, add json to final list,
            #rotation hack
            # Pass 3: order keyframes, set durations, and unwrap rotations.
            frames = sorted(list(set(keyframe_container.keys())))
            rot_adjust = 0
            for i in range(len(frames)):
                nextframe = sequence_length
                if (i+1 < len(frames)):
                    nextframe = frames[i+1]
                json_frame = keyframe_container[frames[i]]
                json_keyframes.append(json_frame)
                json_frame['duration'] = nextframe - frames[i]
                #rotation hack (fixes smooth >360 flips, dislikes long transitions).
                json_frame['skew'][0] += rot_adjust
                json_frame['skew'][1] += rot_adjust
                if nextframe is not sequence_length:
                    rotation1 = json_frame['skew'][0]
                    rotation2 = keyframe_container[frames[i+1]]['skew'][0] + rot_adjust
                    if rotation1 - rotation2 > math.pi:
                        rot_adjust += 2*math.pi
                    if rotation1 - rotation2 < -math.pi:
                        rot_adjust -= 2*math.pi
            #find z depth order (useful to do this at the same time
            loc, rotQ, scale = self.get_bone_transform(0, bone_name)
            if zdepth is None: #only run on first keyframe
                zdepth = loc[2]
                if zdepth not in layer_zdict:
                    layer_zdict[zdepth] = []
                layer_zdict[zdepth].append(json_layer)
        #add json layers in correct zdepth order, as determined by first keyframe.
        for z in sorted(list(layer_zdict.keys())): #not thread safe ;-)
            for item in layer_zdict[z]:
                movie['layers'].append(item) #json_layer
##        self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(loc,rotQ.to_euler(),scale))
        # Overwrite the library file with the updated data in place.
        with open(jsonFile, 'w') as outfile:
            json.dump(data, outfile)
        return
#adds keyframes to match linear to curve.
    def fit_to_curve(self, start_frame, end_frame,
                     keyframe_container, constants):
        """Subdivide [start_frame, end_frame] until linear interpolation
        between the emitted keyframes matches the animated (curved) motion.

        Mutates keyframe_container in place, inserting keyframes wherever a
        straight-line blend between the interval's endpoints drifts from the
        true bone transform.  Refines by bisection on the first mismatch.
        """
        # Unpack the per-bone constants tuple built by the caller.
        sequence_length, armature_name, bone_name, symbols = constants
        for i in range(start_frame, end_frame):
            transform_start = None
            transform_end = None
            #generate start keyframe
            if start_frame not in keyframe_container:
                json_start, transform_start = self.create_keyframe(start_frame, bone_name,
                                                                   armature_name, symbols)
                keyframe_container[start_frame] = json_start
            else: #TODO redundant sometimes.
                transform_start = self.get_bone_transform(start_frame, bone_name)
            #generate end keyframe
            if end_frame not in keyframe_container:
                json_end, transform_end = self.create_keyframe(end_frame, bone_name,
                                                               armature_name, symbols)
                keyframe_container[end_frame] = json_end
            else: #TODO redundant sometimes.
                transform_end = self.get_bone_transform(end_frame, bone_name)
            #get transform of frame i (the curve-evaluated "truth")
            transform_i = self.get_bone_transform(i, bone_name)
            #interpolate start and end transforms at i
            percent = (i - start_frame) / (end_frame - start_frame)
            loc = transform_start[0] + (transform_end[0] - transform_start[0]) * percent
            rz_start = transform_start[1].to_euler().z
            rz_end = transform_end[1].to_euler().z
            rz = rz_start + (rz_end - rz_start) * percent
            scale = transform_start[2] + (transform_end[2] - transform_start[2]) * percent
            # Compare interpolated vs actual; match turns into an int code
            # (1=x, 2=y, 3=rotation) when a component is off tolerance.
            match = True
            if (abs(loc[0] - transform_i[0][0]) > 1): match = 1
            if (abs(loc[1] - transform_i[0][1]) > 1): match = 2
            ri = transform_i[1].to_euler().z
            # Angular difference in degrees, folded into [0, 360).
            angle_diff = ((ri - rz) / math.pi * 180) % 360
            if (angle_diff > 1 and angle_diff < 359): match = 3
            #TODO scale
            if match is True: #matches where it is supposed to be
                continue
            # Mismatch: bisect the interval and refine each half, then stop —
            # the recursive calls cover the rest of this interval.
            mid_frame = int((start_frame + end_frame) / 2)
            if mid_frame in [start_frame, end_frame]: return
            #recursion
            self.fit_to_curve(start_frame, mid_frame,
                              keyframe_container, constants)
            self.fit_to_curve(mid_frame, end_frame,
                              keyframe_container, constants)
            return
def get_bone_transform(self, frame, bone_name):
bpy.context.scene.frame_set(frame)
pose_bone = bpy.context.object.pose.bones[bone_name]
obj = pose_bone.id_data
matrix = obj.matrix_world * pose_bone.matrix
## loc, rotQ, scale = matrix.decompose()
return matrix.decompose()
    def create_keyframe(self, frame, bone_name, armature_name, symbols):
        """Build the JSON keyframe dict for bone_name at the given frame.

        Returns (json_frame, (loc, rotQ, scale)); the second element is the
        decomposed world-space transform the keyframe was built from.
        """
        json_frame = {}
        json_frame['index'] = frame
        #store frame values
        loc, rotQ, scale = self.get_bone_transform(frame, bone_name)
        #bounding box
        # NOTE(review): width/height assume the bound box is centred on the
        # origin (corner 0 at (-w/2, -h/2)) — confirm for all symbols.
        local_coords = symbols[bone_name].bound_box[:]
        coords = [p[:] for p in local_coords]
        width = coords[0][0] * -2
        height = coords[0][1] * -2
        x, y = self.transform_location(loc[0], loc[1])
        json_frame['loc'] = [x, y]
        # Z rotation is negated — presumably to flip into the export's
        # y-down screen convention; confirm against the target runtime.
        angle = -rotQ.to_euler().z #* math.pi / 180
        json_frame['skew'] = [angle, angle]
        json_frame['scale'] = [scale[0], scale[1]]
        json_frame['pivot'] = self.get_pivot(armature_name, bone_name, symbols[bone_name],
                                             width, height)
        json_frame['ref'] = symbols[bone_name].name
        return json_frame, (loc, rotQ, scale)
|
[
"daniel@mambo.co.za"
] |
daniel@mambo.co.za
|
800613bb979e2a651e7833167d3b6536f748963a
|
699add6df73ad158b8ebeb5f9de4aada5820f205
|
/facebook/app/posts/models/comments.py
|
51bab010f0aef4c5c779bd1f65e15e568916fbfe
|
[] |
no_license
|
ricagome/Api-Facebook-Clone
|
4f035ad280e6cb48d375fd87a9f62eecce67eb51
|
fae5c0b2e388239e2e32a3fbf52aa7cfd48a7cbb
|
refs/heads/main
| 2023-08-17T12:34:33.379017
| 2021-10-05T21:23:32
| 2021-10-05T21:23:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
"""Comment model."""
# Django
from django.db import models
# Utilities
from app.utils.models import FbModel
class Comment(FbModel):
    """A user's comment on a post, carrying a denormalized reaction count."""

    user = models.ForeignKey('users.User', on_delete=models.CASCADE)
    profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
    post = models.ForeignKey('posts.Post', on_delete=models.CASCADE)
    text = models.TextField(help_text='write a comment', max_length=250)
    reactions = models.IntegerField(default=0)

    def __str__(self):
        """Describe the comment as '@user has commented <text> on <post>'."""
        return f'@{self.user.username} has commented {self.text} on {self.post}'
|
[
"juliancamilohermida@hotmail.com"
] |
juliancamilohermida@hotmail.com
|
a254ecc9342fa1c6acec1d6dd7d1b9ee994945ee
|
8ae3e86fd736b65825a8c810560a73d17da74575
|
/solrdataimport/dataload/cqlbuilder.py
|
ee377653b8dbfc43530f90d778226878ff1f73fd
|
[
"Apache-2.0"
] |
permissive
|
pisceanfoot/solrdataimport
|
68d12e6ab96f7ed856e8187806981af8635920d6
|
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
|
refs/heads/master
| 2021-01-10T06:17:16.154994
| 2018-03-17T07:14:09
| 2018-03-17T07:14:09
| 49,885,709
| 2
| 1
|
Apache-2.0
| 2018-03-17T07:14:10
| 2016-01-18T15:29:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals, \
with_statement
import logging
from solrdataimport.cass.cassClient import CassandraClient
from solrdataimport.cass.cassSchema import CassSchema
logger = logging.getLogger(__name__)
class CqlBuilder(object):
    """Builds CQL select statements and bound-parameter lists for data import."""

    @classmethod
    def buildCacheKey(cls, cql, params):
        """Return a cache key combining the CQL text and its parameter values."""
        return cql + '_'.join(map(str, params))

    @classmethod
    def buildCql(cls, fullDataImport, table, table_key, rowKey=None):
        """Build "select * from <table>[ where k1 = ? and k2 = ?;]".

        For a full data import the table keys are skipped; otherwise the
        where clause covers table_key followed by any rowKey columns.
        """
        cql = 'select * from {0}'.format(table)
        appendKey = []
        if not fullDataImport and table_key:
            # BUGFIX: copy table_key instead of aliasing it, so appending the
            # rowKey columns below cannot mutate the caller's list.
            appendKey = list(table_key)
        if rowKey:
            for key in rowKey:
                appendKey.append(key)
        if appendKey:
            key = ' = ? and '.join(appendKey)
            cql = cql + ' where ' + key + ' = ?;'
        return cql

    @classmethod
    def buildParam(cls, fullDataImport, table, table_key, row=None, rowKey=None, **kwargs):
        """Build the bound-parameter list matching buildCql's placeholders.

        Returns None for a full import (no where clause).  Raises Exception
        when a required table_key value is missing from kwargs.
        """
        if fullDataImport:
            return None
        params = []
        if table_key:
            for x in table_key:
                if x not in kwargs:
                    # BUGFIX: actually interpolate the key name (the message
                    # was previously passed as unformatted extra args).
                    raise Exception('key %s not found in param' % x)
                column_type = cls.__fetchFieldType(table, x)
                params.append(CassandraClient.wrapper(column_type, kwargs.pop(x)))
        if row and rowKey:
            for key in rowKey:
                fetchKey = rowKey[key].lower()
                column_type = cls.__fetchFieldType(table, key)
                params.append(CassandraClient.wrapper(column_type, row[fetchKey]))
        return params

    @classmethod
    def __fetchFieldType(cls, table, field):
        """Look up the Cassandra column type of table.field (case-insensitive)."""
        logger.debug('fetch filed type for table "%s" field "%s"', table, field)
        schema = CassSchema.load(table)
        field_name_lower = field.lower()
        if field_name_lower in schema:
            return schema[field_name_lower]
        else:
            logger.error('field "%s" not in table "%s"', field, table)
            # BUGFIX: format the message (was passed as unformatted extra args).
            raise Exception('field "%s" not in table "%s"' % (field, table))
|
[
"pisceanfoot@gmail.com"
] |
pisceanfoot@gmail.com
|
5483a62a0289eaf03b82b517c8e78dd11f7e8a9d
|
4a2f163e603f90d5b9a4b2a100d7bc7bc77d1c95
|
/predicting_biological_response/hemy_example.py
|
401b7f3d5dd2f883930c7bfdf5ca5cfa2b058519
|
[] |
no_license
|
tusonggao/data_cck
|
d781334bd1d425f6ecd613ebdb194835846e3adb
|
91d48589e8431fd00d70348dcb049c52fdcd2c7f
|
refs/heads/master
| 2020-04-09T03:59:09.931284
| 2020-01-26T15:54:14
| 2020-01-26T15:54:14
| 160,005,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# https://blog.csdn.net/data_scientist/article/details/79036382
# https://blog.csdn.net/Gin077/article/details/84339790
# https://github.com/rushter/heamy
|
[
"tusonggao@163.com"
] |
tusonggao@163.com
|
20cb0d0b09a6ffefdcad9798b490f37d638c9fec
|
73ffeccb2b50320536e375c255c1a48f5dfa4493
|
/quantified_self_project/settings.py
|
080b42090d5b843aa1ce6c8b14cd8290e86b11be
|
[] |
no_license
|
justinetroyke/qs-django
|
0db7737b96d5deb1e3c6f81a25097b87a4da61c5
|
095524f8d0e8e83e702bfb02dbab8fb6bd650d17
|
refs/heads/master
| 2020-03-27T13:34:49.323224
| 2018-08-29T15:02:51
| 2018-08-29T15:02:51
| 146,617,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
"""
Django settings for quantified_self_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'zztez77xu&)++b!lnr+1yeis@sqced!id%6g-n%v6y3)64z9=9'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api',             # project app exposing the REST endpoints
    'rest_framework',  # Django REST Framework
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'quantified_self_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'quantified_self_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# NOTE(review): AllowAny leaves every endpoint public — confirm intentional.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny'
    ]
}

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
|
[
"jjtroyke@gmail.com"
] |
jjtroyke@gmail.com
|
a77ed31a71760f495bdfed54cbe1295c506714c3
|
6dd65ba20f60ee02e5d449d1bbe61865a993ab3b
|
/Monthly_Bussiest_Route.py
|
9d0eb6636f660d57d29a3156c505f071b3bb0262
|
[] |
no_license
|
subhanshugpt07/Aviation_Big_Data_2017
|
a065aa52afaa287d489b88cd89c2df3544521fb0
|
fd5d68c6f9dfa92853ba67e2cfceda8d15f602bb
|
refs/heads/master
| 2021-07-03T08:20:12.739348
| 2017-09-24T23:21:44
| 2017-09-24T23:21:44
| 104,683,057
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
from pyspark.sql.functions import *
import csv
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark import SparkContext
from pyspark.sql import HiveContext
from pyspark.sql.functions import *
from pyspark.sql.functions import udf
from pyspark.sql.types import BooleanType
from pyspark.sql import Row
import csv
from pyspark.sql import SQLContext
def parseCSV(idx, part):
    """Parse one partition of the on-time CSV into pyspark Rows.

    Skips the header row of the first partition.  Each flight's route is
    canonicalized so ORIGIN is the lexicographically smaller airport code,
    making A->B and B->A count as the same route.  MONTH is numbered 1-36
    across 2014-2016; rows from any other year are dropped.
    """
    if idx == 0:
        part.next()  # skip the CSV header (Python 2 iterator protocol)
    # Offset added to the calendar month so months run 1..36 over 3 years.
    # Collapses the original six copy-pasted year/direction branches.
    month_offset = {'2014': 0, '2015': 12, '2016': 24}
    for p in csv.reader(part):
        offset = month_offset.get(p[0])
        if offset is None:
            continue  # not a year we track
        # Canonical route direction: smaller airport code first.
        if p[14] < p[23]:
            origin, dest = p[14], p[23]
        else:
            origin, dest = p[23], p[14]
        # NOTE(review): the *_AIRPORT_ID columns are taken positionally
        # (p[11]/p[20]) and are NOT swapped with the codes above — this
        # mirrors the original behaviour; confirm it is intended.
        yield Row(YEAR=p[0],
                  MONTH=int(p[2]) + offset,
                  ORIGIN=origin,
                  ORIGIN_AIRPORT_ID=p[11],
                  DEST=dest,
                  DEST_AIRPORT_ID=p[20],
                  ROUTE=(origin, dest))
def main(sc):
    """Count flights per canonical route per month and write a pivot CSV."""
    spark = HiveContext(sc)
    sqlContext = HiveContext(sc)
    print "holaaaaa"
    # Parse every matching yearly CSV into Rows, partition-aware.
    rows = sc.textFile('../lmf445/Flight_Project/Data/864625436_T_ONTIME_2*.csv').mapPartitionsWithIndex(parseCSV)
    df = sqlContext.createDataFrame(rows)
    # Pivot: one row per route, one column per month (1-36), values = counts.
    busiest_route_month_pivot = \
        df.select('ORIGIN_AIRPORT_ID', 'ROUTE', 'MONTH') \
        .groupBy('ROUTE').pivot('MONTH').count()
    busiest_route_month_pivot.toPandas().to_csv('Output/MonthlyRoutes.csv')


if __name__ == "__main__":
    sc = SparkContext()
    main(sc)


# In[ ]:
|
[
"sg4595@nyu.edu"
] |
sg4595@nyu.edu
|
91b43cda449292a11f4a69bb1dffb18b7872d0b9
|
32349a7406af3f6926e508dd4154a9042cd8a0b6
|
/DAA/Dynammic Programming/edit_distance.py
|
3fd4b208d55bfa264a9def65b8de2f5664df98c2
|
[] |
no_license
|
anumehaagrawal/LabWork-Sem-4
|
d78b95b61b2ec94d1ad143768200b739d40c2105
|
782430f67bb423b84749295a3fef61f241293032
|
refs/heads/master
| 2021-05-12T07:30:30.746342
| 2018-04-17T03:11:12
| 2018-04-17T03:11:12
| 117,244,508
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
def edit_distance(str1, str2, n, m):
    """Print and return the Levenshtein distance between str1 and str2.

    n and m are the lengths of str1 and str2 respectively.

    BUGFIXES vs the previous version: the DP table is now (n+1) x (m+1)
    so the empty-prefix row/column exists; the base cases are no longer
    overwritten (the comparison branch was not chained with elif); and the
    characters are indexed with i-1 / k-1 to account for the extra row.
    """
    # dp[i][k] = edit distance between str1[:i] and str2[:k]
    dp_array = [[0 for _ in range(m + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        for k in range(m + 1):
            if i == 0:
                dp_array[i][k] = k          # build str2[:k] by k insertions
            elif k == 0:
                dp_array[i][k] = i          # erase str1[:i] by i deletions
            elif str1[i - 1] == str2[k - 1]:
                dp_array[i][k] = dp_array[i - 1][k - 1]
            else:
                # 1 + best of insert / delete / substitute.
                dp_array[i][k] = 1 + min(dp_array[i][k - 1],
                                         dp_array[i - 1][k],
                                         dp_array[i - 1][k - 1])
    print(dp_array[n][m])
    return dp_array[n][m]
def main():
    """Demo entry point: report the distance between two sample words."""
    first = "hello"
    second = "heeeee"
    edit_distance(first, second, len(first), len(second))


main()
|
[
"anuzenith29@gmail.com"
] |
anuzenith29@gmail.com
|
d0e2832e8ee5e98f43faaa16e7637d13c046db78
|
29fc564df8ee16a2d140cbd150260e04f4ddc5c5
|
/0x0A-python-inheritance/10-square.py
|
6c8775c89972ea3d1271833f32c5db7e5717d8fe
|
[] |
no_license
|
ChristianAgha/holbertonschool-higher_level_programming
|
9359fdf4e3f30ed4422a0af59672ac5ff397d4a2
|
cce59b31aba3e2a09cb4bf76a6fcfeefa7ab5031
|
refs/heads/master
| 2021-01-20T07:15:31.258319
| 2017-09-27T05:53:25
| 2017-09-27T05:53:25
| 89,984,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
#!/usr/bin/python3
"""Geometry Module"""
class BaseGeometry:
    """Base class offering integer validation for geometry subclasses."""

    def area(self):
        """Subclasses must override; always raises here."""
        raise Exception("area() is not implemented")

    def integer_validator(self, name, value):
        """Raise TypeError unless value is exactly an int, ValueError unless > 0."""
        # Deliberate type() check (not isinstance) so bool is rejected too.
        if type(value) is not int:
            raise TypeError(f"{name} must be an integer")
        if value <= 0:
            raise ValueError(f"{name} must be greater than 0")
class Rectangle(BaseGeometry):
    """A rectangle with validated positive integer width and height."""

    def __init__(self, width, height):
        """Validate then store the dimensions.

        Raises:
            TypeError: if width or height is not an int.
            ValueError: if width or height is not > 0.
        """
        # BUGFIX: validate *before* assigning so an invalid value is never
        # stored on the instance, and call through self so subclass
        # overrides of integer_validator are honoured.
        self.integer_validator("width", width)
        self.integer_validator("height", height)
        self.__width__ = width
        self.__height__ = height

    def __str__(self):
        """Return the '[Rectangle] width/height' description."""
        return "[Rectangle] {}/{}".format(self.__width__, self.__height__)

    def area(self):
        """Return width * height."""
        return self.__width__ * self.__height__
class Square(Rectangle):
    """A square: a Rectangle whose sides are equal."""

    def __init__(self, size):
        """Remember the size, then initialize the underlying Rectangle."""
        self.__size__ = size
        super().__init__(size, size)

    def area(self):
        """Return the square's area (size squared)."""
        return self.__size__ * self.__size__
|
[
"christianagha@gmail.com"
] |
christianagha@gmail.com
|
24c336f380a817f634b1f446450fdffa2ad476f9
|
7ace4c9742af543db1965afec55b115b38d70aea
|
/programs/classconsrtuctor.py
|
1d91c88cb6d14bb1fccabcfaaa58948dd4b781f5
|
[] |
no_license
|
abhis021/C-DAC
|
8a7472517fb9d664cdcf1d6b33146219da970943
|
cd002a5740f63aa6fd25b982a4c7f2942877f12d
|
refs/heads/main
| 2023-08-25T03:36:35.726671
| 2021-10-17T07:41:27
| 2021-10-17T07:41:27
| 416,581,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
class partyanimal:
    """Tracks how many times a named animal has partied."""

    x = 0        # party counter; shadowed per-instance on first party()
    name = ' '   # placeholder, replaced in __init__

    def __init__(self, name1):
        """Remember this animal's name."""
        self.name = name1

    def party(self):
        """Bump the counter and announce the new total."""
        self.x += 1
        print(self.name, 'party count', self.x)
# Demo: two independent animals keep separate party counts.
an=partyanimal('sally')
an.party()
na=partyanimal('jim')
na.party()
na.party()
|
[
"abhisheku722@gmail.com"
] |
abhisheku722@gmail.com
|
d24b0c9ae9dcf47759d369bdaf972fc87c046577
|
8dfd0de8519bf29565cf44ac342587a2b93fb086
|
/sonar.py
|
6dbb5b8b2981536bd1f86487a0012dbc577fb58c
|
[] |
no_license
|
ThePfarrer/Invent-Your-Own-Games
|
d058fdbb5f7408ab5ac3b4a301298fda62b0d458
|
ae13a457277f0cad53185bb1d611203eb78c22b0
|
refs/heads/master
| 2023-02-09T17:57:11.661474
| 2021-01-06T18:46:41
| 2021-01-06T18:46:41
| 323,171,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,797
|
py
|
# Sonar Treasure Hunt
import random
import sys
import math
def get_new_board():
    """Create and return a fresh 60x15 board data structure.

    The board is a list of 60 column lists, each holding 15 single-character
    strings; water is a random mix of '~' and '`' for visual texture.
    """
    columns = []
    for _x in range(60):
        water = []
        for _y in range(15):
            # Coin flip between the two water characters.
            water.append('~' if random.randint(0, 1) == 0 else '`')
        columns.append(water)
    return columns
def draw_board(board):
    """Print the board with coordinate rulers above and below it."""
    # Ruler showing the tens digits (1-5) spread across the 60 columns.
    tens_digits_line = ' '
    for tens in range(1, 6):
        tens_digits_line += (' ' * 9) + str(tens)
    print(tens_digits_line)
    print(' ' + ('0123456789' * 6))
    print()
    # One printed line per board row, labelled with the row number on
    # both sides; rows below 10 get a padding space to stay aligned.
    for row in range(15):
        pad = ' ' if row < 10 else ''
        cells = ''.join(board[column][row] for column in range(60))
        print(f'{pad}{row} {cells} {row}')
    print()
    print(' ' + ('0123456789' * 6))
    print(tens_digits_line)
def get_random_chests(num_chests):
    """Return num_chests distinct chest positions as [x, y] int lists."""
    chests = []
    while len(chests) < num_chests:
        candidate = [random.randint(0, 59), random.randint(0, 14)]
        # Reject duplicates so every chest occupies a unique square.
        if candidate not in chests:
            chests.append(candidate)
    return chests
def is_on_board(x, y):
    """Return True when (x, y) falls within the 60x15 board, else False."""
    return 0 <= x <= 59 and 0 <= y <= 14
def make_move(board, chests, x, y):
    """Drop a sonar device at (x, y) and report the outcome.

    Marks the board with the rounded distance to the nearest chest (or 'X'
    when out of range), removes a chest from chests on a direct hit, and
    returns the message string describing the result.
    """
    # Distance to the closest remaining chest; 100 when no chest is in play
    # (any real chest is nearer than that).
    nearest = min(
        (math.sqrt((cx - x) ** 2 + (cy - y) ** 2) for cx, cy in chests),
        default=100,
    )
    nearest = round(nearest)
    if nearest == 0:
        # The device landed directly on a chest — collect it.
        chests.remove([x, y])
        return 'You have found a sunken treasure chest!'
    if nearest < 10:
        board[x][y] = str(nearest)
        return f'Treasure detected at a distance of {nearest} from the sonar device.'
    board[x][y] = 'X'
    return 'Sonar did not detect anything. All treasure chests out of range.'
def enter_player_move(previous_moves):
    """Prompt until the player enters a valid, unused move.

    Returns a two-item list of int x, y coordinates.  Typing 'quit' exits
    the program.  previous_moves is the list of moves already played, used
    to reject repeats.
    """
    print('Where do you want to drop the next sonar device? (0-59 0-14) (or type quit)')
    while True:
        move = input()
        if move.lower() == 'quit':
            print('Thanks for playing!')
            sys.exit()
        move = move.split()
        # Valid input: exactly two integer tokens that land on the board.
        if len(move) == 2 and move[0].isdigit() and move[1].isdigit() and is_on_board(int(move[0]), int(move[1])):
            if [int(move[0]), int(move[1])] in previous_moves:
                print('You already moved there.')
                continue
            return [int(move[0]), int(move[1])]
        print('Enter a number from 0 to 59, a space, then a number from 0 to 14.')
def show_instructions():
    """Print the two-page game instructions, pausing for Enter between pages."""
    print('''Instructions:
You are the captain of the Simon, a treasure-hunting ship. Your current mission
is to use sonar devices to find three sunken treasure chests at the bottom of
the ocean. But you only have cheap sonar that finds distance, not direction.
Enter the coordinates to drop a sonar device. The ocean map will be marked with
how far away the nearest chest is, or an X if it is beyond the sonar device's
range. For example, the C marks are where chests are. The sonar device shows a
3 because the closest chest is 3 spaces away.
          1         2         3
012345678901234567890123456789012
0 ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1 ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2 ``X~~3~~~`~C~````````~~~`~```~``` 2
3 ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4 ~~~~~`````~`C```~`~`~~`~~```~```` 4
012345678901234567890123456789012
          1         2         3
(In the real game, the chests are not visible in the ocean.)
Press enter to continue...''')
    input()
    print('''
When you drop a sonar device directly on a chest, you retrieve it and the other
sonar devices update to show how far away the next nearest chest is. The chests
are beyond the range of the sonar device on the left, so it shows an X.
          1         2         3
012345678901234567890123456789012
0 ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1 ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2 ``X~~7~~~`~C~````````~~~`~```~``` 2
3 ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4 ~~~~~`````~`C```~`~`~~`~~```~```` 4
012345678901234567890123456789012
          1         2         3
The treasure chests don't move around. Sonar devices can detect treasure chests
up to a distance of 9 spaces. Try to collect all 3 chests before running out of
sonar devices. Good luck!
Press enter to continue...''')
    input()
# Main program: greet, optionally show instructions, then loop full games
# until the player declines a replay.
print('S O N A R !')
print()
print('Would you like to view the instructions? (yes/no)')
if input().lower().startswith('y'):
    show_instructions()

while True:
    # Game setup: fresh board, 3 hidden chests, 20 sonar devices.
    sonar_devices = 20
    the_board = get_new_board()
    the_chests = get_random_chests(3)
    draw_board(the_board)
    previous_moves = []

    while sonar_devices > 0:
        # Show sonar device and chest statuses.
        print(
            f'You have {sonar_devices} sonar device(s) left. {len(the_chests)} treasure chest(s) remaining.')
        x, y = enter_player_move(previous_moves)
        # We must track all moves so that sonar devices can be updated.
        previous_moves.append([x, y])
        move_result = make_move(the_board, the_chests, x, y)
        # NOTE(review): make_move never actually returns False, so this
        # guard is dead code kept for safety — confirm before removing.
        if move_result == False:
            continue
        else:
            if move_result == 'You have found a sunken treasure chest!':
                # Update all the sonar devices currently on the map.
                for x, y in previous_moves:
                    make_move(the_board, the_chests, x, y)
            draw_board(the_board)
        print(move_result)
        if len(the_chests) == 0:
            print(
                'You have found all the sunken treasure chests! Congratulations and good game!')
            break
        sonar_devices -= 1

    if sonar_devices == 0:
        # BUGFIX: corrected typo 'tunr' -> 'turn' in the game-over message.
        print(
            'We\'ve run out of sonar devices! Now we have to turn the ship around and head')
        print('for home with treasure chests still out there! Game over.')
        print('    The remaining chests were here:')
        for x, y in the_chests:
            print(f'    {x}, {y}')

    print('Do you want to play again? (yes or no)')
    if not input().lower().startswith('y'):
        sys.exit()
|
[
"orezpablo@gmail.com"
] |
orezpablo@gmail.com
|
88e7be6d96ec8e784aba5e12b0692d4c5beb1949
|
2db7597686f33a0d700f7082e15fa41f830a45f0
|
/Python/LeetCode2.0/DP/72.Edit Distance.py
|
b071302d4d3bdf3daf32936c19f8404f75c65131
|
[] |
no_license
|
Leahxuliu/Data-Structure-And-Algorithm
|
04e0fc80cd3bb742348fd521a62bc2126879a70e
|
56047a5058c6a20b356ab20e52eacb425ad45762
|
refs/heads/master
| 2021-07-12T23:54:17.785533
| 2021-05-17T02:04:41
| 2021-05-17T02:04:41
| 246,514,421
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/05/09
'''
input: two words: str; the length of word is from 0 to inf
output: int; the number of modify steps
corner case:
one of the word is ‘’ → len(word2)
both words are ‘’ → 0
Method - DP
Steps:
build DP table; the size of table is (len(word1) + 1)* (len(word2) + 1)
dp[i][j]: the optimal solution when the size of word1 is i, the size of word2 is j
    dp[i][j] = dp[i-1][j-1], if word1[j - 1] == word2[i - 1]
             = min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1]) + 1, if word1[j - 1] != word2[i - 1]
result is dp[len(word2)][len(word1)]
base case:
dp[0][j] = j
dp[i][0] = i
Time Complexity: O(NM), N is the length of word1 and M is the length of word2
Space Complexity: O(NM), DP table’s size
'''
# Pitfalls: be careful which word indexes the rows and which the columns, and
# don't forget the -1 when comparing word1[j - 1] with word2[i - 1].
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the Levenshtein (edit) distance between word1 and word2."""
        cols = len(word1)
        rows = len(word2)
        # Empty-string shortcuts: distance is just the other word's length.
        if cols == 0:
            return rows
        if rows == 0:
            return cols
        # dp[r][c] = distance between word2[:r] and word1[:c].
        dp = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r in range(rows + 1):
            for c in range(cols + 1):
                if r == 0:
                    dp[r][c] = c
                elif c == 0:
                    dp[r][c] = r
                elif word2[r - 1] == word1[c - 1]:
                    dp[r][c] = dp[r - 1][c - 1]
                else:
                    # 1 + best of insert / delete / substitute.
                    dp[r][c] = min(dp[r][c - 1], dp[r - 1][c], dp[r - 1][c - 1]) + 1
        return dp[rows][cols]
|
[
"58391184+Leahxuliu@users.noreply.github.com"
] |
58391184+Leahxuliu@users.noreply.github.com
|
5b8829efc99be0d97be1f033a445e8090d9021fe
|
7c0a5b40e86c876e72d3a635a60978dbf1c79c8b
|
/__init__.py
|
dbc8e29cabf9ba156d1b1396ed22dcd9204f2a28
|
[] |
no_license
|
BlenderCN-Org/selection_logic
|
a48e396f2ebfaf6f750bfa5871f33d49c69b15ba
|
7d240d626d699e5b41f1b45728730f41a360fc77
|
refs/heads/master
| 2020-05-23T21:20:40.599162
| 2018-10-10T17:35:25
| 2018-10-10T17:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Selection Logic",
    "description": "Advanced selections based on logical conditions.",
    "author": "Omar Ahmad",
    "version": (1, 0),
    "blender": (2, 79, 0),
    "location": "View3D",
    "warning": "",
    "category": "Mesh"
}
import bpy
from . import ui
from . import operators
# Operator wrapper; a class docstring is deliberately avoided because
# Blender would surface it as the operator's UI tooltip.
class SelectByExpressionOperator(bpy.types.Operator):
    bl_idname = "mesh.select_by_expression"
    bl_label = "Select By Expression"

    def execute(self, context):
        # Delegate the actual selection work to the operators module.
        operators.selectVertices(context)
        return {'FINISHED'}
def register():
    """Register the add-on's UI, operators module, and this operator class."""
    ui.register()
    operators.register()
    bpy.utils.register_class(SelectByExpressionOperator)
def unregister():
    """Undo register(): remove the UI, operators module, and operator class."""
    ui.unregister()
    operators.unregister()
    bpy.utils.unregister_class(SelectByExpressionOperator)
|
[
"omar.squircleart@gmail.com"
] |
omar.squircleart@gmail.com
|
ddad2ca9b7b59fdf640e2b0a0f29fdc4854b3efb
|
a1a789f14eb2d5c039fbf61283b03f2f1e0d2651
|
/jeopardy/migrations/0002_auto_20150622_0957.py
|
7c525a02cd0795373e6b581937c6f647021a3936
|
[
"MIT"
] |
permissive
|
codefisher/web_games
|
279bf5be5a348951e6ae3361c24b696ac841e01c
|
d09ffb8f86b24e04568b2a33c94aa49d80455715
|
refs/heads/master
| 2021-01-10T13:10:33.097712
| 2017-07-12T05:46:37
| 2017-07-12T05:46:37
| 36,868,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Second migration for the jeopardy app, applied after the initial schema.
    dependencies = [
        ('jeopardy', '0001_initial'),
    ]

    operations = [
        # Give Points identical singular/plural admin display names.
        migrations.AlterModelOptions(
            name='points',
            options={'verbose_name': 'Points', 'verbose_name_plural': 'Points'},
        ),
        # New integer column; default=0 backfills existing rows, then the
        # default is dropped so future saves must supply a value.
        migrations.AddField(
            model_name='question',
            name='bonus',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        # Re-declare the topic FK with its related_name / verbose_name options.
        migrations.AlterField(
            model_name='question',
            name='topic',
            field=models.ForeignKey(related_name='topicopi', verbose_name='Topic', to='jeopardy.Topic', on_delete=models.CASCADE),
        ),
    ]
|
[
"mail@codefisher.org"
] |
mail@codefisher.org
|
89a66584f244256442569d26ef92908874f586c1
|
7da8913218b6450e83c3833f21315630717c7d88
|
/thomasStudents/odu.py
|
c3de060757e7fb196b013215e6237d1f35168bf4
|
[] |
no_license
|
andrefisch/PythonScripts
|
b028bec4ebf0f4442face3602dd136235efc32fa
|
bd68981ac931ab9ea7b44761647f5e2fff04e4c8
|
refs/heads/master
| 2021-01-17T13:10:49.227307
| 2017-07-06T04:30:43
| 2017-07-06T04:30:43
| 57,985,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import pandas
import openpyxl
import re
import math
import pygame, time
'''
1. import spreadsheet
2. for loop:
A. find value in cell C(row)
B. make a request to database
C. find student in response using last name C(row) and first name B(row)
D. if name exists in database:
a. replace empty cell D(row) with email address
3. save file
HTTPError: HTTP Error 500: Internal Server Error
'''
# 1.
# Open the file for editing
xfile = openpyxl.load_workbook('odu.xlsx')
# Open the worksheet we want to edit
# NOTE(review): get_sheet_by_name is deprecated in newer openpyxl;
# xfile['students'] is the modern spelling.
sheet = xfile.get_sheet_by_name('students')
# Open the finished playing sound
pygame.init()
pygame.mixer.music.load('note.mp3')
# Some servers get annoyed if you make too many requests so dont do them all at once
# Start here
start = 17978
# End here
end = sheet.max_row + 1
# end = 6000
for row in range (start, end):
    # Every ~1000 rows: save progress, chime, and back off for five minutes
    # so the directory server is not hammered continuously.
    if (row % 999 == 0):
        print ("GIVING THE SERVER A FIVE MINUTE BREAK")
        xfile.save('odu.xlsx')
        pygame.mixer.music.play()
        time.sleep(3)
        pygame.mixer.music.stop()
        time.sleep(300)
        pygame.mixer.music.play()
        time.sleep(3)
        pygame.mixer.music.stop()
        print ("BREAK IS OVER, BACK TO WORK!")
    # A.  Read the student's first (column B) and last (column C) names.
    firstName = sheet['B' + str(row)].value
    lastName = sheet['C' + str(row)].value
    # B.  Skip names containing spaces — they would break the query URL.
    if ((' ' in firstName) or (' ' in lastName)):
        continue
    else:
        url = 'https://www.odu.edu/directory/?F_NAME=' + firstName + "&L_NAME=" + lastName + "&SEARCH_IND=S"
        # post_fields = {'L_NAME': lastName, "F_NAME": firstName, "SEARCH_IND": "S"}
        request = Request(url)#, urlencode(post_fields).encode())
        json = urlopen(request).read()
        # Make sure there are any results for the search
        if "<table" in str(json):
            try:
                html = pandas.read_html(json)
                email = html[0][1][3]
                # C.  Scan the result rows for one containing both names, then
                # pull the first thing that looks like an @odu.edu address.
                for i in range(2, len(html[0][1])):
                    if lastName in html[0][0][i] and firstName in html[0][0][i]:
                        p = re.compile('\w*@odu\.edu')
                        # print (isinstance(html[0][2][i], str))
                        if (isinstance(html[0][1][i], str)):
                            m = p.search(html[0][1][i])
                            if (m):
                                # D.  Write the e-mail into column D.
                                sheet['D' + str(row)] = m.group()
                                # Keep track of how close we are to being done
                                print (str(format((row - start) / (end - start) * 100.00, '.2f')) + "%: " + m.group())
            except Exception:
                # NOTE(review): deliberate best-effort scraping — swallowing
                # everything hides parse errors; consider logging instead.
                pass
# 3.  Persist the results and chime twice to signal completion.
xfile.save('odu.xlsx')
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
|
[
"anfischl@gmail.com"
] |
anfischl@gmail.com
|
9488c0f83f1e5752703d6f5e72ddae45c675c8e9
|
86095e9590db8bab47b95752b967d9dbb88647da
|
/client.py
|
6ee40913d22e329ed34554e2633080860679cf5e
|
[] |
no_license
|
jrestuccio/python-udp-filetransfer
|
3cb2e4ec5d0751d133e648fefc20db73755e75c4
|
0c9e4cf278279a0fb980749eb9a3a2a8ca5796e9
|
refs/heads/master
| 2020-12-03T08:13:27.254990
| 2014-05-05T11:41:08
| 2014-05-05T11:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,013
|
py
|
"""
:title: client.py
:author: Josephine Lim
:description: Client to download files from server
Summary of packet types:
1 = 0b0001 = read request = \x00\x00\x00\x01
2 = 0b0010 = read response = \x00\x00\x00\x02
4 = 0b0100 = open request = \x00\x00\x00\x04
8 = 0b1000 = open response = \x00\x00\x00\x08
9 = 0b1001 = close request = \x00\x00\x00\x09
"""
from socket import *
import sys
import select
import struct
import random
class Client(object):
NUM_BYTES_TO_READ = 1400 #Total bytes sent inc header will be <1500 to prevent fragmentation over Ethernet links
epoch_no = 0
handle_no = 0
def __init__(self):
"""Sets up UDP socket, obtains 5 values at command line:
Filename to be read from server
Filename under which received file is to be stored locally
IP address or hostname of server (localhost if client is run on same machine)
Port number of server
Probability of packet loss, p
"""
self.client_socket = socket(AF_INET, SOCK_DGRAM)
# Value for number of bytes socket can receive. ( For best match with hardware and network realities,
# the value should be a relatively small power of 2, for example, 4096)
self.buffer_ = 2048
self.file_read = self.get_file_read_arg()
self.local_filename = self.get_local_filename_arg()
self.ip = self.get_ip_arg()
self.port = self.get_port_arg()
self.p = self.get_p_arg()
self.address = (self.ip, self.port)
# Create file on local system with name provided, to write our received file to
self.file_write = open(self.local_filename, 'wb')
self.eof = False
def get_file_read_arg(self):
"""Gets the name of the file to receive from the command line.
Throws an error if it is empty or more than 100 characters."""
try:
arg = sys.argv[1]
file_read = str(arg)
except IndexError:
print "Please provide the name of the file that you wish to receive."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if (len(file_read) > 100):
print "Name of file must be equal to or less than 100 characters."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return file_read
def get_local_filename_arg(self):
"""Gets the name under which received file is to be stored locally, from the command line.
Throws an error if it is empty."""
try:
arg = sys.argv[2]
local_filename = str(arg)
except IndexError:
print "Please provide the name under which the received file is to be stored locally."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return local_filename
def get_ip_arg(self):
"""Gets the ip number or hostname of the server from the command line.
Throws an error if it is empty."""
try:
arg = sys.argv[3]
ip = str(arg)
except IndexError:
print "The IP address or hostname of the server must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return ip
def get_port_arg(self):
"""Gets the port number of the server from the command line.
Throws an error if it is empty, not an integer, or not in the range of 1024 - 60000."""
try:
arg = sys.argv[4]
port = int(arg)
except ValueError:
print "Port must be a number only."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
except IndexError:
print "Port number must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if any([port < 1024, port > 60000]):
print "Port must be between 1024 and 60000"
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return port
def get_p_arg(self):
"""Gets the probability of packet loss, p, from the command line.
Throws an error if it is empty, or not a float in the range of 0.0 - 1.0."""
try:
arg = sys.argv[5]
p = float(arg)
except IndexError:
print "The probability of packet loss, p, must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if (p < 0.0 or p > 1.0):
print "p value must be between 0.0 and 1.0 inclusive."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return p
def recv_invalid_response(self, recv_data, invalid_type = ""):
"""When bit signature is invalid or wrong packet type is received,
discard packet and print error message."""
if (invalid_type == "bit_signature"):
print("Error: Packet received from outside our network (wrong bit signature)")
recv_data = ""
elif (invalid_type == "response_type"):
print("Error: Wrong response type in packet received.")
recv_data = ""
return
def send_open_request(self):
"""Sends an open-request packet to the server in binary.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - open request type - 0b0100
100 bytes - filename to be read as ASCII string
"""
print "Sending open request for file named ", self.file_read
send_data = struct.pack("!2I100s", 0b1101, 0b0100, self.file_read)
self.client_socket.sendto(send_data, self.address)
return
def recv_open_response(self, recv_payload):
"""When client receives an (already-validated) open-response packet from the server,
it unpacks the payload and saves the received fields as instance variables if file found."""
unpacked_payload = struct.unpack("!?Q2I", recv_payload)
# Read status field. If set to False, ignore remaining fields and
# generate error msg (file not found) before exiting.
# Each unpacked value is a tuple, so [0] accesses the value that we want
status = unpacked_payload[0:1][0]
if status == False:
print "Error: File not found."
sys.exit()
#If set to True, read remaining fields.
elif status == True:
print("File found.")
self.file_length = unpacked_payload[1:2][0]
self.epoch_no = unpacked_payload[2:3][0]
self.handle_no = unpacked_payload[3:][0]
return
def send_read_request(self, start_position):
"""Sends a read request packet to the server in binary.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - read request type - 0b0001
4 bytes - epoch number - provided by server in open response
4 bytes - handle number - provided by server in open response
4 bytes - start position of the block to be read from the file - incremented sequentially
4 bytes - number of bytes to read - 1400
"""
send_data = struct.pack("!6I", 0b1101, 0b0001, self.epoch_no, self.handle_no, start_position, self.NUM_BYTES_TO_READ)
self.client_socket.sendto(send_data, self.address)
return
def recv_read_response(self, recv_payload):
"""When client receives an (already-validated) read-response packet from the server, it unpacks payload,
checks that epoch number and handle number are correct and status field is 'OK',
and appends file data received to the local file at the given start position."""
#Only unpack the headers because we want to store the file data as binary
unpacked_payload = struct.unpack('!H3IQ', recv_payload[:22])
status = unpacked_payload[0:1][0]
epoch_no = unpacked_payload[1:2][0]
handle_no = unpacked_payload[2:3][0]
#Check that file handle is the same, to make sure it is the same file request.
if (self.epoch_no == epoch_no and self.handle_no == handle_no):
start_position = unpacked_payload[3:4][0]
num_bytes_been_read = unpacked_payload[4:5][0]
# If we receive less bytes than the number we requested to read, this means that
# end of file has been reached
if (num_bytes_been_read < self.NUM_BYTES_TO_READ):
self.eof = True
data_to_write = recv_payload[22:]
#If status field says that response contains real data: Append to file. Otherwise react
#depending on error code received.
#Status 00 = OK
#Status 01 = Epoch no. of file handle doesnt match epoch no. of current invocation
#Status 10 = No context found for file-handle and no data has been read
#Status 11 = Context could be found but start position out of range
if (status == 0b00):
self.file_append.seek(start_position)
self.file_append.write(data_to_write)
elif (status == 0b01):
print("Error: Epoch no. of file handle doesnt match epoch no. of current invocation")
sys.exit()
elif (status == 0b10):
print("Error: No context found for file-handle and no data has been read")
sys.exit()
elif(status == 0b11):
print("Error: Context could be found but start position out of range")
sys.exit()
else:
print("Error: File handle does not match file handle stored in client. Wrong file received.")
sys.exit()
#Then return control to read_service_loop() method so that next iteration of send_read_request
#from new start position is called.
return
def send_close_request(self):
"""Sends a close request packet to the server to close the file object.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - close request type - 0b1001
4 bytes - epoch number
4 bytes - handle number
"""
data = struct.pack("!4I", 0b1101, 0b1001, self.epoch_no, self.handle_no)
self.client_socket.sendto(data, self.address)
self.client_socket.close()
return
def open_service_loop(self):
"""Loop that governs the timing and retransmission of open request packets,
then checks packets received for the bit signature and response type fields to ensure that they are correct."""
print "Attempting to receive file", self.file_read, "from", self.ip, "at port", self.port, "."
recv_data = None
num_retransmits = 0
#Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step.
#Limit number of retransmits to 60 so as not to enter infinite loop.
while(num_retransmits < 60):
num_retransmits += 1
self.send_open_request()
input_socket = [self.client_socket]
inputready,outputready,exceptready = select.select(input_socket,[],[], 1)
#if timer expires without input becoming ready, empty list is returned. So go to next iteration of loop (retransmit)
if (inputready == []):
continue
else:
try:
recv_data = self.client_socket.recv(self.buffer_)
except Exception as exception_:
print("Wrong port number or IP address provided, or server is not available at the moment.")
sys.exit()
print("Received a packet.")
#Generate a random number between 0 and 1 with uniform distribution to simulate packet loss.
if (random.uniform(0,1) < self.p):
recv_data = None
print("Packet dropped randomly to simulate packet losses")
continue
bit_signature = recv_data[0:4]
response_type = recv_data[4:8]
recv_payload = recv_data[8:]
#Check that bit signature is valid (packet is from our network)
if bit_signature != "\x00\x00\x00\r":
recv_invalid_response(recv_data, "bit_signature")
continue
else:
#We have only ever sent a open_request, so the only viable response at this point is an open_response.
#If this field contains anything else, it is an invalid packet. Retransmit request.
if response_type != "\x00\x00\x00\x08":
self.recv_invalid_response(recv_data, "response_type")
continue
else:
#Bit signature and response type fields are both valid.
print("Received open response from server...")
self.recv_open_response(recv_payload)
break
if (num_retransmits >= 60):
print ("Exceeded number of retransmissions allowed. Exiting program.")
sys.exit()
return
def read_service_loop(self):
"""Loop that governs the timing and retransmission of read request packets,
then checks packets received for the bit signature and response type fields to ensure that they are correct."""
#Increment start_position each time packet sent, send a read request packet for each new position.
#Expect to receive a read_response packet for each time read request sent.
recv_data = None
print("Sending request to server to read and receive file...")
start_position = 0
while(self.eof == False):
print("Reading from byte " + str(start_position))
num_retransmits = 0
#Loop for retransmissions of the same start position
while(num_retransmits < 60):
num_retransmits = num_retransmits + 1
self.send_read_request(start_position)
input_socket = [self.client_socket]
inputready,outputready,exceptready = select.select(input_socket,[],[], 1)
if (inputready == []):
continue
else:
recv_data = self.client_socket.recv(self.buffer_)
if (random.uniform(0,1) < self.p):
recv_data = None
print("Packet dropped randomly to simulate packet losses")
continue
bit_signature = recv_data[0:4]
response_type = recv_data[4:8]
recv_payload = recv_data[8:]
if bit_signature != "\x00\x00\x00\r":
self.recv_invalid_response(recv_data, "bit_signature")
continue
else:
if response_type == "\x00\x00\x00\x02":
#Packet is valid, proceed to recv_read_response to append this bit of file received into local_filename
self.file_append = open(self.local_filename, 'r+b')
self.recv_read_response(recv_payload)
break
else:
self.recv_invalid_response(recv_data, "response_type")
continue
start_position = start_position + self.NUM_BYTES_TO_READ
if (num_retransmits >= 60):
print ("Exceeded number of retransmissions allowed. Exiting program.")
sys.exit()
return
# Script entry point: run the whole transfer -- handshake, download, teardown.
client = Client()
client.open_service_loop()
client.read_service_loop()
client.send_close_request()
print ("File received successfully. Program will now exit.")
sys.exit()
|
[
"thecodeman66@hotmail.com"
] |
thecodeman66@hotmail.com
|
b534f887f4eef332a9a1d5dc5f0a6b197b40df84
|
29ad238bedc14b3c268b22777391b25fb8701858
|
/config.py
|
5c010a83ee93ad59c31d5579d15039b6b2d83b60
|
[] |
no_license
|
chiris-ye/-
|
2c079efe602f390fc5fdfd2d3a74d73d840c3cfd
|
84b836a637c6647ab13d801caff03359956536c0
|
refs/heads/master
| 2021-12-10T02:36:58.345901
| 2021-11-03T06:40:14
| 2021-11-03T06:40:14
| 262,729,476
| 0
| 0
| null | 2020-05-10T06:52:20
| 2020-05-10T06:52:19
| null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
class config():
    """Model hyper-parameters, exposed as class-level constants."""

    # Embedding / encoder dimensions.
    embed_dim = 300
    hidden_dim = 300
    layers = 1
    dropout = 0.1

    # Classifier head dimensions.
    seq_in_size = 7200
    fc_dim = 100
    out_dim = 2

    # Auxiliary module dimensions.
    mind_dim = 600
    man_dim = 16
|
[
"noreply@github.com"
] |
chiris-ye.noreply@github.com
|
9aa84188689bfa3d627c30002874472a97dc229a
|
499ff5445e2017d042690c0429cf2e767a7f623f
|
/coral/io/_abi.py
|
b19a2ab0ec287ad6d000026ece9b71f749677f3a
|
[
"MIT"
] |
permissive
|
blthree/coral
|
b6ab934c10271d7b790130fe45e622b7c66921b4
|
30514735d9a51487583535a3a7e3fbfd0fe15ed8
|
refs/heads/master
| 2021-01-22T10:14:52.018579
| 2017-02-19T00:28:33
| 2017-02-19T00:28:33
| 81,997,699
| 0
| 0
| null | 2017-02-14T22:58:59
| 2017-02-14T22:58:59
| null |
UTF-8
|
Python
| false
| false
| 3,069
|
py
|
'''Read and write DNA sequences.'''
import coral as cr
import numpy as np
import os
from . import parsers
from .exceptions import UnsupportedFileError
def read_abi(path, trim=True, attach_trace=True):
    '''Read a single ABI/AB1 Sanger sequencing file.

    :param path: Full path to input file.
    :type path: str
    :param trim: Determines whether the sequence will be trimmed using Richard
                 Mott's algorithm (trims based on quality).
    :type trim: bool
    :param attach_trace: Determines whether to attach the trace result as a
                         .trace attribute of the returned sequence and the
                         trace peak locations as a .tracepeaks attribute. The
                         trace attribute is a 2D numpy array with 4 columns in
                         the order GATC.
    :type attach_trace: bool
    :returns: DNA sequence.
    :rtype: coral.DNA
    :raises UnsupportedFileError: if the file extension is not .abi/.ab1.
    '''
    _, ext = os.path.splitext(os.path.split(path)[-1])
    abi_exts = ['.abi', '.ab1']
    if ext in abi_exts:
        # NOTE(review): AB1 is a binary format -- confirm whether parsers.ABI
        # expects text or binary mode ('rb') here.
        with open(path) as f:
            abi = parsers.ABI(f)
    else:
        raise UnsupportedFileError('File format not recognized.')
    seq = abi.seq_remove_ambig(abi.seq)

    # Extract trace data up-front so it is available to both the trimming
    # branch and the caller.
    trace = None
    tracepeaks = None
    if attach_trace:
        order = abi.data['baseorder'].upper()
        trace = [abi.data['raw' + str(order.index(b) + 1)] for b in 'GATC']
        trace = np.array(trace)
        tracepeaks = np.array(abi.data['tracepeaks'])

    sequence = None
    if trim:
        try:
            sequence = cr.DNA(abi.trim(seq))
        except ValueError:
            # A ValueError is raised if the sequence is too short to trim.
            # FIX: the original left `sequence` unbound here and crashed with
            # NameError below; fall back to the untrimmed sequence instead.
            sequence = None
        if sequence is not None:
            trim_start = seq.index(str(sequence))
            # FIX: only use the trace when it was actually extracted -- the
            # original raised NameError for trim=True, attach_trace=False.
            if attach_trace:
                # Adjust trace data based on trimming.
                idx = (trim_start, trim_start + len(sequence))
                peaks = tracepeaks[idx[0]:idx[1]]
                sequence.trace = trace[peaks[0]:peaks[-1], :]
                sequence.tracepeaks = peaks
    if sequence is None:
        sequence = cr.DNA(seq)
    sequence.name = abi.name
    return sequence
def read_abis(directory, trim=True, attach_trace=True):
    '''Read every ABI/AB1 sequencing file found in *directory*.

    :param directory: Path to directory containing sequencing files.
    :type directory: str
    :param trim: Trim each sequence by quality (Richard Mott's algorithm).
    :type trim: bool
    :param attach_trace: Attach the GATC trace array to each sequence.
    :type attach_trace: bool
    :returns: A list of DNA sequences.
    :rtype: coral.DNA list
    '''
    sequences = []
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        try:
            sequences.append(read_abi(full_path, trim=trim,
                                      attach_trace=attach_trace))
        except UnsupportedFileError:
            # Silently skip anything that is not an ABI file.
            pass
    return sequences
|
[
"nbolten@gmail.com"
] |
nbolten@gmail.com
|
0d3b60023a60eed6ae0274a83fd1daecbd04b513
|
95749b75c446df3ce4aabb03d5aec90de793e207
|
/gemini/taskapp/celery.py
|
722f621c5679f886e12c4c93ba9692df4ba43474
|
[] |
no_license
|
Hawk94/gemini
|
8288a11499c4cc12c8c79641a51b5e99afe268c5
|
3a4d0b13488b8e9fbc40dc3cde338b61bc04b494
|
refs/heads/master
| 2020-06-24T11:37:22.204269
| 2017-07-12T20:33:21
| 2017-07-12T20:33:21
| 96,935,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
# Make the worker bootable outside Django's manage.py entry point.
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')  # pragma: no cover

# Module-level Celery application; fully configured later by CeleryConfig.ready().
app = Celery('gemini')
class CeleryConfig(AppConfig):
    # Django AppConfig that finishes wiring Celery once the app registry is ready.
    name = 'gemini.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        """Configure Celery from Django settings, auto-discover tasks, and
        optionally hook up Raven/Sentry error reporting."""
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        installed_apps = [app_config.name for app_config in apps.get_app_configs()]
        # Discover tasks modules in every installed app (force=True registers
        # them immediately rather than lazily).
        app.autodiscover_tasks(lambda: installed_apps, force=True)

        # Raven/Sentry integration is optional: only active when RAVEN_CONFIG
        # is present in settings. Imports are kept local so the dependency is
        # not required otherwise.
        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal

            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
    """Smoke-test Celery task: print its own request context."""
    message = 'Request: {0!r}'.format(self.request)
    print(message)  # pragma: no cover
|
[
"x99tom.miller@gmail.com"
] |
x99tom.miller@gmail.com
|
92d9d24d3beb5ec8799d88be94123456d4805482
|
9da1a3470d60a667167ecba0a49915296de2fbc8
|
/server/app/utils/token_util.py
|
f57cff5e5ac332cd85b84fb05b608e2dbac6f71e
|
[
"MIT"
] |
permissive
|
csu-xiao-an/web_info_monitor
|
5d01d296b2fc9583a1029df30af1cd89feff4419
|
5f39254a4ae014e1a2017006290585b4648cc013
|
refs/heads/master
| 2020-07-27T08:46:16.882741
| 2019-09-09T14:08:44
| 2019-09-09T14:08:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
from flask import jsonify
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, \
BadSignature
from app.models import User
import os
# Token-signing key: read from the environment, with a development fallback.
secret_key = os.environ.get("secret_key", "recar")
#返回token字符串
def generate_auth_token(uid, is_amdin=False, scope=None,
expiration=5000):
#通过flask提供的对象,传入过期时间和flask的SECRET_KEY
"""生成令牌"""
s = TimedJSONWebSignatureSerializer(secret_key,
expires_in=expiration)
#token里面的值,是技术方案需要订的,做相关的业务逻辑验证,uid唯一值表示当前请求的客户端
#type表示客户端类型,看业务场景进行增删
#scope权限作用域
#设置过期时间,这个是必须的,一般设置两个小时
return s.dumps({
'uid': uid,
'is_amdin': is_amdin,
'scope':scope
}).decode('ascii')
# token验证
def verify_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
# except SignatureExpired:
# raise MyHttpAuthFailed('token expired')
# # return {'message': 'token expired'}, return_code.Unauthorized#token_expired() # valid token, but expired
# except BadSignature:
# raise MyHttpAuthFailed('token invalid')
# # return {'message':'token invalid'}, return_code.Unauthorized #invalid_token() # invalid token
except:
return None
user = User.query.filter_by(id=data['uid']).first()
return user
|
[
"yansiyu@360.net"
] |
yansiyu@360.net
|
abedc4c120a71cfaac46c76124d5f686290bce4b
|
2255a4eb151b85df055d3b66455bd788b6928592
|
/lcs.py
|
24442756a9a6765b6654a20e307ef03c08b6fd1c
|
[] |
no_license
|
mloo3/LocalHooks
|
fcfe073d6be32b54421b860920a3de59a948282c
|
9ff07384e544150d2677906683a7f55c31ebd4dc
|
refs/heads/master
| 2021-01-01T19:22:27.343172
| 2017-07-28T19:35:33
| 2017-07-28T19:35:33
| 98,575,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
def lcs(x, y):
    """Return the longest common subsequence of strings *x* and *y*.

    Classic O(m*n) dynamic programming: l[i][j] holds the LCS length of
    x[:i] and y[:j]; the subsequence itself is rebuilt by walking the
    table backwards. Returns "" when there is no common subsequence.
    """
    m = len(x)
    n = len(y)
    l = [[0] * (n + 1) for i in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                l[i][j] = l[i - 1][j - 1] + 1
            else:
                l[i][j] = max(l[i - 1][j], l[i][j - 1])
    # FIX: the original allocated index+1 slots and stored a C-style "\0"
    # terminator, so the returned string ended with a stray NUL character.
    index = l[m][n]
    out = [""] * index
    i = m
    j = n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            # This character is part of the LCS; move diagonally.
            index -= 1
            out[index] = x[i - 1]
            i -= 1
            j -= 1
        elif l[i - 1][j] > l[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return "".join(out)
x = "aggtab"
y = "gxtxayb"
print(lcs(x,y))
|
[
"slayer71432@gmail.com"
] |
slayer71432@gmail.com
|
2df9cffd7c706f44089b51dd1178e45e110bfbc7
|
8149d1030b5bc62cc82d5afedbe7486daedbf8c5
|
/[829][Consecutive Numbers Sum][Medium].py
|
4810671219d8327bd315d73d7fbaf90d1a403a40
|
[] |
no_license
|
guofei9987/leetcode_python
|
faef17bb59808197e32ed97e92e2222862e2ba8c
|
23703a6fb5028d982b3febc630e28f9bb65a82a6
|
refs/heads/master
| 2020-03-21T18:24:33.014579
| 2019-10-12T13:29:03
| 2019-10-12T13:29:03
| 138,889,760
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
# https://leetcode.com/problems/consecutive-numbers-sum
|
[
"guofei9987@foxmail.com"
] |
guofei9987@foxmail.com
|
cfd392a9079699ee6d0b693e945546b5a1178576
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_301/ch41_2019_04_04_16_40_15_344267.py
|
6c41a0d67bc67884cf85bc1629a7262fa142531b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
a=input('que palavra? ')
while a!='desisto':
a=input('que palavra? ')
print(voce acertou)
|
[
"you@example.com"
] |
you@example.com
|
1bb19df97eb432adc4d8988bc491abf66979b71f
|
babf32f611200957e4e2a6bd3c156916b891c43f
|
/mysite/settings.py
|
b9d178757d2991419be0a8125ff4f05d4507fd1a
|
[] |
no_license
|
Tawfiq-Abu/new_blog
|
5faffc2f569d4cc4f7e56ea9207d5ac97c64e5cd
|
10743b8ac6ef665a928e909aba8f4c1d4557964f
|
refs/heads/main
| 2023-02-19T16:54:43.002556
| 2021-01-19T11:44:10
| 2021-01-19T11:44:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,076
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g7o50)e+v6(d)n&jxt@zfg$_^p!0)ub&v6n735=ysw*e+#okaf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myblog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"tawfiqabubakr7@gmail.com"
] |
tawfiqabubakr7@gmail.com
|
346dfc71b0db9a749e8ee1d65b7425c276ff9cb1
|
4577d8169613b1620d70e3c2f50b6f36e6c46993
|
/students/1797637/homework01/program03.py
|
1dea672b0e9890cc0e4a8907a314950ef5731495
|
[] |
no_license
|
Fondamenti18/fondamenti-di-programmazione
|
cbaf31810a17b5bd2afaa430c4bf85d05b597bf0
|
031ec9761acb1a425fcc4a18b07884b45154516b
|
refs/heads/master
| 2020-03-24T03:25:58.222060
| 2018-08-01T17:52:06
| 2018-08-01T17:52:06
| 142,419,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
def codifica(chiave, testo):
    '''Encode *testo* with the substitution cipher derived from *chiave*
    and return the encoded text.'''
    mappa = codifica_chiave(chiave)
    # Substitute character by character; characters not covered by the key
    # pass through unchanged.
    return ''.join(mappa.get(c, c) for c in testo)
def decodifica(chiave, testo):
    '''Decode *testo* with the substitution cipher derived from *chiave*
    and return the decoded text.'''
    mappa = decodifica_chiave(chiave)
    # Inverse substitution; unmapped characters pass through unchanged.
    return ''.join(mappa.get(c, c) for c in testo)
def codifica_chiave(chiave):
    '''Build the encoding map: each sorted key character maps to the key
    character at the same position.'''
    chiave = processa_chiave(chiave)
    return dict(zip(sorted(chiave), chiave))
def decodifica_chiave(chiave):
    '''Build the decoding map: each key character maps back to the sorted
    key character at the same position (inverse of codifica_chiave).'''
    chiave = processa_chiave(chiave)
    return dict(zip(chiave, sorted(chiave)))
def processa_chiave(chiave):
    '''Normalise the key: keep only lowercase a-z characters, then remove
    duplicates (keeping the last occurrence of each).'''
    solo_minuscole = ''.join(c for c in chiave if 'a' <= c <= 'z')
    return elimina_copie(solo_minuscole)
def elimina_copie(chiave):
    '''Remove duplicate characters from *chiave*, keeping the LAST
    occurrence of each character (relative order otherwise preserved).'''
    viste = set()
    coda = []
    # Scan from the right so the last occurrence of each char survives.
    for c in reversed(chiave):
        if c not in viste:
            viste.add(c)
            coda.append(c)
    return ''.join(reversed(coda))
|
[
"a.sterbini@gmail.com"
] |
a.sterbini@gmail.com
|
ab0c049cca67cdb3f90aa2e8ce48ecceed5f6ce8
|
83acd2e879b8d1dfbd7d735193539b8537e86d08
|
/pyropod/ropod/ftsm/ftsm_base.py
|
b5a56108172f6d678c51713eb4724ab28dca21d7
|
[] |
no_license
|
HBRS-SDP/ropod_common
|
89b296e6bb56dc225319850036d3a63efd46ace9
|
5ce24b8ae79239f4fd5d2249fd33d1b1061eaceb
|
refs/heads/master
| 2020-05-09T23:39:11.209722
| 2019-03-12T12:59:48
| 2019-03-12T12:59:48
| 181,508,576
| 0
| 0
| null | 2019-04-15T14:52:18
| 2019-04-15T14:52:18
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
from pyftsm.ftsm import FTSM, FTSMStates, FTSMTransitions
class FTSMBase(FTSM):
    '''ROPOD-specific implementation of a fault-tolerant state machine.

    Provides default transitions for the init/configuring/ready states so
    concrete components only have to implement `running` and `recovering`.

    @author Alex Mitrevski
    @maintainer Alex Mitrevski, Santosh Thoduka, Argentina Ortega Sainz
    @contact aleksandar.mitrevski@h-brs.de, santosh.thoduka@h-brs.de, argentina.ortega@h-brs.de
    '''
    def __init__(self, name, dependencies, max_recovery_attempts=1):
        super(FTSMBase, self).__init__(name, dependencies, max_recovery_attempts)

    def init(self):
        '''Initialise the component; the default simply reports success.
        '''
        return FTSMTransitions.INITIALISED

    def configuring(self):
        '''Configure/reconfigure the component; the default reports that
        configuration is complete.
        '''
        return FTSMTransitions.DONE_CONFIGURING

    def ready(self):
        '''Behaviour while ready but not yet active; the default immediately
        requests a transition into the running state.
        '''
        return FTSMTransitions.RUN

    def running(self):
        '''Abstract: behaviour during active operation. Subclasses must
        override.
        '''
        pass

    def recovering(self):
        '''Abstract: component recovery behaviour. Subclasses must override.
        '''
        pass
|
[
"aleksandar.mitrevski@h-brs.de"
] |
aleksandar.mitrevski@h-brs.de
|
edc33e4a7d63438dd82b67c0afebd70a4f1e0c49
|
6fb6a62a33b13690f3c95c166f07a736836308b6
|
/functions/cartupdate/main.py
|
483369e75fa95ce5f8173cd6d6f83a32c2c1ff5e
|
[] |
no_license
|
Dualic/petshop
|
88172ed47d65ccef79342524262b4de26995a463
|
03443c0b8c2a3a12e9552a5924b99745fb4b6465
|
refs/heads/master
| 2023-07-16T08:53:22.961111
| 2021-09-03T12:16:17
| 2021-09-03T12:16:17
| 401,612,658
| 0
| 2
| null | 2021-09-03T09:40:14
| 2021-08-31T07:29:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
def getsecret(secretname):
    """Return the latest value of the named secret from GCP Secret Manager."""
    import google.cloud.secretmanager as secretmanager
    sm_client = secretmanager.SecretManagerServiceClient()
    # NOTE: the GCP project id is fixed for this deployment.
    resource = f"projects/week10-1-324606/secrets/{secretname}/versions/latest"
    version = sm_client.access_secret_version(request={"name": resource})
    return version.payload.data.decode("UTF-8")
def cartupdate(request):
    """Cloud Function: update one cart row (customer_id, product_id, amount)
    selected by id, from a JSON request body.

    :returns: "Update success" when the UPDATE commits, "Update failed"
              on any database error.
    """
    import psycopg2

    # Connection credentials come from Secret Manager (user is fixed).
    dbname = getsecret("dbname")
    user = "postgres"
    password = getsecret("dbpassword")
    host = getsecret("host")
    conn = None

    # FIX: get_json(silent=True) returns None for a missing/invalid JSON
    # body, and the original then crashed on .get(); fall back to an empty
    # payload so the function degrades to "Update failed" instead.
    request_json = request.get_json(silent=True) or {}
    id = request_json.get("id")
    customer_id = request_json.get("customer_id")
    product_id = request_json.get("product_id")
    amount = request_json.get("amount")

    # Parameterised statement -- values are bound by the driver, never
    # interpolated into the SQL string.
    SQL = "UPDATE cart SET customer_id = %s, product_id = %s, amount = %s WHERE id = %s;"
    result = "Update failed"
    try:
        conn = psycopg2.connect(host=host, dbname=dbname, user=user, password=password)
        cursor = conn.cursor()
        cursor.execute(SQL, (customer_id, product_id, amount, id))
        conn.commit()
        cursor.close()
        result = "Update success"
    except (Exception, psycopg2.DatabaseError) as error:
        # Best-effort logging; the caller only sees the failure string.
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return result
|
[
"ilkka.o.pekkala@gmail.com"
] |
ilkka.o.pekkala@gmail.com
|
89e6f9abf269be06d699b31d7a18f80d863cd0af
|
ea57b713f59d2e2a8d6f4b0b6938c20a8ae6d67d
|
/fetchQzone/iszhi.py
|
96f87ced87c63b0bdd70fb54d9775a8bf09cc8d9
|
[] |
no_license
|
guoyu07/fetchQzone
|
9919f9fad3d44a4643ebaba61d534f3d99c95f8f
|
db0d69b7d4369bd8aaafc2af8f14fdbe6316d294
|
refs/heads/master
| 2021-05-28T20:50:23.052035
| 2015-03-06T05:10:56
| 2015-03-06T05:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
def iszhi(x):
    """Return True when x has more than two positive divisors, i.e. when x
    is a composite number; False for x <= 1 and for primes.

    NOTE(review): the name suggests a primality test ("zhi" = prime), but
    the predicate as written is true for COMPOSITE numbers -- confirm the
    intended meaning with the author before inverting the logic.
    """
    cnt = 0
    if x <= 1:
        return False
    for m in range(1, x + 1):
        if x % m == 0:
            # FIX: the original `++cnt` is a double unary plus in Python
            # (a no-op), so the divisor counter never advanced and the
            # function always returned False for x > 1.
            cnt += 1
        if cnt > 2:
            return True
    return False
|
[
"zhangxu1573@qq.com"
] |
zhangxu1573@qq.com
|
8ccd44a76e64b8cc0ad921f213460c409e895266
|
cc7b4e71b3c27240ec650a75cc6f6bbab5e11387
|
/crdb/templatetags/email_tags.py
|
b13eedd6c32b7950e6ee3313c89e155c42547e14
|
[
"MIT"
] |
permissive
|
jsayles/CoworkingDB
|
0cdada869d950a28cfef20d1b9c1eb3eb4d7b1c2
|
78776910eba0354a7fd96b2e2c53a78e934d8673
|
refs/heads/master
| 2023-02-22T23:11:19.040799
| 2021-12-28T19:13:39
| 2021-12-28T19:13:39
| 883,951
| 3
| 0
|
MIT
| 2023-02-15T17:59:10
| 2010-09-02T18:36:43
|
Python
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
import os
from django.template import Library
from django import template
from django.conf import settings
from django.utils.html import format_html
from django.urls import reverse
from crdb.models import EmailAddress
register = template.Library()
@register.simple_tag
def email_verified(email):
    """Render a red "Not Verified" link for an unverified email address.

    Accepts either an EmailAddress instance or a plain address string (which
    is looked up in the database).  Returns None for a falsy argument, an
    empty string when the address is already verified, and an HTML snippet
    linking to the send-verification view otherwise.
    """
    if not email:
        return None
    # Normalise a raw string into an EmailAddress model instance.
    record = email if isinstance(email, EmailAddress) else EmailAddress.objects.get(email=email)
    if record.is_verified():
        return ""
    template = '<span style="color:red;">( <a target="_top" style="color:red;" href="{}">{}</a> )</span>'
    return format_html(template, record.get_send_verif_link(), "Not Verified")
|
[
"jsayles@gmail.com"
] |
jsayles@gmail.com
|
8eac566ccd717ac44dc96ccf4939d880776e6da5
|
abeb7f8ce8fa3fe3035ad6d7139273266588248f
|
/bottles.py
|
6797be3262b2af6facecda607921990935effc46
|
[] |
no_license
|
mohanoatc/pythonSamples
|
dcddd6a9d989c5435d17bc888aa19ed6bc94c1c1
|
6ff5657e24d46b9d47561e9c9c5fe5735f65aea3
|
refs/heads/master
| 2020-03-22T14:16:46.219459
| 2018-07-15T12:03:17
| 2018-07-15T12:03:17
| 140,166,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# Classic "bottles of beer" countdown, here from 10 down to 1.
for count in range(10, 0, -1):
    # Wording switches from plural to singular at a single bottle.
    noun = "bottles" if count > 1 else "bottle"
    print(count, " " + noun + " of beer on the wall")
    print(count, " " + noun + " of the beer ")
    print("Take one down.\nPass it around.")
    if count > 1:
        print(count - 1, "bottle of beer on the wall\n")
    else:
        print("No more bottles of beer on the wall\n")
|
[
"noreply@github.com"
] |
mohanoatc.noreply@github.com
|
e1e3124cd44931303505037d6d88f51555fb555a
|
403e7f22b8dd4119fc83d153d6dc6e3520ac1922
|
/python-scripts/S3/awsS3PutBigFiles.py
|
b1f76cdc33db30acf3ebdecee8ddf7bb2eea8edd
|
[] |
no_license
|
vincedgy/AWS-Scripts
|
1e56c13245b38f5c520a4207acf544f1d01ac5cb
|
f350167c200700daea23ad9dcbe609ab1d7b90d9
|
refs/heads/master
| 2020-03-29T00:39:13.453200
| 2017-11-01T20:34:25
| 2017-11-01T20:34:25
| 94,635,738
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
""" """
# Create a big file (100 Mb):
# dd if=/dev/zero of=/tmp/bigfile bs=1024 count=0 seek=$[1024*100]
import os
import sys
import threading
import boto3
from boto3.s3 import transfer
class ProgressPercentage(object):
    """Thread-safe upload progress callback for boto3 S3 transfers.

    An instance is handed to S3Transfer as ``callback`` and is invoked with
    the number of bytes transferred since the previous call; it rewrites a
    single status line on stdout.
    """

    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # boto3 may invoke the callback from several worker threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # Assumes the instance tracks exactly one file.
        with self._lock:
            self._seen_so_far += bytes_amount
            pct = (self._seen_so_far / self._size) * 100
            line = "\r%s %s / %s (%.2f%%)" % (
                self._filename, self._seen_so_far, self._size, pct)
            sys.stdout.write(line)
            sys.stdout.flush()
# ---------------------------------------------------------------------
# Main: upload /tmp/bigfile to S3 in multipart chunks with a progress bar.
if __name__ == '__main__':
    # Second positional argument of boto3.client selects the region.
    client = boto3.client('s3', 'eu-west-1')
    # Multipart kicks in above 8 MiB, with up to 10 concurrent part uploads.
    config = transfer.TransferConfig(
        multipart_threshold=8 * 1024 * 1024,
        max_concurrency=10,
        num_download_attempts=10,
    )
    uploading = transfer.S3Transfer(client, config)
    # upload_file(filename, bucket, key, callback=...) -- the callback is
    # fired with byte counts as parts complete (see ProgressPercentage).
    uploading.upload_file(
        '/tmp/bigfile',
        'e-attestations-ova',
        'bigfile',
        callback=ProgressPercentage('/tmp/bigfile')
    )
|
[
"vincent.dagoury@gmail.com"
] |
vincent.dagoury@gmail.com
|
9c33d363aec75e149c68e57f14c11dbc0baa71bd
|
3825f56bef58063374d56d06a9de3418d04bedd6
|
/exercices/advanced-modules/stringio.py
|
168ebcf3ec63621fd6fabcba9afb42aea7e44b71
|
[
"MIT"
] |
permissive
|
cfascina/python-learning
|
a989869846fe8eca45f2f0717ea958bd603d12e5
|
1bc1d4032fb68456a092229de94b5207db7e9143
|
refs/heads/master
| 2020-05-20T08:48:06.121746
| 2019-07-11T18:07:01
| 2019-07-11T18:07:01
| 185,482,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import io

# io.StringIO turns an in-memory string into a file-like object.
stream = io.StringIO("This is just a normal string.")

# Read the whole buffer and show it.
print(stream.read())

# Rewind, then overwrite the contents from the start (the replacement is
# longer than the original, so nothing of the old text remains).
stream.seek(0)
stream.write("Second line written to the file like object.")

# Rewind again and show the updated contents.
stream.seek(0)
print(stream.read())

# Free the in-memory buffer once it is no longer needed.
stream.close()
|
[
"cfascina@gmail.com"
] |
cfascina@gmail.com
|
2d0fe84cfd8f2ee9d2079fa3b668038f362c4362
|
e48faca9b6e2016ae936a77e8acc2f9bce08d207
|
/series_in_func.py
|
d6418cb68aeeb2c92160d18f6dd99020f817e56e
|
[] |
no_license
|
ramachitikineddy/becomecoder
|
84d7315e7f99c1e18855350c9f14729ba8e57087
|
d4e9611bb8a82dd0fb85a33e9b00443daee1e781
|
refs/heads/main
| 2023-05-06T20:59:51.022901
| 2021-05-29T10:33:17
| 2021-05-29T10:33:17
| 367,065,138
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
def seq(n):
    """Return the next Collatz term after n: 3n+1 for odd n, n//2 for even."""
    return 3 * n + 1 if n % 2 else n // 2
# Read the starting value and print its Collatz sequence, space-separated,
# stopping once the sequence reaches 1.
n=int(input())
print(n,end=" ")
# Walrus assignment: advance to the next term and loop while it is non-zero.
while (n:=seq(n)):
    print(n,end=" ")
    if n==1:
        break
|
[
"noreply@github.com"
] |
ramachitikineddy.noreply@github.com
|
732ef0438ed7f6a4a45a2ba312e54337afc3e84a
|
c7f8193a80d68b6144af8d9b2e2f012bf463af6a
|
/busstop.py
|
e02b0e8415b6ff5dc87ad36202eda1409cd94c78
|
[] |
no_license
|
marcteale/DAKboard-OneBusAway-integration
|
8c060360062f07d1be4e1f88e7d0759b3efd8a8d
|
9803aa8568828e8b0533e5c4f452b50f424805b1
|
refs/heads/master
| 2020-03-21T08:29:31.120908
| 2018-10-03T19:36:04
| 2018-10-03T19:36:04
| 138,347,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,173
|
py
|
#!/usr/bin/env python3.6
import configparser
import json
import os
import sys
from datetime import datetime
import requests
def get_departures_for_stop(departures, stop_id, routes, minutes_before, minutes_after, server, apikey):
    """Fetch departures for *stop_id* from a OneBusAway server and append
    display entries to *departures*.

    Each appended entry is a dict with 'value', 'title' and 'subtitle' keys.
    *routes* is an optional whitelist of short route names (None = all).
    Returns the (mutated) *departures* list; API failures append an error
    entry instead of raising.
    """
    r = requests.get('{}/api/where/arrivals-and-departures-for-stop/{}.json'.format(server, stop_id),
                     params={'key': apikey, 'minutesBefore': minutes_before, 'minutesAfter': minutes_after})
    rj = r.json()
    stop_name = ''
    if r.ok:
        # Resolve the human-readable stop name from the references section.
        for stop in rj['data']['references']['stops']:
            if stop['id'] == stop_id:
                stop_name = stop['name']
                break
        # API timestamps are epoch milliseconds.
        current_time = datetime.fromtimestamp(rj['currentTime'] / 1000)
        if rj['data']['entry']['arrivalsAndDepartures']:
            for a in rj['data']['entry']['arrivalsAndDepartures']:
                if a['departureEnabled'] and (routes is None or a['routeShortName'] in routes):
                    # Prefer the real-time prediction when one exists.
                    if a['predicted'] and a['predictedDepartureTime'] != 0:
                        departure_string = 'predictedDepartureTime'
                    else:
                        departure_string = 'scheduledDepartureTime'
                    departure_time = datetime.fromtimestamp(a[departure_string] / 1000)
                    # BUGFIX: timedelta.seconds is always non-negative and
                    # ignores the day component, so a departure in the past
                    # (allowed by minutesBefore) rendered as ~1430 minutes.
                    # total_seconds() keeps the sign and full magnitude.
                    delta = int((departure_time - current_time).total_seconds() / 60)
                    value = "{} - {} minute{}".format(a['routeShortName'], delta, '' if abs(delta) == 1 else 's')
                    subtitle = '{}'.format(departure_string.replace('DepartureTime', ''))
                    departures.append({'value': value, 'title': stop_name, 'subtitle': subtitle})
        else:
            departures.append(
                {'value': 'No scheduled departures', 'title': stop_name,
                 'subtitle': 'No departures schedule or predicted in the next {} minutes.'.format(minutes_after)}
            )
    else:
        departures.append({'value': 'Failed to fetch data', 'title': '', 'subtitle': rj['text']})
    return departures
def get_config():
    """Read busstop.conf (next to this script) and split out the defaults.

    Returns ``(config, defaults)``: *config* is the parser with the
    [defaults] section removed, so its remaining sections are stop ids;
    *defaults* carries minutesbefore/minutesafter/routes/server plus the API
    key taken from the APIKEY environment variable.

    Raises KeyError if APIKEY is unset.
    """
    config = configparser.ConfigParser(allow_no_value=True)
    configfile = os.path.abspath(os.path.dirname(__file__)) + '/busstop.conf'
    config.read(configfile)
    # Py3 fix: unicode() does not exist in Python 3 (the shebang targets
    # 3.6); str() is the equivalent.
    routes = [str(r.strip()) for r in config.get('defaults', 'routes').split(',')] \
        if config.has_option('defaults', 'routes') else None
    defaults = {'minutesbefore': config.get('defaults', 'minutesbefore'),
                'minutesafter': config.get('defaults', 'minutesafter'),
                'routes': routes,
                'apikey': os.environ['APIKEY'],
                'server': config.get('defaults', 'server')}
    # Drop [defaults] once so only per-stop sections remain (the original
    # called remove_section twice; the second call was a harmless no-op).
    config.remove_section('defaults')
    return config, defaults
def app(environ, start_response):
    """WSGI entry point: return departure info for every configured stop as JSON.

    Per-stop sections may override the [defaults] minutesbefore /
    minutesafter / routes settings.  A configuration error yields a 500
    response containing a single error entry.
    """
    status = "200 OK"
    try:
        config, defaults = get_config()
        results = []
        ok = True
    except Exception as e:
        status = "500 Internal Server Error"
        # Py3 fix: Exception has no .message attribute.  Keep `results` a
        # list (not a pre-dumped string) so the single json.dumps() at the
        # end does not double-encode the error payload.
        results = [{'title': 'Error', 'value': str(e), 'subtitle': ''}]
        ok = False
    if ok:
        for section in config.sections():
            minsBefore = config.get(section, 'minutesbefore') \
                if config.has_option(section, 'minutesbefore') else defaults['minutesbefore']
            minsAfter = config.get(section, 'minutesafter') \
                if config.has_option(section, 'minutesafter') else defaults['minutesafter']
            # Py3 fix: unicode() does not exist -- str() is the equivalent.
            routes = [str(r.strip()) for r in config.get(section, 'routes').split(',')] \
                if config.has_option(section, 'routes') else defaults['routes']
            stopId = section
            results = get_departures_for_stop(results, stopId, routes, minsBefore, minsAfter, defaults['server'],
                                              defaults['apikey'])
    data = str.encode(json.dumps(results))
    response_headers = [
        ("Content-Type", "application/json"),
        ("Content-Length", str(len(data)))
    ]
    start_response(status, response_headers)
    return iter([data])
|
[
"marc.teale@openmarket.com"
] |
marc.teale@openmarket.com
|
005779a57f96302b20a3bcde3152d53965d436f1
|
d496d504bf4ccdb59fbbeeee7b5d70ae7ab136b8
|
/ts_development/version1/ts__development/models/models.py
|
7448e94a50dfbd30c0d9546155d7beafbc13bcf3
|
[] |
no_license
|
taybahsoftegy-dev/ts-modules
|
6e92bb0748238fcde38df146ab73ae311f16df55
|
cf4ec549943a0ba29d203ef1611a337040389d64
|
refs/heads/master
| 2022-11-08T23:18:37.734241
| 2020-06-29T12:45:15
| 2020-06-29T12:45:15
| 275,809,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api,_
import time
class Development_Tracking(models.Model):
    """Tracks development work items (modules, forms, reports) for TaybahSoft,
    with chatter/portal/rating support via the inherited mixins."""

    _name = 'development.tracking'
    _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']
    _mail_post_access = 'read'
    _check_company_auto = True

    serial = fields.Integer(string='Serial', tracking=True)
    # Defaults to today's date at record creation.
    Date = fields.Date(default=lambda *a: time.strftime('%Y-%m-%d'), tracking=True)
    module = fields.Char(string='Module', tracking=True)
    form = fields.Char(string='Form', tracking=True)
    report = fields.Char(string='Report', tracking=True)
    new = fields.Char(string='New', tracking=True)
    description = fields.Text(string='Description', tracking=True)
    status = fields.Selection([
        ('open', 'Open'),
        ('closed', 'Closed'),
        ('rejected', 'Rejected')], default='open', tracking=True)
    time_consumed = fields.Char('Time Consumed', tracking=True)
    user_id = fields.Many2one('res.users',
                              default=lambda self: self.env.uid,
                              index=True, tracking=True)
    # Declared exactly once: the original defined this related field twice,
    # the second assignment silently shadowing the first.
    user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)
    # NOTE(review): the default returns self.env.uid (a res.users id) for a
    # Many2one to res.partner -- confirm whether this should be
    # self.env.user.partner_id.id instead.
    partner_id = fields.Many2one('res.partner',
                                 string='Customer',
                                 default=lambda self: self.env.uid,)
# class Development_Tracking(models.Model):
# _name = 'Development.Tracking'
# _description = 'For Tracking Development for TaybahSoft'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
|
[
"dev.mohamedfci@gmail.com"
] |
dev.mohamedfci@gmail.com
|
e8b2f8c81f953e4c0e4a8d266dceb71804203e01
|
7f25740b1ef47edc24db1a3618b399959b073fe1
|
/1029_17_smallproject.py
|
97673d239a34ef5759856f9eeba050bcf1977446
|
[] |
no_license
|
pjh9362/PyProject
|
b2d0aa5f8cfbf2abbd16232f2b55859be50446dc
|
076d31e0055999c1f60767a9d60e122fb1fc913e
|
refs/heads/main
| 2023-01-09T12:12:06.913295
| 2020-11-07T15:32:03
| 2020-11-07T15:32:03
| 306,814,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
# Apply a fixed-amount discount coupon to a purchase price read from stdin.
cost = int(input())
cpn = input()
if cpn == "Cash3000":
    print(cost-3000)
elif cpn == "Cash5000":
    print(cost-5000)
else:
    # Unrecognised coupon: report it (Korean: "the coupon was not applied")
    # and show the undiscounted price.
    print("쿠폰이 적용되지 않았습니다.")
    print(cost)
|
[
"pjh9362@gmail.com"
] |
pjh9362@gmail.com
|
a105b75168724e5d6040804652d0f8dd4fadeb5e
|
ca97700838056596c072a0b63934f179c6fbac17
|
/_21_ev_differentDER.py
|
6e2edf1852af3d9af8f4081db991a47e467510ae
|
[] |
no_license
|
mlamlamla/powernet_pyGridlabD_eval
|
c18bff98164eb6df4ae79a157b840a59c19ff6d9
|
54275cbd86517bb1728e72824ba16fcbec99e767
|
refs/heads/master
| 2022-04-17T11:50:17.273163
| 2020-04-03T18:23:18
| 2020-04-03T18:23:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,644
|
py
|
import os
import pandas as pd
import numpy as np
def get_monthly(run,ind,month,df_total_load_all=None):
    """Compute no-market procurement cost and net energy for one month's run.

    Reads the simulation output CSVs for ``run``/``ind``, drops the first
    simulated day (24*60 one-minute rows), caps wholesale prices at the
    module-global ``p_max``, nets PV generation and EV charging out of the
    per-house loads, and appends the month's net load to
    ``df_total_load_all`` (created on first call).

    Returns (df_total_load_all, procurement_cost, net_energy).
    """
    folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind
    directory = run + '_' + ind + '_vis'
    # Procurement cost: energy (MWh per minute row) times capped WS price.
    df_system = pd.read_csv(run+'/' + directory +'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):]
    #df_system = df_system.iloc[24*60:]
    df_system['measured_real_energy'] = df_system['measured_real_power']/60.
    df_system['p_max'] = p_max
    df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1)
    df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh
    proc_cost_Jan_nomarket = df_system['procurement_cost'].sum()
    print('Procurement cost in '+month+' (no market): '+str(proc_cost_Jan_nomarket))
    # Total house load, no market (GridLAB-D timestamps carry a trailing
    # 4-char timezone suffix that is stripped before parsing).
    df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):]
    df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4])
    df_total_load = df_total_load.iloc[:-1]
    df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp'])
    df_total_load.set_index('# timestamp',inplace=True)
    df_total_load = df_total_load/1000 #convert to MW
    df_total_load = df_total_load/60. #convert to energy
    df_total_load_gross = df_total_load.copy()
    # Subtract PV generation and add EV consumption per house.
    df_PV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv')
    list_PV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv')['inverter_name'])
    list_EV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')['EV_name'])
    list_EV_inv = []
    for EV in list_EV:
        EV_inv = 'EV_inverter'+EV[2:]
        list_EV_inv += [EV_inv]
    if len(list_PV) + len(list_EV) > 0:
        df_inv_load = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):]
        df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4])
        df_inv_load = df_inv_load.iloc[:-1]
        df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp'])
        df_inv_load.set_index('# timestamp',inplace=True)
        df_inv_load = (df_inv_load/1000000)/60 # to MWh
        # Include PV: net each house's PV inverter output out of its load.
        if len(list_PV) > 0:
            df_PV = df_inv_load[list_PV] #W -> MW (comes from GridlabD)
            for house in df_total_load.columns:
                if house in (df_PV_appl['house_name']).tolist():
                    PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0]
                    df_total_load[house] = df_total_load[house] - df_PV[PV_inv]
        # Include EV consumption (EV inverter output is negatively defined,
        # so subtracting it adds the charging load).
        if len(list_EV):
            df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')
            df_EV = df_inv_load[list_EV_inv]
            for house in df_total_load.columns:
                if house in (df_EV_appl['house_name']).tolist():
                    EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:]
                    df_total_load[house] = df_total_load[house] - df_EV[EV_inv] #EV_inv is negatively defined
    if df_total_load_all is None:
        #print('df_total_load_all doesnot exist yet')
        df_total_load_all = df_total_load.copy() #Becomes master load df
    else:
        df_total_load_all = df_total_load_all.append(df_total_load)
    energy_nomarket_Jan = df_total_load.sum().sum() # Total net energy
    print('Energy in '+month+' (no market): '+str(energy_nomarket_Jan))
    # print(str(len(df_system)/(24*60))+' days')
    # print(str(len(df_total_load)/(24*60))+' days')
    # print(str(len(df_inv_load)/(24*60))+' days')
    return df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan
def get_monthly_wm(run,ind,month,df_total_base_market=None,df_total_flex_market=None,df_cleared_market=None):
    """Compute with-market procurement cost, base/flex load split, and
    clearing prices for one month's run.

    Base load = total load minus HVAC; flex load = HVAC plus netted
    PV/EV/battery inverter flows.  The three accumulator frames are created
    on the first call and appended to on later calls.

    Returns (df_total_base_market, df_total_flex_market, df_cleared_market,
    procurement_cost_base_only, energy_base_only).
    """
    folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run + '/' + run + '_' + ind
    directory = run + '/' + run + '_' + ind + '_vis'
    # Procurement cost over total load (recomputed over base load only at
    # the end of this function, which overwrites proc_cost_Jan_market).
    df_system = pd.read_csv(directory+'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):]
    df_system = df_system #.iloc[24*60:]
    df_system['measured_real_energy'] = df_system['measured_real_power']/60. #MW
    df_system['p_max'] = p_max
    df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1)
    df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh
    proc_cost_Jan_market = df_system['procurement_cost'].sum()
    print('Procurement cost in '+month+' (market): '+str(proc_cost_Jan_market))
    #print(str(len(df_system)/(24*60))+' days')
    # Total house load with market (strip 4-char timezone suffix, convert
    # W -> MW -> MWh per minute row).
    df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):]
    df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4])
    df_total_load = df_total_load.iloc[:-1]
    df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp'])
    df_total_load.set_index('# timestamp',inplace=True)
    df_total_load = df_total_load/1000 #convert to MW
    df_total_load = df_total_load/60. #convert to energy
    df_hvac_load = pd.read_csv(folder+'/hvac_load_all.csv',skiprows=range(8)).iloc[(24*60):]
    df_hvac_load['# timestamp'] = df_hvac_load['# timestamp'].map(lambda x: str(x)[:-4])
    df_hvac_load = df_hvac_load.iloc[:-1]
    df_hvac_load['# timestamp'] = pd.to_datetime(df_hvac_load['# timestamp'])
    df_hvac_load.set_index('# timestamp',inplace=True)
    df_hvac_load = df_hvac_load/1000 #convert to MW
    df_hvac_load = df_hvac_load/60. #convert to energy
    df_base_load = df_total_load.copy()
    df_flex_load = df_total_load.copy()
    # NOTE(review): this sets an *attribute* named `data` on the DataFrame;
    # it does not zero the values.  If zeroing was intended,
    # df_total_load[:] = 0.0 would be it -- confirm.  Both copies above are
    # overwritten below anyway.
    df_total_load.data = 0.0
    # Get the list of flexible appliances (PV, EV, battery inverters).
    df_PV_appl = pd.read_csv(folder+'/df_PV_state.csv')
    list_PV = list(df_PV_appl['inverter_name'])
    df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')
    list_EV = list(df_EV_appl['EV_name'])
    list_EV_inv = []
    # NOTE(review): unlike the battery loop below (and get_monthly), this
    # loop never appends EV_inv to list_EV_inv, so list_EV_inv stays empty
    # and df_EV ends up with no columns -- likely a bug, confirm.
    for EV in list_EV:
        EV_inv = 'EV_inverter'+EV[2:]
    df_Bat_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind +'/df_battery_state.csv')
    list_Bat = list(df_Bat_appl['battery_name'])
    list_Bat_inv = []
    for Bat in list_Bat:
        Bat_inv = 'Bat_inverter'+Bat[7:]
        list_Bat_inv += [Bat_inv]
    if len(list_PV) + len(list_Bat) + len(list_EV_inv) > 0:
        df_inv_load = pd.read_csv(folder+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):]
        df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4])
        df_inv_load = df_inv_load.iloc[:-1]
        df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp'])
        df_inv_load.set_index('# timestamp',inplace=True)
        df_inv_load = (df_inv_load/1000000)/60 # to MWh
        df_PV = df_inv_load[list_PV]
        df_EV = df_inv_load[list_EV_inv]
        df_Bat = df_inv_load[list_Bat_inv]
    # Split: everything except HVAC is "base"; HVAC plus DER flows is "flex".
    df_base_load = df_total_load - df_hvac_load #for100% flex hvac!
    df_flex_load = df_hvac_load.copy()
    for house in df_hvac_load.columns:
        if len(list_PV) > 0:
            if house in (df_PV_appl['house_name']).tolist():
                PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0]
                df_flex_load[house] = df_flex_load[house] - df_PV[PV_inv]
        if len(list_EV_inv):
            if house in (df_EV_appl['house_name']).tolist():
                EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:]
                df_flex_load[house] = df_flex_load[house] - df_EV[EV_inv] #EV_inv is negatively defined
        if len(list_Bat) > 0:
            if house in (df_Bat_appl['house_name']).tolist():
                Bat_inv = 'Bat_inverter'+df_Bat_appl['battery_name'].loc[df_Bat_appl['house_name'] == house].iloc[0][7:]
                df_flex_load[house] = df_flex_load[house] - df_Bat[Bat_inv] #Bat_inv is negatively defined
    # Clearing prices: 5-minute market prices forward-filled onto the
    # 1-minute load index.
    df_cleared = pd.read_csv(folder+'/df_prices.csv',parse_dates=[0]).iloc[24*12:] #USD/MWh
    df_cleared.rename(columns={'Unnamed: 0':'timedate'},inplace=True)
    df_cleared.set_index('timedate',inplace=True)
    df_cleared = df_cleared[['clearing_price']]
    df_cleared_long = pd.DataFrame(index=df_total_load.index,columns=['clearing_price'],data=df_cleared['clearing_price'])
    df_cleared_long.fillna(method='ffill',inplace=True)
    # print(str(len(df_system)/(24*60))+' days')
    # print(str(len(df_total_load)/(24*60))+' days')
    # print(str(len(df_hvac_load)/(24*60))+' days')
    # print(str(len(df_inv_load)/(24*60))+' days')
    # print(str(len(df_cleared_long)/(24*60))+' days')
    # Accumulate this month into the cross-month master frames.
    if df_total_base_market is None:
        print('df_total_load_all_market doesnot exist yet')
        df_total_base_market = df_base_load.copy() #Becomes master load df
        df_total_flex_market = df_flex_load.copy() #Becomes master load df
        df_cleared_market = df_cleared_long.copy()
    else:
        df_total_base_market = df_total_base_market.append(df_base_load)
        df_total_flex_market = df_total_flex_market.append(df_flex_load)
        df_cleared_market = df_cleared_market.append(df_cleared_long)
    energy_nomarket_Jan = df_total_load.sum().sum()
    if len(list_PV) > 0:
        energy_nomarket_Jan -= df_PV.sum().sum()
    if len(list_EV) > 0:
        energy_nomarket_Jan -= df_EV.sum().sum()
    if len(list_Bat) > 0:
        energy_nomarket_Jan -= df_Bat.sum().sum()
    # Use base load only: these assignments overwrite the cost and energy
    # figures computed above.
    df_system['measured_real_energy_base'] = df_base_load.sum(axis=1)
    df_system['procurement_cost_base'] = df_system['measured_real_energy_base']*df_system['WS_capped'] # in MW and USD/MWh
    proc_cost_Jan_market = df_system['procurement_cost_base'].sum()
    energy_nomarket_Jan = df_system['measured_real_energy_base'].sum()
    print('Energy in '+month+' (market): '+str(energy_nomarket_Jan))
    return df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_nomarket_Jan
##############
# GENERAL SETTINGS
##############
run = 'FinalReport2' #'FinalReport_Jul1d'
settings_file = '/Users/admin/Documents/powernet/powernet_markets_mysql/settings_final2.csv'
df_settings = pd.read_csv(settings_file)
# Cap applied to wholesale prices (USD/MWh) inside get_monthly[_wm].
p_max = 100.
# NOTE(review): column names below say "riskprem5" but this multiplier is
# 2.5% -- confirm which is intended.
risk_prem = 1.025
##############
# SETTINGS: Only HVAC, no other DER
#
# NO market: 64,65,66
# With market: 70,71,72 // 103, 104, 105 (with reference price based on forward prices)
##############
print('Only HVAC, no other DER')
##############
# NO MARKET YET
##############
df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0064','JANUARY')
df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0065','JULY',df_total_load_all)
df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0066','OCTOBER',df_total_load_all)
# Calculate the retail tariff for procurement of energy.
proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket
print('Procurement cost (no market, no DER): '+str(proc_cost_nomarket))
energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct
retail_nomarket = proc_cost_nomarket/energy_nomarket
print('Retail tariff (no market, no DER): '+str(retail_nomarket))
# Calculate cost for houses without a market under a constant retail tariff.
df_cost_nomarket = df_total_load_all*retail_nomarket
df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0))
df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem
print('Total customer bills (no market, no DER) over three weeks: '+str(df_cost['costs_nomarket_riskprem5'].sum()))
#print('Calculate for year')
##############
# MARKET
##############
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0106','JANUARY')
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0107','JULY',df_total_base_market, df_total_flex_market,df_cleared_market)
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0108','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market)
proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market
print('Procurement cost (market, HVAC only): '+str(proc_cost_market))
retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct)
print('New retail tariff (with market): '+str(retail_new))
# Consumer costs: base load billed at the retail tariff, flexible load
# billed at the market clearing price -- once at the old tariff, once at
# the recomputed (procurement-neutral) tariff.
df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0)
df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
print('Total customer bills (market, HVAC only) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum()))
df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0)
df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
print('Total customer bills (market, HVAC only) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum()))
# Per-house absolute and percentage bill changes under each tariff.
df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])
df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
print('\nMedian type 1600 old RR')
print(df_cost['change_oldRR'].median())
df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])
df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
print('\nMedian type 1600 new RR')
print(df_cost['change_newRR'].median())
#df_cost.to_csv(run+'/cost_changes_procneutral_1600_all.csv')
# ##############
# #SETTINGS: Only other DER, no HVAC
# #
# #NO market: 79,80,81
# #With market: 70,71,72
# ##############
# print('No HVAC, only other DER')
# ##############
# #NO MARKET YET
# ##############
# df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0079','JANUARY')
# df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0080','JULY',df_total_load_all)
# df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0081','OCTOBER',df_total_load_all)
# #Calculate the retail tariff for procurement of energy
# proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket
# print('Procurement cost (no market, with PV and EV): '+str(proc_cost_nomarket))
# energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct
# retail_nomarket = proc_cost_nomarket/energy_nomarket
# print('Retail tariff (no market, with PV and EV): '+str(retail_nomarket))
# #Calculate cost for houses without a market under a constant retail tariff
# df_cost_nomarket = df_total_load_all*retail_nomarket
# df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0))
# df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem
# print('Total customer bills (no market, with PV and EV): '+str(df_cost['costs_nomarket_riskprem5'].sum()))
# print('Calculate for year')
# ##############
# #MARKET
# ##############
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0076','JANUARY')
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0077','JULY',df_total_base_market, df_total_flex_market,df_cleared_market)
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0078','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market)
# proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market
# print('Procurement cost (market, other DER): '+str(proc_cost_market))
# retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct)
# print('New retail tariff (with market): '+str(retail_new))
# #Calculate consumer costs
# df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
# df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0)
# df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
# print('Total customer bills (market, other DER) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum()))
# df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
# df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0)
# df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
# print('Total customer bills (market, other DER) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum()))
# df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])
# df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
# print('\nMedian type 1600 old RR')
# print(df_cost['change_oldRR'].median())
# df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])
# df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
# print('\nMedian type 1600 new RR')
# print(df_cost['change_newRR'].median())
|
[
"admin@admins-air.attlocal.net"
] |
admin@admins-air.attlocal.net
|
385836ada1f0c7aa8919ec7aeb97acca6aea94c0
|
644b13f90d43e9eb2fae0d2dc580c7484b4c931b
|
/network2.py
|
5dbc8833c5526d15e355e3169680c46c4a5bc280
|
[] |
no_license
|
yeonnseok/ps-algorithm
|
c79a41f132c8016655719f74e9e224c0870a8f75
|
fc9d52b42385916344bdd923a7eb3839a3233f18
|
refs/heads/master
| 2020-07-09T11:53:55.786001
| 2020-01-26T02:27:09
| 2020-01-26T02:27:09
| 203,962,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
def cal_ans():
    """For every 1 in the global list ``src``, sum the index-distances to
    the (at most five) most recent 0-positions seen so far, and record the
    corresponding edges in the global adjacency list ``link`` (1-based
    node ids).  Returns the accumulated distance sum.
    """
    temp = []
    ans = 0
    for i in range(len(src)):
        if src[i] == 0:
            # Keep a sliding window of the last five zero positions.
            if len(temp) == 5:
                temp = temp[1:]
            temp.append(i)
        else:
            # i*len(temp) - sum(temp) == sum(i - j for j in temp).
            ans += i * len(temp) - sum(temp)
            for j in temp:
                link[i + 1].append(j + 1)
                link[j + 1].append(i + 1)
    return ans
def cal_group():
    """Count "groups" in the global 0/1 list ``src``.

    As written: each leading 1 and each trailing 0 counts one group; in the
    interior, every maximal run of five-or-more consecutive 0s contributes
    ``run_length - 4`` groups, and a short interior "0 then 1" pattern adds
    at most one extra group at the end.  (Exact problem semantics are not
    visible in this file -- presumably a contest-specific rule.)
    """
    cnt, group = 0, 0
    zero_one = False
    start, end = -1, 0
    # Count the run of 1s at the head; `start` lands just past that run.
    for i in range(len(src)):
        start = i + 1
        if src[i] == 1:
            group += 1
        else:
            break
    # Count the run of 0s at the tail; `end` lands on the last such 0.
    for i in range(len(src) - 1, -1, -1):
        end = i + 1
        if src[i] == 0:
            group += 1
        else:
            break
    # Scan the interior, counting long zero runs terminated by a 1.
    for i in range(start, end):
        if src[i] == 0:
            cnt += 1
        elif src[i] == 1:
            if cnt >= 5:
                group += (cnt - 4)
            elif i >= 1 and src[i-1] == 0:
                zero_one = True
            cnt = 0
    if zero_one and len(src) != 1:
        return group + 1
    return group
# Test-case driver: for each case, read n and the 0/1 sequence, set up the
# global adjacency list, and print "#case <ans> <groups>".
num_of_case = int(input())
for case in range(1, num_of_case + 1):
    n = int(input())
    # Globals consumed by cal_ans()/cal_group().
    src = list(map(int, input().split()))
    link = [[] for _ in range(n + 1)]
    print("#%d" % case, end=" ")
    print(cal_ans(), end=" ")
    print(cal_group())
|
[
"smr603@snu.ac.kr"
] |
smr603@snu.ac.kr
|
3923da15d3cfb9a730088a4d9708e6a18aa4ff3f
|
2ef742fe5e3208715208ff711eb2046acc1f5ef6
|
/NathHorrigan/wsgi.py
|
b4e20145e98ba5cc686716da381e9690d4db59ac
|
[] |
no_license
|
NathHorrigan/NathHorrigan.com
|
9ac53208061b16d3f8bc4a00e4575df83083dc7c
|
636165b718659cf5dcd70ed29251ae69b4b09748
|
refs/heads/master
| 2020-03-20T16:23:07.531145
| 2018-08-31T22:22:13
| 2018-08-31T22:22:13
| 137,537,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for NathHorrigan project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NathHorrigan.settings.dev")
application = get_wsgi_application()
|
[
"nathan_horrigan@icloud.com"
] |
nathan_horrigan@icloud.com
|
20ba1ba73360f4befafe0351c226f32696426e2f
|
a8163b09c4b4a58fc82cdb6ff8df29197fd15945
|
/_OldVersion/index.py
|
2756bb9c27b13e6d15761ae94704a0c1842d6512
|
[] |
no_license
|
zhudonlin/Fuck_HENUDC
|
bd78a78f0807e96fdfda36a727c2c017cab7ad9c
|
0aa398333c1d8e42c4820f6b80292509af46cfd0
|
refs/heads/main
| 2023-07-14T13:43:51.803980
| 2021-09-03T08:08:13
| 2021-09-03T08:08:13
| 402,689,111
| 1
| 0
| null | 2021-09-03T07:46:57
| 2021-09-03T07:46:56
| null |
UTF-8
|
Python
| false
| false
| 16,520
|
py
|
# -*- coding: utf-8 -*-
import sys
import json
import uuid
import oss2
import yaml
import base64
import requests
import time
import random
import uanalyse
from pyDes import des, CBC, PAD_PKCS5
from datetime import datetime, timedelta, timezone
from urllib.parse import urlparse
from urllib3.exceptions import InsecureRequestWarning
import notification
# debug模式
debug = True
if debug:
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
rand_lon = str(random.randint(0, 9))
rand_lat = str(random.randint(0, 9))
# 读取yml配置
def getYmlConfig(yaml_file='config.yml'):
file = open(yaml_file, 'r', encoding="utf-8")
file_data = file.read()
file.close()
config = yaml.load(file_data, Loader=yaml.FullLoader)
return dict(config)
# 全局配置
config = getYmlConfig(yaml_file='config.yml')
# 获取当前utc时间,并格式化为北京时间
def getTimeStr():
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
return bj_dt.strftime("%Y-%m-%d %H:%M:%S")
# 输出调试信息,并及时刷新缓冲区
def log(content):
print(getTimeStr() + ' ' + str(content))
sys.stdout.flush()
# 获取今日校园api
def getCpdailyApis(user):
apis = {}
user = user['user']
if 'cpdaily' in user['ua']:
print('你UA输入的有问题,请看说明书!')
exit(2)
if 'Android' not in user['ua']:
print('你UA输入的有问题,请看说明书!')
exit(2)
schools = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/list', verify=not debug).json()[
'data']
flag = True
for one in schools:
if one['name'] == user['school']:
if one['joinType'] == 'NONE':
log(user['school'] + ' 未加入今日校园')
exit(-1)
flag = False
params = {
'ids': one['id']
}
res = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/info', params=params,
verify=not debug)
data = res.json()['data'][0]
joinType = data['joinType']
idsUrl = data['idsUrl']
ampUrl = data['ampUrl']
if 'campusphere' in ampUrl or 'cpdaily' in ampUrl:
parse = urlparse(ampUrl)
host = parse.netloc
res = requests.get(parse.scheme + '://' + host)
parse = urlparse(res.url)
apis[
'login-url'] = idsUrl + '/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin'
apis['host'] = host
ampUrl2 = data['ampUrl2']
if 'campusphere' in ampUrl2 or 'cpdaily' in ampUrl2:
parse = urlparse(ampUrl2)
host = parse.netloc
res = requests.get(parse.scheme + '://' + host)
parse = urlparse(res.url)
apis[
'login-url'] = idsUrl + '/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin'
apis['host'] = host
break
if flag:
log(user['school'] + ' 未找到该院校信息,请检查是否是学校全称错误')
exit(-1)
log(apis)
return apis
# 登陆并获取session
def getSession(user, apis):
user = user['user']
params = {
# 'login_url': 'http://authserverxg.swu.edu.cn/authserver/login?service=https://swu.cpdaily.com/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay',
'login_url': 'https://ids.henu.edu.cn/authserver/login?service=https%3A%2F%2Fids.henu.edu.cn%2Fauthserver%2Fmobile%2Fcallback%3FappId%3D277935239',
'needcaptcha_url': '',
'captcha_url': '',
'username': user['username'],
'password': user['password']
}
cookies = {}
# 借助上一个项目开放出来的登陆API,模拟登陆
if 'enable' in user:
if user['enable'] == 0:
print('您设定了enable=0,安全模式将不会获取COOKIE,您想要使用的话请修改config.yml里面的到enable=1!')
sendMessage('如果您看到这条消息,请您去github上重新设置您的config。', user, '报错提醒-今日校园自动签到')
exit(888)
if user['usecookies'] == 0:
res = ''
try:
j = 0
for i in range(0, 5):
print("使用config中定义的api")
res = requests.post(config['login']['api'], data=params)
if 'success' not in res.json()['msg']:
time.sleep(5)
print(f'第{j + 1}次未获取到Cookies')
j = j + 1
else:
break
if 'success' not in res.json()['msg']:
print(f'{j}次尝试也没有cookies,可能学校服务器坏了,自己弄吧!')
sendMessage(f'如果您看到这条消息,证明{j}次尝试也没有cookies,可能学校服务器坏了,自己弄吧!', user)
exit(888)
print(res.json())
except Exception as e:
res = requests.post(url='http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login', data=params)
print("使用子墨的API")
if 'success' not in res.json()['msg']:
print('用子墨的API也没有获取到Cookies')
sendMessage(f'如果您看到这条消息,证明子墨的api也没有获取到cookies,可能学校服务器坏了,自己弄吧!', user, '报错提醒-今日校园自动签到')
# cookieStr可以使用手动抓包获取到的cookie,有效期暂时未知,请自己测试
# cookieStr = str(res.json()['cookies'])
cookieStr = str(res.json()['cookies'])
print('已从API获取到Cookie')
# exit(999)
else:
cookieStr = user['cookies']
print('使用文件内Cookie')
print(cookieStr)
# log(cookieStr) 调试时再输出
# if cookieStr == 'None':
# log(res.json())
# exit(-1)
# log(cookieStr)
# 解析cookie
for line in cookieStr.split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
session = requests.session()
session.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar=None, overwrite=True)
return session
# 获取最新未签到任务并全部签到
def getUnSignedTasksAndSign(session, apis, user):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0',
'content-type': 'application/json',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'Content-Type': 'application/json;charset=UTF-8'
}
print(headers)
# 第一次请求每日签到任务接口,主要是为了获取MOD_AUTH_CAS
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']),
headers=headers, data=json.dumps({}))
# 第二次请求每日签到任务接口,拿到具体的签到任务
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']),
headers=headers, data=json.dumps({}))
print(res.json())
if len(res.json()['datas']['unSignedTasks']) < 1:
log('当前没有未签到任务')
sendMessage('当前没有未签到任务', user['user'])
exit(0)
elif time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
print('未在签到时间,等会再来吧!')
sendMessage('自定义限制:未在签到时间,等会再来吧!', user['user'])
# exit(8)
# TODO 删掉
# log(res.json())
for i in range(0, len(res.json()['datas']['unSignedTasks'])):
# if '出校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False:
# if '入校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False:
latestTask = res.json()['datas']['unSignedTasks'][i]
params = {
'signInstanceWid': latestTask['signInstanceWid'],
'signWid': latestTask['signWid']
}
task = getDetailTask(session, params, apis, user)
print(task)
if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
print('未在签到时间,等会再来吧!')
form = fillForm(task, session, user, apis)
print(form)
if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
print('未在签到时间,等会再来吧!')
submitForm(session, user, form, apis)
# 获取签到任务详情
def getDetailTask(session, params, apis, user):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0',
'content-type': 'application/json',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'Content-Type': 'application/json;charset=UTF-8'
}
print(headers)
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/detailSignInstance'.format(host=apis['host']),
headers=headers, data=json.dumps(params))
data = res.json()['datas']
return data
# 填充表单
def fillForm(task, session, user, apis):
user = user['user']
form = {}
if task['isPhoto'] == 1:
fileName = uploadPicture(session, user['photo'], apis)
form['signPhotoUrl'] = getPictureUrl(session, fileName, apis)
else:
form['signPhotoUrl'] = ''
if task['isNeedExtra'] == 1:
extraFields = task['extraField']
defaults = config['cpdaily']['defaults']
extraFieldItemValues = []
for i in range(0, len(extraFields)):
default = defaults[i]['default']
extraField = extraFields[i]
if config['cpdaily']['check'] and default['title'] != extraField['title']:
log('第%d个默认配置项错误,请检查' % (i + 1))
sendMessage('提交错误' + '第%d个默认配置项错误,请检查' % (i + 1), user)
exit(-1)
extraFieldItems = extraField['extraFieldItems']
for extraFieldItem in extraFieldItems:
if extraFieldItem['content'] == default['value']:
extraFieldItemValue = {'extraFieldItemValue': default['value'],
'extraFieldItemWid': extraFieldItem['wid']}
# 其他,额外文本
if extraFieldItem['isOtherItems'] == 1:
extraFieldItemValue = {'extraFieldItemValue': default['other'],
'extraFieldItemWid': extraFieldItem['wid']}
extraFieldItemValues.append(extraFieldItemValue)
# log(extraFieldItemValues)
# 处理带附加选项的签到
form['extraFieldItems'] = extraFieldItemValues
# form['signInstanceWid'] = params['signInstanceWid']
form['signInstanceWid'] = task['signInstanceWid']
form['longitude'] = user['lon'] + rand_lon
form['latitude'] = user['lat'] + rand_lat
form['isMalposition'] = user['isMalposition']
form['uaIsCpadaily'] = True
################这个参数一定不能穿帮!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
form['abnormalReason'] = user['abnormalReason']
form['position'] = user['address']
# TODO 这个参数的名称有待考究 需要抓包见分晓!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
return form
# 上传图片到阿里云oss
def uploadPicture(session, image, apis):
url = 'https://{host}/wec-counselor-sign-apps/stu/sign/getStsAccess'.format(host=apis['host'])
res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps({}), verify=not debug)
datas = res.json().get('datas')
fileName = datas.get('fileName')
accessKeyId = datas.get('accessKeyId')
accessSecret = datas.get('accessKeySecret')
securityToken = datas.get('securityToken')
endPoint = datas.get('endPoint')
bucket = datas.get('bucket')
bucket = oss2.Bucket(oss2.Auth(access_key_id=accessKeyId, access_key_secret=accessSecret), endPoint, bucket)
with open(image, "rb") as f:
data = f.read()
bucket.put_object(key=fileName, headers={'x-oss-security-token': securityToken}, data=data)
res = bucket.sign_url('PUT', fileName, 60)
# log(res)
return fileName
# 获取图片上传位置
def getPictureUrl(session, fileName, apis):
url = 'https://{host}/wec-counselor-sign-apps/stu/sign/previewAttachment'.format(host=apis['host'])
data = {
'ossKey': fileName
}
res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(data), verify=not debug)
photoUrl = res.json().get('datas')
return photoUrl
# DES加密
def DESEncrypt(s, key='b3L26XNL'):
key = key
iv = b"\x01\x02\x03\x04\x05\x06\x07\x08"
k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5)
encrypt_str = k.encrypt(s)
# print(encrypt_str)
print(f'加密结束的内容为:{base64.b64encode(encrypt_str).decode()}')
return base64.b64encode(encrypt_str).decode()
# 提交签到任务
def submitForm(session, user, form, apis):
user = user['user']
# Cpdaily-Extension
extension = {
"lon": user['lon'] + rand_lon,
"model": uanalyse.ua2model(user['ua']),
"appVersion": "9.0.0",
"systemVersion": uanalyse.ua2androidver(user['ua']),
"userId": user['username'],
"systemName": "android",
"lat": user['lat'] + rand_lat,
"deviceId": str(uuid.uuid1())
}
headers = {
'tenantId': 'henu',
'User-Agent': user['ua'] + ' okhttp/3.12.4',
'CpdailyStandAlone': '0',
'extension': '1',
'Cpdaily-Extension': DESEncrypt(json.dumps(extension)),
'Content-Type': 'application/json; charset=utf-8',
'Accept-Encoding': 'gzip',
# 'Host': 'swu.cpdaily.com',
'Connection': 'Keep-Alive'
}
print(extension)
print(headers)
# print('程序还有一步就提交了,已暂停')
# exit(888)
# TODO 设置提交锁的位置
res = session.post(url='https://{host}/wec-counselor-sign-apps/stu/sign/submitSign'.format(host=apis['host']),
headers=headers, data=json.dumps(form))
message = res.json()['message']
if message == 'SUCCESS':
log('自动签到成功')
sendMessage('自动签到成功', user, title='今日校园签到成功通知')
else:
log('自动签到失败,原因是:' + message)
sendMessage('自动签到失败' + message, user)
# sendMessage('自动签到失败,原因是:' + message, user['email'])
exit(0)
# 发送邮件通知
def sendMessage(msg, user, title='[INFO] 今日校园自动签到信息通知'):
if msg.count("未开始") > 0:
return ''
print(user)
try:
if user['useserverchan'] != 0:
log('正在发送微信通知')
log(getTimeStr())
# sendMessageWeChat(msg + getTimeStr(), '今日校园自动签到结果通知')
notification.send_serverchan(user['serverchankey'], title, getTimeStr() + ' ' + msg)
except Exception as e:
log("send failed")
# 主函数
'''def main():
try:
continue
except:
print("有一个user出错啦")
continue
# 提供给腾讯云函数调用的启动函数
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'''
if __name__ == '__main__':
# print(extension)
#print(main_handler({}, {}))
for user in config['users']:
print(user)
apis = getCpdailyApis(user)
session = getSession(user, apis)
getUnSignedTasksAndSign(session, apis, user)
|
[
"yulonger@outlook.com"
] |
yulonger@outlook.com
|
03ab69e575d2a03c8d9095898808b1c4e3877e59
|
6db68bd7f4e792d3df009671c10cbe93f963c5e6
|
/NOC_Chp0/NOC_0_3/walker.py
|
db7d99af07feffb25e3763feabc401da99e501ab
|
[] |
no_license
|
mickardinal/The-Nature-of-Code-Python
|
0ce9125b92707a9de4dd57a77c4a92c04df66467
|
a883e365051826228002317741df7d198eae6dfe
|
refs/heads/master
| 2020-03-11T20:55:21.939021
| 2018-04-25T16:29:06
| 2018-04-25T16:29:06
| 130,250,157
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
class Walker(object):
def __init__(self):
self.x = width/2
self.y = height/2
def display(self):
stroke(0)
point(self.x, self.y)
def step(self):
r = random(0, 1)
if r< 0.4:
self.x += 1
elif r < 0.6:
self.x -= 1
elif r < 0.8:
self.y += 1
else:
self.y -= 1
|
[
"jsrdccsx@gmail.com"
] |
jsrdccsx@gmail.com
|
a86eb97efcd2033e7ba2688689a2d35a96976693
|
48295cd5f8e7a1b1cfda8b9642012611488156ce
|
/users/migrations/0004_auto_20191123_1158.py
|
69f25c440fe48acfb912e16d8e4f514085e401e8
|
[] |
no_license
|
mugglecoder/airbnb-clone
|
0c47445761e9f9fd82805299ddab46e382e9b5a4
|
6276cdeaa13b1a88697b62d322dcb871d9a5e25a
|
refs/heads/master
| 2022-12-10T14:00:47.409310
| 2020-01-05T14:14:39
| 2020-01-05T14:14:39
| 212,250,078
| 0
| 0
| null | 2022-12-10T11:01:26
| 2019-10-02T03:44:49
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
# Generated by Django 2.2.5 on 2019-11-23 02:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20191123_1051'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='email_confirmed',
new_name='email_verified',
),
migrations.AddField(
model_name='user',
name='email_secret',
field=models.CharField(blank=True, default='', max_length=20),
),
]
|
[
"winkknd@naver.com"
] |
winkknd@naver.com
|
43f6176cdac6fed43d610aadb95791ffb1bc8e31
|
5f6e95aa83ca132c732f644c51e786785e9bdd2f
|
/src/e_psu/e_psu/urls.py
|
c6ecbf46fdf3b32c7d3230b71dd508c60b649c90
|
[] |
no_license
|
kerupuksambel/django-e-pantau
|
9905a9902752fd5143e03326a0ab585f09ccb50d
|
bbadcd31984c9bd254ac2cc23a30f55a9fe5b997
|
refs/heads/master
| 2022-12-22T07:28:58.444314
| 2020-10-02T14:35:05
| 2020-10-02T14:35:05
| 300,641,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
"""e_psu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from .views import home_view
urlpatterns = [
path("", home_view, name="home"),
path('admin/', admin.site.urls),
path('admin_kelola/serah_terima/', include("serah_terima.urls")),
path('laporan/', include("laporan.urls")),
path('warga/', include("warga.urls")),
path('admin_kelola/', include("admin_kelola.urls")),
path('admin_skpd/', include("admin_skpd.urls"))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"kerupuksambel.2000@gmail.com"
] |
kerupuksambel.2000@gmail.com
|
1ae7978cbc58218d181868d7280ebd339c401050
|
099f7e9234cd8b3afa6f7cd8cb81a654ca5043ea
|
/models/payment.py
|
38bf0252591bf723514586c8ec8f04e40171c1d6
|
[] |
no_license
|
nazrinshahaf/Nextagram_python
|
1716893e7b4466fec5b9d48fd630e00d01f2b74f
|
8738929ca6f11da6943b9093f05bd445ff58e951
|
refs/heads/master
| 2022-12-11T21:45:58.316999
| 2020-02-04T10:35:55
| 2020-02-04T10:35:55
| 235,014,627
| 0
| 0
| null | 2021-06-02T00:56:25
| 2020-01-20T03:55:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
from models.base_model import BaseModel
import peewee as pw
from models.user import User
from models.user_images import User_images
from config import S3_LOCATION
from playhouse.hybrid import hybrid_property
from flask_login import current_user
class Payment(BaseModel):
user = pw.ForeignKeyField(User, backref='donations')
image = pw.ForeignKeyField(User_images, backref='donations')
amount = pw.IntegerField(null = False, default= 5)
message = pw.TextField(null=True)
|
[
"nazrinfernandez@gmail.com"
] |
nazrinfernandez@gmail.com
|
3947c1886e64b2e14da5a55a34c8661ff9cdde6c
|
ed8c7fba9c5592b14ab79eac399813d9d0537b7d
|
/website/migrations/0001_initial.py
|
fac9f699cda3d22a2ad1bac05bf8e333a9cb5fe6
|
[] |
no_license
|
OpenWebCurtin/Catching-out-corruption
|
4834f7d95393b71009347237aff08f7726049a7a
|
33617c4d01dd33f118aaac4c562948598f6206ba
|
refs/heads/main
| 2023-01-18T19:21:26.723344
| 2020-11-23T13:07:19
| 2020-11-23T13:07:19
| 315,027,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,429
|
py
|
# Generated by Django 2.2.6 on 2019-10-31 16:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='AsyncJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('status', models.IntegerField(choices=[(0, 'Unprocessed'), (1, 'Finished'), (2, 'Error'), (3, 'Unsupported')], default=0)),
],
),
migrations.CreateModel(
name='DocumentResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.CharField(max_length=128)),
('occurs_total', models.IntegerField(default=0)),
('occurs_agenda_items', models.IntegerField(default=0)),
('normalised_score', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filename', models.CharField(max_length=128)),
],
options={
'permissions': [('upload', 'Can upload documents using the PDF upload service.'), ('delete', 'Can delete documents using the file deletion service.'), ('recover', 'Can recover deleted documents using the file recovery service.')],
},
),
migrations.CreateModel(
name='FileDeletionRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('delete_by', models.IntegerField(choices=[(0, 'Delete files by filename.'), (1, 'Delete files by uploader.')], default=0)),
('target_file', models.CharField(blank=True, max_length=128, null=True)),
('target_uploader', models.CharField(blank=True, max_length=128, null=True)),
],
),
migrations.CreateModel(
name='FileRecoveryRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('recover_by', models.IntegerField(choices=[(0, 'Recover files by filename.'), (1, 'Recover files by uploader.')], default=0)),
('target_file', models.CharField(blank=True, max_length=128, null=True)),
('target_uploader', models.CharField(blank=True, max_length=128, null=True)),
],
),
migrations.CreateModel(
name='KeyPhraseOptionSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key_phrase', models.CharField(blank=True, default='', max_length=128)),
('key_phrase_type', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_importance', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
],
),
migrations.CreateModel(
name='PrivilegeModification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('target_user', models.CharField(max_length=128)),
('target_group', models.CharField(choices=[('regular user', 'Regular user'), ('privileged user', 'Privileged user'), ('administrator', 'Administrator')], default=0, max_length=32)),
],
),
migrations.CreateModel(
name='RelationResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kp1', models.CharField(max_length=128)),
('kp2', models.CharField(max_length=128)),
('kp3', models.CharField(max_length=128)),
('kp4', models.CharField(max_length=128)),
('kp5', models.CharField(max_length=128)),
('document', models.CharField(blank=True, default='', max_length=128)),
('agenda_item_file', models.CharField(blank=True, default='', max_length=128)),
('agenda_item', models.CharField(blank=True, default='', max_length=128)),
('description', models.CharField(blank=True, default='', max_length=128)),
('search_type', models.IntegerField()),
],
),
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('search_by', models.IntegerField(choices=[(0, 'Search by relation'), (1, 'Search by document')], default=0)),
('search_t', models.IntegerField(choices=[(0, 'Search minutes'), (1, 'Search non-minutes')], default=0)),
('fbm', models.BooleanField(default=False)),
('fbm_filename', models.CharField(blank=True, default='', max_length=128)),
('fbm_uploader', models.CharField(blank=True, default='', max_length=128)),
('fbm_upload_date_start', models.DateField(null=True)),
('fbm_upload_date_end', models.DateField(null=True)),
('fbc', models.BooleanField(default=False)),
('fbc_council', models.CharField(blank=True, default='', max_length=128)),
('fbc_publish_date_start', models.DateField(null=True)),
('fbc_publish_date_end', models.DateField(null=True)),
('key_phrase1', models.CharField(blank=True, default='', max_length=128)),
('key_phrase2', models.CharField(blank=True, default='', max_length=128)),
('key_phrase3', models.CharField(blank=True, default='', max_length=128)),
('key_phrase4', models.CharField(blank=True, default='', max_length=128)),
('key_phrase5', models.CharField(blank=True, default='', max_length=128)),
('key_phrase_type1', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type2', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type3', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type4', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type5', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_importance1', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance2', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance3', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance4', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance5', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
],
options={
'permissions': [('search', 'Can search using the document search feature.')],
},
),
migrations.CreateModel(
name='AsyncJobType',
fields=[
('job_base', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='website.AsyncJob')),
],
),
migrations.CreateModel(
name='UploadedFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='uploads/')),
('filename', models.CharField(blank=True, default='', max_length=128)),
('type', models.IntegerField(choices=[(0, 'Public minutes document.'), (1, 'Public non-minutes document.'), (2, 'Private non-minutes document.')], default=0)),
('document_category', models.CharField(default='generic', max_length=128)),
('uploader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RecoveryRequestItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')),
('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')),
],
),
migrations.CreateModel(
name='DeletionRequestItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')),
('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')),
],
),
migrations.CreateModel(
name='ProcessingJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('file_name', models.CharField(max_length=128)),
],
bases=('website.asyncjobtype',),
),
migrations.CreateModel(
name='FileRecoveryJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')),
],
bases=('website.asyncjobtype',),
),
migrations.CreateModel(
name='FileDeletionJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('scheduled_time', models.FloatField()),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')),
],
bases=('website.asyncjobtype',),
),
]
|
[
"r.a.clydesdale+bb-ccp@gmail.com"
] |
r.a.clydesdale+bb-ccp@gmail.com
|
10dfdf1f98da77c3edb8bc6c1a987c773d2ff61f
|
bb4e603d41c040114a6161427593e30fad02828b
|
/classwork4.py
|
4db534ef9a08d9437b14bcf379813698bf674fbe
|
[] |
no_license
|
MS-Dok/pythonCore
|
40871c8dc53bee583fb12a6366db2275521d6e6e
|
d0d89997022f0e284626035d6fa61d94183d8f80
|
refs/heads/master
| 2021-07-02T00:52:31.621971
| 2020-10-20T12:29:40
| 2020-10-20T12:29:40
| 184,242,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,404
|
py
|
"""1. Створити список цілих чисел, які вводяться з терміналу та визначити серед них максимальне та мінімальне число.
"""
user_value=int(input("Enter the value: "))
print("Min value is",min([x for x in range(user_value)]))
print("Max value is",max([x for x in range(user_value)]))
"""
2. В інтервалі від 1 до 10 визначити числа
• парні, які діляться на 2,
• непарні, які діляться на 3,
• числа, які не діляться на 2 та 3.
"""
user_input_start,user_input_finish=int(input("Please enter the start value ")),int(input("Please enter the end value "))
print(list([x for x in range(user_input_start,user_input_finish) if x%2==0]))
print(list([x for x in range(user_input_start,user_input_finish) if x%3==0]))
print(list([x for x in range(user_input_start,user_input_finish) if x%3!=0 and x%2!=0]))
"""
3. Написати програму, яка обчислює факторіал числа, яке користувач вводить.(не використовувати рекурсивного виклику функції)
num_list = [int(input("Enter int {}: ".format(i+1))) for i in range(3)]
"""
while True:
user_input=int(input("Enter the value: "))
if user_input>=0:
break
if user_input==0:
"Factorial of 0 is equal to 1"
else:
result=1
for i in range(1,user_input+1):
result*=i
print("Factorial of {} is equal to {}".format(user_input,result))
"""
4. Напишіть скрипт, який перевіряє логін, який вводить користувач.
Якщо логін вірний (First), то привітайте користувача.
Якщо ні, то виведіть повідомлення про помилку.
(використайте цикл while)
"""
while True:
user_input=input("Please enter the login:\n")
if user_input=="First":
break
else:
print("Incorrect login. Please try again\n")
"""
5. Перший випадок.
Написати програму, яка буде зчитувати числа поки не зустріне від’ємне число. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється).
"""
some_array=[]
while True:
user_input=int(input("Please enter the >0 value "))
if user_input >0:
some_array.append(user_input)
else:
break
print(some_array)
"""
6. Другий випадок.
На початку на вхід подається кількість елементів послідовності, а потім самі елементи. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється).
"""
some_array_2=[]
quantity=int(input("Please enter the quantity of numbers "))
i=0
while i<quantity:
value_to_add=int(input("Please enter the value to add "))
if value_to_add>0:
some_array_2.append(value_to_add)
i+=1
else:
print("<=0 value entered. Termination")
break
print(some_array_2)
"""
7. Знайти прості числа від 10 до 30, а всі решта чисел представити у вигляді добутку чисел
(наприклад 10 equals 2 * 5
11 is a prime number
12 equals 2 * 6
13 is a prime number
14 equals 2 * 7
………………….)
"""
list_ex=[x for x in range(10,30)]
for i in list_ex:
if i%2==0:
print("{} equals 2*{}".format(str(i),int(i/2)))
elif i%3==0:
print("{} equals 3*{}".format(str(i),int(i/3)))
else:
print(str(i)+" is primal number")
"""
8. Відсортувати слова в реченні в порядку їх довжини (використати List Comprehensions)
"""
sentence="На початку на вхід подається кількість елементів послідовності а потім самі елементи."
print(sorted([x for x in set(sentence.lower().split())],key=len))
|
[
"noreply@github.com"
] |
MS-Dok.noreply@github.com
|
aa9c14845c14707dc3ac40e78df6b0a435a73c19
|
051fff90eb3fcb1f928c5857992fef351fc1ba04
|
/output/figuresAndTables/makeFinalTables.py
|
92d6e9cd2931bb2d07a7fdcae6dad5e5ed9ca5cd
|
[
"MIT"
] |
permissive
|
AndresYague/Snuppat
|
1503c8a729513d857a04a7963b8256451c9f6cd1
|
8a7f73fbc260bab67b5d38ed1efc628980f5047c
|
refs/heads/master
| 2021-06-08T11:22:11.930896
| 2021-04-08T12:58:53
| 2021-04-08T12:58:53
| 67,886,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
def getKeyList(indx, lst):
    """Return lst[indx] when indx is a valid non-negative index, else "--"."""
    if 0 <= indx < len(lst):
        return lst[indx]
    return "--"
def printTable(storeNamVal):
    '''Print {zz: (name, value)} as the body of a three-column LaTeX table.

    Keys are sorted and laid out column-major over nlines rows: row ii shows
    keys[ii], keys[ii + nlines] and keys[ii + 2*nlines].  Cells past the end
    of the key list are rendered as "--".
    '''
    nCol = 3
    keys = sorted(storeNamVal.keys())
    nEls = len(keys)
    # Integer ceiling of nEls / nCol.  BUG FIX: the original kept nEls/nCol
    # as a float whenever nEls was divisible by nCol, which made ii + nlines
    # a float index and crashed on lst[indx] (TypeError in Python 3).
    nlines = -(-nEls // nCol)

    def pick(indx):
        # Key at position indx, or the "--" placeholder past either end.
        return keys[indx] if 0 <= indx < nEls else "--"

    for ii in range(nlines):
        row = []
        for col in range(nCol):
            zz = pick(ii + col * nlines)
            nam, val = storeNamVal.get(zz, ("--", "--"))
            row.append((nam, zz, val))
        # BUG FIX: the original applied float() to the first two columns
        # unconditionally, so a "--" placeholder there raised ValueError
        # (e.g. for a table with a single entry).
        for nam, zz, val in row[:2]:
            if val != "--":
                print("{} & {} & {:5.2f} & ".format(nam, zz, float(val)), end=" ")
            else:
                print("{} & {} & {} & ".format(nam, zz, val), end=" ")
        nam3, zz3, val3 = row[2]
        if val3 != "--":
            print("{} & {} & {:5.2f}\\\\".format(nam3, zz3, float(val3)))
        else:
            print("{} & {} & {}\\\\".format(nam3, zz3, val3))
def main():
    '''Transform plottedValues.dat into .tex tables.

    First reads ../../data/species.dat to build a zz -> element-name map,
    then walks plottedValues.dat: "#" lines flush and echo the current
    table, data lines accumulate {zz: (name, value)} for printTable().
    '''
    arch = "plottedValues.dat"
    data = "../../data/species.dat"
    # Index zz and names
    zToName = {}
    with open(data, "r") as fread:
        for line in fread:
            lnlst = line.split()
            # zz = column 0 minus column 2 -- presumably mass number minus
            # neutron number, i.e. the proton number; confirm against the
            # species.dat format.
            zz = int(lnlst[0]) - int(lnlst[2])
            name = lnlst[1]
            # Capitalize the species name for display.
            name = name[0].upper() + name[1:]
            zToName[zz] = name
    # Create and print tables
    storeNamVal = {}
    with open(arch, "r") as fread:
        for line in fread:
            if "#" in line:
                # Section delimiter: flush any accumulated table, then echo
                # the header line itself.
                if len(storeNamVal) > 0:
                    printTable(storeNamVal)
                    print()
                    storeNamVal = {}
                print(line)
                continue
            lnlst = line.split()
            if len(lnlst) == 0:
                continue
            zz = int(lnlst[0])
            name = zToName[zz]
            val = lnlst[1]
            storeNamVal[zz] = (name, val)
    # Flush the final table (the file may not end with a "#" line).
    if len(storeNamVal) > 0:
        printTable(storeNamVal)


if __name__ == "__main__":
    main()
|
[
"and.yague@gmail.com"
] |
and.yague@gmail.com
|
03ba849ab901a2dd4684b9660222925b7988aa2f
|
6b7aa3e8a15ab8502094d41f88c72e0fa0a6cc6d
|
/python/algoMonster/dp/knapsackWeightOnly.py
|
e98b2cd035c350fae50709d41ea7b21b5190ac2a
|
[] |
no_license
|
artem-tkachuk/algorithms
|
77f51c0db2467f718ef1ebe3822343282fc8bf39
|
a656bc363d3cf4bb81fa83d0c627bf6f12029943
|
refs/heads/master
| 2023-05-25T12:30:10.094499
| 2023-05-16T03:17:25
| 2023-05-16T03:17:25
| 205,235,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from typing import List
def knapsackWeightOnly(weights: List[int]) -> List[int]:
    """Return all sums achievable by some subset of `weights`.

    Delegates to the bottom-up tabulation helper; the exponential top-down
    variant is kept commented out for reference.
    """
    # return list(knapsackWeightOnly_TopDown_Helper(weights, valuesSet=set()))
    return knapsackWeightOnly_BottomUp_Tabulation_Helper(weights)
# Top-down solution: no memoization, exponential time; kept for reference.
def knapsackWeightOnly_TopDown_Helper(weights: List[int], valuesSet: set) -> List[int]:
    """Accumulate every achievable subset sum of `weights` into `valuesSet`.

    The set is mutated in place and shared across the recursion; it is also
    returned.  NOTE(review): the return annotation says List[int] but a set
    is returned, matching the original behavior.
    """
    count = len(weights)
    # Base case: the empty selection reaches only the sum 0.
    if count == 0:
        return set([0])
    # The full current selection contributes its own total.
    valuesSet.add(sum(weights))
    # Recurse on every sub-list obtained by dropping one element.
    for skip in range(count):
        remaining = weights[:skip] + weights[skip + 1:]
        valuesSet |= knapsackWeightOnly_TopDown_Helper(remaining, valuesSet)
    return valuesSet
# bottom-up tabulation solution
def knapsackWeightOnly_BottomUp_Tabulation_Helper(weights: List[int]) -> List[int]:
    """Return the sorted list of all sums achievable by subsets of `weights`.

    Starts from {0} (the empty subset) and, for each weight w, extends every
    sum reached so far by w.  O(n * S) time for n weights totalling S.

    BUG FIX: this function previously had no implementation, so the public
    entry point knapsackWeightOnly() could not work at all.
    """
    reachable = {0}
    for w in weights:
        reachable |= {s + w for s in reachable}
    return sorted(reachable)
# Testing
# Smoke-test the public entry point (expected: all achievable subset sums).
print(knapsackWeightOnly([1, 3, 3, 5]))
print(knapsackWeightOnly([1, 2, 3]))
|
[
"artemtkachuk@yahoo.com"
] |
artemtkachuk@yahoo.com
|
06a25a1b6196b3b4b67262bea39f8289fb2daa7e
|
c059ed04ed5f72d11dbe3b01e9395bacd28b6e8b
|
/문자열내p와y개수.py
|
fdb32a8a8483982f6580418362fe2487966dd8ad
|
[] |
no_license
|
kimhyewon0/kimhyewon0.github.io
|
532b5feb214d686865b8e6169251de8dca7a2caf
|
eaac275ff5b933e477099c9b4c3a1b69e05fa521
|
refs/heads/master
| 2021-01-23T04:13:25.509101
| 2019-09-22T16:40:30
| 2019-09-22T16:40:30
| 33,710,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
def solution(s):
    """Return True iff s contains equally many 'p's and 'y's, ignoring case."""
    folded = s.upper()
    return folded.count('P') == folded.count('Y')


print(solution("Py"))
|
[
"coope0357@gmail.com"
] |
coope0357@gmail.com
|
ab0b8196c759f436a72d4ad731e16756cc9d4511
|
699cf40f6326b954a40b78e87317a62401bd4c2c
|
/.history/Drowsy_Detection_20210728124624.py
|
935884724404299f8e03c238ed4ff5289a4858c5
|
[] |
no_license
|
KhanhNguyen1308/Python-mediapippe
|
e3927f9c0c6499d8a3ba50a675617b89197dce89
|
981412efd39bd29c34a66afbec88abdabcb47ab9
|
refs/heads/main
| 2023-06-25T18:37:43.234063
| 2021-07-29T11:35:31
| 2021-07-29T11:35:31
| 368,535,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,727
|
py
|
import cv2
import time
import numpy as np
import mediapipe as mp
import tensorflow as tf
from threading import Thread
from head_pose_ratio import head_pose_ratio
from function import draw_point, eye_avg_ratio, put_text
from Angle_head_pose_ratio import head_pose_status, eye_stat
from mode import sleep_mode
# Load the drowsiness TFLite classifier and prepare its I/O tensor metadata.
interpreter = tf.lite.Interpreter('model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Video source: recorded clip (switch to the commented line for a webcam).
cap = cv2.VideoCapture('Video/test_1406.mp4')
# cap = cv2.VideoCapture(0)
pTime = 0            # timestamp of the previous frame (for FPS)
time_active = 0      # completed one-minute intervals
m = 0                # frames in which a face was processed
status = ''
# mediapipe FaceMesh pipeline and drawing helpers.
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh()
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
# Per-frame status strings shown on the overlay.
eye_status = ''
x_status = ''
y_status = ''
z_status = ''
head_status = ''
Drowsy_mode = ''
draw = False
t = 0
ear = 0              # eye aspect ratio of the latest processed face
start_time = time.time()
count = 0            # consecutive-closed-eye frame counter (used by eye_stat)
blink = 0            # blinks in the current one-minute window
blink_perM = 0       # blinks recorded for the last full minute
pre_blink = 0
# Main capture loop: one iteration per video frame; quit with 'q'.
while True:
    ret, img = cap.read()
    ih, iw = img.shape[0], img.shape[1]
    # mediapipe expects RGB; OpenCV delivers BGR.
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = faceMesh.process(imgRGB)
    # NOTE(review): a mediapipe result object is presumably always truthy;
    # the effective "no face" handling happens in the bare except below --
    # consider testing results.multi_face_landmarks instead.
    if results:
        face = []
        Mount = []
        Left_eye = []
        Right_eye = []
        try:
            for face_lms in results.multi_face_landmarks:
                # Convert normalized landmark coordinates to pixels.
                for lm in face_lms.landmark:
                    x, y = int(lm.x * iw), int(lm.y * ih)
                    face.append([x, y])
                # Landmark indices follow the mediapipe FaceMesh topology.
                nose = face[5]
                Left_eye.append([face[249], face[374], face[380], face[382], face[385], face[386]])
                Right_eye.append([face[7], face[145], face[153], face[155], face[158], face[159]])
                Mount.append([face[308], face[317], face[14], face[87], face[61], face[82], face[13], face[312]])
                img = draw_point(img, nose, Left_eye, Right_eye, Mount)
                ear = eye_avg_ratio(Left_eye, Right_eye)
                # Six head-pose ratios feed both the TFLite model and the
                # rule-based status functions.
                x1, x2, x3, x4, x5, x6 = head_pose_ratio(nose, Left_eye, Right_eye)
                input_shape = input_details[0]['shape']
                input_data = np.array((x1, x2, x3, x4, x5, x6), dtype=np.float32)
                interpreter.set_tensor(input_details[0]['index'], input_data)
                interpreter.invoke()
                img = cv2.putText(img, str(x5), (nose[0] - 20, nose[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                img = cv2.putText(img, str(x6), (nose[0] + 20, nose[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                head_status, mode = head_pose_status(x5, x6, x2)
                eye_status, blink, count = eye_stat(ear, count, blink, mode)
                if mode == 1:
                    print(round(ear, 3))
                Drowsy_mode = sleep_mode(mode, ear, blink)
                m += 1
        except:
            # No face detected (multi_face_landmarks is None) or a landmark
            # index was unavailable: fall back to placeholder statuses.
            eye_status = 'None Face'
            x_status = 'None Face'
            y_status = 'None Face'
    # FPS from the inter-frame wall-clock delta.
    cTime = time.time()
    fps = int(1 / (cTime - pTime))
    pTime = cTime
    img = cv2.putText(img, str(m), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
    # Compose the on-screen status overlay.
    text_fps = 'FPS:' + str(fps)
    text_EaR = 'Eye_avg_Ratio: ' + str(round(ear, 2))
    text_Head_pose = 'Head_pose: ' + head_status
    text_ES = 'Eye_Status: ' + eye_status
    text_blink = 'Blink_Num: ' + str(blink)
    text_blink_avg = 'Blink_AVG: ' + str(blink_perM)
    img = put_text(img, text_fps, text_EaR, text_ES, text_blink, text_blink_avg, text_Head_pose)
    cv2.imshow('results', img)
    # Once a minute: record blinks-per-minute and reset the counter.
    if (time.time() - start_time) > 60:
        start_time = time.time()
        time_active += 1
        blink_perM = blink
        pre_blink = blink
        blink = 0
    key = cv2.waitKey(1)
    # if m == 900:
    #     break
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
[
"khanhnguyenduy1308@gmail.com"
] |
khanhnguyenduy1308@gmail.com
|
7d24324bd1f5837946c3a16a2bf594cd700afd24
|
9d53d831b631c5431d625848ca0dbd1e4a02eb78
|
/pybo/models.py
|
7f387f16dda1be32c0a6e106a2f4bc1f0512818a
|
[] |
no_license
|
jghee/Django_pratice
|
2b918f730dc40cd6f0c9881ad1c176906e84de8f
|
859befa7b04df8dd119cd6c8985d0c13edd7521a
|
refs/heads/main
| 2023-06-20T19:56:37.231458
| 2021-07-17T02:43:25
| 2021-07-17T02:43:25
| 383,633,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Question(models.Model):
    # A board question/post written by a user.
    subject = models.CharField(max_length=200)
    content = models.TextField()
    create_date = models.DateTimeField()
    # related_name disambiguates from the User links on Answer/Comment.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_question')
    # Nullable: only set once the question has been edited.
    modify_date = models.DateTimeField(null=True, blank=True)
    # Users who up-voted this question.
    voter = models.ManyToManyField(User, related_name='voter_question')

    def __str__(self):
        # Admin/shell display: the question's subject line.
        return self.subject
class Answer(models.Model):
    # An answer attached to a Question; deleted together with it (CASCADE).
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    content = models.TextField()
    create_date = models.DateTimeField()
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_answer')
    # Nullable: only set once the answer has been edited.
    modify_date = models.DateTimeField(null=True, blank=True)
    # Users who up-voted this answer.
    voter = models.ManyToManyField(User, related_name='voter_answer')
class Comment(models.Model):
    # A comment on EITHER a question or an answer: exactly one of the two
    # nullable foreign keys below is expected to be set.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)
    question = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE)
    answer = models.ForeignKey(Answer, null=True, blank=True, on_delete=models.CASCADE)
|
[
"ghj171937@gmail.com"
] |
ghj171937@gmail.com
|
181d7604566e31eea4b774b2ae9b3356926009e6
|
a40950330ea44c2721f35aeeab8f3a0a11846b68
|
/VTK/Actors/ThreeLine.py
|
e780418bfccbe2f4be8ca077eaf8f0c68c4225b5
|
[] |
no_license
|
huang443765159/kai
|
7726bcad4e204629edb453aeabcc97242af7132b
|
0d66ae4da5a6973e24e1e512fd0df32335e710c5
|
refs/heads/master
| 2023-03-06T23:13:59.600011
| 2023-03-04T06:14:12
| 2023-03-04T06:14:12
| 233,500,005
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
import vtk
# Visualize
# Module-level color table (main() builds its own copy; this one is unused
# by the createLine* helpers).
colors = vtk.vtkNamedColors()
# Create points
# Shared z = 0 corner points referenced by the createLine* builders below.
p0 = [0.0, 0.0, 0.0]
p1 = [1.0, 0.0, 0.0]
p2 = [1.0, 1.0, 0.0]
p3 = [0.0, 1.0, 0.0]
p4 = [2.0, 0.0, 0.0]
p5 = [2.0, 1.0, 0.0]
# Variant 1: a single segment between two points via vtkLineSource.
def createLine1():
    """Build a mapper for one line segment from p1 to p2."""
    source = vtk.vtkLineSource()
    source.SetPoint1(p1)
    source.SetPoint2(p2)
    lineMapper = vtk.vtkPolyDataMapper()
    lineMapper.SetInputConnection(source.GetOutputPort())
    return lineMapper
# Variant 2: a continuous polyline through several points via vtkLineSource.
def createLine2():
    """Build a mapper for the polyline p0 -> p1 -> p2 -> p3."""
    source = vtk.vtkLineSource()
    corners = vtk.vtkPoints()
    for corner in (p0, p1, p2, p3):
        corners.InsertNextPoint(corner)
    source.SetPoints(corners)
    polyMapper = vtk.vtkPolyDataMapper()
    polyMapper.SetInputConnection(source.GetOutputPort())
    return polyMapper
# LineSource multi-point set geometry + topology
def createLine3():  # Multiple segments: one polydata/actor holds several lines
    """Build a mapper for two segments (p0-p1 and p1-p4) by assembling the
    geometry (vtkPoints) and topology (vtkCellArray) explicitly."""
    # Create a vtkPoints object and store the points in it
    points = vtk.vtkPoints()
    points.InsertNextPoint(p0)
    points.InsertNextPoint(p1)
    points.InsertNextPoint(p2)
    points.InsertNextPoint(p3)
    points.InsertNextPoint(p4)
    points.InsertNextPoint(p5)
    # Create a cell array to store the lines in and add the lines to it
    lines = vtk.vtkCellArray()
    # for i in range(0, 5, 2):
    #     line = vtk.vtkLine()
    #     line.GetPointIds().SetId(0, i)
    #     line.GetPointIds().SetId(1, i + 1)
    #     lines.InsertNextCell(line)
    line = vtk.vtkLine()  # a vtkLine has two end-point slots by default
    # print(line.GetPointIds())
    # line.GetPointIds().SetNumberOfIds(4)  # could be resized to N end points
    line.GetPointIds().SetId(0, 0)  # SetId(slot, point_index): first arg is the end-point slot, second the point index
    line.GetPointIds().SetId(1, 1)
    lines.InsertNextCell(line)
    # NOTE(review): the same vtkLine instance is mutated and inserted again;
    # this relies on InsertNextCell copying the id list into the cell array --
    # confirm against the vtkCellArray API.
    line.GetPointIds().SetId(0, 1)
    line.GetPointIds().SetId(1, 4)
    # line.GetPointIds().SetId(2, 4)
    lines.InsertNextCell(line)
    # Create a polydata to store everything in
    linesPolyData = vtk.vtkPolyData()
    # Add the points to the dataset geometry
    linesPolyData.SetPoints(points)
    # Add the lines to the dataset topology
    linesPolyData.SetLines(lines)
    # Setup actor and mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(linesPolyData)
    return mapper
def main():
    """Show the two-segment actor from createLine3() in an interactive window."""
    ren = vtk.vtkRenderer()
    window = vtk.vtkRenderWindow()
    window.SetWindowName("Line")
    window.AddRenderer(ren)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    # Visualize
    palette = vtk.vtkNamedColors()
    ren.SetBackground(palette.GetColor3d("Silver"))
    lineActor = vtk.vtkActor()
    # Swap in createLine1() or createLine2() here to try the other builders.
    lineActor.SetMapper(createLine3())
    lineActor.GetProperty().SetLineWidth(4)
    lineActor.GetProperty().SetColor(palette.GetColor3d("Peacock"))
    ren.AddActor(lineActor)
    window.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
|
[
"443765159@qq.com"
] |
443765159@qq.com
|
59bc5e311c76d97d748a6bf5da5acff9c9eafe2f
|
92e6d757704f9916bbc9374d40d3d575122ab9f7
|
/5-Factory.py
|
df982127becd77e6fd54f11e032f776b36ebc019
|
[] |
no_license
|
TomCranitch/MATH3202-Tutorials
|
2479dced9ef89bff101a9b98a8a94caf30cd5962
|
0a4e81e82f7473b1d993b5a212d9ce2c98fe7aeb
|
refs/heads/master
| 2020-04-27T10:07:13.016853
| 2019-06-14T02:56:02
| 2019-06-14T02:56:02
| 174,240,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
from gurobipy import *
# Set up your data
profit = [10, 6, 8, 4, 11, 9, 3]   # profit per unit sold, per product
P = range(len(profit))             # product indices
n = [4, 2, 3, 1, 1]                # machines available per machine type
M = range(len(n))                  # machine-type indices

# usage[P][M]: machine-hours of type m consumed per unit of product p
usage = [
    [0.5, 0.1, 0.2, 0.05, 0.00],
    [0.7, 0.2, 0.0, 0.03, 0.00],
    [0.0, 0.0, 0.8, 0.00, 0.01],
    [0.0, 0.3, 0.0, 0.07, 0.00],
    [0.3, 0.0, 0.0, 0.10, 0.05],
    [0.2, 0.6, 0.0, 0.00, 0.00],
    [0.5, 0.0, 0.6, 0.08, 0.05]
]

T = range(6)  # planning horizon: six months

# maintenance[T][M]: machines of type m scheduled down in month t
maint = [
    [1, 0, 0, 0, 0],
    [0, 0, 2, 0, 0],
    [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0],
    [1, 1, 0, 0, 0],
    [0, 0, 1, 0, 1]
]

# market[P][T]: maximum sellable units of product p in month t
market = [
    [ 500, 600, 300, 200,   0, 500],
    [1000, 500, 600, 300, 100, 500],
    [ 300, 200,   0, 400, 500, 100],
    [ 300,   0,   0, 500, 100, 300],
    [ 800, 400, 500, 200,1000,1100],
    [ 200, 300, 400,   0, 300, 500],
    [ 100, 150, 100, 100,   0,  60]
]

MAX_STORE = 100        # storage cap per product per month
STORE_COST = 0.5       # holding cost per stored unit per month
FINAL_STORE = 50       # required end-of-horizon stock per product
MONTH_HOURS = 16*24    # working hours per machine per month
mod = Model("Factory Planning")

# Decision variables -- X: units produced, Y: units sold (bounded by market
# demand), S: units stored (bounded by MAX_STORE), Z[t][m]: machines of type
# m taken down for maintenance in month t.
X = [[mod.addVar(vtype=GRB.INTEGER) for t in T] for p in P]
Y = [[mod.addVar(vtype=GRB.INTEGER, ub=market[p][t]) for t in T] for p in P]
S = [[mod.addVar(vtype=GRB.INTEGER, ub=MAX_STORE) for t in T] for p in P]
Z = [[mod.addVar(vtype=GRB.INTEGER) for m in M] for t in T]

# Machine-hour capacity per type and month, reduced by machines down for
# maintenance.
mod.addConstrs((quicksum(usage[p][m] * X[p][t] for p in P) <= MONTH_HOURS*(n[m] - Z[t][m]) for m in M for t in T))
# Inventory balance for every month after the first.
# BUG FIX: the original guard was `t > 1`, which left month 1 unlinked to
# month 0 (S[p][1] was unconstrained, allowing phantom stock); with the
# separate t = 0 constraint below, the balance must hold for all t >= 1.
mod.addConstrs(S[p][t] == S[p][t-1] + X[p][t] - Y[p][t] for p in P for t in T if t > 0)
mod.addConstrs(S[p][t] <= MAX_STORE for p in P for t in T)  # redundant with the ub; kept, harmless
mod.addConstrs(S[p][-1] >= FINAL_STORE for p in P)          # end-of-horizon stock target
mod.addConstrs(S[p][0] == X[p][0] - Y[p][0] for p in P)     # month 0 starts with no stock
# Total maintenance slots per machine type must match the given schedule.
mod.addConstrs(quicksum(Z[t][m] for t in T) == sum(maint[t][m] for t in T) for m in M)

# Maximize sales profit minus storage cost.
mod.setObjective(quicksum(profit[p]*Y[p][t] for p in P for t in T) - quicksum(STORE_COST*S[p][t] for p in P for t in T), GRB.MAXIMIZE)
mod.optimize()

print("\n\n Report Prepared for Factory Planing\n Optimal Cost", mod.objVal, "\n\n")
for p in P:
    print([X[p][t].x for t in T])
print("\n\n Sell \n")
for p in P:
    print([Y[p][t].x for t in T])
print("\n\n Storage \n")
for p in P:
    print([S[p][t].x for t in T])
print("\n\n Maintainence \n")
for m in M:
    print([Z[t][m].x for t in T])
|
[
"tom@cranitch.com.au"
] |
tom@cranitch.com.au
|
586389d67bfb22c131f65413987a6d9937d948a4
|
085773c6b2945589e60022ba8268d2e93a61145f
|
/cha_10_regular_expression/10_7_match.py
|
1eac13208caf382181b89f4b5903da67f81f838d
|
[] |
no_license
|
bj1570saber/muke_Python_July
|
dde67a74882f0bcc72c1aca828922829376a0375
|
96fa464bd0eeb8a922e713700addb548b6ef4727
|
refs/heads/master
| 2020-09-08T14:08:36.963017
| 2020-01-09T08:54:41
| 2020-01-09T08:54:41
| 221,154,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import re
a = 'pyth_pytho0python1pythonn2pythonnnn'

# '*' -- zero or more repetitions of the preceding 'n'
matches = re.findall('python*', a)
print(matches)  # ['pytho', 'python', 'pythonn', 'pythonnnn']
print('~' * 20)

# '+' -- one or more repetitions of the preceding 'n'
matches = re.findall('python+', a)
print(matches)  # ['python', 'pythonn', 'pythonnnn']
print('~' * 20)

# '?' -- zero or one repetition of the preceding 'n'
matches = re.findall('python?', a)
print(matches)  # ['pytho', 'python', 'python', 'python']
print('~' * 20)

# '{1,3}' -- between one and three repetitions of 'n' (greedy)
matches = re.findall('python{1,3}', a)
print(matches)  # ['python', 'pythonn', 'pythonnn']
print('~' * 20)

# '{1,3}?' -- same range, but lazy: consume as few 'n's as possible
matches = re.findall('python{1,3}?', a)
print(matches)  # ['python', 'python', 'python']
|
[
"bj1570saber@gmail.com"
] |
bj1570saber@gmail.com
|
83f0e5e137f2710df1e45e901c6a227e112040d5
|
3a698e77300380546267afacf72568ce8586e4f8
|
/test.py
|
ef88e5117c4ba019bead6794c698caf7f5eff76d
|
[] |
no_license
|
MichaelESwartz/TWITTERWEBBOT
|
10e6934f0bbaada148d9bee6f7c907e08cdccba8
|
e0598ddfd598c0105b0687ab2b63d5a8acb0fbaf
|
refs/heads/master
| 2021-01-20T00:47:23.285010
| 2017-05-02T22:02:31
| 2017-05-02T22:02:31
| 89,189,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
def math(y, z):
    """Return the sum of y and z.

    NOTE(review): the name shadows the stdlib `math` module; kept unchanged
    for backward compatibility with this script.
    """
    return y + z


# BUG FIX: the original used Python-2-only `print` statements, which are a
# SyntaxError on Python 3; the parenthesized form behaves identically on both.
if math(3, 2) == 5:
    print("success")
else:
    print("failed")
|
[
"Michael@Michaels-MacBook-Pro-4.local"
] |
Michael@Michaels-MacBook-Pro-4.local
|
904a7bc9b799b09ef6eb6b12445e53839cc08f7b
|
a02a2da4ca761b74544ab0fe819847074930fed8
|
/demos/incompressible_flow/scalar_transport/almgren-two-grids-check-div-eps-1/config.py
|
0da2495e847aad20e2ced600036b7ce982cbbbb6
|
[] |
no_license
|
marc-nguessan/mrpy
|
40ac7a11404ed97ab5824f4dc8fd57e8d51caf95
|
6fb0bce485234a45bb863f71bc2bdf0a22014de3
|
refs/heads/master
| 2020-12-03T08:28:11.312333
| 2020-01-01T19:05:05
| 2020-01-01T19:05:05
| 231,252,927
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,650
|
py
|
"""...
"""
from math import *
# Definition of time integration
t_ini = 0.
t_end = 0.5
nt = 200                              # number of time steps
dt = (t_end - t_ini) / nt
dt_sc = dt / 20                       # scalar-transport sub-step
# Definition of the printing options
n_print = min(nt, 600)
#n_print = nt
dt_print = (t_end - t_ini) / n_print
# domain size
L = 1
# (x_i, y_i): patch centres -- x_1/y_1, x_2/y_2 and x_5/y_5 are used by
# omega() below; x_3/x_4/y_3/y_4 only appear in its disabled four-patch
# variant.
x_1 = 0.
y_1 = 0.
x_2 = 0.09
y_2 = 0.
x_3 = -0.045
y_3 = 0.045*sqrt(3)
x_4 = -0.045
y_4 = -0.045*sqrt(3)
x_5 = -0.09
y_5 = 0.
# F_i: signed patch strengths (F_5 and F_6 feed the active omega()).
F_1 = -150
F_2 = 50
F_3 = 50
F_4 = 50
F_5 = -100
F_6 = 50
# Definition of the domain dimensions
xmin = -L/2.
xmax = L/2.
ymin = -L/2.
ymax = L/2.
zmin = -L/2.
zmax = L/2.
# Definition of the flow characteristics
Re = 100.
nu = 5.e-4 # (m*m)/s  -- kinematic viscosity
kappa = 5.e-3 # (m*m)/s  -- scalar diffusivity
# Tree dimension
dimension = 2
# Trees min_level
min_level = 2
min_level_sc = 2
# Trees max_level
max_level = 7
max_level_sc = 7
# Tree stencil graduation
stencil_graduation = 1
# Tree stencil prediction
stencil_prediction = 1
# Frequency of the multiresolution transform
mr_freq = 10
# function
# def function(x, y, t=0.):
# from math import sin, cos, exp, pi
# return sin(pi*(x+t))*sin(pi*(y+t))
#def function(x):
#
# from math import tanh
#
# return tanh(50.*abs(x-1./2.))
#def function(x, y):
#
# from math import exp, sqrt
#
# return exp(-30.*sqrt((x+0.5)**2 + (y-0.5)**2)) + exp(-30.*sqrt((x-0.5)**2 + (y+0.5)**2))
# Definition of the boundary conditions dictionary bc_dict
# # bc_dict gives the value of the north, south, west and
# # east values of every variables involved in the flow
# # computation
# ---------------------------------------------------------------------------
# Boundary-condition value functions.
# Every field (u, v, p, s, phi) currently imposes the homogeneous value 0. on
# every face, so all the per-face names are bound to one shared
# implementation.  To customize a single face later, rebind that name to its
# own def with the same (coords, t=0.) signature.
# ---------------------------------------------------------------------------
def _zero_boundary(coords, t=0.):
    """Homogeneous boundary value: always 0."""
    return 0.


u_north = u_south = u_west = u_east = u_back = u_forth = _zero_boundary
v_north = v_south = v_west = v_east = v_back = v_forth = _zero_boundary
p_north = p_south = p_west = p_east = p_back = p_forth = _zero_boundary
s_north = s_south = s_west = s_east = s_back = s_forth = _zero_boundary
phi_north = phi_south = phi_west = phi_east = phi_back = phi_forth = _zero_boundary
# Boundary-condition table: bc_dict[field][face] = (bc_type, value_function),
# where bc_type is one of "dirichlet", "neumann" or "periodic".
bc_dict = {
    "u": {"north": ("neumann", u_north),
          "south": ("neumann", u_south),
          "east": ("dirichlet", u_east),
          "west": ("dirichlet", u_west),
          "back": ("periodic", u_back),
          "forth": ("periodic", u_forth)},
    "v": {"north": ("dirichlet", v_north),
          "south": ("dirichlet", v_south),
          "east": ("neumann", v_east),
          "west": ("neumann", v_west),
          "back": ("periodic", v_back),
          "forth": ("periodic", v_forth)},
    "phi": {"north": ("dirichlet", phi_north),
            "south": ("dirichlet", phi_south),
            "east": ("dirichlet", phi_east),
            "west": ("dirichlet", phi_west),
            "back": ("periodic", phi_back),
            "forth": ("periodic", phi_forth)},
    "s": {"north": ("neumann", s_north),
          "south": ("neumann", s_south),
          "east": ("neumann", s_east),
          "west": ("neumann", s_west),
          "back": ("periodic", s_back),
          "forth": ("periodic", s_forth)},
    "p": {"north": ("neumann", p_north),
          "south": ("neumann", p_south),
          "east": ("neumann", p_east),
          "west": ("neumann", p_west),
          "back": ("neumann", p_back),
          "forth": ("neumann", p_forth)}}
# Name of the prediction operator module used for a given simulation
prediction_operator_module = "mrpy.mr_utils.operators.prediction." + "centered_polynomial_interpolation"
# Threshold parameter
threshold_parameter = 1.e-3
# Threshold speed propagation
threshold_speed_propagation = 1
# Name of the thresholding operator
#thresholding_operator_module = "thresholding_operators." + "harten_thresholding"
thresholding_operator_module = "mrpy.mr_utils.operators.thresholding." + "predictive_thresholding"
# Name of the scheme class used for the time integration
#class_scheme_name = "mrpy.discretization." + "temporal_impl_expl_euler"
class_scheme_name = "mrpy.discretization." + "temporal_radau2A"
# Name of the scheme used for the time integration of the scalar
scalar_scheme = "mrpy.discretization." + "RK4_scalar"
# # Names of the six main spatial operators used for the computation of the
# # simulation: divergence_x, divregence_y, gradient_x, gradient_y, laplacian_x,
# # laplacian_y; the name are divided in two parts:
# # the name of the package where the spatial operators modules are stored and the
# # name of the specific python output module
#gradient_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.gradient"
gradient_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.gradient"
# NOTE(review): divergence/mass/inverse_mass below use the bare
# "spatial_operators." prefix while gradient/laplacian use
# "mrpy.spatial_operators." -- confirm this inconsistency is intentional (all
# the commented alternatives carry the "mrpy." prefix).
#divergence_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.divergence"
divergence_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.divergence"
#laplacian_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.laplacian"
#laplacian_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.laplacian-bis"
laplacian_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.laplacian"
#laplacian_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.laplacian-bis"
#mass_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.mass"
mass_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.mass"
#inverse_mass_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.inverse_mass"
inverse_mass_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.inverse_mass"
# Name of the output file used to print the solution; it is divided in two parts:
# the name of the package where the input/output modules are stored and the
# name of the specific python output module
#output_module_name = "mrpy.io." + "output-1D-gnuplot"
#output_module_name = "mrpy.io." + "output-tikz"
#output_module_name = "mrpy.io." + "output-2D-gnuplot"
output_module_name = "mrpy.io." + "output-xdmf"
# !!!!!! partie ci-dessous a modifier !!!!!!!!
# Definition of a function that gives the exact value of the x-component of the
# velocity over the domain
# Amplitude of the signal
amp = 1.e+0
def u_exact(x, y, t=0.):
# return pi*cos(pi*x)*sin(pi*y)*exp(-2*pi*pi*nu*t)
#return amp*sin(pi*(x+t))*sin(pi*(y+t))
# return exp(-50*(x**2 + y**2))
return 0.
# Definition of a function that gives the exact value of the y-component of the
# velocity over the domain
def v_exact(x, y, t=0.):
# return -pi*sin(pi*x)*cos(pi*y)*exp(-2*pi*pi*nu*t)
#return amp*cos(pi*(x+t))*cos(pi*(y+t))
# return -exp(-50*(x**2 + y**2))
return 0.
# Definition of a function that gives the exact value of the pressure over the
# domain
def p_exact(x, y, t=0.):
# return (pi*pi)/2.*(sin(pi*x)*sin(pi*x) + sin(pi*y)*sin(pi*y))*exp(-4*pi*pi*nu*t)
#return amp*sin(pi*(x-y+t))
return 1.
def sc_init(x, y, t=0.):
return tanh(100*y)
#if y < 0:
# return 0.
#else:
# return 1.
def omega(x, y, t=0.):
#return 100*(exp(-(1/r_0**2)*((x - x_1)**2 + y**2)) + exp(-(1/r_0**2)*((x - x_2)**2 + y**2)))
#return 0.5*F_1*(1 + tanh(100*(0.03 - sqrt((x - x_1)**2 + (y - y_1)**2)))) + \
# 0.5*F_2*(1 + tanh(100*(0.03 - sqrt((x - x_2)**2 + (y - y_2)**2)))) + \
# 0.5*F_3*(1 + tanh(100*(0.03 - sqrt((x - x_3)**2 + (y - y_3)**2)))) + \
# 0.5*F_4*(1 + tanh(100*(0.03 - sqrt((x - x_4)**2 + (y - y_4)**2))))
return 0.5*F_5*(1 + tanh(100*(0.03 - sqrt((x - x_1)**2 + (y - y_1)**2)))) + \
0.5*F_6*(1 + tanh(100*(0.03 - sqrt((x - x_2)**2 + (y - y_2)**2)))) + \
0.5*F_6*(1 + tanh(100*(0.03 - sqrt((x - x_5)**2 + (y - y_5)**2))))
#return 0.
def source_term_function_velocity_x(x, y, t=0.):
#return pi*(2*amp*nu*pi*sin(pi*(x+t))*sin(pi*(y+t)) + \
# amp*cos(pi*(x-y+t)) + amp*sin(pi*(x+y+2*t)) + \
# amp*amp*sin(pi*(x+t))*cos(pi*(x+t)))
return 0.
def source_term_function_velocity_y(x, y, t=0.):
#return pi*(2*amp*nu*pi*cos(pi*(x+t))*cos(pi*(y+t)) - \
# amp*cos(pi*(x-y+t)) - amp*sin(pi*(x+y+2*t)) - \
# amp*amp*sin(pi*(y+t))*cos(pi*(y+t)))
return 0.
|
[
"arthur.nguessan@gmail.com"
] |
arthur.nguessan@gmail.com
|
3dc8a090b2c9403994f512b47d7fc301201b29e0
|
4d5c542f56dad6668dd30be7693ac93032adfe4c
|
/app.py
|
cbb0742b6876948e3d5b1447ee18a3993a7ff68a
|
[] |
no_license
|
maanbosa/restful-flask
|
0044cd6179f6877b14f46c29e11f094dbee68946
|
c587b17276929cf567d22ed4bcc51676308a8ad5
|
refs/heads/master
| 2023-02-01T23:08:48.615079
| 2020-12-11T19:01:55
| 2020-12-11T19:01:55
| 320,643,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList
app = Flask(__name__)
# SQLite database file in the working directory.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
# Disable SQLAlchemy's modification-tracking event system (saves overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Let Flask-JWT errors propagate to its own handlers instead of a generic 500.
app.config['PROPAGATE_EXCEPTIONS'] = True
# NOTE(review): hard-coded secret key -- load from the environment for
# anything beyond local development.
app.secret_key = 'jose'
api = Api(app)

jwt = JWT(app, authenticate, identity)  # /auth  (JWT login endpoint)

# REST resource routes.
api.add_resource(Store, '/store/<string:name>')
api.add_resource(StoreList, '/stores')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')

if __name__ == '__main__':
    # Deferred import -- presumably avoids a circular import with the
    # resource modules; confirm.
    from db import db
    db.init_app(app)
    app.run(port=5000, debug=True)
|
[
"maanbosa@gmail.com"
] |
maanbosa@gmail.com
|
b39d2e8a3337080a1c893fd9b36e4a0743b7a421
|
cf431dd9967ba3de7732541e42412fa9bd2bf4ba
|
/todo/models.py
|
555d0d4228bdccf2f6ba5aa1e9bbc0c89d8b036b
|
[] |
no_license
|
ansu5555/TaskManager
|
de073c53b50fa876118b03f564cea5a51fbc947c
|
a6a743a2fbe9203afc0694f89bbe938ace6f843a
|
refs/heads/master
| 2021-09-06T22:35:08.632556
| 2018-02-12T17:46:27
| 2018-02-12T17:46:27
| 12,598,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
from django.db import models
# Create your models here.
class todo_lists(models.Model):
    # A single to-do item.
    todo_crtdt = models.DateTimeField(auto_now_add=True)  # creation timestamp, set once
    todo_detail = models.CharField(max_length=100)        # task description
    todo_duedt = models.DateTimeField()                   # due date/time
    todo_complete = models.BooleanField(default=False)    # completion flag
|
[
"ansuman5555@gmail.com"
] |
ansuman5555@gmail.com
|
18eaf4480da5398f037854fd148de9adc33abbe1
|
d8940b6d45c15a84c8ee1ab298c4df8a905f956c
|
/pysnooper/__init__.py
|
4b6ea5bc1ee65f9e361836555c20c181a5e8e0ff
|
[
"MIT"
] |
permissive
|
Karanxa/PySnooper
|
f179c3e23627979c3a58664b966c9ae4cfa522a2
|
22f63ae09bb6d63de86496d613815ee03d191b75
|
refs/heads/master
| 2023-05-27T14:23:00.604201
| 2021-06-11T15:06:55
| 2021-06-11T15:06:55
| 376,061,317
| 1
| 0
|
MIT
| 2021-06-11T15:06:55
| 2021-06-11T15:04:02
| null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
'''
PySnooper - Never use print for debugging again
Usage:
import pysnooper
@pysnooper.snoop()
def your_function(x):
...
A log will be written to stderr showing the lines executed and variables
changed in the decorated function.
For more information, see https://github.com/cool-RR/PySnooper
'''
# Public API: `snoop` decorator and the variable-watch helpers.
from .tracer import Tracer as snoop
from .variables import Attrs, Exploding, Indices, Keys

import collections

# Structured (major, minor, micro) triple derived from the canonical
# __version__ string below.
__VersionInfo = collections.namedtuple('VersionInfo',
                                       ('major', 'minor', 'micro'))

__version__ = '0.5.0'
__version_info__ = __VersionInfo(*(map(int, __version__.split('.'))))

del collections, __VersionInfo # Avoid polluting the namespace
|
[
"ram@rachum.com"
] |
ram@rachum.com
|
b0b09977413df66d842b53b5df6ee0e5dec3c57e
|
34096e5f3d6569e3aaee794bf8ccc0b04f2c8c8f
|
/docusign_esign/models/offline_attributes.py
|
077596d786a847ed9812880ca9dd352e3e55323a
|
[
"MIT"
] |
permissive
|
hunk/docusign-python-client
|
5c96de8a08973fe1744d902b2a3873a7376a62c7
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
refs/heads/master
| 2021-06-14T06:41:23.298368
| 2020-04-01T05:51:08
| 2020-04-01T05:51:08
| 254,482,059
| 0
| 0
|
MIT
| 2020-04-09T21:28:23
| 2020-04-09T21:28:23
| null |
UTF-8
|
Python
| false
| false
| 7,470
|
py
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OfflineAttributes(object):
    """
    Swagger model holding offline-signing metadata (device info, GPS
    coordinates, signing hash) for a DocuSign envelope.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, account_esign_id=None, device_model=None, device_name=None, gps_latitude=None, gps_longitude=None, offline_signing_hash=None):
        """
        OfflineAttributes - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Swagger type of each python attribute (all plain strings here).
        self.swagger_types = {
            'account_esign_id': 'str',
            'device_model': 'str',
            'device_name': 'str',
            'gps_latitude': 'str',
            'gps_longitude': 'str',
            'offline_signing_hash': 'str'
        }

        # JSON key used for each python attribute in the API payload.
        self.attribute_map = {
            'account_esign_id': 'accountEsignId',
            'device_model': 'deviceModel',
            'device_name': 'deviceName',
            'gps_latitude': 'gpsLatitude',
            'gps_longitude': 'gpsLongitude',
            'offline_signing_hash': 'offlineSigningHash'
        }

        self._account_esign_id = account_esign_id
        self._device_model = device_model
        self._device_name = device_name
        self._gps_latitude = gps_latitude
        self._gps_longitude = gps_longitude
        self._offline_signing_hash = offline_signing_hash

    @property
    def account_esign_id(self):
        """
        Gets the account_esign_id of this OfflineAttributes.
        A GUID identifying the account associated with the consumer disclosure

        :return: The account_esign_id of this OfflineAttributes.
        :rtype: str
        """
        return self._account_esign_id

    @account_esign_id.setter
    def account_esign_id(self, account_esign_id):
        """
        Sets the account_esign_id of this OfflineAttributes.
        A GUID identifying the account associated with the consumer disclosure

        :param account_esign_id: The account_esign_id of this OfflineAttributes.
        :type: str
        """
        self._account_esign_id = account_esign_id

    @property
    def device_model(self):
        """
        Gets the device_model of this OfflineAttributes.
        A string containing information about the model of the device used for offline signing.

        :return: The device_model of this OfflineAttributes.
        :rtype: str
        """
        return self._device_model

    @device_model.setter
    def device_model(self, device_model):
        """
        Sets the device_model of this OfflineAttributes.
        A string containing information about the model of the device used for offline signing.

        :param device_model: The device_model of this OfflineAttributes.
        :type: str
        """
        self._device_model = device_model

    @property
    def device_name(self):
        """
        Gets the device_name of this OfflineAttributes.
        A string containing information about the type of device used for offline signing.

        :return: The device_name of this OfflineAttributes.
        :rtype: str
        """
        return self._device_name

    @device_name.setter
    def device_name(self, device_name):
        """
        Sets the device_name of this OfflineAttributes.
        A string containing information about the type of device used for offline signing.

        :param device_name: The device_name of this OfflineAttributes.
        :type: str
        """
        self._device_name = device_name

    @property
    def gps_latitude(self):
        """
        Gets the gps_latitude of this OfflineAttributes.
        A string containing the latitude of the device location at the time of signing.

        :return: The gps_latitude of this OfflineAttributes.
        :rtype: str
        """
        return self._gps_latitude

    @gps_latitude.setter
    def gps_latitude(self, gps_latitude):
        """
        Sets the gps_latitude of this OfflineAttributes.
        A string containing the latitude of the device location at the time of signing.

        :param gps_latitude: The gps_latitude of this OfflineAttributes.
        :type: str
        """
        self._gps_latitude = gps_latitude

    @property
    def gps_longitude(self):
        """
        Gets the gps_longitude of this OfflineAttributes.
        A string containing the longitude of the device location at the time of signing.

        :return: The gps_longitude of this OfflineAttributes.
        :rtype: str
        """
        return self._gps_longitude

    @gps_longitude.setter
    def gps_longitude(self, gps_longitude):
        """
        Sets the gps_longitude of this OfflineAttributes.
        A string containing the longitude of the device location at the time of signing.

        :param gps_longitude: The gps_longitude of this OfflineAttributes.
        :type: str
        """
        self._gps_longitude = gps_longitude

    @property
    def offline_signing_hash(self):
        """
        Gets the offline_signing_hash of this OfflineAttributes.

        :return: The offline_signing_hash of this OfflineAttributes.
        :rtype: str
        """
        return self._offline_signing_hash

    @offline_signing_hash.setter
    def offline_signing_hash(self, offline_signing_hash):
        """
        Sets the offline_signing_hash of this OfflineAttributes.

        :param offline_signing_hash: The offline_signing_hash of this OfflineAttributes.
        :type: str
        """
        self._offline_signing_hash = offline_signing_hash

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models, lists and dicts of models.
        """
        result = {}

        # dict.items() replaces six.iteritems(); identical on Python 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.
        """
        # Bug fix: the original accessed other.__dict__ unconditionally and
        # raised AttributeError when compared with non-model objects.
        if not isinstance(other, OfflineAttributes):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
|
[
"noreply@github.com"
] |
hunk.noreply@github.com
|
6017f8bc5e80a39ea78cc67cbc7474a53ad39874
|
4d259f441632f5c45b94e8d816fc31a4f022af3c
|
/tornado/mongodb/client.py
|
df52fa27df3ea41b18e3d682e2bcf182a9f48e30
|
[] |
no_license
|
xiaoruiguo/lab
|
c37224fd4eb604aa2b39fe18ba64e93b7159a1eb
|
ec99f51b498244c414b025d7dae91fdad2f8ef46
|
refs/heads/master
| 2020-05-25T01:37:42.070770
| 2016-05-16T23:24:26
| 2016-05-16T23:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
import httplib2
from urllib import urlencode

# Smoke-test script (Python 2) for the tornado/mongodb articles REST service:
# POSTs four articles, then GETs them back, all against a local server.
# NOTE(review): httplib2.Http.request() returns (response, content) in that
# order, so the `content, response = ...` unpackings below look swapped —
# the value printed as `response` is actually the body content. Confirm
# against the running server before renaming (printed output is the same).
h = httplib2.Http()

## Add articles
data = {'id':'1', 'author':'B', 'genre':'comedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body)

data = {'id':'1', 'author':'C', 'genre':'comedys'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body)

data = {'id':'2', 'author':'A', 'genre':'tragedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body)

data = {'id':'3', 'author':'X', 'genre':'tragedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body)

## View all articles
content, response = h.request("http://127.0.0.1:8888/articles", "GET")
print '------- all articles -------'
print response

## View articles (single article, selected by query string)
print '------- per articles -------'
data = {"articleid":1}
data = urlencode(data)
content, response = h.request("http://127.0.0.1:8888/articles"+ "?" + data, "GET")
#for res in response:
#    print res
print response

## Delete articles
#content, response = h.request("http://127.0.0.1:8888/articles", "DELETE")
#content, response = h.request("http://127.0.0.1:8888/articles", "GET")
#print response
|
[
"junmein@junmeinde-macbook-pro-3.local"
] |
junmein@junmeinde-macbook-pro-3.local
|
74044ba31aee8febadeb0b5bdf8ad33d30405070
|
575bdfbcc1eef8a0c38b60292dc992aa3e9dab90
|
/2_Regression/Simple_Linear_Regression.py
|
11da5c04873b45adc42f921c7dfcf874c0f31693
|
[] |
no_license
|
saimahesh-geek/machine-learning
|
36ffce0cd08f4046e52f28ca8b0e9329d6346239
|
ca5ccea924c5cce9ae8046b139d80b4a661accdb
|
refs/heads/master
| 2020-04-05T22:21:31.637849
| 2018-11-19T09:44:04
| 2018-11-19T09:44:04
| 157,253,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# Importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: Salary_Data.csv — presumably one feature column
# (years of experience) and one target column (salary); verify the CSV.
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values  # every column except the last -> feature matrix
y = dataset.iloc[:, -1].values   # last column -> target vector

# Splitting the dataset into training and test sets (2/3 train, 1/3 test).
# sklearn.cross_validation is deprecated; model_selection is the replacement.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)

# Fitting Simple Linear Regression to the training set.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the test set results.
y_pred = regressor.predict(X_test)

# Visualising the training set results: data points in red, fitted line in blue.
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary VS Experience (Training Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()

# Visualising the test set results: actual values in red, predictions in
# yellow, against the line fitted on the training data.
plt.scatter(X_test, y_test, color = 'red')
plt.scatter(X_test, y_pred, color = 'yellow')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary VS Experience (Test Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
|
[
"saimahesh.cse@gmail.com"
] |
saimahesh.cse@gmail.com
|
51e97447fab2edd2d535b5f7d4cd8faff5ee62e1
|
cf833f507001409066a1aa1716161c6fcaea846b
|
/share/qt/clean_mac_info_plist.py
|
ee26b3c1fa50df733839f9002a0fcae496c788f2
|
[
"MIT"
] |
permissive
|
Dubaicash/Dubaicash
|
b963eaa3865eb9e25fac50a5c874944a71845d59
|
5dc7df7271db82691441169b47f2409e209696da
|
refs/heads/master
| 2021-01-25T11:16:00.580273
| 2018-10-29T19:51:15
| 2018-10-29T19:51:15
| 123,389,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Dubaicash-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Dubaicash-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
[
"dubaicashdev@gmail.com"
] |
dubaicashdev@gmail.com
|
6a61782fcfd4338c981fee4050af6ec266ed0558
|
86cdb209d9dd3dda040ca5469b68000a0d5c311a
|
/RobustIntegerKnapsackModel.py
|
086075eae4cb3fe0070696265153dd96db6164d7
|
[] |
no_license
|
ToledanoDiego/KnapsackProblem
|
40c9dd9d86036756e09cdc8a5006ede526378565
|
67ef1eb817eb65a180ba4b5c9fcb9a2694fba030
|
refs/heads/main
| 2023-08-19T03:48:13.440723
| 2021-10-04T14:55:37
| 2021-10-04T14:55:37
| 413,455,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
"""A Pyomo representation of the Robust Integer Knapsack Problem.
Run the Integer Knapsack Problem with instances found in Instances/
and write results into RobustIntegerKnapsackSolution.txt.
Typical usage example:
python3 RobustIntegerKnapsackModel.py
"""
import random
import pyomo.environ as pyo
from pyomo.environ import *
import pyomo.kernel as pmo
import os
import warnings
import time
time_start = time.perf_counter()

warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore")

counter = 0
for filename in os.listdir('Instances'):
    time_start = time.perf_counter()

    # Parse one instance: first line = item count, last line = capacity,
    # each middle line = "<id> <profit> <nominal weight> <robust weight>".
    with open('Instances/' + filename, "r") as instance_file:
        data = instance_file.read().splitlines()

    index = []
    profit = []
    nominalWeight = []
    robustWeight = []
    solId = []

    numberOfItems = int(data[0])
    data.pop(0)
    capacityOfKnapsack = int(data[-1])
    data.pop(-1)

    for item in data:
        index.append(int(item.split()[0]) + 1)  # shift ids to 1..n
        profit.append(int(item.split()[1]))
        nominalWeight.append(int(item.split()[2]))
        robustWeight.append(int(item.split()[3]))

    v = {index[i]: profit[i] for i in range(len(index))}
    w = {index[i]: nominalWeight[i] for i in range(len(index))}
    # Robust weight: nominal plus a 10% deviation margin.
    r = {index[i]: nominalWeight[i] + 10 / 100 * nominalWeight[i] for i in range(len(index))}

    # Budget of uncertainty: only Gamma randomly chosen items keep their
    # deviation; all others are forced back to their nominal weight.
    Gamma = int(10 / 100 * len(r))
    tmp = random.sample(range(numberOfItems), Gamma)
    tmp = [i + 1 for i in tmp]
    # BUG FIX: iterate the actual keys (1..n). The original looped over
    # range(len(r)) (0..n-1), which inserted a bogus key 0 and never
    # zeroed the deviation of the last item.
    for i in r:
        if i not in tmp:
            r[i] = 0

    M = ConcreteModel()  # Pyomo.
    M.ITEMS = Set(initialize=v.keys())
    M.x = Var(M.ITEMS, within=pmo.NonNegativeIntegers)
    M.value = Objective(expr=sum(v[i] * M.x[i] for i in M.ITEMS), sense=maximize)
    M.weight = Constraint(expr=sum((w[i] + r[i]) * M.x[i] for i in M.ITEMS) <= capacityOfKnapsack)

    S = pyo.SolverFactory('cplex')
    results = S.solve(M)

    # Recover objective value and the multiset of chosen item ids.
    sol = 0
    for var in M.component_objects(Var, active=True):
        for idx in var:  # renamed: the original shadowed the `index` list
            sol = sol + var[idx].value * v[idx]
            if var[idx].value > 0:
                for j in range(int(var[idx].value)):
                    solId.append(idx)

    time_elapsed = (time.perf_counter() - time_start)
    # Append one pipe-separated result line per instance.
    with open("RobustIntegerKnapsackSolutions.txt", "a") as out:
        out.write(str(filename) + '|' + str(sol) + '|' + str(time_elapsed) + '|' + str(solId) + '\n')
    counter = counter + 1
    print(counter)
|
[
"noreply@github.com"
] |
ToledanoDiego.noreply@github.com
|
3a0f200b06d77ef08f908fd0474fe8e95f74cb21
|
b68fea9d645de59ee31da970d3dc435460fde9de
|
/discussboard/views_edit.py
|
a7cc8324343a334ab42398e43c09249b9d270868
|
[
"BSD-3-Clause"
] |
permissive
|
shagun30/djambala-2
|
03fde4d1a5b2a17fce1b44f63a489c30d0d9c028
|
06f14e3dd237d7ebf535c62172cfe238c3934f4d
|
refs/heads/master
| 2021-01-10T04:20:30.735479
| 2008-05-22T05:02:08
| 2008-05-22T05:02:08
| 54,959,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,026
|
py
|
# -*- coding: utf-8 -*-
"""
/dms/discussboard/views_edit.py
.. enthaelt den View zum Aendern der Eigenschaften des Diskussionsforums
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.02 21.05.2008 get_role_choices
0.01 12.07.2007 Beginn der Arbeit
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django import newforms as forms
from django.db import transaction
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.roles import *
from dms.utils import get_tabbed_form
from dms.utils import info_slot_to_header
from dms.utils import get_parent_section_choices
from dms.utils import remove_link_icons
from dms.utils import get_choices_new_protected
from dms.utils_form import get_folderish_vars_edit
from dms.encode_decode import decode_html
from dms.discussboard.utils import get_dont
from dms.discussboard.help_form import help_form
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def discussboard_edit(request, item_container):
    """ Edit the properties of the discussion board (folder). """
    params = request.GET.copy()
    # A 'profi' query parameter switches the info-slot widget to a plain
    # textarea (no rich-text editor id) for expert users.
    profi_mode = params.has_key('profi')

    @transaction.commit_manually
    def save_values(item_container, old, new):
        """ Persist the changed values on container, item and modification metadata. """
        item_container.container.save_values(old, new)
        item_container.item.save_values(old, new)
        item_container.save_modified_values(old, new)
        transaction.commit()

    class dms_itemForm ( forms.Form ) :
        # All editable discussion-board properties; grouped into tabs below.
        title = forms.CharField(max_length=240,
                                widget=forms.TextInput(attrs={'size':60}) )
        nav_title = forms.CharField(max_length=60,
                                    widget=forms.TextInput(attrs={'size':30}) )
        sub_title = forms.CharField(required=False, max_length=240,
                                    widget=forms.TextInput(attrs={'size':60}) )
        text = forms.CharField(required=False,
                               widget=forms.Textarea(attrs={'rows':5, 'cols':60, 'id':'ta',
                                                            'style':'width:100%;'}) )
        text_more = forms.CharField(required=False,
                                    widget=forms.Textarea(attrs={'rows':10, 'cols':60, 'id':'ta1',
                                                                 'style':'width:100%;'}) )
        image_url = forms.CharField(required=False, max_length=200,
                                    widget=forms.TextInput(attrs={'size':60}) )
        image_url_url = forms.URLField(required=False, max_length=200,
                                       widget=forms.TextInput(attrs={'size':60}) )
        image_extern = forms.BooleanField(required=False)
        is_wide = forms.BooleanField(required=False)
        is_important = forms.BooleanField(required=False)
        if profi_mode:
            # Expert mode: no 'ta2' id, so no rich-text editor is attached.
            info_slot_right= forms.CharField(required=False, widget=forms.Textarea(
                             attrs={'rows':10, 'cols':60, 'style':'width:100%;'}) )
        else:
            info_slot_right= forms.CharField(required=False, widget=forms.Textarea(
                             attrs={'rows':10, 'cols':60, 'id':'ta2', 'style':'width:100%;'}) )
        section = forms.CharField(required=False,
                                  widget=forms.Select(choices=get_parent_section_choices(item_container),
                                                      attrs={'size':4, 'style':'width:40%'} ) )
        has_user_support = forms.BooleanField(required=False)
        has_comments = forms.BooleanField(required=False)
        is_moderated = forms.BooleanField(required=False)
        is_browseable = forms.BooleanField(required=False)
        visible_start = forms.DateField(input_formats=['%d.%m.%Y'],
                                        widget=forms.TextInput(attrs={'size':10}))
        visible_end = forms.DateField(input_formats=['%d.%m.%Y'],
                                      widget=forms.TextInput(attrs={'size':10}))
        show_next = forms.BooleanField(required=False)
        integer_4 = forms.ChoiceField(choices=get_choices_new_protected(), widget=forms.RadioSelect() )

    app_name = 'discussboard'
    my_title = _(u'Diskussionsforum ändern')
    # Initial form values taken from the current item/container state.
    data_init = {
        'title' : decode_html(item_container.item.title),
        'nav_title' : decode_html(item_container.container.nav_title),
        'sub_title' : item_container.item.sub_title,
        'text' : remove_link_icons(item_container.item.text),
        'text_more' : remove_link_icons(item_container.item.text_more),
        'image_url' : item_container.item.image_url,
        'image_url_url' : item_container.item.image_url_url,
        'image_extern' : item_container.item.image_extern,
        'is_wide' : item_container.item.is_wide,
        'is_important' : item_container.item.is_important,
        'info_slot_right' : info_slot_to_header(item_container.item.info_slot_right),
        'section' : decode_html(item_container.section),
        'has_comments' : item_container.item.has_comments,
        'has_user_support': item_container.item.has_user_support,
        'is_moderated' : item_container.item.is_moderated,
        'is_browseable' : item_container.is_browseable,
        'visible_start' : item_container.visible_start,
        'visible_end' : item_container.visible_end,
        'integer_4' : item_container.item.integer_4
    }
    if request.method == 'POST' :
        data = request.POST.copy ()
    else :
        data = data_init
    f = dms_itemForm ( data )
    # --- tab order, headings and help texts // special case: start page
    tabs = [
        ('tab_base' , ['title', 'sub_title', 'nav_title', 'section', ]),
        ('tab_intro' , ['text', 'text_more', 'image_url', 'image_url_url', 'image_extern',
                        'is_wide', 'is_important']),
        ('tab_user_support', ['has_user_support', 'integer_4', 'is_moderated', 'has_comments']),
        ('tab_frame' , ['info_slot_right',]),
        ('tab_visibility', ['is_browseable', 'visible_start', 'visible_end',]),
    ]
    content = get_tabbed_form(tabs, help_form, app_name ,f)
    if request.method == 'POST' and not f.errors :
        # Valid POST: persist the changes and redirect to the board's index page.
        save_values(item_container, data_init, f.data)
        return HttpResponseRedirect(get_site_url(item_container, 'index.html'))
    else :
        # GET or invalid POST: re-render the edit form.
        vars = get_folderish_vars_edit(request, item_container, app_name, my_title, content, f, get_dont())
        return render_to_response ( 'app/base_edit.html', vars )
|
[
"hans.rauch@gmx.net"
] |
hans.rauch@gmx.net
|
12686b41f48293ecb798c9a1ddd3b0cd93d17050
|
f8adcf8dd868fda7ba7098eb94e383c7d588bffb
|
/test/readme_example_generate.py
|
b3435a74ea47747f4da915274d866f3ee169057f
|
[
"MIT"
] |
permissive
|
bobyguo/frugally-deep
|
a847ad4fcb31d8d891ae6456d307a0e1e26e062f
|
20e2507e6055d1e64f4cf5a0a9a5a71bf3b3e97e
|
refs/heads/master
| 2020-03-08T08:26:21.410373
| 2018-04-09T09:24:02
| 2018-04-09T09:24:02
| 128,021,948
| 0
| 0
|
MIT
| 2018-04-04T07:08:09
| 2018-04-04T07:08:09
| null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
#!/usr/bin/env python3
import numpy as np
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.models import Model

# Minimal 4-input / 3-class softmax network used to produce the example
# model file referenced by the project's README.
inputs = Input(shape=(4,))
x = Dense(5, activation='relu')(inputs)
predictions = Dense(3, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='nadam')

# Two toy samples are enough to exercise fit() and yield a saveable model.
model.fit(
    np.asarray([[1,2,3,4], [2,3,4,5]]),
    np.asarray([[1,0,0], [0,0,1]]), epochs=10)

# include_optimizer=False omits optimizer state from the saved .h5 file.
model.save('readme_example_model.h5', include_optimizer=False)
|
[
"editgym@gmail.com"
] |
editgym@gmail.com
|
792c81288e99d8d6ff55699c1e6d26a7002d0431
|
8bb062d48354fd7a9cca14c0637871e803a1a8ce
|
/agregator/business/__init__.py
|
69cb3bd528e7daa45010680730ba95ba237eb8f3
|
[] |
no_license
|
denislamard/aggregator
|
b58887fbee4bf5beb833a847e3518d82ff3e3414
|
c040d36dab1b083c1ce2d518af458fc3b19cca6c
|
refs/heads/master
| 2020-06-22T18:26:17.015586
| 2019-07-19T12:49:18
| 2019-07-19T12:49:18
| 197,771,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
import os
import json
FILENAME = "entries.json"
class Agregator:
    """In-memory list of route entries backed by a JSON file on disk.

    Entries are loaded from the JSON file (stored next to this module)
    at construction time and written back after every addition.
    """

    def __init__(self):
        # Stays None only if loading never assigns (open/json errors propagate).
        self._routes = None
        self._loaddata(self.filedata)

    def _loaddata(self, path: str):
        """Populate self._routes from the JSON file at *path*."""
        with open(path) as json_file:
            self._routes = json.load(json_file)

    def _savedata(self, path: str):
        """Write the current routes back to *path* as indented JSON."""
        with open(path, 'w') as json_file:
            json.dump(self._routes, json_file, indent=4)

    def addentry(self, entry: dict):
        """Append *entry* to the routes and persist the whole list to disk.

        Silently does nothing if no routes were ever loaded.
        """
        if self._routes is not None:
            self._routes.append(entry)
            self._savedata(self.filedata)

    def findentry(self, entry: str) -> dict:
        """Return the first route whose 'name' equals *entry*, or None."""
        if self._routes is None:
            return None
        # next() with a default replaces the original manual scan loop.
        return next((route for route in self._routes if route['name'] == entry), None)

    @property
    def routes(self):
        """The in-memory list of route dicts (None before a successful load)."""
        return self._routes

    @property
    def filedata(self):
        """Absolute path of the JSON data file, located next to this module."""
        return os.path.join(os.path.dirname(os.path.realpath(__file__)), FILENAME)
|
[
"noreply@github.com"
] |
denislamard.noreply@github.com
|
99cc6f137b9f513dd32357037e6f41e2231fad35
|
920b9cb23d3883dcc93b1682adfee83099fee826
|
/itsm/project/models/base.py
|
747edf57e059aed9c831fc4991b3f24c7f758c0a
|
[
"MIT",
"LGPL-2.1-or-later",
"LGPL-3.0-only"
] |
permissive
|
TencentBlueKing/bk-itsm
|
f817fb166248d3059857b57d03e8b5ec1b78ff5b
|
2d708bd0d869d391456e0fb8d644af3b9f031acf
|
refs/heads/master
| 2023-08-31T23:42:32.275836
| 2023-08-22T08:17:54
| 2023-08-22T08:17:54
| 391,839,825
| 100
| 86
|
MIT
| 2023-09-14T08:24:54
| 2021-08-02T06:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-ITSM 蓝鲸流程服务 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-ITSM 蓝鲸流程服务 is licensed under the MIT License.
License for BK-ITSM 蓝鲸流程服务:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.db import models
from django.utils.translation import ugettext as _
from itsm.component.constants import LEN_NORMAL
class Model(models.Model):
    """Abstract base model adding audit columns (who created/updated, when)."""
    # Names of the audit fields, e.g. for serializers to include/exclude.
    FIELDS = ('creator', 'create_at', 'updated_by', 'update_at')

    creator = models.CharField(_("创建人"), max_length=LEN_NORMAL, null=True, blank=True)
    create_at = models.DateTimeField(_("创建时间"), auto_now_add=True)  # set once on insert
    update_at = models.DateTimeField(_("更新时间"), auto_now=True)  # refreshed on every save
    updated_by = models.CharField(_("修改人"), max_length=LEN_NORMAL, null=True, blank=True)

    class Meta:
        app_label = 'project'
        # Abstract: contributes fields to subclasses, creates no table itself.
        abstract = True
|
[
"1758504262@qq.com"
] |
1758504262@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.