blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74b88d61b79bd5ebc7c8ea4b42c60bfc7ba59bc5
|
130a98632d2ab4c171503b79e455b7aa27a1dda4
|
/models/official/utils/flags/_conventions.py
|
b6f248b443a689f82770c2a725791ec76dc46591
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
aboerzel/German_License_Plate_Recognition
|
d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787
|
6fc53292b1d3ce3c0340ce724c2c11c77e663d27
|
refs/heads/master
| 2023-01-30T18:08:37.339542
| 2023-01-07T07:41:36
| 2023-01-07T07:41:36
| 245,586,430
| 34
| 12
|
MIT
| 2023-01-07T07:41:37
| 2020-03-07T07:16:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Central location for shared argparse convention definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import codecs
import functools
from absl import app as absl_app
from absl import flags
# This codifies help string conventions and makes it easy to update them if
# necessary. Currently the only major effect is that help bodies start on the
# line after flags are listed. All flag definitions should wrap the text bodies
# with help wrap when calling DEFINE_*.
# Wraps to 80 columns; firstline_indent="\n" pushes the help body onto the
# line after the flag listing.
_help_wrap = functools.partial(
    flags.text_wrap, length=80, indent="", firstline_indent="\n")
# Pretty formatting causes issues when utf-8 is not installed on a system.
def _stdout_utf8():
  """Return True if sys.stdout appears able to render UTF-8 output.

  Used to decide whether the pretty help wrapper can be used directly or
  whether BOM characters must be stripped from wrapped help text.
  """
  try:
    codecs.lookup("utf-8")
  except LookupError:
    # The codec database has no utf-8 entry at all.
    return False
  # BUG FIX: CPython usually reports the encoding as lowercase "utf-8", so
  # the original exact comparison with "UTF-8" was almost always False.
  # Also guard against sys.stdout.encoding being None (e.g. redirected).
  encoding = getattr(sys.stdout, "encoding", "") or ""
  return encoding.upper() in ("UTF-8", "UTF8")
# Pick the pretty wrapper when stdout can render UTF-8; otherwise fall back
# to a variant that strips the BOM characters from the wrapped text.
if _stdout_utf8():
  help_wrap = _help_wrap
else:
  def help_wrap(text, *args, **kwargs):
    # Remove U+FEFF (BOM) so non-UTF-8 terminals don't print mojibake.
    return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"")

# Replace None with h to also allow -h for the short help flag.
absl_app.HelpshortFlag.SHORT_NAME = "h"
|
[
"andreas.boerzel@gmx.de"
] |
andreas.boerzel@gmx.de
|
f01c7a44eccbf92d7cc1e3ecbafa94dc41b95918
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/easy/8_15.py
|
86dea0108cdf7dbb420de013fbc38390cad29623
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,388
|
py
|
Python – Reading last N lines of a file
**Prerequisite:** Read a file line-by-line in Python
Given a text file _fname_ , a number _N_ , the task is to read the last N
lines of the file.
As we know, Python provides multiple in-built features and modules for
handling files. Let’s discuss different ways to read last N lines of a file
using Python.
** _File:_**

**Method 1: Naive approach**
In this approach, the idea is to use a negative iterator with the
readlines() function to read all the lines requested by the user from the
end of file.
__
__
__
__
__
__
__
# Python implementation to
# read last N lines of a file
# Function to read
# last N lines of the file
def LastNlines(fname, N):
    """Print the last N lines of the text file *fname* without newline padding."""
    # The context manager guarantees the file is closed afterwards.
    with open(fname) as fh:
        tail = fh.readlines()[-N:]
    # Each stored line already ends in '\n', so suppress print's own newline.
    for entry in tail:
        print(entry, end='')
# Driver Code:
if __name__ == '__main__':
    fname = 'File1.txt'
    N = 3
    try:
        LastNlines(fname, N)
    # BUG FIX: the original `print('File not found'` was missing its closing
    # parenthesis (SyntaxError). Also narrow the bare `except:` to the OS
    # errors open() actually raises, so real bugs are not silently reported
    # as a missing file.
    except OSError:
        print('File not found')
---
__
__
**Output:**
Eighth line
Ninth line
Tenth line
**Method 2: Using OS module and buffering policy**
In this approach, the idea is to work with the buffering policy in Python. A
buffer stores a part of data received from a file stream of the operating
system for a time period it is used and then more data comes in.
The buffer size determines the size of the data that can be stored at a time
until it is used. We have the option to pass an integer to buffering in order
to set buffering policy and if we do not specify any policy then the size of
the buffer depends upon the device’s block size. Usually, the buffer is 4096
or 8192 bytes long. In this approach size of the buffer is 8192 bytes.
Moreover, the **st_size** attribute of os.stat() method in the OS module is
used to represent the size of the file in bytes.
Below is the implementation of the above approach.
__
__
__
__
__
__
__
# Python implementation to
# read last N lines of a file
# Using OS module and buffering policy
# importing os module
import os
# Function to read
# last N lines of the file
def LastNlines(fname, N):
    """Print the last N lines of *fname* by reading backwards in blocks.

    Reads 8192-byte blocks from progressively earlier offsets until at
    least N lines (or the whole file) have been seen, then prints the
    joined tail followed by print()'s trailing newline.
    """
    # Block size for each backward step.
    bufsize = 8192
    # Total size of the file in bytes.
    fsize = os.stat(fname).st_size
    with open(fname) as f:
        blocks = 0
        fetched_lines = []
        while True:
            blocks += 1
            # BUG FIX: clamp the offset at 0. The original could seek to a
            # negative position (ValueError in Python 3) and its f.tell()==0
            # termination test could never fire after readlines() consumed
            # the tail, risking an infinite loop on short files.
            offset = max(fsize - bufsize * blocks, 0)
            f.seek(offset)
            # BUG FIX: assign instead of extend — the original accumulated
            # overlapping re-reads of the tail on every pass.
            fetched_lines = f.readlines()
            # Stop once we have enough lines or have read the whole file.
            if len(fetched_lines) >= N or offset == 0:
                print(''.join(fetched_lines[-N:]))
                break
# Driver Code:
if __name__ == '__main__':
    fname = 'File1.txt'
    N = 3
    try:
        LastNlines(fname, N)
    # Narrowed from a bare `except:` (which would also mask bugs inside
    # LastNlines and even KeyboardInterrupt) to the errors that open() and
    # os.stat() actually raise for a missing file.
    except OSError:
        print('File not found')
---
__
__
**Output:**
Eighth line
Ninth line
Tenth line
**Method 3: Through Exponential search**
In this method, the idea is to use Exponential Search algorithm which is
generally used for searching sorted, unbounded or infinite lists. To get
information about exponential search click here.
This approach uses assert statement which acts as a debugging tool to checks a
condition. The program will continue to execute if the given statement is true
otherwise, it generates an **AssertionError exception**. To get more details
of assert statements click here.
Click here to get familiar with different kinds of use of seek() method.
Below is the implementation of the above approach.
__
__
__
__
__
__
__
# Python implementation to
# read last N lines of a file
# through Exponential search
# Function to read
# last N lines of the file
def LastNlines(fname, N):
    """Return the last N lines of *fname* via exponential back-seeking."""
    # Guard against a negative request. NOTE(review): assert is stripped
    # under `python -O`; an explicit raise would be safer.
    assert N >= 0
    # Offset (bytes before end-of-file) to seek to; doubled each pass.
    pos = N + 1
    # Lines of the current tail window.
    lines = []
    # with() ensures the file is closed when we are done.
    with open(fname) as f:
        # Widen the window until it holds more than N lines.
        while len(lines) <= N:
            try:
                # Seek to `pos` bytes before the end of the file.
                # NOTE(review): in Python 3, a non-zero end-relative seek on
                # a text-mode file raises io.UnsupportedOperation, which
                # subclasses OSError/IOError — so on Python 3 this always
                # falls into the handler below and reads the whole file.
                f.seek(-pos, 2)
            except IOError:
                # File shorter than `pos` bytes (or seek unsupported):
                # read everything from the beginning instead.
                f.seek(0)
                break
            finally:
                # Runs on both the success and the exception path:
                # (re)read all lines from the current position.
                lines = list(f)
            # Double the window for the next attempt.
            pos *= 2
    # Only the last N lines of the final window are wanted.
    return lines[-N:]
# Driver Code:
if __name__ == '__main__':
    fname = 'File1.txt'
    N = 3
    try:
        lines = LastNlines(fname, N)
        for line in lines:
            print(line, end='')
    # Narrowed from a bare `except:` to the file-related errors open()
    # actually raises, so genuine bugs surface instead of being reported
    # as a missing file.
    except OSError:
        print('File not found')
---
__
__
**Output:**
Eighth line
Ninth line
Tenth line
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
3f2ab7df5f4f7fc432c79ef2ed7a0604e20a3ceb
|
e3afad642b98f0c6cdda91b550c080f7cd6fdf4c
|
/epi/10-binary-trees-15-bst/BinaryTreeNode.py
|
dcca3a3d0241a37beb165c28593cec5c1728f1ca
|
[] |
no_license
|
danielcodes/practice-problems
|
50e7ceb71305f69eafcd50c1507f9aa8829a8a2c
|
b06dfaed6b71e5a86dc43940fb15bc2e7f07903f
|
refs/heads/master
| 2020-04-12T06:19:25.552398
| 2016-11-30T02:13:06
| 2016-11-30T02:13:06
| 65,409,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# module for binary trees
class Node:
    """A binary-tree node holding a value plus child and parent links."""

    def __init__(self, value, left=None, right=None):
        """Create a node; omitted children default to None (leaf)."""
        self.value, self.left, self.right = value, left, right
        # The parent link is filled in externally by tree-building code.
        self.parent = None
def printInOrder(root):
    """Return the node values of the tree rooted at *root* in in-order."""
    collected = []
    printInOrderUtil(root, collected)
    return collected


def printInOrderUtil(root, nodes):
    """Recursively append the in-order values of *root*'s subtree to *nodes*."""
    if root is None:
        return
    printInOrderUtil(root.left, nodes)
    nodes.append(root.value)
    printInOrderUtil(root.right, nodes)
|
[
"daniel_1118@hotmail.com"
] |
daniel_1118@hotmail.com
|
7b48d917637fb4d2a301c58da1464aef2f61fbe6
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/tmpconsolecmds.py
|
0c0a4a16136a52bf97658edd60418a96123dc691
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 2,405
|
py
|
# 2015.11.18 11:51:09 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/TmpConsoleCmds.py
import BigWorld
from debug_utils import *
class LanServers:
    """Console helper for discovering LAN servers and connecting to one."""

    def search(self):
        # Start LAN server discovery.
        BigWorld.serverDiscovery.searching = 1

    def stop(self):
        # Stop LAN server discovery.
        BigWorld.serverDiscovery.searching = 0

    def show(self):
        # Print every server discovered so far (Python 2 print statement).
        for server in BigWorld.serverDiscovery.servers:
            print server

    def searchAndConnect(self, owner, user):
        """Search for *owner*'s server and log in as *user* once it is found."""
        self.search()
        self.__owner = owner
        self.__user = user
        # Discovery invokes this notifier whenever the server list changes.
        BigWorld.serverDiscovery.changeNotifier = self.__checkIfFound

    def __checkIfFound(self):
        # Scan the discovered servers for the target owner; on a match,
        # remember the host, stop discovery, and start the login.
        for server in BigWorld.serverDiscovery.servers:
            if server.ownerName == self.__owner:
                self.__host = server.serverString
                del self.__owner
                self.stop()
                self.__login()
                break

    def __login(self):
        # Minimal credentials object expected by BigWorld.connect.
        class LoginInfo:
            pass
        login = LoginInfo()
        login.username = self.__user
        BigWorld.connect(self.__host, login, self.__progressFn)

    def __progressFn(self, stage, status, serverMsg):
        # Connection progress callback: just echo the state.
        print stage, status, serverMsg
def printPeriodTime():
    """Print 'remaining / total' seconds for the current arena period."""
    arena = BigWorld.player().arena
    print '%f / %f' % (arena.periodEndTime - BigWorld.serverTime(), arena.periodLength)
def printStatistics(byTotal = False, bots = True):
statistics = BigWorld.player().arena.statistics
teams = (None, [], [])
for (name, team), stats in statistics.iteritems():
if bots or not name.startswith('Bot'):
teams[team].append((name, stats))
key = 'totalFrags' if byTotal else 'frags'
teams[1].sort(lambda x, y: cmp(x[1]['key'], y[1]['key']))
teams[2].sort(lambda x, y: cmp(x[1]['key'], y[1]['key']))
for i in xrange(1, 3):
print 'Team %d\n' % i
for name, stats in teams[i]:
print '%s\t%d\t%d' % (name, stats['frags'], stats['totalFrags'])
return
def printConst(module, prefix, value):
    """Print the first constant in *module* whose name starts with *prefix*
    and whose value equals *value*; print 'Not found' otherwise."""
    mod = __import__(module)
    for c in dir(mod):
        if c.startswith(prefix) and getattr(mod, c) == value:
            print c
            return
    print 'Not found'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tmpconsolecmds.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:51:09 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a08261dfb20e2f5f2b1ccac1c04c7469f46f54e4
|
25ebec4dd05334b63d62e238ccaa1700fdb7dcc4
|
/Arase/PWE/DeleteDate.py
|
54b02b15eff997334804e5fcbe1ec03e7b387862
|
[
"MIT"
] |
permissive
|
mattkjames7/Arase
|
2513416f0211f82a75d6b963fc8d6ea081dbc4f0
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
refs/heads/master
| 2023-04-07T03:54:59.407811
| 2021-05-20T12:03:50
| 2021-05-20T12:03:50
| 187,637,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
import numpy as np
from ..Tools.Downloading._DeleteDate import _DeleteDate
from .. import Globals
def DeleteDate(Date,subcomp,L,prod,Confirm=True):
	'''
	Delete all of the locally stored PWE files for a given date.

	Inputs
	======
	Date : Date whose files should be removed.
	subcomp : PWE subcomponent name (e.g. 'hfa').
	L : Data level.
	prod : Data product name. Ignored for hfa level 3, which has no
		product subdirectory.
	Confirm : Passed through to _DeleteDate — presumably prompts before
		deleting; TODO confirm against _DeleteDate's implementation.
	'''
	# hfa level-3 data are not split by product, so both the index file
	# name and the data path omit the product component.
	if subcomp == 'hfa' and L == 3:
		idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}.dat'.format(L,subcomp)
		datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/'.format(subcomp,L)
	else:
		idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}-{:s}.dat'.format(L,subcomp,prod)
		datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/{:s}/'.format(subcomp,L,prod)
	_DeleteDate(Date,idxfname,datapath,Confirm)
|
[
"mattkjames7@gmail.com"
] |
mattkjames7@gmail.com
|
041645ad029ae1da78cd282c6980c9e489f5b47e
|
59cdb8b3995ee5938dc4710e32f29ac273410265
|
/_archive/nosepoke_code/_old/cuedtaste_abu3.py
|
ca2954d2612ada74593f061d92279ad1919ac11a
|
[] |
no_license
|
abuzarmahmood/firing_space_plot
|
15ff667fada8f4e985a6a6c6f31261b72b0f4b60
|
9fe925d9b443fda96d8e23d6d2d2d2aa60b08f15
|
refs/heads/master
| 2023-07-25T01:39:31.942434
| 2023-07-15T14:24:38
| 2023-07-15T14:24:38
| 139,602,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,345
|
py
|
"""
Code to ONLY CONTROL cue and RESPOND to nosepoke
No recording --> Leave that to Intan Board
"""
from threading import Thread, Lock
import time
import RPi.GPIO as GPIO
import datetime
import numpy as np
class nosepoke_task:
    """
    - Class to run nosepoke task
    - Stores main variables and performs appropriate setup
    - Will allow delivery of tastant and control of laser dependent on nosepoke parameters
    - No need to involve INTAN board since laser is either on or off for all trials
      and nosepoke parameters will stay the same
    """
    # NOTE(review): this class has no attributes or methods — it looks like
    # a placeholder; all working logic lives in nosepoke_trigger.
class nosepoke_trigger:
    """
    Control the cue light and respond to nosepoke (beam-break) events.
    """

    def __init__(self, nosepoke_gpio, cue_gpio, freq,
                 taste_output, laser_output, iti):
        """
        nosepoke_gpio :: board pin to read the nosepoke sensor from
        cue_gpio      :: board pin driving the cue light
        freq          :: polling frequency (Hz) for the nosepoke input
        taste_output  :: output used for tastant delivery
        laser_output  :: output used for laser control
        iti           :: inter-trial interval in seconds
        """
        # Initialize board details (physical BOARD pin numbering).
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(nosepoke_gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.setup(cue_gpio, GPIO.OUT)
        self.nosepoke_gpio = nosepoke_gpio
        self.cue_gpio = cue_gpio
        # BUG FIX: np.float is deprecated/removed in modern NumPy — use the
        # builtin. The original also overwrote this converted value with the
        # raw `freq` a few lines later; that redundant assignment is removed.
        self.freq = float(freq)
        self.poke_bool = 0
        self.stopped = 0
        self.taste_output = taste_output
        self.laser_output = laser_output
        self.iti_delta = datetime.timedelta(seconds=iti)
        self.latest_trigger_time = 0
        self.wait_till = datetime.datetime.now()
        self.iti = iti
        self.cue_freq = float(2)
        self.cue_on = 1

    def update(self):
        # Keep polling the nosepoke input until the thread is stopped.
        while True:
            time.sleep(1 / self.freq)
            if self.stopped:
                return
            temp_read = GPIO.input(self.nosepoke_gpio)
            # NOTE(review): triggers on a LOW reading, i.e. the beam input
            # appears to be active-low — confirm against the wiring.
            if not temp_read:
                self.action_check()

    def action_check(self):
        """
        Run the trial action: turn the cue off, wait out the inter-trial
        interval, then re-arm the cue.
        """
        self.cue_on = 0
        print("ACTION COMPLETED")
        time.sleep(self.iti)
        self.cue_on = 1
        return

    def cue_protocol(self):
        # Blink the cue light at cue_freq until stopped; when stopped the
        # light is left off after the next cycle.
        while True:
            time.sleep(1 / self.cue_freq)
            GPIO.output(self.cue_gpio, 0)
            if not self.stopped:
                time.sleep(0.5 / self.cue_freq)
                GPIO.output(self.cue_gpio, 1)

    def start_update(self):
        # BUG FIX: pass the bound method itself. The original used
        # target=self.update(), which *called* update() synchronously and
        # blocked forever instead of running it in the thread.
        t = Thread(target=self.update, name='check_thread', args=())
        t.daemon = True
        t.start()
        return self

    def start_cue(self):
        # BUG FIX: same as start_update — pass the callable, don't call it.
        t = Thread(target=self.cue_protocol, name='cue_thread', args=())
        t.daemon = True
        t.start()
        return self

    def stop_all(self):
        self.stopped = True
        # NOTE(review): the original also called self.out_connect.close(),
        # but out_connect is never created anywhere in this class (leftover
        # from the recording version) and always raised AttributeError;
        # removed.
# Hardware configuration (GPIO.BOARD pin numbers) and task startup.
freq = 100   # nosepoke polling frequency (Hz)
light = 36   # cue-light pin (passed as cue_gpio)
beam = 11    # nosepoke beam pin (passed as nosepoke_gpio)
# Args: (nosepoke_gpio, cue_gpio, freq, taste_output, laser_output, iti)
test_poke_io = nosepoke_trigger(beam,light,freq,1,1,10)
test_poke_io.start_update()
test_poke_io.start_cue()
|
[
"abuzarmahmood@gmail.com"
] |
abuzarmahmood@gmail.com
|
f0e71dab23c9d10b9ed81e223aeb65c5444569bd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02784/s847410997.py
|
fd50024c37c9f2ef3942f98bcea7d5ff189e9a6b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import sys
from itertools import accumulate
# Read a target H and a count N, then N integer values, and report whether
# the running total of the values ever reaches H.
H, N = map(int, next(sys.stdin.buffer).split())
A = map(int, next(sys.stdin.buffer).split())
# accumulate(..., initial=0) also covers the H <= 0 case before any value.
ans = any(total >= H for total in accumulate(A, initial=0))
print('Yes' if ans else 'No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
725ae5cb559adac73aa5ac94b2f193dbed895a91
|
a2b7fba22a16f379ccca2e38d9d6291b9562abc3
|
/Graph Theory/Connectivity/Biconnected_Components.py
|
4967813f98137ae85ef4c6f0d35ab613b52ff0ee
|
[] |
no_license
|
neelamy/Algorithm
|
565c1cea72715745653e90a3dabbba1e9e283fd8
|
7c9f53ff27bcb840b9dbc20d520f003f4d76fe17
|
refs/heads/master
| 2020-06-10T15:53:12.967832
| 2017-07-18T07:59:32
| 2017-07-18T07:59:32
| 75,953,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,445
|
py
|
# Python program to find biconnected components in a given
# undirected graph
#Complexity : O(V+E)
from collections import defaultdict
#This class represents an undirected graph using adjacency list representation
class Graph:
    """Undirected graph; finds biconnected components with DFS in O(V+E)."""

    def __init__(self,vertices):
        self.V= vertices #No. of vertices
        self.graph = defaultdict(list) # default dictionary to store graph
        self.Time = 0 # global DFS discovery-time counter
        self.count = 0 # number of biconnected components found so far

    # function to add an (undirected) edge to graph
    def addEdge(self,u,v):
        self.graph[u].append(v)
        self.graph[v].append(u)

    '''A recursive function that finds and prints strongly connected
    components using DFS traversal
    u --> The vertex to be visited next
    disc[] --> Stores discovery times of visited vertices
    low[] -- >> earliest visited vertex (the vertex with minimum
                discovery time) that can be reached from subtree
                rooted with current vertex
    st -- >> To store visited edges
    result -->> To store all edges already printed
    '''
    def BCCUtil(self,u, parent, low, disc, st, result):
        # Count of children of u in the DFS tree.
        children =0
        # Initialize discovery time and low value
        disc[u] = self.Time
        low[u] = self.Time
        self.Time += 1
        # Recur for all the vertices adjacent to this vertex
        for v in self.graph[u]:
            # If v is not visited yet, then make it a child of u
            # in DFS tree and recur for it
            if disc[v] == -1 :
                parent[v] = u
                children += 1
                st.append((u, v)) #store the edge in stack
                self.BCCUtil(v, parent, low, disc, st, result)
                # Check if the subtree rooted with v has a connection to
                # one of the ancestors of u
                low[u] = min(low[u], low[v])
                # u is an articulation point if it is the DFS root with
                # more than one child, or a non-root whose subtree cannot
                # reach above u; pop all edges from stack till (u, v)
                if parent[u] == -1 and children > 1 or parent[u] != -1 and low[v] >= disc[u]:
                    self.count +=1 # increment count
                    w = -1
                    while w != (u,v):
                        w = st.pop()
                        result.append(w) # store output edges
                        print w,
                    print""
            elif v != parent[u]:
                # (u,v) is a back edge: update low value of 'u'
                low[u] = min(low [u], disc[v])
                # add the edge if (u,v) and (v,u) are not already in stack
                # or result
                if ((u,v) not in st and (v,u) not in st and
                    (u,v) not in result and (v,u) not in result):
                    st.append((u,v))

    # The function to do DFS traversal. It uses recursive BCCUtil()
    def BCC(self):
        # Initialize disc and low, and parent arrays
        disc = [-1] * (self.V)
        low = [-1] * (self.V)
        parent = [-1] * (self.V)
        st = []
        result = []
        # Call the recursive helper function to find articulation points
        # in DFS tree rooted with vertex 'i'
        for i in range(self.V):
            if disc[i] == -1:
                self.BCCUtil(i, parent, low, disc, st, result)
            # If stack is not empty, pop all remaining edges — they form
            # one more biconnected component for this DFS tree
            if st:
                self.count = self.count + 1
                while st:
                    w = st.pop()
                    result.append(w) # store output edges
                    print w,
                print ""
# Build the example graph from the accompanying diagram and print its
# biconnected components followed by the total count.
g = Graph(12)
g.addEdge(0,1)
g.addEdge(1,2)
g.addEdge(1,3)
g.addEdge(2,3)
g.addEdge(2,4)
g.addEdge(3,4)
g.addEdge(1,5)
g.addEdge(0,6)
g.addEdge(5,6)
g.addEdge(5,7)
g.addEdge(5,8)
g.addEdge(7,8)
g.addEdge(8,9)
g.addEdge(10,11)
g.BCC();
print ("Above are %d biconnected components in graph" %(g.count));
|
[
"neelamyadav.jss@gmail.com"
] |
neelamyadav.jss@gmail.com
|
0675fa79cb50d6f70ae4d1f4b9bfa46070797d0a
|
67b0379a12a60e9f26232b81047de3470c4a9ff9
|
/slideshow/admin.py
|
365cd2affc5bde2acbcc881b16c1a4523658de1c
|
[] |
no_license
|
vintkor/whitemandarin
|
8ea9022b889fac718e0858873a07c586cf8da729
|
5afcfc5eef1bb1cc2febf519b04a4819a7b9648f
|
refs/heads/master
| 2021-05-06T03:35:09.367375
| 2017-12-20T15:43:08
| 2017-12-20T15:43:08
| 114,904,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from django.contrib import admin
from .models import *
from content.fields import AdminImageWidget
from django.db import models
class SlidAdmin(admin.ModelAdmin):
    """Admin options for slideshow slides."""
    # Columns shown in the admin changelist.
    list_display = ('name', 'pic', 'published', 'ordering')
    # Editable inline from the changelist (must be a subset of list_display).
    list_editable = ('published', 'ordering')
    # Render image fields with the preview widget from the content app.
    formfield_overrides = {
        models.ImageField: {'widget': AdminImageWidget},
    }

# Register slides with the custom admin, categories with the default one.
admin.site.register(Slid, SlidAdmin)
admin.site.register(Category)
|
[
"alkv84@yandex.ru"
] |
alkv84@yandex.ru
|
82ae6081af92203e7de8a4eae701c34ca3048032
|
fb0c02a5529f41384598dab941180152f39fa10e
|
/dopzad.py
|
d4825d0e0ef8fa4c07a4e4ec12b31c244ee1f015
|
[] |
no_license
|
Sezimm/Problem2
|
951200c92dcdbd43da3daefd32491c8850184745
|
0a3f392f025d22db4ef2ca8045c6d8a770b54f9f
|
refs/heads/main
| 2023-02-22T02:03:25.424031
| 2021-01-26T15:49:45
| 2021-01-26T15:49:45
| 333,133,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#2
'''
text = input()
l = text.split()
q=""
for i in sorted(l,key=lambda a: len(a)):
q = q + " " + i
print(q)
'''
p = input("vedite dannye cherez probel: \n")
s = int(input("vedite shag: "))
x = p.split()
for j in range(int(len(x))):
|
[
"you@example.com"
] |
you@example.com
|
2e424e970f298b6e3f96fe5aaf6fdb58c9648820
|
ee4265c3c6c99f189e2202e7f0d2b5a78475376d
|
/code_forces/Accommodation.py
|
58c0a2f2021e7ef312b1bdda00f5506ed981caeb
|
[] |
no_license
|
akashgkrishnan/HackerRank_Solutions
|
96f762848a77b731748d3331d17314f5e343abae
|
75eeb5bd0f6e81c62ecd2898eb7de9b540e98b46
|
refs/heads/master
| 2023-03-18T18:01:21.402781
| 2020-09-30T18:08:43
| 2020-09-30T18:08:43
| 271,615,533
| 1
| 0
| null | 2021-03-20T05:03:59
| 2020-06-11T18:09:23
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
# For each of T test cases read a pair (p, q) from stdin and count how many
# pairs differ by at least 2.
tally = 0
for _ in range(int(input())):
    low, high = map(int, input().split())
    if high - low >= 2:
        tally += 1
print(tally)
|
[
"krishnanag1996@gmail.com"
] |
krishnanag1996@gmail.com
|
e71c26708fa1dac89ce425302946faba0f6cdd88
|
e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6
|
/venv/Lib/site-packages/pybrain/structure/modules/biasunit.py
|
a85d6212c196f190500ec537e31bc49c033bddc8
|
[
"MIT"
] |
permissive
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
cdf0f23a58617e17d6b938e3a9df17daae8585e4
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
refs/heads/master
| 2021-09-11T01:39:26.228392
| 2018-04-05T14:33:39
| 2018-04-05T14:33:39
| 117,153,454
| 0
| 0
|
MIT
| 2018-03-27T05:20:37
| 2018-01-11T21:05:33
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
__author__ = 'Tom Schaul, tom@idsia.ch'
from neuronlayer import NeuronLayer
from module import Module
class BiasUnit(NeuronLayer):
    """A simple bias unit with a single constant output."""

    # A bias unit always emits exactly one value.
    dim = 1

    def __init__(self, name=None):
        # Zero inputs, one output. Calls Module.__init__ directly,
        # bypassing NeuronLayer's own initializer.
        Module.__init__(self, 0, 1, name = name)

    def _forwardImplementation(self, inbuf, outbuf):
        # The output is the constant 1, regardless of input.
        outbuf[:] = 1
|
[
"shatserka@gmail.com"
] |
shatserka@gmail.com
|
a472871209ba36dddc71717632c40210ffcdb40d
|
ad0d092d26b126ebf9c5f79a0254e79320a4d732
|
/avgtest_pico.py
|
8801fc479c10cafaf34933348fe63907e7b0caf7
|
[
"MIT"
] |
permissive
|
peterhinch/micropython-filters
|
be9bcbc3ace92987a1ef23fca5faa90af2391d35
|
4e0b737574073bab36ec1c776e8dfb80b8fe5f9f
|
refs/heads/master
| 2022-02-07T23:01:52.492287
| 2022-01-26T18:29:34
| 2022-01-26T18:29:34
| 30,693,766
| 59
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# Demo program for moving average filter
# Author: Peter Hinch
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2021 Peter Hinch
# 16th Dec 2021
import array
from time import ticks_us, ticks_diff
from avg_pico import avg
# Sample buffer for the filter: 19 signed ints, zero-initialised.
# Element 0 is set to the array length — presumably a header consumed by
# avg(); TODO confirm against avg_pico's implementation.
data = array.array('i', (0 for _ in range(19))) # Average over 16 samples
data[0] = len(data)

def test():
    """Feed a step input through the filter and print its response."""
    # Step input of 1000: the scaled average should ramp up.
    for x in range(16):
        print(avg(data, 1000, 4)) # Scale by 4 bits (divide by 16)
    # Step back to 0: the average should decay again.
    for x in range(18):
        print(avg(data, 0, 4))

def timing():
    """Estimate the execution time of a single avg() call."""
    t = ticks_us()
    avg(data, 10, 4)
    t1 = ticks_diff(ticks_us(), t) # Time for one call with timing overheads
    t = ticks_us()
    avg(data, 10, 4)
    avg(data, 10, 4)
    t2 = ticks_diff(ticks_us(), t) # Time for two calls with timing overheads
    # The difference of the two measurements cancels the timing overhead.
    print(t2-t1,"uS") # Time to execute the avg() call

test()
print("Timing test")
timing()
|
[
"peter@hinch.me.uk"
] |
peter@hinch.me.uk
|
65f9e5229c6c9a6a1e020096dd3b187b66fcee09
|
e86fa6b618822fc800bdc699b95efd404065509e
|
/python/practice/matrixMultiplication.py
|
475810eb7ae26f8568fb5ce2c80ae20e1d8d212e
|
[] |
no_license
|
KimYeong-su/programmers
|
97b0ba53833176690bf5f87243d4e98402526f86
|
caf3bcb824c4199832ca94b073340b8c49ada31d
|
refs/heads/master
| 2021-08-29T07:06:12.936109
| 2021-08-06T12:40:02
| 2021-08-06T12:40:02
| 251,619,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
'''
# zip(a) 표현과 zip(*a) unpacking 표현을 잘 기억하자..
def solution(arr1, arr2):
return [[sum(a*b for a, b in zip(A_row,B_col)) for B_col in zip(*arr2)] for A_row in arr1]
'''
def solution(arr1, arr2):
    """Return the matrix product arr1 x arr2 as nested lists.

    arr1 is (m x k), arr2 is (k x n); the result is (m x n).
    """
    product = []
    for row in arr1:
        out_row = []
        # zip(*arr2) iterates the columns of arr2.
        for col in zip(*arr2):
            out_row.append(sum(a * b for a, b in zip(row, col)))
        product.append(out_row)
    return product
|
[
"suwon0521@naver.com"
] |
suwon0521@naver.com
|
cde830d7a4274d28fb3f1d7f4f807e7245e65ec0
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_omens.py
|
6723d6d1487e28f10d04f20ab87de03a0fc7e444
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# class header
class _OMENS():
    """Word-base entry for "OMENS"."""

    def __init__(self,):
        self.name = "OMENS"
        # BUG FIX: the original assigned the undefined name `omen`, which
        # raised NameError on every instantiation; store the base-form
        # string instead (mirroring self.basic below).
        self.definitions = "omen"
        self.parents = []
        # NOTE(review): "childen" is a typo for "children", but the
        # attribute name is kept since other generated files may read it.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        # Base (singular) form(s) of the word.
        self.basic = ['omen']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4067df6deabc0c09665985515f58a01b388540cf
|
0fb9e72ca0e41a06b08dd03c49f340a5c3af583d
|
/main.py
|
e579bc240cf9a2dc0834118f9a3bd25c531290f8
|
[] |
no_license
|
osmlab/routerelationranger
|
74146da70cad1f989f0bff764859e4b43112b998
|
1fcd00493ca6e612fcc0ce6b2878a0749fb6e869
|
refs/heads/master
| 2020-06-14T06:35:33.626186
| 2016-12-13T18:53:05
| 2016-12-13T18:53:05
| 75,222,534
| 8
| 1
| null | 2016-12-13T19:22:04
| 2016-11-30T20:05:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
from flask import Flask, render_template, jsonify, request
import requests
import json
import pycountry
import us
app = Flask(__name__)
route_type = ''
@app.route("/")
def index():
    """Serve the application's main page."""
    return render_template('index.html')
@app.route('/countries')
def get_countries():
    """Return a JSON list of [alpha-2 code, name] pairs for all countries."""
    return json.dumps([[c.alpha_2, c.name] for c in pycountry.countries])
@app.route('/states/<countrycode>')
def get_states(countrycode):
    """Return a JSON list of [code, name] subdivisions for a country code."""
    try:
        states = [[s.code, s.name] for s in pycountry.subdivisions.get(country_code=countrycode)]
        return json.dumps(states)
    # NOTE(review): newer pycountry versions return None for an unknown
    # country (TypeError when iterated) rather than raising KeyError —
    # confirm against the pinned pycountry version.
    except KeyError:
        return jsonify([])
    # BUG FIX: removed the original's second `return jsonify([])` after the
    # try/except — both branches already return, so it was unreachable.
@app.route('/routes/interstate/<country_code>')
def get_interstate_relations(country_code):
    """Return cleaned Overpass relations for the country's interstate
    network (relations tagged network="<country_code>:I" with a ref)."""
    # get route type parameter
    overpass_query = '[out:json];relation[network="{country_code}:I"][ref];out meta;'.format(country_code=country_code)
    print(overpass_query)
    response = perform_overpass(overpass_query)
    relations = response.json()
    if 'elements' in relations and len(relations['elements']) > 0:
        out = process_elements(relations['elements'])
        return jsonify(out)
    # No matching relations (or an Overpass error payload): empty list.
    return jsonify([])
@app.route('/routes/bicycle/<country_code>/<state_code>')
def get_bicycle_relations(country_code, state_code):
    """Return cleaned Overpass bicycle-route relations inside the named
    subdivision (looked up via pycountry from country and state codes)."""
    # Resolve the subdivision's display name for the Overpass area filter.
    overpass_query = '[out:json];area[name="{statename}"]->.a;relation[route=bicycle][network](area.a);out meta;'.format(
        statename=pycountry.subdivisions.get(code='{}-{}'.format(country_code, state_code)).name)
    print(overpass_query)
    response = perform_overpass(overpass_query)
    relations = response.json()
    if 'elements' in relations and len(relations['elements']) > 0:
        out = process_elements(relations['elements'])
        return jsonify(out)
    # No matching relations (or an Overpass error payload): empty list.
    return jsonify([])
@app.route('/routes/state/<country_code>/<state_code>')
def get_relations(country_code, state_code):
    """Return cleaned Overpass relations for a state route network
    (network="<country_code>:<state_code>")."""
    overpass_query = '[out:json];relation[network="{country_code}:{state_code}"][ref];out meta;'.format(
        country_code=country_code,
        state_code=state_code)
    print(overpass_query)
    response = perform_overpass(overpass_query)
    relations = response.json()
    if 'elements' in relations and len(relations['elements']) > 0:
        out = process_elements(relations['elements'])
        return jsonify(out)
    # No matching relations (or an Overpass error payload): empty list.
    return jsonify([])
def process_elements(elements):
    """Return cleaned-up copies of all given Overpass relation elements."""
    return [cleanup_element(item) for item in elements]


def perform_overpass(query):
    """Send an Overpass QL query and return the raw HTTP response."""
    endpoint = 'https://overpass-api.de/api/interpreter'
    return requests.get(endpoint, params={'data': query})


def cleanup_element(element):
    """Flatten an Overpass element in place: drop members, lift tags up."""
    # Kept for parity with the original (also asserts the id key exists).
    osmid = element['id']
    # Member lists are bulky and unused downstream.
    if 'members' in element:
        del element['members']
    # Promote each tag to a top-level key, then drop the nested dict.
    if 'tags' in element:
        for tag_key, tag_value in element['tags'].items():
            element[tag_key] = tag_value
        del element['tags']
    return element
def split_code(state_code):
    """Split a combined "COUNTRY_CODE-STATE_CODE" string into its parts."""
    country_and_state = state_code.split('-')
    return country_and_state
# Start the Flask development server when this module is run directly.
if __name__ == "__main__":
    app.run()
|
[
"m@rtijn.org"
] |
m@rtijn.org
|
01fb37b7d5d16a167a3f98e0b2b0c9ed7a36cd06
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part005250.py
|
a115bdd05a7d58de6b87e28027cd20e59675b0ad
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,998
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher94883(CommutativeMatcher):
    """Many-to-one matcher for two commutative (Add) patterns.

    NOTE(review): this class is machine-generated (rubi/matchpy generator
    output); fix the generator rather than hand-editing this logic.
    """
    # Lazily created singleton, see get().
    _instance = None
    # pattern id -> (index, multiset of subpattern ids, sequence-variable specs)
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.4.0', 1, 1, S(0)), Add)
        ]),
        1: (1, Multiset({1: 1}), [
            (VariableWithCount('i2.4.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        """Return the lazily-created singleton matcher instance."""
        if CommutativeMatcher94883._instance is None:
            CommutativeMatcher94883._instance = CommutativeMatcher94883()
        return CommutativeMatcher94883._instance

    @staticmethod
    def get_match_iter(subject):
        """Yield (pattern_index, substitution) pairs for every match of *subject*."""
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 94882
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.4.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 94884
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.1.4.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 94885
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst2
                subjects.appendleft(tmp2)
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.4.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 95169
            if len(subjects) >= 1:
                tmp5 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.4.1.0', tmp5)
                except ValueError:
                    pass
                else:
                    pass
                    # State 95170
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst2
                subjects.appendleft(tmp5)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp7 = subjects.popleft()
            associative1 = tmp7
            associative_type1 = type(tmp7)
            subjects8 = deque(tmp7._args)
            matcher = CommutativeMatcher94887.get()
            tmp9 = subjects8
            subjects8 = []
            for s in tmp9:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp9, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 94888
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst1
                if pattern_index == 1:
                    pass
                    # State 95171
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst1
            subjects.appendleft(tmp7)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part005251 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
d5c9b86ee0c37f13f1cbb6d03e3008cd35a1b9bb
|
1bdb0da31d14102ca03ee2df44f0ec522b0701a4
|
/Lombardia/MM/ReteAcquedotti/AggiornamentoReteAcquedotti.py
|
c746357b05945e6e29e8a6e020d668da83aa3a93
|
[] |
no_license
|
figuriamoci/Acqua
|
dc073d90c3c5e5899b22005685847916de1dfd95
|
aef22fcd0c80c92441e0e3df2468d7a2f23a848a
|
refs/heads/master
| 2020-12-15T04:00:26.855139
| 2020-06-08T21:17:55
| 2020-06-08T21:17:55
| 234,986,179
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
import os, geojson, logging, acqua.labelCollection as lc
def create_water_supply_network(geoJsonfile, regione):
    """Load a GeoJSON file and rebuild every non-point feature with name/comune/regione properties."""
    with open(geoJsonfile) as source:
        geo = geojson.load(source)
    collected = []
    for feature in geo['features']:
        logging.info(feature)
        if feature['geometry']['type'] == 'Point':
            logging.info('Skip %s', feature)
        else:
            feature['properties'] = {
                'name': feature['properties']['NIL'],
                'comune': 'MM',
                'regione': regione,
            }
            collected.append(feature)
    feature_collection = geojson.FeatureCollection(collected)
    logging.info("Collected %s feature(s).", len(collected))
    return feature_collection
# Script entry: build the Milan water-supply feature collection and load it
# into the file + MongoDB sinks provided by acqua.labelCollection.
os.chdir( '/Lombardia/MM/ReteAcquedotti' )
geoJsonFile = '../Medadata/Quartieri_Milano.geojson'
geoJsonFile_standardized = 'rete_acquedotti_milano.geojson'
regione = 'Lombardia'
fc = create_water_supply_network( geoJsonFile,regione)
lc.to_file( fc, geoJsonFile_standardized )
ll = lc.to_mongoDB_ReteAcuquedotti( geoJsonFile_standardized )
|
[
"an.fantini@gmail.com"
] |
an.fantini@gmail.com
|
1b4e0dba6cccc7a01d2f1f2473da5593ccf3a7ee
|
6a68b230964540bc3f7eb19a9a5adc9b218370c7
|
/MiddlewareScan/F-MiddlewareScan.py
|
c00c9bde26d9c4c15f126b1dc017a4972d1497ec
|
[] |
no_license
|
chuxuantinh/hack-tools
|
7705e485959d1612fee1786a80d98a3fe5500e20
|
c2203e8fa5c42b26e23b9c3db5e88ec7d11ea120
|
refs/heads/master
| 2023-01-09T03:13:40.074380
| 2020-10-31T19:17:37
| 2020-10-31T19:17:37
| 308,955,506
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,741
|
py
|
#coding:utf-8
#author:wolf@future-sec
import getopt
import sys
import Queue
import threading
import socket
import urllib2
import time
import ssl
import os
queue = Queue.Queue()
sys.path.append("plugins")
mutex = threading.Lock()
timeout = 10
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
class ThreadNum(threading.Thread):
    """Worker thread: pops "type:host:port" tasks off the shared queue and dispatches them.

    Task flow: portscan -> discern -> <service name> (vulnerability plugins).
    """
    def __init__(self,queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            try:
                # NOTE(review): empty()+get() is racy between threads and the
                # bare except hides real errors — confirm this is intentional.
                if queue.empty():break
                queue_task = self.queue.get()
            except:
                break
            try:
                task_type,task_host,task_port = queue_task.split(":")
                if task_type == 'portscan':
                    port_status = scan_port(task_type,task_host,task_port)
                    if port_status == True:
                        # Open port: queue a fingerprinting task.
                        queue.put(":".join(['discern',task_host,task_port]))
                elif task_type == 'discern':
                    discern_type = scan_discern(task_type,task_host,task_port)
                    if discern_type:
                        # Identified service: queue its vulnerability plugins.
                        queue.put(":".join([discern_type,task_host,task_port]))
                else:
                    scan_vul(task_type,task_host,task_port)
            except:
                continue
def scan_port(task_type,host,port):
    """Try a TCP connect to host:port; log and return True when the port accepts connections."""
    try:
        # Raw connect uses half the global HTTP timeout.
        socket.setdefaulttimeout(timeout/2)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((str(host),int(port)))
        log(task_type,host,port)
        sock.close()
        return True
    except:
        # Any failure (refused, timeout, bad input) counts as closed.
        return False
def log(scan_type,host,port,info=''):
    """Serialised progress logger (Python 2 print syntax); vulnerability hits also go to result.log."""
    # Lock so concurrent worker threads do not interleave output lines.
    mutex.acquire()
    time_str = time.strftime('%X', time.localtime( time.time()))
    if scan_type == 'portscan':
        print "[%s] %s:%d open"%(time_str,host,int(port))
    elif scan_type == 'discern':
        print "[%s] http://%s:%d is %s"%(time_str,host,int(port),info)
    else:
        if info:
            # Confirmed vulnerability: echo and append to result.log.
            print "[*%s] %s"%(time_str,info)
            log_file = open('result.log','a')
            log_file.write("[*%s] %s\r\n"%(time_str,info))
            log_file.close()
        else:
            print "[%s] http://%s:%s call plugin %s"%(time_str,host,port,scan_type)
    mutex.release()
def read_config(config_type):
    """Read scanner configuration.

    'discern' -> list of [name, location, key, value] fingerprint rules from
    discern_config.ini; 'plugin' -> {service_name: [plugin, ...]} from
    plugin_config.ini. Returns None for any other type.
    """
    if config_type == 'discern':
        mark_list=[]
        config_file = open('discern_config.ini','r')
        for mark in config_file:
            # Rule line format: name|location|key|value
            name,location,key,value = mark.strip().split("|")
            mark_list.append([name,location,key,value])
        config_file.close()
        return mark_list
    elif config_type == 'plugin':
        plugin_list = {}
        config_file = open('plugin_config.ini','r')
        for plugin in config_file:
            # Line format: service_name|plugin1,plugin2,...
            name,plugin_file_list = plugin.strip().split("|")
            plugin_list[name]=[]
            plugin_list[name] = plugin_file_list.split(",")
        config_file.close()
        return plugin_list
def scan_discern(scan_type,host,port):
    """Fingerprint the HTTP service on host:port against discern_config.ini rules.

    Returns the matched service name, False on a hard request failure, or
    None implicitly when no rule matches.
    """
    mark_list = read_config('discern')
    for mark_info in mark_list:
        if mark_info[1] == 'header':
            # Rule matches on a response header value.
            try:
                header = urllib2.urlopen("http://%s:%d"%(host,int(port)),timeout=timeout).headers
            except urllib2.HTTPError,e:
                # Error responses still carry usable headers.
                header = e.headers
            except Exception,e:
                return False
            try:
                if mark_info[3].lower() in header[mark_info[2]].lower():
                    log(scan_type,host,port,mark_info[0])
                    return mark_info[0]
            except Exception,e:
                # Header absent on this server; try the next rule.
                continue
        elif mark_info[1] == 'file':
            # Rule matches on the body of a probe URL.
            try:
                re_html = urllib2.urlopen("http://%s:%d/%s"%(host,int(port),mark_info[2]),timeout=timeout).read()
            except urllib2.HTTPError,e:
                re_html = e.read()
            except Exception,e:
                return False
            if mark_info[3].lower() in re_html.lower():
                log(scan_type,host,port,mark_info[0])
                return mark_info[0]
def scan_vul(scan_type,host,port):
    """Run every plugin registered for the identified service against host:port."""
    vul_plugin = read_config("plugin")
    for plugin_name in vul_plugin[scan_type]:
        try:
            # Plugins live on sys.path ("plugins" dir) and expose check(host, port, timeout).
            req = __import__(plugin_name)
            log(plugin_name,host,port)
            vul_data = req.check(host,port,timeout)
            # Plugin protocol: "YES|details" marks a confirmed vulnerability.
            if vul_data.split("|")[0].upper()=="YES":
                log(scan_type,host,port,vul_data.split("|")[1])
        except:
            continue
def get_ip_list(ip):
    """Expand an IP spec into a list of dotted-quad addresses.

    Accepts: "a-b" numeric ranges (max 65536 addresses), a *.ini file with one
    spec per line (recursive), a 2- or 3-octet prefix (expanded over .1-.254),
    or a single full address. Python 2 only (uses long() and integer '/').
    """
    ip_list = []
    # dotted quad <-> integer converters
    iptonum = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
    numtoip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
    if '-' in ip:
        ip_range = ip.split('-')
        ip_start = long(iptonum(ip_range[0]))
        ip_end = long(iptonum(ip_range[1]))
        ip_count = ip_end - ip_start
        # Cap the range at a /16 worth of addresses.
        if ip_count >= 0 and ip_count <= 65536:
            for ip_num in range(ip_start,ip_end+1):
                ip_list.append(numtoip(ip_num))
        else:
            print '-h wrong format'
    elif '.ini' in ip:
        # Treat the argument as a file of specs, one per line.
        ip_config = open(ip,'r')
        for ip in ip_config:
            ip_list.extend(get_ip_list(ip.strip()))
        ip_config.close()
    else:
        ip_split=ip.split('.')
        net = len(ip_split)
        if net == 2:
            for b in range(1,255):
                for c in range(1,255):
                    ip = "%s.%s.%d.%d"%(ip_split[0],ip_split[1],b,c)
                    ip_list.append(ip)
        elif net == 3:
            for c in range(1,255):
                ip = "%s.%s.%s.%d"%(ip_split[0],ip_split[1],ip_split[2],c)
                ip_list.append(ip)
        elif net ==4:
            ip_list.append(ip)
        else:
            print "-h wrong format"
    return ip_list
def t_join(m_count):
    """Block until the task queue drains and workers exit, or progress stalls for >5 samples."""
    tmp_count = 0
    i = 0
    while True:
        time.sleep(1)
        ac_count = threading.activeCount()
        # i counts consecutive seconds with a stalled, below-capacity thread count.
        if ac_count < m_count and ac_count == tmp_count:
            i+=1
        else:
            i = 0
        tmp_count = ac_count
        #print ac_count,queue.qsize()
        if (queue.empty() and threading.activeCount() <= 1) or i > 5:
            break
def put_queue(ip_list,port_list):
    """Seed the shared queue with a portscan task for every (ip, port) pair."""
    for ip in ip_list:
        for port in port_list:
            queue.put(":".join(['portscan',ip,port]))
if __name__=="__main__":
msg = '''
A vulnerability detection scripts for middleware services author:wolf@future-sec
Usage: python F-MiddlewareScan.py -h 192.168.1 [-p 7001,8080] [-m 50] [-t 10]
'''
if len(sys.argv) < 2:
print msg
try:
options,args = getopt.getopt(sys.argv[1:],"h:p:m:t:")
ip = ''
port = '80,4848,7001,7002,8000,8001,8080,8081,8888,9999,9043,9080'
m_count = 100
for opt,arg in options:
if opt == '-h':
ip = arg
elif opt == '-p':
port = arg
elif opt == '-m':
m_count = int(arg)
elif opt == '-t':
timeout = int(arg)
if ip:
ip_list = get_ip_list(ip)
port_list = []
if '.ini' in port:
port_config = open(port,'r')
for port in port_config:
port_list.append(port.strip())
port_config.close()
else:
port_list = port.split(',')
put_queue(ip_list,port_list)
for i in range(m_count):
t = ThreadNum(queue)
t.setDaemon(True)
t.start()
t_join(m_count)
except Exception,e:
print msg
|
[
"50776038+chuxuantinh@users.noreply.github.com"
] |
50776038+chuxuantinh@users.noreply.github.com
|
7fe5bc22dac1909ed556d1c7bb7127e52a4d4dae
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_189/ch20_2019_04_02_12_58_25_892579.py
|
fe0235bbac5d83917113e11d70f62f63d46439b1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
def saudacao(nome):
    """Return the greeting for *nome*: a joke for 'chris', 'olá, <nome>' otherwise."""
    # The original snippet had three syntax errors: a missing ')', '=' used
    # for comparison, and 'return' outside a function — fixed here.
    if nome == 'chris':
        return 'todo mundo odeia o chris'
    return 'olá, {0}'.format(nome)


if __name__ == '__main__':
    nome = str(input('digite seu nome: '))
    print(saudacao(nome))
|
[
"you@example.com"
] |
you@example.com
|
c3c8c62c75b3a96cd5d71067e6269516d0c95ee0
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/4baf47439edb0ab2a6d043f8872032cbd5c6dee035b265d6f6b2b9443945c60a/pyexpat/errors.py
|
ead44496b49caa3d6d90ff34ba43efbfbac4b2f6
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,532
|
py
|
# encoding: utf-8
# module pyexpat.errors
# from C:\Users\Doly\Anaconda3\lib\site-packages\skimage\io\_plugins\_colormixer.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to describe error conditions. """

# no imports

# One message constant per Expat error condition, listed in numeric order
# of the corresponding error code (1-37).
XML_ERROR_NO_MEMORY = 'out of memory'
XML_ERROR_SYNTAX = 'syntax error'
XML_ERROR_NO_ELEMENTS = 'no element found'
XML_ERROR_INVALID_TOKEN = 'not well-formed (invalid token)'
XML_ERROR_UNCLOSED_TOKEN = 'unclosed token'
XML_ERROR_PARTIAL_CHAR = 'partial character'
XML_ERROR_TAG_MISMATCH = 'mismatched tag'
XML_ERROR_DUPLICATE_ATTRIBUTE = 'duplicate attribute'
XML_ERROR_JUNK_AFTER_DOC_ELEMENT = 'junk after document element'
XML_ERROR_PARAM_ENTITY_REF = 'illegal parameter entity reference'
XML_ERROR_UNDEFINED_ENTITY = 'undefined entity'
XML_ERROR_RECURSIVE_ENTITY_REF = 'recursive entity reference'
XML_ERROR_ASYNC_ENTITY = 'asynchronous entity'
XML_ERROR_BAD_CHAR_REF = 'reference to invalid character number'
XML_ERROR_BINARY_ENTITY_REF = 'reference to binary entity'
XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF = 'reference to external entity in attribute'
XML_ERROR_MISPLACED_XML_PI = 'XML or text declaration not at start of entity'
XML_ERROR_UNKNOWN_ENCODING = 'unknown encoding'
XML_ERROR_INCORRECT_ENCODING = 'encoding specified in XML declaration is incorrect'
XML_ERROR_UNCLOSED_CDATA_SECTION = 'unclosed CDATA section'
XML_ERROR_EXTERNAL_ENTITY_HANDLING = 'error in processing external entity reference'
XML_ERROR_NOT_STANDALONE = 'document is not standalone'
XML_ERROR_UNEXPECTED_STATE = 'unexpected parser state - please send a bug report'
XML_ERROR_ENTITY_DECLARED_IN_PE = 'entity declared in parameter entity'
XML_ERROR_FEATURE_REQUIRES_XML_DTD = 'requested feature requires XML_DTD support in Expat'
XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING = 'cannot change setting once parsing has begun'
XML_ERROR_UNBOUND_PREFIX = 'unbound prefix'
XML_ERROR_UNDECLARING_PREFIX = 'must not undeclare prefix'
XML_ERROR_INCOMPLETE_PE = 'incomplete markup in parameter entity'
XML_ERROR_XML_DECL = 'XML declaration not well-formed'
XML_ERROR_TEXT_DECL = 'text declaration not well-formed'
XML_ERROR_PUBLICID = 'illegal character(s) in public id'
XML_ERROR_SUSPENDED = 'parser suspended'
XML_ERROR_NOT_SUSPENDED = 'parser not suspended'
XML_ERROR_ABORTED = 'parsing aborted'
XML_ERROR_FINISHED = 'parsing finished'
XML_ERROR_SUSPEND_PE = 'cannot suspend in external parameter entity'

__loader__ = None
__spec__ = None

# no functions
# no classes

# Mapping: error message -> numeric Expat error code.
codes = {
    XML_ERROR_NO_MEMORY: 1,
    XML_ERROR_SYNTAX: 2,
    XML_ERROR_NO_ELEMENTS: 3,
    XML_ERROR_INVALID_TOKEN: 4,
    XML_ERROR_UNCLOSED_TOKEN: 5,
    XML_ERROR_PARTIAL_CHAR: 6,
    XML_ERROR_TAG_MISMATCH: 7,
    XML_ERROR_DUPLICATE_ATTRIBUTE: 8,
    XML_ERROR_JUNK_AFTER_DOC_ELEMENT: 9,
    XML_ERROR_PARAM_ENTITY_REF: 10,
    XML_ERROR_UNDEFINED_ENTITY: 11,
    XML_ERROR_RECURSIVE_ENTITY_REF: 12,
    XML_ERROR_ASYNC_ENTITY: 13,
    XML_ERROR_BAD_CHAR_REF: 14,
    XML_ERROR_BINARY_ENTITY_REF: 15,
    XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF: 16,
    XML_ERROR_MISPLACED_XML_PI: 17,
    XML_ERROR_UNKNOWN_ENCODING: 18,
    XML_ERROR_INCORRECT_ENCODING: 19,
    XML_ERROR_UNCLOSED_CDATA_SECTION: 20,
    XML_ERROR_EXTERNAL_ENTITY_HANDLING: 21,
    XML_ERROR_NOT_STANDALONE: 22,
    XML_ERROR_UNEXPECTED_STATE: 23,
    XML_ERROR_ENTITY_DECLARED_IN_PE: 24,
    XML_ERROR_FEATURE_REQUIRES_XML_DTD: 25,
    XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING: 26,
    XML_ERROR_UNBOUND_PREFIX: 27,
    XML_ERROR_UNDECLARING_PREFIX: 28,
    XML_ERROR_INCOMPLETE_PE: 29,
    XML_ERROR_XML_DECL: 30,
    XML_ERROR_TEXT_DECL: 31,
    XML_ERROR_PUBLICID: 32,
    XML_ERROR_SUSPENDED: 33,
    XML_ERROR_NOT_SUSPENDED: 34,
    XML_ERROR_ABORTED: 35,
    XML_ERROR_FINISHED: 36,
    XML_ERROR_SUSPEND_PE: 37,
}

# Inverse mapping: numeric code -> error message.
messages = {number: text for text, number in codes.items()}
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
1cc9928d529ad5736e478cc4ac402889c072e6f5
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/waf_write_f/xss-match-set_create.py
|
c7702ffe2384dfbda2f82dad83e597d3abc55b61
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    # The triple-quoted string below is reference material only: it is
    # evaluated as an expression statement and discarded at runtime.
    """
    delete-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/delete-xss-match-set.html
    get-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/get-xss-match-set.html
    list-xss-match-sets : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/list-xss-match-sets.html
    update-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/update-xss-match-set.html
    """
    # Generate the parameter template for the create-xss-match-set command.
    write_parameter("waf", "create-xss-match-set")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
34c447b2702d0a89243b9206af1af98f49ae8b5b
|
5832f65747e6142d1b8de9d46aa507092782aafc
|
/Codeforces/1304/d/d1.py
|
1153970f359f16501943bc2d0f927e46b15fc1ac
|
[] |
no_license
|
subhashreddykallam/Competitive-Programming
|
64cc42c5b23c03536187a1bb54e2b2ed82ee7844
|
973b66b4eb81352b98409ca52fa3aa75c28d8b6f
|
refs/heads/master
| 2022-05-28T21:07:43.012922
| 2020-05-05T20:34:20
| 2020-05-05T20:34:20
| 226,814,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
from itertools import permutations
def CeilIndex(A, l, r, key):
    """Binary search on a sorted region of A.

    Given an exclusive lower bound l and an upper bound r with A[r] >= key,
    return the smallest index in (l, r] whose element is >= key.
    """
    lo, hi = l, r
    while hi - lo > 1:
        mid = lo + (hi - lo) // 2
        if A[mid] >= key:
            hi = mid
        else:
            lo = mid
    return hi
def LongestIncreasingSubsequenceLength(A, size):
    """Return the length of the longest strictly increasing subsequence of A[:size].

    Patience-sorting algorithm, O(size log size). Works on lists and tuples.
    Improvements over the original: no shadowing of the builtin `len`, no
    dependence on the sibling CeilIndex helper (bisect_left is equivalent),
    and an explicit guard so size == 0 returns 0 instead of raising.
    """
    from bisect import bisect_left

    if size <= 0:
        return 0
    # tails[k] = smallest possible tail value of an increasing subsequence
    # of length k+1 seen so far.
    tails = []
    for i in range(size):
        pos = bisect_left(tails, A[i])
        if pos == len(tails):
            tails.append(A[i])
        else:
            # Same replacement rule as the original's CeilIndex branch.
            tails[pos] = A[i]
    return len(tails)
# Brute force over all permutations of 1..n: keep those consistent with the
# '<'/'>' comparison string s, then report the ones with the longest and
# shortest LIS. Exponential — only viable for small inputs.
s = input()
n = len(s)+1
z = [i for i in range(1, n+1)]
z = list(permutations(z))
maxp, minp = [], []
# minpl starts at 100, acting as an "infinity" for these small n.
maxpl, minpl = 0, 100
for perm in z:
    flag = 1
    # flag stays 1 only if perm satisfies every comparison in s.
    for i in range(n-1):
        if s[i] == '>':
            if perm[i] < perm[i+1]:
                flag = 0
        else:
            if perm[i] > perm[i+1]:
                flag = 0
    if flag:
        # Track all permutations attaining the max / min LIS length.
        if LongestIncreasingSubsequenceLength(perm, len(perm)) > maxpl:
            maxp = [perm]
            maxpl = LongestIncreasingSubsequenceLength(perm, len(perm))
        elif LongestIncreasingSubsequenceLength(perm, len(perm)) == maxpl:
            maxp.append(perm)
        if LongestIncreasingSubsequenceLength(perm, len(perm)) < minpl:
            minp = [perm]
            minpl = LongestIncreasingSubsequenceLength(perm, len(perm))
        elif LongestIncreasingSubsequenceLength(perm, len(perm)) == minpl:
            minp.append(perm)
for i in maxp:
    print(i, 'max')
print()
for i in minp:
    print(i)
|
[
"42376739+Storm1seven@users.noreply.github.com"
] |
42376739+Storm1seven@users.noreply.github.com
|
b3463b6fc44f470bc1c49964be9a1293d2c14279
|
38d93c5fd72fee380ec431b2ca60a069eef8579d
|
/Baekjoon,SWEA, etc/프로그래머스/소수 찾기.py
|
d4f4170daebf494386f6c0311a6f986a270b6131
|
[] |
no_license
|
whgusdn321/Competitive-programming
|
5d1b681f5bee90de5678219d91cd0fa764476ddd
|
3ff8e6b1d2facd31a8210eddeef851ffd0dce02a
|
refs/heads/master
| 2023-01-01T01:34:22.936373
| 2020-10-24T11:05:08
| 2020-10-24T11:05:08
| 299,181,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
import itertools
# permutations = []
#
#
# def make_permutations(nn, n, permutation, visited):
# '''
# if n == 4, create [[0,1,2,3],[0,1,3,2],[0,2,3,4] . . .]
# '''
# if len(permutation) == nn:
# permutations.append(permutation)
# return
# for i in range(n):
# if not visited[i]:
# visited[i] = True
# permutation.append(i)
# make_permutations(nn, n, permutation.copy(), visited.copy())
# permutation.pop()
# visited[i] = False
#
# def isprime(str_num):
# num = int(str_num)
# if num == 1:
# return False
# flag = True
# for i in range(2, num//2+1):
# if num %i == 0:
# flag = False
# break
# return flag
#
#
# def solution(numbers):
# global permutations
# n = len(numbers)
# for nn in range(1, n+1):
# visited = [False] * n
# make_permutations(nn, n, [], visited)
#
# cnt = 0
#
# numberVisited = []
#
# for permutation in permutations:
# number =''
# for index in permutation:
# number += numbers[index]
#
# j = -1
# while j != len(number)-1 and number[j+1] == '0':
# j += 1
#
# if j == len(number)-1:
# continue
# else:
# number = number[j+1:]
# if number not in numberVisited:
# numberVisited.append(number)
# if isprime(number):
# cnt += 1
# return cnt
def isPrime(num):
    """Return True iff *num* is a prime number.

    Bug fix: the original trial division ran over range(2, num//2), whose
    emptiness for num == 4 made 4 (and only 4) report as prime. Dividing up
    to floor(sqrt(num)) is both correct and faster.
    """
    if num <= 1:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
def solutions(numbers):
    """Count distinct primes formable by permuting 1..len digits of the digit string *numbers*."""
    candidates = set()
    for length in range(1, len(numbers) + 1):
        for combo in itertools.permutations(numbers, length):
            # int() collapses leading zeros, deduplicated by the set.
            candidates.add(int(''.join(combo)))
    return sum(1 for value in candidates if isPrime(value))
# Quick manual check of solutions() on the sample input.
print(solutions('011'))
|
[
"blackgoldace@naver.com"
] |
blackgoldace@naver.com
|
b0ddb85d5d6654d0bbf42e807f0356fc1e877ba3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AntfortuneContentCommunityContentEventSaveResponse.py
|
460049c5dc88e8c5dfdbefc078a9cfe3196f03ef
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntfortuneContentCommunityContentEventSaveResponse(AlipayResponse):
def __init__(self):
super(AntfortuneContentCommunityContentEventSaveResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AntfortuneContentCommunityContentEventSaveResponse, self).parse_response_content(response_content)
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
56bc7e285a243b20416b0354cc8bd2200989d3a0
|
ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2
|
/test/test_networking_project_netgw_attach.py
|
74a8534b8692359526d9c3f667bbf6100f6e6b64
|
[
"MIT"
] |
permissive
|
hyperonecom/h1-client-python
|
df01f05ad295121e3dd391a3274c41e2f5b88e53
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
refs/heads/master
| 2023-04-05T01:51:31.637002
| 2021-03-29T00:05:41
| 2021-03-29T00:05:41
| 319,309,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import h1
from h1.model.netgw_private import NetgwPrivate
globals()['NetgwPrivate'] = NetgwPrivate
from h1.model.networking_project_netgw_attach import NetworkingProjectNetgwAttach
class TestNetworkingProjectNetgwAttach(unittest.TestCase):
    """NetworkingProjectNetgwAttach unit test stubs (OpenAPI-generated)."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def tearDown(self):
        pass

    def testNetworkingProjectNetgwAttach(self):
        """Test NetworkingProjectNetgwAttach"""
        # FIXME: construct object with mandatory attributes with example values
        # model = NetworkingProjectNetgwAttach()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"action@github.com"
] |
action@github.com
|
2e3c7259450c57b1ae6db5382c7f7aa424a5a2c9
|
a5b6dfd04e49b86a4ec3ba1ca1fc941233fd1906
|
/leetCode/46Permutations.py
|
5ac95b13a5ac47b2245845689b668f8e2a71e15e
|
[] |
no_license
|
JuDa-hku/ACM
|
c57423c4c619991ab5b8df170ace6c68fbe6bb48
|
3add05a6b07ec60ae148290f7f25d122336de47d
|
refs/heads/master
| 2021-06-25T09:33:05.396914
| 2016-10-29T03:27:03
| 2016-10-29T03:27:03
| 21,881,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
class Solution:
    # @param {integer[]} nums
    # @return {integer[][]}
    def permute(self, nums):
        """Return every permutation of nums, in lexicographic order of positions."""
        collected = []
        self.permuteHelp(nums, [], collected)
        return collected

    def permuteHelp(self, nums, res, C):
        """Extend the partial permutation *res* with each remaining element of *nums*."""
        if not nums:
            C.append(res)
            return
        for idx, num in enumerate(nums):
            # Recurse with num consumed from the pool and appended to the prefix.
            self.permuteHelp(nums[:idx] + nums[idx + 1:], res + [num], C)
# Demo call — Python 2 print statement, kept as-is.
s = Solution()
print s.permute([1,2,3])
|
[
"juda@ubuntu.ubuntu-domain"
] |
juda@ubuntu.ubuntu-domain
|
47fe148cfd3970866527a76d900bd6a16e5902c0
|
fb4fc6ca3b6ea208a2377325fd1d41e6fe068734
|
/tests/test_speed.py
|
40c06c1d58652a5c103c60146cc0bb2ed869b958
|
[] |
no_license
|
nadia-el/ontology-semsim-py
|
d15977cebe5d750c256a4ce3ff3fdb6c6c7098a1
|
216ee2a7b83b951b3bce7865e1dd7c94acc17211
|
refs/heads/master
| 2022-04-16T21:08:53.126204
| 2020-04-03T23:12:47
| 2020-04-03T23:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
from ontology_semsim.util import time_all_ancestors, time_all_jaccard
from ontology_semsim.fast_semsim import FastSemSimEngine
from pytest import approx
from typing import Optional, Set, List, Union, Dict, Any
from rdflib import URIRef, BNode, Literal, Graph, RDFS, OWL, Namespace
from ontology_semsim.semsim_rdflib import RdfSemSimEngine
from pytest import approx
import logging
# Module-level fixture: a small chromosome ontology graph shared by the tests.
g = Graph()
g.parse("tests/data/chromosome.owl", format="xml")
GO = Namespace("http://purl.obolibrary.org/obo/GO_")
BFO = Namespace("http://purl.obolibrary.org/obo/BFO_")
# Convenience IRIs — presumably GO cellular-component terms; verify against the ontology.
NC = GO['0000228'].toPython()
MC = GO['0000262'].toPython()
Ch = GO['0005694'].toPython()
Mt = GO['0005739'].toPython()
SUBCLASS_OF = RDFS['subClassOf'].toPython()
PART_OF = BFO['0000050'].toPython()
logging.basicConfig(level=logging.INFO)
def test_timings():
    """Time the plain RDF engine four times, then the fast engine over a fresh RDF engine."""
    print('')
    engine = RdfSemSimEngine(g)
    for run in range(4):
        rpt(f'rdf{run}', engine)
    # Fresh backing engine so the fast wrapper starts from a cold cache.
    engine = RdfSemSimEngine(g)
    fast_engine = FastSemSimEngine(engine)
    for run in range(4):
        rpt(f'fast{run}', fast_engine)
# to see output: pytest -s tests/test_speed.py
def rpt(n, sse):
    """Print ancestor-walk and Jaccard timings for one labelled engine run."""
    ancestors_elapsed = time_all_ancestors(sse)
    print(f'A {n} :: {ancestors_elapsed}')
    jaccard_elapsed = time_all_jaccard(sse)
    print(f'J {n} :: {jaccard_elapsed}')
|
[
"cjm@berkeleybop.org"
] |
cjm@berkeleybop.org
|
6d60e2ec9b7cae3397a6fc0edadaa40037633b41
|
275c25b4a8f8009799dc0e7aebab21df1e43f542
|
/FruitStore/main.py
|
e2bc96dcd13b8dfc1b3f07acd8704117a6564c5a
|
[] |
no_license
|
aranooke/Python-Data-Structure
|
9a81cdf39a7d9a865a43b73b4e7db1f4f8a1e570
|
3e4ad8488385823b0c36f453159b463831a19335
|
refs/heads/master
| 2023-06-05T06:34:55.095557
| 2021-06-29T09:46:51
| 2021-06-29T09:46:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,708
|
py
|
from FStore import FStore
from FStore import Cart
import time
import getpass
import logging
import json
def getAvilableStock():
    """Load the fruit stock definition from FruitStore\stock.json.

    NOTE(review): the function name keeps its original 'Avilable' typo because
    callers depend on it.
    """
    # Use a context manager so the file handle is closed (the original
    # opened it and never closed it).
    with open(r"FruitStore\stock.json", "r") as stockInfo:
        return json.load(stockInfo)
# Module-level singletons shared by every menu handler below.
openStore = FStore(getAvilableStock())
cartInstance = Cart()
def getUserInput(fromWhichMenu):
    """Prompt the user according to the menu context.

    'addMoreItems' and 'adminStuff' return True/False immediately; the other
    contexts fall through to the bottom prompt and return the raw string.
    NOTE(review): if the early branches hit their except path they also fall
    through to the bottom prompt with an empty message — confirm intended.
    """
    inputMessage = ''
    if fromWhichMenu == "fromMainMenu":
        inputMessage = "Please enter your choice : "
    elif fromWhichMenu == "fruitMenu":
        inputMessage = "Please enter fruit id : "
    elif fromWhichMenu == "numbers":
        inputMessage = "how many you need? "
    elif fromWhichMenu == "addMoreItems":
        try:
            choice = input("Do you want to add more items to your cart? Y or N ").strip()
            if choice == "Y" or choice == "y" or choice == "yes" or choice == "YES":
                return True
            else:
                return False
        except ValueError:
            print("That's not an int!")
    elif fromWhichMenu == "adminStuff":
        try:
            # NOTE(review): hard-coded admin password in source — security risk.
            choice = getpass.getpass("Enter admin password")
            if choice == "admin123":
                return True
            else:
                return False
        except ValueError:
            print("That's not a valid password!")
    try:
        choice = input(inputMessage).strip()
    except ValueError:
        print("That's not an int!")
    return choice
def displayMainMenu():
    """Print the top-level store menu."""
    print("""
    1. Show available fruits
    2. Buy Fruits
    3. Show Cart
    4. Checkout
    5. Exit
    6. Display available Stocks (only store admin can access)
    """)
def addMoreItems():
    """Ask whether to add more items; if yes, show the fruit menu and return the chosen id.

    Returns None implicitly on the 'no' path (after printing "purchase done").
    NOTE(review): not referenced by the visible main loop — possibly dead code.
    """
    if (getUserInput("addMoreItems")):
        displayFruitMenu()
        choice = getUserInput("fruitMenu")
        return choice
    else:
        print("purchase done")
def displayFruitMenu():
    """Print each fruit id from the store, numbered from 1."""
    for position, fruit in enumerate(openStore.listOfFruits(), start=1):
        print(position, fruit)
def billFormat(billObj):
    """Print one line per bill item plus the grand total in Rupees."""
    for item, amount in billObj.items():
        print(f"{item} - {amount}")
    total = sum(billObj.values())
    print(f"Total Bill amount to pay {total} Rupees \n")
def checkOutCart():
    """Price every item currently in the cart and print the itemised bill."""
    cart_items = cartInstance.showCart()
    bill = {
        fruit_name: openStore.getFruitPrice(fruit_name) * quantity
        for fruit_name, quantity in cart_items.items()
    }
    billFormat(bill)
def showAvailableFruits():
    """Print every fruit with its id and unit price."""
    print("Here's the available fruits, happy purchasing\n")
    for fruit_id, details in openStore.listOfFruits().items():
        # details[0] is the fruit name, details[1] its per-unit price.
        print(str(fruit_id) + " - " + details[0] + "(each " + details[0] + " cost " + str(details[1]) + " Rupees)")
def buyFruit(fruitId):
    """Validate the fruit id, ask for a quantity, and move stock into the cart."""
    if int(fruitId) in openStore.getFruitsIDs():
        fruitCount = int(getUserInput("numbers"))
        # Only accept the order if the store can cover the requested count.
        if fruitCount <= openStore.getAvailableCountForFruit(fruitId):
            cartInstance.addToCart(openStore.getFruitName(fruitId), fruitCount)
            openStore.updateStock(openStore.getFruitName(fruitId), fruitCount)
            print(str(fruitCount) + " " +openStore.getFruitName(fruitId) + " added to your cart \n")
        else:
            print("The count you entered is either exceeding or we nearing out of stock soon")
    else:
        print("ID which's entered isn't matching with any fruits which we have!")
if __name__ == "__main__":
while True:
displayMainMenu()
userChoice = getUserInput("fromMainMenu")
if userChoice == '1':
showAvailableFruits()
elif userChoice == '2':
showAvailableFruits()
choice = getUserInput("fruitMenu")
buyFruit(choice)
if(getUserInput("addMoreItems")):
for i in range(len(openStore.giveAvailableFruitsInStock())):
showAvailableFruits()
choice = getUserInput("fruitMenu")
buyFruit(choice)
else:
displayFruitMenu()
elif userChoice == '3':
cartItems = cartInstance.showCart()
print("Currently you have below items in your cart, ")
for itemName, itemCount in cartItems.items():
print(itemName + "-" + str(itemCount))
time.sleep(7)
elif userChoice == '4':
checkOutCart()
print("Enjoy Shopping at Ram's Fruit Store!\n")
break
elif userChoice == '5':
break
elif userChoice == '6':
if(getUserInput("adminStuff")):
openStore.displayStock()
break
else:
print("Invalid input. Please enter number between 1-6 ")
|
[
"sanjay.siddha3@gmail.com"
] |
sanjay.siddha3@gmail.com
|
0a239158da7a4e929daf2fab7fc0257797715246
|
3fcb1d6e8566f1d31237c934a75ffbfa4b5742e0
|
/app_case/migrations/0001_initial.py
|
9d41d9298f47319eed946430f88a15e6ecd4fdd2
|
[] |
no_license
|
xuxushenlan/itest_platform
|
42714bd9ee5dc776aefecb80fdeff3bfa654785e
|
56ced10fc9fe5ba05f6b699c98b882b93e9982e3
|
refs/heads/master
| 2021-02-23T08:02:16.418178
| 2019-12-21T10:11:49
| 2019-12-21T10:11:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
# Generated by Django 2.2.6 on 2019-11-30 14:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for app_case: creates the TestCase
    # table with a FK to app_manage.Module. Do not edit by hand; generate a
    # follow-up migration instead.
    initial = True
    dependencies = [
        ('app_manage', '0002_module'),
    ]
    operations = [
        migrations.CreateModel(
            name='TestCase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='名称')),
                ('url', models.TextField(verbose_name='URL')),
                ('method', models.IntegerField(verbose_name='请求方法')),
                ('header', models.TextField(verbose_name='请求头')),
                ('parameter_type', models.IntegerField(verbose_name='参数类型')),
                ('parameter_body', models.TextField(verbose_name='参数内容')),
                ('result', models.TextField(verbose_name='结果')),
                ('assert_type', models.IntegerField(verbose_name='断言类型')),
                ('assert_text', models.TextField(verbose_name='结果')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_manage.Module')),
            ],
        ),
    ]
|
[
"fnngj@126.com"
] |
fnngj@126.com
|
8f9f2b4084e6feae2c4e3634ec6c31e48e4bc526
|
b64c45e75aa215ddcf7249fb92e047f3e7731187
|
/projectdir/utils.py
|
a0dfa25016c6994023a415d114a185558e6928ca
|
[] |
no_license
|
johngaitho05/CohMat
|
6731b4dfb94475c75f1cd1d2ec55cc810729f939
|
ff5b8e5eb877f68a0477f4f19b78c6e7c407af2c
|
refs/heads/master
| 2022-12-12T15:55:53.363782
| 2021-04-04T13:17:05
| 2021-04-04T13:17:05
| 239,868,710
| 1
| 0
| null | 2022-11-04T19:31:50
| 2020-02-11T21:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
import random
from datetime import datetime, date, timedelta
from django.template.defaultfilters import timesince, register
from django.utils import timezone
import pytz
from django.utils import timezone
class AgoTime:
    """Wrap a datetime as a human-friendly "time ago" value.

    ``self.time`` is either a relative string such as '3 days ago' /
    'Yesterday at 14:05' (for recent datetimes) or the original datetime
    object (older than ~30 days), which __str__ formats absolutely.
    """
    def __init__(self, date_time):
        ago = get_ago_time(date_time)
        if type(ago) != str or 'Yesterday' in ago:
            self.time = ago
        else:
            # timesince() joins number and unit with a non-breaking space;
            # normalize so count()/desc() can split on a plain space.
            self.time = ago.replace(u'\xa0', ' ')
    def count(self):
        # Leading number of the relative string (e.g. 3 for '3 days ago');
        # None for 'Yesterday ...' strings and absolute datetimes.
        if type(self.time) == str:
            return int(self.time.split(' ')[0]) if 'Yesterday' not in self.time else None
        return
    def desc(self):
        # Unit-plus-suffix part (e.g. 'days ago'); None when count() is None.
        if type(self.time) == str:
            return self.time[len(str(self.count())) + 1:] if 'Yesterday' not in self.time else None
        return
    def __str__(self):
        if type(self.time) == datetime:
            # Absolute formatting: day/month for the current year,
            # month/year for older dates.
            if timezone.now().year == self.time.year:
                return self.time.strftime("%d/%m at %H:%M")
            else:
                return self.time.strftime("%m/%Y")
        return self.time
@register.filter
def get_ago_time(passed_time):
    """Template filter: relative description of *passed_time*.

    Returns 'Yesterday at HH:MM' or '<n> <unit> ago' for datetimes within
    ~30 days of now; otherwise returns the datetime unchanged so callers
    can format it absolutely.
    """
    yesterday = timezone.now().date() - timedelta(days=1)
    # abs() so future datetimes also produce a positive difference.
    diff = abs(passed_time - timezone.now())
    d = diff.days
    if d <= 30:
        span = timesince(passed_time)
        span = span.split(",")[0]  # just the most significant digit
        if passed_time.date() == yesterday:
            return "Yesterday at %s" % passed_time.strftime('%H:%M')
        return "%s ago" % span
    return passed_time
class CustomTimezoneMiddleware:
    """Activate the per-session timezone (if any) for every request."""

    def __init__(self, get_response):
        # Standard Django middleware chaining hook.
        self.get_response = get_response

    def __call__(self, request):
        tz_name = request.session.get('custom_timezone')
        if tz_name:
            # A timezone name was stored in the session — make it current.
            timezone.activate(pytz.timezone(tz_name))
        else:
            # Fall back to the default timezone.
            timezone.deactivate()
        return self.get_response(request)
def randomColor():
    """Return a random CSS hex color string such as '#3A9F01'.

    Colors containing the substring 'FF' are rejected and re-rolled,
    preserving the original intent of the trailing guard.
    """
    letters = "0123456789ABCDEF"
    color = '#'
    for _ in range(6):
        # Bug fix: randint's upper bound was 14, which silently excluded
        # 'F' (index 15) and made the 'FF' re-roll below dead code.
        color += letters[random.randint(0, 15)]
    return color if 'FF' not in color else randomColor()
|
[
"johngaitho05@gmail.com"
] |
johngaitho05@gmail.com
|
a1d57be50b7c6b7de643f9c7e0d3ee889b0adefe
|
cf7118bcfbde5d2bfae51f74bffb44a5f39b3961
|
/examples/discoro_client3.py
|
ee9165856b1f16aa58adac8fc11d8cec2376b2f3
|
[
"MIT"
] |
permissive
|
tierralibre/asyncoro
|
b6aba8654a6c974898590f7814190c805704da1f
|
d0b7b4bb4f4e569235ee74ccc52810d74fe8af12
|
refs/heads/master
| 2021-01-17T07:56:01.467307
| 2016-08-06T19:24:56
| 2016-08-06T19:24:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,981
|
py
|
# Run 'discoronode.py' program to start processes to execute
# computations sent by this client, along with this program.
# Example where this client sends computation to remote discoro process to run
# as remote coroutines. Computations are scheduled with custom scheduler
# (without using RemoteCoroScheduler). Remote coroutines and client can use
# message passing to exchange data.
import asyncoro.disasyncoro as asyncoro
from asyncoro.discoro import *
from asyncoro.discoro_schedulers import RemoteCoroScheduler
# objects of C are exchanged between client and servers
class C(object):
    """Payload object exchanged between the client and remote servers.

    ``i`` is the job index; ``n`` starts as None and is filled in with a
    random value by the client, then replaced by its square root remotely.
    """

    def __init__(self, i):
        self.i = i
        self.n = None

    def __repr__(self):
        # Keep the exact '<i>: <n>' wire/debug format.
        return '%d: %s' % (self.i, self.n)
# this generator function is sent to remote discoro servers to run
# coroutines there
def compute(obj, client, coro=None):
    """Remote job body: sleep ``obj.n`` seconds, replace ``obj.n`` with its
    square root, and deliver the updated object back to *client*.

    obj: instance of C (serialized and sent along with this function).
    client: client-side Coro that receives the result as a message.
    coro: the remote coroutine running this generator (set by asyncoro).
    """
    # obj is an instance of C
    import math
    # this coroutine and client can use message passing; client sends
    # data to this coro as a message
    print('process at %s received: %s' % (coro.location, obj.n))
    yield coro.sleep(obj.n)
    obj.n = math.sqrt(obj.n)
    # send result back to client
    yield client.deliver(obj, timeout=5)
def client_proc(computation, njobs, coro=None):
    """Schedule *computation*, fan out *njobs* remote 'compute' coroutines,
    and collect their results via message passing before closing."""
    # distribute computation to server
    if (yield computation.schedule()):
        raise Exception('schedule failed')
    # create a separate coroutine to receive results, so they can be processed
    # as soon as received
    def recv_results(coro=None):
        for i in range(njobs):
            msg = yield coro.receive()
            print(' result for job %d: %s' % (i, msg))
    results_coro = asyncoro.Coro(recv_results)
    # remote coroutines send replies as messages to this coro
    for i in range(njobs):
        cobj = C(i)
        cobj.n = random.uniform(5, 10)
        # as noted in 'discoro_client1.py', 'schedule' method is used to run
        # jobs sequentially; use 'submit' to run multiple jobs on one server
        # concurrently
        print(' request %d: %s' % (i, cobj.n))
        # NOTE(review): rcoro_scheduler is the module-level RemoteCoroScheduler
        # created in __main__ before this coroutine starts.
        rcoro = yield rcoro_scheduler.schedule(compute, cobj, results_coro)
        if not isinstance(rcoro, asyncoro.Coro):
            print('failed to create rcoro %s: %s' % (i, rcoro))
    # wait for all results
    yield results_coro.finish()
    yield computation.close()
if __name__ == '__main__':
    import logging, random
    asyncoro.logger.setLevel(logging.DEBUG)
    # if scheduler is not already running (on a node as a program),
    # start it (private scheduler):
    Scheduler()
    # send generator function and class C (as the computation uses
    # objects of C)
    computation = Computation([compute, C])
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # create 10 remote coroutines (jobs)
    asyncoro.Coro(client_proc, computation, 10)
|
[
"pgiri@yahoo.com"
] |
pgiri@yahoo.com
|
a1812f7f2ebf3080a7e2a9e6e77a45f739bd7ac5
|
e65a428ca7ee11d2f62d702842d4afbd493f08a4
|
/dictionaries/odd_occurrences.py
|
94e312ac04aa7a670ac1baa72e8af5f96bccd5f7
|
[] |
no_license
|
NikiDimov/SoftUni-Python-Fundamentals
|
d8ba24a06c4366e76bdc69f1c5225dca29fe955e
|
5bb1bf5928e40f2bac867d33566c8b9dac13f566
|
refs/heads/main
| 2023-07-15T05:57:57.085880
| 2021-08-19T10:27:45
| 2021-08-19T10:27:45
| 323,631,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Read a line of words, lower-case them, and print (space-separated, in
# first-appearance order) the words that occur an odd number of times.
words = [word.lower() for word in input().split()]

# Count occurrences; dict insertion order preserves first appearance.
# (Replaces the original index-based loop with dead 'value'/'continue'
# scaffolding.)
counts = {}
for word in words:
    counts[word] = counts.get(word, 0) + 1

print(' '.join(word for word, count in counts.items() if count % 2 != 0))
|
[
"niki.dimov86@gmail.com"
] |
niki.dimov86@gmail.com
|
63f0b3714d9174e540a1a9c7a40db8b81ca459e1
|
21b0b4c27193898207751c91b8b2ed168a1b1638
|
/py/py_0074_digit_factorial_chains.py
|
576fc3629b2ebe4479494eab79c28b6178f4a805
|
[
"MIT"
] |
permissive
|
lcsm29/project-euler
|
67560a4e66968f1671a3d7ecf2dda6c956893dca
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
refs/heads/main
| 2023-07-04T11:45:24.374841
| 2021-08-07T08:20:41
| 2021-08-07T08:20:41
| 371,808,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
# Solution of;
# Project Euler Problem 74: Digit factorial chains
# https://projecteuler.net/problem=74
#
# The number 145 is well known for the property that the sum of the factorial
# of its digits is equal to 145:1! + 4! + 5! = 1 + 24 + 120 = 145Perhaps less
# well known is 169, in that it produces the longest chain of numbers that
# link back to 169; it turns out that there are only three such loops that
# exist:169 → 363601 → 1454 → 169871 → 45361 → 871872 → 45362 → 872It is not
# difficult to prove that EVERY starting number will eventually get stuck in a
# loop. For example,69 → 363600 → 1454 → 169 → 363601 (→ 1454)78 → 45360 → 871
# → 45361 (→ 871)540 → 145 (→ 145)Starting with 69 produces a chain of five
# non-repeating terms, but the longest non-repeating chain with a starting
# number below one million is sixty terms. How many chains, with a starting
# number below one million, contain exactly sixty non-repeating terms?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder for the (unwritten) Problem 74 solver; timed.caller
    benchmarks it *i* times with input *n*."""
    pass
if __name__ == '__main__':
    n = 1000        # problem input size handed to dummy()
    i = 10000       # number of timing iterations
    prob_id = 74    # Project Euler problem id, used for reporting
    timed.caller(dummy, n, i, prob_id)
|
[
"lcsm29@outlook.com"
] |
lcsm29@outlook.com
|
801a2b62b4da99a4dcc49110be6c373608da7381
|
7b1a2930931191444c76d0ee4863912dc811ff4f
|
/advertising/templatetags/image.py
|
0d586ae2cdf441bfb2b19177898112acb045fa95
|
[
"BSD-3-Clause"
] |
permissive
|
django-ve/django-advertising
|
97a68e49dc2102bfc2dc50eac4fbd443b44f3af8
|
d369de379c224161e1c8d5eecde14bfb2b7423c3
|
refs/heads/master
| 2022-11-21T20:31:53.675161
| 2020-07-23T16:27:11
| 2020-07-23T16:27:11
| 281,835,835
| 0
| 0
| null | 2020-07-23T02:59:43
| 2020-07-23T02:59:42
| null |
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
import sys
from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from ..models import Advertising
register = template.Library()
@register.simple_tag
def get_images_advertising(height=100, campaign="", *args, **kwargs):
    """
    @method: get_images_advertising
    @descrip: Return the HTML for an advertising image rotator.
    @param height: minimum height (px) of the container
    @param campaign: Advertising.id_advertising to render; when empty or
                     None, falls back to the first campaign in the table
    """
    # Normalize height to a string (Python 2/3 compatible check).
    if sys.version_info >= (3, 0):
        if not isinstance(height, str):
            height = str(height)
    else:
        if not isinstance(height, basestring):
            height = str(height)
    # Bug fix: the default is "" (not None), so the original
    # ``if campaign is None`` branch was unreachable with the default call
    # and an empty campaign hit ``.get(id_advertising="")`` instead.
    if not campaign:
        try:
            # Also catch IndexError: ``.all()[0]`` raises IndexError (not
            # DoesNotExist) when the table is empty.
            data = Advertising.objects.all()[0]
        except (Advertising.DoesNotExist, IndexError):
            data = None
    else:
        try:
            data = Advertising.objects.get(id_advertising=campaign)
        except Advertising.DoesNotExist:
            data = None
    html = ""
    if data:
        id_adv = data.id_advertising.strip()
        if data.timeout:
            # Expose the rotation timeout (ms) to the front-end script.
            timeout = data.timeout * 1000
            html += """
        <script>
            window.TimeOutAdvertising_""" + id_adv + """ = """ + str(timeout) + """
        </script>"""
        # Style css
        class_parent = "position: relative; min-height: "+height+"px;"
        class_img = "position: absolute; width: 100%; height: auto;"
        if hasattr(data, 'images'):
            html += '<div class="img-advertising" id="images_advertising_' + id_adv + '"'
            html += ' style="' + class_parent + '">'
            counter = 0
            # One absolutely-positioned, stacked <img> per campaign image.
            for image in data.images.all():
                html += '<div id="image_container_advertising_' + str(counter)
                html += '_' + id_adv + '"'
                html += '> <a target="_blank" href="' + image.url + '">'
                html += '<img src="' + settings.MEDIA_URL + str(image.photo)
                html += '" style="' + class_img + '"'
                html += ' id="img_advertising_' + str(counter) + '_' + id_adv
                html += '"></a>'
                html += '</div>'
                counter = counter + 1
            html += '</div>'
            html += """
        <script>
            document.addEventListener("DOMContentLoaded", function(event) {
                advertisingModule.initialize('""" + id_adv + """');
            });
        </script>
        """
        else:
            html = ""
    return mark_safe(html)
|
[
"martinpeveri@gmail.com"
] |
martinpeveri@gmail.com
|
482f5d49eaccc3c22ece3c7ac66ee332cc9347d4
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200627215602.py
|
ad264859fade5fe3cd565586e3a4f0428ad647bf
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
# import xlml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
    """Scrape the Maoyan top-100 board and follow each film's detail page."""
    name = 'movies'
    allowed_domains = ['maoyan.com']
    start_urls = ['http://maoyan.com/board/4']
    # def parse(self, response):
    #     pass
    def start_requests(self):
        # Kick off with the board page; responses are handled by parse().
        url = f'https://maoyan.com/board/4'
        print(url)
        yield scrapy.Request(url=url,callback=self.parse)
def parse(self, response):
soup = bs(response.text,'html.parser')
print(soup.text)
return soup
for i in soup.find_all('div',attrs={'class' : 'movie-item-info'}):\
item = MaoyanspidersItem()
title = i.find('p',attrs={'class':'name'}).find('a')
name = title.get('title')
link = 'https://maoyan.com/'+ title.get('href')
time = i.find('p',attrs={'class' : 'releasetime'}).text
item['films_name'] = name
item['release_time'] = t
yield scrapy.Request(url=link, meta={'item':item},callback=self.parse1)
return item
def parse1(self, response):
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
374696dce42ea18decb6f012afe4ef136ea501a1
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/1/1.two-sum.333959566.Runtime-Error.leetcode.python3.py
|
8a44ca6d16b23521fd72653ab990f48e7fc81b5b
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
class Solution:
    def firstBadVersion(self, n):
        """Binary-search versions 1..n for the first one where
        isBadVersion() is true; returns n + 1 when none is bad.

        NOTE(review): ``isBadVersion`` is the judge-provided global; this
        file was submitted against the 'two-sum' problem, which explains
        the recorded Runtime Error.
        """
        MAX = n
        MIN = 1
        while MAX >= MIN:
            MID = (MAX + MIN) // 2
            if isBadVersion(MID):
                # MID is bad: the first bad version is at or before MID.
                MAX = MID - 1
            else:
                MIN = MID + 1
        # Loop ends with MAX just before the first bad version.
        return MAX + 1
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
93aa196eba09a4b348af0294abef4a924b0caa0e
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/Django/进阶部分/day68orm/day68orm/app01/migrations/0007_auto_20180510_1228.py
|
bb626764f5aef8969f86f26f887e6cae0ca42e17
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 494
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-05-10 04:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames app01's 'book' and 'publisher' DB tables to
    # their bare names. Do not edit by hand.
    dependencies = [
        ('app01', '0006_auto_20180510_1227'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='book',
            table='book',
        ),
        migrations.AlterModelTable(
            name='publisher',
            table='publisher',
        ),
    ]
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
15113c318997014e892984c93d19e78847c9149d
|
bc183f7357cda3ad064f8c2ff34a176c406446d3
|
/pastepwn/analyzers/tests/alwaystrueanalyzer_test.py
|
0973040b894968d1844e79f9968191617269cd59
|
[
"MIT"
] |
permissive
|
luton1507/pastepwn
|
b8a790168ce08f10c62574eeb0a68f0dedd5425d
|
9b2fee22857e54a5312fdb3d388b472a7d271c50
|
refs/heads/master
| 2022-11-10T20:18:40.102277
| 2020-06-19T23:34:14
| 2020-06-19T23:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.actions.basicaction import BasicAction
from pastepwn.analyzers.alwaystrueanalyzer import AlwaysTrueAnalyzer
class TestAlwaysTrueAnalyzer(unittest.TestCase):
    """AlwaysTrueAnalyzer must match every paste, whatever its body."""
    def setUp(self):
        self.analyzer = AlwaysTrueAnalyzer(None)
        self.paste = mock.Mock()
    def test_match(self):
        # Non-empty, None, and empty bodies — and even a None paste —
        # must all report a match.
        self.paste.body = "Test"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = None
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = ""
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste = None
        self.assertTrue(self.analyzer.match(self.paste))
    def test_actions_present(self):
        # The action handed to the constructor is stored in .actions.
        action = mock.MagicMock(spec=BasicAction)
        analyzer = AlwaysTrueAnalyzer(action)
        self.assertEqual([action], analyzer.actions)
if __name__ == '__main__':
    unittest.main()
|
[
"d-Rickyy-b@users.noreply.github.com"
] |
d-Rickyy-b@users.noreply.github.com
|
df97e2cec6be03f872168b844e9078036280d682
|
0cfb5831a748ebd46e438e3ad7e7a09c1d196499
|
/com/chapter_08/section_03/task_8.3.4_functionWhile.py
|
f303ebafb04927959b06504d64048c0a65946b9e
|
[] |
no_license
|
StevenGeGe/pythonFromIntroductionToPractice01
|
7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108
|
9d2ba499056b30ded14180e6c4719ee48edd9772
|
refs/heads/master
| 2023-02-15T04:08:59.878711
| 2020-12-28T13:27:55
| 2020-12-28T13:27:55
| 310,980,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/20 21:26
# @Author : Yong
# @Email : Yong_GJ@163.com
# @File : task_8.3.4_functionWhile.py
# @Software: PyCharm
def get_formatted_name(first_name, last_name):
    """Return the full name, neatly capitalized."""
    return ' '.join((first_name, last_name)).title()
# 这是一个无限循环!
# while True:
# print("\nPlease tell me your name:")
# f_name = input("First name: ")
# l_name = input("Last name: ")
# formatted_name = get_formatted_name(f_name, l_name)
# print("\nHello, " + formatted_name + "!")
def get_formatted_name(first_name, last_name):
    """Combine first and last name into a title-cased full name.

    (Deliberate re-definition from the tutorial; shadows the copy above.)
    """
    parts = (first_name, last_name)
    return ' '.join(parts).title()
# Prompt repeatedly until the user enters 'q' for either name.
while True:
    print("\nPlease tell me your name:")
    print("(enter 'q' at any time to quit)")
    f_name = input("First name: ")
    if f_name == 'q':
        break
    l_name = input("Last name: ")
    if l_name == 'q':
        break
    formatted_name = get_formatted_name(f_name, l_name)
    print("\nHello, " + formatted_name + "!")
|
[
"Yong_GJ@163.com"
] |
Yong_GJ@163.com
|
a4bfce16b09eb37e9cdb42148c47e285c832cacc
|
98a834b6cd7f5cb5f596b3818eb84fca0855d62e
|
/data_gen.py
|
fa368b0ec07470c9a71b08e50965d5f818b8be3b
|
[] |
no_license
|
lmb633/transformer
|
2e6fe2200942a6e8eb9f24ebfd47eb15478004e8
|
e60c743da2078430b764aa68e224e0046b91384e
|
refs/heads/master
| 2020-09-06T11:05:33.875047
| 2019-11-12T03:47:07
| 2019-11-12T03:47:07
| 220,407,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
import pickle
import time
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
# Module-level constants and one-time pickled-dataset load (runs on import).
data_file = 'data.pkl'
vocab_file = 'vocab.pkl'
IGNORE_ID = 0  # padding value for targets
pad_id = 0     # padding value for inputs
print('loading samples...')
start = time.time()
with open(data_file, 'rb') as file:
    data = pickle.load(file)
elapsed = time.time() - start
print('elapsed: {:.4f}'.format(elapsed))
def text_to_sequence(text, char2idx):
    """Map each character of *text* to its index via *char2idx*."""
    return [char2idx[ch] for ch in text]
def sequence_to_text(seq, idx2char):
    """Map each index of *seq* back to its character via *idx2char*."""
    return [idx2char[index] for index in seq]
def get_data(filename):
    """Return the file's lines with surrounding whitespace stripped."""
    with open(filename, 'r') as source:
        stripped_lines = [line.strip() for line in source]
    return stripped_lines
def pad_collate(batch):
    """Collate (src, tgt) pairs: right-pad both to the batch maxima, attach
    the original input lengths, sort long-to-short, and hand off to torch's
    default_collate.

    batch: list of (src_array, tgt_array) pairs of 1-D integer arrays;
    uses the module-level ``pad_id`` / ``IGNORE_ID`` padding values.
    """
    # First pass: find the longest source and target in the batch.
    max_input_len = float('-inf')
    max_target_len = float('-inf')
    for elem in batch:
        src, tgt = elem
        max_input_len = max_input_len if max_input_len > len(src) else len(src)
        max_target_len = max_target_len if max_target_len > len(tgt) else len(tgt)
    # Second pass: pad in place and record each sample's true input length.
    for i, elem in enumerate(batch):
        src, tgt = elem
        input_length = len(src)
        padded_input = np.pad(src, (0, max_input_len - len(src)), 'constant', constant_values=pad_id)
        padded_target = np.pad(tgt, (0, max_target_len - len(tgt)), 'constant', constant_values=IGNORE_ID)
        batch[i] = (padded_input, padded_target, input_length)
    # sort it by input lengths (long to short)
    batch.sort(key=lambda x: x[2], reverse=True)
    return default_collate(batch)
class AiChallenger2017Dataset(Dataset):
    """Torch dataset over one split of the pickled samples loaded at
    module import time (``data``); each sample has 'in'/'out' sequences."""
    def __init__(self, split):
        # split: key into the module-level ``data`` dict (e.g. 'valid').
        self.samples = data[split]
    def __getitem__(self, i):
        sample = self.samples[i]
        src_text = sample['in']
        tgt_text = sample['out']
        # NOTE(review): np.long was removed in NumPy 1.24 — newer NumPy
        # needs np.int64 here.
        return np.array(src_text, dtype=np.long), np.array(tgt_text, np.long)
    def __len__(self):
        return len(self.samples)
def main():
    """Smoke test: print the first validation sample raw and decoded back
    to text using the pickled vocabularies."""
    valid_dataset = AiChallenger2017Dataset('valid')
    print(valid_dataset[0])
    with open(vocab_file, 'rb') as file:
        # Shadows the module-level ``data`` locally with the vocab pickle.
        data = pickle.load(file)
    src_idx2char = data['dict']['src_idx2char']
    tgt_idx2char = data['dict']['tgt_idx2char']
    src_text, tgt_text = valid_dataset[0]
    src_text = sequence_to_text(src_text, src_idx2char)
    src_text = ' '.join(src_text)
    print('src_text: ' + src_text)
    tgt_text = sequence_to_text(tgt_text, tgt_idx2char)
    tgt_text = ' '.join(tgt_text)
    print('tgt_text: ' + tgt_text)
if __name__ == "__main__":
    main()
|
[
"limingbo@focusmedia.cn"
] |
limingbo@focusmedia.cn
|
a8f7de53cd21c1dc7f0beac6dcb19aab8614a3b6
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/croix_rouge_francaise_fr.py
|
d9186b484a1d64d25affb1fa82f39a5e9c319099
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
from scrapy import Spider
from scrapy.http import JsonRequest
from locations.dict_parser import DictParser
from locations.hours import DAYS_FR, OpeningHours, sanitise_day
class CroixRougeFrancaiseFRSpider(Spider):
    """Fetch all Croix-Rouge française structures from the public GraphQL
    backend and yield them as POI items with opening hours."""
    name = "croix_rouge_francaise_fr"
    item_attributes = {"brand": "Croix-Rouge française", "brand_wikidata": "Q3003244"}
    allowed_domains = ["backend.structure.croix-rouge.fr"]
    start_urls = ["https://backend.structure.croix-rouge.fr/graphql"]
    def start_requests(self):
        # Single POST with an empty search and size=10000 around a fixed
        # point — effectively "return everything".
        graphql_query = """query GET_SEARCH_STRUCTURE_ELASTICSEARCH_QUERY($actionIds: [ID], $activityIds: [ID], $from: Int, $lat: Float, $lon: Float, $search: String!, $size: Int) {
  searchStructuresDocuments(
    actionIds: $actionIds
    activityIds: $activityIds
    from: $from
    lat: $lat
    lon: $lon
    search: $search
    size: $size
  ) {
    items {
      actions
      activities { activity }
      address_complement
      address_number
      address_place
      address_street
      address_street_type
      city
      contentful_content_id
      distance
      id
      latitude
      longitude
      name
      slug
      schedule
      specialities
      structure_type
      zip_code
    }
  }
}"""
        data = {
            "operationName": "GET_SEARCH_STRUCTURE_ELASTICSEARCH_QUERY",
            "query": graphql_query,
            "variables": {
                "actionIds": [],
                "activityIds": [],
                "from": 0,
                "lat": 44.8624,
                "lon": -0.5848,
                "search": "",
                "size": 10000,
            },
        }
        for url in self.start_urls:
            yield JsonRequest(url=url, method="POST", data=data)
    def parse(self, response):
        # One item per physical structure; DictParser maps the common keys
        # (name, lat/lon, city, zip_code, ...).
        for location in response.json()["data"]["searchStructuresDocuments"]["items"]:
            item = DictParser.parse(location)
            if location.get("address_complement"):
                item["street_address"] = location["address_complement"]
            if location.get("slug"):
                item["website"] = "https://www.croix-rouge.fr/" + location["slug"]
            item["opening_hours"] = OpeningHours()
            for day_hours in location["schedule"]:
                # NOTE(review): the schedule's "closed" field is used as the
                # closing time — confirm the API schema really names it so.
                item["opening_hours"].add_range(
                    sanitise_day(day_hours["day"], DAYS_FR), day_hours["open"], day_hours["closed"]
                )
            yield item
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|
311fac66bd5619c74f93b0a3b033e01376dc2ce5
|
5b3caf64b77161748d0929d244798a8fb914d9c5
|
/Python Excel Examples/CellsApiDemo/row/setRowStyle.py
|
b7416ef885f311a5d401678b9b4094cb9616b63c
|
[] |
no_license
|
EiceblueCloud/Spire.Cloud.Excel
|
0d56864991eaf8d44c38f21af70db614b1d804b7
|
d9845d5cefd15a3ab408b2c9f80828a4767e2b82
|
refs/heads/master
| 2021-07-20T23:44:39.068568
| 2021-07-15T03:04:49
| 2021-07-15T03:04:49
| 230,225,396
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
from spirecloudexcel.api.cells_api import CellsApi
# Spire.Cloud credentials and API client (fill in real id/key before use).
appId = "your id"
appKey = "your key"
baseUrl = "https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.cells_api.CellsApi(configuration)
# Target workbook/sheet/row in cloud storage.
name = "SetRowStyle_1.xlsx"
sheetName = "Sheet2"
row_index = 3
# Build the row style: small bold/italic/underlined Algerian font.
style = spirecloudexcel.models.Style()
font = spirecloudexcel.models.Font()
font.underline = "Single"
font.size = 8
font.is_italic = True
font.is_bold = True
font.name = "Algerian"
style.font = font
# Borders: medium red top edge, dash-dot green right edge (ARGB colors).
borders = []
topBorder = spirecloudexcel.models.Border("Medium", spirecloudexcel.models.Color(255, 255, 0, 0), "EdgeTop")
rightBorder = spirecloudexcel.models.Border("DashDot", spirecloudexcel.models.Color(255, 0, 255, 0),
                                            "EdgeRight")
borders.append(topBorder)
borders.append(rightBorder)
style.border_collection = borders
style.horizontal_alignment = "Center"
style.background_color = spirecloudexcel.models.Color(255, 0, 255, 0)
storage = ""
folder = "/Cells/Row/"
# Apply the style to the whole row via the Cells API (remote call).
api.set_row_style(name, sheet_name=sheetName, row_index=row_index, style=style, folder=folder, storage=storage)
|
[
"noreply@github.com"
] |
EiceblueCloud.noreply@github.com
|
c4ab0b017776466c61193e88cafc3391fe2ec6a6
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/yubinbai/pcuva-problems/UVa 11349 symmetric matrix/main.py
|
5b0dbb58cb2e77b0c306a72f8344e573ab95df48
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
import math
import sys
def symM(matrix, n):
    """Return True if the n x n *matrix* is symmetric about its center,
    i.e. matrix[x][y] == matrix[n-1-x][n-1-y] for every cell.

    Only the top half (ceil(n/2) rows) needs checking; the bottom half is
    its mirror image.
    """
    # Bug fix: the original row count was ``n & 1 + n >> 1``, which due to
    # operator precedence parses as ``n & ((1 + n) >> 1)`` — e.g. zero rows
    # checked for n == 2, so every 2x2 matrix passed. The intended count is
    # (n >> 1) + (n & 1) == ceil(n / 2).
    for x in range((n >> 1) + (n & 1)):
        for y in range(n):
            if matrix[x][y] != matrix[n - 1 - x][n - 1 - y]:
                return False
    return True
# Driver (Python 2 syntax): first line is the number of test cases; each
# case gives a line like 'N = n' followed by n rows of n integers.
sys.stdin = open('input.txt')
for case in range(1, 1 + int(input())):
    # NOTE(review): Python 2 input() evaluates the line; works for a bare
    # integer count but raw_input() would be safer.
    n = int(raw_input().strip().rpartition('=')[2])
    mat = []
    for x in xrange(1, n + 1):
        mat.append(map(int, raw_input().split()))
    if symM(mat, n):
        print 'Test #%d: Symmetric.' % case
    else:
        print 'Test #%d: Non-symmetric.' % case
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
93ef50c85ce3b6ab9f8d2d735078a7f3d4f8fa8f
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/tests/sentry/integrations/jira_server/test_utils.py
|
cd17caeb9eacffca5d0e6932a2c9fe1ac566c6bf
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029
| 2023-01-09T15:09:44
| 2023-01-09T15:09:44
| 48,165,782
| 0
| 0
|
BSD-3-Clause
| 2022-12-16T19:13:54
| 2015-12-17T09:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 525
|
py
|
from fixtures.integrations.mock_service import StubService
from sentry.integrations.jira_server.utils import build_user_choice
from sentry.testutils import TestCase
from sentry.testutils.silo import control_silo_test
@control_silo_test
class BuildUserChoiceTest(TestCase):
    """build_user_choice should produce a (value, label) choice tuple keyed
    on the requested user field for Jira Server user payloads."""
    def test_jira_server(self):
        user_response = StubService.get_stub_data("jira", "jira_server_user.json")
        # 'name' is the Jira Server username field; the label combines
        # display name, email and username.
        assert build_user_choice(user_response, "name") == (
            "bob",
            "Bobby - bob@example.org (bob)",
        )
|
[
"noreply@github.com"
] |
nagyist.noreply@github.com
|
d1230b257269e14de6d6a92780f184655cea298a
|
38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a
|
/nibbler/trading/collectors/testfiles/XMRpusher.py
|
259b074e21ed320eb51482ce968fe4705c991153
|
[] |
no_license
|
JizzFactoryEmployee/nibblerppman
|
0fbc1ce662cf8b4868b41a97291250fae29dc41d
|
160e557578a3e8a614450354f6ade233d32b052f
|
refs/heads/master
| 2022-11-14T01:10:31.743000
| 2020-07-04T01:21:52
| 2020-07-04T01:21:52
| 273,835,770
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
import time
import subprocess
import inotify.adapters
def XMRrunner():
    """Watch the XMR 1m collector directory forever; whenever an inotify
    event mentioning 'XMR' arrives, spawn XMRMAGIC.py as a subprocess.

    Fixes vs. the original: ``while 1 < 2`` stated plainly as
    ``while True``, dead ``if a == []: pass`` branch removed, and the
    discarded ``b.split(',')`` call dropped.
    """
    while True:
        # NOTE(review): a fresh Inotify watcher is created on every
        # iteration, as in the original — confirm this doesn't leak fds.
        watcher = inotify.adapters.Inotify()
        watcher.add_watch(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/XMR/1m')
        events = list(watcher.event_gen(yield_nones=False, timeout_s=1))
        if events and 'XMR' in str(events):
            print('ACTIVATING XMRPUSHBOT')
            subprocess.Popen(
                ['python', '/home/nibbler/nibblerppman/nibbler/trading/collectors/testfiles/XMRMAGIC.py'],
                shell=False,
            )
XMRrunner()
|
[
"52958901+JizzFactoryEmployee@users.noreply.github.com"
] |
52958901+JizzFactoryEmployee@users.noreply.github.com
|
f4c7922a4d689c89746373c980775b0a21ce13b7
|
500047f47a6b372fa7ff1e96b11315ee26acf5ef
|
/Chapter-05/badExample.py
|
c4cad1b499e3b50f0b20cd4f19c9a7030f4bff3b
|
[] |
no_license
|
ra2003/Tkinter-In-Action
|
9f3a80bb2cab8dccf78621915f234f80cf79c58d
|
2a35ae029c2cfabb53adee8dae5fd0a7c6db817f
|
refs/heads/master
| 2022-03-02T16:25:26.146299
| 2019-10-07T06:36:41
| 2019-10-07T06:36:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,524
|
py
|
#!/usr/bin/python3
import tkinter as tk
class RefactorExample(tk.Frame):
    """Demo window with PREV/NEXT/FIRST/LAST buttons, File/Edit menus and
    two labelled entry fields (the book's deliberately messy example)."""

    def __init__(self, parent):
        super().__init__()
        self.parent = parent
        panel = tk.Frame(self.parent, bg="white")
        # .place() returns None, so the original's widget variables were
        # always None; the assignments are dropped as nothing read them.
        tk.Button(panel, text="<< PREV", command=self.OnPrev).place(x=50, y=0)
        tk.Button(panel, text="NEXT >>", command=self.OnNext).place(x=130, y=0)
        panel.pack(fill=tk.BOTH, expand=1)
        # Menu bar: File (Open/Quit) and Edit (Copy/Cut/Paste/Options).
        m_main = tk.Menu(self.master, bd = 1)
        m_file = tk.Menu(m_main, tearoff=0, bd = 1)
        m_edit = tk.Menu(m_main, tearoff=0, bd=1)
        m_main.add_cascade(label="File", underline=0, menu=m_file)
        m_main.add_cascade(label="Edit", underline=0, menu=m_edit)
        m_file.add_command(label="Open", underline=0, command=self.OnOpen)
        m_file.add_command(label="Quit", underline=0, command=self.OnCloseWindow)
        m_edit.add_command(label="Copy",underline=0,command=self.OnCopy)
        m_edit.add_command(label="Cut",underline=1,command=self.OnCut)
        m_edit.add_command(label="Paste",underline=0,command=self.OnPaste)
        self.master.config(menu=m_main)
        tk.Label(panel, text = "First Name", bg="white").place(x=10, y=50)
        #tk.Entry doesn't have property of "height"
        tk.Entry(panel, width=10, bg="white").place(x=80, y=50)
        tk.Label(panel, text = "Last Name", bg="white").place(x=10, y=80)
        #tk.Entry doesn't have property of "height"
        tk.Entry(panel, width=10, bg="white").place(x=80, y=80)
        tk.Button(panel, text="FIRST", command=self.OnFirst).place(x=0, y=0)
        tk.Button(panel, text="LAST", command=self.OnLast).place(x=210, y=0)
        m_edit.add_separator()
        m_edit.add_command(label="Options", underline=0, command=self.OnOptions)

    # Bug fix: these handlers are wired as ``command=`` callbacks, which
    # call them with NO arguments; the mandatory ``event`` parameter made
    # every click/menu selection raise TypeError. ``event=None`` keeps them
    # usable both as commands and as event bindings.
    def OnPrev(self, event=None): pass
    def OnNext(self, event=None): pass
    def OnLast(self, event=None): pass
    def OnFirst(self, event=None): pass
    def OnOpen(self, event=None): pass
    def OnCopy(self, event=None): pass
    def OnCut(self, event=None): pass
    def OnPaste(self, event=None): pass
    def OnOptions(self, event=None): pass

    def OnCloseWindow(self,):
        # Destroy the toplevel window, ending mainloop().
        self.master.destroy()
def main():
    """Build the root window, attach the example frame, run the event loop."""
    app = tk.Tk()
    app.geometry("340x200")
    app.title('Refactor Example')
    frame = RefactorExample(app)
    app.mainloop()
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
ra2003.noreply@github.com
|
b8addbaf31dce94cbf9e67adfeee954a02ca3942
|
b3237e2600cfd2e84dbba3760a020c8434033e72
|
/Assignments/Exam Preparation/Python Advanced Retake Exam - 08 April 2020/03. Find the Eggs.py
|
893dfadef18cfd65e739782e2b18fa2140091f16
|
[
"MIT"
] |
permissive
|
KaloyankerR/python-advanced-repository
|
94a22a5015bb66afa6c61b3fb8ad150dc7028d6a
|
711672d0f033a5adffc3ca689d02d2a7a9a26bfb
|
refs/heads/master
| 2023-04-11T18:48:40.538000
| 2021-04-21T10:55:30
| 2021-04-21T10:55:30
| 298,027,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
def find_strongest_eggs(elements: list, sub_list):
    """Split *elements* round-robin into *sub_list* groups and return the
    middle egg of every group that passes the strength checks.

    A group is valid when its middle element is greater than both immediate
    neighbours (with the right neighbour also greater than the left one) and
    no element in the first half exceeds its mirror element in the second
    half.

    Unlike a pop(0)-based distribution, this leaves *elements* unmodified
    and distributes in O(n) instead of O(n^2).

    Assumes each group has at least two elements (as in the exam's inputs);
    smaller groups raise IndexError, matching the previous behaviour.
    """
    # Element j goes to group j % sub_list — exactly a round-robin deal.
    groups = [elements[start::sub_list] for start in range(sub_list)]

    strongest = []
    for group in groups:
        middle = len(group) // 2
        middle_egg = group[middle]

        # Middle egg must dominate both neighbours, right neighbour > left.
        if not (middle_egg > group[middle + 1] > group[middle - 1]):
            continue

        # Mirror check: no first-half egg may be stronger than its
        # counterpart from the end of the group.
        if all(group[i] <= group[-(i + 1)] for i in range(middle)):
            strongest.append(middle_egg)

    return strongest
# Exercise the solver on the exam's sample cases: (eggs, group count).
for test in (
        ([-1, 7, 3, 15, 2, 12], 2),
        ([-1, 0, 2, 5, 2, 3], 2),
        ([51, 21, 83, 52, 55], 1),
        ([1, 10, 2], 1),
):
    print(find_strongest_eggs(*test))
|
[
"kaloyankulov2003kk@gmail.com"
] |
kaloyankulov2003kk@gmail.com
|
69114fbbf1b5dd496c9af5359ad301b2f1eeb8b4
|
26ca1e0906feece27896bd267a1f58882fcb0513
|
/archive/Basics/multi_func.py
|
9820caf039d6f0b84af655361ed8812fdcd57056
|
[] |
no_license
|
icecore2/python-training2019
|
092984c6dec1b05e70f9f899ee213d126c45ff63
|
ee39f93adabab506c9eef68c5e686ddb59953de9
|
refs/heads/master
| 2020-09-02T21:19:27.959213
| 2020-04-23T20:06:08
| 2020-04-23T20:06:08
| 219,306,742
| 0
| 2
| null | 2020-01-17T15:07:06
| 2019-11-03T13:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
import sys
import os
# from hints.time_gists import timeFormat
# Sample operands for the demonstration call at the bottom of the script.
firstNumber = 2
secondNumber = 3
# secondNumber.date = 22
def multiFunc(firstNumber, secondNumber):
    """Return the product of *firstNumber* and *secondNumber*.

    (Dead code removed: an unreachable ``pass`` after the return and
    commented-out timestamp debugging.)
    """
    return firstNumber * secondNumber
# Bug fix: the original used '+' between a str and an int, which raises
# TypeError at runtime; %-formatting is what the "%d" placeholder intends.
print("The multiply result is: %d" % multiFunc(firstNumber, secondNumber))
|
[
"admin@example.com"
] |
admin@example.com
|
234e88f3b0a9d275b613902e63d48a31b12c0038
|
0a3bd0fc84263bd65559cf95b19a6752743f7f64
|
/src/guis/make_svg_camm
|
ce5acafd13805ae35b9ba00de0042e3ae860ed81
|
[] |
no_license
|
shohei/fabmodules_jp
|
b38487e5e64599fe8f3de2c404c1f730e81c616c
|
67dc16709bb9eff12b63532e83a13aa410f76311
|
refs/heads/master
| 2016-09-06T16:25:46.636267
| 2014-08-21T06:49:37
| 2014-08-21T06:49:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# make_svg_camm
# .svg to .camm GUI wrapper
#
# Neil Gershenfeld
# CBA MIT 7/13/11
#
# (c) Massachusetts Institute of Technology 2011
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT.
#
# imports
#
import wx,sys
from fab_set import fab_frame
from panel_control import control_panel
from panel_svg import svg_panel
from panel_svg_path import svg_path_panel
from panel_path_camm import path_camm_panel
#
#
# command line
# NOTE: Python 2 syntax (print statements); runs under the fab modules env.
#
print "command line: make_svg_camm [input_file [size]]"
print "   input_file = input .svg file (optional)"
print "   size = image panel size (optional)"
#
# start wx
#
app = wx.App()
#
# add panels to frame
# (control row spans the top; svg -> path -> camm panels left to right)
#
frame = fab_frame("make_svg_camm",sys.argv)
frame.control_panel = control_panel(frame)
frame.sizer.Add(frame.control_panel,(0,0),span=(1,4),flag=wx.ALIGN_CENTER_HORIZONTAL)
frame.svg_panel = svg_panel(frame)
frame.sizer.Add(frame.svg_panel,(1,0))
frame.path_panel = svg_path_panel(frame)
frame.sizer.Add(frame.path_panel,(1,1))
frame.camm_panel = path_camm_panel(frame)
frame.sizer.Add(frame.camm_panel,(1,2))
#
# defaults
#
frame.set_svg_camm()
#
# fit and show frame
#
frame.Fit()
frame.Show()
#
# start mainloop
#
app.MainLoop()
|
[
"shoaok@gmail.com"
] |
shoaok@gmail.com
|
|
42847c974fe12ff0e8c68e79c9bf6085ad3133f3
|
1915a3f90059f4a125b81675d16a427c85428626
|
/post.py
|
6ab84f631b32119601408481bcca0279c07a68e8
|
[] |
no_license
|
AntLouiz/location_photo_bot
|
b09fd9939513d21b755c6204199c29c31284240c
|
0fcf85553efb26f4eec238aa4b0b8f2b57d9f276
|
refs/heads/master
| 2020-03-31T15:30:04.605136
| 2018-10-20T01:44:38
| 2018-10-20T01:44:38
| 152,339,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
import json
import datetime
class Post(object):
    """Accumulates a location/photo pair and appends it to ``data.json``."""

    def __init__(self):
        # Delegate to clean() so construction and reset can never drift apart
        # (the original duplicated the three assignments in both methods).
        self.clean()

    def clean(self):
        """Reset all fields to their empty state."""
        self.location = None
        self.photo = None
        self.date = None

    def save(self):
        """Append the current post to ``data.json``.

        Requires ``data.json`` to already exist and contain a top-level
        ``{"data": [...]}`` mapping; raises otherwise.  ``location`` must
        expose ``latitude``/``longitude`` attributes (e.g. a Telegram
        Location) — raises AttributeError when unset.
        """
        data = {
            'location': {
                'latitude': self.location.latitude,
                'longitude': self.location.longitude
            },
            'photo': self.photo,
            'date': datetime.datetime.now().strftime("%Y%m%d")
        }

        with open('data.json', 'r') as file:
            file_data = json.load(file)

        file_data['data'].append(data)

        with open('data.json', 'w') as file:
            file.write(json.dumps(file_data))
|
[
"luizrodrigo46@hotmail.com"
] |
luizrodrigo46@hotmail.com
|
ca9afadd672ce387fd79fc8c543b9111e07090f5
|
0460b645ac0697433e4526ea9215ac25c97a64bb
|
/venv/bin/pyhtmlizer
|
9c15bcee75f63931c24a55fe21eaa476ebb0d585
|
[] |
no_license
|
veujs/weibo
|
0e20645d07196193537f523a677892d3da1abf88
|
b33d0c41fc82608fd828e2790a2dcc2c9a246f36
|
refs/heads/master
| 2020-05-30T12:03:22.631450
| 2019-06-01T10:18:36
| 2019-06-01T10:18:36
| 189,718,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
#!/home/wangzhipeng/myproject/myspider/weibo/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.2.0','console_scripts','pyhtmlizer'
# Auto-generated console-script shim produced by setuptools/easy-install;
# do not edit by hand — it is rewritten whenever Twisted is (re)installed.
__requires__ = 'Twisted==19.2.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the tool name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('Twisted==19.2.0', 'console_scripts', 'pyhtmlizer')()
    )
|
[
"624040034@qq.com"
] |
624040034@qq.com
|
|
1b3017e4d9efb6b748173059fdf1e8e745873df8
|
cda215558ad8448ed8e2cbb89719de312c382a95
|
/enteletaor_lib/libs/hooks/__init__.py
|
56572dc3372dc7674f8e8348206a7e4528940f44
|
[
"BSD-3-Clause"
] |
permissive
|
cr0hn/enteletaor
|
63fc6a9f832ea7b6b08f3f786445a8235b9a4618
|
a975b5cb06bc5f819b32e65d0cd2258a37370661
|
refs/heads/master
| 2023-05-11T13:38:25.213779
| 2023-05-08T08:41:31
| 2023-05-08T08:41:31
| 52,361,896
| 166
| 31
|
NOASSERTION
| 2023-05-08T08:41:36
| 2016-02-23T13:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
# -*- coding: utf-8 -*-
#
# Enteletaor - https://github.com/cr0hn/enteletaor
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This file contains utils for handle decorators
"""
import logging
import functools
from collections import defaultdict
log = logging.getLogger()
# --------------------------------------------------------------------------
# Config decorators
# --------------------------------------------------------------------------
def on_config_loaded(func):
    """
    Decorator that tags *func* as a hook executed once the running
    configuration has been loaded (``hook_type == "config"``).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    wrapper.hook_type = "config"
    return wrapper
# --------------------------------------------------------------------------
# Find hooks
# --------------------------------------------------------------------------
def find_hooks():
    """
    Find all hooks and return pointers to functions categorized by hook type.

    Walks this package's directory tree, imports every eligible module and
    collects module-level functions carrying a ``hook_type`` attribute (set
    by decorators such as ``on_config_loaded``).

    :return: dict with hooks and type as format: dict(hook_type: function_pointer)
    :rtype: dict(str: function)
    """
    import os
    import os.path
    import inspect

    base_dir = os.path.abspath(os.path.dirname(__file__))

    # Modules found, grouped by hook type.
    results = defaultdict(list)

    for root, dirs, files in os.walk(base_dir):

        # Check if folder is a package
        if "__init__.py" not in files:
            continue

        # Remove files or path that starts with "_"
        # NOTE(review): splits on '/' literally — assumes a POSIX path
        # separator; confirm Windows is not a supported platform.
        if any(True for x in root.split("/") if x.startswith("_")):
            continue

        for filename in files:
            if filename.endswith(".py") and \
                    not filename.startswith("celery") and \
                    not filename.startswith("test_"):

                if filename.startswith("_"):
                    if filename != "__init__.py":
                        continue

                # loop_file = os.path.join(root, filename)
                # Convert the absolute file path into a dotted module path
                # relative to this package (e.g. 'sub/mod.py' -> 'sub.mod').
                # NOTE(review): .replace('.py', '') would also mangle '.py'
                # appearing mid-path — assumed not to occur here.
                loop_file = os.path.join(root, filename) \
                    .replace(base_dir, '') \
                    .replace(os.path.sep, '.') \
                    .replace('.py', '')
                loop_file = loop_file[1:] if loop_file.startswith(".") else loop_file

                # Load module info: try package-relative import first, then absolute.
                try:
                    classes = __import__("%s.%s" % (__package__, loop_file), globals=globals(), locals=locals(), level=loop_file.count("."))
                except ImportError:
                    classes = __import__(loop_file, globals=globals(), locals=locals(), level=loop_file.count("."))

                # Collect functions that were marked by a hook decorator.
                for m in dir(classes):
                    _loaded_module = getattr(classes, m)

                    if inspect.isfunction(_loaded_module) and hasattr(_loaded_module, "hook_type"):
                        log.debug("Loading hook: %s" % _loaded_module.__name__)

                        results[_loaded_module.hook_type].append(_loaded_module)

    return results
|
[
"cr0hn@cr0hn.com"
] |
cr0hn@cr0hn.com
|
de7091480f2f208de4c42fa97e6a868185619552
|
8746f4e3da5e230ec0ca4b924bb06a9951dd03da
|
/setup.py
|
9082c72d9f540e3e1d1fc8b9b0e4754df699af35
|
[
"MIT"
] |
permissive
|
Apeopl/django-dj-plugin
|
845a508dd9088ceb9d9e03de56f5c11d5d1d07a4
|
2711c1af0185eea0fe5d1aed2eca5cd0422b387d
|
refs/heads/master
| 2021-04-19T06:04:23.658893
| 2020-03-26T06:59:50
| 2020-03-26T06:59:50
| 249,585,770
| 0
| 0
|
MIT
| 2020-03-26T01:49:01
| 2020-03-24T01:42:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
    """Return the ``__version__`` string found in the file at *file_paths*
    (joined relative to this setup.py); raise RuntimeError when absent."""
    target = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(target) as fp:
        contents = fp.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find version string.')
version = get_version("django_dj_plugin", "__init__.py")

# 'publish' pseudo-command: build sdist/wheel and upload via legacy 'upload'.
if sys.argv[-1] == 'publish':
    try:
        import wheel
        print("Wheel version: ", wheel.__version__)
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()

# 'tag' pseudo-command: create an annotated git tag for the current version
# and push it.
if sys.argv[-1] == 'tag':
    print("Tagging the version on git:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='django-dj-plugin',
    version=version,
    # NOTE(review): 'plgooge' looks like a typo in the published description.
    description="""Django plgooge practice modules.""",
    long_description=readme + '\n\n' + history,
    author='Jian Dai',
    author_email='daijian1@qq.com',
    url='https://github.com/daimon99/django-dj-plugin',
    packages=[
        'django_dj_plugin',
    ],
    include_package_data=True,
    install_requires=[],
    license="MIT",
    zip_safe=False,
    keywords='django-dj-plugin',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django :: 2.1',
        'Intended Audience :: Developers',
        # NOTE(review): BSD classifier contradicts license="MIT" above — confirm.
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
[
"daijian1@qq.com"
] |
daijian1@qq.com
|
4e88beb03ff92ac889515bdb3172e288f961c20b
|
c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb
|
/Django_HelpWord/mysite3/upload/templates/__init__.py
|
7c35217eb0fb5679a9ba58649325514db58ce182
|
[] |
no_license
|
GIS90/python_base_use
|
e55d55f9df505dac45ddd332fb65dcd08e8e531f
|
7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1
|
refs/heads/master
| 2020-04-02T08:33:49.461307
| 2018-10-23T03:33:41
| 2018-10-23T03:33:41
| 154,249,857
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# -*- coding: utf-8 -*-
"""
------------------------------------------------
describe:
------------------------------------------------
"""
# Module metadata.
__version__ = "v.10"  # NOTE(review): unusual version string ("v.10") — confirm intended.
__author__ = "PyGo"
__time__ = "2017/3/30"

if __name__ == '__main__':
    pass
|
[
"mingliang.gao@qunar.com"
] |
mingliang.gao@qunar.com
|
3937e4a4be12d7fa734792ababc9590adee4697e
|
aeb40bfa5a685bb739e818e7ea906a748795ba59
|
/data/management/commands/insert_dynamic_data.py
|
c6bd4bddbe9e2c54d30ac01bfb78a64a3ab95d03
|
[] |
no_license
|
jaebradley/nba_persistence
|
d1be548967266b4af09625fc140ce9fb4cd88a25
|
177129ad195c07dc0ff93a6c2c8f7b34770da116
|
refs/heads/master
| 2021-01-18T23:56:52.512661
| 2017-02-16T21:17:05
| 2017-02-16T21:17:05
| 48,635,050
| 10
| 2
| null | 2016-09-23T02:44:36
| 2015-12-27T06:14:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
from django.core.management.base import BaseCommand
from data.inserters.dynamic import insert_players, insert_games, insert_box_scores
class Command(BaseCommand):
    """Management command that refreshes all dynamic NBA data.

    (The previous ``__init__`` override only forwarded its arguments to
    ``super().__init__`` — dead code, removed; the inherited constructor
    accepts the same arguments.)
    """

    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py insert_dynamic_data``."""
        Command.insert()

    @staticmethod
    def insert():
        # NOTE(review): ordering assumed intentional (players before games
        # before box scores) — confirm against the inserters' dependencies.
        insert_players()
        insert_games()
        insert_box_scores()
|
[
"jae.b.bradley@gmail.com"
] |
jae.b.bradley@gmail.com
|
7d0ac5540cd33b68e73c38be260d8192538f2a02
|
2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b
|
/1550.存在连续三个奇数的数组.py
|
4b37608348efa8136e2a1c0fa66108f9a3ea9483
|
[] |
no_license
|
mqinbin/python_leetcode
|
77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3
|
73e0c81867f38fdf4051d8f58d0d3dc245be081e
|
refs/heads/main
| 2023-03-10T18:27:36.421262
| 2021-02-25T07:24:10
| 2021-02-25T07:24:10
| 314,410,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
#
# @lc app=leetcode.cn id=1550 lang=python3
#
# [1550] 存在连续三个奇数的数组
#
# @lc code=start
class Solution:
    def threeConsecutiveOdds(self, arr: list) -> bool:
        """Return True if *arr* contains three consecutive odd numbers.

        Fix: the annotation previously used ``List[int]`` without importing
        ``typing.List``, which raises NameError as soon as the class body is
        executed; the builtin ``list`` is always in scope.
        """
        odd_run = 0  # length of the current run of consecutive odd values
        for value in arr:
            if value % 2:
                odd_run += 1
                if odd_run == 3:
                    return True
            else:
                odd_run = 0
        return False
# @lc code=end
|
[
"mqinbin@gmail.com"
] |
mqinbin@gmail.com
|
287e89cd4c348d4ddc8853c3630fe6bc44339f34
|
719e7b35f2b1c3196ff8084b5c5c46cbd2a22f5f
|
/setup.py
|
c11bcae02e8fed764919d18313cdb3ad23794389
|
[
"MIT"
] |
permissive
|
noobermin/pynoob3a
|
72817b87dd7f96652487d139f42373adac820dca
|
fa8b6650ac286b52803b98b6b596f3cdc9db87cb
|
refs/heads/master
| 2021-02-09T14:07:35.912782
| 2020-03-02T05:48:52
| 2020-03-02T05:48:52
| 244,291,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from setuptools import setup

# Minimal packaging metadata for the 'noob3a' binary-format helper.
setup(name='pynoob3a',
      version='0.0.1',
      description='dumb binary format',
      author='noobermin',
      author_email='ngirmang@protonmail.com',
      license='MIT',
      packages=['noob3a'],
      zip_safe=False);
|
[
"ngirmang.1@osu.edu"
] |
ngirmang.1@osu.edu
|
be85893dfa6895326d2afa7685f39f168fc17af4
|
d596796c1488ea6dc67fb2030e2fd1fbf45e54cb
|
/free/index.py
|
7bccfb1fd092f52382826ca46e34b1e4c277d164
|
[] |
no_license
|
zerc/draft
|
83aad5b08fc1d7569295aa595821f117cb29decd
|
a972f1c341c711530c0894b5340a6639b206ec41
|
refs/heads/master
| 2021-01-15T23:02:10.509905
| 2010-06-22T16:15:56
| 2010-06-22T16:15:56
| 730,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
# zerc.ru
from os import popen
def get_free():
    """Return (total, used, free) RAM in MB, parsed from ``free -m``.

    Shells out to the ``free`` utility and reads the second output line
    (the 'Mem:' row), taking columns 1-3 as strings.
    """
    fields = popen('free -m').readlines()[1].split()
    return tuple(fields[1:4])
def index():
    """Render a small HTML page showing current RAM usage (total/used/free, MB)."""
    html = """
<html>
<head><title>web free</title></head>
<body>
<style type="text/css">
body {margin:0;padding:0;background:#aeaeae;color:#ffffff;}
.main {width:300px;margin:0 auto 0 auto;text-align:center;}
.main table {border:1px solid #444444;}
.main table td {padding:5px;width:100px;text-align:center;}
.main table .title {background-color:#cccccc;color:#000000;font-weight:bolder;}
.main h3 {text-transform:uppercase;font-size:16px;margin:5px 0 5px 0;}
.main .copy {width:300px;text-align:right;margin:5px 0 5px 0;}
</style>
<div class="main">
<h3>Использование оперативки</h3>
<table border="1" cellpadding="0" cellspacing="0" >
<tr class="title">
<td>Всего</td>
<td>Занято</td>
<td>Свободно</td>
</tr>
<tr>
<td>%s, Мб</td>
<td>%s, Мб</td>
<td>%s, Мб</td>
</tr>
</table>
<div class="copy"><b>webFree</b> © zerc</div>
</div>
</body>
</html>""" % get_free()
    return html
|
[
"zero13cool@yandex.ru"
] |
zero13cool@yandex.ru
|
2be97047e2001fdc961703b5402a777176f20e26
|
9e4e7b9d3ad410ea84310d1a93122f6817f59b5e
|
/bin/edsig
|
d7d9c4fe903836407868143f4c31d604a3624a26
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Sigterm-no/python-ed25519
|
42105735c53eba865c5b0430eee2487f40b73fea
|
15237f3536b12022c30553a857524768a2d904c7
|
refs/heads/master
| 2021-01-11T05:08:06.617637
| 2014-03-19T23:21:30
| 2014-03-19T23:21:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,709
|
#! /usr/bin/python
import os, sys
import ed25519
from hashlib import sha256
def help():
    # Print CLI usage for the three subcommands (Python 2 print statement).
    print """\
Usage: (ed25519 version %s)
 edsig generate [STEM]
   creates keypair, writes to 'STEM.signing.key' and 'STEM.verifying.key'
   default is to 'signing.key' and 'verifying.key'
 edsig sign (signing.key|keyfile) message.file
   prints signature to stdout
   If message.file is "-", reads from stdin.
 edsig verify (verifying.key|keyfile) message.file (signature|sigfile)
   prints 'good signature!' or raises exception
   If message.file is "-", reads from stdin.
Key-providing arguments can either be the key itself, or point to a file
containing the key.
""" % ed25519.__version__
def remove_prefix(prefix, s):
    """Strip *prefix* from the start of *s*; raise ValueError when absent."""
    if s.startswith(prefix):
        return s[len(prefix):]
    raise ValueError("no prefix found")
def data_from_arg(arg, prefix, keylen, readable):
    """Return key material from *arg*: either the literal key string itself
    (when it looks like one) or the contents of the file it names."""
    looks_like_key = (readable
                      and arg.startswith(prefix)
                      and len(remove_prefix(prefix, arg)) == keylen)
    if looks_like_key:
        return arg
    if os.path.isfile(arg):
        return open(arg, "r").read()
    raise ValueError("unable to get data from '%s'" % arg)
def message_rep(msg_arg):
    """Return the SHA-256 digest of the message file *msg_arg* ('-' = stdin),
    hashing in 16 KiB chunks so large files are never fully buffered."""
    stream = sys.stdin if msg_arg == "-" else open(msg_arg, "rb")
    digest = sha256()
    while True:
        chunk = stream.read(16 * 1024)
        if not chunk:
            break
        digest.update(chunk)
    return digest.digest()
# Command dispatch (Python 2 script): generate / sign / verify.
if len(sys.argv) < 2:
    help()
elif sys.argv[1] == "generate":
    # Create a keypair; optional STEM argument prefixes the output filenames.
    sk,vk = ed25519.create_keypair()
    if len(sys.argv) > 2:
        sk_outfile = sys.argv[2]+".signing.key"
        vk_outfile = sys.argv[2]+".verifying.key"
    else:
        sk_outfile = "signing.key"
        vk_outfile = "verifying.key"
    sk_s = sk.to_seed(prefix="sign0-")
    vk_s = vk.to_ascii("verf0-", "base32")
    open(sk_outfile,"w").write(sk_s)
    open(vk_outfile,"w").write(vk_s+"\n")
    print "wrote private signing key to", sk_outfile
    # NOTE(review): "write" below looks like a typo for "wrote".
    print "write public verifying key to", vk_outfile
elif sys.argv[1] == "sign":
    # Sign the SHA-256 of the message file with the given signing key.
    sk_arg = sys.argv[2]
    msg_arg = sys.argv[3]
    sk = ed25519.SigningKey(data_from_arg(sk_arg, "sign0-", 52, False),
                            prefix="sign0-")
    sig = sk.sign(message_rep(msg_arg), prefix="sig0-", encoding="base32")
    print sig
elif sys.argv[1] == "verify":
    # Verify a signature against the message digest; raises BadSignature on failure.
    vk_arg = sys.argv[2]
    msg_arg = sys.argv[3]
    sig_arg = sys.argv[4]
    vk = ed25519.VerifyingKey(data_from_arg(vk_arg, "verf0-", 52, True),
                              prefix="verf0-", encoding="base32")
    sig = data_from_arg(sig_arg, "sig0-", 103, True)
    vk.verify(sig, message_rep(msg_arg),
              prefix="sig0-", encoding="base32") # could raise BadSignature
    print "good signature!"
else:
    help()
|
[
"warner@lothar.com"
] |
warner@lothar.com
|
|
cade8d6acd2d53c2ab00934deb72f9baef388b31
|
7f760365660de815db319d20bb05e1fbd5fc8df4
|
/server/app/outputs/dmx.py
|
150ff08a6742385ae301ebc0a725eae9f721682f
|
[
"MIT"
] |
permissive
|
BasementCat/audio-reactive-led-strip
|
db5ac94eb3c43dfdb6a79501d6d8711579d41c51
|
a98bac8e04c0fae3022de9f5086914dc1f1192d8
|
refs/heads/master
| 2022-07-21T12:39:06.257207
| 2022-07-14T01:12:08
| 2022-07-14T01:12:08
| 216,214,804
| 2
| 0
|
MIT
| 2019-10-19T13:58:07
| 2019-10-19T13:58:07
| null |
UTF-8
|
Python
| false
| false
| 4,487
|
py
|
import os
import glob
import logging
import threading
import time
import subprocess
import re
from dmxpy.DmxPy import DmxPy
from app import Task
from app.lib.misc import FPSCounter
logger = logging.getLogger(__name__)
# PEP 8: use def instead of binding a lambda to a name (better tracebacks/repr).
def hexint(v):
    """Parse *v* as a base-16 integer (e.g. '0403' -> 1027)."""
    return int(v, 16)
def find_device_file__linux(vendor, product):
    """Scan sysfs for a USB-serial adapter matching *vendor*/*product*.

    Returns the matching '/dev/<name>' path, or None when /sys is absent
    or no adapter matches.
    """
    if not os.path.isdir('/sys'):
        # isdir() is False for missing paths too, covering both checks.
        return None
    for dev_path in glob.glob('/sys/bus/usb-serial/devices/*'):
        node_name = os.path.basename(dev_path)
        with open(os.path.join(dev_path, '../uevent'), 'r') as uevent:
            for raw in uevent:
                entry = raw.strip()
                if not entry or '=' not in entry:
                    continue
                key, value = entry.split('=')
                if key != 'PRODUCT':
                    continue
                dev_vendor, dev_product = map(hexint, value.split('/')[:2])
                if (dev_vendor, dev_product) == (vendor, product):
                    return os.path.join('/dev', node_name)
def find_device_file__macos(vendor, product):
    """Locate a macOS tty device for the USB adapter matching *vendor*/*product*
    by parsing `ioreg -p IOUSB -l -b` output into per-device dicts."""
    devices = []
    curdevice = {}
    res = subprocess.check_output(['ioreg', '-p', 'IOUSB', '-l', '-b']).decode('utf-8')
    for line in res.split('\n'):
        line = line.strip()
        if not line:
            continue
        # A '+-o <name> <...' line starts a new device entry: flush the
        # previous one before continuing.
        match = re.match(u'^\+-o (.+)\s+<', line)
        if match:
            if curdevice:
                devices.append(curdevice)
                curdevice = {}
            continue
        # '"key" = value' property lines; strings are unquoted, ints parsed,
        # anything else kept verbatim.
        match = re.match(u'^[\|\s]*"([\w\d\s]+)"\s+=\s+(.+)$', line)
        if match:
            k, v = match.groups()
            if v.startswith('"'):
                v = v[1:-1]
            else:
                try:
                    v = int(v)
                except:
                    pass
            curdevice[k] = v
    if curdevice:
        devices.append(curdevice)
    for d in devices:
        # NOTE(review): assumes matching entries always carry
        # 'USB Serial Number' — a KeyError here would propagate to the caller.
        if d.get('idVendor') == vendor and d.get('idProduct') == product:
            return '/dev/tty.usbserial-' + d['USB Serial Number']
def find_device_file(name):
    """Resolve *name* to a device file path.

    *name* is either a literal path ('/dev/ttyUSB0'), returned as-is, or a
    stable 'vendor:product' USB ID ('0403:6001') looked up in the OS device
    tree via the per-platform finders.

    Raises RuntimeError when an ID cannot be resolved.
    """
    if name.startswith('/') or ':' not in name:
        # Literal path. (A name without ':' can never be a vendor:product ID,
        # so the original's later "':' not in name" ValueError was unreachable
        # dead code and has been removed.)
        return name
    vendor, product = map(hexint, name.split(':'))
    for finder in (find_device_file__linux, find_device_file__macos):
        try:
            device = finder(vendor, product)
            if device:
                return device
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are not swallowed; finder failures are logged and the next
            # platform finder is tried.
            logger.debug("Failure in find device file", exc_info=True)
    raise RuntimeError(f"Can't find USB device {name}")
class DMX(Task):
    """Task that forwards per-frame channel data to a DMX USB adapter,
    rate-limiting normal renders to the configured FPS."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.config.get('DMX_DEVICE'):
            raise ValueError("No DMX_DEVICE in config")
        self.dmx = None                    # DmxPy handle, opened lazily
        self.dmx_lock = threading.Lock()   # guards DmxPy construction
        self.dmx_attempt = None            # timestamp of last open attempt
        self.delay = 1.0 / float(self.config.get('FPS', 60))  # min seconds between renders
        self.last_send = 0
        self.fps = FPSCounter('DMX')
        self.get_dmx()

    def get_dmx(self):
        """Return the DmxPy handle, lazily (re)opening it at most once per
        second; returns None while unavailable or when device is 'sink'."""
        if not self.dmx and self.config.get('DMX_DEVICE') != 'sink':
            # Throttle open attempts to one per second.
            if self.dmx_attempt is None or time.time() - self.dmx_attempt > 1:
                self.dmx_attempt = time.time()
                if not self.config.get('DMX_DEVICE'):
                    # Log the missing-device error only once, then mark the
                    # config entry False so later calls return silently.
                    if self.config.get('DMX_DEVICE') is None:
                        logger.error("No DMX device configured")
                        self.config['DMX_DEVICE'] = False
                    return
                with self.dmx_lock:
                    try:
                        self.dmx = DmxPy(find_device_file(self.config['DMX_DEVICE']))
                    except:
                        # NOTE(review): bare except — also catches
                        # KeyboardInterrupt/SystemExit; consider Exception.
                        logger.error("Can't open DMX device %s", self.config['DMX_DEVICE'], exc_info=True)
        return self.dmx

    def run(self, data):
        """Push channel values from *data*: 'dmx_force' renders immediately,
        'dmx' values are rendered at most once per self.delay seconds."""
        dmx = self.get_dmx()
        if dmx:
            if data.get('dmx_force'):
                with self.fps:
                    for chan, val in data['dmx_force'].items():
                        dmx.setChannel(chan, val)
                    dmx.render()
            if data.get('dmx'):
                for chan, val in data['dmx'].items():
                    dmx.setChannel(chan, val)
                if time.time() - self.last_send >= self.delay:
                    self.last_send = time.time()
                    with self.fps:
                        dmx.render()
|
[
"alec.elton@gmail.com"
] |
alec.elton@gmail.com
|
32e99b6d1e481856877756ea2cb6756722d16906
|
61ef327bd1d5ff6db7595221db6823c947dab42b
|
/FlatData/EquipmentStatExcelTable.py
|
e079ec5a2805f744c293cc9dd2edff8c4f954a6f
|
[] |
no_license
|
Aikenfell/Blue-Archive---Asset-Downloader
|
88e419686a80b20b57a10a3033c23c80f86d6bf9
|
92f93ffbdb81a47cef58c61ec82092234eae8eec
|
refs/heads/main
| 2023-09-06T03:56:50.998141
| 2021-11-19T12:41:58
| 2021-11-19T12:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,555
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class EquipmentStatExcelTable(object):
    # NOTE: generated by the FlatBuffers compiler (flatc) — regenerate from
    # the .fbs schema rather than editing this accessor class by hand.
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset inside *buf* and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = EquipmentStatExcelTable()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsEquipmentStatExcelTable(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # EquipmentStatExcelTable
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # EquipmentStatExcelTable
    def DataList(self, j):
        # Return the j-th EquipmentStatExcel from the DataList vector
        # (field slot 4), or None when the vector is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from FlatData.EquipmentStatExcel import EquipmentStatExcel
            obj = EquipmentStatExcel()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # EquipmentStatExcelTable
    def DataListLength(self):
        # Number of entries in the DataList vector (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # EquipmentStatExcelTable
    def DataListIsNone(self):
        # True when the DataList field is not present in the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
# Builder helpers (flatc also emits the deprecated long-name aliases below).
def Start(builder): builder.StartObject(1)
def EquipmentStatExcelTableStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def EquipmentStatExcelTableAddDataList(builder, DataList):
    """This method is deprecated. Please switch to AddDataList."""
    return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def EquipmentStatExcelTableStartDataListVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def EquipmentStatExcelTableEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
|
[
"rkolbe96@gmail.com"
] |
rkolbe96@gmail.com
|
30d2d8bf4d1bd7d1f4a8095bfd4336d191319e46
|
bbe5b336150c38f480a4c3a3a15e1d65a7dfc7d1
|
/tests/app/api/business/validators/application_validator/test_validate_documents.py
|
319034a3aff6b2c513ba2c418c2b4794d506282a
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
AusDTO/dto-digitalmarketplace-api
|
9135785c205fe04bbb07782c561c5c5f8cf8417d
|
af1f0c8979406f80223ab7a68266563abd80b2f4
|
refs/heads/master
| 2022-07-31T04:12:36.364555
| 2022-07-07T04:31:41
| 2022-07-07T04:31:41
| 62,025,672
| 6
| 7
|
MIT
| 2022-05-23T23:32:37
| 2016-06-27T04:34:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
from app.api.business.validators import ApplicationValidator
from app.models import Application
def test_can_get_error_when_no_documents():
application = Application(
data={}
)
errors = ApplicationValidator(application).validate_documents()
assert len(errors) == 1
def test_can_get_error_for_expired_documents():
    """Three dated documents with a past expiry yield one error each."""
    expired = {'filename': 'test.pdf', 'expiry': '2018/01/01'}
    application = Application(
        data={
            'documents': {
                'indemnity': dict(expired),
                'liability': dict(expired),
                'workers': dict(expired),
                'financial': {'filename': 'test.pdf'}
            }
        }
    )
    errors = ApplicationValidator(application).validate_documents()
    assert len(errors) == 3
def test_can_get_error_for_no_filename():
    """Blank filenames produce errors for every document plus the expiries."""
    blank_dated = {'filename': '', 'expiry': '2018/01/01'}
    application = Application(
        data={
            'documents': {
                'indemnity': dict(blank_dated),
                'liability': dict(blank_dated),
                'workers': dict(blank_dated),
                'financial': {'filename': ''}
            }
        }
    )
    errors = ApplicationValidator(application).validate_documents()
    assert len(errors) == 7
|
[
"noreply@github.com"
] |
AusDTO.noreply@github.com
|
e24f4a4008a9f2edade00871369f275ca42462dd
|
5b9bce9fdfc13848b6bacc73741f6e8fc5a4ae99
|
/client/client.py
|
eb5be712516765751211389e5083d166429114f2
|
[] |
no_license
|
ContinuumBridge/bridge_admin
|
4a5a036f4e0cb4e96366a85524aef0c33e82a7ff
|
efd4148a55221f74cb8a11139a8416d1af453408
|
refs/heads/master
| 2022-03-20T15:00:01.422221
| 2020-01-04T11:08:17
| 2020-01-04T11:08:17
| 17,435,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
import httplib
import json
import requests
import websocket
import time
import signal
from twisted.internet import threads
from twisted.internet import defer
from twisted.internet import reactor
#CID 64 on Production. Key: 6b8b20b7r2oPxGmlwjoFuJD/iLKCPTkBOW/ZR9F2vnyxGd1pHLVVime+srwxoOTP
#CB_ADDRESS = "portal.continuumbridge.com"
#KEY = "6b8b20b7r2oPxGmlwjoFuJD/iLKCPTkBOW/ZR9F2vnyxGd1pHLVVime+srwxoOTP"
# Staging service test
# NOTE(review): API credentials are committed in source — rotate these keys
# and load them from the environment or a config file instead.
CB_ADDRESS = "staging.continuumbridge.com"
KEY = "649e038do23icDEnfrtxf0BRCbLw9exPIyTDKSxJtm8EGm10jG4vMjUFRZqLmbfE"
START_DELAY = 60      # seconds before the first switchBoiler call
SWITCH_INTERVAL = 60  # seconds between subsequent toggles
# Staging:
DESTINATION = "BID106/AID29"
# Production
#DESTINATION = "BID167/AID12"
class Connection(object):
    """Authenticates against the ContinuumBridge API, opens a websocket and
    periodically toggles a remote boiler switch (Python 2 / Twisted)."""

    def __init__(self):
        self.boilerState = 0  # 0/1, flipped on every switchBoiler call
        # Run the blocking websocket client off the reactor thread.
        reactor.callInThread(self.connect)
        reactor.callLater(START_DELAY, self.switchBoiler)
        reactor.run()

    def connect(self) :
        """Exchange the API key for a session cookie, then open the websocket
        (blocks in run_forever)."""
        auth_url = "http://" + CB_ADDRESS + "/api/client/v1/client_auth/login/"
        auth_data = '{"key": "' + KEY + '"}'
        auth_headers = {'content-type': 'application/json'}
        response = requests.post(auth_url, data=auth_data, headers=auth_headers)
        self.cbid = json.loads(response.text)['cbid']
        print "CBID: ", self.cbid
        sessionID = response.cookies['sessionid']
        ws_url = "ws://" + CB_ADDRESS + ":7522/"
        websocket.enableTrace(True)
        self.ws = websocket.WebSocketApp(
            ws_url,
            on_open = self._onopen,
            header = ['sessionID: {0}'.format(sessionID)],
            on_message = self._onmessage)
        self.ws.run_forever()

    def _onopen(self, ws):
        print "on_open"

    def _onmessage(self, ws, message):
        # Pretty-print every incoming message for debugging.
        msg = json.loads(message)
        print "Message received:"
        print(json.dumps(msg, indent=4))

    def switchBoiler(self):
        """Send a toggle command for the 'Boiler' switch to DESTINATION and
        reschedule itself every SWITCH_INTERVAL seconds."""
        msg = {
            "source": self.cbid,
            "destination": DESTINATION,
            "body": {
                "n": 0,
                "d":
                    [
                        {
                            "i": "Boiler",
                            "s": self.boilerState,
                            "at": int(time.time() + 20)
                        }
                    ]
            }
        }
        print "Sending: ", msg
        self.ws.send(json.dumps(msg))
        print "Message sent"
        if self.boilerState == 0:
            self.boilerState = 1
        else:
            self.boilerState = 0
        reactor.callLater(SWITCH_INTERVAL, self.switchBoiler)

    def signalHandler(self, signal, frame):
        # NOTE(review): 'logging' is never imported and 'ModuleName' is
        # undefined in this module — this line raises NameError if the
        # handler ever runs; it is also never registered via signal.signal().
        logging.debug("%s signalHandler received signal", ModuleName)
        reactor.stop()
        exit()
if __name__ == '__main__':
    # Constructing Connection starts the Twisted reactor (runs until stopped).
    connection = Connection()
|
[
"peter.claydon@continuumbridge.com"
] |
peter.claydon@continuumbridge.com
|
41faa5af6577c42cbb30f57da6441bb4991e463c
|
e6f62843d8e7f580a8f2993988cde930a2f5daf2
|
/final_project/work_classifiers.py
|
b665de4b121586dd97b0848c3555e197199060cd
|
[] |
no_license
|
diegoami/ud120-projects
|
614002d317425139948a254293c46a335c1b1e22
|
2a573ea095ac456843c203592d7175800d49c938
|
refs/heads/master
| 2021-01-20T00:27:48.939087
| 2017-06-15T19:07:59
| 2017-06-15T19:07:59
| 89,133,359
| 0
| 0
| null | 2017-04-23T10:17:18
| 2017-04-23T10:17:18
| null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import numpy as np
def gaussian_classificator(features_train, labels_train):
    """Fit and return a Gaussian naive-Bayes classifier.

    Labels are flattened to 1-d with np.ravel before fitting.
    """
    from sklearn.naive_bayes import GaussianNB
    # fit() returns the estimator itself, so the call can be chained.
    return GaussianNB().fit(features_train, np.ravel(labels_train))
def tree_classificator(features_train, labels_train, **kwargs):
    """Fit and return a decision-tree classifier.

    All keyword arguments are forwarded to DecisionTreeClassifier.
    """
    from sklearn import tree
    # fit() returns the estimator itself, so the call can be chained.
    return tree.DecisionTreeClassifier(**kwargs).fit(
        features_train, np.ravel(labels_train))
def svc_classificator(features_train, labels_train):
    """Fit and return a support-vector classifier with default settings."""
    from sklearn.svm import SVC
    clf = SVC()
    # Consistency fix: flatten labels with np.ravel like the sibling
    # gaussian_/tree_classificator factories do (sklearn expects 1-d y and
    # warns on a column vector). Also removed the leftover template comment.
    clf.fit(features_train, np.ravel(labels_train))
    return clf
# Factory registry: each entry pairs a classifier factory ("method") with
# the keyword arguments it should be invoked with ("args").
gauss_call = {"method": gaussian_classificator, "args": {}}
tree_call = {"method": tree_classificator, "args": {"min_samples_split": 2}}
svc_call = {"method": svc_classificator, "args": {}}

# Factories exercised by default; svc_call is defined but not listed here.
classifiers = [tree_call, gauss_call]
|
[
"diego.amicabile@gmail.com"
] |
diego.amicabile@gmail.com
|
0d03f4997dff14b4117c84cbf186adcb19848f23
|
75ed4fe365819c9cb64522bd2bcb1590295dd4a8
|
/login/jwt_practice.py
|
5a325c3daf7ee8c0583f4b3065523263105acef1
|
[] |
no_license
|
thals7/web
|
2aaa36fecf44851d65031dd0c9f9062748bfb3f5
|
f2c9aca7b3cf0116985fe17190a1274264bdd2c1
|
refs/heads/master
| 2023-02-26T08:30:49.663381
| 2021-01-29T00:48:50
| 2021-01-29T00:48:50
| 298,844,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# https://pyjwt.readthedocs.io/en/latest/usage.html#encoding-decoding-tokens-with-rs256-rsa
import jwt

# Symmetric secret used for both signing and verifying (HS256).
key = "thisissecret"

# Sign an arbitrary payload into a compact JWT string.
encoded = jwt.encode({"some": "payload","like":"user_id"}, key, algorithm="HS256")
print(encoded)

# Verify and decode the token back into the original payload dict.
# NOTE(review): the PyJWT docs pass `algorithms` as a list (["HS256"]);
# confirm a bare string is accepted by the installed PyJWT version.
decoded = jwt.decode(encoded, key, algorithms="HS256")
print(decoded)
|
[
"thals_7@naver.com"
] |
thals_7@naver.com
|
cabda38c0a0fe289c78c7072a6bd20d7cfacf53c
|
968aedcc9e58d718bb3895f89de5292d8caabe52
|
/leetcode/Hash-Table/valid-sudoku.py
|
0a49bd177048dcb0d60662a3ac173ff7e1202e64
|
[] |
no_license
|
iCodeIN/competitive-programming-5
|
0729c9f09f12543455121fc633b051eb68529152
|
30bfafb6a7727c9305b22933b63d9d645182c633
|
refs/heads/master
| 2022-04-14T08:50:19.568207
| 2019-09-26T14:49:56
| 2019-09-26T14:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
class Solution:
    def isValidSudoku(self, board):
        """
        https://leetcode.com/problems/valid-sudoku/description/

        Validate a 9x9 Sudoku board: the digits "1"-"9" must not repeat
        within any row, any column, or any 3x3 sub-box. Empty cells are ".".

        Improvement over the original: a single pass with per-row/column/box
        sets replaces three separate passes and a hand-written 9x9 table of
        sub-box cells.

        :type board: List[List[str]]
        :rtype: bool
        """
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]

        for r in range(9):
            for c in range(9):
                value = board[r][c]
                if value == ".":
                    # Empty cells may repeat freely.
                    continue
                # Index of the 3x3 sub-box containing (r, c).
                b = (r // 3) * 3 + c // 3
                if value in rows[r] or value in cols[c] or value in boxes[b]:
                    return False
                rows[r].add(value)
                cols[c].add(value)
                boxes[b].add(value)
        return True
|
[
"10962267+arora-aditya@users.noreply.github.com"
] |
10962267+arora-aditya@users.noreply.github.com
|
a7d2a235ea6e49e5cb3cfec57704cd6952825467
|
84c9a6fb5e18741f14a55d0d737e2a556383770d
|
/venv/Lib/site-packages/w3af/core/data/misc/response_cache_key.py
|
14d57dbdfc5945b3463fb0df2398082606620c5c
|
[] |
no_license
|
AravindChan96/Vulcan
|
638a1db2f84df08bc50dd76c7f142014d529fbec
|
5548a6f36f04108ac1a6ed8e707930f9821f0bd9
|
refs/heads/master
| 2022-11-05T15:05:54.224578
| 2020-06-19T20:44:14
| 2020-06-19T20:44:14
| 273,396,348
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,841
|
py
|
"""
response_cache_key.py
Copyright 2019 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import zlib
# pylint: disable=E0401
from darts.lib.utils.lru import SynchronizedLRUDict
# pylint: enable=E0401
from w3af.core.controllers.core_helpers.not_found.response import FourOhFourResponse
from w3af.core.data.misc.xml_bones import get_xml_bones
from w3af.core.data.misc.encoding import smart_str_ignore
def get_response_cache_key(http_response,
                           clean_response=None,
                           headers=None):
    """
    Compute a stable cache key for an HTTP response.

    Note: query.body has been cleaned by get_clean_body()

    :param http_response: The HTTP response we want to get a cache key for
    :param clean_response: The FourOhFourResponse associated with the HTTPResponse
                           passed as parameter (optional, will be calculated if not
                           provided)
    :param headers: A string containing the HTTP response headers that have to be
                    used to calculate the hash
    :return: Hash of the HTTP response body
    """
    # BUG FIX: the original wrote `headers = '' or headers`, which always
    # evaluates to `headers` ('' is falsy), so a missing value was never
    # defaulted and str(None) leaked into the key. Default to '' as intended.
    headers = headers or ''

    #
    # Only some HTTP responses benefit from the XML-bones signature
    #
    if _should_use_xml_bones(http_response):
        body = get_xml_bones(http_response.get_body())
        normalized_path = FourOhFourResponse.normalize_path(http_response.get_uri())
    else:
        #
        # Get a clean_response if it was not provided
        #
        if clean_response is None:
            clean_response = FourOhFourResponse.from_http_response(http_response)

        body = clean_response.body
        normalized_path = clean_response.normalized_path

    #
    # Calculate the hash using all the captured information
    #
    key = ''.join([str(http_response.get_code()),
                   smart_str_ignore(normalized_path),
                   str(headers),
                   smart_str_ignore(body)])

    return quick_hash(key)
def _should_use_xml_bones(http_response):
# Ignore small responses (the bones for this document is not so
# representative)
if len(http_response.get_body()) < 256:
return False
# Ignore large responses (might break lxml parser)
if len(http_response.get_body()) > 1024 * 1024:
return False
# Check that this document is xml / html
has_expected_content_type = False
for content_type in ('xml', 'html'):
if content_type in http_response.content_type:
has_expected_content_type = True
if not has_expected_content_type:
return False
# Check that it actually has tags
if http_response.get_body().count('<') < 20:
return False
return True
def quick_hash(text):
    """Return a fast, process-local fingerprint string for *text*.

    Combines the builtin hash() with an adler32 checksum of the byte form.
    """
    data = smart_str_ignore(text)
    return '{0}{1}'.format(hash(data), zlib.adler32(data))
class ResponseCacheKeyCache(object):
    """LRU-backed memoization wrapper for get_response_cache_key().

    Memory impact is low: both the lookup keys and the stored values are
    the short strings produced by quick_hash().
    """

    MAX_SIZE = 2000

    def __init__(self):
        self._cache = SynchronizedLRUDict(self.MAX_SIZE)

    def get_response_cache_key(self,
                               http_response,
                               clean_response=None,
                               headers=None):
        """Return the (possibly cached) cache key for *http_response*."""
        # Prefer the cleaned body when available: it has request paths and
        # query-string parameters removed, so equal cleaned bodies collide
        # (and hit the cache) far more often than raw ones.
        if clean_response is None:
            source_body = http_response.body
        else:
            source_body = clean_response.body

        lookup_key = quick_hash('%s%s' % (smart_str_ignore(source_body), headers))

        cached = self._cache.get(lookup_key, None)
        if cached is not None:
            return cached

        computed = get_response_cache_key(http_response,
                                          clean_response=clean_response,
                                          headers=headers)
        self._cache[lookup_key] = computed
        return computed

    def clear_cache(self):
        """Drop every cached entry."""
        self._cache.clear()
|
[
"aravindchan.96@gmail.com"
] |
aravindchan.96@gmail.com
|
1e1ef944556644234b01de801bc01eac0e0e4c2e
|
48637665afeacae58d99e5203524ed8f2313d649
|
/drawBot/context/gifContext.py
|
2beff8238f2b3c31daaab1128958396ab581b1af
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
bitforks/drawbot
|
35853a63d2d685cda7789d2dc3ca812bad0cc6d0
|
e66f12cf1a4fdd412d27d3b9f36092112b6cbd4f
|
refs/heads/master
| 2021-01-20T16:44:45.084558
| 2015-09-17T14:40:19
| 2015-09-17T14:40:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
import AppKit
import Quartz
import os
import tempfile
import subprocess
from imageContext import ImageContext
# Locate the bundled gifsicle binary next to this module; fall back to a
# tools/ directory under the current working directory.
gifsiclePath = os.path.join(os.path.dirname(__file__), "tools", "gifsicle")
if not os.path.exists(gifsiclePath):
    gifsiclePath = os.path.join(os.getcwd(), "tools", "gifsicle")
class GifContext(ImageContext):
    """Image context that writes GIF files.

    Each page becomes one GIF frame; multi-page documents are assembled
    into a single animated GIF with the bundled gifsicle tool.
    """

    _saveImageFileTypes = {
        "gif": AppKit.NSGIFFileType,
    }
    fileExtensions = _saveImageFileTypes.keys()

    # Default frame delay, in gifsicle units of 1/100 second.
    _delay = 10

    def __init__(self):
        super(GifContext, self).__init__()
        # One delay entry per page, in 1/100 sec (see _frameDuration).
        self._delayData = []

    def _frameDuration(self, seconds):
        # gifsicle -h: Set frame delay to TIME (in 1/100sec).
        self._delayData[-1] = int(seconds * 100)

    def _newPage(self, width, height):
        super(GifContext, self)._newPage(width, height)
        self._delayData.append(self._delay)

    def _writeDataToFile(self, data, path, multipage):
        # Count the pages to decide whether an animated GIF is needed.
        pdfDocument = Quartz.PDFDocument.alloc().initWithData_(data)
        pageCount = pdfDocument.pageCount()
        shouldBeAnimated = pageCount > 1

        tempPath = path
        if shouldBeAnimated:
            # Write the per-frame GIFs somewhere temporary first.
            tempPath = tempfile.mkstemp(suffix=".gif")[1]

        inputPaths = super(GifContext, self)._writeDataToFile(data, tempPath, shouldBeAnimated)

        if shouldBeAnimated:
            cmds = [
                # gifsicle path
                gifsiclePath,
                # force to 256 colors
                "--colors", "256",
                # make it loop
                "--loop",
            ]
            # add source paths with delay for each frame
            for i, inputPath in enumerate(inputPaths):
                cmds += [
                    "--delay", "%i" % self._delayData[i],
                    inputPath
                ]
            cmds += [
                "--output",
                path
            ]
            # BUG FIX: the original serialized the argument list with
            # subprocess.list2cmdline() (Windows quoting rules) and ran it
            # through the shell (shell=True). On this (macOS/AppKit)
            # platform that breaks for paths containing spaces or shell
            # metacharacters. Passing the argv list directly avoids the
            # shell entirely and is also safer.
            popen = subprocess.Popen(cmds)
            popen.wait()
            # remove the temp input gifs
            for inputPath in inputPaths:
                os.remove(inputPath)
|
[
"frederik@typemytype.com"
] |
frederik@typemytype.com
|
bda72a45c56ce6b1c4ea927d9a8567f2951fdb18
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_sobers.py
|
86f6f5f194a353070e1f9fe678676c2bc8cd808f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _SOBERS():
def __init__(self,):
self.name = "SOBERS"
self.definitions = sober
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['sober']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e8e3d8de20abef556139aa2fe44ad71d69297b8a
|
ba3231b25c60b73ca504cd788efa40d92cf9c037
|
/nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_dospolicy_binding.py
|
c16c8f0e9fb625627902597e36abce766c98e1f5
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zhuweigh/vpx13
|
f6d559ae85341e56472e3592cbc67062dac34b93
|
b36caa3729d3ca5515fa725f2d91aeaabdb2daa9
|
refs/heads/master
| 2020-07-04T22:15:16.595728
| 2019-09-20T00:19:56
| 2019-09-20T00:19:56
| 202,435,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,292
|
py
|
#
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_dospolicy_binding(base_resource) :
""" Binding class showing the dospolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._name = None
self.___count = None
@property
def priority(self) :
r"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
r"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_dospolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_dospolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch lbvserver_dospolicy_binding resources.
"""
try :
if not name :
obj = lbvserver_dospolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = lbvserver_dospolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of lbvserver_dospolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count lbvserver_dospolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of lbvserver_dospolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_dospolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_dospolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_dospolicy_binding = [lbvserver_dospolicy_binding() for _ in range(length)]
|
[
"zhuwei@xsky.com"
] |
zhuwei@xsky.com
|
e4eb072d8f1eeaaa4cdf7f001effd5d4efd96d64
|
3f84f51751c4191bb81c9df7094578461fb12a2d
|
/AtcoderProblems/ABC/ABC063/C.py
|
a0d1edc24569a3ff1337dee5f6488331c804eeb4
|
[] |
no_license
|
rikukawamura/atcoder
|
7ff49f1bd8534b99d87fe81ef950e1ba77eee8b8
|
09c0cfe3ce25be56d338614a29e996f4106117cd
|
refs/heads/master
| 2023-08-13T21:21:19.058219
| 2021-09-28T10:02:42
| 2021-09-28T10:02:42
| 329,206,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
import pdb
import itertools
N = int(input())
S = sorted([int(input()) for _ in range(N)], reverse=False)
non_ten = []
ten = []
for s in S:
if s % 10 == 0:
ten.append(s)
else:
non_ten.append(s)
ten = sorted(ten)
non_ten = sorted(non_ten)
total = sum(ten)+sum(non_ten)
'''
1. 総合点が10の倍数且つ,10の倍数でない問題がある場合
2. 総合点が10の倍数且つ,10の倍数でない問題がない場合
3. 総合点が10の倍数でない
の三つに場合分け
'''
if total % 10 == 0 and non_ten != []:
print(total-non_ten[0])
elif total % 10 == 0 and non_ten == []:
print(0)
else:
print(total)
|
[
"49993650+rikukawamura@users.noreply.github.com"
] |
49993650+rikukawamura@users.noreply.github.com
|
50899501d48aefbf8069736a20d4adf832d5c014
|
195f19578f3eea3f8c3e3a780655ce2f8dd009d0
|
/caixa_racional/views.py
|
11f834c7fb9fdce95b8fa501e59832eee8e6a44b
|
[] |
no_license
|
osmarsalesjr/TheBeeFreshTeam
|
5b68c26d413940badc0814fb5c4cfc953b4fb695
|
9ca839083d903236054a813b265b0d09f34cb288
|
refs/heads/master
| 2022-12-17T11:22:30.482340
| 2019-07-14T00:11:50
| 2019-07-14T00:11:50
| 194,682,998
| 0
| 0
| null | 2022-12-08T01:47:50
| 2019-07-01T13:57:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import generics
from caixa_racional.models import Temperatura, BaseDeDados
from caixa_racional.serializers import TemperaturaSerializer, BaseDeDadosSerializer
class TemperaturaList(generics.ListCreateAPIView):
queryset = Temperatura.objects.order_by('-tempo')
serializer_class = TemperaturaSerializer
name = 'temperatura-list'
class TemperaturaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Temperatura.objects.all()
serializer_class = TemperaturaSerializer
name = 'temperatura-detail'
class BaseDeDadosList(generics.ListCreateAPIView):
queryset = BaseDeDados.objects.all()
serializer_class = BaseDeDadosSerializer
name = 'base-de-dados-list'
class BaseDeDadosDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = BaseDeDados.objects.all()
serializer_class = BaseDeDadosSerializer
name = 'base-de-dados-detail'
# Create your views here.
class ApiRoot(generics.GenericAPIView):
name = 'api-root'
def get(self, request, *args, **kwargs):
return Response({
'temperaturas': reverse(TemperaturaList.name, request=request),
})
|
[
"osmarsalesjr@gmail.com"
] |
osmarsalesjr@gmail.com
|
c44e31915ea2575dc522eb49e10a41f4cfbe1772
|
308d75172c81bddb45c82429e4ddb3e09a3d220e
|
/01-OpencvPythonTutorial/ch22/06-hist-normalized.py
|
2040ee82d34de1984caff9ba18df6ac7a162de4c
|
[] |
no_license
|
Damon0626/OpenCV3ForBeigner
|
956f2163249b5a3d1426089e3650695467a0427f
|
b3e8c5c201b6adabe067c9f2d1f614d93dcef447
|
refs/heads/master
| 2020-04-04T16:49:32.806739
| 2019-02-21T13:54:12
| 2019-02-21T13:54:12
| 156,095,139
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# -*-coding:utf-8-*-
# @Author: Damon0626
# @Time : 18-12-16 下午11:25
# @Email : wwymsn@163.com
# @Software: PyCharm
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread('contrast75.png')
hist, bins = np.histogram(image.flatten(), 256, [0, 256])
cdf = hist.cumsum() # 累积分布
cdf_mormalized = cdf*hist.max()/cdf.max()
plt.plot(cdf_mormalized, color='b')
plt.hist(image.flatten(), 256, [0, 256], color='r')
plt.xlim([0, 256])
plt.legend(['cdf', 'histogram'], loc='upper left')
plt.show()
|
[
"2404448093@qq.com"
] |
2404448093@qq.com
|
85e05e3c33ca86dc0280e5c8edbaa2ac143212c9
|
6930e9d3372e83cf43a47ae8ad165f83a218aee2
|
/capture/noworkflow/now/utils/bytecode/f_trace.py
|
93e8b89a211b8519aa8d55ca65e360e7e73c8d43
|
[
"MIT"
] |
permissive
|
hugobowne/noworkflow
|
02ab47a8b3377ee56f1e7c4552a8dbcb3d15e5f0
|
333cbe274348428f1a9514fe81406f8416036845
|
refs/heads/master
| 2021-01-17T20:27:16.524245
| 2015-11-18T23:53:28
| 2015-11-18T23:53:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
# Copyright (c) 2015 Universidade Federal Fluminense (UFF)
# Copyright (c) 2015 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
""" Define f_trace related interpreters and functions """
# pylint: disable=R0902
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import sys
from .code_interpreter import CodeInterpreter, PyInterpreter
class AlmostReadOnlyDict(dict):
    """Dict wrapper that shields the original mapping from mutation.

    Reads fall through to the wrapped data, but writes and deletes only
    touch a private overlay, so the underlying entries are never changed.
    """

    def __init__(self, *args, **kwargs):
        super(AlmostReadOnlyDict, self).__init__(*args, **kwargs)
        # Overlay holding every mutation made through this instance.
        self.other = {}

    def __getitem__(self, key):
        # The overlay shadows the base mapping.
        try:
            return self.other[key]
        except KeyError:
            return super(AlmostReadOnlyDict, self).__getitem__(key)

    def __setitem__(self, key, value):
        self.other[key] = value

    def __delitem__(self, key):
        # Only overlay entries can be removed; base keys stay untouched and
        # a missing key is silently ignored (no KeyError).
        self.other.pop(key, None)
class FindFTrace(CodeInterpreter):
    """ Find <expr>.f_trace attribution """

    # Opcodes neutralized so that interpreting the bytecode cannot cause
    # observable side effects while searching for the f_trace store.
    # Stores/deletes of plain locals and names stay enabled: their effects
    # are absorbed by the AlmostReadOnlyDict wrappers installed below.
    _SIDE_EFFECT_OPS = (
        'store_subscr', 'store_global',
        'delete_subscr', 'delete_attr', 'delete_global',
        'print_expr',
        # Python 2 slice opcodes
        'store_slice__0', 'store_slice__1',
        'store_slice__2', 'store_slice__3',
        'delete_slice__0', 'delete_slice__1',
        'delete_slice__2', 'delete_slice__3',
    )

    def __init__(self, *args, **kwargs):
        # Replace every side-effecting opcode handler with a no-op before
        # the base interpreter initializes.
        for opcode_name in self._SIDE_EFFECT_OPS:
            setattr(self, opcode_name, self.nop)

        super(FindFTrace, self).__init__(*args, **kwargs)

        # Protect the caller's namespaces from mutation during execution.
        self._locals = AlmostReadOnlyDict(self._locals)
        self._globals = AlmostReadOnlyDict(self._globals)

    def store_attr(self):
        """ STORE_ATTR opcode: stop as soon as f_trace is assigned. """
        if self.names[self.oparg] == 'f_trace':
            self._stop = True
            self.result = self.stack.pop() if self.stack else True
# Concrete interpreter type: FindFTrace's side-effect-free opcode handlers
# mixed with the version-appropriate PyInterpreter base. The class name must
# be a native str on each major version (unicode on 3.x, bytes on 2.x).
f_trace_name = u'FTraceExe' if sys.version_info >= (3, 0) else b'FTraceExe'
FTraceExe = type(f_trace_name, (FindFTrace, PyInterpreter), {})
def get_f_trace(code, loc, glob):
    """Interpret *code* and return the value assigned to f_trace
    (the frame, per the original docstring's convention)."""
    executor = FTraceExe(code, loc, glob)
    executor.execute()
    return executor.result
def find_f_trace(code, loc, glob, lasti):
    """Check whether *code* stores into an attribute named f_trace, and
    whether that store happens at the instruction offset *lasti*."""
    # Fast path: the code object never references the name f_trace.
    if 'f_trace' not in code.co_names:
        return False

    finder = FindFTrace(code, loc, glob)
    finder.execute()
    if not finder.result:
        return False

    # Walk the line-start offsets and find the line that contains the
    # store instruction; compare it with the frame's last instruction.
    previous_offset = 0
    for line_offset in finder.linestarts:
        if line_offset >= finder.opi:
            return lasti == previous_offset
        previous_offset = line_offset
    return False
|
[
"joaofelipenp@gmail.com"
] |
joaofelipenp@gmail.com
|
84272d0c64a8128d488da1959bb8af7afbd979c7
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/not/sample_good807.py
|
55e1c22bfa14474792c197d5bc8d4af90bf8e645
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674
| 2020-05-05T08:37:16
| 2020-05-05T08:37:16
| 138,386,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
import stringprep
import difflib
import math
import array
import textwrap
import datetime
import readline
import random

# Print the first `nterms` Fibonacci numbers, starting from 0.
nterms = 618
n1, n2 = 0, 1
if nterms <= 0:
    print("Please provide a positive integer.")
elif nterms == 1:
    print("Fibonacci sequence upto", nterms, ":")
    print(n1)
else:
    print("Fibonacci sequence:")
    count = 0
    # BUG FIX: the original loop condition was the constant `618 > 0`,
    # which never becomes false, so the script looped forever. Terminate
    # after printing `nterms` values instead.
    while count < nterms:
        print(n1)
        nth = n1 + n2
        n1 = n2
        n2 = nth
        count += 1
|
[
"barnsa@uni.coventry.ac.uk"
] |
barnsa@uni.coventry.ac.uk
|
7012967cb00baae4f581bb049013311b240da290
|
61bc53ec90d92aece91753ec5ec9d25e0879a1e2
|
/content/pythia/pythia/tasks/vqa/textvqa/dataset.py
|
c4bc2ee38892409d39789dc107a8a061951eb380
|
[
"BSD-3-Clause"
] |
permissive
|
aluka1994/textvqa
|
08a16c9b21ea9c5eca05f5d4d1763c190d2d7275
|
694cb2be08def519ba73be78e34664afa2c607b5
|
refs/heads/master
| 2021-05-26T23:44:21.973827
| 2020-04-08T22:05:58
| 2020-04-08T22:05:58
| 254,190,630
| 0
| 0
|
MIT
| 2020-04-08T20:14:11
| 2020-04-08T20:14:10
| null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from pythia.tasks.vqa.vizwiz import VizWizDataset
from pythia.utils.text_utils import word_tokenize
class TextVQADataset(VizWizDataset):
    """TextVQA dataset: reuses the VizWiz pipeline under the name
    "textvqa" and adds EvalAI-format prediction export."""

    def __init__(self, dataset_type, imdb_file_index, config, *args, **kwargs):
        super().__init__(dataset_type, imdb_file_index, config, *args, **kwargs)
        self._name = "textvqa"

    def format_for_evalai(self, report):
        """Convert a batch *report* into EvalAI prediction dicts."""
        best_ids = report.scores.argmax(dim=1)
        vocab_size = self.answer_processor.get_true_vocab_size()

        predictions = []
        for idx, question_id in enumerate(report.question_id):
            answer_id = best_ids[idx].item()
            if answer_id >= vocab_size:
                # Indices past the fixed vocabulary select one of the
                # per-sample context tokens instead.
                token = report.context_tokens[idx][answer_id - vocab_size]
                answer = word_tokenize(token)
            else:
                answer = self.answer_processor.idx2word(answer_id)
            predictions.append({"question_id": question_id.item(),
                                "answer": answer})
        return predictions
|
[
"anandkumar@instance-1.us-central1-a.c.andromanit.internal"
] |
anandkumar@instance-1.us-central1-a.c.andromanit.internal
|
a355eeec111864ef7af555c51e8460f11f29c365
|
0581988cad7e0ea62a638d551548e409af1e5dc1
|
/20200529/UI_PPT2PDF/myUI_ppt2pdf.py
|
cc3732bbc0196e33f92063a50d906881b836f73f
|
[] |
no_license
|
Aimee888/python-20200513
|
7c1dff7d7f0fdea08e12735efeb2e889fedeee10
|
578c388be5582dc7f1556f95168adf0399b7ea1f
|
refs/heads/master
| 2023-01-06T10:21:35.014780
| 2020-11-03T01:07:04
| 2020-11-03T01:07:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
#!/usr/bin/env python
# _*_ coding: UTF-8 _*_
"""=================================================
@Project -> File : six-dialog_design -> myUI_ppt2pdf.py
@IDE : PyCharm
@Author : Aimee
@Date : 2020/5/29 17:11
@Desc :
================================================="""
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtGui import QPainter, QPixmap
from ui_ppt2pdf import Ui_MainWindow
class QmyMainWindow(QMainWindow):
    """Main window hosting the generated PPT-to-PDF converter UI."""

    def __init__(self, parent=None):
        super().__init__(parent)  # invoke the QMainWindow constructor
        self.ui = Ui_MainWindow()  # create the generated UI object
        self.ui.setupUi(self)  # build the widgets onto this window
if __name__ == '__main__':
    app = QApplication(sys.argv)  # create the Qt application
    form = QmyMainWindow()
    form.show()
    sys.exit(app.exec_())  # run the event loop; exit with its status code
|
[
"961745931@qq.com"
] |
961745931@qq.com
|
673df8917fcffa493359510bf3037a32df3e67b3
|
70bc77336e4544031ad7d7d29a2e964ef2626076
|
/ui/RepositoryTreeObject.py
|
981473acc6c301a58ddafa660cc03bbf33b220b2
|
[] |
no_license
|
DronMDF/vanadis
|
9af7a8c9281bf0eb17df593f5c9fc9345e474612
|
de692207bbd127c5a9952e3144653492a0ba969f
|
refs/heads/master
| 2020-04-17T08:11:18.411429
| 2016-12-21T20:50:05
| 2016-12-21T20:50:05
| 66,539,179
| 1
| 0
| null | 2016-12-21T20:50:06
| 2016-08-25T08:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 532
|
py
|
from pathlib import Path
from ui import RepositoryId
class RepositoryTreeObject:
    """Wrapper around a repository tree entry (a blob or a sub-tree)."""

    def __init__(self, entry, prefix, repo=None):
        self.entry = entry
        self.prefix = prefix
        self.repo = repo

    def id(self):
        """Identifier of the underlying object."""
        return RepositoryId(self.entry.id)

    def path(self):
        """Entry path: the prefix joined with the entry name."""
        return str(Path(self.prefix) / self.entry.name)

    def name(self):
        """Bare entry name."""
        return self.entry.name

    def is_dir(self):
        # Sub-trees are typed 'tree'; everything else is a blob.
        return self.entry.type == 'tree'

    def content(self):
        """UTF-8 decoded blob content, looked up in the repository."""
        return self.repo[self.entry.id].data.decode('utf8')
|
[
"dron.valyaev@gmail.com"
] |
dron.valyaev@gmail.com
|
8be2f70f024036be4e4f7cc27126fd302cb87bd6
|
a4fd9f5d765351fb771455db18290e48affc3747
|
/password_generator/migrations/0001_initial.py
|
6cba7dcf9ebd26fb59d017987760ec2b182b24db
|
[
"MIT"
] |
permissive
|
rwgeaston/django-password-generator
|
a7aa9627c721b8a0f5ba66149d681c333f79da59
|
62607905bafc111802c61223a1c3c34aa927c9fc
|
refs/heads/master
| 2020-03-28T18:50:35.512795
| 2018-09-15T22:03:23
| 2018-09-15T22:03:23
| 148,917,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
# Generated by Django 2.0.6 on 2018-09-15 15:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Word and Wordset tables and links
    each Word to its Wordset via a foreign key."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Word',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('word', models.CharField(max_length=15)),
                # Indexed integer; presumably the word's character count,
                # stored denormalized for length-based queries -- confirm
                # against the code that populates it.
                ('word_length', models.IntegerField(db_index=True)),
                # Indexed counter, defaulting to 0.
                ('count', models.IntegerField(db_index=True, default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Wordset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
        ),
        # The FK is added after both models so the Wordset target exists;
        # deleting a Wordset cascades to its words.
        migrations.AddField(
            model_name='word',
            name='wordset',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='words', to='password_generator.Wordset'),
        ),
    ]
|
[
"rwgeaston@gmail.com"
] |
rwgeaston@gmail.com
|
0a87c613070041cd6ca47ab39268421df5ed335a
|
f06680ed95c01f50f0b484ffa81a5baea022282d
|
/data/parse_wiki.py
|
80f18cd87ef5eb7edc85c99419d97b5428901daf
|
[
"MIT"
] |
permissive
|
mohit1997/DeepZip
|
d3980935517b311b06d39a429546de9f024a73e5
|
8c35502397a1488c89fa282ed033cc9d5fd4b4dc
|
refs/heads/master
| 2022-03-16T02:17:55.479656
| 2022-03-04T22:09:48
| 2022-03-04T22:09:48
| 141,168,415
| 126
| 28
| null | 2019-04-23T23:42:20
| 2018-07-16T17:03:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import io
import sys
import numpy as np
import json
import argparse

# Build a character-level integer encoding of a text corpus (e.g. enwik8):
# writes the char<->id vocabulary to a JSON params file and the encoded
# corpus as a numpy column vector of shape (n_chars, 1).
parser = argparse.ArgumentParser(description='Input')
parser.add_argument('-param_file', action='store', dest='param_file',
                    help='param file file', default='params.json')
parser.add_argument('-input', action='store', dest='input_file_path',
                    help='input file path', default='enwik8')
parser.add_argument('-output', action='store', dest='output_file_path',
                    help='output file path', default='npwik8')
args = parser.parse_args()

# FIX: read the corpus inside a context manager so the handle is closed
# deterministically (the original never closed the input file).
with io.open(args.input_file_path, mode="r", encoding="utf-8") as f:
    data = f.read()
print(len(data))

# One id per distinct character.
# NOTE(review): set() iteration order varies between runs (hash
# randomization), so the id assignment is not reproducible across runs;
# sorted(set(data)) would make it deterministic -- confirm before changing,
# since existing params.json files depend on the old mapping.
vals = list(set(data))
char2id_dict = {c: i for (i, c) in enumerate(vals)}
id2char_dict = {i: c for (i, c) in enumerate(vals)}

params = {'char2id_dict': char2id_dict, 'id2char_dict': id2char_dict}
with open(args.param_file, 'w') as f:
    json.dump(params, f, indent=4)
print(char2id_dict)
print(id2char_dict)

# Encode the corpus as a column vector of character ids and save it.
out = [char2id_dict[c] for c in data]
integer_encoded = np.array(out)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
print(integer_encoded[:10])
print(data[:10])
np.save(args.output_file_path, integer_encoded)
|
[
"goyal.mohit999@gmail.com"
] |
goyal.mohit999@gmail.com
|
bd88a28726322d9e402e7e14401d7b7c2c0e8786
|
109ac2891c5af60cc0a5c9e988048315314014b3
|
/Data Structure ZJU/printN2.py
|
719f3203bd1e631382a9a080b659424415e69ebe
|
[] |
no_license
|
francislinking/PTA-codes
|
485c6019a458fa1705dde6f84a69b33c0bd7de81
|
fea40800c6813300fe56f8b14f159d971b745a6b
|
refs/heads/master
| 2021-07-10T14:50:16.643802
| 2021-05-02T02:16:35
| 2021-05-02T02:16:35
| 244,085,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 18:48:24 2020
@author: Deng Jie
"""
def printN(N):
    """Recursively print the integers 1..N inclusive, one per line."""
    # Base case: a single line for N == 1.
    if N == 1:
        print(1)
        return
    # Print 1..N-1 first, then this value.
    printN(N - 1)
    print(N)
# Read the target count from stdin. int() replaces the original eval(),
# which would have executed arbitrary expressions typed by the user.
n = int(input())
printN(n)
|
[
"francis.linking@gmail.com"
] |
francis.linking@gmail.com
|
fd3e9d3a74cc896ead349afb4520114128eef17e
|
fa81f1c5039da7554277a87d334cbee675e91995
|
/yelp/migrations/0002_userreview.py
|
7f9ee9895e7e1142e55b6537075c7be7a23e96e6
|
[] |
no_license
|
hldai/labelel
|
b5eaaac5cef73ccf6941ffed474e8b544c76a944
|
87c36972d1d7f4c1b146c185bcdee5207c030b8d
|
refs/heads/master
| 2020-12-30T13:39:45.673434
| 2017-07-09T06:20:32
| 2017-07-09T06:20:32
| 91,239,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-17 08:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the UserReview link table: pairs a username with a review id,
    # both stored as plain 64-character strings (no FK constraints).
    dependencies = [
        ('yelp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=64)),
                ('review_id', models.CharField(max_length=64)),
            ],
        ),
    ]
|
[
"hldai@outlook.com"
] |
hldai@outlook.com
|
d26db268212730243e31afef9dac80f44edda814
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/store/views_20211010181824.py
|
5404e2083299cd5fa19e695dd5bbf5bc15364268
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
from django.shortcuts import get_object_or_404, render
from store.models import Product
from cate import member
# Create your views here.
def store(request, category_slug=None):
    """Render the store page listing all products marked available.

    NOTE(review): the original first line fetched a Category via
    get_object_or_404, but `Category` is never imported in this module
    (guaranteed NameError at runtime) and the result was unused; the line
    is dropped until the category filter is actually implemented.
    `category_slug` is currently ignored -- confirm intended behaviour.
    """
    products = Product.objects.all().filter(is_available=True)
    products_count = products.count()
    content = {
        'products': products,
        'products_count': products_count,
    }
    return render(request, 'store/store.html', content)
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
a7e1a5f16a4ea6519e2f6f5df35e23b32f5345ba
|
9a486a87e028303a551fbd0d1e1b6b650387ea14
|
/parse_tlog/guide_flow.py
|
2cebc5bd734a8a21dc044b2fbc02dd903521f052
|
[] |
no_license
|
shanlihou/pythonFunc
|
7b8e7064fddd4522e492c915c086cc6c5abc6eec
|
646920256551ccd8335446dd4fe11aa4b9916f64
|
refs/heads/master
| 2022-08-24T20:33:12.287464
| 2022-07-21T12:00:10
| 2022-07-21T12:00:10
| 24,311,639
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
# coding:utf-8
import utils
import const
import LogOne
import csv_output
def guide_flow():
    """Aggregate tutorial ('GuideFlow') progress from the filtered tlog and
    write a per-node pass-rate CSV."""
    # Pre-filtered log containing only GuideFlow records.
    fname = utils.filter_from_origin('GuideFlow')
    # guide_id -> set of player ids (gbid) that reached that node.
    id_dic = {}
    avatar_count = utils.get_avatar_count()
    with utils.utf8_open(fname) as fr:
        for line in fr:
            lo = LogOne.get_log_from_line(line)
            if not lo:
                continue
            id_dic.setdefault(lo.guide_id, set())
            id_dic[lo.guide_id].add(lo.gbid)
    # (node id, unique player count), sorted ascending by node id.
    rets = [(int(k), len(v)) for k, v in id_dic.items()]
    rets.sort(key=lambda x: x[0])
    csv = csv_output.CSVOutPut()
    # Header row (Chinese): node / characters created / players passing node /
    # node pass rate.
    csv.set(0, 0, '节点')
    csv.set(0, 1, '创角数')
    csv.set(0, 2, '节点通过人数')
    csv.set(0, 3, '节点通过率')
    idx = 1
    for key, num in rets:
        csv.set(idx, 0, key)
        csv.set(idx, 1, avatar_count)
        csv.set(idx, 2, num)
        # Pass rate relative to total characters created.
        csv.set(idx, 3, num / avatar_count)
        idx += 1
    out_name = utils.get_out_name('out', 'guide_flow.csv')
    csv.output(out_name)
if __name__ == '__main__':
    guide_flow()
|
[
"shanlihou@gmail.com"
] |
shanlihou@gmail.com
|
4d2f1b947c37bea509fec0603fec028f2816c5f7
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/level_head/aws_company_right_program.py
|
a269a3cf60adfad0ae305533decf4c19f884f035
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import srt_translate as st #must use python 3
#import os
#subdirec = 'home/ben/test'
#os.chdir(test)
# Replace the subscriptionKey string value with your valid subscription key.
# SECURITY NOTE(review): a literal API key is committed here; it should be
# loaded from an environment variable and this key revoked.
subscriptionKey = 'a3487bf0249992cc26cd5aaf14d5f0b0'
# Language codes and names:
# English: en
# Greek: el
# More: http://www.emreakkas.com/internationalization/microsoft-translator-api-languages-list-language-codes-and-names
inputfile='test.srt'
outputfile='test_result.srt'
fromlang = 'en'
tolang = 'el'
# Translate the subtitle file into Greek. `fromlang` is unused because the
# final argument is commented out -- presumably the source language is
# auto-detected by the service; confirm before relying on it.
st.convert_srt_file(inputfile, outputfile, subscriptionKey, tolang)#, fromlang)
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
1ff640fe7e36b1fa053268a8444e4d290cd90c50
|
f514f2746c69726ac38f8e8679eb2b646d11ec91
|
/dota2_stats/views/matches.py
|
857690b007094ca61e06f0758e261b316c1b1ecb
|
[] |
no_license
|
bobbyrward/dota2_stats
|
871b99ca6550496acc95ff44947a23566708861f
|
b3d2e7fbe4712dcb08f75e3a15b358a8388711a3
|
refs/heads/master
| 2021-01-19T03:23:54.158490
| 2013-02-15T22:56:08
| 2013-02-15T22:56:08
| 8,169,782
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
from pyramid.view import view_config
from dota2_stats.models import DBSession
from dota2_stats.models import Match
from dota2_stats.models import PlayerMatch
from dota2_stats.models import Player
from dota2_stats.models import Hero
from dota2_stats.views.common import template_params
@view_config(route_name='recent_matches', renderer='templates/recent_matches.jinja2')
def recent_matches(request):
    """Render the 25 most recently started matches, newest first."""
    matches = DBSession.query(Match).order_by(Match.start_time.desc()).limit(25).all()
    return template_params(request, matches=matches)
@view_config(route_name='match_details', renderer='templates/match_details.jinja2')
def match_details(request):
    """Render a single match looked up by primary key from the route.

    NOTE(review): Query.get() returns None for an unknown id; the template
    presumably tolerates a None match -- confirm, or raise HTTPNotFound here.
    """
    match = DBSession.query(Match).get(request.matchdict['id'])
    return template_params(request, match=match)
|
[
"bobbyrward@gmail.com"
] |
bobbyrward@gmail.com
|
93f9ff7f0c20a17eac24bdb842d09cdd06d72f77
|
94bfb1346a9ce4cf6ca8bfeeb5194b7a467731a6
|
/aclark/db/migrations/0031_siteconfiguration_company.py
|
d98d10bc777b9bace6f33643961cd5d7281078eb
|
[
"MIT"
] |
permissive
|
aclark4life/aclarknet-best-pro
|
4006cad37c2eec166a98a73e988b9b490a10e5cb
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
refs/heads/master
| 2023-03-01T09:10:04.041913
| 2020-12-01T18:40:07
| 2020-12-01T18:40:07
| 140,634,961
| 0
| 0
|
MIT
| 2021-02-10T01:57:38
| 2018-07-11T22:49:33
|
CSS
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Generated by Django 2.2.3 on 2019-07-30 18:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [("db", "0030_company")]
    operations = [
        # Link SiteConfiguration to an optional Company (nullable FK).
        migrations.AddField(
            model_name="siteconfiguration",
            name="company",
            field=models.ForeignKey(
                blank=True,
                # Only companies still flagged active may be selected.
                limit_choices_to={"active": True},
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="db.Company",
            ),
        )
    ]
|
[
"aclark@aclark.net"
] |
aclark@aclark.net
|
87ef5d8ed682d518f47eb6e1e03850066d251895
|
ff268c31f10cbd3e1c44261ca65a45c88ed3dae5
|
/Transfer Learning/Code/classify.py
|
2670af12d79b9e3e4f9dd6f648aa8ad8c6399325
|
[
"MIT"
] |
permissive
|
gyani91/Machine-Learning
|
6642c65359ed48b212a0f4296f5ce908ed6e95e3
|
2fabaa6386d3be24e56aaa9a19d58cd19d225198
|
refs/heads/master
| 2023-05-27T10:25:55.222053
| 2023-05-15T18:12:45
| 2023-05-15T18:12:45
| 114,811,646
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
import tensorflow as tf
from resize import *
from convert import *
#IMAGE_PATH = "Classify/panda.jpg"
# Frozen retrained graph plus its label file; FINAL_TENSOR_NAME is the
# softmax output op inside that graph.
TRAINED_GRAPH = "sets_graph.pb"
LABELS = "label.txt"
FINAL_TENSOR_NAME = "final_tensor"
def classify(IMAGE_PATH):
    """Classify one image with the frozen transfer-learned graph and print
    every class with its confidence, highest first (TF1 API)."""
    # Convert the image to JPEG
    converted_image = convert(IMAGE_PATH)
    # Resize the image
    resized_image = resize(converted_image)
    # Read the input_image
    input_image = tf.gfile.FastGFile(resized_image, 'rb').read()
    # Load labels
    class_labels = [line.rstrip() for line
                       in tf.gfile.GFile(LABELS)]
    #Load the trained model
    with tf.gfile.FastGFile(TRAINED_GRAPH, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Feed the input_image to the graph and get the prediction
        softmax_tensor = sess.graph.get_tensor_by_name(FINAL_TENSOR_NAME+':0')
        predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': input_image})
        # Sort the labels of the prediction in order of confidence
        sorted_labels = predictions[0].argsort()[-len(predictions[0]):][::-1]
        print('Classification:')
        for index in sorted_labels:
            class_label = class_labels[index]
            percentage = predictions[0][index]*100
            print(('%s (%.2f' % (class_label, percentage))+'%)')
|
[
"noreply@github.com"
] |
gyani91.noreply@github.com
|
61d21cd5046836892b809cc0fc7f1a977605c227
|
5c2824ff58eb8a57d71b3c24873c4695c7c3a2ba
|
/Fundamentals_Final_Exam/03.Problem_Three.py
|
1e7d798f28b43c1d3e3e8e862cdef755df0fecb0
|
[] |
no_license
|
svetoslavastoyanova/Python_Fundamentals_Mid_and_Final_Exams
|
e7ff6677bc762b24262019a0ebb0ed6a5952c50d
|
781e03fd5f540d55b41fbe6ef1d722d39ed62176
|
refs/heads/main
| 2023-04-17T17:35:38.894988
| 2021-05-02T12:28:26
| 2021-05-02T12:28:26
| 349,411,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Read (name, minutes, seconds) triples from stdin until the sentinel
# "Finish" and classify each lap time into a medal band.
line = input()
minutes = int(input())
seconds = int(input())
total_time = 0
text = ""
while line != "Finish":
    total_time = minutes*60 + seconds
    if total_time < 55:
        text = "Gold"
    elif 55 <= total_time <= 85:
        text = "Silver"
    elif 85 < total_time <= 120:
        text = "Bronze"
    # NOTE(review): `text` and `line` are computed but never printed, and a
    # time over 120s leaves the previous medal in place -- the output step
    # of this exercise appears to be missing; confirm against the task spec.
    line = input()
    minutes = int(input())
    seconds = int(input())
|
[
"svetoslava_stoyanova92@abv.bg"
] |
svetoslava_stoyanova92@abv.bg
|
16a3a8560d14738c480e32368a3b4c2b7f240037
|
fd474c0c0df7de6c09f802586068a2069222aadd
|
/reviewboard/reviews/evolutions/file_attachment_comment_extra_data.py
|
86e3d2fc9eb496b260de4b12d19e98ab74fb6221
|
[
"MIT"
] |
permissive
|
pombredanne/reviewboard
|
a2970fa18cfff4b15adfe65fd0098287d73c650e
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
refs/heads/master
| 2022-03-09T22:24:19.951964
| 2022-02-09T07:12:23
| 2022-02-09T07:12:23
| 2,324,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from django_evolution.mutations import AddField
from djblets.db.fields import JSONField
# Django Evolution mutation: add a nullable JSON `extra_data` column to
# FileAttachmentComment for free-form per-comment metadata.
MUTATIONS = [
    AddField('FileAttachmentComment', 'extra_data', JSONField, null=True)
]
|
[
"chipx86@chipx86.com"
] |
chipx86@chipx86.com
|
ec8da544cd59eff81c89e4f327ad9a081c8125d6
|
b45d33675b38fd3bd15fb2f73a29851a3cc4037d
|
/0x01-python-if_else_loops_functions/1-last_digit.py
|
6fd126b08396c288541339756d823b1a022c70f4
|
[] |
no_license
|
angelah1994/holbertonschool-higher_level_programming-1
|
38b8ca1859af2ec08aa50a862ecf37cabf993b46
|
61ab83696ed45686456317c485f7adb7220654ff
|
refs/heads/master
| 2023-03-16T04:08:44.868909
| 2020-05-15T15:40:50
| 2020-05-15T15:40:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!/usr/bin/python3
"""Print whether the last digit of a random number is >5, <6 (non-zero) or 0."""
import random
number = random.randint(-10000, 10000)
last = abs(number) % 10
# Keep the sign of the original number on its last digit. The original
# tested `number < 10`, which wrongly negated the digit for 0..9.
if number < 0:
    last = last * -1
if last > 5:
    print('Last digit of {} is {} and is greater than 5'. format(number, last))
elif last < 6 and last != 0:
    # Renamed from `str`, which shadowed the builtin.
    msg = 'Last digit of {} is {} and is less than 6 and not 0'
    print(msg. format(number, last))
else:
    print('Last digit of {} is {} and is 0'. format(number, last))
|
[
"amendez72@misena.edu.co"
] |
amendez72@misena.edu.co
|
e5ec7d80a9bd8ec0d31e8695546aa7bfb197c39d
|
209a7a4023a9a79693ec1f6e8045646496d1ea71
|
/COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/arrays/boolean/test_reduction.py
|
7ac6c13a933d640f3b303c790f7220a60ada525b
|
[
"MIT"
] |
permissive
|
anzhao920/MicrosoftProject15_Invictus
|
5e2347015411bbffbdf0ceb059df854661fb240c
|
15f44eebb09561acbbe7b6730dfadf141e4c166d
|
refs/heads/main
| 2023-04-16T13:24:39.332492
| 2021-04-27T00:47:13
| 2021-04-27T00:47:13
| 361,913,170
| 0
| 0
|
MIT
| 2021-04-26T22:41:56
| 2021-04-26T22:41:55
| null |
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
import numpy as np
import pytest
import pandas as pd
@pytest.fixture
def data():
    """A boolean-dtype ExtensionArray mixing True/False with two NA slots."""
    return pd.array(
        [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
        dtype="boolean",
    )
# Each row: input values, then expected any/all with skipna=True followed by
# the skipna=False variants (Kleene logic makes some of them pd.NA).
@pytest.mark.parametrize(
    "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip",
    [
        ([True, pd.NA], True, True, True, pd.NA),
        ([False, pd.NA], False, False, pd.NA, False),
        ([pd.NA], False, True, pd.NA, pd.NA),
        ([], False, True, False, True),
        # GH-33253: all True / all False values buggy with skipna=False
        ([True, True], True, True, True, True),
        ([False, False], False, False, False, False),
    ],
)
def test_any_all(values, exp_any, exp_all, exp_any_noskip, exp_all_noskip):
    # the methods return numpy scalars
    exp_any = pd.NA if exp_any is pd.NA else np.bool_(exp_any)
    exp_all = pd.NA if exp_all is pd.NA else np.bool_(exp_all)
    exp_any_noskip = pd.NA if exp_any_noskip is pd.NA else np.bool_(exp_any_noskip)
    exp_all_noskip = pd.NA if exp_all_noskip is pd.NA else np.bool_(exp_all_noskip)
    # Identity (`is`) comparisons are deliberate: the scalars are singletons.
    for con in [pd.array, pd.Series]:
        a = con(values, dtype="boolean")
        assert a.any() is exp_any
        assert a.all() is exp_all
        assert a.any(skipna=False) is exp_any_noskip
        assert a.all(skipna=False) is exp_all_noskip
        assert np.any(a.any()) is exp_any
        assert np.all(a.all()) is exp_all
@pytest.mark.parametrize("dropna", [True, False])
def test_reductions_return_types(dropna, data, all_numeric_reductions):
    # `all_numeric_reductions` is a pandas-provided fixture naming one
    # reduction op per parametrized run.
    op = all_numeric_reductions
    s = pd.Series(data)
    if dropna:
        s = s.dropna()
    if op == "sum":
        assert isinstance(getattr(s, op)(), np.int_)
    elif op == "prod":
        assert isinstance(getattr(s, op)(), np.int_)
    elif op in ("min", "max"):
        assert isinstance(getattr(s, op)(), np.bool_)
    else:
        # "mean", "std", "var", "median", "kurt", "skew"
        assert isinstance(getattr(s, op)(), np.float64)
|
[
"ana.kapros@yahoo.ro"
] |
ana.kapros@yahoo.ro
|
1029307e17ff37e33f2b89833d70f7879f9f5e45
|
60dbecafad0eb3baf67265ebda5c6230dfc99088
|
/old_plotter_files/CLUSTER_PLOT_NEW.py
|
d4f5332e0882a5300eb2ffad687c5550409349dd
|
[] |
no_license
|
shanto268/NaSch_CA_Traffic_Flow_Analysis_Software
|
fbddadd70a70458b96a9a12c5a1c731d29266e34
|
d9065df9b8288790aa688bf5bf4c30750ba2889c
|
refs/heads/master
| 2020-09-20T01:47:41.301182
| 2020-05-17T03:28:20
| 2020-05-17T03:28:20
| 224,346,779
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,044
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 15:13:35 2019
@author: Owner
"""
import matplotlib.pyplot as plt
import csv
def plot1(fname):
    """Pre-filter a simulation CSV and plot cluster count / average cluster
    size over time, saving both figures as PDFs under final/new/.

    The first 19 rows of every 100-row block are replaced with a marker
    line and then stripped out -- presumably discarding transient samples
    at the start of each measurement window; confirm against the writer.
    """
    fn = fname
    nn = fn.split('.')
    # Filtered copy of the input written next to it.
    fr = 'processed_' + str(fname) + '.txt'
    #dnewdata = "0.0, 0.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, "
    dnewdata = "dnew line"
    with open(fn, 'r') as f:
        lines = f.read().split('\n')
    #to delete line use "del lines[4]"
    #to replace line:
    # Mark rows to drop: row 0 and rows 1..18 of each 100-row block.
    for i in range(0,len(lines)):
        if (i % 100) == 0 or (i % 100) < 19 and i > 0: #or (i % 4) == 1 :
            lines[i] = dnewdata
    with open(fr,'w') as f:
        f.write('\n'.join(lines))
    # Second pass: physically remove the marker lines.
    with open(fr, "r") as f:
        lines = f.readlines()
    with open(fr, "w") as f:
        for line in lines:
            if line.strip("\n") != "dnew line":
                f.write(line)
    # NOTE(review): `a` is read but never used below -- leftover from the
    # commented-out header-prepending step.
    with open(fn, "r+") as f: #fr change
        a = f.read()
    # with open(fr, "w+") as f: #fr change
    #     f.write("0.0, 0.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n" + a)
    density = []
    flow = []
    updates = []
    densityrv = []
    flowrv = []
    densityav = []
    flowav = []
    clnum = []
    avgclsize = []
    # clsize = []
    # Column layout inferred from indices: 0..6 density/flow aggregates,
    # 13 cluster count, 14 average cluster size -- confirm against writer.
    with open(fr,'r') as csvfile:
        plots = csv.reader(csvfile, delimiter=',')
        for row in plots:
            density.append(float(row[0]))
            flow.append(float(row[1]))
            updates.append(float(row[2]))
            densityrv.append(float(row[3]))
            flowrv.append(float(row[4]))
            densityav.append(float(row[5]))
            flowav.append(float(row[6]))
            clnum.append(int(row[13]))
            avgclsize.append(float(row[14]))
    # clsize.append(float(row[11]))
    plt.plot(updates, clnum,':' ,linewidth =1, )
    plt.xlabel("Timesteps")
    plt.ylabel("Number of Clusters")
    plt.title("Number of Clusters over time")
    plt.savefig("final/new/cluster_num_"+str(nn[0])+".pdf")
    plt.show()
    plt.plot(updates, avgclsize ,linewidth =1,)
    plt.xlabel("Timesteps")
    plt.ylabel("Average Size of Clusters")
    plt.title("Average Size of Clusters over time")
    plt.savefig("final/new/cluster_size_"+str(nn[0])+".pdf")
    plt.show()
#r1m1 = plot1('type_aware_crit_density.txt')
#r1m2 = plot1('type_unaware_crit_density.txt')
#r2m1 = plot1('control_crit_density.txt')
r = plot1('type_unaware_low_density_same_vf.txt')
#show histograms:
#cluster numbers at each time period
#average size at each time period
#size
#combined graphs:
|
[
"sadman-ahmed.shanto@ttu.edu"
] |
sadman-ahmed.shanto@ttu.edu
|
1f36a514482352b15f26407d72ee9ba6027dac94
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/anagram/09ab13ac53e54a21819b45d33c272459.py
|
30c00dfe1a41d1c24e13263850e15a2fbd439762
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
def detect_anagrams(src, words):
    """Return the members of *words* that are anagrams of *src*.

    Comparison is case-insensitive, and a word that is merely *src* itself
    (in any casing) does not count as an anagram.
    """
    key = sort_string(src)
    return [wd for wd in words
            if sort_string(wd) == key and src.lower() != wd.lower()]
def sort_string(st):
    """Return the characters of *st* lower-cased and sorted into a string."""
    # Replaces a manual append/sort/concatenate loop with the stdlib idiom.
    return ''.join(sorted(st.lower()))
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
92b8cb7b47b9ee887f6ae8c34067590019668fc1
|
77ec9edf40b34b48477a627d149b6c2054b98a93
|
/abc_188_d.py
|
07fb56df0536dddc7d23de1cb381a788cdc223e9
|
[] |
no_license
|
junkhp/atcorder
|
fa4eeb204e3a4ac713001ab89c205039703abc88
|
028ddf7a39534d5907232c4576a03af79feb6073
|
refs/heads/main
| 2023-04-11T02:15:10.088883
| 2021-04-22T07:06:06
| 2021-04-22T07:06:06
| 313,284,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
# -*- coding: utf-8 -*-
def main():
    """AtCoder ABC 188 D: minimum total cost when each service i costs c_i
    per day over [a_i, b_i] and any single day's combined cost is capped
    at C (the unlimited-plan price). Sweep-line over interval endpoints."""
    n, C = map(int, input().split())
    abc_list = [list(map(int, input().split())) for i in range(n)]
    # day -> list of [service index, is_start]; `b` is made exclusive (+1).
    keikaku_dict = {}
    day_set = set()
    for i, abc in enumerate(abc_list):
        a = abc[0]
        b = abc[1] + 1
        if a in day_set:
            keikaku_dict[a].append([i, True])
        else:
            keikaku_dict[a] = [[i, True]]
            day_set.add(a)
        if b in day_set:
            keikaku_dict[b].append([i, False])
        else:
            keikaku_dict[b] = [[i, False]]
            day_set.add(b)
    day_set = sorted(day_set)
    # Running per-day cost, recorded at each event day.
    day_cost = 0
    day_cost_dict = {}
    for day in day_set:
        for xxx in keikaku_dict[day]:
            service = xxx[0]
            is_in = xxx[1]
            if is_in:
                day_cost += abc_list[service][2]
            else:
                day_cost -= abc_list[service][2]
        day_cost_dict[day] = day_cost
    # Between consecutive event days the cost is constant; cap each day at C.
    ans = 0
    for i in range(len(day_set) - 1):
        ans += min(C, day_cost_dict[day_set[i]]) * (day_set[i + 1] - day_set[i])
    print(ans)
if __name__ == '__main__':
    main()
|
[
"oshiba@m.cs.osakafu-u.ac.jp"
] |
oshiba@m.cs.osakafu-u.ac.jp
|
0827574c78dbf6f8411927ec8c2f368c165aded5
|
135254b8c00935efd0efd33c708ce69470e23741
|
/Hard/335. Self Crossing.py
|
84c1eaf99e013d78a368297707b298a1dacf7618
|
[] |
no_license
|
MinecraftDawn/LeetCode
|
4974e6f96612f01e4774ecd5c30bc42dfff79467
|
0404bcce27ff363430e6ab71dbc27a69055fd261
|
refs/heads/master
| 2021-06-19T05:50:08.000396
| 2021-06-14T05:57:09
| 2021-06-14T05:57:09
| 188,446,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Reference: https://leetcode.com/problems/self-crossing/discuss/386087/Python-simple-case-easy-to-read
class Solution:
    def isSelfCrossing(self, edge: list) -> bool:
        """Return True if the path traced by the given move lengths crosses
        itself; only three geometric cases are possible, each involving at
        most the last six segments."""
        if len(edge) < 4:
            return False
        for i in range(3, len(edge)):
            cur, prev1, prev2, prev3 = edge[i], edge[i - 1], edge[i - 2], edge[i - 3]
            # Case 1: current segment crosses the one three moves back.
            if prev1 <= prev3 and cur >= prev2:
                return True
            if i >= 4:
                prev4 = edge[i - 4]
                # Case 2: current segment lands exactly on the segment
                # four moves back.
                if prev1 == prev3 and cur + prev4 >= prev2:
                    return True
                # Case 3: current segment crosses the one five moves back.
                if i >= 5:
                    prev5 = edge[i - 5]
                    if (prev1 <= prev3 and prev3 <= prev1 + prev5
                            and cur + prev4 >= prev2 and prev4 <= prev2):
                        return True
        return False
|
[
"eric4902077@gmail.com"
] |
eric4902077@gmail.com
|
10f7e3c347eb30004151f1556f490b053246fe90
|
d42b771f64bc2185a8c0dca0f5bcfa5a2e13c5ed
|
/users/migrations/0004_auto_20210401_1122.py
|
c6031beb0bb860b42025e95c43890b7f126098f1
|
[] |
no_license
|
bgy1060/Daily_Project
|
4b38de59c09f5e3f82211a9860e1f32a8ef46b37
|
bcc955bddd9941f2bc54f7577c26c1ddc6b36a48
|
refs/heads/main
| 2023-05-15T17:26:56.858438
| 2021-06-17T05:59:10
| 2021-06-17T05:59:10
| 353,864,798
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
# Generated by Django 3.1.7 on 2021-04-01 02:22
from django.db import migrations
class Migration(migrations.Migration):
    # Drops Django's default permission flags from CustomUser; presumably
    # role handling moved elsewhere -- confirm admin access still works.
    dependencies = [
        ('users', '0003_customuser_username'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='customuser',
            name='is_staff',
        ),
        migrations.RemoveField(
            model_name='customuser',
            name='is_superuser',
        ),
    ]
|
[
"40761315+bgy1060@users.noreply.github.com"
] |
40761315+bgy1060@users.noreply.github.com
|
6b0e78d50451bb8ccc11aab0a05214bf732c8bdb
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2479/60586/261474.py
|
a9f57d33b28f100b2c88c064fd239af6152dd305
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
def exam4():
    """For each of t test cases, read two strings from stdin and print the
    characters that appear in exactly one of them, sorted, as one string.

    Fixes two defects in the original: the result list leaked across test
    cases (it was created once, outside the loop), and deduplicating with
    set() AFTER sorting destroyed the order, making the output
    nondeterministic under hash randomisation.
    """
    t = int(input())
    for _ in range(t):
        a = input()
        b = input()
        # Symmetric difference = characters unique to either string.
        print("".join(sorted(set(a) ^ set(b))))
# Run the stdin-driven entry point.
exam4()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
210c32bffdfcbe2fea8b31746088ce7e6896645f
|
2a39fe8bd203531c9bcdb470d19b80beac665eae
|
/read_best.py
|
380979ab3e9af8575ef0e733e5486bb67e30d4e4
|
[] |
no_license
|
davidharvey1986/lenstoolTools
|
7bf11af1a38700503a731c6fe7e83fdc92bf58c1
|
85bcf729603d34341f5f41c57c4e233b08055baa
|
refs/heads/master
| 2021-09-08T14:29:52.695461
| 2018-03-10T13:54:50
| 2018-03-10T13:54:50
| 124,657,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,896
|
py
|
from lensing.lenstool import potentiels
from write_par import *
import csv as csv
import ipdb as pdb
def read_best( filename='best.par',
               pot_type='AUTO',
               verbose=False,
               return_limits=False,
               return_image=False):
    '''
    Read the input into lenstool, the .par file and read it
    into a python class
    It assumes that the input file is best.par however this
    can be changed with the keyword
    RETURNS :
    2 Rec arrays
    - run_mode : contains the reference position of the halo
    - potentiel_list : a n halo list of the potentiels found in the recon
    For more information on these rec arrays see write_par.py and potentiels.py
    BUGS:
    It does require the output best.par to be in a paticular order
    (the one output from lenstool)
    UDPATE : CAN NOW READ INPUT PAR FILES (25/05/2016) DRH
    NOTE(review): Python 2 code (print statements, xrange). The file handle
    is never closed and the bare except clauses silently swallow parse
    errors unless verbose=True -- flagged, left as-is in this doc pass.
    '''
    best_obj = open( filename, "rb")
    run_mode = runmode()
    # State machine: `mode` tracks which .par section the parser is inside.
    mode = 'Comment'
    pot = -1
    ind=0
    iLine=0
    comment_flag = 0
    limit_list = []
    limit_flag = 0
    z_m_limit = []
    for iline in best_obj:
        # Sections are tab-separated: line[0] is the section keyword,
        # line[1] (when present) a "key values..." payload.
        line = iline.splitlines()[0].split('\t')
        if verbose:
            print line
        # Skip the file header until the first 'runmode' section is seen.
        if (line[0] != 'runmode') & (comment_flag ==0):
            continue
        #if the length of the line is 1 then it is a key word
        if line[0] == 'runmode':
            mode='runmode'
            comment_flag = 1
        if line[0] == 'grille':
            mode='grille'
        if line[0] == 'image':
            mode='image'
            ret_image = image()
        if (len(line) == 1):
            if line[0].strip()[0] == '#':
                if verbose:
                    print 'SKIPPING',line
                continue
            if line[0].split()[0] == 'potentiel':
                mode='potentiel'
                pot +=1
                pot_name = line[0].split()[1]
            if line[0].split()[0] == 'limit':
                mode='limit'
                # One limit record per potentiel, matched by profile type.
                limit_list.append(limits.get_limit( potentiel_list[pot]['profil']['int'] ))
                limit_flag = 1
        if len(line) > 1:
            if line[1].strip()[0] == '#':
                if verbose:
                    print 'SKIPPING ',line
                continue
            if line[1].strip() != 'end':
                if (mode == 'runmode') :
                    option=line[1].split()
                    keys = run_mode[option[0]].dtype.names
                    for iKey in xrange(len(keys)):
                        if iKey < len(option):
                            run_mode[option[0]][keys[iKey]] = option[iKey]
                        else:
                            continue
                if (mode == 'grille'):
                    option=line[1].split()
                    #If the filename is the best.par then
                    #the number of lenses is the nlentille
                    #if the input par file then tihs is the nlens_opt
                    if (option[0] == 'nlentille') | (option[0] == 'nlens'):
                        nlens = np.int(option[1])
                        #If I assume pot is NFW use this
                        #otherwise get the potentiel type automatically
                        if pot_type == 'NFW':
                            potentiel_list = [ potentiels.nfw() for i in xrange( nlens ) ]
                        else:
                            if pot_type == 'AUTO':
                                # Recursive helper pass to detect each halo's profile.
                                potentiel_list = get_potential_list( best_file = filename )
                if (mode == 'image'):
                    option=line[1].split()
                    # Normalise lenstool's key spelling to our field name.
                    if option[0] == 'sigposArcsec':
                        option[0] = 'sigpos_arcsec'
                    if (option[0] == 'z_m_limit'):
                        dtype = [('name', object), ('im_label', float),
                                 ('int', np.int), ('lo', float), ('hi', float), ('res', float)]
                        iz_m_limit = np.array(('z_m_limit '+str(option[1]),
                                               option[2],option[3],
                                               option[4], option[5],
                                               option[6]), dtype=dtype)
                        ret_image[ 'z_m_limit '+str(option[1]) ] = \
                          iz_m_limit
                    else:
                        image_keys = ret_image[option[0]].dtype.names
                        for i in xrange( 1, len(image_keys) ):
                            ret_image[ option[0] ][image_keys[i]] = option[i]
                if (mode == 'potentiel'):
                    if pot >= nlens:
                        continue
                    option = line[1].split()
                    try:
                        data_type = potentiel_list[pot][option[0]].dtype.names[1]
                        potentiel_list[pot][option[0]][data_type] = \
                          np.array(option[1]).astype(data_type)
                    except:
                        if verbose == True:
                            print option[0],' does not exist in potentiel'
                        else:
                            pass
                    # Convert the halo's arcsec offsets to sky coordinates
                    # relative to the runmode reference position.
                    ra_halo = run_mode['reference']['ra'] - \
                      potentiel_list[pot]['x_centre']['float']/3600./\
                      np.cos(run_mode['reference']['dec']*np.pi/180.)
                    dec_halo = run_mode['reference']['dec'] + \
                      potentiel_list[pot]['y_centre']['float']/3600.
                    potentiel_list[pot]['ra'] = \
                      np.array(('ra', ra_halo), dtype=[('name', object), ('float', float)])
                    potentiel_list[pot]['dec'] = \
                      np.array(('dec', dec_halo), dtype=[('name', object), ('float', float)])
                    potentiel_list[pot]['identity'] =\
                      np.array(('identity', pot_name), dtype=[('name', object), ('str', object)])
                if (mode == 'limit'):
                    option = line[1].split()
                    try:
                        data_type = limit_list[pot][option[0]].dtype.names[1]
                        limit_list[pot][option[0]][data_type] = \
                          np.array(option[1]).astype(data_type)
                    except:
                        if verbose == True:
                            print option[0],' does not exist in potentiel'
                        else:
                            pass
            else:
                mode = 'end'
    if return_limits:
        if limit_flag == 1:
            return run_mode, potentiel_list, limit_list
        else:
            print 'NO LIMIT SECTION > IS THIS A BEST.PAR?'
            return 0
    elif return_image:
        return ret_image
    else:
        return run_mode, potentiel_list
def get_potential_list( best_file='best.par', verbose=False ):
    '''
    Return the list of potentiel records for best_file, one per halo,
    each instantiated for the profile type actually found in the file
    (not necessarily NFW).
    Currently only valid for NFW or PIEMD since these are the only ones
    in limit.py and potentiels.py
    '''
    # First pass assumes NFW just to discover each halo's 'profil' code,
    # then the correct record type is built from that code.
    _, pot_recs = read_best( best_file, pot_type='NFW', verbose=verbose )
    return [ potentiels.get_profile( rec['profil']['int'] ) for rec in pot_recs ]
def get_limit_list( pots ):
    '''Return one limit record per potentiel, matched on its profile code.'''
    limit_recs = []
    for pot_rec in pots:
        limit_recs.append( limits.get_limit( pot_rec['profil']['int'] ) )
    return limit_recs
|
[
"davidharvey1986@googlemail.com"
] |
davidharvey1986@googlemail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.