blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b4cf072a621ad81b2299a47964c8c457f274039b | 277538ab518bc60ad4e692f3d996c05db4a387d0 | /venv/bin/django-admin | ea3f0d5c97ec19acfd929d8d603fb6b19daafa59 | [] | no_license | vsingh1438/iWant-1 | 22368c13b78a04160fb42dfc37479b2873b7f682 | 828b8b465e27181b68f8fbaea6dbecaa507d54d1 | refs/heads/master | 2020-12-29T01:00:38.216428 | 2015-10-04T01:55:40 | 2015-10-04T01:55:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | #!/Users/sidmalladi007/GitHub/iWant/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"sid.malladi.007@gmail.com"
] | sid.malladi.007@gmail.com | |
7084db3062d66581c38fbdc43d86b9d20a9172c9 | 4926667354fa1f5c8a93336c4d6e2b9f6630836e | /1534.py | 13cc2b14de3bdc53a7c3d07c0f26668d8b35111d | [] | no_license | nascarsayan/lintcode | 343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e | 4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4 | refs/heads/master | 2021-07-13T12:31:45.883179 | 2020-07-20T02:27:53 | 2020-07-20T02:27:53 | 185,825,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: root of a tree
@return: head node of a doubly linked list
"""
def treeToDoublyList(self, root):
# Write your code here.
def recurse(root):
if root is None:
return (None, None)
st, fl = root, root
if root.left is not None:
lst, lfl = recurse(root.left)
lfl.right = root
root.left = lfl
st = lst
if root.right is not None:
rst, rfl = recurse(root.right)
root.right = rst
rst.left = root
fl = rfl
return (st, fl)
if root is None:
return None
hd, tl = recurse(root)
hd.left = tl
tl.right = hd
return hd
| [
"nascarsayan@iitkgp.ac.in"
] | nascarsayan@iitkgp.ac.in |
abc195cc80dee6e430b01353bec29e628b93de8c | 5bdc32f325456ec402e78c656e5fd3c24af85a1d | /map-headers.py | fd02d9be8d7598aadefa16497bc158b1e36f3d5a | [] | no_license | naltak/m-c-tools-code-coverage | 6a15f388f462c330cbb8a8315ab9dba55ff4a06a | c08ad0f9f2b33973119847545f0034d3144d586b | refs/heads/master | 2021-01-16T21:21:28.772244 | 2012-11-23T14:14:52 | 2012-11-23T14:14:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | #!/usr/bin/python
import os, sys
def main(argv):
from optparse import OptionParser
o = OptionParser()
o.add_option('-m', '--map-string', dest="map_string", default="dist/include/",
help="Try to map files containing STRING", metavar="STRING")
o.add_option('-b', '--base-string', dest="base_string", default="try-lnx64-dbg/build/",
help="Truncate path up to (including) STRING", metavar="STRING")
o.add_option('-d', '--directory', dest="base_dir",
help="Base directory for relative paths", metavar="STRING")
o.add_option('-o', '--output', dest="outfile",
help="File to output data to FILE", metavar="FILE")
(opts, args) = o.parse_args(argv)
files = {}
# Store it to output
if opts.outfile != None:
print >> sys.stderr, "Writing to file %s" % opts.outfile
outfd = open(opts.outfile, 'w')
else:
outfd = sys.stdout
infd = open(args[0], 'r')
for line in infd:
if line.startswith("SF:"):
filename = line.split(':')[1].strip()
idx = filename.find(opts.base_string)
if idx >= 0:
filename = filename[idx+len(opts.base_string):]
if opts.base_dir != None and filename[0] != os.sep:
filename = os.path.join(opts.base_dir, filename)
if filename.find(opts.map_string) == -1:
files[filename] = 1
infd.close()
infd = open(args[0], 'r')
for line in infd:
if line.startswith("SF:"):
filename = line.split(':')[1].strip()
mapidx = filename.find(opts.map_string)
mapped = False
if mapidx != -1:
#basename = filename[mapidx+len(opts.map_string):]
basename = os.path.basename(filename)
for key in files.iterkeys():
if key.endswith(os.sep + basename):
mapped = True
outfd.write("SF:" + key + "\n")
break
if not mapped:
idx = filename.find(opts.base_string)
if idx >= 0:
filename = filename[idx+len(opts.base_string):]
if opts.base_dir != None and filename[0] != os.sep:
filename = os.path.join(opts.base_dir, filename)
outfd.write("SF:" + filename + "\n")
else:
outfd.write(line)
outfd.close()
if __name__ == '__main__':
main(sys.argv[1:])
| [
"choller@mozilla.com"
] | choller@mozilla.com |
1c21123833308e103bf917a24ffbcd7612967ca3 | 5e67f9e8ff1d489367ee3f6742af019c2315565c | /causal_meta/utils/data_utils.py | 7e119cff29dbf025fc891713f647822b87a901c9 | [] | no_license | yuanpeng16/EDCR | 1590a9ce585388a9659138f479d9d61596f4aa9f | 61724fbb37aa75a1249a68c31f01270abaaf101f | refs/heads/master | 2022-12-24T18:05:35.860353 | 2020-10-01T22:15:58 | 2020-10-01T22:15:58 | 187,728,785 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import torch
import torch.nn as nn
import numpy as np
import scipy
from scipy import interpolate
try:
import matplotlib.pyplot as plt
except Exception:
plt = None
class RandomSplineSCM(nn.Module):
def __init__(self, input_noise=False, output_noise=True,
span=6, num_anchors=10, order=3, range_scale=1.):
super(RandomSplineSCM, self).__init__()
self._span = span
self._num_anchors = num_anchors
self._range_scale = range_scale
self._x = np.linspace(-span, span, num_anchors)
self._y = np.random.uniform(-range_scale * span, range_scale * span,
size=(num_anchors,))
self._spline_spec = interpolate.splrep(self._x, self._y, k=order)
self.input_noise = input_noise
self.output_noise = output_noise
def forward(self, X, Z=None):
if Z is None:
Z = self.sample(X.shape[0])
if self.input_noise:
X = X + Z
X_np = X.detach().cpu().numpy().squeeze()
_Y_np = interpolate.splev(X_np, self._spline_spec)
_Y = torch.from_numpy(_Y_np).view(-1, 1).float().to(X.device)
if self.output_noise:
Y = _Y + Z
else:
Y = _Y
return Y
def sample(self, N):
return torch.normal(torch.zeros(N), torch.ones(N)).view(-1, 1)
def plot(self, X, title="Samples from the SCM", label=None, show=True):
Y = self(X)
if show:
plt.figure()
plt.title(title)
plt.scatter(X.squeeze().numpy(), Y.squeeze().numpy(), marker='+', label=label)
if show:
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
def generate_data_categorical(num_samples, pi_A, pi_B_A):
"""Sample data using ancestral sampling
x_A ~ Categorical(pi_A)
x_B ~ Categorical(pi_B_A[x_A])
"""
N = pi_A.shape[0]
r = np.arange(N)
x_A = np.dot(np.random.multinomial(1, pi_A, size=num_samples), r)
x_Bs = np.zeros((num_samples, N), dtype=np.int64)
for i in range(num_samples):
x_Bs[i] = np.random.multinomial(1, pi_B_A[x_A[i]], size=1)
x_B = np.dot(x_Bs, r)
return np.vstack((x_A, x_B)).T.astype(np.int64)
def generate_data_multivariate_normal(num_samples, mean_A, cov_A, beta_0, beta_1, cov_B_A):
""" Sample data using ancestral sampling
x_A ~ MultivariateNormal(mean_A, cov_A)
x_B ~ MultivariateNormal(beta_1 * x_A + beta_0, cov_B_A)
"""
dim = mean_A.shape[0]
A = np.random.multivariate_normal(mean_A, cov_A, size=num_samples) # (num_samples, dim)
noise = np.random.multivariate_normal(np.zeros(dim), np.eye(dim), size=num_samples)
scaled_noise = np.matmul(noise, np.transpose(scipy.linalg.sqrtm(cov_B_A))) # (num_samples, dim)
B = np.matmul(A, np.transpose(beta_1)) + beta_0 + scaled_noise
return np.stack([A, B]).astype(np.float64) # (2, num_samples, dim)
| [
"ec6dde01667145e58de60f864e05a4@gmail.com"
] | ec6dde01667145e58de60f864e05a4@gmail.com |
35905c104473f51d31d16699f3bdd5ba4edfad55 | d67b24db7a6175735cdd6fac05dd03014c8a0680 | /detection/configs/retinanet_res34_coco_3x_800size.py | a376518e58a5723de8c9e75c75f1135aa33e7565 | [] | no_license | ZaccurLi/megengine_detection | aea0c6399b467a654efd7c9f50b8d9746e29bbf5 | 4bbb25622e2b5831640c26060c0bf5440b01c9b4 | refs/heads/main | 2023-08-04T12:16:00.149550 | 2021-09-10T10:38:06 | 2021-09-10T10:38:06 | 404,299,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from megengine import hub
from detection import models
class CustomRetinaNetConfig(models.RetinaNetConfig):
def __init__(self):
super().__init__()
self.backbone = "resnet34"
self.fpn_in_channels = [128, 256, 512]
self.fpn_top_in_channel = 512
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/"
"retinanet_res34_coco_3x_800size_38dot4_3485f9ec.pkl"
)
def retinanet_res34_coco_3x_800size(**kwargs):
r"""
RetinaNet trained from COCO dataset.
`"RetinaNet" <https://arxiv.org/abs/1708.02002>`_
`"FPN" <https://arxiv.org/abs/1612.03144>`_
`"COCO" <https://arxiv.org/abs/1405.0312>`_
"""
cfg = CustomRetinaNetConfig()
cfg.backbone_pretrained = False
return models.RetinaNet(cfg, **kwargs)
Net = models.RetinaNet
Cfg = CustomRetinaNetConfig
| [
"zaccurli@outlook.com"
] | zaccurli@outlook.com |
e01a94dd39942adc94f53e0ca4113b6f0c4832af | c6726a9fc351c42594741cc0a8e09cb55bd2d705 | /Pi files/LoRaUSB.py | 61b361cd10ec85dbc6db1196d79bf4e9542ebf3a | [] | no_license | ITheCon/LoRaWAN_Mapping_5th_Year_group | fb3040bc82553ba0163b9e5a8662d90c6d0236fb | 295df26675e148b0bb5ec8e22c3cc295c2e05780 | refs/heads/main | 2023-04-01T18:38:14.038588 | 2021-04-07T14:27:40 | 2021-04-07T14:27:40 | 337,122,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/env python
import os
import serial
from time import sleep
from datetime import datetime
import time
def getCoord(channel):
LoRa.write("RD " + str(channel))
received_data = LoRa.read()
sleep(0.05)
data_left = LoRa.inWaiting()
received_data += LoRa.read(data_left)
return received_data
def saveData(data, channel):
now = datetime.now()
clock = str(now.hour) + ":" + str(now.minute) + ":" + str(now.second)
USB.write("c" + str(channel) + "," + clock + "," + data)
LoRa = serial.Serial("/dev/ttyS0", 9600, timeout = 10)
USB = serial.Serial("/dev/ttyGS0", 9600)
channels = []
while True:
LoRa.write("ID " + str(channels))
resp = LoRa.read()
sleep(0.05)
remain = LoRa.inWaiting()
resp += LoRa.read(remain)
if (resp == ""):
break;
else:
trim = resp.split()
channels.append(trim[0])
sleep(0.5)
if (channels == []):
quit()
while True:
start = time.time()
for channel in channels:
data = getCoord(channel)
attempt = 0
while data == "" and attempt < 2 :
attempt = attempt + 1
data = getCoord(channel)
if data == "" :
data = "No response,No response\n"
saveData(data, channel)
while start+5 > time.time():
sleep(1)
| [
"ben@rathobyres.co.uk"
] | ben@rathobyres.co.uk |
f2f210f4cfed106ef2a257e05eac5a26cf8d21d1 | a1cefcb39eb538535edfd4d41c4e57c780646dc3 | /components/studio/controller/urls.py | cdc9189db5fa1120c2c230fc0e13255c46ae5b40 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cawik/stackn | 108562d9f12c7fbc1d6c509ab78bade0b04713a7 | 45641c8ed56dfce22f1cd5fc0bafa3b3d9f326d2 | refs/heads/master | 2023-02-17T08:00:19.999488 | 2020-11-02T15:21:18 | 2020-11-02T15:21:18 | 311,961,277 | 0 | 0 | Apache-2.0 | 2020-11-13T09:29:14 | 2020-11-11T12:15:36 | null | UTF-8 | Python | false | false | 345 | py | from django.urls import path
from . import views
app_name = 'controller'
urlpatterns = [
# path('<int:project_id>/notebooks/<int:notebook_id>', views.index, name='notebooks')
# path('<int:project_id>', views.index, name='notebooks')
# path('pause', views.pause, name='pause'),
# path('resume', views.resume, name='resume'),
]
| [
"morgan@scaleoutsystems.com"
] | morgan@scaleoutsystems.com |
221f904130a40fe0b1ec2fb66a8dfd6a62d01cf5 | 973d698403dfaac56e2348aa0a9a73459f4abde6 | /hello/passwordsafe/x.py | 823735c8b8590350e41b66bea08a4fec4e948fc9 | [
"MIT"
] | permissive | JonasAZou/hellodjango | 6631ee53997ee7f522832b61539fc42e92f3be8b | 7965c1373da2b8c56ab33d769c20ee7fd14458bb | refs/heads/master | 2021-03-12T22:33:39.809063 | 2013-09-02T10:18:41 | 2013-09-02T10:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from Crypto.Cipher import DES, AES
from Crypto import Random
key = b'Sixteen byte key'
tp = b'attack at dawnxx'
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
cipher2 = AES.new(key, AES.MODE_CBC, iv)
msg = cipher.encrypt(tp)
print cipher2.decrypt(msg)
| [
"dlutxx@gmail.com"
] | dlutxx@gmail.com |
a2550f4a7f4e7d574584d7a2ef5db74376595e1b | de5d7bab4e35b3f2a6426ce09e0d9726443c3737 | /simple_ecommerce/simple_ecommerce/middleware.py | ec466cee584dc3e13ae19a081d1c7cd23816016e | [] | no_license | lucyemmel/speedwagon-foundation-shop | 595135643e7ac325d356c46c68b6ab02221b4882 | e62bc4593dfafb74e260e46339f55c0d4d0345f8 | refs/heads/main | 2022-12-30T20:22:16.477284 | 2020-10-21T15:08:57 | 2020-10-21T15:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | class FramingControlMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
response['Content-Security-Policy'] = 'frame-ancestors http://localhost:3000 http://127.0.0.1:3000'
return response
| [
"s8daemme@stud.uni-saarland.de"
] | s8daemme@stud.uni-saarland.de |
2801ad06b45347ea2e6d78a2191b54f5ecb06a64 | c3a8df823bed7ad10f510889f6c757723f630ba2 | /visualization/__main__.py | d6275b308d8e35634f8aad9380039b3d3ad11cc5 | [
"MIT"
] | permissive | pkarakal/serial-networking | f049a4168e78a42f1b12f1c82ce3ea42cbf09603 | 4adf5311b49b64dd0ec04f053006cb18496f087e | refs/heads/master | 2023-04-11T07:24:14.321885 | 2021-04-22T10:14:20 | 2021-04-22T10:14:20 | 340,639,557 | 1 | 0 | MIT | 2021-04-22T10:14:20 | 2021-02-20T11:36:53 | Java | UTF-8 | Python | false | false | 1,804 | py | from pandas import read_csv
from matplotlib import pyplot
import glob
import cv2
import sys
def parse_echo_csv():
all_files = glob.glob("echo*.csv")
for filename in all_files:
fig = pyplot.figure()
frame = read_csv(filename, header=0, index_col=0, usecols=[0, 3])
pyplot.plot(frame["Duration"])
pyplot.xlabel("Packets")
pyplot.ylabel("Duration")
fig.text(.5, .05, f"Mean response time: {calculate_mean_response_time(frame)}", ha='center')
pyplot.show()
def parse_ack_csv():
all_files = glob.glob("ack_echo*.csv")
for filename in all_files:
fig = pyplot.figure()
frame = read_csv(filename, header=0, index_col=0, usecols=[0, 3, 4])
# plot_times_sent(frame)
pyplot.plot(frame["Duration"])
pyplot.xlabel("Packets")
pyplot.ylabel("Duration (s)")
fig.text(.5, .05, f"Bit Rate Error (BER): {calculate_ber(frame)}", ha='center')
pyplot.show()
def calculate_ber(frame):
ack = len(frame)
res = frame.groupby("Time sent").count()
nack = 0
for key, value in enumerate(res.Duration.keys()):
nack += res["Duration"].values[key] * value if value > 1 else 0
return 1.0 - (float(ack) / float(ack + nack)) ** (1.0/128.0)
def calculate_mean_response_time(frame):
return frame["Duration"].mean()
def plot_times_sent(frame):
frame.groupby("Time sent")["Time sent"].hist(bins=10)
def open_images():
all_images = glob.glob("image*.jpg")
for image in all_images:
img = cv2.imread(image)
if img is None:
sys.exit("Could not read the image.")
cv2.imshow(image, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
open_images()
parse_echo_csv()
parse_ack_csv()
| [
"pavloc.kara@outlook.com"
] | pavloc.kara@outlook.com |
e07f624ea0d255df65ac483eff918d2f319b22b5 | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/services/migrations/0009_auto_20201209_1404.py | b2e2a364f136f17ebd91a275a705f8061d4ef9ea | [] | no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # Generated by Django 3.1.2 on 2020-12-09 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0008_auto_20201209_1400'),
]
operations = [
migrations.AlterField(
model_name='service',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Valor'),
),
]
| [
"jocsadm@gmail.com"
] | jocsadm@gmail.com |
078898a80e009d9bfb46d60def7e52767e2d9afd | dcc7ab0e239401a6ae11b45cf2ab8ed7500a0a9e | /article/urls.py | 3c6c40569e3a26132a84281a17f0e34d4bca2d74 | [] | no_license | technologysansar/blogger | e35a37d87b2dbed90458bc2a4fc5d44f0142ff13 | ec84505a07971d26d5d557dab25b8abe5c8df5a2 | refs/heads/master | 2023-04-27T09:45:53.024416 | 2021-05-17T11:11:09 | 2021-05-17T11:11:09 | 367,368,189 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from .import views
urlpatterns = [
path("", views.index, name="index"),
path("contact/", views.contact, name="Contact"),
path("subscribe/", views.subscribe, name="Subscribe"),
path("search/", views.search, name="Search"),
path("search/searchblogdetail/<int:id>", views.searchblogdetail, name="View"),
path("search/searchmobiledetail/<int:id>", views.searchmobiledetail, name="View"),
path("search/searchlaptopdetail/<int:id>", views.searchlaptopdetail, name="View"),
path("mobilecat/", views.mobilecat, name="MobilCategory"),
path("mobilecat/mobiledetail/<int:id>", views.mobiledetail, name="View"),
path("laptopcat/", views.laptopcat, name="Laptopcat"),
path("laptopcat/laptopdetail/<int:id>", views.laptopdetail, name="View"),
path("blog/", views.blog, name="Blog"),
path("blog/blogview/<int:id>", views.blogview, name="BlogView"),
path("dellview/", views.dellview, name="Dellview"),
path("dellview/delldetail/<int:id>", views.delldetail, name="Delldetail"),
path("appleview/", views.appleview, name="Appleview"),
path("appleview/appledetail/<int:id>", views.appledetail, name="appledetail"),
path("lenevoview/", views.lenevoview, name="Lenevoview"),
path("lenevoview/lenevodetail/<int:id>", views.lenevodetail, name="lenevodetail"),
path("acerview/", views.acerview, name="Acerview"),
path("acerview/acerdetail/<int:id>", views.acerdetail, name="Acerdetail"),
path("msiview/", views.msiview, name="Msivew"),
path("msiview/msidetail/<int:id>", views.msidetail, name="msidetail"),
path("applemview/", views.applemobileview, name="Mobileview"),
path("applemview/applemobiledetail/<int:id>", views.applemobiledetail, name="Mobiledetail"),
path("miview/", views.mimobileview, name="Mobileview"),
path("miview/mimobiledetail/<int:id>", views.mimobiledetail, name="Mobiledetail"),
path("nokiaview/", views.nokiamobileview, name="Mobileview"),
path("nokiaview/nokiamobiledetail/<int:id>", views.nokiamobiledetail, name="Mobiledetail"),
path("oppoview/", views.oppomobileview, name="Mobileview"),
path("oppoview/oppomobiledetail/<int:id>", views.oppomobiledetail, name="Mobiledetail"),
path("samsungview/", views.samsungmobileview, name="Mobileview"),
path("samsungview/samsungmobiledetail/<int:id>", views.samsungmobiledetail, name="Mobiledetail"),
path("vivoview/", views.vivomobileview, name="Mobileview"),
path("vivoview/vivomobiledetail/<int:id>", views.vivomobiledetail, name="Mobiledetail"),
]
| [
"devendra mishra"
] | devendra mishra |
101feda1a0f140f3e9c0891e6c61e0269a85ac2e | dda862418770f3885256d96e9bdb13d0759c5f43 | /codeforces/div-2/nastya-and-rice.py | a2c25afd65589336a3210b2dd8ff1e66d0aefc44 | [
"MIT"
] | permissive | bellatrixdatacommunity/data-structure-and-algorithms | d56ec485ebe7a5117d4922caeb0cd44c5dddc96f | d24c4001a797c12347973263a0f4f98939e86900 | refs/heads/master | 2022-12-03T00:51:07.944915 | 2020-08-13T20:30:51 | 2020-08-13T20:30:51 | 270,268,375 | 4 | 0 | MIT | 2020-08-13T20:30:53 | 2020-06-07T10:19:36 | Python | UTF-8 | Python | false | false | 3,299 | py | """
[A. Nastya and Rice](https://codeforces.com/contest/1341/problem/A)
time limit per test1 second
memory limit per test256 megabytes
inputstandard input
outputstandard output
Nastya just made a huge mistake and dropped a whole package of rice on the floor. Mom will come soon. If she sees this,
then Nastya will be punished.
In total, Nastya dropped 𝑛 grains. Nastya read that each grain weighs some integer number of grams from 𝑎−𝑏 to 𝑎+𝑏,
inclusive (numbers 𝑎 and 𝑏 are known), and the whole package of 𝑛 grains weighs from 𝑐−𝑑 to 𝑐+𝑑 grams, inclusive
(numbers 𝑐 and 𝑑 are known). The weight of the package is the sum of the weights of all 𝑛 grains in it.
Help Nastya understand if this information can be correct. In other words, check whether each grain can have such a
mass that the 𝑖-th grain weighs some integer number 𝑥𝑖 (𝑎−𝑏≤𝑥𝑖≤𝑎+𝑏), and in total they weigh from 𝑐−𝑑 to 𝑐+𝑑,
inclusive (𝑐−𝑑≤∑𝑖=1𝑛𝑥𝑖≤𝑐+𝑑).
Input
The input consists of multiple test cases. The first line contains a single integer 𝑡 (1≤𝑡≤1000) — the number of test
cases.
The next 𝑡 lines contain descriptions of the test cases, each line contains 5 integers: 𝑛 (1≤𝑛≤1000) — the number of
grains that Nastya counted and 𝑎,𝑏,𝑐,𝑑 (0≤𝑏<𝑎≤1000,0≤𝑑<𝑐≤1000) — numbers that determine the possible weight of
one grain of rice (from 𝑎−𝑏 to 𝑎+𝑏) and the possible total weight of the package (from 𝑐−𝑑 to 𝑐+𝑑).
Output
For each test case given in the input print "Yes", if the information about the weights is not inconsistent, and print
"No" if 𝑛 grains with masses from 𝑎−𝑏 to 𝑎+𝑏 cannot make a package with a total mass from 𝑐−𝑑 to 𝑐+𝑑.
Example
inputCopy
5
7 20 3 101 18
11 11 10 234 2
8 9 7 250 122
19 41 21 321 10
3 10 8 6 1
outputCopy
Yes
No
Yes
No
Yes
Note
In the first test case of the example, we can assume that each grain weighs 17 grams, and a pack 119 grams, then really
Nastya could collect the whole pack.
In the third test case of the example, we can assume that each grain weighs 16 grams, and a pack 128 grams, then really
Nastya could collect the whole pack.
In the fifth test case of the example, we can be assumed that 3 grains of rice weigh 2, 2, and 3 grams, and a pack is 7
grams, then really Nastya could collect the whole pack.
In the second and fourth test cases of the example, we can prove that it is impossible to determine the correct weight
of all grains of rice and the weight of the pack so that the weight of the pack is equal to the total weight of all collected grains.
"""
import sys
if __name__ == "__main__":
input = sys.stdin.read()
data = list(map(int, input.split()))
T = int(data[0])
it = 1
while T > 0:
n = data[it]
a = data[it + 1]
b = data[it + 2]
c = data[it + 3]
d = data[it + 4]
mini = c - d
maxi = c + d
min_rice = mini / n if n != 0 else 0
max_rice = maxi / n if n != 0 else 0
if max_rice < (a - b) or min_rice > (a + b):
print("No")
else:
print("Yes")
it += 5
T -= 1
| [
"adityaraman96@gmail.com"
] | adityaraman96@gmail.com |
9ba6ce568f47a0172f7d99b0040d87c309339a1c | 5891f624625616eba56ab29283ca9b12c7dbb334 | /inshape/migrations/0019_auto_20190305_2019.py | 3ae875820be9442eaeefcd2a7936afc393e74148 | [] | no_license | markwcorbin/mcweb | b842572da42ca2df8b395c7af65dfc524b6233af | c7a85e4e7553063ccb0106d85847b46532f14224 | refs/heads/master | 2023-01-20T19:42:19.076624 | 2023-01-16T16:38:40 | 2023-01-16T16:38:40 | 115,457,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | # Generated by Django 2.1.1 on 2019-03-06 02:19
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('inshape', '0018_auto_20181209_1726'),
]
operations = [
migrations.CreateModel(
name='WorkoutCardio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=50, null=True)),
('distance', models.DecimalField(decimal_places=1, max_digits=4)),
('duration', models.TimeField()),
('avg_speed', models.DecimalField(decimal_places=1, max_digits=3)),
('time_in_zone', models.TimeField(blank=True, null=True)),
('avg_hr', models.IntegerField(blank=True, null=True)),
('max_hr', models.IntegerField(blank=True, null=True)),
('notes', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.AlterField(
model_name='workout',
name='workout_date',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 5, 20, 19, 20, 4378)),
),
migrations.AddField(
model_name='workoutcardio',
name='workout',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inshape.Workout'),
),
]
| [
"34870054+markwcorbin@users.noreply.github.com"
] | 34870054+markwcorbin@users.noreply.github.com |
a1bbc613138962a859a79aef00e324ca92e8e6e5 | 7ee2d709a56b7dd5062e6d60ce59387062e49a33 | /plot_array.py | aad979137c11b02ba391b2defb0b00def19ddccd | [] | no_license | garethgeorge/prima-p2p | 160b3600aa83f86c4e4acd7624be77ffc2574cc6 | 2c63de7e6cb460a5c32c8c5bf127a3458e29a521 | refs/heads/master | 2021-01-09T04:21:39.282048 | 2020-02-21T23:01:31 | 2020-02-21T23:01:31 | 242,244,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import pulse2percept as p2p
import numpy as np
import matplotlib as plt
from prima import Prima
from pulse2percept.viz import plot_implant_on_axon_map
# Array Shape plotting
implant = Prima(x=-50, y=50, rot=np.deg2rad(0))
plt, _ = plot_implant_on_axon_map(
implant, annotate_implant=False, marker_style='hw')
# plt, _ = plot_implant_on_axon_map(
# implant, annotate_implant=False)
plt.savefig("array_shape.svg")
# Plot a pulse train on one electrode
# argus = Prima()
# stim = p2p.stimuli.PulseTrain(0.005/1000.0)
# sim = p2p.Simulation(argus, engine='serial')
# sim.set_ganglion_cell_layer('Nanduri2012')
# sim.set_optic_fiber_layer(sampling=250, decay_const=2)
# percept = sim.pulse2percept({"A1": stim, "B2": stim}, layers=["OFL", "GCL"])
# plt.pcolor(percept.data[:, :, 50000])
# plt.savefig("blargh.png")
| [
"garethgeorge97@gmail.com"
] | garethgeorge97@gmail.com |
4c4e498f8f69a2285e2c364574d94132fee73875 | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_closedset_ynoguti/config_iVector_200_fold6.py | e61d28d540206408e386efddf28c2f7122093a8b | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './results/closedset_ynoguti/iVector/200/fold_6/temp/'
result_directory = './results/closedset_ynoguti/iVector/200/fold_6/results/'
sub_directory = 'subdirectory'
database = 'database_iVector_200_fold6.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 200, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False, use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
parallel = 40
verbose = 2 | [
"nathbapt@decom.fee.unicamp.br"
] | nathbapt@decom.fee.unicamp.br |
878241c38b1cc894592d1d2fe871e4d0039996ca | 3853c858d97a8cf1a9f18fa422b1c428cad13521 | /Pitch_control/pitch_env.py | e847849583d3015091eb56ccb8837e0b9e4496c4 | [] | no_license | jinhyun9402/Reinforce_Learning | ff04a21aecbe07377c597eb3feda7ed382512deb | 12fee64b7fd67f6aeae8f3ee0dab57349daa3494 | refs/heads/master | 2022-12-03T11:02:35.714874 | 2020-08-18T19:11:41 | 2020-08-18T19:11:41 | 285,758,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,573 | py | import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class PitchControlEnv(gym.Env):
def __init__(self):
self.deltaE_mag = 0.01 * math.pi / 180
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'rk4'
# Angle at which to fail the episode
self.theta_threshold_radians = 5 * math.pi / 180
self.deltaEle_threshold_radians = 25 * math.pi / 180
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array([self.theta_threshold_radians * 2, np.finfo(np.float32).max], dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.action_space = spaces.Discrete(2)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.deltaEle = 0
def step(self, action, theta_limit_max, theta_limit_min, theta_cmd):
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
theta, Q = self.state
# Action에 따른 Elevator 동작
deltaE = self.deltaE_mag if action == 0 else -self.deltaE_mag
self.deltaEle += deltaE
if self.deltaEle >= self.deltaEle_threshold_radians:
self.deltaEle = self.deltaEle_threshold_radians
elif self.deltaEle <= -self.deltaEle_threshold_radians:
self.deltaEle = self.deltaEle_threshold_radians
else:
self.deltaEle =self.deltaEle
# Equation
thetadot = Q
Qdot = -theta + Q - self.deltaEle
if self.kinematics_integrator == 'rk4':
theta_k1 = thetadot
theta_k2 = thetadot + self.tau * theta_k1 /2
theta_k3 = thetadot + self.tau * theta_k2 /2
theta_k4 = thetadot + self.tau * theta_k3
theta_next = theta + self.tau * (theta_k1 + 2 * theta_k2 + 2 * theta_k3 + theta_k4) / 6
Q_k1 = Qdot
Q_k2 = Qdot + self.tau * Q_k1 /2
Q_k3 = Qdot + self.tau * Q_k2 /2
Q_k4 = Qdot + self.tau * Q_k3
Q_next = Q + self.tau * (Q_k1 + 2 * Q_k2 + 2 * Q_k3 + Q_k4) / 6
self.state = (theta_next, Q_next)
# 제한 조건
done = bool(theta < theta_limit_min or theta > theta_limit_max)
if not done:
if abs(theta_cmd - theta) > abs(theta_cmd - theta_next):
reward = 1
else:
reward = -1
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
if abs(theta_cmd - theta) > abs(theta_cmd - theta_next):
reward = 1
else:
reward = -1
else:
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this "
"environment has already returned done = True. You "
"should always call 'reset()' once you receive 'done = "
"True' -- any further steps are undefined behavior."
)
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done
def reset(self):
self.state = (np.random.uniform(low=-self.theta_threshold_radians, high=self.theta_threshold_radians), 0)
self.steps_beyond_done = None
return np.array(self.state) | [
"jinhyun9402@naver.com"
] | jinhyun9402@naver.com |
0ff014ea72de43ccb2ca47c4aabd5df946377039 | 7e8ac76f11c6bbe34dac30a570484f8ed8bbf83e | /model_creation/src/CaptchaGetter.py | d8a064a5dc7644ec52ce32664fc9971256be243c | [] | no_license | mahieyin-rahmun/rds_captcha_solver | bf9e247b1acf2f5e41b7b1b94dd80989df6bd2e5 | a408dadb57f92924ce68892078a9a5e272edec2f | refs/heads/master | 2021-06-25T21:42:32.905222 | 2021-01-23T13:39:21 | 2021-01-23T13:39:21 | 188,572,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import urllib.request
import os
class CaptchaGetter:
    """Downloads captcha images from the RDS portal into a local folder."""

    def __init__(self, num_of_rqsts, img_extension, path):
        """num_of_rqsts: how many captchas to fetch.
        img_extension: file extension (without the dot) for saved images.
        path: parent directory; images go into '<path>/captcha_images'.
        """
        self.url = 'https://rds3.northsouth.edu/index.php/captcha'
        self.iterations = num_of_rqsts
        self.dump_path = os.path.join(os.path.abspath(path), 'captcha_images')
        self.img_extension = img_extension
        if not os.path.isdir(self.dump_path):
            os.mkdir(self.dump_path)

    def dump_images(self):
        """Fetch `iterations` captchas, saving each as '<i>.<extension>'."""
        for i in range(self.iterations):
            img_name = f'{i}.{self.img_extension}'
            # Fix: close both the HTTP response and the output file
            # deterministically (the response object was previously leaked).
            with urllib.request.urlopen(self.url) as response:
                with open(os.path.join(self.dump_path, img_name), 'wb') as output:
                    output.write(response.read())
            print(f"Saved image {img_name}")

    def get_dump_path(self):
        """Return the absolute directory images are written to."""
        return self.dump_path
| [
"mahieyin.rahmun@gmail.com"
] | mahieyin.rahmun@gmail.com |
b05cf28737c13f71606d6764edd0c984a3f32776 | 372211ef59abfec8443d5d69bd443a57640180bb | /src/main_task5.py | e06096f97638978a1e980b185601a093976a08fd | [] | no_license | ivanBobrov/itmoAlgorithmsLab | 097c69336d4a1a4a63fd635baaa7219977a333e3 | daadf12968cc2037e866b837365557d2721fe2cc | refs/heads/master | 2020-08-01T01:31:02.414874 | 2019-12-25T03:35:08 | 2019-12-25T03:35:08 | 210,814,346 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | import queue
import networkx as nx
import matplotlib.pyplot as plt
# Build a random dense graph (100 nodes, 2000 edges, fixed seed for
# reproducibility) and draw it once before any path searches.
print("Generating random graph")
graph = nx.gnm_random_graph(100, 2000, 84640)
plt.figure(dpi=300)
plt.box(False)
# Fixed layout seed so every later figure places nodes identically.
nx_pos = nx.spring_layout(graph, seed = 184673)
nx.draw(graph, nx_pos, node_size=30, with_labels=False,
        edge_color='#00000006', node_color='#a00000')
plt.show()
def find_path_bfs(graph, node_from, node_to):
    """Return a shortest path (list of nodes) from node_from to node_to
    found by breadth-first search, or [] if node_to is unreachable.

    Fixes the original implementation, which never marked nodes as
    visited: parent links could be overwritten after use, and on any
    graph with a cycle the queue was refilled forever, so the search
    never terminated when node_to was unreachable.
    """
    if node_from == node_to:
        return [node_from]
    n = graph.number_of_nodes()
    node_parents = [None] * n
    visited = [False] * n
    visited[node_from] = True
    nodes_queue = queue.Queue()
    nodes_queue.put(node_from)
    while not nodes_queue.empty():
        node = nodes_queue.get()
        for neighbor in graph.neighbors(node):
            if visited[neighbor]:
                continue
            visited[neighbor] = True
            node_parents[neighbor] = node
            if neighbor == node_to:
                # Reconstruct the path by walking parent links backwards.
                result = [node_to]
                parent = node
                while parent != node_from:
                    result.append(parent)
                    parent = node_parents[parent]
                result.append(node_from)
                result.reverse()
                return result
            nodes_queue.put(neighbor)
    return []
# Demonstrate BFS: find a path between nodes 1 and 5 and highlight it
# (red) on top of the greyed-out graph.
print("Breadth first algorithm. Searching path")
bfs_path = find_path_bfs(graph, 1, 5)
# Consecutive path nodes become the edge list to highlight.
bfs_edges = [(bfs_path[n], bfs_path[n + 1]) for n in range(len(bfs_path) - 1)]
print("Resulting path: ", bfs_path)
plt.figure(dpi=300)
plt.box(False)
nx.draw(graph, nx_pos, node_size=30, with_labels=False,
        edge_color='#00000006', node_color='#888888')
nx.draw(graph, nx_pos, node_size=30, with_labels=False,
        edge_color='#a00000', node_color='#a00000',
        nodelist=bfs_path, edgelist=bfs_edges)
plt.show()
def dfs_connected(graph, starting_node):
    """Iterative depth-first search.

    Returns a boolean list (indexed by node) marking every node that lies
    in the same connected component as starting_node.
    """
    visited = [False] * graph.number_of_nodes()
    visited[starting_node] = True
    stack = [starting_node]
    while stack:
        current = stack[-1]
        # First still-unvisited neighbor of the node on top of the stack,
        # or None when the node is fully explored.
        unvisited = next(
            (nb for nb in graph.neighbors(current) if not visited[nb]),
            None
        )
        if unvisited is None:
            stack.pop()
        else:
            visited[unvisited] = True
            stack.append(unvisited)
    return visited
# Demonstrate DFS: extract the connected component containing node 3 in
# a much sparser graph (60 edges) and highlight its nodes in red.
print("Depth fisrt search. Searching for connected components in sparse graph")
sparse_graph = nx.gnm_random_graph(100, 60, 84640)
visited = dfs_connected(sparse_graph, 3)
# Convert the boolean mask into a node list for drawing.
visited_nodes = []
for i in range(len(visited)):
    if visited[i]:
        visited_nodes.append(i)
print("Graph connected component nodes: ", visited_nodes)
plt.figure(dpi=300)
plt.box(False)
nx_sparse_pos = nx.spring_layout(sparse_graph, seed = 13432)
nx.draw(sparse_graph, nx_sparse_pos, node_size=30, with_labels=False,
        edge_color='#00000060', node_color='#888888')
nx.draw(sparse_graph, nx_sparse_pos, node_size=30, with_labels=False,
        edge_color='#a00000', node_color='#a00000',
        nodelist=visited_nodes, edgelist=[])
plt.show()
| [
"id.bobr@gmail.com"
] | id.bobr@gmail.com |
097adabf537be9d5a6651b1d6727f3853fa809ea | 3d3ef26a174f8ba24c835d3d196c78ef25c501e2 | /5-Proj/2-MultiWindow.py | 358c71a0fc735850a853d55acd7aaf8986039d5e | [] | no_license | nithincyberitus/QtPython-0.1 | 296e83195036a55c6daf7825d797aa0b00a5103d | e614c4d1acd970bf9d3e2e93f921c4abe106a2fe | refs/heads/master | 2020-03-21T02:07:01.417683 | 2018-07-13T11:03:34 | 2018-07-13T11:03:34 | 137,980,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | import sys
from PySide2.QtWidgets import QApplication,QMainWindow,QWidget,QVBoxLayout,QStackedWidget,QPushButton,QHBoxLayout
from PySide2.QtCore import Qt
class MainWindow(QMainWindow):
    """Top-level application window hosting a MainWidget; Esc closes it."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.mainWidget = MainWidget(self)
        self.setCentralWidget(self.mainWidget)
        self.initUI()

    def initUI(self):
        """Size the window and set up its title and status bar."""
        self.resize(300, 500)
        self.statusBar()
        self.statusBar().showMessage('Elo Elo')
        self.setWindowTitle('StartApp Welcome')

    # Instead of overriding this method you should look into using QActions.
    def keyPressEvent(self, e):
        """Close the app on Esc; defer every other key to Qt's default
        handling (otherwise no other key would do anything)."""
        if e.key() != Qt.Key_Escape:
            super(MainWindow, self).keyPressEvent(e)
        else:
            self.close()
# This class is where we handle switching between QStackedWidget pages
class MainWidget(QWidget):
def __init__(self, parent=None):
super(MainWidget, self).__init__(parent)
self.initUI()
def initUI(self):
layout = QVBoxLayout(self)
self.stack = QStackedWidget(parent=self)
self.search = SearchWidget(parent=self)
self.search.searchButton.clicked.connect(self.goSearch)
self.back = BackWidget(parent=self)
self.back.backButton.clicked.connect(self.goBack)
self.stack.addWidget(self.search)
self.stack.addWidget(self.back)
layout.addWidget(self.stack)
def goSearch(self):
self.stack.setCurrentWidget(self.back)
def goBack(self):
self.stack.setCurrentWidget(self.search)
class SearchWidget(QWidget):
    """First page: search/option buttons stacked above a list/quit row."""

    def __init__(self, parent=None):
        super(SearchWidget, self).__init__(parent)
        self.initUI()

    def initUI(self):
        """Lay out the four buttons of this page."""
        self.searchButton = QPushButton('searchButton', parent=self)
        option_button = QPushButton('optionButton', parent=self)
        quit_button = QPushButton('quitButton', parent=self)
        list_button = QPushButton('listButton', parent=self)

        column = QVBoxLayout(self)
        column.addStretch(1)
        column.addWidget(self.searchButton)
        column.addWidget(option_button)

        bottom_row = QHBoxLayout()
        bottom_row.addWidget(list_button)
        bottom_row.addWidget(quit_button)
        column.addLayout(bottom_row)
class BackWidget(QWidget):
    """Second page: a single button that returns to the search page."""

    def __init__(self, parent=None):
        super(BackWidget, self).__init__(parent)
        self.initUI()

    def initUI(self):
        """Create the back button (no layout, matching the original page)."""
        self.backButton = QPushButton('GoBack', parent=self)
def main():
    """Create the application, show the main window, and run the event loop."""
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec_()
# Standard entry guard: only launch the GUI when executed directly.
# Fix: the call line was corrupted by concatenation residue ("| [").
if __name__ == '__main__':
    main()
"nithin.gollapalli@cyber-itus.com"
] | nithin.gollapalli@cyber-itus.com |
ca6c4014e8b19a81eee8347fdb6f90318957ef64 | 9f9ac37f22333569ae3bec78075d0918c3ad2742 | /resources/reference/source/conf.py | 943a1876975f4250eed61417efc92d2d98a2483f | [
"MIT"
] | permissive | credativ/pg_backup_ctl-plus | 602bcd0ce2bcce1653dd340e7b134c3b8f92973d | d1655f9791be9227e17b60731829bbd8572e850b | refs/heads/master | 2023-03-20T22:15:21.159701 | 2021-03-16T18:08:53 | 2021-03-16T18:08:53 | 348,447,804 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,621 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pg_backup_ctl++ documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 1 13:23:34 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pg_backup_ctl++'
copyright = '2018, Bernd Helmle'
author = 'Bernd Helmle'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pg_backup_ctldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pg_backup_ctl.tex', 'pg\\_backup\\_ctl++ Documentation',
'Bernd Helmle', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pg_backup_ctl', 'pg_backup_ctl++ Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pg_backup_ctl', 'pg_backup_ctl++ Documentation',
author, 'pg_backup_ctl', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"bernd.helmle@credativ.de"
] | bernd.helmle@credativ.de |
edfa698a2b59a1f3f4933f667ae163d842cb428d | f06ddca5258290a1e7448a18e1d24a9d20226fbd | /pytext/common/constants.py | 3b0c31b01f7bc57811441b3f5a267b920e948602 | [
"BSD-3-Clause"
] | permissive | mruberry/pytext | 6d64bc37429e3dd5581e5b3b6bf60bd216b6f445 | 3bba58a048c87d7c93a41830fa7853896c4b3e66 | refs/heads/master | 2022-07-16T07:41:47.781126 | 2020-05-14T04:52:35 | 2020-05-14T04:54:33 | 263,892,770 | 2 | 0 | NOASSERTION | 2020-05-14T11:11:33 | 2020-05-14T11:11:32 | null | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from enum import Enum
class DatasetFieldName:
    """String constants naming the fields of a processed dataset batch."""
    DOC_LABEL_FIELD = "doc_label"
    WORD_LABEL_FIELD = "word_label"
    UTTERANCE_FIELD = "utterance"
    TEXT_FIELD = "word_feat"
    SEQ_FIELD = "seq_word_feat"
    DICT_FIELD = "dict_feat"
    RAW_DICT_FIELD = "sparsefeat"
    CHAR_FIELD = "char_feat"
    DENSE_FIELD = "dense_feat"
    CONTEXTUAL_TOKEN_EMBEDDING = "contextual_token_embedding"
    DOC_WEIGHT_FIELD = "doc_weight"
    WORD_WEIGHT_FIELD = "word_weight"
    RAW_WORD_LABEL = "raw_word_label"
    TOKEN_INDICES = "token_indices"
    TOKEN_RANGE = "token_range"
    TOKENS = "tokens"
    LANGUAGE_ID_FIELD = "lang"
    SEQ_LENS = "seq_lens"
    TARGET_SEQ_LENS = "target_seq_lens"
    RAW_SEQUENCE = "raw_sequence"
    SOURCE_SEQ_FIELD = "source_sequence"
    TARGET_SEQ_FIELD = "target_sequence"
    NUM_TOKENS = "num_tokens"
class PackageFileName:
    """File names used when packaging pretrained embeddings."""
    SERIALIZED_EMBED = "pretrained_embed_pt_serialized"
    RAW_EMBED = "pretrained_embed_raw"
class DFColumn:
    """Column names for the intermediate dataframe representation."""
    DOC_LABEL = "doc_label"
    WORD_LABEL = "word_label"
    UTTERANCE = "text"
    ALIGNMENT = "alignment"
    DICT_FEAT = "dict_feat"
    DENSE_FEAT = "dense_feat"
    RAW_FEATS = "raw_feats"
    MODEL_FEATS = "model_feats"
    DOC_WEIGHT = "doc_weight"
    WORD_WEIGHT = "word_weight"
    TOKEN_RANGE = "token_range"
    LANGUAGE_ID = "lang"
    SOURCE_SEQUENCE = "source_sequence"
    CONTEXT_SEQUENCE = "context_sequence"
    TARGET_SEQUENCE = "target_sequence"
    SOURCE_FEATS = "source_feats"
    TARGET_TOKENS = "target_tokens"
    SEQLOGICAL = "seqlogical"
    TARGET_PROBS = "target_probs"
    TARGET_LOGITS = "target_logits"
    TARGET_LABELS = "target_labels"
class Padding:
    """Padding label conventions for word-tagging targets."""
    WORD_LABEL_PAD = "PAD_LABEL"
    WORD_LABEL_PAD_IDX = 0
    DEFAULT_LABEL_PAD_IDX = -1
class VocabMeta:
    """Special vocabulary tokens (unknown/padding/sentence boundaries)."""
    UNK_TOKEN = "<unk>"
    UNK_NUM_TOKEN = f"{UNK_TOKEN}-NUM"
    PAD_TOKEN = "<pad>"
    EOS_TOKEN = "</s>"
    INIT_TOKEN = "<s>"
    PAD_SEQ = "<pad_seq>"
    EOS_SEQ = "</s_seq>"
    INIT_SEQ = "<s_seq>"
class BatchContext:
    """Keys for auxiliary per-batch context entries."""
    IGNORE_LOSS = "ignore_loss"
    INDEX = "row_index"
    TASK_NAME = "task_name"
class Stage(Enum):
    """Phase of the model lifecycle a batch/run belongs to."""
    TRAIN = "Training"
    EVAL = "Evaluation"
    TEST = "Test"
    OTHERS = "Others"
class RawExampleFieldName:
    """Field names attached to raw (untensorized) examples."""
    ROW_INDEX = "row_index"
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d6373eff10443fed41f31fbc6f731d44a1c41826 | 02b3e28fa0b4f6ece144a9455c32194e63f4bf17 | /vickitrix/__init__.py | a5c33435030889c262f303092f8264c59058f6b6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | gitter-badger/vickitrix | 7b1be0813bc21503b995d75f57819397b2fcb84e | b33a6593837c5302dcc95867e982f1713d234bc9 | refs/heads/master | 2021-01-15T17:15:14.147257 | 2017-08-08T14:09:41 | 2017-08-08T14:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,542 | py | #!/usr/bin/env python
"""
vickitrix
Checks tweets using http://www.tweepy.org/ and
uses rules specified in file to make market trades on GDAX using
https://github.com/danpaquin/GDAX-Python. Default rules are stored in
rules/vicki.py and follow the tweets of @vickicryptobot.
"""
from __future__ import print_function
import sys
# For 2-3 compatibility
try:
input = raw_input
except NameError:
pass
_help_intro = """vickitrix allows users to base GDAX trades on tweets."""
_key_derivation_iterations = 5000
try:
import gdax
except ImportError as e:
e.message = (
'vickitrix requires GDAX-Python. Install it with "pip install gdax".'
)
raise
try:
from twython import TwythonStreamer, Twython, TwythonError
except ImportError as e:
e.message = (
'vickitrix requires Twython. Install it with '
'"pip install twython".'
)
raise
try:
from Crypto.Cipher import AES
from Crypto.Protocol import KDF
from Crypto import Random
except ImportError:
e.message = (
'vickitrix requires PyCrypto. Install it with '
'"pip install pycrypto".'
)
raise
import os
import errno
import time
import argparse
import getpass
import datetime
import base64
import json
# In case user wants to use regular expressions on conditions/funds
import re
def help_formatter(prog):
    """Build an argparse help formatter with a wider help column.

    Exists solely so formatter_class's max_help_position can be changed.
    """
    formatter = argparse.HelpFormatter(prog, max_help_position=40)
    return formatter
def print_to_screen(message, newline=True, carriage_return=False):
    """ Prints message to stdout as well as stderr if stderr is redirected.

        message: message to print
        newline: True iff newline should be printed
        carriage_return: True iff carriage return should be printed; also
            clears line with ANSI escape code

        No return value.
    """
    # Fix: 'unicodedata' was referenced below but never imported at module
    # level, so both UnicodeEncodeError fallback paths raised NameError.
    import unicodedata
    full_message = ('\x1b[K' + message + ('\r' if carriage_return else '')
                        + (os.linesep if newline else ''))
    try:
        sys.stderr.write(full_message)
        if sys.stderr.isatty():
            sys.stderr.flush()
        else:
            try:
                # So the user sees it too
                sys.stdout.write(full_message)
                sys.stdout.flush()
            except UnicodeEncodeError:
                # Degrade to ASCII when stdout can't encode the message
                sys.stdout.write(
                        unicodedata.normalize(
                                'NFKD', full_message
                            ).encode('ascii', 'ignore')
                    )
                sys.stdout.flush()
    except UnicodeEncodeError:
        sys.stderr.write(
                unicodedata.normalize(
                        'NFKD', full_message
                    ).encode('ascii', 'ignore')
            )
        sys.stderr.flush()
def timestamp():
    """Return the current local time formatted as a log-line prefix."""
    now = time.localtime(time.time())
    return time.strftime('%A, %b %d, %Y at %I:%M:%S %p %Z || ', now)
def prettify_dict(rule):
    """Render a rule dictionary as an indented, human-readable JSON string.

    rule: rule dictionary

    Return value: pretty-printed JSON string
    """
    return json.dumps(
        rule,
        indent=4,
        sort_keys=False,
        separators=(',', ': ')
    )
def get_dough(gdax_client, status_update=False):
    """Fetch the available balance of every GDAX account.

    gdax_client: instance of gdax.AuthenticatedClient
    status_update: True iff a status line should be printed

    Return value: dictionary mapping currency code to available balance
    """
    dough = {
        account['currency']: account['available']
        for account in gdax_client.get_accounts()
    }
    if status_update:
        print_to_screen(''.join([timestamp(), 'Available to trade: ',
                            ', '.join(map(' '.join,
                                [el[::-1] for el in dough.items()]))]))
    return dough
class TradeListener(TwythonStreamer):
    """ Trades on GDAX based on tweets.

    Streams Twitter statuses; when a status matches a rule's handles,
    keywords, and condition, places that rule's orders on GDAX.
    """

    def __init__(self, rules, gdax_client,
                    app_key, app_secret, oauth_token, oauth_token_secret,
                    timeout=300, retry_count=None, retry_in=10, client_args=None,
                    handlers=None, chunk_size=1, sleep_time=0.5):
        # Fix: forward the caller-supplied streamer options; the original
        # always passed the hard-coded defaults, silently ignoring any
        # values given to this constructor.
        super(TradeListener, self).__init__(
                app_key, app_secret, oauth_token, oauth_token_secret,
                timeout=timeout, retry_count=retry_count, retry_in=retry_in,
                client_args=client_args, handlers=handlers,
                chunk_size=chunk_size
            )
        self.rules = rules
        self.gdax_client = gdax_client
        self.sleep_time = sleep_time
        self.available = get_dough(self.gdax_client, status_update=False)
        self.public_client = gdax.PublicClient() # for product order book

    def on_success(self, status):
        """Handle one streamed status: evaluate every rule and place the
        matching rule's orders. RTs and replies are ignored."""
        for rule in self.rules:
            if ((not rule['handles'])
                or status['user']['screen_name'].lower()
                in rule['handles']) and ((not rule['keywords'])
                or any([keyword in status['text'].lower()
                        for keyword in rule['keywords']])) and eval(
                    rule['condition'].format(
                            tweet='status["text"]',
                            available=self.available
                        )):
                if (('retweeted_status' in status
                        and status['retweeted_status'])
                    or status['in_reply_to_status_id']
                    or status['in_reply_to_status_id_str']
                    or status['in_reply_to_user_id']
                    or status['in_reply_to_user_id_str']
                    or status['in_reply_to_screen_name']):
                    # This is an RT or reply; don't do anything
                    return
                # Condition satisfied! Perform action
                print_to_screen(
                        ''.join(
                            [timestamp(), 'TWEET MATCHED || @',
                             status['user']['screen_name'] , ': ',
                             status['text']]
                        )
                    )
                for order in rule['orders']:
                    self.available = get_dough(self.gdax_client,
                                                status_update=True)
                    order_book = self.public_client.get_product_order_book(
                            order['product_id']
                        )
                    inside_bid, inside_ask = (
                            order_book['bids'][0][0],
                            order_book['asks'][0][0]
                        )
                    not_enough = False
                    for money in ['size', 'funds', 'price']:
                        try:
                            '''If the hundredths rounds down to zero,
                            ain't enough'''
                            order[money] = str(eval(
                                    order[money].format(
                                        # Fix: the template is eval'd here,
                                        # where the tweet text is
                                        # status["text"] (status is a dict),
                                        # not status.text.
                                        tweet='status["text"]',
                                        available=self.available,
                                        inside_bid=inside_bid,
                                        inside_ask=inside_ask
                                    )
                                ))
                            # Fix: accumulate with "or" so a zero-valued
                            # earlier field isn't masked by a later
                            # nonzero one.
                            not_enough = not_enough or (
                                    int(float(order[money]) * 100) == 0
                                )
                        except KeyError:
                            pass
                    print_to_screen(''.join(
                            [timestamp(), 'PLACING ORDER', os.linesep] +
                            [prettify_dict(order)]
                        ))
                    if not_enough:
                        print_to_screen(
                            timestamp() +
                            'One of {"price", "funds", "size"} is zero! ' +
                            'Order not placed.'
                        )
                        return
                    if order['side'] == 'buy':
                        self.gdax_client.buy(**order)
                    else:
                        assert order['side'] == 'sell'
                        self.gdax_client.sell(**order)
                    print_to_screen(timestamp() + 'Order placed.')
                    time.sleep(self.sleep_time)
                get_dough(self.gdax_client, status_update=True)

    def on_error(self, status_code, status):
        """Disconnect on Twitter rate limiting (HTTP 420) so the caller
        can back off and reconnect."""
        if status_code == 420:
            # Rate limit error; bail and wait to reconnect
            self.disconnect()
def go():
    """ Entry point: parse the command line and dispatch to the
        "configure" or "trade" subcommand. """
    # Print file's docstring if -h is invoked
    parser = argparse.ArgumentParser(description=_help_intro,
                formatter_class=help_formatter)
    subparsers = parser.add_subparsers(help=(
                'subcommands; add "-h" or "--help" '
                'after a subcommand for its parameters'),
                dest='subparser_name'
            )
    config_parser = subparsers.add_parser(
                            'configure',
                            help=(
                                'creates profile for storing keys/secrets; '
                                'all keys are stored in "{}".'.format(
                                        os.path.join(
                                            os.path.expanduser('~'),
                                            '.vickitrix',
                                            'config')
                                    )
                            )
                        )
    trade_parser = subparsers.add_parser(
                            'trade',
                            help='trades based on tweets'
                        )
    # Add command-line arguments
    trade_parser.add_argument('--profile', '-p', type=str, required=False,
            default='default',
            help='which profile to use for trading'
        )
    trade_parser.add_argument('--rules', '-r', type=str, required=False,
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                    'rules', 'vicki.py'),
            help=('rules file; this is Python that sets the variable "rules" '
                  'to a list of dictionaries')
        )
    trade_parser.add_argument('--interval', '-i', type=float, required=False,
            default=905,
            help=('how long to wait (in s) before reattempting to connect '
                  'after getting rate-limited')
        )
    trade_parser.add_argument('--sleep', '-s', type=float, required=False,
            default=0.5,
            help='how long to wait (in s) after an order has been placed'
        )
    args = parser.parse_args()
    key_dir = os.path.join(os.path.expanduser('~'), '.vickitrix')
    if args.subparser_name == 'configure':
        try:
            os.makedirs(key_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # Grab and write all necessary credentials
        config_file = os.path.join(key_dir, 'config')
        print('Enter a name for a new profile (default): ', end='')
        profile_name = input()
        if not profile_name: profile_name = 'default'
        salt = Random.new().read(AES.block_size)
        key = KDF.PBKDF2(getpass.getpass((
                'Enter a password for this profile. The password will be used '
                'to generate a key so all GDAX/Twitter passcodes/secrets '
                'written to {} are further encoded with AES256. '
                'You will have to enter a profile\'s password every time you '
                'run "vickitrix trade": '
            ).format(config_file)), salt,
                dkLen=32, count=_key_derivation_iterations)
        previous_lines_to_write = []
        if os.path.exists(config_file):
            '''Have to check if the profile exists already. If it does, replace
            it. Assume the config file is under vickitrix's control and thus
            has no errors; if the user chooses to mess it up, that's on
            them.'''
            with open(config_file, 'rU') as config_stream:
                line = config_stream.readline().rstrip('\n')
                while line:
                    if line[0] == '[' and line[-1] == ']':
                        if profile_name == line[1:-1]:
                            # Skip this profile
                            for _ in range(8): config_stream.readline()
                            line = config_stream.readline().rstrip('\n')
                            continue
                    previous_lines_to_write.append(line)
                    for _ in range(8):
                        previous_lines_to_write.append(
                                config_stream.readline().rstrip('\n')
                            )
                    line = config_stream.readline().rstrip('\n')
        with open(config_file, 'w') as config_stream:
            print(''.join(['[', profile_name, ']']), file=config_stream)
        # Now change permissions
        try:
            os.chmod(config_file, 0o600)
        except OSError as e:
            if e.errno == errno.EPERM:
                # Fix: this warning used Python 2 print-statement syntax
                # ("print >>sys.stderr, ..."), which under print_function
                # (and Python 3) evaluates ">>" on the print function and
                # raises TypeError at runtime.
                print((
                        'Warning: could not change permissions of '
                        '"{}" so it\'s readable/writable by only the '
                        'current user. If there are other users of this '
                        'system, they may be able to read your credentials '
                        'file.').format(
                        config_file
                    ), file=sys.stderr)
            raise
        with open(config_file, 'a') as config_stream:
            print(''.join(['Salt: ', base64.b64encode(salt).decode()]),
                    file=config_stream)
            for token in ['GDAX key', 'GDAX secret', 'GDAX passphrase',
                            'Twitter consumer key', 'Twitter consumer secret',
                            'Twitter access token key',
                            'Twitter access token secret']:
                if 'key' in token:
                    print(''.join(['Enter ', token, ': ']), end='')
                    '''Write it in plaintext if it's a public key; then the
                    user can open the config file and know which keys are in
                    use.'''
                    print(''.join([token, ': ', input()]),
                            file=config_stream)
                else:
                    # A warning to developers in a variable name
                    unencoded_and_not_to_be_written_to_disk = getpass.getpass(
                            ''.join(['Enter ', token, ': '])
                        )
                    iv = Random.new().read(AES.block_size)
                    cipher = AES.new(key, AES.MODE_CFB, iv)
                    print(''.join([
                            token,
                            ' (AES256-encrypted using profile password): ',
                            base64.b64encode(iv + cipher.encrypt(
                                unencoded_and_not_to_be_written_to_disk
                            )).decode()]), file=config_stream)
            for line in previous_lines_to_write:
                print(line, file=config_stream)
        print(('Configured profile "{}". Encrypted credentials have been '
               'stored in "{}". '
               'Now use the "trade" subcommand to '
               'trigger trades with new tweets.').format(
                    profile_name,
                    config_file
                ))
    elif args.subparser_name == 'trade':
        # Set and check rules
        from imp import load_source
        try:
            rules = load_source('rules', args.rules).rules
        except IOError as e:
            e.message = 'Cannot find or access rules file "{}".'.format(
                                                                args.rules
                                                            )
            raise
        import copy
        # Add missing keys so listener doesn't fail
        new_rules = copy.copy(rules)
        order_vocab = set(['client_oid', 'type', 'side', 'product_id', 'stp',
                            'price', 'size', 'time_in_force', 'cancel_after',
                            'post_only', 'funds', 'overdraft_enabled',
                            'funding_amount'])
        for i, rule in enumerate(rules):
            # Check 'condition'
            try:
                eval(rule['condition'].format(
                        tweet='"The rain in Spain stays mainly in the plain."',
                        available={
                            'ETH' : .01,
                            'USD' : .01,
                            'LTC' : .01,
                            'BTC' : .01
                        }
                    ))
            except KeyError:
                # 'condition' isn't required, so make default True
                new_rules[i]['condition'] = 'True'
            # Fix: narrowed from a bare "except:" so KeyboardInterrupt /
            # SystemExit aren't swallowed while validating rules.
            except Exception:
                raise RuntimeError(''.join([
                        ('"condition" from the following rule in the file '
                         '"{}" could not be '
                         'evaluated; check the format '
                         'and try again: ').format(args.rules),
                        os.linesep, prettify_dict(rule)
                    ])
                )
            # Check handles or keywords
            if 'handles' not in rule and 'keywords' not in rule:
                raise RuntimeError(''.join([
                        ('A rule must have at least one of {{"handles", '
                         '"keywords"}}, but this rule from the file "{}" '
                         'doesn\'t:').format(args.rules),
                        os.linesep, prettify_dict(rule)
                    ])
                )
            if 'handles' not in rule:
                new_rules[i]['handles'] = []
            if 'keywords' not in rule:
                new_rules[i]['keywords'] = []
            new_rules[i]['handles'] = [
                    handle.lower() for handle in new_rules[i]['handles']
                ]
            new_rules[i]['keywords'] = [
                    keyword.lower() for keyword in new_rules[i]['keywords']
                ]
            '''Validate order; follow https://docs.gdax.com/#orders for
            filling in default values.'''
            if 'orders' not in rule or not isinstance(rule['orders'], list):
                raise RuntimeError(''.join([
                        ('Every rule must have an "orders" list, but '
                         'this rule from the file "{}" doesn\'t:').format(
                            args.rules), os.linesep, prettify_dict(rule)
                    ])
                )
            for j, order in enumerate(rule['orders']):
                if not isinstance(order, dict):
                    raise RuntimeError(''.join([
                        ('Every order must be a dictionary, but order #{} '
                         'from this rule in the file "{}" isn\'t:').format(
                            j+1, args.rules), os.linesep, prettify_dict(rule)]))
                unrecognized_keys = [
                        key for key in order if key not in order_vocab
                    ]
                if unrecognized_keys:
                    raise RuntimeError(''.join([
                            'In the file "{}", the "order" key(s) '.format(
                                                                args.rules),
                            os.linesep, '[',
                            ', '.join(unrecognized_keys), ']', os.linesep,
                            ('are invalid yet present in order #{} of '
                             'the following rule:').format(j+1),
                            os.linesep, prettify_dict(rule)
                        ]))
                try:
                    if order['type'] not in [
                            'limit', 'market', 'stop'
                        ]:
                        raise RuntimeError(''.join([
                                ('An order\'s "type" must be one of {{"limit", '
                                 '"market", "stop"}}, which order #{} in this '
                                 'rule from the file "{}" doesn\'t '
                                 'satisfy:').format(j+1, args.rules),
                                os.linesep, prettify_dict(rule)
                            ]))
                except KeyError:
                    # GDAX default is limit
                    new_rules[i]['orders'][j]['type'] = 'limit'
                if 'side' not in order:
                    raise RuntimeError(''.join([
                            ('An order must have a "side", but order #{} in '
                             'this rule from the file "{}" doesn\'t:').format(
                                j+1, args.rules), os.linesep, prettify_dict(rule)
                        ])
                    )
                if order['side'] not in ['buy', 'sell']:
                    raise RuntimeError(''.join([
                            ('An order\'s "side" must be one of {{"buy", '
                             '"sell"}}, which order #{} in this rule '
                             'from the file "{}" doesn\'t satisfy:').format(
                                j+1, args.rules), os.linesep, prettify_dict(rule)
                        ])
                    )
                if 'product_id' not in order:
                    raise RuntimeError(''.join([
                            ('An order must have a "product_id", but in the '
                             'file "{}", order #{} from this rule '
                             'doesn\'t:').format(args.rules, j+1),
                            os.linesep, prettify_dict(rule)
                        ]))
                if new_rules[i]['orders'][j]['type'] == 'limit':
                    for item in ['price', 'size']:
                        if item not in order:
                            raise RuntimeError(''.join([
                                ('If an order\'s "type" is "limit", the order '
                                 'must specify a "{}", but in the file "{}",'
                                 'order #{} from this rule doesn\'t:').format(
                                    item, args.rules, j+1),
                                os.linesep, prettify_dict(rule)
                            ]))
                elif new_rules[i]['orders'][j]['type'] in ['market', 'stop']:
                    if 'size' not in order and 'funds' not in order:
                        raise RuntimeError(''.join([
                                ('If an order\'s "type" is "{}", the order '
                                 'must have at least one of {{"size", '
                                 '"funds"}}, but in file "{}", order #{} '
                                 'of this rule doesn\'t:').format(
                                        new_rules[i]['orders'][j]['type'],
                                        args.rules, j+1
                                    ), os.linesep, prettify_dict(rule)]))
                for stack in ['size', 'funds', 'price']:
                    try:
                        eval(order[stack].format(
                                tweet=('"The rain in Spain stays mainly '
                                       'in the plain."'),
                                available={
                                    'ETH' : .01,
                                    'USD' : .01,
                                    'LTC' : .01,
                                    'BTC' : .01
                                }, inside_bid=200, inside_ask=200))
                    except KeyError:
                        pass
                    except Exception as e:
                        raise RuntimeError(''.join([
                                ('"{}" from order #{} in the following '
                                 'rule from the file "{}" could not be '
                                 'evaluated; check the format '
                                 'and try again:').format(
                                        stack, j+1, args.rules
                                    ), os.linesep, prettify_dict(rule)]))
        rules = new_rules
        # Use _last_ entry in config file with profile name
        key = None
        try:
            with open(os.path.join(key_dir, 'config'), 'rU') as config_stream:
                line = config_stream.readline().rstrip('\n')
                while line:
                    profile_name = line[1:-1]
                    if profile_name == args.profile:
                        salt = base64.b64decode(
                                config_stream.readline().rstrip(
                                                    '\n').partition(': ')[2]
                            )
                        if key is None:
                            key = KDF.PBKDF2(getpass.getpass(
                                    'Enter password for profile "{}": '.format(
                                                            profile_name
                                                        )
                                ), salt,
                                dkLen=32, count=_key_derivation_iterations
                            )
                        keys_and_secrets = []
                        for _ in range(7):
                            item, _, encoded = config_stream.readline().rstrip(
                                                    '\n').partition(': ')
                            if 'key' in item:
                                # Not actually encoded; remove leading space
                                keys_and_secrets.append(encoded)
                                continue
                            encoded = base64.b64decode(encoded)
                            cipher = AES.new(
                                    key, AES.MODE_CFB,
                                    encoded[:AES.block_size]
                                )
                            keys_and_secrets.append(
                                    cipher.decrypt(
                                        encoded
                                    )[AES.block_size:]
                                )
                    else:
                        # Skip profile
                        for _ in range(8): config_stream.readline()
                    line = config_stream.readline().rstrip('\n')
        except IOError as e:
            e.message = (
                    'Cannot find vickitrix config file. Use '
                    '"vickitrix configure" to configure vickitrix '
                    'before trading.'
                )
            raise
        try:
            # Instantiate GDAX and Twitter clients
            gdax_client = gdax.AuthenticatedClient(
                                *keys_and_secrets[:3]
                            )
            # Are they working?
            get_dough(gdax_client, status_update=True)
            twitter_client = Twython(*keys_and_secrets[3:7])
            trade_listener = TradeListener(
                    *([rules, gdax_client] + keys_and_secrets[3:7]),
                    sleep_time=args.sleep
                )
        except Exception as e:
            from traceback import format_exc
            print_to_screen(format_exc())
            print_to_screen(''.join(
                    [os.linesep,
                     'Chances are, this opaque error happened because either ',
                     os.linesep,
                     'a) You entered incorrect security credentials '
                     'when you were configuring vickitrix.',
                     os.linesep,
                     'b) You entered the wrong password above.']
                ))
            exit(1)
        print_to_screen('Twitter/GDAX credentials verified.')
        # Get all handles to monitor
        handles, keywords = set(), set()
        for rule in rules:
            handles.update(rule['handles'])
            keywords.update(rule['keywords'])
        handles_to_user_ids = {}
        for handle in handles:
            try:
                handles_to_user_ids[handle] = twitter_client.show_user(
                                                    screen_name=handle
                                                )['id_str']
            except TwythonError as e:
                if 'User not found' in e.message:
                    print(
                        'Handle {} not found; skipping rule...'.format(handle)
                    )
                else:
                    raise
        if not handles_to_user_ids:
            raise RuntimeError('No followable Twitter handles found in rules!')
        while True:
            print_to_screen('Listening for tweets; hit CTRL+C to quit...')
            trade_listener.statuses.filter(
                    follow=handles_to_user_ids.values(),
                    track=list(keywords)
                )
            print_to_screen(
                    timestamp()
                    + 'Rate limit error. Restarting in {} s...'.format(
                                                            args.interval
                                                        )
                )
            time.sleep(args.interval)
| [
"anellore@gmail.com"
] | anellore@gmail.com |
a623df557e19255a003aa296bc2801320feac1d1 | b2e9f579a2c3849ac8f76d741b3c71efbfec77b3 | /venv/bin/easy_install-3.7 | f147075b6d4b175c8e45de5438035b03717587f5 | [] | no_license | LabMem010/instagram | 2d4c53561b3b7e1f663556473fe5173d163b17f7 | 060fb22ef8ccc58e3426c91a019db9983d6b9f5f | refs/heads/master | 2022-05-24T10:56:27.297165 | 2020-04-26T12:32:34 | 2020-04-26T12:32:34 | 258,706,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | 7 | #!/Users/labman001/PycharmProjects/instagram/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim: resolves the 'easy_install-3.7' entry
# point from the pinned setuptools distribution and runs it.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.pyw' / '.exe' suffix so argv[0] matches the
    # console-script name regardless of platform launcher conventions.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"38588297+LabMem010@users.noreply.github.com"
] | 38588297+LabMem010@users.noreply.github.com |
87a9d5fc07b1eeb59551a66e38e121c1bcb52f4b | bb0eeade4685dc89ff8a53beb813afdf7394989d | /ML2018/commend sys/readers.py | 7d306676d9c3cffdfe249ecd0402e19a6f313dbb | [] | no_license | zhaocheng1996/pyproject | 72929cd0ba2f0486d7dc87a7defa82656bf75a8e | 0a1973dda314f844f9898357bc4a5c8ee3f2246d | refs/heads/master | 2021-10-26T08:38:43.675739 | 2019-04-11T13:52:46 | 2019-04-11T13:52:46 | 176,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
def read_file(filname, sep="\t"):
    """Load a ratings file into a DataFrame with 0-based user/item ids.

    Columns are named user, item, rate, st (st = raw timestamp).
    user/item become int32, rate becomes float32.
    """
    frame = pd.read_csv(filname, sep=sep, header=None,
                        names=["user", "item", "rate", "st"], engine='python')
    # Shift the 1-based ids in the file to 0-based and normalize dtypes.
    frame["user"] = (frame["user"] - 1).astype(np.int32)
    frame["item"] = (frame["item"] - 1).astype(np.int32)
    frame["rate"] = frame["rate"].astype(np.float32)
    return frame
#print(df)
# user item rate st
# 0 0 1192 5.0 978300760
# 1 0 660 3.0 978302109
# 2 0 913 3.0 978301968
class ShuffleIterator(object):
    """
    Yield batches of rows sampled uniformly at random (with replacement).
    """

    def __init__(self, inputs, batch_size=10):
        self.inputs = inputs
        self.batch_size = batch_size
        self.num_cols = len(self.inputs)
        self.len = len(self.inputs[0])
        # Stack the per-column sequences and transpose so that each row of
        # self.inputs is one complete sample.
        columns = [np.array(col) for col in self.inputs]
        self.inputs = np.transpose(np.vstack(columns))

    def __len__(self):
        return self.len

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iterator protocol delegates to the Python 2-style next().
        return self.next()

    def next(self):
        picks = np.random.randint(0, self.len, (self.batch_size,))
        batch = self.inputs[picks, :]
        return [batch[:, col] for col in range(self.num_cols)]
class OneEpochIterator(ShuffleIterator):
    """
    Walk every sample exactly once, in order, in fixed-size batches
    (typically used for test/evaluation data).
    """

    def __init__(self, inputs, batch_size=10):
        super(OneEpochIterator, self).__init__(inputs, batch_size=batch_size)
        if batch_size > 0:
            # One row-index group per batch; the last group may be smaller.
            self.idx_group = np.array_split(np.arange(self.len), np.ceil(self.len / batch_size))
        else:
            # Non-positive batch size: the whole epoch is a single batch.
            self.idx_group = [np.arange(self.len)]
        self.group_id = 0

    def next(self):
        try:
            rows = self.idx_group[self.group_id]
        except IndexError:
            # Epoch exhausted: rewind so the iterator can be reused, then stop.
            self.group_id = 0
            raise StopIteration
        self.group_id += 1
        batch = self.inputs[rows, :]
        return [batch[:, col] for col in range(self.num_cols)]
# Smoke test: parse the MovieLens-1M ratings file ("::"-delimited).
read_file('./ml-1m/ratings.dat', sep="::")
| [
"34829837+zhaocheng1996@users.noreply.github.com"
] | 34829837+zhaocheng1996@users.noreply.github.com |
7f1904267f59ff86bc553cd97a4fe96e88f4d2d9 | 5e642d1d47506f33a35a070538dd400c6a0b52ca | /my_app/migrations/0002_auto_20171025_1411.py | fa6a48a5afff760e46d3d938edb77a27f47af7bf | [] | no_license | okwow123/pythonanywhere | 8abb89763461e09059549df298089793bb50bfb2 | 8fe5bd98d8ee400d30987905ab38c749027e6657 | refs/heads/master | 2021-08-06T16:25:42.774299 | 2017-11-06T13:32:57 | 2017-11-06T13:32:57 | 107,761,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 14:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the app's initial schema migration.
    dependencies = [
        ('my_app', '0001_initial'),
    ]
    # Reshapes the Post model: rename text -> content, drop the default id
    # and both date columns, then add 'category' and a new 'no' auto PK.
    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='text',
            new_name='content',
        ),
        migrations.RemoveField(
            model_name='post',
            name='created_date',
        ),
        migrations.RemoveField(
            model_name='post',
            name='id',
        ),
        migrations.RemoveField(
            model_name='post',
            name='published_date',
        ),
        migrations.AddField(
            model_name='post',
            name='category',
            # preserve_default=False: 'test' is only the one-off backfill value.
            field=models.CharField(default='test', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='post',
            name='no',
            field=models.AutoField(default=0, primary_key=True, serialize=False),
            preserve_default=False,
        ),
    ]
| [
"okwow123@naver.com"
] | okwow123@naver.com |
e1b04397e1ceb2278327aa1a1914a2952f396dd6 | 783b56ae30be9481b5489a6f3aefffb40754d96e | /sagar/urls.py | 49eae2257795c5c10148b20f2c5f72d938b70b87 | [] | no_license | ramsagar2215/my-first-blog | b18b19c2ae0b4ddcb945173072e046440d8de2bd | c7cb7322d95f7fd1db823606722825eab02a838b | refs/heads/master | 2021-01-10T05:37:55.138093 | 2016-02-19T06:41:08 | 2016-02-19T06:41:08 | 52,067,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin registry from each installed app's admin module.
# NOTE(review): patterns()/url() come from the old django.conf.urls API
# imported just above this block (removed in Django 1.10+) -- confirm the
# project's Django version before upgrading.
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'sagar.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # Admin site mounted at /admin/.
    url(r'^admin/', include(admin.site.urls)),
)
| [
"sagar@django.com"
] | sagar@django.com |
f8ea387181d506a9e98135526689cde8994b93d5 | 64dae0662afd7d2646225df09333d9a365e5c6b2 | /furthest_point.py | 2e1f83797c3633ff2d54acc1e919a0d9b9d46240 | [] | no_license | sligocki/furthest-point | 4f9937ced141b9756de40372fac927f1e80c6e22 | abdc1912aacd535a01e9e6c8de6a7f0c5f5f9485 | refs/heads/main | 2022-02-04T04:28:09.903389 | 2018-12-31T18:33:00 | 2018-12-31T18:33:00 | 163,263,613 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | import math
class FurthestPointError(Exception):
  """Module-specific error type.

  NOTE(review): not raised anywhere in the code shown in this module.
  """
  pass
class Point:
  """A point/vector in R^3 with just enough algebra for sphere geometry."""

  def __init__(self, x, y, z):
    self.x = x
    self.y = y
    self.z = z

  def __neg__(self):
    """Component-wise negation (the antipode, for unit vectors)."""
    return Point(-self.x, -self.y, -self.z)

  def __sub__(self, other):
    """Component-wise difference self - other."""
    return Point(self.x - other.x,
                 self.y - other.y,
                 self.z - other.z)

  def __truediv__(self, scalar):
    """Divide every component by a scalar."""
    return Point(self.x / scalar,
                 self.y / scalar,
                 self.z / scalar)

  def length(self):
    """Euclidean norm."""
    return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)

  def normalize(self):
    """Return a copy scaled to unit length."""
    return self / self.length()

  def arc_dist(self, other):
    """Great-circle (arc) distance; both points must be unit vectors."""
    return math.acos(self.dot(other))

  def dot(self, other):
    """Dot product."""
    return (self.x * other.x +
            self.y * other.y +
            self.z * other.z)

  def __repr__(self):
    return "(%r, %r, %r)" % (self.x, self.y, self.z)
def dist(p1, p2):
  """Euclidean (chord) distance between two points."""
  delta = p2 - p1
  return delta.length()
def cross(p1, p2):
  """Cross product p1 x p2 (right-hand rule)."""
  cx = p1.y * p2.z - p2.y * p1.z
  cy = p1.z * p2.x - p2.z * p1.x
  cz = p1.x * p2.y - p2.x * p1.y
  return Point(cx, cy, cz)
def antipodal2(p1, p2):
  """Given 2 points on a unit sphere, return the antipodal pair of points
  that sit at the extreme (max and min) distance from both p1 and p2."""
  # Normal of the perpendicular-bisector plane of the chord p1-p2.
  mid_plane_n = p2 - p1
  # Normal of the plane through the origin containing p1 and p2.
  great_circle_n = cross(p1, p2)
  # Their cross product lies on both planes, i.e. on the sought axis.
  candidate = cross(mid_plane_n, great_circle_n).normalize()
  return candidate, -candidate
def antipodal_points(p1, p2, p3):
  """Given 3 points on a unit sphere, return the two (antipodal) points that
  are exactly equidistant from p1, p2 and p3."""
  # Each perpendicular-bisector plane (of p1-p2 and of p1-p3) passes through
  # the origin; the cross product of their normals spans the intersection
  # line, which pierces the sphere at the two answers.
  candidate = cross(p2 - p1, p3 - p1).normalize()
  return candidate, -candidate
def dist_set(x, ps):
  """Arc distance from x to the closest point in ps.

  Returns math.pi (the largest possible arc distance) when ps is empty.
  """
  # BUGFIX: the old sentinel was 3.0, which is *less* than pi, so any true
  # minimum distance in (3.0, pi] was silently clipped to 3.0. pi is a safe
  # upper bound because acos() never exceeds it.
  return min((x.arc_dist(p) for p in ps), default=math.pi)
def extreme_point(ps):
  """Find a point on the unit sphere far from every point in ps.

  Candidates are the antipodal pairs induced by every 2- and 3-subset of ps;
  the candidate maximizing the distance to its nearest ps member wins.
  Returns (best_point, that_distance).
  """
  if len(ps) == 1:
    # Single point: its antipode is trivially furthest.
    # NOTE(review): 2*pi exceeds the true maximum arc distance (pi); this
    # looks like an "infinite" sentinel bound -- confirm callers treat it so.
    return -ps[0], 2 * math.pi
  best_point = None
  max_dist = 0
  for i in range(len(ps)):
    for j in range(i + 1, len(ps)):
      # Candidates from the pair (i, j).
      aps = antipodal2(ps[i], ps[j])
      for ap in aps:
        d = dist_set(ap, ps)
        if d > max_dist:
          best_point = ap
          max_dist = d
      for k in range(j + 1, len(ps)):
        # Candidates from the triple (i, j, k).
        aps = antipodal_points(ps[i], ps[j], ps[k])
        for ap in aps:
          d = dist_set(ap, ps)
          if d > max_dist:
            best_point = ap
            max_dist = d
  return best_point, max_dist
def latlong2point(lat_d, long_d):
  """Convert latitude/longitude in degrees to a unit-sphere Point."""
  lat = lat_d / 180. * math.pi
  long = long_d / 180. * math.pi
  cos_lat = math.cos(lat)
  return Point(cos_lat * math.cos(long),
               cos_lat * math.sin(long),
               math.sin(lat))
def point2latlong(p):
  """Convert a unit-sphere point back to (latitude, longitude) in degrees."""
  lat = math.asin(p.z)
  cos_lat = math.cos(lat)
  long = math.atan2(p.y / cos_lat, p.x / cos_lat)
  # Radians -> degrees.
  return (lat / math.pi * 180.,
          long / math.pi * 180.)
if __name__ == "__main__":
  import sys
  # CLI: pairs of "<lat> <long>" in degrees, e.g. "10 20 30 40".
  ps = []
  for i in range(1, len(sys.argv), 2):
    lat = float(sys.argv[i])
    long = float(sys.argv[i+1])
    ps.append(latlong2point(lat, long))
  x, d = extreme_point(ps)
  # Report the furthest point as (lat, long) and the arc distance scaled by
  # Earth's mean radius in km (6371).
  print(point2latlong(x), d * 6371)
| [
"sligocki@gmail.com"
] | sligocki@gmail.com |
b9bb249fb650618fddb226037ef8ab1ef57c6afa | 570f19a623a2ed26b1ac31efe5ac390b41a58a13 | /Fastq_Parser.py | aa959f6b0dbdc9837259f5ce42d23d583c81af58 | [] | no_license | RichieJu520/Fasta_Tools | 5d234b21b94fa52be58b23181bcc1ea3a4289f83 | cfcb83a1926be5a8d09bff028e1f596f864c49c4 | refs/heads/master | 2021-05-09T19:07:06.277248 | 2019-01-09T06:43:47 | 2019-01-09T06:43:47 | 118,632,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | print 'This script is written for parsing Large Fasta file into small sub-fastas!'
import os
import time, sys
# NOTE(review): raw_input and the "/" integer division below are Python 2
# idioms -- this script will not run unmodified under Python 3.
Parameters=raw_input("Enter two parameters: [Fastaname],[NUM](*10000)(number of sequences in each sub-fasta), sepeated by Space: ")
# Re-prompt until the input splits into a file name and a number.
while True:
    try:
        filename=Parameters.split(' ')[0]
        Num=Parameters.split(' ')[1]
        num=int(float(Num)*10000)
        break
    except:
        Parameters=raw_input("Enter two parameters: [Fastaname],[NUM](*10000)(number of sequences in each sub-fasta), sepeated by Space: ")
start=time.time() # Timing begins
wrerr = sys.stderr.write
# Empty out (or create) the "<filename>_divided" output directory.
if os.path.exists(filename+'_divided'):
    for root, dirs, files in os.walk(filename+'_divided'):
        for name in files:
            os.remove(os.path.join(root,name))
else:
    os.mkdir(filename+'_divided')
j=-1
# Open the first sub-file; '\\' hard-codes Windows path separators.
f=open(filename+'_divided\\'+filename.replace('.fasta','')+'-1.fasta','w')
for line in open(filename,'r'):
    # NOTE(review): any line *containing* '@' is treated as a record header;
    # confirm records never contain '@' elsewhere (FASTQ quality strings
    # may), otherwise the rotation below triggers early.
    if '@' in line:
        j+=1
        if j%int(num)==0 and j!=0:
            # Every `num` records, rotate to the next sub-file (append mode).
            f=open(filename+'_divided\\'+filename.replace('.fasta','')+'-'+str(1+j/int(num))+'.fasta','a')
        f.write(line)
    else:
        f.write(line)
f.close()
end=time.time() # Timing ends
wrerr("OK, work finished in %3.2f secs\n" % (end-start))
f.close()
raw_input('Press <Enter> to close this window!')
| [
"noreply@github.com"
] | noreply@github.com |
9c4b096bbf52535f6feab8be9e0f43eab3a83ab0 | 21274a63e8c65d636d94e157488cddfeedf97512 | /hw8-1-animal.py | 7164fbc5e7d74bccadb83ce3df625cac38c0771b | [] | no_license | abdullah-alanazi/Python | beaa5d06a214df2423f3890397ca9d8ce570b552 | d7d81d43060de68a7634fac389b4f726415fc70a | refs/heads/master | 2016-09-06T20:01:01.588954 | 2015-12-06T01:49:18 | 2015-12-06T01:49:18 | 41,886,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py |
class Animal:
    # Per-animal hint tuples (ANSI-colored); each dict key is also the answer
    # the player must type.
    AList = {"elephant": ("\033[33;3mI have exceptional memory\033[0m", "\033[33;3mI am the largest land-living mammal in the world\033[0m", "\033[33;3myou can play with me in India & I've a long noise ^_*\033[0m"),
             "tiger": ("\033[33;3mI am the biggest cat\033[0m", "\033[33;3mI come in black and white or orange and black\033[0m", "\033[33;3mI'm living in jungles\033[0m "),
             "bat": ("\033[33;3mI use echo-location\033[0m", "\033[33;3mI can fly\033[0m", "\033[33;3mI see well in dark\033[0m")}
    def __init__(self, Xanimal):
        # Xanimal: the answer string; must be one of the AList keys.
        self.Xanimal = Xanimal
    def guess_who_am_i(self):
        """Interactive guessing loop: up to 3 hints, one guess per hint."""
        c= 0
        print("\033[31;1mI will give you 3 hints, guess what animal I am\033[0m")
        while (True):
            if (c < 3):
                # Show the next hint, then read a (lowercased) guess.
                print (Animal.AList[self.Xanimal][c])
                X = input("\033[35;7mWho am I?:\033[0m ").lower()
                if X != self.Xanimal:
                    print ("\n\033[35;7mNope, try again!\033[0m\n")
                    c +=1
                else:
                    print("\n\033[35;7mYou got it! I am ", X)
                    break
            else:
                # All hints used: reveal the answer and stop.
                print("\033[35;7mI'm out of hints! The answer is:\033[0m", self.Xanimal)
                break
# Driver: play one round for each of the three animals.
e = Animal("elephant")
t = Animal("tiger")
b = Animal("bat")
e.guess_who_am_i()
t.guess_who_am_i()
b.guess_who_am_i()
| [
"afa1ksa@gmail.com"
] | afa1ksa@gmail.com |
61b3acfd2b2dc593d4df188e6b56b40a0ee6f003 | 9697f67b3c38993f1512e550186f78195ac0ff4d | /sandbox/v03/drawtk.py | aa83fa017f7a4dca35da23f3f59f13bc514028b7 | [
"Apache-2.0"
] | permissive | zorost/Agentum | 20d63154a1d0b3fcf428bc8404494f9e507f3cdb | e074b9a66795936ba48074d68ec2b1d1da5e3eb4 | refs/heads/master | 2020-12-28T23:35:08.954608 | 2014-03-26T17:11:14 | 2014-03-26T17:11:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
# Module-level Tk state, created lazily by draw_init().
canvas = None
root = None
cell_size = 3  # pixel edge length of one grid cell on the canvas
cells = {}  # grid index (x, y) -> Tk canvas rectangle id
def draw_init(grid):
    """Lazily create the Tk window/canvas and one rectangle per grid cell.

    Safe to call repeatedly: the window and the cell rectangles are only
    built the first time (guarded by the module-level canvas/cells state).
    """
    global canvas, root
    if not canvas:
        dim = grid.dimensions
        root = tk.Tk()
        root.configure()
        canvas = tk.Canvas(root, width=dim[0] * cell_size, height=dim[1] * cell_size, bg='black')
        canvas.pack()
    if not cells:
        for idx, cell in grid.cells():
            # One rectangle per cell, initially colored by its current heat.
            cells[idx] = canvas.create_rectangle(
                idx[0] * cell_size,
                idx[1] * cell_size,
                (idx[0] + 1) * cell_size,
                (idx[1] + 1) * cell_size,
                #fill = 'red',
                fill = heat_to_color(cell.heat)
            )
def draw_grid(grid):
    """Recolor every cell rectangle from the grid's heat values and refresh.

    Requires draw_init(grid) to have been called first (uses the module
    canvas/cells/root state).
    """
    # 2D grid only.
    for idx, cell in grid.cells():
        canvas.itemconfigure(cells[idx], fill = heat_to_color(cell.heat))
    root.update()
    #tk.mainloop()
import matplotlib.pyplot as plt

# Lazily-built cache mapping quantized heat level -> "#rrggbb" hex string.
color_map = {}
color_steps = 20


def heat_to_color(heat, scale=3.0, cm=plt.cm.hot):
    """Map a heat value to a hex color via a matplotlib colormap.

    heat/scale is clipped to at most 1 and quantized into color_steps
    buckets. The first call fills the shared color_map cache, so the
    colormap passed to the first call wins for all later calls.
    """
    # BUGFIX: the quantization previously hard-coded 20 instead of using
    # color_steps, which would desync it from the cache if the constant
    # were ever changed.
    quant = int(min(heat / scale, 1) * color_steps)
    if not color_map:
        for step in range(color_steps + 1):
            rgb = cm(float(step) / color_steps)[:3]
            # int() is required for "%x" on Python 3 (Python 2 converted the
            # floats implicitly, with the same truncation).
            color_map[step] = "#%02x%02x%02x" % tuple(int(x * 255) for x in rgb)
    return color_map[quant]
#tk.mainloop() | [
"dev@mydeb.(none)"
] | dev@mydeb.(none) |
66fe87cc8722b0751987b14bbb644a572cd0ba9e | 0ac14167bd2f2cc0c657d9aa0a40e954cd0ff6f1 | /api/core/urls.py | 0d36b821c8121ca47067a38189d07e194eebdb0b | [] | no_license | bonifacius/my_drf_blog | 3de158a106ae558f98534d9fed809a36a2610aad | 60e7fa6720782f58bbc717b14a7d8e6dc7edad2b | refs/heads/master | 2023-04-30T14:27:41.446727 | 2021-05-15T10:13:00 | 2021-05-15T10:13:00 | 356,218,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import PostViewSet, TagDetailView, TagView, AsideView, FeedBackView, RegisterView, ProfileView, CommentView
# REST router: exposes /posts/ CRUD endpoints backed by PostViewSet.
router = DefaultRouter()
router.register('posts', PostViewSet, basename='posts')

urlpatterns = [
    path("", include(router.urls)),
    path("tags/", TagView.as_view()),
    path("tags/<slug:tag_slug>/", TagDetailView.as_view()),
    path("aside/", AsideView.as_view()),
    path("feedback/", FeedBackView.as_view()),
    path('register/', RegisterView.as_view()),
    path('profile/', ProfileView.as_view()),
    # Comments: all comments, or those of a single post selected by slug.
    path("comments/", CommentView.as_view()),
    path("comments/<slug:post_slug>/", CommentView.as_view()),
]
| [
"basmanovroman@mail.ru"
] | basmanovroman@mail.ru |
4fcdf50c43cf0c0a802c7899882d88c66afb5521 | e70b678712a355a0b51632728c7781b0bdcf29f4 | /Algorithms/Python/Best-Time-to-Buy-and-Sell-Stock.py | aa4097ebb2db64fb2c8d11bb08368e8d97f353a7 | [] | no_license | keyi/Leetcode_Solutions | b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af | 69e4e969b435ff2796bd7c4b5dad9284a853ab54 | refs/heads/master | 2020-05-21T23:36:20.450053 | 2018-11-11T03:45:28 | 2018-11-11T03:45:28 | 33,714,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) < 2:
return 0
ans, minNum = 0, prices[0]
for i in range(1, len(prices)):
if prices[i] > minNum:
ans = max(prices[i] - minNum, ans)
else:
minNum = prices[i]
return ans
| [
"yike921012@gmail.com"
] | yike921012@gmail.com |
0824939ce9c4e02f434c2ed443ca20b887c961dc | bdbb3a8a3d2f4b6c87347046b86dfd6bd1cf12c8 | /pizzas/migrations/0006_comment.py | a8dc7396c71a5ea03a123d2c204b8649595a6127 | [] | no_license | loclincy10/Pizzeria | 92aa6b7a6f4ce6d8902d93ffe62b444fdfabade0 | eeb6a396d7e5a667cdeeb93d5ad2a29427be0a90 | refs/heads/master | 2022-06-27T11:17:05.742593 | 2020-05-06T04:56:21 | 2020-05-06T04:56:21 | 260,974,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 3.0.5 on 2020-05-06 01:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Builds on the migration that deleted the previous Comment model.
    dependencies = [
        ('pizzas', '0005_delete_comment'),
    ]
    # Recreates Comment with a text body, creation timestamp, and a FK to
    # Pizza (field name 'post'); rows are removed with their pizza (CASCADE).
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('date_added', models.DateTimeField(auto_now_add=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pizzas.Pizza')),
            ],
        ),
    ]
| [
"Lothario_Clincy1@baylor.edu"
] | Lothario_Clincy1@baylor.edu |
152e0657215d3db3c16210c391cd6db5bbccef15 | 4856c4d7bae0da3865c958aeeb5c221fa4c0d584 | /client_for_threaded_server.py | 2cb63bd7363cfaf839198071ac12037a8f507b1e | [] | no_license | pjumruspun/socketTicTacToe | a6fcd45a7e411ac514ea9fe58fdaf7625981a385 | f366871f5f28f3fa2f8b66bf33ad5a2ac246f624 | refs/heads/master | 2021-02-05T07:29:09.521097 | 2020-02-28T12:28:42 | 2020-02-28T12:28:42 | 243,757,149 | 1 | 1 | null | 2020-02-28T12:27:04 | 2020-02-28T12:27:03 | null | UTF-8 | Python | false | false | 1,480 | py | import socket
import sys
def main():
    """Console client for the threaded tic-tac-toe server.

    Protocol (as exercised here): send 'play', pick a mark ('O'/'X'),
    then exchange move indices until the server's reply contains "Win".
    'bye' ends the session.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = "127.0.0.1"
    port = 8888
    try:
        s.connect((host, port))
    except:
        print("Connection error")
        sys.exit()
    print("\n===============================")
    print("input play or bye")
    # Keep prompting until the user types a valid command.
    while True:
        message = input(' -> ').strip()
        if message in ('play', 'bye'):
            break
        print('Please Enter play or bye')
    # One iteration per game; 'bye' leaves the loop (and the program).
    while message.lower().strip() != 'bye':
        s.send(message.encode())
        data = s.recv(1024).decode()
        # Choose a mark: only O or X accepted.
        while True:
            print("\n" + data)
            player = input().strip()
            if player.upper() in ('O', 'X'):
                break
            print('Please Enter O or X')
        s.send(player.encode())
        data = s.recv(1024).decode()
        # Move loop: runs until the server reports a win.
        while "Win" not in data:
            print("\nBoard:\n" + data)
            while True:
                message = input('Enter index <0-9> or <r> to display board -> ').strip()
                # Valid inputs are the cell numbers 1-9 or 'r' (redisplay).
                if message in [str(i+1) for i in range(9)] + ['r']:
                    break
                print('Please Enter number 1-9 for index, or enter <r> to display board')
            s.send(message.encode())
            data = s.recv(1024).decode()
        print(data)
        print("\n===============================")
        print("input play or bye")
        message = input(" -> ")

if __name__ == "__main__":
    main()
"pokemonlizardon@gmail.com"
] | pokemonlizardon@gmail.com |
be55b35724dee1373f80ce49cdbcb9538d913f9c | d61ad40cacad5b5f38d57bc443583dcf623fc4b0 | /blogcrawler/blogcrawler/spiders/blogspider.py | 64ce764000ebd241237c8dfa1292efc134f73f7a | [] | no_license | alexhanna/hse-scraping | 59cef4098bdfaa41c824f0cc74cc034def5517b9 | 0a8bb2e8898161166c7704e2f78bdc1fb2a3122d | refs/heads/master | 2021-01-16T19:20:38.352259 | 2013-08-22T06:18:33 | 2013-08-22T06:18:33 | 12,169,963 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/python
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from blogcrawler.items import BlogcrawlerItem
from urlparse import urlparse
class BlogSpider(CrawlSpider):
name = "blogspider"
allowed_domains = [
"wordpress.com",
"blogspot.com",
"blogger.com",
"livejournal.com",
"typepad.com",
"tumblr.com"]
start_urls = ["http://badhessian.org"]
rules = (
Rule(SgmlLinkExtractor(
allow=('/', ),
deny=('www\.blogger\.com', 'profile\.typepad\.com',
'http:\/\/wordpress\.com', '.+\.trac\.wordpress\.org',
'.+\.wordpress\.org', 'wordpress\.org', 'www\.tumblr\.com',
'en\..+\.wordpress\.com', 'vip\.wordpress\.com'),
), callback = "parse_item", follow = True),
)
def parse_item(self, response):
item = BlogcrawlerItem()
item['url1'] = urlparse(response.request.headers.get('Referer'))[1]
item['url2'] = urlparse(response.url)[1]
yield item | [
"alex.hanna@gmail.com"
] | alex.hanna@gmail.com |
cdbd67b1a12b3c7320da2aafaa87a06508e9b4de | 5ef19fdf04970ed0481ff29234a11b812b55a257 | /OS/SRT.py | 89a0d94dba43339334289d424480551cfb9d8b02 | [] | no_license | priyamshah112/Study | 636bfadee2384b39399b8b2c03349c9faf8853df | 2ea4341d8497573b014a5275d548289696fe3662 | refs/heads/master | 2021-06-28T15:19:24.002518 | 2020-10-02T07:36:24 | 2020-10-02T07:36:24 | 149,157,682 | 0 | 0 | null | 2020-10-02T07:36:25 | 2018-09-17T16:48:41 | Java | UTF-8 | Python | false | false | 1,762 | py | class Process:
def __init__(self, p_no, at, bt,wt,tat,nt,ct,rt):
self.p_no = p_no
self.at = at
self.bt = bt
self.wt =wt
self.tat =tat
self.nt =nt
self.ct=ct
self.rt=rt
def Shift(alist):
    """Sort the ready queue in place by remaining time and return it."""
    alist.sort(key=lambda proc: proc.rt)
    return alist
def main():
    """Interactive SRT (shortest-remaining-time) scheduling simulation.

    Reads n processes (arrival + burst time) from stdin, simulates with a
    1-unit quantum, then prints the Gantt chart and per-process metrics.
    NOTE(review): the arrival scan below assumes processes are entered in
    nondecreasing arrival order -- confirm before relying on the output.
    """
    n=int(input("Enter number of processes : "))
    q=1
    pt = []
    chart = []
    queue=[]
    time=0
    ap=0 #arrived processes
    rp=0 #ready processes
    done=0
    start=0
    avgwt=0
    avgtat=0
    avgnt=0
    for i in range(0,n):
        pt.insert(i,Process(i,int(input("Enter Arrival Time : ")),int(input("Enter Burst Time :")),0.0,0.0,0.0,0,0))
        pt[i].rt=pt[i].bt
    while(done<n):
        # Admit every process that has arrived by the current time.
        for i in range(ap,n):
            if time>=pt[i].at:
                queue.append(pt[i])
                ap+=1
                rp+=1
        if rp<1:
            # Idle tick (no ready process).
            # NOTE(review): charts process 0's id while idle -- looks like a
            # placeholder for "idle"; confirm intended.
            chart.append(pt[0].p_no)
            time+=1
            continue
        if start:
            # Re-sort the ready queue by remaining time (SRT preemption).
            queue = Shift(queue)
        if queue[0].rt > 0:
            # Run the shortest job for one quantum.
            for g in range(time, time+q):
                chart.append(queue[0].p_no)
            time+=q
            queue[0].rt-=q
        else:
            # Job finished: record completion time and retire it.
            pt[queue[0].p_no].ct=time
            queue.pop(0)
            done+=1
            rp-=1
        start=1
    print(chart)
    # Derive per-process metrics from completion times.
    for i in range(0,n):
        pt[i].tat = pt[i].ct-pt[i].at
        avgtat+=pt[i].tat
        pt[i].wt = pt[i].tat - pt[i].bt
        avgwt+=pt[i].wt
        pt[i].nt = pt[i].tat / pt[i].bt
        avgnt+=pt[i].nt
    print("Process no.\t AT\t BT\t WT\t TAT\t NT\t CT\t")
    for i in range(0,n):
        print(str(pt[i].p_no)+" \t\t "+str(pt[i].at)+" \t "+str(pt[i].bt)+" \t "+str(round(pt[i].wt,2))+" \t "+str(round(pt[i].tat,2))+" \t "+str(round(pt[i].nt,2))+" \t "+str(pt[i].ct))
    print("Average Waiting time",avgwt/n)
    print("Average TAT",avgtat/n)
    print("Average Normalized Time",avgnt/n)

main()
| [
"priyamshah112@gmail.com"
] | priyamshah112@gmail.com |
3569af3986a45ecc6f2342c1dc31a185ba38d237 | 4e67005ced35b46ce56fd485e1be570bcefdb99c | /Exercise1and2.py | 51c2739a4c1c9c4d6a13bf9c73087fab3ba01e98 | [] | no_license | EvaAmmann/PyCharmProjects | 314b7078d8671e48356518eed8a3d76bcd248a6a | 5fe17f35fdc066868ef3fafaccdae340777622cc | refs/heads/master | 2021-02-11T02:41:14.239476 | 2020-07-02T09:18:18 | 2020-07-02T09:18:18 | 244,443,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | #Exercise 1 and 2 (Homework.)
name = input("Dear user, what is your name? ")
age = int(input ("Dear user, what is your age? "))
nextage = 0
#Inserting the +1 so that a 60 year old gets another ten years
while nextage < age+1:
nextage = nextage + 10
yearsuntildecade=nextage-age
#Filter children out so they do not have to enter the gender
if age < 18:
print("{}, in {} years you will be {} years old!".format(name, yearsuntildecade, nextage))
else:
#Only allow valid (binary) genders
gender = input ("Please specify your gender: Enter m for male and f for female: ")
if gender == "f":
print("Thank you for entering a valid gender input!")
elif gender == "m":
print("Thank you for entering a valid gender input!")
else:
#Exit code if invalid gender is entered
print("I am sorry, you chose an invalid gender. You will exit the code immediately. Bye!")
exit()
#Filtering between men and women
if gender == "f":
print("Dear Mrs. {}, in {} years you will be {} years old!".format(name, yearsuntildecade, nextage))
else:
print("Dear Mr. {}, in {} years you will be {} years old!".format(name,yearsuntildecade,nextage))
#Question: How can I make just one line execute?
| [
"eva.m.ammann@gmail.com"
] | eva.m.ammann@gmail.com |
3eda452594630d14e50ede0ff4387b68e0cf952a | 89a8c748f0c52656f0ec1b060aed8d826f5d7e57 | /python/VPIAlgo.py | 902038fa604b8363a8fad7ef4925bf880f272a4b | [] | no_license | vlongle/BayesianTTG | d73fb8b3d4a30044291443daf4e8390489c22f15 | d4a19cbd0f5786ab044c60f972f5b2801c3b943b | refs/heads/master | 2023-03-03T21:19:46.764808 | 2021-02-09T20:59:33 | 2021-02-09T20:59:33 | 316,132,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,006 | py | from utils import *
#from scipy.special import softmax
from stateEliminationAlgo import *
class VPIAlgo(StateEliminationAlgo):
    """State-elimination bargaining with softmax proposal selection driven
    by VPI (value of perfect information).

    NOTE(review): relies on helpers brought in by wildcard imports
    (generate_proposals, eval_coalition, nature_pick_proposer,
    predict_responses, expected_* and the @jit decorator).
    """

    def generate_proposals(self):
        """Precompute every proposal each agent could make in this state."""
        for agent in self.game.agents:
            agent.proposals = generate_proposals(self.game.state, agent)

    def evaluate_coalition(self, coalition):
        """Value of a coalition given its members' true types."""
        W = [agent.agent_type for agent in coalition]
        return eval_coalition(W, self.game.tasks, False)

    @jit
    def softmax(self, x):
        """Numerically stable softmax (subtract the max before exp).

        https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
        """
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)

    def calculate_VPIs(self, proposer, best_proposals, best_value, second_best, proposals):
        """Value of perfect information for each candidate proposal.

        Under the proposer's (independent) type beliefs, knowing the true
        coalition value only changes the decision when (a) a current best
        proposal turns out worse than second_best, or (b) a non-best
        proposal turns out better than best_value; VPI is the expected gain
        over exactly those events.
        """
        VPIs = []
        for proposal in proposals:
            coalition, div = proposal
            proposer_share = div[coalition.index(proposer)]
            VPI = 0
            for agent_types in product(range(1, self.game.T + 1), repeat=len(coalition)):
                # Joint probability of this type assignment under the
                # proposer's per-agent beliefs.
                prob = reduce(mul,
                              [proposer.belief[agent][other_agent_type - 1] for
                               agent, other_agent_type in zip(coalition, agent_types)])
                predicted_reward = proposer_share * eval_coalition(list(agent_types), self.game.tasks)
                if proposal in best_proposals and predicted_reward < second_best:
                    VPI += prob * (second_best - predicted_reward)
                elif predicted_reward > best_value:
                    VPI += prob * (predicted_reward - best_value)
            VPIs.append(VPI)
        return VPIs

    def proposal_outcome(self):
        """One-step proposer process using informed beliefs.

        Nature picks a proposer; every feasible proposal is valued (the
        proposer's singleton is always feasible), then one is drawn with
        softmax probabilities over Q-value + VPI. Returns (proposal, proposer).
        """
        proposer = nature_pick_proposer(self.game.state)
        proposals = []          # candidate proposals
        proposal_values = []    # corresponding expected values
        best_value = second_best = 0
        best_proposals = set()
        for proposal in generate_proposals(self.game.state, proposer, True):
            coalition, div = proposal
            responses, predicted_reward, continuation_payoff = \
                predict_responses(proposer, proposal, self.game, ret_extra=True)
            disagree = [response == 'no' for player, response in responses.items() if player != proposer]
            if any(disagree):
                # Proposal fails: the proposer falls back to the
                # continuation (reserve) payoff.
                proposal_value = continuation_payoff[proposer]
            else:
                proposal_value = div[coalition.index(proposer)] * predicted_reward
            proposals.append(proposal)
            proposal_values.append(proposal_value)
            if proposal_value > best_value:
                second_best = best_value
                best_value = proposal_value
                # BUGFIX: was set(proposal), which unpacked the
                # (coalition, div) tuple into its elements instead of
                # storing the proposal itself (the later .add(proposal)
                # and "proposal in best_proposals" expect the tuple).
                best_proposals = {proposal}
            elif proposal_value > second_best and \
                    proposal_value < best_value:
                second_best = proposal_value
            elif proposal_value == best_value:
                best_proposals.add(proposal)

        VPIs = self.calculate_VPIs(proposer, best_proposals, best_value, second_best, proposals)
        # BUGFIX: was np.array(proposal_value) -- the *scalar* left over from
        # the last loop iteration; the Q-values must add each proposal's VPI
        # to that same proposal's value.
        QVs = np.array(VPIs) + np.array(proposal_values)
        probs = self.softmax(QVs)
        chosen = np.random.choice(range(len(proposals)), p=probs)
        return proposals[chosen], proposer

    def response(self, proposal):
        """Each coalition member softmax-chooses between accepting its
        offered share and rejecting for its expected continuation payoff."""
        coalition, div = proposal
        responses = {}
        for agent in coalition:
            accept_reward = div[coalition.index(agent)] * expected_coalition_value(coalition, agent, self.game)
            reject_reward = expected_continuation_payoff(agent, self.game)[agent]
            # Probabilities align with ['yes', 'no'] below.
            probs = self.softmax([accept_reward, reject_reward])
            agent_response = np.random.choice(['yes', 'no'], p=probs)
            responses[agent] = agent_response
        return responses
| [
"vietlong.lenguyen@gmail.com"
] | vietlong.lenguyen@gmail.com |
bf8e991732fe53af4d0697cfcb8adfadac87a5d2 | 9b6dce3f247d384355a5376ead457cec9c1598b2 | /pbgpp/Output/Filter.py | 7326e6ff5ea5af88ddb0a20c565061dee3c6b9a0 | [
"Apache-2.0"
] | permissive | deleskie/pbgp-parser | 44841e0913ae1654f8b69859d63f03f8e780059b | 4606f91a63c16e44da6909c94fd0ab3c88d8fa70 | refs/heads/master | 2021-01-11T09:16:51.516823 | 2017-02-03T08:23:56 | 2017-02-03T08:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | #
# This file is part of PCAP BGP Parser (pbgpp)
#
# Copyright 2016 DE-CIX Management GmbH
# Author: Tobias Hannaske <tobias.hannaske@de-cix.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class BGPFilter:
    """Base container for a BGP message filter and its matching values."""

    # Define filter types
    FILTER_ERROR = 0
    FILTER_NEXT_HOP = 1

    def __init__(self, values=None):
        """values: list of values this filter matches against (default: empty).

        BUGFIX: the old signature used the mutable default "values=[]",
        which shared one list across every instance created without
        arguments; each instance now gets its own list.
        """
        self.values = values if values is not None else []
| [
"tobias.hannaske@de-cix.net"
] | tobias.hannaske@de-cix.net |
a9cd9dd578b1c065b077eaf2fea10c8ab0f16b43 | 8d146decd35e0f376d8107343d38e1662b94a737 | /easy_django/learning_templates/settings.py | b60d15ae7e34095e26614e979f7cc723ee524f58 | [] | no_license | aLLGold/deployment-exa | 6bc3c19abb30c825aef0b5c17bf893dce2b29ea7 | f8279c61b4fb06787bf7b69be206ba5f3615e579 | refs/heads/master | 2022-08-16T17:29:49.787034 | 2020-05-22T05:31:12 | 2020-05-22T05:31:12 | 262,042,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | """
Django settings for learning_templates project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-wide template directory (<project root>/templates).
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source control; for any real
# deployment move it to an environment variable and rotate it.
SECRET_KEY = '+#%dpk3zcz$3&3a%4_6owyei^*kt%s@nwzrt36am55)9b0w!y7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; must list the real host names in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'basic_app'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_templates.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates come from TEMPLATE_DIR; APP_DIRS also
        # picks up each installed app's templates/ directory.
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'learning_templates.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"jamba0702@gmail.com"
] | jamba0702@gmail.com |
fa81b3e05faf268c458edae4c6bb118c72a07655 | 48eb03edddd57415e25e84b562afea5a6ad95980 | /03_face_recognition.py | 679fe674bbc756106266d5c0231d6ae200eb92b4 | [] | no_license | sayeedap/Paperless-Ticketing-Using-Face-Recognition-System | 12a351c5c81bf973394a0f851632b0996fc647d8 | 2798b9b65b1924c6810f78b743110c731a7dcf62 | refs/heads/master | 2021-10-02T19:03:23.750345 | 2018-11-30T08:35:36 | 2018-11-30T08:35:36 | 151,741,263 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,558 | py | import cv2
import numpy as np
import os
import mysql.connector
import datetime
import time
# MySQL connection shared by the whole script (module-level globals used by
# logged() and the login loop below).
# NOTE(review): credentials are hard-coded (root with an empty password);
# move them to configuration before deploying.
mydb = mysql.connector.connect(
  host="localhost",
  user="root",
  passwd="",
  database="python"
)
mycursor = mydb.cursor()
def logged(station_id,station_name):
    """Run the live face-recognition loop for a logged-in metro station.

    Opens the default webcam, recognises enrolled customers with the trained
    LBPH model, overlays their ticket status on each frame, and on ESC
    activates the last recognised customer's ticket timestamp in MySQL.

    NOTE(review): ``status`` is only assigned inside the recognition branch,
    so pressing ESC before any face has been matched raises a NameError at
    the ``if status==0`` check.  There is also no ``break`` after ESC, so the
    loop keeps calling ``cam.read()`` on a released camera -- confirm the
    intended exit behaviour.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer/trainer.yml')
    cascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascadePath);
    font = cv2.FONT_HERSHEY_SIMPLEX
    #iniciate id counter
    # (``id`` shadows the built-in of the same name inside this function.)
    id = 0
    #mycursor.execute("SELECT id, name FROM test")
    #names = ['x']
    #myresult = mycursor.fetchall()
    #for x in myresult:
    #print(x)
    # names.append(x)
    # names related to ids: example ==> Marcelo: id=1, etc
    #names = ['None', 'Sayeed', 'suthi', 'amru', 'anil', 'manisha','tree']
    # Initialize and start realtime video capture
    cam = cv2.VideoCapture(0)
    cam.set(3, 640) # set video widht
    cam.set(4, 480) # set video height
    # Define min window size to be recognized as a face
    minW = 0.1*cam.get(3)
    minH = 0.1*cam.get(4)
    while True:
        ret, img =cam.read()
        # Default overlay: red border + "contact admin" until a face matches.
        border_color=(0,0,255)#bgr
        disp_status='Please Contact Administrative'
        img = cv2.flip(img, 1) # Flip vertically
        # Yellow header banner with the station name.
        cv2.rectangle(img,(0,0),(640,60),(0,255,255),3)
        cv2.putText(img,'Welcome To '+station_name+' Station',(5,45), font, 1,(255,255,255),2,cv2.LINE_AA)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize = (int(minW), int(minH)),
           )
        #print(len(faces))
        #if len(faces)!=0:
        #    disp_status='###########################'
        #else:
        #    disp_status='@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
        for(x,y,w,h) in faces:
            #cv2.putText(img,'Welcome To '+station_name+' Station',(5,45), font, 1,(255,255,255),2,cv2.LINE_AA)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
            id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
            ids="unknown"
            # Check if confidence is less them 100 ==> "0" is perfect match
            if (confidence > 30):
                # Look up the customer by recognised id, restricted to this
                # station and to tickets that are new (0) or active (1).
                sql2 = "SELECT name, status FROM customer WHERE id = %s and fromstation=%s and (status=%s or status=%s )"
                adr = (id,station_id,0,1 )
                mycursor.execute(sql2, adr)
                myresult = mycursor.fetchall()
                for xy in myresult:
                    ids = xy[0]
                    status=xy[1]
                    if status==0:
                        # Fresh ticket: green border, will be activated on ESC.
                        disp_status="Success:Ticket activated"
                        border_color=(0,255,0)
                    else:
                        disp_status="Your time stamp already started"
                        border_color=(0,255,255)
                cv2.rectangle(img,(20,420),(620,639),border_color,3)
                cv2.putText(img,disp_status,(30,460), font, 1,border_color,2,cv2.LINE_AA)
                #time.sleep( 5 )
                #if status==0:
                #    t = Timer(30.0, hello)
                #    t.start()
                confidence = " {0}%".format(round(100 - confidence))
            else:
                # Low-confidence match: treat as unknown visitor.
                ids = "unknown"
                border_color=(0,0,255)
                disp_status='Please Contact Administrative'
                confidence = " {0}%".format(round(100 - confidence))
                cv2.rectangle(img,(20,420),(620,639),border_color,3)
                cv2.putText(img,disp_status,(30,460), font, 1,border_color,2,cv2.LINE_AA)
            cv2.rectangle(img, (x,y), (x+w,y+h), border_color, 2)
            cv2.putText(img, str(ids), (x+5,y-5), font, 1, (255,255,255), 2)
            cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
            #cv2.namedWindow('Entry Level', cv2.WND_PROP_FULLSCREEN)
            #cv2.setWindowProperty('Entry Level', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        #else:
            #cv2.putText(img,'Welcome To '+station_name+' Station',(5,45), font, 1,(255,255,255),2,cv2.LINE_AA)
        cv2.imshow('camera',img)
        k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
        if k == 27:
            #code in entry point
            # Activate the ticket: set status=1 and record the start timestamp.
            if status==0:
                date3=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                #print (date3)
                sql3 = "UPDATE customer SET status = '1', date=%s WHERE id = %s"
                stat = (date3,id, )
                mycursor.execute(sql3,stat)
                mydb.commit()
            print("\n [INFO] Exiting Program and cleanup stuff")
            cam.release()
            cv2.destroyAllWindows()
print("***********************WELCOME TO KOCHI METRO***********************")
# Console login loop: a station operator authenticates against the `station`
# table, after which the recognition loop runs for that station.
# NOTE(review): passwords are compared in plain text in the database; the
# query itself uses parameter binding (safe from SQL injection), but the
# stored credentials should be hashed.
while(True):
    user_name = input('\n Enter user name : ')
    password = input('\n Enter password : ')
    sql3 = "SELECT * FROM station WHERE user_name = %s and password=%s"
    login = (user_name,password,)
    mycursor.execute(sql3, login)
    myresult = mycursor.fetchall()
    validate=len(myresult)
    if validate==1:
        # Exactly one matching station row: remember its id/name.
        for x in myresult:
            station_id=x[0]
            station_name=x[1]
        print ("\n***********************Welcome",station_name,"***********************")
        logged(station_id,station_name)
        break
    else:
        print("\n[INFO] Please enter valid username or password")
| [
"noreply@github.com"
] | noreply@github.com |
17154c3406f2ba8d861e1e362d362380cd84198f | 5ecc2a46c53bf2b65dd1fac65a772647784b5ef5 | /venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/io/__init__.py | 684ae18a05c5288ad3199755726ad1af1fe49c08 | [] | no_license | kanungosuyash/CSCI527game | 8578185853d14aebe04e099ab056da8c5233d8de | 7fbc9da0360756402fa01d6eebb87a8bb9236d71 | refs/heads/master | 2023-08-02T12:40:28.694846 | 2021-09-13T16:11:49 | 2021-09-13T16:11:49 | 401,088,055 | 1 | 0 | null | 2021-09-13T00:19:46 | 2021-08-29T16:22:30 | Python | UTF-8 | Python | false | false | 3,173 | py | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.io namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import gfile
from tensorflow.python.framework.graph_io import write_graph
from tensorflow.python.lib.io.tf_record import TFRecordCompressionType
from tensorflow.python.lib.io.tf_record import TFRecordOptions
from tensorflow.python.lib.io.tf_record import TFRecordWriter
from tensorflow.python.lib.io.tf_record import tf_record_iterator
from tensorflow.python.ops.data_flow_ops import PaddingFIFOQueue
from tensorflow.python.ops.data_flow_ops import PriorityQueue
from tensorflow.python.ops.data_flow_ops import QueueBase
from tensorflow.python.ops.data_flow_ops import RandomShuffleQueue
from tensorflow.python.ops.gen_decode_proto_ops import decode_proto_v2 as decode_proto
from tensorflow.python.ops.gen_encode_proto_ops import encode_proto
from tensorflow.python.ops.gen_image_ops import decode_and_crop_jpeg
from tensorflow.python.ops.gen_image_ops import decode_bmp
from tensorflow.python.ops.gen_image_ops import decode_gif
from tensorflow.python.ops.gen_image_ops import decode_jpeg
from tensorflow.python.ops.gen_image_ops import decode_png
from tensorflow.python.ops.gen_image_ops import encode_jpeg
from tensorflow.python.ops.gen_image_ops import extract_jpeg_shape
from tensorflow.python.ops.gen_io_ops import matching_files
from tensorflow.python.ops.gen_io_ops import write_file
from tensorflow.python.ops.gen_parsing_ops import decode_compressed
from tensorflow.python.ops.gen_parsing_ops import parse_tensor
from tensorflow.python.ops.gen_string_ops import decode_base64
from tensorflow.python.ops.gen_string_ops import encode_base64
from tensorflow.python.ops.image_ops_impl import decode_image
from tensorflow.python.ops.image_ops_impl import encode_png
from tensorflow.python.ops.image_ops_impl import is_jpeg
from tensorflow.python.ops.io_ops import read_file
from tensorflow.python.ops.io_ops import serialize_tensor
from tensorflow.python.ops.parsing_config import FixedLenFeature
from tensorflow.python.ops.parsing_config import FixedLenSequenceFeature
from tensorflow.python.ops.parsing_config import RaggedFeature
from tensorflow.python.ops.parsing_config import SparseFeature
from tensorflow.python.ops.parsing_config import VarLenFeature
from tensorflow.python.ops.parsing_ops import decode_csv
from tensorflow.python.ops.parsing_ops import decode_json_example
from tensorflow.python.ops.parsing_ops import decode_raw_v1 as decode_raw
from tensorflow.python.ops.parsing_ops import parse_example
from tensorflow.python.ops.parsing_ops import parse_sequence_example
from tensorflow.python.ops.parsing_ops import parse_single_example
from tensorflow.python.ops.parsing_ops import parse_single_sequence_example
from tensorflow.python.ops.sparse_ops import deserialize_many_sparse
from tensorflow.python.ops.sparse_ops import serialize_many_sparse
from tensorflow.python.ops.sparse_ops import serialize_sparse
from tensorflow.python.training.input import match_filenames_once
del _print_function
| [
"msingh60@usc.edu"
] | msingh60@usc.edu |
c1d2f54840345e337ecd6b95f9cf7e884483c124 | 5c967a181f978875bb7d812da39d7d356d4aeea4 | /models/nasbench_101/base_ops.py | 8262d85ca8708a573f00b53542ae86d1f9ca59c8 | [
"MIT"
] | permissive | czhhbp/to_share_or_not_to_share | 9d8540c4109c3855911ef24b472bfda3f0be33e1 | 538a50636c2d293215fe2cd9097ea72b50d843a0 | refs/heads/master | 2022-09-23T08:18:48.389888 | 2020-06-02T15:29:22 | 2020-06-02T15:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,974 | py | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.utils import _pair
class MaxPool(nn.Module):
    """Fixed max-pooling cell.

    Thin wrapper around ``nn.MaxPool2d``; the pooling layer is kept inside an
    ``nn.Sequential`` so module naming (``op.0``) stays as before.
    """
    def __init__(self, kernel_size, stride, padding):
        super(MaxPool, self).__init__()
        pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
        self.op = nn.Sequential(pool)

    def forward(self, x, *args):
        # Extra positional arguments (channel/kernel hints used by sibling
        # ops) are accepted for interface compatibility and ignored here.
        return self.op(x)
class ConvBNRelu(nn.Module):
    """Conv -> BatchNorm -> ReLU with run-time selectable width / kernel size.

    Both sub-layers are the variable-size variants (``VarConv2d`` /
    ``VarBatchNorm2d``), so one over-provisioned cell sized for ``c_out_max``
    and the largest kernel can emulate any smaller configuration by passing
    ``c_out`` / ``kernel_size`` to :meth:`forward`.
    """
    def __init__(self, c_in_max, c_out_max, kernel_size, stride, padding,
                 bn_momentum, bn_eps, conv_bias=False, bn_affine=True):
        super(ConvBNRelu, self).__init__()
        self.conv = VarConv2d(c_in_max, c_out_max, kernel_size,
                              stride=stride, padding=padding, bias=conv_bias)
        self.bn = VarBatchNorm2d(c_out_max, affine=bn_affine,
                                 momentum=bn_momentum, eps=bn_eps)
        self.relu = nn.ReLU(inplace=False)
        self.c_in_max = c_in_max
        self.c_out_max = c_out_max
        self.kernel_size = kernel_size

    def forward(self, x, c_out=None, kernel_size=None):
        # Fall back to the maximal configuration when no override is given.
        out_channels = self.c_out_max if c_out is None else c_out
        k = self.kernel_size if kernel_size is None else kernel_size
        in_channels = x.size(1)
        out = self.conv(x, in_channels, out_channels, k)
        out = self.bn(out, c_in=out_channels)
        return self.relu(out)
class VarConv2d(Conv2d):
    """``Conv2d`` whose effective channel counts and kernel size are chosen
    per forward call.

    Weights are allocated for the maximal configuration
    (``c_out_max`` x ``c_in_max`` x ``kernel_size_max``); :meth:`forward`
    slices a centred sub-kernel of the requested size, so smaller
    configurations share a subset of the same parameters.
    """
    def __init__(self, c_in_max, c_out_max, kernel_size_max, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        k_max = _pair(kernel_size_max)
        super(VarConv2d, self).__init__(c_in_max, c_out_max, k_max,
                                        _pair(stride), _pair(padding),
                                        _pair(dilation), groups, bias)
        self.k_max = k_max

    def forward(self, x, c_in, c_out, kernel_size):
        kh, kw = self.k_max
        half = kernel_size // 2
        # Centred slice of the full weight tensor: the first `c_out` output
        # and `c_in` input channels, and a kernel_size x kernel_size window
        # around the middle of the maximal kernel.
        weight = self.weight[:c_out, :c_in,
                             kh // 2 - half:kh // 2 + half + 1,
                             kw // 2 - half:kw // 2 + half + 1]
        # NOTE(review): the bias (if allocated) is deliberately not applied,
        # matching the original implementation; padding tracks kernel size
        # to preserve the spatial resolution for odd kernels.
        return F.conv2d(x, weight, None, self.stride, half,
                        self.dilation, self.groups)
class VarBatchNorm2d(BatchNorm2d):
    """
    BatchNorm2d with variable input and output size
    """
    # Buffers (running_mean/var) and affine parameters are sized for
    # c_in_max; forward() normalises only the first `c_in` channels.  The
    # momentum/num_batches_tracked handling below mirrors the upstream
    # torch BatchNorm2d.forward implementation.
    def __init__(self, c_in_max, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(VarBatchNorm2d, self).__init__(c_in_max, eps, momentum, affine,
                                             track_running_stats)
    def forward(self, x, c_in):
        # `x` is expected to have exactly `c_in` channels so the sliced
        # buffers line up with the input.
        self._check_input_dim(x)
        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that if gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        # Slicing produces views, so in training mode F.batch_norm updates
        # the first `c_in` entries of the running buffers in place.
        return F.batch_norm(
            x, self.running_mean[:c_in], self.running_var[:c_in], self.weight[:c_in] if self.affine else None,
            self.bias[:c_in] if self.affine else None, self.training or not self.track_running_stats,
            exponential_average_factor, self.eps)
| [
"alois.pourchot@gleamer.ai"
] | alois.pourchot@gleamer.ai |
6013c6c9a4abbc2aae07194e23f3416551465b96 | 0feaa7c38e85dbe6afc9d88eadf811d22be683e0 | /imgmeta/views.py | c4f0f4e66be9640602ce5c9be80dceae1a644e50 | [] | no_license | shreyansh-sinha/Cloud-Assignment2-Image-Metadata-App | 257c2ce66a2d1d5a4180ef2257c0e6c081b36811 | 3f44a8ff82b96615a5649885d274127496c23acc | refs/heads/main | 2023-01-09T21:41:14.824530 | 2020-11-12T14:02:29 | 2020-11-12T14:02:29 | 312,292,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from PIL import Image
from PIL.ExifTags import TAGS
from .models import *
from .forms import *
from pymongo import MongoClient
# Shared MongoDB handle used by the search view for image-metadata queries.
# NOTE(review): the connection string embeds credentials and a public IP in
# source control; move it to settings/environment configuration.
uri = "mongodb://root:root@52.188.19.176:27017/?authSource=admin&authMechanism=SCRAM-SHA-256"
client = MongoClient(uri)
mydb = client["clouddatabase"]
mycol = mydb["imagemetadata"]
def returnimgfrompath(imagenamelist):
    """Map absolute image file paths back to their ``ImageSet`` rows.

    For every path in ``imagenamelist`` (in order), return the first
    ``ImageSet`` whose stored image path matches; paths with no match are
    skipped.
    """
    # Build the lookup once: path -> first ImageSet with that path.  This is
    # O(n + m) instead of the previous nested-scan O(n * m), and drops the
    # leftover debug print(img.id) that ran for every comparison.
    by_path = {}
    for img in ImageSet.objects.all():
        by_path.setdefault(str(img.image.path), img)
    return [by_path[name] for name in imagenamelist if name in by_path]
@login_required(login_url='login')
def imgsearchres(request,searchstr):
    """Search image metadata in MongoDB and render the matching images.

    ``searchstr`` is a newline-separated list of ``attr=value`` pairs; the
    pairs are OR-ed together into one Mongo query.  Matched documents carry
    an ``imgpath`` which is mapped back to ImageSet rows for display.
    """
    querystrlist = searchstr.splitlines()
    isinvalid = False
    isempty = False
    imageslist = []
    # Validity check: the total number of '=' must equal the line count.
    # NOTE(review): a line with two '=' plus a line with none still passes
    # this check and then crashes on the unpack below -- confirm inputs.
    if searchstr.count("=") != len(querystrlist):
        isinvalid = True
    else:
        querylist = []
        for st in querystrlist:
            attr, val = st.split('=')
            # Trim one trailing space from the attribute and one leading
            # space from the value, to accept the "attr = value" style.
            if attr[-1] == " ":
                attr = attr[:len(attr)-1]
            if val[0] == " ":
                val = val[1:]
            querylist.append({attr:val})
        # A single pair is a direct match; several pairs become a $or query.
        if(len(querylist) == 1):
            query = querylist[0]
        else:
            query = {"$or":querylist}
        print(query)
        imagequery = mycol.find(query)
        imagequerylist = []
        for i in imagequery:
            imagequerylist.append(i)
        imagenamelist = [i.get("imgpath") for i in imagequerylist]
        imageslist = returnimgfrompath(imagenamelist)
        if len(imageslist)==0:
            isempty=True
        else:
            isempty=False
    context = {'empty' : isempty, 'invalid': isinvalid, 'images' : imageslist}
    return render(request, 'imgsearchres.html', context)
@login_required(login_url='login')
def imgsearch(request):
    """Render the search form; on a valid POST redirect to the results page."""
    form = SearchForm(request.POST)
    if request.method == 'POST' and form.is_valid():
        search_text = form.cleaned_data['searchtext']
        return redirect('/imgsearchres/' + str(search_text))
    # Invalid POST or plain GET: (re-)render the form.
    # NOTE(review): as in the original, the form is bound to request.POST
    # even on GET (an empty QueryDict); kept to preserve behaviour exactly.
    return render(request, 'imgsearch.html', {'form': form})
def imgupload(request):
if request.method=='POST':
form = ImageForm(request.POST,request.FILES)
if form.is_valid():
form.save(commit=True)
return redirect('index')
else:
context = {'form' : form}
return render(request, 'imgupload.html', context)
else:
form = ImageForm(request.POST,request.FILES)
context = {'form' : form}
return render(request, 'imgupload.html', context)
| [
"shreyanshsinha2@gmail.com"
] | shreyanshsinha2@gmail.com |
6fcf2dcd7764519a48a71cb3c75717cb30fb2c75 | 840bb24df4cad5ff33bc5ff54a7422b0a576c118 | /Server/webplagiarism/plagiarism_webapp/main/legacy/thresholdEsamble.py | bfc89b20e916dd2e4ee121bfbe416d30fd94b02d | [] | no_license | Mr-Rafo/PlagiarismDetectionAndroid | 6d783880e6c2fad694af10308721610ec5ca8b55 | e4e2a8b702b82bde84efb7db08ff50bbb6d87615 | refs/heads/main | 2023-08-24T07:55:47.521904 | 2021-09-28T14:32:04 | 2021-09-28T14:32:04 | 446,541,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,169 | py | from itertools import combinations
import numpy as np
import csv
import os
#calcola i valori soglia e li salva all'interno di un csv
def threshold(nameCsvInput,t,p,n, k,contoDi0,contoDi01,contoDi02,contoDi03,contoDi04,contoDi05,contoDi06,contoDi07,contoDi08,contoDi09,contoDi1 ):
    """Accumulate per-threshold counts for the mean of three metric columns.

    For every row of ``nameCsvInput`` the values in columns ``t``, ``p`` and
    ``n`` are rounded, averaged and rounded again to one decimal; the
    ``contoDi*`` accumulators count how often each threshold value
    (0, 0.1, ..., 1) occurs.  When the scan for the last threshold finishes
    the counts are appended to ``pareto<t><p><n>.csv`` via generateCSV() and
    the recursion terminates with ``k == -1``.

    NOTE(review): the control flow (k cycling inside the row loop plus
    tail recursion on the last row) is preserved verbatim from the original;
    only the counting bug below was fixed.
    """
    arrayThreshold= [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,0.9, 1]
    arrayDataset = readCSV(nameCsvInput)
    lenArray= len(arrayDataset)
    tp = []
    if(k==-1):
        # Sentinel call: recursion finished, nothing to do.
        variabile=1
    else:
        # Rounded mean of the three metric columns, one value per row.
        for i in range (0,lenArray):
            value=float(arrayDataset[i][t])
            valueRound = round(value,1)
            valueTwo=float(arrayDataset[i][p])
            valueRoundTwo = round(valueTwo,1)
            valueThree = float(arrayDataset[i][n])
            valueRoundThree= round(valueThree,1)
            sum = valueRound+ valueRoundTwo+ valueRoundThree
            mean = sum/3
            tp.append(round(mean,1))
        for j in range(0, len(tp)):
            if(k==11 and j==0):
                break
            if(tp[j] == arrayThreshold[k]):
                # BUGFIX: this first test read `tp[j] == 0.1` (a copy-paste of
                # the next branch), so threshold 0 was never counted and 0.1
                # was counted twice.  contoDi0 must count the value 0.
                if(tp[j] == 0):
                    contoDi0 +=1
                if(tp[j] == 0.1):
                    contoDi01 +=1
                if(tp[j] == 0.2):
                    contoDi02 +=1
                if(tp[j] == 0.3):
                    contoDi03 +=1
                if(tp[j] == 0.4):
                    contoDi04 +=1
                if(tp[j]==0.5):
                    contoDi05 +=1
                if(tp[j]==0.6):
                    contoDi06 +=1
                if(tp[j]==0.7):
                    contoDi07 +=1
                if(tp[j]==0.8):
                    contoDi08 +=1
                if(tp[j]==0.9):
                    contoDi09 +=1
                if(tp[j]==1):
                    contoDi1 +=1
            k+=1
            if(k==11):
                k=0
            if(j == len(arrayDataset)-1):
                if(k==10):
                    # All thresholds scanned: persist the counts and stop.
                    nameCsv = "pareto" + str(t) + str(p) + str(n)+".csv"
                    generateCSV(nameCsv,contoDi0, contoDi01,contoDi02,contoDi03,contoDi04,contoDi05,contoDi06,contoDi07,contoDi08,contoDi09,contoDi1)
                    threshold(nameCsvInput,t,p,n,-1,0,0,0,0,0,0,0,0,0,0,0)
                k+=1
                threshold(nameCsvInput,t,p,n,k,contoDi0,contoDi01,contoDi02,contoDi03,contoDi04,contoDi05,contoDi06,contoDi07,contoDi08,contoDi09,contoDi1)
#genera il numpyarray per calcolare il pareto front
def generateNP(nFile):
    """Load ``ensamble/<nFile>`` and return an (N, 2) integer numpy array.

    The file layout written by generateCSV() in two passes is: row 0 holds
    the first count row and row 2 the second; row 1 is ignored.  Column i of
    rows 0 and 2 becomes output row i.
    """
    script_dir = os.path.dirname(__file__)  # <-- absolute dir the script is in
    abs_file_path = os.path.join(script_dir, 'ensamble/' + nFile)
    with open(abs_file_path) as csv_file:
        rows = list(csv.reader(csv_file, delimiter=','))
    # Build the whole array in one allocation instead of the previous
    # np.append-inside-a-loop pattern, which copied the array every
    # iteration (O(n^2) overall).
    first_row, second_row = rows[0], rows[2]
    pairs = [[int(first_row[i]), int(second_row[i])] for i in range(len(first_row))]
    return np.array(pairs)
#permette di leggere i csv dei dataset su cui si vuole effettuare l'analisi
def readCSV(nameCsv):
    """Read a semicolon-delimited dataset CSV located next to this script.

    Returns every non-empty row after the header, as lists of strings.
    """
    script_dir = os.path.dirname(__file__)  # absolute dir the script is in
    abs_file_path = os.path.join(script_dir, nameCsv)
    with open(abs_file_path) as csv_file:
        rows = [row for row in csv.reader(csv_file, delimiter=';')]
        # Drop the header row (raises IndexError on an empty file, as before).
        rows.pop(0)
        # Blank lines come back as empty lists; filter them out.
        return [row for row in rows if row]
#permette di generare i csv per il pareto front
def generateCSV(nameCsv, contoDi0, contoDi01, contoDi02, contoDi03, contoDi04,
                contoDi05, contoDi06, contoDi07, contoDi08, contoDi09, contoDi1):
    """Append one row of per-threshold counts to ``ensamble/<nameCsv>``."""
    target = os.path.join(os.path.dirname(__file__), 'ensamble/' + nameCsv)
    counts = [contoDi0, contoDi01, contoDi02, contoDi03, contoDi04, contoDi05,
              contoDi06, contoDi07, contoDi08, contoDi09, contoDi1]
    with open(target, 'a') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(counts)
#calcola il pareto front
def identify_pareto(scores):
    """Return the index of one representative point on the Pareto front.

    ``scores`` is an (N, 2) array of objective values to maximise.  A point
    is on the front when no other point is >= in both objectives and > in at
    least one.  Rather than returning the whole front, this applies the
    project's heuristic for picking a single threshold index:

    * front of 3 or more points: the second front id, unless it is <= 4,
      in which case the third;
    * front of 2 points: the first front id, unless it is <= 4, in which
      case the second;
    * front of 1 point: that id.

    Returns None for an empty front (cannot occur for non-empty input).
    """
    # Count number of items
    population_size = scores.shape[0]
    # Create a NumPy index for scores on the pareto front (zero indexed)
    population_ids = np.arange(population_size)
    # All items start off labelled as on the Pareto front.
    pareto_front = np.ones(population_size, dtype=bool)
    for i in range(population_size):
        for j in range(population_size):
            # Check if point 'i' is dominated by point 'j'.
            if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
                # j dominates i: drop 'i' from the front, stop comparing.
                pareto_front[i] = 0
                break
    front_ids = population_ids[pareto_front]
    # Heuristic selection of a single id from the front (see docstring).
    if len(front_ids) >= 3:
        if front_ids[1] <= 4:
            return front_ids[2]
        return front_ids[1]
    if len(front_ids) == 2:
        if front_ids[0] <= 4:
            return front_ids[1]
        return front_ids[0]
    # NOTE: the original also had a `len == 3` branch here; it was
    # unreachable because the `>= 3` branch above already returns for that
    # case, so it has been removed as dead code.
    if len(front_ids) == 1:
        return front_ids[0]
def main():
    """Run the ensemble threshold analysis for every 3-metric combination.

    Dataset columns 4..8 hold the cosine, jaccard, sordice, overlap and jaro
    similarity scores.  For each 3-column combination the per-threshold
    counts are accumulated over the TP and FP datasets (threshold()), the
    resulting pareto CSV is read back (generateNP()) and a representative
    threshold index is selected (identify_pareto()).

    Returns a pair (combination labels, selected pareto indices).
    """
    # Column index -> human-readable metric name.
    metric_names = {4: "cosine", 5: "jaccard", 6: "sordice", 7: "overlap", 8: "jaro"}
    arrayAllCombinazioni = []
    arrayAllPareto = []
    for first, second, third in combinations(sorted(metric_names), 3):
        # Accumulate the per-threshold counts for true and false positives;
        # each call appends one row to pareto<first><second><third>.csv.
        threshold("datasetTP.csv", first, second, third, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        threshold("datasetFP.csv", first, second, third, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        # Track the combination label (replaces the old 10-branch if chain).
        arrayAllCombinazioni.append("/".join(metric_names[m] for m in (first, second, third)))
        # Read the two count rows back and pick the pareto-front index.
        nameCsv = "pareto{}{}{}.csv".format(first, second, third)
        arrayAllPareto.append(identify_pareto(generateNP(nameCsv)))
    return arrayAllCombinazioni, arrayAllPareto
| [
"58905671+AnttGiam@users.noreply.github.com"
] | 58905671+AnttGiam@users.noreply.github.com |
e1943ace64dd59945b38d974a17a4157d58023c0 | ad96dedf8544e4fab2b0359611616ccbb5bc86e6 | /HAMO_pred.py | 63e2987384d30c39030e98ff43e3b58097033ee4 | [
"MIT"
] | permissive | siddhanthaldar/EEG_based_activity_recognition | f499714c38eddb8aaf6f03a03227bb5a3a0fb8f4 | 3ed6b7f9e0a7c2f7f2d719aa504aa4bdfda062cd | refs/heads/master | 2020-03-23T14:05:00.726853 | 2018-09-22T18:17:39 | 2018-09-22T18:17:39 | 141,655,000 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | import numpy as np
import pandas as pd
import os
from keras.models import load_model
# Window length (samples per prediction window) and the EEG channels used.
# NOTE(review): these must match the values the model in `filepath` was
# trained with -- the filename encodes WL=150 and the same electrode list.
WL = 150
col_list = ['F8', 'F3', 'AF3']
out_list = ['Nothing', 'Push', 'Lift', 'Drop']
D = len(col_list)
no_of_outputs = len(out_list)
#Reading data
#generalise window size, no. of outputs, no_of_channels etc.
req_arr_list = []
outputs_list = []
# One CSV of samples per EDF recording; strip the ".edf" extension.
files_list = [file[:-4] for file in os.listdir('./EDF_files')]
for i in files_list:
    df = pd.read_csv('./csvs/sample_'+ i + '.csv', usecols = col_list)
    # Transpose to (channels, samples).
    arr = np.transpose(df.as_matrix())
    print('no_of_sensors, no_of_samples')
    print(arr.shape)
    # Truncate to a whole number of windows.
    total_len = int(arr.shape[1]/WL) * WL
    print(total_len)
    temp = arr[:,0:total_len]
    req_arr_list.append(temp)
# Concatenate all recordings along the time axis.
req_arr = np.concatenate(tuple(req_arr_list), axis = 1)
img_list = []
no_of_windows = int(req_arr.shape[1]/WL)
print(no_of_windows)
# Slice the signal into non-overlapping (channels x WL) windows.
for i in range(no_of_windows):
    img_list.append(req_arr[:,i*WL:(i + 1) * WL])
    print(i)
# Shape expected by the CNN: (windows, channels, WL, 1).
img_arr = np.asarray(img_list).reshape((-1,D,WL,1))
print(img_arr.shape)
no_of_images = img_arr.shape[0]
# Normalise with the statistics saved at training time.
train_mean = np.load('train_mean.npy')
train_std = np.load('train_std.npy')
img_arr = (img_arr -train_mean.reshape((-1,D,WL,1)))/train_std.reshape((-1,D,WL,1))
filepath = "./models/WL=150_files_['ktz_lift-2', 'ktz_push-2', 'ktz_lift-1', 'ktz_push-1', 'ktz_drop-1', 'ktz_drop-2', 'ktz_neutral-1']_electrodes_['F8', 'F3', 'AF3']_lr_0.001_filter_no._[50, 50, 20]_filt_frac_img_[0.25, 0.25]_drop_0.2_one_d_400_epochs_400.h5"
model = load_model(filepath)
pred = model.predict(img_arr)
print(pred)
| [
"siddhanthaldar14@gmail.com"
] | siddhanthaldar14@gmail.com |
79dcf66b9517d6c9857138b38aa4bebd074af7e9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy2781.py | 63f0d647daa70d02a644d9fe38bd1a0e985c5100 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,238 | py | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Element-wise XOR of two bit strings.

    The result is reversed relative to the inputs (least significant bit
    first), exactly as in the original implementation.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build a bit-flip oracle for the boolean function ``f``.

    The circuit has ``n`` control qubits and one target qubit; for every
    n-bit input ``rep`` with ``f(rep) == "1"`` a multi-controlled Toffoli
    flips the target, with X gates before/after re-mapping the control
    pattern so the MCT fires exactly on ``rep``.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map the |rep> pattern onto all-ones so the plain MCT matches it.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis change so other inputs are unaffected.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the benchmark circuit on ``n`` qubits around the oracle for ``f``.

    NOTE: this is auto-generated benchmark code -- the ``# number=NN``
    comments track the generator's gate ids, and the exact gate sequence
    (including apparently redundant pairs) is intentional; do not reorder.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.cx(input_qubit[0],input_qubit[3]) # number=17
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[1]) # number=31
    prog.cz(input_qubit[2],input_qubit[1]) # number=32
    prog.h(input_qubit[1]) # number=33
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle over the first n-1 qubits with the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[0]) # number=24
    prog.cz(input_qubit[3],input_qubit[0]) # number=25
    prog.h(input_qubit[0]) # number=26
    prog.h(input_qubit[0]) # number=37
    prog.cz(input_qubit[3],input_qubit[0]) # number=38
    prog.h(input_qubit[0]) # number=39
    prog.z(input_qubit[3]) # number=29
    prog.cx(input_qubit[3],input_qubit[0]) # number=30
    prog.x(input_qubit[2]) # number=23
    prog.cx(input_qubit[3],input_qubit[0]) # number=22
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[3]) # number=36
    prog.cx(input_qubit[3],input_qubit[0]) # number=34
    prog.cx(input_qubit[3],input_qubit[0]) # number=35
    # circuit end
    # Measure every qubit into its classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # f(rep) = bitwise_dot(a, rep) XOR b -- the boolean function fed to the
    # oracle inside make_circuit.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = FakeVigo()
    sample_shot =8000
    # Execute on the noisy FakeVigo mock backend and collect counts.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump the counts and the transpiled circuit to the benchmark CSV.
    # NOTE(review): plain open()/close() without try/finally -- the handle
    # leaks if a print raises; a `with` block would be safer.
    writefile = open("../data/startQiskit_noisy2781.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
c04fef02387aa5c4e6b6a704956c0caefcfaa6ab | 6168318d3853c3a0eec7e09401d0226050167f60 | /movies/migrations/0002_user_gender.py | e933da51bbdf4b8d120d02ea950efc34b4280bc8 | [] | no_license | sarahcstringer/django-ratings | 5537cff58f6debe87cd2fec6d38c825b92c33fe6 | 9cd675b1931c84b9ca563e49903339f811f9eef6 | refs/heads/master | 2021-01-12T08:13:11.494710 | 2017-05-19T00:10:06 | 2017-05-19T00:10:06 | 76,511,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-14 20:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional single-character ``gender`` field to ``movies.User``."""
    dependencies = [
        ('movies', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='gender',
            # blank=True makes the field optional; max_length=1 suggests a
            # one-letter code -- TODO confirm intended values against the model.
            field=models.CharField(blank=True, max_length=1),
        ),
    ]
| [
"sarahcstringer@gmail.com"
] | sarahcstringer@gmail.com |
136a6c7a38d6872c86aafa2caeb5e2ce2091320c | afe03ba84eed10acc187a60b3eceded78b0f6b94 | /sum.py | 4934e9961e8699f6e209029abd0834ea2821b0ab | [] | no_license | tromika/BinarySearchTree | 5c5f86d15b9e2c979e1aa69d10a01b15ba57b130 | 949be203d08e206f76ca91693e55a3facb79764f | refs/heads/master | 2020-07-02T00:52:43.651684 | 2014-10-25T20:33:51 | 2014-10-25T20:33:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | def main():
#content = raw_input()
#test = content.split('\n')
#test = [2,-2,6,-6,8]
s = 0
sumList = []
for a in test:
s+=a
sumList.append(s)
sortedSumList = dict((i,sorted(sumList).count(i)) for i in sorted(sumList))
sequences = 0
for key,value in sortedSumList.iteritems():
print key, value
#if key = 0:
if value > 1:
sequences+= value*2-1
print sequences
if __name__ == "__main__":
main() | [
"tromika@gmail.com"
] | tromika@gmail.com |
85a1abb5eb0c0c2f47f5e05bd11dd32306236ed1 | 763ffc8b6dff8684e2ee460235e54ce2581fa2d3 | /1_hatExper/step1.1/test.py | 53fa29d6d03e849e8dcd01d46a16b96cbe3518ce | [] | no_license | fengxu1996/hat_experimentation_2 | a750d7922302efb66a40398e468f634b5d5f6d9c | d773f39dc1f6e2693ba426f0f00fc56349555a01 | refs/heads/master | 2022-03-18T21:51:24.852287 | 2019-12-07T07:12:57 | 2019-12-07T07:12:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
import pandas as pd
path1 = r"E:\pythonCode\RJ_experimentation_1\Data_1\JQ\JQ_affiliationID_sortedName_" \
r"fieldID_fieldName_paperID_authorID_paperYear_awardYear_citationCnt.txt"
path2 = r""
| [
"1243309973@qq.com"
] | 1243309973@qq.com |
7103550a777cf28d04316b074ffc54028892c836 | 5629c6f395d2d475ca091f83171f045408ad65b8 | /hangman_app.py | 1c895b4dd99233061c41aaa7ffefb7857ad03695 | [] | no_license | oluxiwen/pa06_hangman | 8acf48e2bdd601d5fc148e2562535b11ec18ee4f | 1a1de42c154fd961dfa562f05f15b7cbac4a0eb1 | refs/heads/master | 2022-04-14T18:53:58.519727 | 2020-04-13T07:55:39 | 2020-04-13T07:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,623 | py | """
hangman_app.py is an app for playing hangman in the terminal
it is also used as a module in the hangman_webapp flask app
"""
import random
words = "dog cat mouse deer snake".split()
def generate_random_word():
    """Pick one entry at random from the module-level ``words`` list.

    (Reading the word list from a file is intentionally skipped; the
    in-memory list is used instead.)  The word is returned lowercased.
    """
    chosen = random.choice(words)
    return str(chosen).lower()
def print_word(word, guessed_letter):
    # NOTE: this definition is shadowed by the later ``print_word`` below,
    # which additionally returns the masked word for the web app.
    masked = "".join(ch if ch in guessed_letter else "-" for ch in word)
    print(list(masked))
def pick_a_letter():
    """Prompt until a non-empty string is typed; return its first char, lowercased."""
    raw = str(input("pick a letter: "))
    while not raw:
        # Re-prompt (note the doubled colon, kept from the original prompt).
        raw = input("pick a letter:: ")
    letter = raw[0].lower()
    print(letter, "is picked.")
    return letter
def print_guesses_left(guesses_left):
    """Report how many guesses remain."""
    print("{0} choices left".format(guesses_left))
def all_letter_guessed(word, guessed_letter):
    """Return True iff every character of *word* appears in *guessed_letter*."""
    return all(ch in guessed_letter for ch in word)
def want_to_play():
    """Ask whether to play another round; only 'y' or 'Y' counts as yes."""
    answer = input("play again? (y or n):")
    return answer in ("y", "Y")
def print_word(word, guessed_letter):
    """Print the masked word (as a list of chars) and return it as a string.

    Characters present in *guessed_letter* are shown; all others become '-'.
    The returned string is consumed by the web front end.
    """
    masked = "".join(c if c in guessed_letter else "-" for c in word)
    print(list(masked))
    return masked
def play_hangman():
    """Run the interactive, terminal version of the hangman game.

    Each round picks a random word and lets the player guess letters until
    the word is complete (win) or six wrong/repeated guesses are used up
    (loss).  After each round the player is asked whether to play again.
    """
    keep_playing = True
    while keep_playing:
        word = generate_random_word()
        guessed_letters = []
        guesses_left = 6
        letter = pick_a_letter()
        done = False
        while not done:
            if letter in guessed_letters:
                # Repeating a previous guess still costs a turn.
                print(letter, "has picked!")
                guesses_left = guesses_left - 1
            elif letter not in word:
                guessed_letters.append(letter)
                print(letter, "is NOT in the word!")
                guesses_left = guesses_left - 1
            else:
                guessed_letters.append(letter)
                print(letter, "is in the word!")
            print_guesses_left(guesses_left)
            if all_letter_guessed(word, guessed_letters):
                print("you won!")
                done = True
            elif guesses_left == 0:
                print("you lost!")
                done = True
            else:
                print_word(word, guessed_letters)
                letter = pick_a_letter()
        # BUG FIX: the original stored the raw input() string here (and
        # shadowed the want_to_play() helper), so ANY non-empty answer --
        # including "n" -- restarted the game.  Use the helper, which
        # returns True only for "y"/"Y".
        keep_playing = want_to_play()

if __name__ == '__main__':
    play_hangman()
| [
"xiwenlu@Xiwens-MacBook-Pro.local"
] | xiwenlu@Xiwens-MacBook-Pro.local |
5fb88b6a250fc8138e50b016b49f98c4fc0590f7 | 41c5f7da28b87a3034754254d21791b322e819d8 | /madana_apiclient/models/xml_ns0_process.py | 8bfbc39cbd2a85998fdbcc992333a3857479e0a0 | [] | no_license | MADANA-IO/madana-apiclient-python | 16cb3eb807897903df2a885a94a2c02fc405818a | 40dc21ab43d9565ac3dff86d7270093cce112753 | refs/heads/master | 2023-03-08T05:02:32.616469 | 2021-02-11T10:17:30 | 2021-02-11T10:17:30 | 287,797,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | # coding: utf-8
"""
madana-api
<h1>Using the madana-api</h1> <p>This documentation contains a Quickstart Guide, relating client functionality and information about the available endpoints and used datamodels. </p> <p> The madana-api and its implementations are still in heavy development. This means that there may be problems in our protocols, or there may be mistakes in our implementations. We take security vulnerabilities very seriously. If you discover a security issue, please bring it to our attention right away! If you find a vulnerability that may affect live deployments -- for example, by exposing a remote execution exploit -- please send your report privately to info@madana.io. Please DO NOT file a public issue. If the issue is a protocol weakness that cannot be immediately exploited or something not yet deployed, just discuss it openly </p> <br> <p> Note: Not all functionality might be acessible without having accquired and api-license token. For more information visit <a href=\"https://www.madana.io\">www.madana.io</a> </p> <br> # noqa: E501
The version of the OpenAPI document: 0.4.16-master.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from madana_apiclient.configuration import Configuration
class XmlNs0Process(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty model: the OpenAPI schema declares no properties, so both maps
    # are empty and to_dict() always returns {}.
    openapi_types = {
    }
    attribute_map = {
    }
    def __init__(self, local_vars_configuration=None):  # noqa: E501
        """XmlNs0Process - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists of models, and dicts of
        # models; plain values pass through unchanged.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare serialized dicts, not identity.
        if not isinstance(other, XmlNs0Process):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, XmlNs0Process):
            return True
        return self.to_dict() != other.to_dict()
| [
"dev@madana.io"
] | dev@madana.io |
327ad346e94f6d6d3c3be000b3c703d8718a101f | 784dda4c400d4e5c42f57e9a7d48883692b2a931 | /pyhawkes/utils/data_manager.py | 3dc6d25d07e8bd12b7188d28e252be3626d20442 | [
"MIT"
] | permissive | yxtj/pyhawkes | bd942aded06dba3dd390a47e28702dcba124961b | ecc6dc23e516a7b06d64e5dbd10c8861b01bd955 | refs/heads/master | 2021-01-15T18:31:12.539149 | 2015-02-25T23:35:50 | 2015-02-25T23:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,746 | py | """
Data manager handles loading the .mat file and setting up the data on the GPU
This could be extended if we ever moved to a distributed setup with multiple GPUs
"""
import numpy as np
import scipy.sparse as sparse
import scipy.io
import os
import pycuda.autoinit
import pycuda.compiler as nvcc
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.curandom as curandom
from pyhawkes.utils.utils import *
# Define constant for the sparse matrix preprocessing
G_LOGISTIC_NORMAL = 0
import logging
# Get handle to global logger
log = logging.getLogger("global_log")
class GpuData:
    """Plain holder for per-dataset GPU array handles.

    All attributes start out as ``None`` and are filled in once the
    corresponding host arrays have been copied to the device.
    """

    def __init__(self):
        # Per-process spike counts, their cumulative sum, and spatial data.
        for attr in ("Ns", "cumSumNs", "X"):
            setattr(self, attr, None)
class DataSet:
    """
    Wrapper for a spike data set: event times S, process ids C, optional
    DxN spatial locations X, sorted and (partially) mirrored on the GPU.
    """

    def __init__(self):
        # Handles to GPU-resident copies (filled in by the load* methods).
        self.gpu = GpuData()

    def loadFromFile(self, path, sortByBlock=False):
        """
        Load the specified mat file.

        Required keys: N, S, and either (Tstart, Tstop) or T.  Optional:
        K and C (process ids), X (DxN spatial locations).  Remaining keys
        are preserved in ``self.other_data``.
        """
        mat_data = scipy.io.loadmat(path, appendmat=True)

        self.N = int(mat_data["N"])

        if "Tstart" in mat_data.keys() and "Tstop" in mat_data.keys():
            self.Tstart = float(mat_data["Tstart"])
            self.Tstop = float(mat_data["Tstop"])
        elif "T" in mat_data.keys():
            self.Tstart = 0
            self.Tstop = float(mat_data["T"])
        else:
            log.error("Neither (Tstart,Tstop) nor T were specified in the mat file")
            exit()

        Sraw = np.ravel(mat_data["S"]).astype(np.float32)

        # Some datasets do not have process IDs
        if "K" in mat_data.keys() and "C" in mat_data.keys():
            self.proc_ids_known = True
            self.K = int(mat_data["K"])
            Craw = (np.ravel(mat_data["C"])).astype(np.int32)

            # Make sure the process IDs are 0-based; a max of K with a min of
            # 1 means the file is 1-indexed (i.e. generated in Matlab).
            if np.max(Craw) == self.K and np.min(Craw) == 1:
                Craw = Craw - 1
        else:
            # Default to all spikes on the same process. This will be changed
            # during inference.
            self.proc_ids_known = False
            self.K = 1
            Craw = np.zeros((self.N,), dtype=np.int32)

        # Some datasets have associated spatial locations for each spike.
        # If so, X must be a DxN matrix where D is the spatial dimension.
        if "X" in mat_data.keys():
            self.isspatial = True
            Xraw = mat_data["X"].astype(np.float32)

            # Make sure Xraw is a DxN matrix
            if np.size(Xraw, 0) == self.N:
                log.debug("Given X is NxD rather than DxN. Transposing...")
                Xraw = Xraw.T

            self.D = np.size(Xraw, 0)
        else:
            self.isspatial = False
            self.X = None
            self.D = 0

        if not sortByBlock:
            (I, Ns, cumSumNs) = self.__argsortSCArray(self.K, Sraw, Craw)
        else:
            (I, Ns, cumSumNs) = self.__argsortSCArrayByBlock(self.K, Sraw, Craw)

        self.S = Sraw[I]
        self.C = Craw[I]
        if self.isspatial:
            # Reorder the spatial columns to match the sorted spike order.
            self.X = np.zeros((self.D, self.N), dtype=np.float32)
            for n in np.arange(self.N):
                self.X[:, n] = Xraw[:, I[n]]

        self.Ns = Ns
        self.maxNs = np.max(Ns)
        self.cumSumNs = cumSumNs

        # Store remaining keys
        self.other_data = {}
        for key in mat_data.keys():
            if key not in ["S", "K", "C", "T", "N", "X", "D"]:
                self.other_data[key] = mat_data[key]

        self.__initializeGpuArrays()

    def loadFromArray(self, N, K, Tstart, Tstop, S, C, X=None, D=0,
                      other_data=None, proc_ids_known=True, sortByBlock=False):
        """
        Initialize a DataSet object with the given parameters.

        ``other_data`` now defaults to a fresh empty dict per call.
        """
        # BUG FIX: the original used a mutable default ``other_data={}``,
        # which is shared across calls and can leak state between datasets.
        if other_data is None:
            other_data = {}

        self.N = N
        self.K = K
        self.Tstart = Tstart
        self.Tstop = Tstop
        self.other_data = other_data
        self.proc_ids_known = proc_ids_known
        # BUG FIX: ``X != None`` is an elementwise comparison for ndarrays
        # (ambiguous truth value); an identity check is what is intended.
        self.isspatial = (X is not None)
        self.D = D
        self.X = None

        if N == 0:
            self.S = S
            self.C = C
            self.Ns = np.zeros(K)
            return

        # Make sure the process IDs are 0-based (1-indexed => Matlab origin)
        if np.max(C) == self.K and np.min(C) == 1:
            C = C - 1

        if not sortByBlock:
            (I, Ns, cumSumNs) = self.__argsortSCArray(self.K, S, C)
        else:
            (I, Ns, cumSumNs) = self.__argsortSCArrayByBlock(self.K, S, C)

        self.S = S[I]
        self.C = C[I]
        if self.isspatial:
            # Reorder the spatial columns to match the sorted spike order.
            self.X = np.zeros((self.D, self.N), dtype=np.float32)
            for n in np.arange(self.N):
                self.X[:, n] = X[:, I[n]]

        self.Ns = Ns
        self.maxNs = np.max(Ns)
        self.cumSumNs = cumSumNs

        # Set correct types
        self.S = np.float32(self.S)
        self.C = np.int32(self.C)
        self.Ns = np.int32(self.Ns)
        self.N = int(self.N)
        self.K = int(self.K)
        self.D = int(self.D)
        # BUG FIX: np.float32(None) raises TypeError, so the original crashed
        # for non-spatial data with N > 0; only cast when spatial data exists.
        if self.X is not None:
            self.X = np.float32(self.X)

        self.__initializeGpuArrays()

    def __initializeGpuArrays(self):
        """
        Copy the per-process counts (and spatial data, if any) to the GPU.
        """
        self.gpu.Ns = gpuarray.to_gpu(self.Ns.astype(np.int32))
        self.gpu.cumSumNs = gpuarray.to_gpu(self.cumSumNs.astype(np.int32))
        if self.isspatial:
            self.gpu.X = gpuarray.to_gpu(self.X.astype(np.float32))

    def __argsortSCArray(self, K, S, C):
        """
        Return (I, Ns, cumSumNs): a permutation I sorting spikes by time,
        the per-process spike counts Ns, and their cumulative sum (with a
        leading 0).
        """
        # Keep an array of spike counts
        Ns = np.zeros(K, dtype=np.int32)

        N = np.size(S)
        assert np.size(C) == N, "ERROR: Size of S and C do not match!"

        # Permutation of S,C,X such that S is sorted in increasing order.
        Iflat = np.argsort(S)

        # Compute Ns
        for k in np.arange(K):
            Ns[k] = np.count_nonzero(C == k)

        # Also compute the cumulative sum of Ns
        cumSumNs = np.cumsum(np.hstack(([0], Ns)), dtype=np.int32)

        return (Iflat, Ns, cumSumNs)

    def __argsortSCArrayByBlock(self, K, S, C):
        """
        Like __argsortSCArray, but the permutation groups spikes by process
        ("block") first; within a process the incoming order of S (assumed
        time-sorted) is preserved.
        """
        # Keep an array of spike counts
        Ns = np.zeros(K, dtype=np.int32)

        N = np.size(S)
        assert np.size(C) == N, "ERROR: Size of S and C do not match!"

        # Initialize buffers to hold the per-process indices
        ppI = {}
        buff_sz = int(2 * N / K)
        for k in np.arange(K):
            ppI[k] = np.zeros(buff_sz)

        for n in np.arange(N):
            cn = C[n]
            try:
                ppI[cn][Ns[cn]] = n
            except:
                # Index out of bounds -- grow the buffer and retry.
                ppI[cn] = np.hstack((ppI[cn], np.zeros(buff_sz)))
                ppI[cn][Ns[cn]] = n
            Ns[cn] += 1

        # Flatten the permutation
        Iflat = np.zeros(N, dtype=np.int)
        off = 0
        for k in np.arange(K):
            Iflat[off:off + Ns[k]] = ppI[k][:Ns[k]]
            off += Ns[k]

        # Also compute the cumulative sum of Ns
        cumSumNs = np.cumsum(np.hstack(([0], Ns)), dtype=np.int32)

        return (Iflat, Ns, cumSumNs)
class DataManager:
    # Loads spike datasets and preprocesses inter-spike-interval structures
    # (sparse matrices of dt between potential parent/child spikes) on the
    # GPU for the Hawkes-process samplers.
    # NOTE(review): this class contains Python 2-only syntax (tuple
    # parameter in get_data_in_interval) and cannot run under Python 3 as-is.
    def __init__(self, configFile, dataFile=None):
        """
        Load the data and preprocess it on the GPU.
        """
        self.parse_config_file(configFile)
        # An explicitly supplied data file overrides the config entry.
        if not dataFile is None:
            self.params["data_file"] = dataFile
        pprint_dict(self.params, "Data Manager Params")
    def preprocess_for_inference(self, sortByBlock=False):
        """
        Load all of the data
        """
        data = DataSet()
        mat_file = os.path.join(self.params["data_dir"], self.params["data_file"])
        data.loadFromFile(mat_file, sortByBlock=sortByBlock)
        return data
    def preprocess_for_cross_validation(self, sortByBlock=False):
        """
        Load all of the data
        """
        data = DataSet()
        mat_file = os.path.join(self.params["data_dir"], self.params["xv_file"])
        data.loadFromFile(mat_file, sortByBlock=sortByBlock)
        return data
    def preprocess_for_prediction_test(self, Tsplit=0, trainFrac=0.9, sortByBlock=False):
        """
        Load all of the data onto the GPU for parameter inference
        """
        data = DataSet()
        mat_file = os.path.join(self.params["data_dir"], self.params["data_file"])
        data.loadFromFile(mat_file)
        (trainData, testData) = self.split_test_train_data(data, Tsplit, trainFrac, sortByBlock=sortByBlock)
        log.info("Train: %d spikes in time [%.2f,%.2f]", trainData.N, trainData.Tstart,trainData.Tstop)
        log.info("Test: %d spikes in time [%.2f,%.2f]", testData.N, testData.Tstart,testData.Tstop)
        return (trainData, testData)
    def parse_config_file(self, configFile):
        """
        Parse the config file for data manager params
        """
        # Initialize defaults
        defaultParams = {}
        # Data location
        defaultParams["data_dir"] = "."
        defaultParams["xv_file"] = "not given"
        # CUDA kernels are defined externally in a .cu file
        defaultParams["cu_dir"] = os.path.join("pyhawkes", "cuda", "cpp")
        defaultParams["cu_file"] = "preprocessing_unknown_procs.cu"
        # Block size
        defaultParams["blockSz"] = 1024
        # Window the data such that only spikes within a fixed time window can
        # have an effect. It is important that this be consistent with the
        # prior on the impulse response
        defaultParams["dt_max"] = 5.0
        defaultParams["max_hist"] = 10*1024
        # Create a config parser object and read in the file
        cfgParser = ConfigParser(defaultParams)
        cfgParser.read(configFile)
        # Create an output params dict. The config file is organized into
        # sections. Read them one at a time
        self.params = {}
        self.params["data_dir"] = cfgParser.get("io", "data_dir")
        self.params["data_file"] = cfgParser.get("io", "data_file")
        self.params["xv_file"] = cfgParser.get("io", "xv_file")
        self.params["blockSz"] = cfgParser.getint("cuda", "blockSz")
        self.params["cu_dir"] = cfgParser.get("preprocessing", "cu_dir")
        self.params["cu_file"] = cfgParser.get("preprocessing", "cu_file")
        self.params["dt_max"] = cfgParser.getfloat("preprocessing", "dt_max")
        self.params["max_hist"] = cfgParser.getint("preprocessing", "max_hist")
    def initialize_gpu_kernels(self):
        # Compile the kernels used when process identities are unknown.
        kernelSrc = os.path.join(self.params["cu_dir"], self.params["cu_file"])
        kernelNames = ["computeColumnSizes",
                       "computeRowIndicesAndDs",
                       "computeDx"]
        src_consts = {"B" : self.params["blockSz"]}
        self.gpuKernels = compile_kernels(kernelSrc, kernelNames, srcParams=src_consts)
    def initialize_known_proc_gpu_kernels(self):
        # Compile the larger kernel set used when process identities are known.
        kernelSrc = os.path.join(self.params["cu_dir"], self.params["cu_file"])
        kernelNames = ["computeColPtrs",
                       "computeDsBufferSize",
                       "computeRowAndDsOffsets",
                       "computeRowIndicesAndDs",
                       "computeColumnSizes",
                       "computeRowIndicesAndDs"]
        src_consts = {"B" : self.params["blockSz"]}
        self.gpuKernels = compile_kernels(kernelSrc, kernelNames, srcParams=src_consts)
    def split_test_train_data(self, alldata, Tsplit=0, trainFrac=0.9, sortByBlock=False):
        """
        Split the data into test and train subsets
        alldata must be a sorted Dataset
        """
        # First make sure the spike are sorted by time, not by block
        # Compute a permutation of S,C,X such that S is sorted in increasing order
        Iflat = np.argsort(alldata.S)
        S = alldata.S[Iflat]
        C = alldata.C[Iflat]
        # NOTE(review): ``alldata.X!=None`` is elementwise for ndarrays and
        # makes this conditional raise ValueError when X is a real array;
        # should be ``alldata.X is not None``.
        X = alldata.X[:,Iflat] if alldata.X!=None else None
        if Tsplit > 0:
            # Find the index of the first spike after Tsplit
            split_ind = np.min(np.nonzero(S>Tsplit)[0])
        elif trainFrac > 0:
            # Split at the midpoint between the spikes around the chosen index.
            split_ind = int(np.floor(trainFrac*alldata.N))
            Tsplit = (S[split_ind-1] + S[split_ind])/2.0
        else:
            log.error("Either Tsplit or trainFrac must be specified!")
            exit()
        # Create two datasets
        # NOTE(review): DataSet stores Tstart/Tstop, not T -- ``alldata.T``
        # below looks like an AttributeError waiting to happen; verify.
        trainData = self.get_data_in_interval(alldata,(0,Tsplit), sortByBlock=sortByBlock)
        testData = self.get_data_in_interval(alldata,(Tsplit, alldata.T), sortByBlock=sortByBlock)
        return (trainData, testData)
    def get_data_in_interval(self, alldata, (T_start,T_stop), sortByBlock=False):
        """
        Split the data into test and train subsets
        alldata must be a sorted Dataset
        """
        # (Python 2 tuple-parameter syntax above.)
        # First make sure the spike are sorted by time, not by block
        # Compute a permutation of S,C,X such that S is sorted in increasing order
        Iflat = np.argsort(alldata.S)
        S = alldata.S[Iflat]
        C = alldata.C[Iflat]
        # NOTE(review): same elementwise ``X!=None`` issue as above.
        X = alldata.X[:,Iflat] if alldata.X!=None else None
        # Find the index of the first spike after Tsplit
        # NOTE(review): np.min/np.max of an empty selection raises; an
        # interval containing no spikes will crash here -- confirm callers
        # guarantee non-empty intervals.
        start_ind = np.min(np.nonzero(S>T_start)[0])
        stop_ind = np.max(np.nonzero(S<T_stop)[0])+1
        # Create two datasets
        data = DataSet()
        data.loadFromArray(stop_ind-start_ind,
                           alldata.K,
                           T_start,
                           T_stop,
                           S[start_ind:stop_ind],
                           C[start_ind:stop_ind],
                           X=X[:,start_ind:stop_ind] if X!=None else None,
                           D=alldata.D,
                           other_data=alldata.other_data,
                           proc_ids_known=alldata.proc_ids_known,
                           sortByBlock=sortByBlock)
        return data
    def compute_sparse_spike_intvl_matrices(self, dataSet1, dataSet2):
        """
        preprocess the given datasets by computing the intervals between spikes on S1
        and spikes on S2 and storing them in a sparse matrix format on the GPU.
        The GPU kernels require the spikes to be sorted, first in C and then in S, so
        all the spikes on process 0 come first, and within the spikes on process 0
        they are sorted in increasing order of S.
        """
        # Initialize the kernels with the size of the dataset
        self.initialize_known_proc_gpu_kernels()
        # Temporarily copy both sets of spike times to the GPU
        S1_gpu = gpuarray.to_gpu(dataSet1.S.astype(np.float32))
        S2_gpu = gpuarray.to_gpu(dataSet2.S.astype(np.float32))
        # Now we can preprocess the interspike intervals on the GPU
        # First compute the size of each column for each matrix
        # Each spike appears in K1 matrices, so there are K1*N2 columns
        colStartBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
        colEndBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
        colSizesBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
        grid_w = int(np.ceil(float(dataSet2.N)/self.params["blockSz"]))
        status_gpu = gpuarray.zeros((dataSet1.K,grid_w),dtype=np.int32)
        self.gpuKernels["computeColumnSizes"](np.float32(self.params["dt_max"]),
                                              dataSet1.gpu.Ns.gpudata,
                                              dataSet1.gpu.cumSumNs.gpudata,
                                              S1_gpu.gpudata,
                                              np.int32(dataSet2.N),
                                              S2_gpu.gpudata,
                                              colStartBuffer_gpu.gpudata,
                                              colEndBuffer_gpu.gpudata,
                                              colSizesBuffer_gpu.gpudata,
                                              status_gpu.gpudata,
                                              block=(1024,1,1),
                                              grid=(grid_w,dataSet1.K)
                                              )
        # Compute the column pointers (the cumulative sum) of the
        # column sizes for each matrix. There are K1xK2 grid of matrices
        colPtrsBuffer_gpu = gpuarray.zeros((dataSet1.K,(dataSet2.N+dataSet2.K)), dtype=np.int32)
        colPtrOffsets_gpu = gpuarray.zeros((dataSet1.K,dataSet2.K), dtype=np.int32)
        self.gpuKernels["computeColPtrs"](np.int32(dataSet1.K),
                                          np.int32(dataSet2.N),
                                          dataSet2.gpu.Ns.gpudata,
                                          dataSet2.gpu.cumSumNs.gpudata,
                                          colSizesBuffer_gpu.gpudata,
                                          colPtrsBuffer_gpu.gpudata,
                                          colPtrOffsets_gpu.gpudata,
                                          block=(1,1,1),
                                          grid=(dataSet1.K,dataSet2.K)
                                          )
        # Compute the required size of the data and row buffer
        bufferSize_gpu = gpuarray.zeros(1, dtype=np.int32)
        self.gpuKernels["computeDsBufferSize"](np.int32(dataSet1.K),
                                               dataSet2.gpu.Ns.gpudata,
                                               colPtrsBuffer_gpu.gpudata,
                                               colPtrOffsets_gpu.gpudata,
                                               bufferSize_gpu.gpudata,
                                               block=(1,1,1),
                                               grid=(1,1)
                                               )
        bufferSize = int(bufferSize_gpu.get()[0])
        log.debug("dS has %d nonzero entries" % bufferSize)
        dsBuffer_gpu = gpuarray.empty((bufferSize,), dtype=np.float32)
        rowIndicesBuffer_gpu = gpuarray.zeros((bufferSize,), dtype=np.int32)
        # Compute the offsets into these buffers for each matrix
        rowAndDsOffsets_gpu = gpuarray.empty((dataSet1.K,dataSet2.K), dtype=np.int32)
        self.gpuKernels["computeRowAndDsOffsets"](np.int32(dataSet1.K),
                                                  dataSet2.gpu.Ns.gpudata,
                                                  colPtrsBuffer_gpu.gpudata,
                                                  colPtrOffsets_gpu.gpudata,
                                                  rowAndDsOffsets_gpu.gpudata,
                                                  block=(1,1,1),
                                                  grid=(1,1)
                                                  )
        # Now we can actually fill in row and ds buffers
        self.gpuKernels["computeRowIndicesAndDs"](np.int32(G_LOGISTIC_NORMAL),
                                                  np.int32(dataSet1.K),
                                                  dataSet1.gpu.Ns.gpudata,
                                                  dataSet1.gpu.cumSumNs.gpudata,
                                                  S1_gpu.gpudata,
                                                  np.int32(dataSet2.N),
                                                  dataSet2.gpu.cumSumNs.gpudata,
                                                  S2_gpu.gpudata,
                                                  colStartBuffer_gpu.gpudata,
                                                  colEndBuffer_gpu.gpudata,
                                                  colPtrsBuffer_gpu.gpudata,
                                                  colPtrOffsets_gpu.gpudata,
                                                  rowIndicesBuffer_gpu.gpudata,
                                                  dsBuffer_gpu.gpudata,
                                                  rowAndDsOffsets_gpu.gpudata,
                                                  block=(1024,1,1),
                                                  grid=(grid_w,dataSet1.K)
                                                  )
        # If this is a spatial dataset then also compute dX
        dxBuffer_gpu = None
        if dataSet1.isspatial and dataSet2.isspatial:
            D = dataSet1.D
            assert dataSet2.D == D, "Error: two datasets have different spatial dimensions"
            dxBuffer_gpu = gpuarray.empty((D*bufferSize,), dtype=np.float32)
            # Copy the spatial data to the GPU
            X1_gpu = gpuarray.to_gpu(dataSet1.X.astype(np.float32))
            X2_gpu = gpuarray.to_gpu(dataSet2.X.astype(np.float32))
            self.gpuKernels["computeDx"](np.int32(D),
                                         np.int32(dataSet1.N),
                                         dataSet1.gpu.cumSumNs.gpudata,
                                         X1_gpu.gpudata,
                                         np.int32(dataSet2.N),
                                         dataSet2.gpu.cumSumNs.gpudata,
                                         X2_gpu.gpudata,
                                         rowIndicesBuffer_gpu.gpudata,
                                         colPtrsBuffer_gpu.gpudata,
                                         colPtrOffsets_gpu.gpudata,
                                         rowAndDsOffsets_gpu.gpudata,
                                         dxBuffer_gpu.gpudata,
                                         block=(1024,1,1),
                                         grid=(grid_w,dataSet1.K)
                                         )
        ds = dsBuffer_gpu.get()
        # assert np.all(ds < self.params["dt_max"]), "ERROR: DS contains entries equal to dt_max!"
        # assert np.all(ds > 0), "ERROR: DS contains entries equal to 0!"
        # Update gpuData dictionary
        gpuData = {}
        gpuData["dsBuffer_size"] = bufferSize
        gpuData["dsBuffer_gpu"] = dsBuffer_gpu
        gpuData["rowIndicesBuffer_gpu"] = rowIndicesBuffer_gpu
        gpuData["colPtrsBuffer_gpu"] = colPtrsBuffer_gpu
        gpuData["rowAndDsOffsets_gpu"] = rowAndDsOffsets_gpu
        gpuData["colPtrOffsets_gpu"] = colPtrOffsets_gpu
        gpuData["dxBuffer_gpu"] = dxBuffer_gpu
        return gpuData
    def compute_sparse_spike_intvl_matrix_unknown_procs(self, S1, S2):
        """
        In the case where the process identities are unknown and to be inferred,
        it does not make sense to have a grid of sparse matrices for each pair of
        process identities. Instead, create a single sparse matrix for spike intervals
        """
        # Initialize the kernels with the size of the dataset
        self.initialize_gpu_kernels()
        # Temporarily copy both sets of spike times to the GPU
        N1 = len(S1)
        N2 = len(S2)
        # Handle the case where there are no spikes, N2=0
        if N2 == 0:
            gpuData = {}
            gpuData["dS_size"] = 0
            gpuData["dS"] = gpuarray.zeros(1, dtype=np.float32)
            gpuData["rowIndices"] = gpuarray.zeros(1, dtype=np.float32)
            gpuData["colPtrs"] = gpuarray.zeros(1, dtype=np.float32)
            return gpuData
        S1_gpu = gpuarray.to_gpu(S1.astype(np.float32))
        S2_gpu = gpuarray.to_gpu(S2.astype(np.float32))
        # Now we can preprocess the interspike intervals on the GPU
        # First compute the size of each column for each matrix
        # Each spike appears in K1 matrices, so there are K1*N2 columns
        colStart_gpu = gpuarray.empty((N2,), dtype=np.int32)
        colEnd_gpu = gpuarray.empty((N2,), dtype=np.int32)
        colSizes_gpu = gpuarray.empty((N2,), dtype=np.int32)
        grid_w = int(np.ceil(float(N2)/self.params["blockSz"]))
        self.gpuKernels["computeColumnSizes"](np.float32(self.params["dt_max"]),
                                              np.int32(N1),
                                              S1_gpu.gpudata,
                                              np.int32(N2),
                                              S2_gpu.gpudata,
                                              colStart_gpu.gpudata,
                                              colEnd_gpu.gpudata,
                                              colSizes_gpu.gpudata,
                                              block=(1024,1,1),
                                              grid=(grid_w,1)
                                              )
        # Compute the column pointers (the cumulative sum) of the col sizes
        colSizes = colSizes_gpu.get()
        colPtrs = np.cumsum(np.hstack(([0],colSizes))).astype(np.int32)
        colPtrs_gpu = gpuarray.to_gpu(colPtrs)
        # Compute the required size of the data and row buffer
        bufferSize = int(colPtrs[-1])
        log.debug("dS has %d nonzero entries" % bufferSize)
        if bufferSize == 0:
            log.warning("There are no preceding parents. Potential parent matrix is empty!")
            log.debug("Setting buffer size to 1.")
            bufferSize = 1
        dS_gpu = gpuarray.empty((bufferSize,), dtype=np.float32)
        dS_gpu.fill(1.0)
        rowIndices_gpu = gpuarray.zeros((bufferSize,), dtype=np.int32)
        # Now we can actually fill in row and ds buffers
        self.gpuKernels["computeRowIndicesAndDs"](np.int32(G_LOGISTIC_NORMAL),
                                                  S1_gpu.gpudata,
                                                  np.int32(N2),
                                                  S2_gpu.gpudata,
                                                  colStart_gpu.gpudata,
                                                  colEnd_gpu.gpudata,
                                                  colPtrs_gpu.gpudata,
                                                  rowIndices_gpu.gpudata,
                                                  dS_gpu.gpudata,
                                                  block=(1024,1,1),
                                                  grid=(grid_w,1)
                                                  )
        # If this is a spatial dataset then also compute dX
        # dX_gpu = None
        # if dataSet1.isspatial and dataSet2.isspatial:
        #     D = dataSet1.D
        #     assert dataSet2.D == D, "Error: two datasets have different spatial dimensions"
        #     dX_gpu = gpuarray.empty((D*bufferSize,), dtype=np.float32)
        #
        #     # Copy the spatial data to the GPU
        #     X1_gpu = gpuarray.to_gpu(dataSet1.X.astype(np.float32))
        #     X2_gpu = gpuarray.to_gpu(dataSet2.X.astype(np.float32))
        #
        #     self.gpuKernels["computeDx"](np.int32(D),
        #                                  np.int32(N1),
        #                                  X1_gpu.gpudata,
        #                                  np.int32(N2),
        #                                  X2_gpu.gpudata,
        #                                  rowIndices_gpu.gpudata,
        #                                  colPtrs_gpu.gpudata,
        #                                  dX_gpu.gpudata,
        #                                  block=(1024,1,1),
        #                                  grid=(grid_w,1)
        #                                  )
        ds = dS_gpu.get()
        # Sanity check: all parent-child intervals must be strictly positive.
        if not np.all(ds > 0):
            log.info("Min DS: %f", np.min(ds))
            raise Exception("ERROR: DS contains nonpositive entries")
        # assert np.all(ds <= self.params["dt_max"]), "ERROR: DS contains entries greater than dt_max!"
        # assert np.all(ds < self.params["dt_max"]), "ERROR: DS contains entries equal to dt_max!"
        # Update gpuData dictionary
        gpuData = {}
        gpuData["dS_size"] = bufferSize
        gpuData["dS"] = dS_gpu
        gpuData["rowIndices"] = rowIndices_gpu
        gpuData["colPtrs"] = colPtrs_gpu
        # gpuData["dxBuffer_gpu"] = dX_gpu
        return gpuData
| [
"scott.linderman@gmail.com"
] | scott.linderman@gmail.com |
04b9361d8abdd9474ccbd043ba51ae2031521dbf | b97082f919a6aa44e770d84bade91e455c0bd936 | /teacherInfo/teacherInfo/pipelines.py | 2330d6ef91b482bf47de4fd538f940ecae6e63da | [
"Apache-2.0"
] | permissive | CareyRay/Python-Spider | f2799689bb104a5352b425da31fa5c5e30bda1fb | 0a83efdc5ba0ff9ee5df4e9aa08197ca4e1ccafd | refs/heads/master | 2020-12-14T22:21:54.201119 | 2020-01-19T11:50:38 | 2020-01-19T11:50:38 | 234,890,957 | 0 | 0 | Apache-2.0 | 2020-01-19T11:45:39 | 2020-01-19T11:45:39 | null | UTF-8 | Python | false | false | 603 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs
class TeacherinfoPipeline(object):
    """Scrapy item pipeline that appends each teacher item to ``teacher.json``.

    Items are serialized one JSON object per line (JSON Lines format).
    """

    def __init__(self):
        # codecs.open with 'wb' + 'utf-8' accepts text writes and encodes them.
        self.filename = codecs.open('teacher.json', 'wb', 'utf-8')

    def process_item(self, item, spider):
        """Serialize *item* as one JSON line and pass it through unchanged."""
        print(item)
        html = json.dumps(dict(item), ensure_ascii=False)
        self.filename.write(html + '\n')
        return item

    def open_spider(self, spider):
        # Nothing to do at spider start; the file is opened in __init__.
        pass

    def close_spider(self, spider):
        # BUG FIX: the file handle was never closed (the close call was
        # commented out and sat under open_spider, the wrong hook).  Close
        # it when the spider finishes so buffered lines are flushed.
        self.filename.close()
"2334497007@qq.com"
] | 2334497007@qq.com |
d205ceebfdd254bfc03112ff574568f8bfe89589 | d93f02be48f41f2f9c17e24adf4af0fa15355c9b | /atom/wsgi.py | 5e52294050ad7549c7dd2c27332dc0f973a5ed62 | [] | no_license | vxvinh1511/atom | ee9b574339dce41e76bc259182834abf6610cd43 | 64ba2917820a82552112584aad5ab8cc07061fd7 | refs/heads/master | 2021-01-10T01:33:47.668962 | 2015-09-25T13:09:25 | 2015-09-25T13:09:25 | 43,142,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for atom project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module unless the environment
# already specifies one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "atom.settings")

# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"vxvpastime@gmail.com"
] | vxvpastime@gmail.com |
36beedcc5794bb0e122f7c3f16767dbcc5564a0b | 8b55dec8bf328cc2f8699942d374a27440fcfe8b | /bin/rst2html.py | c14dc2730cbd1208dd45092c2e7c5d9b8c51af63 | [] | no_license | exell/words | e39233989ef8b0b1319d8aa6351d376765490d06 | 5a0851ede2625b7ec1918050ad33c5b2e05b752b | refs/heads/master | 2021-01-01T17:27:38.551358 | 2013-07-02T11:28:26 | 2013-07-02T11:28:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | #!/home/andy/wordsdev/bin/python
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| [
"andy@sussedit.com"
] | andy@sussedit.com |
05c11b1a66da1d79bb4f38af7cb8b32e4edebc87 | 183f2b13d1347819e6a911f1ab15f74a32d8f861 | /src/models/train.py | 4a2ba61d4f67943942535d3929fd5e328b5d1463 | [
"MIT"
] | permissive | Peymankor/MBML_Final_Project | 37880994f40893ae2509a53f7988595779805185 | 1d989b07773f1b8e9a5dd11379845959d62e8780 | refs/heads/master | 2022-09-23T23:30:21.161733 | 2020-05-30T21:12:46 | 2020-05-30T21:12:46 | 262,248,278 | 4 | 1 | MIT | 2020-05-21T18:00:58 | 2020-05-08T06:47:53 | Jupyter Notebook | UTF-8 | Python | false | false | 763 | py | """Handy functions to train Kalman filters in numpyro."""
from jax import random
from numpyro.infer import MCMC, NUTS
def train_kf(model, data, n_train, n_test, x=None, num_samples=9000, num_warmup=3000):
"""Train a Kalman Filter model."""
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
nuts_kernel = NUTS(model=model)
# burn-in is still too much in comparison with the samples
mcmc = MCMC(
nuts_kernel, num_samples=num_samples, num_warmup=num_warmup, num_chains=1
)
# let T be guessed from the length of observed
if x is None:
mcmc.run(rng_key_, T=n_train, T_forecast=n_test, obs=data)
else:
mcmc.run(rng_key_, T=n_train, T_forecast=n_test, x=x, obs=data)
return mcmc
| [
"carrascomurielj@gmail.com"
] | carrascomurielj@gmail.com |
258bb0e2762aefc4fda2a6a064e89faad4e34e96 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/hardware/polyinomial_mapper.py | aa3f53521645648ca77a9b4089bb88812a44f0bd | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 2,147 | py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from numpy import poly1d
from scipy import optimize
from traits.api import HasTraits, List, Float
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_floats
class PolynomialMapper(HasTraits):
"""
list of coefficients. see numpy.poly1d to see exactly how coefficients used
coefficient = 1,2,3
==> 1*x^2+2*x+3
"""
_coefficients = List
output_low = Float(0)
output_high = Float(100)
_polynomial = None
def set_coefficients(self, cs):
self._coefficients = cs
self._polynomial = poly1d(cs)
def parse_coefficient_string(self, s):
self.set_coefficients(csv_to_floats(s))
def map_measured(self, v):
"""
convert a measured value to an output value (Voltage -> Temp)
"""
if self._polynomial:
v = self._polynomial(v)
return v
def map_output(self, v):
"""
convert an output value to measured value (Voltage <- Temp)
"""
c=self._coefficients[:]
c[-1] -= v
return optimize.brentq(poly1d(c), self.output_low, self.output_high)
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
a8edb0da7d8720a5f48f1695b3f768a2a34ec969 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /010_strings/_exercises/Python 3 Most Nessesary/6.10. Search and replace in line.py | c8f366f01071b74a68d0f19128b40eb84fc3a1d0 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,873 | py | # # -*- coding: utf-8 -*-
#
# s = "пример пример Пример"
# print ?.f.. "при" , ?.f.. "При" , ?.f.. "тест"
# # (0, 14, -1)
# print ?.f.. "при", 9 , ?.f.. "при", 0, 6 , ?.f.. "при", 7, 12
# # (-1, 0, 7)
#
#
# s = "пример пример Пример"
# print ?.i..("при" , ?.i..("при", 7, 12 , ?.i..("При", 1
# # (0, 7, 14)
# # print(s.index("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#24>", line 1, in <module>
# # s.index("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.rf.. "при" , ?.rf.. "При" , ?.rf.. "тест"
# # (7, 21, -1)
# print ?.f.. "при", 0, 6 , ?.f.. "При", 10, 20
# # (0, 14)
#
#
# s = "пример пример Пример Пример"
# print ?.ri.. "при" , ?.ri.. "При" , ?.ri.. "при", 0, 6
# # (7, 21, 0)
# # print(s.rindex("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#30>", line 1, in <module>
# # s.rindex("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.c.. "при" , ?.c.. "при", 6 , ?.c.. "При"
# # (2, 1, 2)
# print ?.c.. "тест"
# 0
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при" , ?.st..w.. "При"
# # (True, False)
# print ?.st..w.. "при", 6 , ?.st..w.. "При", 14
# # (False, True)
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при", "При"
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока" , ?.e..w.. "ОКА"
# # (False, True)
# print ?.e..w.. "ока", 0, 9
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока", "ОКА"
# # True
#
# s = "Привет, Петя"
# print ?.re.. "Петя", "Вася"
# # Привет, Вася
# print ?.re.. "петя", "вася" # Зависит от регистра
# # Привет, Петя
# s = "strstrstrstrstr"
# print ?.re.. "str", "" , ?.re.. "str", "", 3
# # ('', 'strstr')
#
#
# s = "Пример"
# d = o.. "П" N.. o.. "р" o.. "Р"
# print ?
# # {1088: 1056, 1055: None}
# print ?.tr.. d
# # 'РимеР'
#
#
# t = st_.m.tr.. "а" "А", "о" "О", "с" N..
# print(t
# # {1072: 'А', 1089: None, 1086: 'О'}
# print "строка".tr.. t
# # 'трОкА'
#
#
# t = st_.m.tr.. "абвгдежзи", "АБВГДЕЖЗИ"
# print(t)
# # {1072: 1040, 1073: 1041, 1074: 1042, 1075: 1043, 1076: 1044,
# # 1077: 1045, 1078: 1046, 1079: 1047, 1080: 1048}
# print "абвгдежзи".tr.. t
# # 'АБВГДЕЖЗИ'
#
#
# t = st_.m.tr.. "123456789", "0" * 9, "str"
# print(t)
# # {116: None, 115: None, 114: None, 49: 48, 50: 48, 51: 48,
# # 52: 48, 53: 48, 54: 48, 55: 48, 56: 48, 57: 48}
# print "str123456789str".tr.. t
# # '000000000' | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
0862876f4cdd8ae2bf38e8b1a144c12ef78b3d02 | b23b1156d6b2c34451db0891d6d2bf0b6d7704c7 | /redboxrental3.py | 307e0e758bf59bc3ef83ddfd8f5d01e5d7319e76 | [
"MIT",
"BSD-3-Clause"
] | permissive | ProgramNoona/Capstone2019AnimeReview | 571447e41657e694bbf21250d19c2096f72fb2a7 | 9875098fd25d2405447d39edaf9ac87dcd15abe4 | refs/heads/master | 2020-04-21T14:33:36.672587 | 2019-05-14T03:37:40 | 2019-05-14T03:37:40 | 169,638,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,838 | py | #CSC221
#m3hw
"""
Author: Reagan
A video rental program, yo
"""
import csv
import random
def main():
#Variable for recieving user answer
answer = 0
#Variable for containing the list of movies
movies = []
#Variable for containing the list of customers
customers = []
#Here are our filenames
filename1 = 'customerdatabase.csv'
filename2 = 'moviedatabase.csv'
#A while statement that makes main run until exit is chosen
while(answer != 7):
print("1. Load customers database.")
print("2. Load movies database.")
print("3. Display currently renting customers.")
print("4. Display overdue customers.")
print("5. Display one customer's invoice.")
print("6. Rent a movie to a customer.")
print("7. Exit")
answer = int(input("Please make a selection "))
#lists the choices for the user
if(answer==1):
loadCust(filename1, customers)
elif(answer==2):
loadMovie(filename2, movies)
elif(answer==3):
showRenters(customers,movies)
elif(answer==4):
showOverdue(customers,movies)
elif(answer==5):
printInvoice(customers,movies)
elif(answer==6):
rentFilm(customers, movies)
elif(answer==7):
exit
else:
print("Please choose a valid answer")
""" This loads the customers into the program. Option 1"""
def loadCust(filename, customers):
firstName = ""
lastName = ""
custID = ""
# Checks if customer is currently renting anything
renting = ""
with open(filename) as f:
reader = csv.reader(f)
next(reader)
for row in reader:
firstName = row[0]
lastName = row[1]
custID = row[2]
renting = row[3]
customer = Customer(firstName, lastName, custID, renting)
customers.append(customer)
print("\nCustomer file has been loaded\n")
"""loads in the movie file into a list. Option 2 """
def loadMovie(filename, movies):
title = ""
ID = ""
genre = ""
# Checks to see if a movie is in or out.
out = ""
rentedBy = ""
# Days a movie has been rented out
daysOut = 0
# Days a movie is over the rental limit
daysOver = 0
with open(filename) as f:
reader = csv.reader(f)
next(reader)
for row in reader:
title = row[0]
ID = row[1]
genre = row[2]
out = row[3]
rentedBy = row[4]
daysOut = int(row[5])
daysOver = int(row[6])
movie = Movie(title, ID, genre, out, rentedBy, daysOut, daysOver)
movies.append(movie)
print("\nMovie file has been loaded\n")
""" Shows the list of customers currently renting movies. Option 3"""
def showRenters(customers,movies):
#check and see if the customers and movies lists are loaded
if len(customers) == 0:
print("\nCustomer file has not been loaded!\n")
if len(movies) == 0:
print("\nCustomer file has not been loaded!\n")
#print all customers who have stuff rented out
print("\nThese are the people currently renting from us: ")
for customer in customers:
if(customer.renting =="yes"):
print("\t",customer.firstName,customer.lastName,)
print("\n")
""" Shows any customers who haven't turned in movies during rental period.
Option 4"""
def showOverdue(customers, movies):
#check and see if the customers and movies lists are loaded
if len(customers) == 0:
print("\nCustomer file has not been loaded!\n")
if len(movies) == 0:
print("\nCustomer file has not been loaded!\n")
print("\n", "These customers are overdue:")
#search movie list for overdue movies
for movie in movies:
if(movie.daysOver != 0):
#after finding an overdue movie, search customer database for renter
for customer in customers:
if(movie.rentedBy == customer.custID):
#print the name of the person who is overdue and why
print(customer.firstName, customer.lastName, "is", movie.daysOver,
"days overdue with movie:", movie.title)
print('\n')
""" Prints out information for one specific customer. Option 5 """
def printInvoice(customers, movies):
#check and see if the customers and movies lists are loaded
if len(customers) == 0:
print("\nCustomer file has not been loaded!\n")
if len(movies) == 0:
print("\nCustomer file has not been loaded!\n")
# Enter the name of the customer you are looking for
custFname = input("Enter the customer's first name: ")
custLname = input("Enter the customer's last name: ")
print("\n")
#variables to determine late fees
allDaysOver = 0
lateFee = 2.0
totalFees = 0.0
#search customer database to see if customer match exists
for customer in customers:
if(custFname == customer.firstName) and (custLname == customer.lastName):
#if a match, searches database for movie rented
for movie in movies:
#if customer code matches movie code, print customer and days overdue
if(customer.custID == movie.rentedBy):
print(movie.title, "is", movie.daysOver, "days overdue.")
#The days overdue for each movie are summed together
allDaysOver += movie.daysOver
break
# Late fees are determined by multiplying days by the daily late fee
totalFees = allDaysOver * lateFee
# If there are no fees, only this fact is printed
if(totalFees == 0):
print("\n",custFname, custLname, "has no late fees.\n")
else:
print(customer.firstName, customer.lastName, "owes", totalFees,
"in late fees.\n")
""" Helps customer rent a movie. Option 6 """
def rentFilm(customers, movies):
#check and see if the customers and movies lists are loaded
if len(customers) == 0:
print("\nCustomer file has not been loaded!\n")
if len(movies) == 0:
print("\nCustomer file has not been loaded!\n")
# Enter the name of the customer you are looking for
custFname = input("Enter the customer's first name: ")
custLname = input("Enter the customer's last name: ")
# Variables in case one needs to add a new customer
firstName = ""
lastName = ""
custID = ""
# Checks if customer is currently renting anything
renting = "yes"
# This saves me trouble by not asking for new customer information for every line in the customer database
check = 0
#searches customer database for customer
for customer in customers:
if(custFname == customer.firstName) and (custLname == customer.lastName):
print("Customer in database.\n")
check = 1
break
# If the customer is not in the database, they have to be entered in
if(check == 0):
print("New customer. Generating data now.")
firstName = custFname
lastName = custLname
# Each customer ID is their initials and four random numbers
custID = custFname[1] + custLname[1] + str(random.randint(1,10)) + str(random.randint(1,10)) + str(random.randint(1,10)) + str(random.randint(1,10))
#renting is already set to "yes", because you're not going to enter someone in a database if they aren't renting a movie.
customer = Customer(firstName, lastName, custID, renting)
customers.append(customer)
print("Customer data entered.\n")
# Resetting the check variable for reuse
check = 0
# variables for asking for a movie to rent
film = ""
rentfilm = 'y'
# variables if a new movie needs to be added
title = ""
ID = ""
genre = ""
# Checks to see if a movie is in or out.
out = ""
rentedBy = ""
# Days a movie has been rented out
daysOut = 0
# Days a movie is over the rental limit
daysOver = 0
asking = 0
while (rentfilm !='n') and (rentfilm != 'N'):
film = input("What movie is the customer renting? ")
print("\n")
for movie in movies:
if(film == movie.title):
if(movie.out == "yes"):
print("This movie is being rented already.\n")
check = 2
break
movie.out = "yes"
movie.rentedBy = customer.custID
check = 2
break
#if the movie isn't in the database, it's added in
if(check == 0):
print("That movie is not in the database. Preparing entry.")
title = film
# A movie id is its first three characters and five random numbers
ID = film[1:3] + str(random.randint(1,10)) + str(random.randint(1,10)) + str(random.randint(1,10)) + str(random.randint(1,10)) + str(random.randint(1,10))
asking = int(input("Is this film a 1. New Release, 2. Children's Film, or 3. General? \nEnter 1, 2, or 3. "))
if(asking == 1):
genre = "New Release"
elif(asking == 2):
genre = "Children's"
elif(asking == 3):
genre = "General"
else:
print("Invalid input. Assuming General.")
genre = "General"
rentedBy = customer.custID
#Other details unnecessary to change as they can be assumed.
movie = Movie(title, ID, genre, out, rentedBy, daysOut, daysOver)
movies.append(movie)
print("Film has been entered.")
rentfilm = input("Would you like to rent another movie? (y or n)")
""" Customer class to create customer objects """
class Customer:
def __init__(self, firstName, lastName, custID, renting):
self.firstName = firstName
self.lastName = lastName
self.custID = custID
self.renting = renting
def setFirst(self, firstName):
self.firstName = firstName
def setLast(self, lastName):
self.lastName = lastName
def setID(self, custID):
self.custID = custID
def setRenting(self, renting):
self.renting = renting
def getFirst(self):
return self.firstName
def getLast(self):
return self.lastName
def getID(self):
return self.custID
def getRenting(self):
return renting
""" Movie class to create movie objects """
class Movie:
def __init__(self, title, ID, genre, out, rentedBy, daysOut, daysOver):
self.title = title
self.ID = ID
self.genre = genre
self.out = out
self.rentedBy = rentedBy
self.daysOut = daysOut
self.daysOver = daysOver
def setTitle(self, title):
self.title = title
def setID(self, ID):
self.ID = ID
def setGenre(self, genre):
self.genre = genre
def setOut(self, out):
self.out = out
def setRentedBy(self, rentedBy):
self.rentedBy = rentedBy
def setDaysOut(self, daysOut):
self.daysOut = daysOut
def setDaysOver(self, daysOver):
self.daysOver = daysOver
def getTitle(self):
return self.title
def getID(self):
return self.ID
def getGenre(self):
return self.genre
def getOut(self):
return self.out
def getRentedBy(self):
return self.rentedBy
def getDaysOut(self):
return self.daysOut
def getDaysOver(self):
return self.daysOver
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
b209c4ca9943cdd93cd8c045a70591778461c70f | 893879ac757f0b623ce8e7335b2d73eeaadcdc08 | /app.py | f1b4f4e3b9841f5307c86e8df86ec61104552fa2 | [] | no_license | debika-samanta/ecommerce | 2336407baa4381c0cf22a12efd02951f4e7d9ad7 | 497199d9fee038fda2520ab3db512c4373e48193 | refs/heads/main | 2023-07-19T15:58:30.429861 | 2021-09-07T19:13:36 | 2021-09-07T19:13:36 | 404,094,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,080 | py | from flask import *
import sqlite3, hashlib, os
import Checksum
import requests
import random
from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = 'random string'
UPLOAD_FOLDER = 'static/uploads'
ALLOWED_EXTENSIONS = set(['jpeg', 'jpg', 'png', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY'] = '##onlyme'
trans_id = random.randint(1000000000,9999999999)
# initialize dictionary with request parameters
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
email = "hack.you.hard.3000@gmail.com"
cur.execute("SELECT userId FROM users WHERE email = ?", (email, ))
userId = cur.fetchone()[0]
cur.execute("SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = ?", (userId, ))
products = cur.fetchall()
totalPrice = 0
for row in products:
totalPrice += row[2]
global cost
cost = totalPrice
global person
person = userId
def getLoginDetails():
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
if 'email' not in session:
loggedIn = False
firstName = ''
noOfItems = 0
else:
loggedIn = True
cur.execute("SELECT userId, firstName FROM users WHERE email = ?", (session['email'], ))
userId, firstName = cur.fetchone()
cur.execute("SELECT count(productId) FROM kart WHERE userId = ?", (userId, ))
noOfItems = cur.fetchone()[0]
conn.close()
return (loggedIn, firstName, noOfItems)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/home")
def root():
loggedIn, firstName, noOfItems = getLoginDetails()
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute('SELECT productId, name, price, description, image, stock FROM products')
itemData = cur.fetchall()
cur.execute('SELECT categoryId, name FROM categories')
categoryData = cur.fetchall()
itemData = parse(itemData)
return render_template('home.html', itemData=itemData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryData=categoryData)
@app.route("/add")
def admin():
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT categoryId, name FROM categories")
categories = cur.fetchall()
conn.close()
return render_template('add.html', categories=categories)
@app.route("/addItem", methods=["GET", "POST"])
def addItem():
if request.method == "POST":
name = request.form['name']
price = float(request.form['price'])
description = request.form['description']
stock = int(request.form['stock'])
categoryId = int(request.form['category'])
#Uploading image procedure
image = request.files['image']
if image and allowed_file(image.filename):
filename = secure_filename(image.filename)
image.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
imagename = filename
with sqlite3.connect('database.db') as conn:
try:
cur = conn.cursor()
cur.execute('''INSERT INTO products (name, price, description, image, stock, categoryId) VALUES (?, ?, ?, ?, ?, ?)''', (name, price, description, imagename, stock, categoryId))
conn.commit()
msg="added successfully"
except:
msg="error occured"
conn.rollback()
conn.close()
print(msg)
return redirect(url_for('root'))
@app.route("/adding")
def addin():
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT categoryId, name FROM categories")
categories = cur.fetchall()
conn.close()
return render_template('adding.html', categories=categories)
@app.route("/addingItem", methods=["GET", "POST"])
def addinItem():
if request.method == "POST":
#Uploading image procedure
imagep = request.files['imagep']
if imagep and allowed_file(imagep.filename):
filename = secure_filename(imagep.filename)
imagep.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
imagepname = filename
imagem = request.files['imagem']
if imagem and allowed_file(imagem.filename):
filename = secure_filename(imagem.filename)
imagep.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
imagemname = filename
with sqlite3.connect('database.db') as conn:
try:
cur = conn.cursor()
cur.execute('''INSERT INTO productView (prope, mask) VALUES (?, ?)''', ( imagepname, imagemname))
conn.commit()
msg="added successfully"
except:
msg="error occured"
conn.rollback()
conn.close()
print(msg)
return redirect(url_for('root'))
@app.route("/remove")
def remove():
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute('SELECT productId, name, price, description, image, stock FROM products')
data = cur.fetchall()
conn.close()
return render_template('remove.html', data=data)
@app.route("/removeItem")
def removeItem():
productId = request.args.get('productId')
with sqlite3.connect('database.db') as conn:
try:
cur = conn.cursor()
cur.execute('DELETE FROM products WHERE productID = ?', (productId, ))
conn.commit()
msg = "Deleted successsfully"
except:
conn.rollback()
msg = "Error occured"
conn.close()
print(msg)
return redirect(url_for('root'))
@app.route("/displayCategory")
def displayCategory():
loggedIn, firstName, noOfItems = getLoginDetails()
categoryId = request.args.get("categoryId")
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT products.productId, products.name, products.price, products.image, categories.name FROM products, categories WHERE products.categoryId = categories.categoryId AND categories.categoryId = ?", (categoryId, ))
data = cur.fetchall()
conn.close()
categoryName = data[0][4]
data = parse(data)
return render_template('displayCategory.html', data=data, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems, categoryName=categoryName)
@app.route("/account/profile")
def profileHome():
if 'email' not in session:
return redirect(url_for('root'))
loggedIn, firstName, noOfItems = getLoginDetails()
return render_template("profileHome.html", loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)
@app.route("/account/profile/edit")
def editProfile():
if 'email' not in session:
return redirect(url_for('root'))
loggedIn, firstName, noOfItems = getLoginDetails()
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT userId, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone FROM users WHERE email = ?", (session['email'], ))
profileData = cur.fetchone()
conn.close()
return render_template("editProfile.html", profileData=profileData, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems)
@app.route("/account/profile/changePassword", methods=["GET", "POST"])
def changePassword():
if 'email' not in session:
return redirect(url_for('loginForm'))
if request.method == "POST":
oldPassword = request.form['oldpassword']
oldPassword = hashlib.md5(oldPassword.encode()).hexdigest()
newPassword = request.form['newpassword']
newPassword = hashlib.md5(newPassword.encode()).hexdigest()
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT userId, password FROM users WHERE email = ?", (session['email'], ))
userId, password = cur.fetchone()
if (password == oldPassword):
try:
cur.execute("UPDATE users SET password = ? WHERE userId = ?", (newPassword, userId))
conn.commit()
msg="Changed successfully"
except:
conn.rollback()
msg = "Failed"
return render_template("changePassword.html", msg=msg)
else:
msg = "Wrong password"
conn.close()
return render_template("changePassword.html", msg=msg)
else:
return render_template("changePassword.html")
@app.route("/updateProfile", methods=["GET", "POST"])
def updateProfile():
if request.method == 'POST':
email = request.form['email']
firstName = request.form['firstName']
lastName = request.form['lastName']
address1 = request.form['address1']
address2 = request.form['address2']
zipcode = request.form['zipcode']
city = request.form['city']
state = request.form['state']
country = request.form['country']
phone = request.form['phone']
with sqlite3.connect('database.db') as con:
try:
cur = con.cursor()
cur.execute('UPDATE users SET firstName = ?, lastName = ?, address1 = ?, address2 = ?, zipcode = ?, city = ?, state = ?, country = ?, phone = ? WHERE email = ?', (firstName, lastName, address1, address2, zipcode, city, state, country, phone, email))
con.commit()
msg = "Saved Successfully"
except:
con.rollback()
msg = "Error occured"
con.close()
return redirect(url_for('editProfile'))
@app.route("/loginForm")
def loginForm():
if 'email' in session:
return redirect(url_for('root'))
else:
return render_template('login.html', error='')
@app.route("/login", methods = ['POST', 'GET'])
def login():
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
if is_valid(email, password):
session['email'] = email
return redirect(url_for('root'))
else:
error = 'Invalid UserId / Password'
return render_template('login.html', error=error)
@app.route("/productDescription")
def productDescription():
loggedIn, firstName, noOfItems = getLoginDetails()
productId = request.args.get('productId')
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute('SELECT productId, name, price, description, image, stock FROM products WHERE productId = ?', (productId, ))
productData = cur.fetchone()
conn.close()
return render_template("productDescription.html", data=productData, loggedIn = loggedIn, firstName = firstName, noOfItems = noOfItems)
@app.route("/addToCart")
def addToCart():
if 'email' not in session:
return redirect(url_for('loginForm'))
else:
productId = int(request.args.get('productId'))
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT userId FROM users WHERE email = ?", (session['email'], ))
userId = cur.fetchone()[0]
try:
cur.execute("INSERT INTO kart (userId, productId) VALUES (?, ?)", (userId, productId))
conn.commit()
msg = "Added successfully"
except:
conn.rollback()
msg = "Error occured"
conn.close()
return redirect(url_for('root'))
@app.route("/cart")
def cart():
if 'email' not in session:
return redirect(url_for('loginForm'))
loggedIn, firstName, noOfItems = getLoginDetails()
email = session['email']
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT userId FROM users WHERE email = ?", (email, ))
userId = cur.fetchone()[0]
cur.execute("SELECT products.productId, products.name, products.price, products.image FROM products, kart WHERE products.productId = kart.productId AND kart.userId = ?", (userId, ))
products = cur.fetchall()
totalPrice = 0
for row in products:
totalPrice += row[2]
global cost
cost = totalPrice
return render_template("cart.html", products = products, totalPrice=totalPrice, loggedIn=loggedIn, firstName=firstName, noOfItems=noOfItems )
paytmParams = {
# Find your MID in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys
"MID" : "DIY12386817555501617",
# Find your WEBSITE in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys
"WEBSITE" : "WEBSTAGING",
# Find your INDUSTRY_TYPE_ID in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys
"INDUSTRY_TYPE_ID" : "Retail",
# WEB for website and WAP for Mobile-websites or App
"CHANNEL_ID" : "WEB",
# Enter your unique order idbKMfNxPPf_QdZppa
"ORDER_ID" : str(trans_id),
# unique id that belongs to your customer
"CUST_ID" : str(person),
# customer's mobile number
"MOBILE_NO" : " ",
# customer's email
"EMAIL" : "ddsahjgjkm@gmail.com",
# Amount in INR that is payble by customer
# this should be numeric with optionally having two decimal points
"TXN_AMOUNT" : str(cost),
# on completion of transaction, we will send you the response on this URL
"CALLBACK_URL" : "http://127.0.0.1:5000/pay_status",
}
# Generate checksum for parameters we have
# Find your Merchant Key in your Paytm Dashboard at https://dashboard.paytm.com/next/apikeys
checksum = Checksum.generate_checksum(paytmParams, "bKMfNxPPf_QdZppa")
# for Staging
url = "https://securegw-stage.paytm.in/order/process"
@app.route('/paytm')
def paymentgateway():
return render_template('payment.html',paytmParams=paytmParams,checksum=checksum)
@app.route('/pay_status', methods=['GET','POST'])
def paystatus():
paytmChecksum = {}
data = {}
if request.method == 'POST' :
data = request.form
for key, value in data.items():
if key == 'CHECKSUMHASH':
paytmChecksum = value
else:
paytmParams[key] = value
isValidChecksum = Checksum.verify_checksum(paytmParams, "bKMfNxPPf_QdZppa", paytmChecksum)
if isValidChecksum:
print("Checksum Matched")
else:
print("Checksum Mismatched")
return render_template("success.html",data=data)
@app.route("/removeFromCart")
def removeFromCart():
if 'email' not in session:
return redirect(url_for('loginForm'))
email = session['email']
productId = int(request.args.get('productId'))
with sqlite3.connect('database.db') as conn:
cur = conn.cursor()
cur.execute("SELECT userId FROM users WHERE email = ?", (email, ))
userId = cur.fetchone()[0]
try:
cur.execute("DELETE FROM kart WHERE userId = ? AND productId = ?", (userId, productId))
conn.commit()
msg = "removed successfully"
except:
conn.rollback()
msg = "error occured"
conn.close()
return redirect(url_for('root'))
@app.route("/logout")
def logout():
session.pop('email', None)
return redirect(url_for('root'))
def is_valid(email, password):
con = sqlite3.connect('database.db')
cur = con.cursor()
cur.execute('SELECT email, password FROM users')
data = cur.fetchall()
for row in data:
if row[0] == email and row[1] == hashlib.md5(password.encode()).hexdigest():
return True
return False
@app.route("/register", methods = ['GET', 'POST'])
def register():
if request.method == 'POST':
#Parse form data
password = request.form['password']
email = request.form['email']
firstName = request.form['firstName']
lastName = request.form['lastName']
address1 = request.form['address1']
address2 = request.form['address2']
zipcode = request.form['zipcode']
city = request.form['city']
state = request.form['state']
country = request.form['country']
phone = request.form['phone']
with sqlite3.connect('database.db') as con:
try:
cur = con.cursor()
cur.execute('INSERT INTO users (password, email, firstName, lastName, address1, address2, zipcode, city, state, country, phone) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (hashlib.md5(password.encode()).hexdigest(), email, firstName, lastName, address1, address2, zipcode, city, state, country, phone))
con.commit()
msg = "Registered Successfully"
except:
con.rollback()
msg = "Error occured"
con.close()
return render_template("login.html", error=msg)
@app.route("/registerationForm")
def registrationForm():
return render_template("register.html")
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def parse(data):
ans = []
i = 0
while i < len(data):
curr = []
for j in range(7):
if i >= len(data):
break
curr.append(data[i])
i += 1
ans.append(curr)
return ans
if __name__ == '__main__':
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
1aca92db74813603bfe7e43da5741411d701c292 | a1328f6937a9105faf060d21101c34c1f15da1bd | /sintatico.py | f4f22265337ad4fc945e84c20f75ecbbb29becfd | [] | no_license | HarielGiacomuzzi/TrabFinalCompiladores | a0a623bdec791b904f21f3efc7bba3d7281c2d46 | b01644b4e94bca4997f5d2eaaf1c7f3d88f89c83 | refs/heads/master | 2020-06-23T07:44:09.482089 | 2016-11-27T15:24:40 | 2016-11-27T15:24:40 | 74,660,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | # ------------------------------------------------------------
# sintatico.py
# Nomes: Hariel G., Lucas Teixeira, Bruno Kieling
# Descricao: Trabalho da disciplina de compiladores de 2016/2
# ------------------------------------------------------------
import ply.yacc as yacc
# Get the token map from the lexer. This is required.
from lex import tokens
import logging
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
precedence = (
('nonassoc', 'LESSTHAN', 'GREATERTHAN'), # Nonassociative operators
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('left', 'LESSTHAN', 'GREATERTHAN'),
('right', 'ATTR', 'TIMESEQUAL', 'PLUSEQUAL'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('left', 'EXP'),
('right', 'UMINUS'),
)
# dictionary of names
names = { }
def p_STATMENT(p):
'''STATMENT : EXPR1
| BLOCK
| CMD
| FORS
| DEFINES
| LID'''
def p_EXPR1_EXPR_OPER_EXPR(p):
'''EXPR1 : EXPR OPER EXPR'''
if p[2] == '+':
p[0] = p[1] + p[3]
elif p[2] == '-':
p[0] = p[1] - p[3]
elif p[2] == '*':
p[0] = p[1] * p[3]
elif p[2] == '/':
p[0] = p[1] / p[3]
elif p[2] == '^':
p[0] = p[1] ** p[3]
elif p[2] == '=':
p[0] = p[1] = p[3]
def p_EXPR_NUMBER(p):
'''EXPR : NUMBER'''
p[0] = p[1]
def p_EXPR_ID_ATTR_EXPR(t):
'''EXPR : ID ATTR NUMBER
| ID ATTR BOOL'''
names[t[1]] = t[3]
t[0] = t[3]
def p_EXPR_ID(p):
'''EXPR : ID'''
try:
p[0] = names[p[1]]
except LookupError:
print("Undefined name '%s'" % p[1])
p[0] = 0
def p_EXPR_LPAREN_EXPR_RPAREN(p):
'''EXPR : LPAREN EXPR RPAREN'''
p[0] = p[2]
def p_EXPR_PLUSEQUAL_EXPR(p):
'''EXPR : EXPR PLUSEQUAL EXPR'''
p[0] = p[1] + p[3]
def p_EXPR_MINUSEQUAL_EXPR(p):
'''EXPR : EXPR MINUSEQUAL EXPR'''
p[0] = p[1] - p[3]
def p_EXPR_TIMESEQUAL_EXPR(p):
'''EXPR : EXPR TIMESEQUAL EXPR'''
p[0] = p[1] * p[3]
def p_LID(p):
'''LID : ID'''
p[0] = p[1]
def p_BLOCK(p):
'''BLOCK : LCURLYBRACKETS CMD RCURLYBRACKETS'''
#TODO
pass
def p_OPER(p):
'''OPER : PLUS
| MINUS
| DIVIDE
| TIMES
| LESSTHAN
| GREATERTHAN
| ATTR'''
p[0] = p[1]
def p_CMD(p):
'''CMD : EXPR SEMICOLON
| EXPR SEMICOLON CMD
| empty'''
pass
def p_DEFINES(p):
'''DEFINES : DEFINE ID LPAREN LID RPAREN BLOCK'''
temp = names.get(p[2] , None)
if temp != None:
evaluate(temp)
elif:
names[p[2]] = ('define', p[4], p[6])
def p_FORS(p):
# for(a=0;a<10;a+=1){a+=2;}
'''FORS : FOR LPAREN EXPR SEMICOLON EXPR SEMICOLON EXPR RPAREN BLOCK'''
p[0] = ('for', p[3], p[5], p[7], p[9])
evaluate(p[0])
# Error rule for syntax errors
def p_error(p):
print("Syntax error in input!", p)
# Empty rule for the sake of needing
def p_empty(p):
'empty :'
pass
def evaluate(lst):
if(lst[0] == 'for'):
for i in range(lst[1],lst[2], lst[3]):
print lst[4]
# Build the parser
parser = yacc.yacc(tabmodule='parsingTable', debug=True, debuglog=log, errorlog=log)
while True:
try:
s = raw_input('calc > ')
except EOFError:
break
if not s: continue
if(s == '#help'):
print('#############')
print('# Manual #')
print('#############')
print('''Examples
1+1 Input
2 Result
1 / 3 Input
.33333333333333333333 Result
4 * (6 + 7) Input
52 Result''')
continue
if('#save' in s):
arquivo = open(s.split()[1], 'w+')
for chave,valor in names.items():
arquivo.write(chave+' : '+str(valor)+'\n')
arquivo.close()
continue
if('#load' in s):
fileName = s.split()[1]
arquivo = open(fileName, 'r')
dados = arquivo.read()
result = parser.parse(dados, debug=log)
print(result)
continue
if('#show_all' in s):
for coisa in names:
print(coisa)
continue
if('#show' in s):
ident = s.split()[1]
print(names[ident])
continue
result = parser.parse(s, debug=log)
print(result)
| [
"hariel.dias@acad.pucrs.br"
] | hariel.dias@acad.pucrs.br |
81cfaf449aea22d62d3f5757db4b251b7c51b0c9 | bfb00b910ae3edc6d4b9dae247135788bd5f3ee3 | /exp/guitests/tked.py | 6785058e232059ff47cc92edcd482337ecdef336 | [] | no_license | jc-gonzalez/arestools | 2005e911e2e4809e0ee0981983fc81b9755fccd3 | dccaac8c0dad6682d11d0b702046ad90bd1f3f8f | refs/heads/master | 2020-03-18T09:44:21.916104 | 2019-01-09T10:56:33 | 2019-01-09T10:56:33 | 134,578,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import sys
from tkinter import *
import tkinter.TkFileDialog
root=Tk("Text Editor")
text=Text(root)
text.grid()
def saveas():
global text
t = text.get("1.0", "end-1c")
savelocation=tkFileDialog.asksaveasfilename()
file1=open(savelocation, "w+")
file1.write(t)
file1.close()
button=Button(root, text="Save", command=saveas)
button.grid()
def FontHelvetica():
global text
text.config(font="Helvetica")
def FontCourier():
global text
text.config(font="Courier")
font=Menubutton(root, text="Font")
font.grid()
font.menu=Menu(font, tearoff=0)
font["menu"]=font.menu
Helvetica=IntVar()
arial=IntVar()
times=IntVar()
Courier=IntVar()
font.menu.add_checkbutton(label="Courier", variable=Courier,
command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=helvetica,
command=FontHelvetica)
root.mainloop()
| [
"JCGonzalez@sciops.esa.int"
] | JCGonzalez@sciops.esa.int |
04398fb29841e18b9505fe74de19ad29fe08b860 | 7a527060afabd2e0867d5dcf4b75592b43ef5005 | /Leetcode/二叉树/103. 二叉树的锯齿形层次遍历.py | d361d16ca05d5ccb931c8c609b61586d0b68b318 | [] | no_license | Stevenzzz1996/MLLCV | ff01a276cf40142c1b28612cb5b43e563ad3a24a | 314953b759212db5ad07dcb18854bf6d120ba172 | refs/heads/master | 2023-02-10T18:11:30.399042 | 2021-01-05T12:05:21 | 2021-01-05T12:05:21 | 267,804,954 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/5/7 15:01
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if not root: return []
res = []
cur = [root]
depth = 0
while cur:
tmp = []
next_level = []
for node in cur:
tmp.append(node.val)
if node.left:
next_level.append(node.left)
if node.right:
next_level.append(node.right)
if depth % 2 == 1:
res.append(tmp[::-1])
else:
res.append(tmp)
depth += 1
cur = next_level
return res | [
"2499143041@qq.com"
] | 2499143041@qq.com |
080728dd79f7bff9d345033a81fe4b83e3180222 | 4e47bb6c804150f8be2c7aee96718c8347765cf8 | /sample.py | bfd316339170a9eddb2844089423ec1a214dfd3a | [] | no_license | vvasuki/misc-python | 89955529b32bf32cf06ab726319a2ccbb6e6accb | 5d6d53bfec0dc1f85c24bb5e0cf6e2fcec31a389 | refs/heads/master | 2022-12-09T14:30:20.149062 | 2022-12-01T04:14:17 | 2022-12-01T04:14:17 | 149,946,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,495 | py | #! /usr/bin/python
#easy to use python documentation.. intended for reference and reuse of source code (sample code) slices.
#for help: install python-docs package.
#see this then: file:///usr/share/doc/python-docs-2.4.1/html/tut/tut.html
#to enter interactive mode, type: python
#to exit python shell: EOF character .. ^d
#you can set an environment variable named PYTHONSTARTUP to the name of a file containing your start-up commands.
#interpreter can act as a calculator
#arithmatic operators as in c.
#>>> width = 20
#>>> height = 5*9
#>>> width * height
#900
#9+_ #note underscore (implicit variable)
#909
#complex numbers too
#>>> 1j * 1J
#(-1+0j)
#>>> 1j * complex(0,1)
#(-1+0j)
#>>> a=1.5+0.5j
#>>> a.real
#1.5
#>>> a.imag #that is how you print in interactive mode.. directly quote the variable.
#0.5
#"python -c command [arg] ..."
#"python -m module [arg] ...", which executes the source file for module
#"python file" and "python <file" are different..
#in that the former gets input from stdin.
#sys.argv, a list of strings has the script name and additional arguments from shell.
#no arguments are given,
#sys.argv[0] is an empty string.
#When the script name is given as '-' (meaning standard input), sys.argv[0] is set to '-'.
#When -c command is used, sys.argv[0] is set to '-c'.
#When -m module is used, sys.argv[0] is set to the full name of the located module.
#There are six sequence types: strings, Unicode strings, lists, tuples, buffers, and xrange objects.
#lists are like: [a, b, c]
#tuples are like: a, b, c or () or (d,)
#Buffer objects are not directly supported by Python syntax, but can be created by calling the builtin function buffer().
#Xrange objects are similar to buffers in that there is no specific syntax to create them,
#but they are created using the xrange() function.
#general sequence operators:
#in, not in, +, *, s[i], s[i:j], s[i:j:k], len, min, max
lstTmp = [[]] * 3
#>>> lists
#[[], [], []]
#>>> lists[0].append(3)
#>>> lists
#[[3], [3], [3]]
lstTmp[0:2] = [] #removed elements.. size of list changable. elemensts replacable too.
#functions on lists:
#append extend insert remove(if the arg is matched) pop(can take args) index count sort reverse
#an inbuilt function to make list of numbers:
rngTmp=range(4)
rngTmp=range(2,8)
iTmp=1
iTmp,iTmp1=1,1
if iTmp:
#indentation is necessary for blocks in python
strTmp="iTmp is 1"
print strTmp, " ", iTmp
strTmp='yeah, both single and double quotes can encapsulate strings.\n\
yeah, note the continuation of the string into the next line.'
print strTmp
#any non-zero integer value is true; zero is false.
#The condition may also be a string or list value, in fact any sequence;
#anything with a non-zero length is true, empty sequences are false.
#comparison operators as in C.
strTmp=r'this is a raw string \
oye. it works thus.'
strTmp="""
another way of writing multiline strings.
"""
strTmp='''
yet another way of writing multiline strings.
'''
strTmp="""
look at this piece of string concatenation!
""" "oye. write them side by side.\n" + "or use the '+' sign\n"+ "muaddib "*5
print strTmp
#slice notation: strTmp[0], strTmp[2,5]
#strTmp[:5] and strTmp[0,5] are the same.
#>>> word[-1] # The last character.. from the right. a negative index is used.
#strTmp[0]='p' is not allowed.
#>>> 'x' + word[1:]
#'xelpA'
#is ok.
#degenerate slices are handled gracefully:
#word='HelpA'
#>>> word[1:100]
#'elpA'
#>>> word[10:]
#''
#>>> word[2:1]
#''
#>>> word[-100:]
#'HelpA'
#>>> word[-10] # error
ustrTmp= u' a unicode \u0020 string !'
#u'a unicode string !'
#the lower 256 characters of Unicode are the same as the 256 characters of Latin-1.
#Codecs can convert are Latin-1, ASCII, UTF-8, and UTF-16.
ustrTmp.encode('utf-8')
print ustrTmp
#string formatting options
strTmp="string formatting or interpolation operator %% is like %(familiarFunction)s" \
%{'familiarFunction':"sprintf()"}
print strTmp;
#the following options may be used in %(varName)[formatting]option:
# d i o u x X e E f F g G c %
# r s (for python objects, using repr and str functions)
#
#the following are string related functions:
#strip() len() capitalize() lower() swapcase() l/rjust() center() l/rstrip() title()
#join(sequenceOfStrings) [r]split(delimiter) splitlines()
#[r]find () count(substr[,start,end]) [r]index() translate(table[, deletechars])
#endswith() startswith()
#isalnum() isalpha() isdigit() islower() isspace() isupper() istitle()
#zfill()
#str(), unicode(), float(), int() and long() convert among datatypes
#decision statements: if, else, elif
#looping:
#while looping: while a<b:
#for statement iterates over the items of any sequence: for x in ['cat', 'window', 'defenestrate']:
#iterate over a sequence of numbers: use for with range.
#looping constructs can have else clauses.
#break and continue are as in C.
def function(iTmp):
#reference to the argument is passed.
#default value may be optionally specified..
#it is the value evaluated at the time of making of the function object.
"this is the function's optional docstring"
print "oye, a function was defined here."
#global variables cannot be directly assigned a value within a function
#(unless named in a global statement), although they may be referenced.
#unless the function explicitly returns something,
#it returns None object.
if iTmp:
return [iTmp]
else:
return
print function.__doc__
#a function is actually an object in the global namespace too.
#function can be referenced only after it is defined... "interpreter".. remember?
print function
print function(0), function(1)
iTmp = 5
def function(arg=iTmp):
print arg
iTmp = 6
#default is evaluated only once. rest of the calls, it is shared...
#to be expected. for the default is filled in when the function object is created.
function() #printeth 5
def function(a, L=[]):
L.append(a)
return L #L has scope only within this here block
print function(1)
print function(2)
print function(3)
print function(1,[])
print function(3) #hehe. [1, 2, 3, 3]
#the above function behaved thusly because the default was a mutable object..
#not an immutable one.. like below.
def function(a, L=None):
if L is None:
L = []
L.append(a)
return L
#keyword arguments.
def function(arg1,arg2='ole',arg3='jo'):
pass #this is an empty statement.
print arg1
function(arg2=99, arg1=0231)
#all functions accept a tuple of arguments in place of passing a literal unpacked sequence.
#the contents of the literal tuple,
#though they may contain references to objects,
#are themselves passed by value.
tupTmp=(0231,99)
function(*tupTmp)
#the * operator unpacks the tuple
#variable number of arguments may be passed as below.
#they may be passed in the form of a tuple of arguments, and
#also as a dictionary (hashtable) of arguments.
def function(arg, *argTuple, ** argDictionary):
#see how a for loop is used with a tuple
for argentum in argTuple: pass
#see how argDictioary is used, and notice the use of the dictionary method keys:
keynen = argDictionary.keys()
#see that the sequence keynen has a method called sort
keynen.sort()
function("sa","asdfa","sdf","asdff",
god="allah",
prophet="mohammed")
#lambda forms from Lisp.. functions used to make function objects
def function(arg):
return lambda argLm: arg+argLm
#Like nested function definitions, lambda forms can reference variables from the containing scope
fnTmp=function(strTmp)
print "lambda land ", fnTmp("sdf")
| [
"vishvas.vasuki@gmail.com"
] | vishvas.vasuki@gmail.com |
ae1ccc2844603ea4ab992078b3a8bb73d08eda11 | 1f3d604864a9ea0d704d7d0303e1ecfc7546c3f2 | /setup.py | 5441667f05ab10464e5610b529e1b3ea866d4a79 | [] | no_license | montaglue/Jarvis | 581e924668c2e65d0b3913d3df70f3adca32a871 | fd6c76479965db6a98f1bc336c77546ad8c95508 | refs/heads/master | 2023-08-01T17:52:16.124488 | 2021-09-02T10:57:45 | 2021-09-02T10:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from setuptools import setup, find_packages
def read_requirements():
with open('requirements.txt', 'r') as req:
content = req.read()
requirements = content.split('\n')
return requirements
setup(
name='jarvis',
version='0.1.0',
packages=find_packages(),
include_package_date=True,
install_requires=read_requirements(),
entry_points="""
[console_scripts]
jarvis=jarvis.main:main
""",
)
| [
"kletska.epa@hotmail.com"
] | kletska.epa@hotmail.com |
9269f28f522d0d9b3083bf0059d3b6ed41848195 | d67ae1b2f20d96b7e36c82c3a298882042c951c5 | /src/asyncf.py | 05a406b445e0dbcbd7eb0341c1360003b928bcfe | [
"MIT"
] | permissive | Vistaus/my-weather-indicator | 8a99e69fd9d2c03ab5cca578a89da38d6676a5ab | 32aaa77a14cf2f85edbfb72c45d154e1676abe83 | refs/heads/master | 2021-01-02T12:00:00.506304 | 2020-02-11T19:42:47 | 2020-02-11T19:42:47 | 239,614,123 | 0 | 0 | MIT | 2020-02-10T21:11:07 | 2020-02-10T21:11:06 | null | UTF-8 | Python | false | false | 2,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# async.py
#
# This file is part of uPodcatcher
#
# Copyright (C) 2014
# Lorenzo Carbonell Cerezo <lorenzo.carbonell.cerezo@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
try:
gi.require_version('GLib', '2.0')
except Exception as e:
print(e)
exit(1)
from gi.repository import GLib
import threading
import traceback
__all__ = ['async_function']
def _async_call(f, args, kwargs, on_done):
def run(data):
f, args, kwargs, on_done = data
error = None
result = None
try:
result = f(*args, **kwargs)
except Exception as e:
e.traceback = traceback.format_exc()
error = 'Unhandled exception in asyn call:\n{}'.format(e.traceback)
GLib.idle_add(lambda: on_done(result, error))
data = f, args, kwargs, on_done
thread = threading.Thread(target=run, args=(data,))
thread.daemon = True
thread.start()
def async_function(on_done=None):
'''
A decorator that can be used on free functions so they will always be
called asynchronously. The decorated function should not use any resources
shared by the main thread.
Example:
def do_async_stuff(self, input_string):
def on_async_done(result, error):
# Do stuff with the result and handle errors in the main thread.
if error:
print(error)
elif result:
print(result)
@async_function(on_done=on_async_done)
def do_expensive_stuff_in_thread(input_string):
# Pretend to do expensive stuff...
time.sleep(10)
stuff = input_string + ' Done in a different thread'
return stuff
do_expensive_stuff_in_thread(input_string)
'''
def wrapper(f):
def run(*args, **kwargs):
_async_call(f, args, kwargs, on_done)
return run
return wrapper
| [
"lorenzo.carbonell.cerezo@gmail.com"
] | lorenzo.carbonell.cerezo@gmail.com |
6697e58f58dc6dc054679c72808f91d06415102d | 88ea7bf2bbc8ffba551e881df553ae5ceac70dd6 | /deblock/codes/models/archs/archs_sub/SRResNet_o2m_spectral_arch.py | 83e92751b2e71f264c06bc251d1ed9cc9b2e4680 | [
"Apache-2.0"
] | permissive | zhouhuanxiang/repo-zhx | 2d1135bb2f925e051e1b0bcfc2ed53fb34ea51c5 | 76b577eea13130c60bf7bff8c486f51766128661 | refs/heads/main | 2023-06-10T02:56:17.978649 | 2021-06-29T02:35:57 | 2021-06-29T02:35:57 | 381,213,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
class ResidualBlock_Spectral_withZ(nn.Module):
'''Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
'''
def __init__(self, ni=65, no=64):
super(ResidualBlock_Spectral_withZ, self).__init__()
self.conv1 = nn.utils.spectral_norm(nn.Conv2d(ni, ni, 3, 1, 1, bias=True))
self.conv2 = nn.utils.spectral_norm(nn.Conv2d(ni, no, 3, 1, 1, bias=True))
# initialization
arch_util.initialize_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = F.relu(self.conv1(x), inplace=True)
out = self.conv2(out)
return identity[:, :out.shape[1], :, :] + out
class MSRResNet(nn.Module):
''' modified SRResNet'''
def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
super(MSRResNet, self).__init__()
self.upscale = upscale
self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
# basic_block = functools.partial(ResidualBlock_noBN_withZ, nf=nf)
# self.recon_trunk = arch_util.make_layer(basic_block, nb)
self.recon_trunk = nn.ModuleList([ResidualBlock_Spectral_withZ(nf + 1, nf) for i in range(nb)])
# upsampling
self.upconv1 = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
self.HRconv = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# initialization
arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],
0.1)
def forward(self, x, z):
out = self.lrelu(self.conv_first(x))
# out = self.recon_trunk(fea)
for layer in self.recon_trunk:
out = layer(torch.cat((out, z), dim=1))
out = self.lrelu(self.upconv1(torch.cat((out, z), dim=1)))
out = self.conv_last(self.lrelu(self.HRconv(torch.cat((out, z), dim=1))))
base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
if out.shape[1] == base.shape[1]:
out += base
else:
out += base[:, :3, :, :]
return out
| [
"zhouhx.cn@gmail.com"
] | zhouhx.cn@gmail.com |
dd6d06fac46c83117095da0090eca13ba7a19033 | 317f6801e3c539c1eb3dcc9575d895e5f257c17d | /web/django_exampl/nr1/meth/urls.py | c4777a6e9cf74c8008d3db886c88243ae9f0e4c0 | [] | no_license | Barbara0070/barbara | b2773fe8416e7560103e5eb24c9a334134025a17 | c8ee3892717ee1378d9d94d6e38b5088c7de679d | refs/heads/master | 2020-04-10T09:08:17.511557 | 2019-03-03T15:46:35 | 2019-03-03T15:46:35 | 160,927,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12 | py | #from django | [
"w2ertygf21@gmail.com"
] | w2ertygf21@gmail.com |
8f52540d0566f9d51d85025745e66d4f7753250d | 1f74cbca452ec1ddec3da61cf1ea16b2b92b0088 | /Assignment 2/miditoaudio.py | eeb73f59bf04ae5b63a941647560441ef6168564 | [] | no_license | SmellingSalt/Statistical-Pattern-Recognition-Assignments | ea911bb8349fc24ded251e2cddc6b5230bf4f23b | 3d70687c5632ee06b1c19ed9fdaf749afdf3d105 | refs/heads/master | 2021-07-17T12:20:09.931474 | 2021-02-01T11:31:29 | 2021-02-01T11:31:29 | 236,273,314 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import os
from glob import glob
# convert_command=timidity
# midi_file=0AuraLee.mid
# -Ow -o - | lame - -b 64
# mp3_file=0AuraLee2.mp3
path_to_midi_data=os.path.relpath(os.path.join('MusicDatasets/Beethoven/MIDIFILES'))
midi_data_files_list=glob(os.path.join(path_to_midi_data, "*.mid"))
mp3_data_path=os.path.relpath(os.path.join('MusicDatasets/Beethoven/MP3FILES'))
i=0
for midi_file in midi_data_files_list:
# convert_command="timidity "+midi_file+" -Ow -o - | lame - -b 64 "+mp3_data_path+"/"+str(i)+".mp3"
# print(convert_command)
# os.system(convert_command)
convert_command="timidity "+midi_file+" -Ow "+mp3_data_path+"/"+str(i)+".wav"
os.system(convert_command)
i+=1
print("\n Finished Converting ",i," of ",len(midi_data_files_list))
# os.system("ls")
| [
"ssmahara96@gmail.com"
] | ssmahara96@gmail.com |
b40bac9713b087f67ca3260d194ce949da4c8dae | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/nn/modules/math_ops.py | a16ddf4555f82be980156024d8fa893e24247691 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 7,845 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.modules.module import Module
from oneflow.nn.modules.utils import _check_axis
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
def asin_op(input):
"""
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sin^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)
>>> output = flow.asin(input)
>>> output.shape
oneflow.Size([4])
>>> output
tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)
>>> output1 = input1.asin()
>>> output1.shape
oneflow.Size([2, 2])
>>> output1
tensor([[ 0.9273, 1.5708],
[-0.6435, -1.5708]], dtype=oneflow.float32)
"""
return flow._C.asin(input)
def arcsin_op(input):
"""
Alias for :func:`oneflow.asin`
"""
return flow._C.asin(input)
def asinh_op(input):
"""
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sinh^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)
>>> output = flow.asinh(input)
>>> output.shape
oneflow.Size([3])
>>> output
tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)
>>> output1 = input1.asinh()
>>> output1.shape
oneflow.Size([2, 3])
>>> output1
tensor([[-0.8814, 0.0000, -0.3900],
[ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)
"""
return flow._C.asinh(input)
def arcsinh_op(input):
"""
Alias for :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def asinh_op_tensor(input):
"""
See :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def inplace_sin_op_tensor(input):
"""
In-place version of :func:`oneflow.sin`
"""
return flow._C.sin_(input)
def atan_op(input):
"""
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\tan^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)
>>> output = flow.atan(input)
>>> output.shape
oneflow.Size([3])
"""
return flow._C.atan(input)
def arctan_op(input):
"""
Alias for :func:`oneflow.atan`
"""
return flow._C.atan(input)
def fmod_op(input, other):
"""
fmod(input, other, *, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both for integer and floating point
numbers. The remainder has the same sign as the dividend :attr:`input`.
Supports broadcasting to a common shape, integer and float inputs.
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> import oneflow as flow
>>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2.)
tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.]), 1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))
tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)
"""
return flow._C.fmod(input, other)
def addmm(x, mat1, mat2, alpha=1, beta=1):
if len(x.shape) > 2 or len(mat1.shape) > 2 or len(mat2.shape) > 2:
raise ValueError("input matrixes shape can not be greater than 2")
else:
return flow.mul(x, beta) + flow.mul(flow._C.matmul(mat1, mat2), alpha)
def addmm_op(input, mat1, mat2, alpha=1, beta=1):
"""addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \\times m)` tensor, :attr:`mat2` is a
:math:`(m \\times p)` tensor, then :attr:`input` must be
broadcastable with a :math:`(n \\times p)` tensor
and :attr:`out` will be a :math:`(n \\times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\\text{out} = \\beta\\ \\text{input} + \\alpha\\ (\\text{mat1}_i \\mathbin{@} \\text{mat2}_i)
For inputs of type `float` or `double`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\\beta`)
input (Tensor): matrix to be added
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\\alpha`)
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
out (Tensor, optional): the output tensor.
For example:
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]]))
>>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]]))
>>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]]))
>>> output = flow.addmm(input, mat1, mat2)
>>> output
tensor([[100.6800, 33.8300, 126.8700],
[110.0100, 43.4800, 133.6100]], dtype=oneflow.float64)
>>> output.shape
oneflow.Size([2, 3])
>>> input2 = flow.tensor(np.array([1.7]))
>>> mat1 = flow.tensor(np.array([[1,2],[5,9.1],[7.7,1.4]]))
>>> mat2 = flow.tensor(np.array([[1,2,3.7],[5,9.1,6.8]]))
>>> output2 = flow.addmm(input2, mat1, mat2, alpha=1, beta=2)
>>> output2
tensor([[14.4000, 23.6000, 20.7000],
[53.9000, 96.2100, 83.7800],
[18.1000, 31.5400, 41.4100]], dtype=oneflow.float64)
>>> output2.shape
oneflow.Size([3, 3])
"""
return addmm(input, mat1, mat2, alpha, beta)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"noreply@github.com"
] | noreply@github.com |
71e42b525adbc2a63b17f6e3b79db5de95134301 | ff44ca1b092e10f9c7e4eb347be7d641b1c574e5 | /preprocessing/imagetoarraypreprocessor.py | bf676b248e144f49ceca10d19abc3eac0c65933b | [] | no_license | RobinZhang2020/DP4CV | 940a0e3b39a6353514890289bcfb35129e3b3706 | 388f4ff11fcd258fb8f103997adc505626bfae71 | refs/heads/master | 2023-03-21T05:18:11.675364 | 2021-03-09T07:45:59 | 2021-03-09T07:45:59 | 341,842,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from keras.preprocessing.image import img_to_array
class ImageToArrayPreprocessor:
def __init__(self,dataFormat=None):
self.dataFormat=dataFormat
def preprocess(self,image):
return img_to_array(image,data_format=self.dataFormat)
| [
"704805264@qq.com"
] | 704805264@qq.com |
83f9cbcb975b846ee5c5d189eda62303064a4f46 | 68f962de0af192f098b35b93d50e258276c70a09 | /pandastest.py | a2cdf84d90f55bb94b9f03442d13dfccd7ea6b3c | [] | no_license | judewellsbbk/PwD | cc7592448ead700553bc13677e3517835bd048f8 | 56947989cbc09f2405a6d1003762a2d4933f46d0 | refs/heads/master | 2020-03-15T11:36:38.203228 | 2018-05-05T18:18:17 | 2018-05-05T18:18:17 | 132,124,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | import numpy as np
import pandas as pd
def main():
my_dict = {'A': [1, 2], 'B': ['John', 4]}
my_data_frame = pd.DataFrame(data=my_dict)
print my_data_frame
print my_data_frame.dtypes
# a further demo
my_df = pd.DataFrame(some_filling(), columns=['A', 'B', 'C', 'D', 'E'])
print my_df
def some_filling():
""" returns a 5x5 matrix of random integers from [low, high)
"""
return np.random.randint(low=0, high=10, size=(5, 5))
main() | [
"jwells03@dcs.bbk.ac.uk"
] | jwells03@dcs.bbk.ac.uk |
7ff60d6b1a6c4d5c8feddfc8a81bafcaf4a303a3 | 4fa314e5f240beff11e63062293460f613c2fbdb | /lake_monster/environment/render.py | ee77fb61d5a404ccb6973e5beabf9b576ab9fc85 | [
"MIT"
] | permissive | zebengberg/lake-monster | 9c575e40405e46defa103f12324208f552915783 | d1158e3e1b46666c310cf50ce778a327a321a9e5 | refs/heads/main | 2023-02-23T10:43:56.611480 | 2021-01-25T21:13:03 | 2021-01-25T21:13:03 | 307,154,368 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,856 | py | """Utility functions for rendering an environment and displaying an episode in video."""
import numpy as np
from PIL import Image, ImageDraw
SIZE = 480
CENTER = SIZE // 2
RADIUS = 200
RED = (250, 50, 0)
BLACK = (0,) * 3
GREEN = (40, 200, 40)
def coords_to_rect(coords, radius=8):
"""Convert environment coordinates to PIL rectangle coordinates."""
x, y = coords
y *= -1
x, y = CENTER + RADIUS * x, CENTER + RADIUS * y
return x - radius, y - radius, x + radius, y + radius
def angle_to_rect(angle):
  """Bounding box for the unit-circle point at *angle* (radians)."""
  return coords_to_rect((np.cos(angle), np.sin(angle)))
def vector_to_rect(vector):
  """Bounding box (scaled by 50) for an action vector, anchored near the
  top-left marker position."""
  dx, dy = 50 * vector
  corner = CENTER - RADIUS
  return corner - dx, corner - dy, corner + dx, corner + dy
def arrow_segments(vector):
  """Return line segments (shaft plus two head strokes) depicting the last
  movement as an arrow near the frame's top-left."""
  # shaft of the arrow
  sx, sy = 40 * vector
  ax = CENTER - RADIUS + 10
  ay = CENTER - RADIUS + 10
  segments = [(ax - sx, ay + sy, ax + sx, ay - sy)]
  # arrow head: the shaft direction rotated by +/- 0.65 rad
  c, s = np.cos(0.65), np.sin(0.65)
  rot = np.array(((c, -s), (s, c)))
  for mat in (rot, np.linalg.inv(rot)):
    hx, hy = 10 * np.dot(mat, vector)
    segments.append((ax + sx - hx, ay - sy + hy, ax + sx, ay - sy))
  return segments
def draw_text(draw, monster_speed, step, step_size, n_actions, r=None):
  """Write run statistics along the bottom edge of the frame.

  *r* (the agent's radius) is only printed when provided.
  """
  draw.text((10, SIZE - 20), f'MONSTER SPEED: {monster_speed:.3f}', BLACK)
  draw.text((10, SIZE - 40), f'NUMBER OF ACTIONS: {n_actions}', BLACK)
  draw.text((10, SIZE - 60), f'STEP SIZE: {step_size:.3f}', BLACK)
  draw.text((CENTER - 20, SIZE - 20), f'STEP: {step}', BLACK)
  if r is not None:
    draw.text((CENTER + 80, SIZE - 20), f'RADIUS: {r:.3f}', BLACK)
def renderer(r,
             prev_agent_rotation,
             total_agent_rotation,
             total_monster_rotation,
             action_vector,
             result,
             reward,
             step,
             monster_speed,
             n_actions,
             step_size,
             is_caught,
             return_real=False,
             multi_monster_rotations=None):
  """Render an environment state as a PIL image.

  The agent lives at radius *r* rotated by *total_agent_rotation*; monsters
  sit on the unit circle at the given rotation angle(s).  If *return_real*
  is True the agent's Cartesian position is returned alongside the image.
  """
  # agent position: rotate (r, 0) by the accumulated agent rotation
  c, s = np.cos(total_agent_rotation), np.sin(total_agent_rotation)
  agent_rot_matrix = np.array(((c, -s), (s, c)))
  agent_position = np.dot(agent_rot_matrix, (r, 0))
  # NOTE(review): frame size is hard-coded as 480 here although SIZE = 480
  # exists at module level — consider using (SIZE, SIZE) for consistency.
  im = Image.new('RGB', (480, 480), (237, 201, 175))
  draw = ImageDraw.Draw(im)
  # the lake (blue disk) and its center point
  draw.ellipse((CENTER - RADIUS,) * 2 + (CENTER + RADIUS,) * 2,
               fill=(0, 0, 255), outline=BLACK, width=4)
  draw.ellipse((CENTER - 2,) * 2 + (CENTER + 2,) * 2, fill=BLACK)
  draw_text(draw, monster_speed, step, step_size, n_actions, r)
  draw.ellipse(coords_to_rect(agent_position), fill=RED)
  if multi_monster_rotations is None:
    multi_monster_rotations = [total_monster_rotation]
  for monster in multi_monster_rotations:
    draw.ellipse(angle_to_rect(monster), fill=GREEN)
  # drawing the arrow (orange when caught, yellow otherwise)
  if action_vector is not None:
    if is_caught:
      color = (255, 150, 0)
    else:
      color = (255, 255, 0)
    # rotate the action into the frame of the agent's *previous* heading
    c, s = np.cos(prev_agent_rotation), np.sin(prev_agent_rotation)
    agent_rot_matrix = np.array(((c, -s), (s, c)))
    action_vector = np.dot(agent_rot_matrix, action_vector)
    action_vector = action_vector / np.linalg.norm(action_vector)
    lines = arrow_segments(action_vector)
    for line in lines:
      draw.line(line, fill=color, width=4)
  # displaying the episode result
  if result is not None:
    white = (255,) * 3
    draw.text((CENTER - 10, CENTER + 30), result.upper(), white)
    draw.text((CENTER - 10, CENTER + 50), f'REWARD: {reward:.3f}', white)
  if return_real:
    return im, agent_position
  return im
def render_agent_path(im, path):
  """Overlay the agent's traversed path (environment coordinates) on *im*."""
  center = np.array((CENTER, CENTER))
  scale = np.array((RADIUS, -RADIUS))  # negative y: PIL's axis points down
  pixel_path = [tuple(center + scale * coord) for coord in path]
  ImageDraw.Draw(im).line(pixel_path, fill=RED, width=4)
  return im
def render_many_agents(positions, colors, step, step_size, n_actions, monster_speed):
  """Render several agents at once, keeping the monster fixed at (1, 0)."""
  im = Image.new('RGB', (480, 480), (237, 201, 175))
  draw = ImageDraw.Draw(im)
  lake_box = (CENTER - RADIUS,) * 2 + (CENTER + RADIUS,) * 2
  draw.ellipse(lake_box, fill=(0, 0, 255), outline=BLACK, width=4)
  center_box = (CENTER - 2,) * 2 + (CENTER + 2,) * 2
  draw.ellipse(center_box, fill=BLACK)
  draw_text(draw, monster_speed, step, step_size, n_actions)
  draw.ellipse(angle_to_rect(0), fill=GREEN)  # the monster itself
  for position, color in zip(positions, colors):
    draw.ellipse(coords_to_rect(position, 2), fill=color)
  return im
| [
"zebengberg@gmail.com"
] | zebengberg@gmail.com |
254b39836b581460441fc658e6f0419e540ea63d | bbd556f7726952b3ac84c2d2e7fc447251174930 | /venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/shutil.py | 7b81106e485b3a26f840e63dae1d184646c89914 | [] | no_license | svmsharma20/EurekaMW | 6b42f6afe7cb67ffc9c2a5edcc1e8b41c5bb1367 | b6ca82ce38ee1f19a58da379040446adaa8133ba | refs/heads/master | 2022-12-02T16:56:51.615777 | 2020-12-15T03:58:52 | 2020-12-15T03:58:52 | 199,877,827 | 0 | 3 | null | 2022-11-27T21:32:24 | 2019-07-31T14:59:40 | Python | UTF-8 | Python | false | false | 25,652 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
# bz2 is optional; remember whether it imported so the archive helpers can
# register .bz2 support conditionally.
try:
    import bz2
    _BZ2_SUPPORTED = True
except ImportError:
    _BZ2_SUPPORTED = False
# pwd/grp exist only on Unix; fall back to None so the owner/group helpers
# can degrade gracefully on other platforms.
try:
    from pwd import getpwnam
except ImportError:
    getpwnam = None
try:
    from grp import getgrnam
except ImportError:
    getgrnam = None
# Public API of this shutil backport.
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
           "copytree", "move", "rmtree", "Error", "SpecialFileError",
           "ExecError", "make_archive", "get_archive_formats",
           "register_archive_format", "unregister_archive_format",
           "get_unpack_formats", "register_unpack_format",
           "unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
    """Base exception for failures raised by this module's operations."""
class SpecialFileError(EnvironmentError):
    """Raised when an operation (e.g. copying) is attempted on a special
    file such as a named pipe, which is not supported."""
class ExecError(EnvironmentError):
    """Raised when an external command could not be executed."""
class ReadError(EnvironmentError):
    """Raised when an archive cannot be read."""
class RegistryError(Exception):
    """Raised when an operation on the archiving/unpacking format
    registries fails."""
# WindowsError only exists on Windows; bind it to None elsewhere so later
# isinstance() checks can be written unconditionally.
try:
    WindowsError
except NameError:
    WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
    """Copy data from file-like object *fsrc* to file-like object *fdst*
    in chunks of *length* bytes."""
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            break
        fdst.write(chunk)
def _samefile(src, dst):
    """Return True if *src* and *dst* refer to the same file."""
    # Macintosh, Unix: ask the OS directly when possible.
    if hasattr(os.path, 'samefile'):
        try:
            return os.path.samefile(src, dst)
        except OSError:
            return False
    # All other platforms: compare normalized absolute pathnames.
    return (os.path.normcase(os.path.abspath(src)) ==
            os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
    """Copy the contents of *src* to *dst*.

    Refuses to copy a file onto itself, and refuses named pipes.
    """
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))
    for fn in (src, dst):
        try:
            st = os.stat(fn)
        except OSError:
            # File most likely does not exist yet; nothing to check.
            pass
        else:
            # XXX What about other special files? (sockets, devices...)
            if stat.S_ISFIFO(st.st_mode):
                raise SpecialFileError("`%s` is a named pipe" % fn)
    with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
        copyfileobj(fsrc, fdst)
def copymode(src, dst):
    """Copy the permission bits from *src* onto *dst* (no-op where the
    platform lacks os.chmod)."""
    if not hasattr(os, 'chmod'):
        return
    mode = stat.S_IMODE(os.stat(src).st_mode)
    os.chmod(dst, mode)
def copystat(src, dst):
    """Copy mode bits, access/modification times and (where supported)
    flags from *src* to *dst*."""
    st = os.stat(src)
    mode = stat.S_IMODE(st.st_mode)
    if hasattr(os, 'utime'):
        os.utime(dst, (st.st_atime, st.st_mtime))
    if hasattr(os, 'chmod'):
        os.chmod(dst, mode)
    if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
        try:
            os.chflags(dst, st.st_flags)
        except OSError as why:
            # Some filesystems don't support chflags; ignore only that case.
            if not hasattr(errno, 'EOPNOTSUPP') or why.errno != errno.EOPNOTSUPP:
                raise
def copy(src, dst):
    """Copy data and mode bits ("cp src dst").

    The destination may be a directory, in which case the source's
    basename is appended.
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copymode(src, target)
def copy2(src, dst):
    """Copy data and all stat info ("cp -p src dst").

    The destination may be a directory, in which case the source's
    basename is appended.
    """
    target = dst
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copystat(src, target)
def ignore_patterns(*patterns):
    """Build a callable usable as copytree()'s *ignore* parameter.

    *patterns* is a sequence of glob-style patterns; names matching any
    of them are excluded from the copy.
    """
    def _ignore_patterns(path, names):
        ignored = set()
        for pattern in patterns:
            ignored.update(fnmatch.filter(names, pattern))
        return ignored
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy a directory tree.
    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.
    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.
    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():
        callable(src, names) -> ignored_names
    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.
    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    # dst must not exist yet: makedirs raises if it does.
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    os.symlink(linkto, dstname)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occurs. copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # NOTE(review): extend() flattens (src, dst, msg) into three
            # separate list items, unlike append() above — looks
            # inconsistent; confirm whether callers rely on this shape.
            errors.extend((src, dst, str(why)))
    if errors:
        raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.

    If *ignore_errors* is set, errors are ignored; otherwise, if *onerror*
    is set, it is called to handle the error with arguments (func, path,
    exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the
    argument to that function that caused it to fail; and exc_info is a
    tuple returned by sys.exc_info().  If *ignore_errors* is false and
    *onerror* is None, the exception propagates.
    """
    # Normalize the error policy into a single callable.
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        if os.path.islink(path):
            # symlinks to directories are forbidden, see bug #1669
            raise OSError("Cannot call rmtree on a symbolic link")
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    entries = []
    try:
        entries = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for entry in entries:
        child = os.path.join(path, entry)
        try:
            child_mode = os.lstat(child).st_mode
        except os.error:
            child_mode = 0
        if stat.S_ISDIR(child_mode):
            rmtree(child, ignore_errors, onerror)
        else:
            try:
                os.remove(child)
            except os.error:
                onerror(os.remove, child, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
    """basename() variant that first strips a trailing separator, so the
    last component is returned even for directory paths like 'a/b/'."""
    stripped = path.rstrip(os.path.sep)
    return os.path.basename(stripped)
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command.
    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.
    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.
    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed.
    A lot more could be done here...  A look at a mv.c shows a lot of
    the issues this implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return
        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        # fast path: same-filesystem rename
        os.rename(src, real_dst)
    except OSError:
        # cross-device (or otherwise failing) rename: copy, then delete src
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
def _destinsrc(src, dst):
    """Return True if *dst* lies inside the directory tree rooted at *src*."""
    src_abs = abspath(src)
    dst_abs = abspath(dst)
    # Append separators so '/a' is not mistaken for a prefix of '/ab'.
    if not src_abs.endswith(os.path.sep):
        src_abs += os.path.sep
    if not dst_abs.endswith(os.path.sep):
        dst_abs += os.path.sep
    return dst_abs.startswith(src_abs)
def _get_gid(name):
    """Resolve a group name to a gid; None if unknown or unsupported."""
    if getgrnam is None or name is None:
        return None
    try:
        entry = getgrnam(name)
    except KeyError:
        return None
    return entry[2]
def _get_uid(name):
    """Resolve a user name to a uid; None if unknown or unsupported."""
    if getpwnam is None or name is None:
        return None
    try:
        entry = getpwnam(name)
    except KeyError:
        return None
    return entry[2]
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                  owner=None, group=None, logger=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.

    'compress' must be "gzip" (the default), "bzip2", or None.
    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.
    The output tar file will be named 'base_name' + ".tar", possibly plus
    the appropriate compression extension (".gz", or ".bz2").
    Returns the output filename.
    """
    tar_compression = {'gzip': 'gz', None: ''}
    compress_ext = {'gzip': '.gz'}
    if _BZ2_SUPPORTED:
        tar_compression['bzip2'] = 'bz2'
        compress_ext['bzip2'] = '.bz2'
    if compress is not None and compress not in compress_ext:
        raise ValueError("bad value for 'compress', or compression format not "
                         "supported : {0}".format(compress))
    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
    archive_dir = os.path.dirname(archive_name)
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # creating the tarball
    if logger is not None:
        logger.info('Creating tar archive')
    uid = _get_uid(owner)
    gid = _get_gid(group)
    def _set_uid_gid(tarinfo):
        # tarfile 'filter' hook: rewrite ownership on each archive member.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo
    if not dry_run:
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            # BUG FIX: was tar.add_word(...), which is not a TarFile method
            # and would raise AttributeError; TarFile.add() is the real API.
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()
    return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
    """Build *zip_filename* by spawning the external 'zip' utility.

    Fallback used when the zipfile module is unavailable; raises ExecError
    when the command cannot be run either.
    """
    # XXX see if we want to keep an external call here
    if verbose:
        zipoptions = "-r"
    else:
        zipoptions = "-rq"
    from distutils.errors import DistutilsExecError
    from distutils.spawn import spawn
    try:
        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
    except DistutilsExecError:
        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed".
        #
        # BUG FIX: the original applied '%' to the raise expression
        # (ExecError(...) % zip_filename), which raised TypeError instead
        # of the intended ExecError; interpolate before raising.
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    """Create a zip file from all the files under 'base_dir'.

    The output zip file will be named 'base_name' + ".zip".  Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path).  If neither tool is
    available, raises ExecError.  Returns the name of the output zip file.
    """
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)
    # NOTE(review): when base_name has no directory part, archive_dir is ''
    # and makedirs('') would fail — confirm callers always pass a path.
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None
    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it",
                        zip_filename, base_dir)
        if not dry_run:
            # Renamed 'zip' -> 'zf': the old name shadowed the builtin.
            zf = zipfile.ZipFile(zip_filename, "w",
                                 compression=zipfile.ZIP_DEFLATED)
            try:
                for dirpath, dirnames, filenames in os.walk(base_dir):
                    for name in filenames:
                        path = os.path.normpath(os.path.join(dirpath, name))
                        if os.path.isfile(path):
                            zf.write(path, path)
                            if logger is not None:
                                logger.info("adding '%s'", path)
            finally:
                # BUG FIX: close the archive even if a write fails
                # (the handle used to leak on exceptions).
                zf.close()
    return zip_filename
# Registry: format name -> (builder, extra (arg, value) pairs, description).
_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (_make_zipfile, [], "ZIP file"),
    }
# Re-register bztar only when bz2 is importable (same entry as above).
if _BZ2_SUPPORTED:
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
                                "bzip2'ed tar-file")
def get_archive_formats():
    """Return sorted (name, description) pairs for the supported archiving
    formats."""
    return sorted((name, registry[2])
                  for name, registry in _ARCHIVE_FORMATS.items())
def register_archive_format(name, function, extra_args=None, description=''):
    """Register an archive format.

    *name* is the name of the format.  *function* is the callable used to
    create archives.  If provided, *extra_args* is a sequence of
    (name, value) tuples passed as arguments to the callable.
    *description* is returned by get_archive_formats().
    """
    if extra_args is None:
        extra_args = []
    # BUG FIX: isinstance(function, collections.Callable) breaks on
    # Python >= 3.10 (Callable moved to collections.abc); the callable()
    # builtin is the portable check.
    if not callable(function):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')
    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
    """Remove an archive format from the registry (KeyError if absent)."""
    del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None, logger=None):
    """Create an archive file (eg. zip or tar).
    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
    or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive.  'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive.  'root_dir' and 'base_dir' both default
    to the current directory.  Returns the name of the archive file.
    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    # Remember where we were: the builder runs with cwd set to root_dir.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if logger is not None:
            logger.debug("changing into '%s'", root_dir)
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)
    if base_dir is None:
        base_dir = os.curdir
    kwargs = {'dry_run': dry_run, 'logger': logger}
    try:
        format_info = _ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)
    func = format_info[0]
    for arg, val in format_info[1]:
        kwargs[arg] = val
    # Only the tar builders understand ownership overrides.
    if format != 'zip':
        kwargs['owner'] = owner
        kwargs['group'] = group
    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        # Always restore the original working directory.
        if root_dir is not None:
            if logger is not None:
                logger.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)
    return filename
def get_unpack_formats():
    """Return sorted (name, extensions, description) triples for the
    registered unpack formats."""
    return sorted((name, info[0], info[3])
                  for name, info in _UNPACK_FORMATS.items())
def _check_unpack_options(extensions, function, extra_args):
    """Validate arguments before they are registered as an unpacker."""
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name
    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))
    # BUG FIX: isinstance(function, collections.Callable) breaks on
    # Python >= 3.10 (Callable moved to collections.abc); use callable().
    if not callable(function):
        raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Register an unpack format.

    *name* names the format and *extensions* lists the file extensions it
    handles.  *function* is called with the archive path (plus the optional
    (name, value) pairs in *extra_args*) to unpack it, and must raise
    ReadError when it cannot handle an archive.  *description* is echoed by
    get_unpack_formats().
    """
    extra_args = [] if extra_args is None else extra_args
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
    """Remove the unpack format from the registry (KeyError if absent)."""
    del _UNPACK_FORMATS[name]
def _ensure_directory(path):
    """Create the parent directory of *path* if it does not exist yet."""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def _unpack_zipfile(filename, extract_dir):
    """Unpack zip `filename` to `extract_dir`."""
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')
    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)
    # Renamed 'zip' -> 'zf': the old name shadowed the builtin.
    zf = zipfile.ZipFile(filename)
    try:
        for info in zf.infolist():
            name = info.filename
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name:
                continue
            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue
            _ensure_directory(target)
            if not name.endswith('/'):
                # regular file member: write its bytes out
                data = zf.read(info.filename)
                f = open(target, 'wb')
                try:
                    f.write(data)
                finally:
                    f.close()
                del data
    finally:
        zf.close()
def _unpack_tarfile(filename, extract_dir):
    """Unpack a tar/tar.gz/tar.bz2 archive `filename` into `extract_dir`."""
    try:
        archive = tarfile.open(filename)
    except tarfile.TarError:
        raise ReadError(
            "%s is not a compressed or uncompressed tar file" % filename)
    try:
        archive.extractall(extract_dir)
    finally:
        archive.close()
# Registry: name -> (extensions, unpacker, extra (arg, value) pairs, description).
_UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
    'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
    'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
    }
# Register .bz2 support only when the bz2 module is importable.
if _BZ2_SUPPORTED:
    _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
                                "bzip2'ed tar-file")
def _find_unpack_format(filename):
    """Return the registered format whose extension matches *filename*,
    or None when no registered unpacker applies."""
    for name, info in _UNPACK_FORMATS.items():
        if filename.endswith(tuple(info[0])):
            return name
    return None
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.
    `filename` is the name of the archive.
    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.
    `format` is the archive format: one of "zip", "tar", or "gztar". Or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension.
    In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()
    if format is not None:
        # explicit format: look it up directly
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))
        func = format_info[1]
        func(filename, extract_dir, **dict(format_info[2]))
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))
        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
| [
"shivamsharmasvm20@gmail.com"
] | shivamsharmasvm20@gmail.com |
39f7638fa87ac3f507bbfd104317deef1e645d99 | fdb65605f3aef4f502debaa0fc9df6860e0e193f | /python/app.py | 79203b0ed32ee36d11715fd66ec404f66e17b6c8 | [] | no_license | twy3009/sparta | 5fad123dcfec5f0b6605ae11a22445996fd6e5d6 | 215e1d3e9f21f3ba8412114adf2e402b9f9960ae | refs/heads/master | 2022-04-22T02:19:58.229701 | 2020-04-26T05:57:45 | 2020-04-26T05:57:45 | 245,768,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('mongodb://test:test@15.164.103.10', 27017) # mongoDB는 27017 포트로 돌아갑니다.
db = client.dbsparta
def scrab(name, page):
    """Return the article links from one page of Naver news results for *name*.

    *page* is 1-based; the query window is fixed to the dates baked into
    the URL below.
    """
    headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
    data = requests.get('https://search.naver.com/search.naver?&where=news&query='+ str(name)+'&sm=tab_pge&sort=0&photo=0&field=0&reporter_article=&pd=3&ds=2020.04.06&de=2020.04.26&docid=&nso=so:r,p:from20200406to20200410,a:all&mynews=0&cluster_rank=153&start={}1&refresh_start=0'.format(str(page-1)),headers=headers)
    soup = BeautifulSoup(data.text, 'html.parser')
    entries = soup.select('#main_pack > div.news.mynews.section._prs_nws > ul>li')
    # one result entry -> the href of its first anchor
    return [entry.select_one('a').attrs['href'] for entry in entries]
## Routes that serve HTML pages
@app.route('/')
def home():
    """Serve the single-page UI."""
    return render_template('index.html')
@app.route('/memo', methods=['GET'])
def listing():
    """GET /memo: return every saved article as JSON."""
    # find all documents; exclude the _id field from the output
    result = list(db.articles.find({},{'_id':0}))
    # hand the records down under the 'articles' key
    return jsonify({'result':'success', 'articles':result})
@app.route('/table', methods=['GET'])
def listing_table():
    """GET /table: return every document of db.collection as JSON."""
    # find all documents; exclude the _id field from the output
    result = list(db.collection.find({},{'_id':0}))
    return jsonify({'result':'success', 'collection':result})
## Routes that act as the JSON API
@app.route('/memo', methods=['POST'])
def saving():
    """POST /memo: scrape a Naver news article for *url_give* and store it."""
    # receive the data from the client
    name = request.form['url_give']
    # NOTE(review): unconditionally takes the 5th hit of results page 1;
    # scrab() must return at least 5 links or this raises IndexError.
    url_receive = scrab(name,1)[4]
    comment_receive = request.form['comment_give']
    # scrape the page's Open Graph meta tags
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
    data = requests.get(url_receive, headers=headers)
    soup = BeautifulSoup(data.text, 'html.parser')
    og_image = soup.select_one('meta[property="og:image"]')
    og_title = soup.select_one('meta[property="og:title"]')
    og_description = soup.select_one('meta[property="og:description"]')
    # NOTE(review): select_one returns None when a tag is missing, so the
    # subscripts below would raise — confirm target pages always provide
    # all three og: tags.
    url_image = og_image['content']
    url_title = og_title['content']
    url_description = og_description['content']
    # insert the record into mongoDB
    article = {'url': url_receive, 'comment': comment_receive, 'image': url_image,
               'title': url_title, 'desc': url_description}
    db.articles.insert_one(article)
    return jsonify({'result': 'success'})
if __name__ == '__main__':
    # listen on all interfaces on port 80 with the debug reloader enabled
    app.run('0.0.0.0',port=80,debug=True)
"61925189+twy3009@users.noreply.github.com"
] | 61925189+twy3009@users.noreply.github.com |
56296e4c129d9135aedd9773390e5b84351d661c | cc75fafa3b62c2b03a95036616663263bede858d | /eval/Category_Baseline_unseen/separateCategory.py | 406407e71a0a5cbae523ee53fb642631faf4a0d5 | [] | no_license | liqipap/graph2text | 276d0d7274ad6d53e92ad5567c05bb1c430aefd9 | 798318c24997ef40f874262d4ac5084e59e9623d | refs/heads/master | 2020-03-20T08:30:06.318432 | 2018-01-20T12:23:22 | 2018-01-20T12:23:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | import xml.etree.ElementTree as Et
import xml.etree.cElementTree as ET
import sys
import getopt
def separateUnseen(filePath, catgory):
    """Separate an unseen-dataset XML file by category.

    Streams the XML at *filePath* and writes every <entry> whose
    'category' attribute equals *catgory* into a new file named
    'unseen<catgory>.xml' in the current working directory.

    :param filePath: path of the whole unseen-dataset XML file
    :param catgory: name of the category to extract entries for
    """
    # BUG FIX: xml.etree.cElementTree was removed in Python 3.9; the plain
    # ElementTree module is the supported (and equally fast) replacement.
    import xml.etree.ElementTree as ET
    context = ET.iterparse(filePath, events=('end',))
    # create the xml file to put the extracted entries in
    # (the original wrapped this in format(), which is a no-op on a str)
    filename = "unseen" + catgory + ".xml"
    with open(filename, 'wb') as f:
        f.write(b"<benchmark>\n")
        f.write(b" <entries>\n")
        for _event, elem in context:
            if elem.tag == 'entry':
                if elem.get('category') == catgory:
                    f.write(ET.tostring(elem))
                # free the processed subtree to keep memory flat
                elem.clear()
        f.write(b" </entries>\n")
        f.write(b"</benchmark>\n")
def main(argv):
    """Parse the command line and run separateUnseen().

    Requires both -i/--inputdir (path of the unseen-dataset XML) and
    -c/--category (name of the category); exits with status 2 otherwise.
    """
    usage = 'usage:\npython3 separateCategory.py -i <data-directory> -c category' \
            '\ndata-directory is the directory where the whole xml of unseen dataset is'\
            '\n category, the name of category'
    try:
        opts, args = getopt.getopt(argv, 'i:c:', ['inputdir=', 'category='])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    inputdir = None
    catg = None
    for opt, arg in opts:
        if opt in ('-i', '--inputdir'):
            inputdir = arg
        elif opt in ('-c', '--category'):
            catg = arg
        else:
            print(usage)
            sys.exit()
    # BUG FIX: the original accepted a single flag and then crashed with a
    # NameError on the missing variable; require BOTH options explicitly.
    if inputdir is None or catg is None:
        print(usage)
        sys.exit(2)
    print('Input directory is', inputdir)
    print('The current name of category is', catg)
    separateUnseen(inputdir, catg)
if __name__ == "__main__":
    # forward the CLI arguments (minus the program name) to main()
    main(sys.argv[1:])
| [
"badr.md87@gmail.com"
] | badr.md87@gmail.com |
1a43fcbec667b510a0a1ff82df246326a83a70fb | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/andersbll/deeppy/deeppy/model/__init__.py | 3fc2414a36c1575b1ca19d8106a70e4a76258fb6 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 169 | py | from .adversarial import AdversarialNet
from .feedforward import FeedForwardNet, ClassifierNet, RegressorNet
from .variational_autoencoder import VariationalAutoencoder
| [
"rares.begu@gmail.com"
] | rares.begu@gmail.com |
a7ce93793489eef4e3aedb56e1be34d526b9e3b8 | 65498cfa6e8d833640421157641f9e060d8c5460 | /HIcalculation/functions/GET_heat_indices_v2.py | 54aca755025dc1026034e5f5b289d24840895eb8 | [] | no_license | IPCC-WG1/Chapter-12 | 7c372bbef624941c1cca18004617acd12534d827 | 358a09813f5b29ea064a0629c26b030d60c1f972 | refs/heads/main | 2023-04-07T23:52:43.573045 | 2023-03-20T08:35:28 | 2023-03-20T08:35:28 | 351,099,250 | 2 | 5 | null | 2023-03-08T17:03:23 | 2021-03-24T13:57:16 | Jupyter Notebook | UTF-8 | Python | false | false | 6,498 | py | #!/usr/bin/env python
# coding: utf-8
#This script organizes the calculation of the heat stress indicators. It reads the relevant data and stores the calculate heat stress indicators (i.e. HI) in the correct folder.
import os
import sys
import xarray as xr
import numpy as np
import dask.array as da
import time as t_util
from multiprocessing import Pool
sys.path.insert(0,'/div/amoc/exhaustion/Heat_Health_Global/Scripts/functions_Py/')
import calc_heat_health_indices as CalcHHI
#Master function for creating folders and call function to calculate heat indices
#Input:
# - model: model name (string)
# - SSP: RCP or SSP (string)
# - member: model member (string)
# - heat_index: heat stress indicator (in this case HI_NOAA) (string)
# - dir_in: directory where input files are stored (string)
# - dir_out: directory where calculated heat stress indicator should be stored (string)
# - year_vec: vector of years for which the heat stress indicator is calculated (1d numpy array)
# - dyear (integer)
# - var_names: variables names of input variables (list of strings)
def GET_heat_indices_v2(model, SSP, member, heat_index, dir_in, dir_out, year_vec, dyear, var_names=['tasmax', 'huss', 'sp'], var_files=['tasmax', 'huss', 'sp']):
#Input order for variables must be: 1) temperature, 2) pressure, 3) humidity !!
#Calculate heat indices for CMIP6
param_dict = dict({'scenario': SSP, 'ens_member': member, 'dir_in': dir_in, 'dir_out': dir_out, 'year_vec': year_vec, 'dyear': dyear})
calc_heat_indices(model, heat_index, param_dict, var_names, var_files)
#Calculate heat indices
def calc_heat_indices(model, heat_index, param_dict, var_names, var_files):
#Get parameters
dir_in = param_dict['dir_in']
dir_out = param_dict['dir_out']
year_vec = param_dict['year_vec']
dyear = param_dict['dyear']
#Define correct variable names
SSP = param_dict['scenario']
member = param_dict['ens_member']
print(' -calculating ' + heat_index + '... ', end = '')
start = t_util.time()
create = 1
for year in year_vec:
print(str(year) + ', ', end='')
#Select years
year_sel = [year, year + dyear - 1]
#Read data
data = read_data(dir_in, model, var_files, var_names, SSP, member, year_sel)
# Define chunks
if heat_index not in ['PT', 'SETstar', 'PET']:
data = data.chunk(chunks={'time': 6 * dyear})
#Set variable names
data = data.rename({var_names[0]: 'TX', var_names[1]: 'q', var_names[2]: 'p'})
#Remove unnecessary information
if 'height' in data.coords: data = data.drop('height')
#Calculate vapour pressure and relative humidity
e, RH = CalcHHI.get_humidity(data.q, data.p, data.TX)
#Define variable names
data = data.assign({'e': e, 'RH': RH})
# Update chunks
if heat_index not in ['PT', 'SETstar', 'PET']:
data = data.chunk(chunks={'time': 6 * dyear})
# Select heat index
if heat_index=='HI_NOAA':
index = CalcHHI.HI_NOAA(data.TX, data.RH)
#Rename dataset and compute index
index = index.to_dataset(name=heat_index)
index = index.load()
index[heat_index].attrs = {'units': '1'}
#Collect all years
if create==1:
index_out = index
create = 0
else:
index_out = xr.concat((index_out, index), dim='time')
stop = t_util.time()
print(' time = ' + "{:.2f}".format(stop - start))
#Get years and scenario for file name
t1 = str(year_vec[0])
t2 = str(year_vec[-1] + dyear - 1)
if param_dict['scenario']=='':
scen_out = ''
else:
scen_out = param_dict['scenario'] + '_'
if param_dict['ens_member']=='':
ens_out = ''
else:
ens_out = param_dict['ens_member'] + '_'
#Add tasmin to heat index file name
if var_names[0]=='tasmin':
T_out = '-tasmin'
else:
T_out = ''
#Save heat index to file
fname_out = dir_out + heat_index + T_out + '_' + model + '_' + scen_out + ens_out + t1 + '-' + t2 + '.nc'
index_out.astype('float32').to_netcdf(fname_out)
#Read data
def read_data(folder, model, var_files, var_names, SSP, member, year_sel):
#Folder for regridding
dir_regr = '/div/amoc/exhaustion/Heat_Health_Global/Data/Masks_Heights_Grids/Regridding/'
#Loop over variables
create = 1
for var_f, var_n in zip(var_files, var_names):
file_name = [file for file in os.listdir(folder) if (model + '_' in file) and (var_f + '_' in file) and (SSP + '_' in file) and (member in file)]
#Make sure that only file is selected
if len(file_name)>1:
file_name = [file for file in file_name if str(year_sel[0]) in file and str(year_sel[1]) in file]
if len(file_name)!=1:
print(file_name)
print('File name is not unambiguous!')
#Read and select data
data = xr.open_dataset(folder + file_name[0])
data = data.sel(time=slice(str(year_sel[0]), str(year_sel[1])))
#Re-index to standard grid
fname_std_grid = dir_regr + "Standard_grid_" + model + ".nc"
if os.path.exists(fname_std_grid):
grid_std = xr.open_dataset(fname_std_grid)
if 'rlat' in data: lat_name, lon_name = 'rlat', 'rlon'
elif 'x' in data: lat_name, lon_name = 'x', 'y'
else: lat_name, lon_name = 'lat', 'lon'
check1 = np.max(np.abs(data[lat_name].values - grid_std[lat_name].values))
check2 = np.max(np.abs(data[lon_name].values - grid_std[lon_name].values))
if (check1!=0) or (check2!=0):
try:
data = data.reindex({lat_name: grid_std[lat_name], lon_name: grid_std[lon_name]}, method='nearest')
except:
('Standard grid re-indexing not applied for calculating HSIs.')
data = data
else:
if model!='ERA5':
print('Standard grid re-indexing not applied for calculating HSIs.')
#Store data in one dataset
if create==1:
data_all = data
create = 0
else:
data_all[var_n] = data[var_n]
return data_all
| [
"clemens.schwingshackl@geographie.uni-muenchen.de"
] | clemens.schwingshackl@geographie.uni-muenchen.de |
a3f63dab27f381f4aacc708a5fc6df1166817262 | 2402d36afad839c13986bcc9b735ef11a6b30960 | /chapter_03/3.4_guest_list.py | c0bfbec8e03e69f304db2cec0849742c47a1f44a | [] | no_license | Blanca-7/Python-study | 3d12983069a7bab82d6ebdd58b160f3477e6c7e1 | 6a26b6fbc1120e14e1534a86c024d35bfb2bff36 | refs/heads/master | 2023-02-26T10:04:29.380721 | 2021-02-05T10:48:45 | 2021-02-05T10:48:45 | 334,710,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | guests = ["Dr. Einstein","Bruce Lee","Elon Musk"]
invitation = "I'd like to invite you to my dinner Party"
print(f"Dear {guests[0]}, {invitation}")
print(f"Dear {guests[1]}, {invitation}")
print(f"Dear {guests[2]}, {invitation}")
#alternative way with concatenation:
#print("Dear " + guests[1] + ", " + invitation)
| [
"noreply@github.com"
] | noreply@github.com |
2186bff0599d7d8a807bfb4ac72df8ad8111e6e4 | 8818aa1691c6773d46a9b21bc8dd4f3d2fbd36d5 | /predict.py | 238faf59589c3efd14e3487144898e3d6a052ef4 | [] | no_license | ammarsyatbi/Image-Classification | bab7e4ec5ff28298080c0c11ea8aa42df0415359 | 0dbf093966b6d80c69e96d70a006c5da8c92af53 | refs/heads/master | 2023-03-25T18:26:01.015802 | 2021-03-25T09:33:11 | 2021-03-25T09:33:11 | 350,969,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | import os
import tensorflow as tf
from tensorflow import keras
import numpy as np
from config import cfg
import utils
import json
CLASS_NAMES = utils.read_class_names(cfg.MODEL.CLASS_NAMES)
test_imgs = os.listdir(cfg.MODEL.TEST_DIR)
model = tf.keras.models.load_model(cfg.MODEL.SAVE_DIR)
predicted = {}
bad_imgs = []
for img_name in test_imgs:
test_img_path = os.path.join(cfg.MODEL.TEST_DIR, img_name)
try:
img = keras.preprocessing.image.load_img(
test_img_path, target_size=(cfg.MODEL.IMG_HEIGHT, cfg.MODEL.IMG_WIDTH)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
# load img
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
"This image most likely belongs to {} with a {:.2f} percent confidence."
.format(CLASS_NAMES[np.argmax(score)], 100 * np.max(score))
)
if CLASS_NAMES[np.argmax(score)] not in predicted:
predicted[CLASS_NAMES[np.argmax(score)]] = []
predicted[CLASS_NAMES[np.argmax(score)]].append(img_name)
except Exception as e:
print(e)
bad_imgs.append(img_name)
pass
print(f"Total bad images - {len(predicted)}")
with open(os.path.join(cfg.MODEL.RESULT_DIR, "predicted.json"), "w") as file:
file.write(json.dumps(predicted))
| [
"ammarsyatbi@github.com"
] | ammarsyatbi@github.com |
7e8116443903d033a1a47a2ffed807aec258d0c3 | 49e17d736df9889b3a0d91705abd0f3ed579d17c | /quests/Temple_Of_Ikov.py | b4d3f0e615c687daab5b6c89a084be6e2400e914 | [] | no_license | TheWhirl/RunescapeQuestWebsite | 4f258c04a1c1e6bb9f6d9e0fa63fdcab452ccfc2 | 8d5dacbc8251bd1f2dded4ffa04400ed48e0f1fb | refs/heads/master | 2020-05-16T02:54:35.603906 | 2018-12-23T13:03:58 | 2018-12-23T13:03:58 | 182,643,424 | 0 | 0 | null | 2019-04-22T07:22:00 | 2019-04-22T07:21:59 | null | UTF-8 | Python | false | false | 443 | py | import os
import sys
sys.path.insert(0,
os.path.dirname(os.path.realpath(__file__))[
0:-len("quests")])
from QuestInfo import Quest
class Temple_Of_Ikov(Quest):
def __init__(self):
super().__init__("Temple of Ikov")
self.age = 5
self.difficulty = "Experienced"
self.length = "Medium"
self.quest_points = 1
self.thieving = 42
self.ranged = 40
| [
"musomaddy@gmail.com"
] | musomaddy@gmail.com |
05b2aab7c6f1c9f3cb6f7ade63d62490fdd1c8db | 66f6be86059bf37100744b421d5d7f252b8a5126 | /victoria_inventory/__manifest__.py | ad49bad4c624b19e11a9d2c36c7111048dd25255 | [] | no_license | tate11/victoria_basement_12 | ec19d442a59beecfd09d60adc4af204179a40243 | a96baa19b48a42d42fdbdc1e525d0dbb629a75f2 | refs/heads/master | 2020-04-24T23:30:06.103458 | 2019-02-04T04:06:39 | 2019-02-04T04:06:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Victoria Inventory',
'version': '12.0',
'summary': 'Victoria Inventory Custom',
'sequence': 7,
'description': """
Victoria Inventory
==================
Inventory Custom
""",
'category': 'Inventory',
'website': 'https://www.envertis.com.au',
'images': [
],
'depends': [
'stock',
'victoria_product',
],
'data': [
'security/ir.model.access.csv',
'views/stock_view.xml',
'views/stock_update.xml',
'report/requet_docket_report.xml',
'report/requet_docket_report_view.xml',
],
'demo': [],
'qweb': [],
'installable': True,
'application': True,
'auto_install': False,
}
| [
"rohit.kumarsrivastava@envertis.com.au"
] | rohit.kumarsrivastava@envertis.com.au |
25fdc78dc70a5662efdf229ddd03144e6c0355bc | 92028af4d98547db0fd870ebd9832dbc82f9df79 | /mm.py | 36b1cc73386d3662b712c76274258ee2d2e2b712 | [] | no_license | tineroll/mmPictures | a1ba2fd385cb8609f72792206cf65c34cdcbba94 | 5cc692da657a277416673fa6371764d9f3838284 | refs/heads/master | 2021-05-11T20:33:57.983531 | 2018-01-14T16:03:04 | 2018-01-14T16:03:04 | 117,442,382 | 0 | 0 | null | 2018-01-14T15:31:59 | 2018-01-14T15:31:59 | null | UTF-8 | Python | false | false | 4,595 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
import urllib2
from lxml import etree
from os import system
"""
第一步: 从 http://www.zngirls.com/rank/sum/ 开始抓取MM点击头像的链接(注意是分页的)
#第二部 http://www.zngirls.com/girl/21751/ 抓取每一个写真集合的链接(注意是分页的)
#第三部 http://www.zngirls.com/g/19671/1.html 在写真图片的具体页面抓取图片(注意是分页的)
"""
pciturelist=[]
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
, "Connection": "keep-alive"
}
"""
从起始页面 http://www.zngirls.com/rank/sum/ 开始获取排名的页数和每一页的url
"""
def mmRankSum():
req = urllib2.Request("http://www.zngirls.com/rank/sum/", headers=header)
html = urllib2.urlopen(req)
htmldata = html.read()
htmlpath = etree.HTML(htmldata)
#首先获取页码数,然后用循环的方式挨个解析每一个页面
pages = htmlpath.xpath('//div[@class="pagesYY"]/div/a/@href')
for i in range( len(pages) -2 ):
pagesitem="http://www.zngirls.com/rank/sum/"+ pages[i]
mmRankitem(pagesitem)
"""
参数 url : 分页中每一页的具体url地址
通过穿过来的参数,使用 lxml和xpath 解析 html,获取每一个MM写真专辑页面的url
"""
def mmRankitem(url):
req = urllib2.Request(url, headers=header)
html = urllib2.urlopen(req)
htmldata = html.read()
htmlpath = etree.HTML(htmldata)
pages = htmlpath.xpath('//div[@class="rankli_imgdiv"]/a/@href')
for i in range(len(pages)):
print "http://www.zngirls.com/" + pages[i]+"album/"
getAlbums("http://www.zngirls.com/" + pages[i]+"/album/")
#print "http://www.zngirls.com/" + pages[i]
"""
参数 url : 每一个MM专辑的页面地址
通过穿过来的参数,获取每一个MM写真专辑图片集合的地址
"""
def getAlbums(girlUrl):
req = urllib2.Request(girlUrl, headers=header)
html = urllib2.urlopen(req)
htmldata = html.read()
htmlpath = etree.HTML(htmldata)
pages = htmlpath.xpath('//div[@class="igalleryli_div"]/a/@href')
for i in range(len(pages)):
getPagePicturess("http://www.zngirls.com/" + pages[i])
"""
参数 url : 每一个MM写真专辑图片集合的地址
通过穿过来的参数,首先先获取图片集合的页数,然后每一页解析写真图片的真实地址
"""
def getPagePicturess(albumsurl):
req = urllib2.Request(albumsurl, headers=header)
html = urllib2.urlopen(req)
htmldata = html.read()
htmlpath = etree.HTML(htmldata)
pages = htmlpath.xpath('//div[@id="pages"]/a/@href')
for i in range(len(pages)-2):
savePictures("http://www.zngirls.com" + pages[i])
"""
参数 url : 每一个MM写真专辑图片集合的地址(进过分页检测)
通过穿过来的参数,直接解析页面,获取写真图片的地址,然后下载保存到本地。
"""
def savePictures(itemPagesurl):
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
, "Connection": "keep-alive"
, "Referer": "image / webp, image / *, * / *;q = 0.8"
,"Accept":"image/webp,image/*,*/*;q=0.8"
}
try:
req = urllib2.Request(itemPagesurl, headers=header)
html = urllib2.urlopen(req)
htmldata = html.read()
htmlpath = etree.HTML(htmldata)
print itemPagesurl
pages = htmlpath.xpath('//div[@class="gallery_wrapper"]/ul/img/@src')
names = htmlpath.xpath('//div[@class="gallery_wrapper"]/ul/img/@alt')
except Exception:
pass
for i in range(len(pages) ):
print pages[i]
pciturelist.append(pages[i])
try:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
, "Connection": "keep-alive"
, "Referer": pages[i]
}
req = urllib2.Request(pages[i], headers=headers)
urlhtml = urllib2.urlopen(req)
respHtml = urlhtml.read()
binfile = open('%s.jpg' % ( names[i] ) , "wb")
binfile.write(respHtml);
binfile.close();
except Exception :
pass
mmRankSum()
"""
fl=open('list.txt', 'w')
for i in pciturelist:
fl.write(i)
fl.write("\n")
fl.close()
print '关机ing'
"""
print 'finish'
system('shutdown -s') | [
"394804550@qq.com"
] | 394804550@qq.com |
9aac8f17b3bf10aea1ebed124b84c898077044d9 | 84b9bcdbd610e704093d4bbf4d0bcf7134fcedf0 | /analyze_net/analyze_net.py | 4930671216a2d2ca94d7a7fb4dfa15d084300af0 | [] | no_license | JerryTom121/cite-network | 1813df9189cd6e8b10ebc15d00230c0466176114 | 29dfddbb43df6d48ed3b4dc28001489a7809d9ef | refs/heads/master | 2021-01-19T23:25:41.583449 | 2016-11-17T16:14:28 | 2016-11-17T16:14:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,319 | py | # -*- coding: utf-8 -*-
'''
Using the `graphml` files and two "comparison networks," conduct the actual
network analysis.
The "comparison networks" are citation networks grabbed from arXiv, with papers
from January 1993 to April 2003. They can be found at
<https://snap.stanford.edu/data/cit-HepPh.html> and
<https://snap.stanford.edu/data/cit-HepTh.html>.
'''
# Graph-tool modules
import graph_tool.all as gt
import graph_tool.community_old as gtcomm
# Color schemes used in plotting nets
from matplotlib.cm import bwr, bwr_r
# Python port of ggplot
# Very incomplete and buggy!
from ggplot import *
from ggplot.utils.exceptions import GgplotError
# Other things we'll need
import bottleneck as bn
from datetime import date, datetime
import logging
from matplotlib import pyplot as plt
import numpy as np
import os.path as path
import pandas as pd
from random import sample, seed
import scipy.stats as spstats
from statsmodels.distributions.empirical_distribution import ECDF as ecdf
from statsmodels.nonparametric.kde import KDEUnivariate as kde
def load_net(infile, core = False, filter = False):
	'''
	Load a network from a `graphml` file (or a previously saved `.gt` file).
	:param infile: The `graphml` file to load.
	:param core: Does the net contain a `core` vertex property map?
	:param filter: Apply a filter?  Only has an effect when `core` is True.
	:return: the graph_tool `Graph`, a prefix for output files, and
		(if core is True) the core property map and list of core vertices
	'''
	# Output filename
	#  Prefix only, not extension:
	#  `split('.')` splits `infile` at the periods and returns a list
	#  `[:-1]` grabs everything except the extension
	#  `'.'.join` recombines everything with periods
	outfile_pre = '.'.join(infile.split('.')[:-1])
	# Prefer a pre-processed graph saved by an earlier run, if one exists
	if path.exists('output/' + outfile_pre + '.out.gt'):
		print('Found pre-procesed graph')
		infile = 'output/' + outfile_pre + '.out.gt'
	print('Loading ' + infile)
	net = gt.load_graph(infile)
	# If `core` is true, extract the core set
	if core:
		core_pmap = net.vertex_properties['core']
		core_vertices = [vertex for vertex in net.vertices() if core_pmap[vertex]]
	# Print basic network statistics
	print('Loaded ' + infile)
	print('Vertices: ' + str(net.num_vertices()))
	print('Edges: ' + str(net.num_edges()))
	if core:
		print('Core vertices: ' + str(len(core_vertices)))
	if core and filter:
		# Add a filter
		print('Adding filter')
		# Recent papers filter for the citation net:
		# keep only papers published after 2005
		if 'citenet0' in infile:
			year = net.vp['year']
			recent_list = [year[vertex] > 2005 for vertex in net.vertices()]
			recent_pmap = net.new_vertex_property('boolean')
			recent_pmap.a = np.array(recent_list)
			net.set_vertex_filter(recent_pmap)
		# Distance from core set for the author nets:
		# two rounds of `infect_vertex_property` spread True outward,
		# keeping vertices within two steps of the core
		else:
			net.set_directed(False)
			extended_set_pmap = core_pmap.copy()
			gt.infect_vertex_property(net, extended_set_pmap, vals=[True])
			gt.infect_vertex_property(net, extended_set_pmap, vals=[True])
			net.set_vertex_filter(extended_set_pmap)
		# Remove everything caught in the filter
		net.purge_vertices()
		# Extract the largest component
		net.set_vertex_filter(gt.label_largest_component(net, directed=False))
		net.purge_vertices()
		# Rebuild core: purging invalidates the old property map and
		# the previously extracted vertex descriptors
		core_pmap = net.vertex_properties['core']
		core_vertices = [vertex for vertex in net.vertices() if core_pmap[vertex]]
		print('Filtered vertices: ' + str(net.num_vertices()))
		print('Filtered edges: ' + str(net.num_edges()))
		print('Filtered core: ' + str(len(core_vertices)))
	elif filter and not core:
		# Filtering is defined relative to the core set, so warn and skip
		print('Filter = true with core = false')
	if core:
		return net, outfile_pre, core_pmap, core_vertices
	else:
		return net, outfile_pre
def layout_and_plot(net, color_pmap, outfile_pre, filename_mod = '.net',
					size_pmap = None, reverse_colors = False):
	'''
	Draw `net` to a PNG file, computing and caching a layout on the graph
	the first time one is needed.
	:param net: The network to plot.
	:param color_pmap: Property map on `net` to color nodes.
	:param outfile_pre: Prefix for output filename.
	:param filename_mod: Extension to use on the output filename.
	:param size_pmap: Property map on `net` to set size of vertices.
	:param reverse_colors: Use the reversed blue-white-red colormap?
	:return: The layout vertex property map.
	'''
	# Uniform vertex size unless the caller supplied one
	if size_pmap is None:
		size_pmap = net.new_vertex_property('float', val = 20)
	# Compute the layout once and store it on the graph for reuse
	if 'layout' not in net.vp:
		print('Calculating graph layout')
		net.vp['layout'] = gt.sfdp_layout(net, verbose = True)
	# Blue-white-red colormap, optionally reversed
	colormap = bwr_r if reverse_colors else bwr
	# Render to PNG
	gt.graph_draw(net,
					pos = net.vp['layout'],
					vertex_fill_color = color_pmap,
					vcmap = colormap,
					vertex_size = size_pmap,
					edge_pen_width = 1,
					fit_view = 1,
					output_size = (2000, 2000),
					output = outfile_pre + filename_mod + '.png')
	return net.vp['layout']
def summary(data):
	'''
	Report several descriptive statistics for the 1D `data`.
	:param data: The Python list or numpy array to summarize.
	:return: A Pandas Series with the following stats:
				count, minimum value, maximum value,
				mean, standard deviation,
				5, 25, 50 (median), 75, and 95 percentiles,
				50 and 90 interquartile range
	'''
	# `min`/`max` of a numpy array return numpy scalars; unwrap them to
	# plain Python scalars with `.item()`.  (The original check
	# `type(minimum) is np.array` could never fire, because `np.array`
	# is a factory function, not the type of a numpy scalar.)
	minimum = min(data)
	if isinstance(minimum, np.generic):
		minimum = minimum.item()
	maximum = max(data)
	if isinstance(maximum, np.generic):
		maximum = maximum.item()
	stats = {'count' : int(len(data)),
			'min': minimum,
			'max': maximum,
			'mean': np.average(data),
			'sd': np.std(data),
			'q05': np.percentile(data, 5),
			'q25': np.percentile(data, 25),
			'median': np.median(data),
			'q75': np.percentile(data, 75),
			'q95': np.percentile(data, 95)
			}
	# Interquartile ranges derive from the percentiles above
	stats['iqr50'] = stats['q75'] - stats['q25']
	stats['iqr90'] = stats['q95'] - stats['q05']
	return(pd.Series(stats))
def insularity(net, community):
	'''
	Calculates the insularity of a single community, the fraction of its edges
	that are intracommunity.
	:param net: The network of interest
	:param community: A Boolean property map on `net`
	:return: The insularity statistic
	'''
	# Vertices flagged as members of the community
	members = {v for v in net.vertices() if community[v]}
	# Every edge incident to at least one member; the set deduplicates
	# edges reached from both endpoints
	touching = {e for v in members for e in v.all_edges()}
	#print(len(touching))
	# Count the edges with both endpoints inside the community
	internal = sum(1 for e in touching
					if e.source() in members and e.target() in members)
	# Fraction of the community's edges that stay internal
	return internal / len(touching)
def partition_insularity(net, partition):
	'''
	Calculates the insularity for communities defined by the distinct values
	of the given property map.
	:param net: The network of interest
	:param partition: A discretely-valued property map on `net`
	:return: Dict with {partition_value: insularity}
	'''
	results = {}
	# One insularity computation per distinct partition value
	for label in set(partition.get_array()):
		# Boolean membership map for this community
		membership = net.new_vertex_property(
			'bool',
			vals = [partition[v] == label for v in net.vertices()])
		results[label] = insularity(net, membership)
	return results
def degree_dist(net, core, show_plot = False, save_plot = True, outfile = None):
	'''
	Calculate out degree, an empirical CDF, and ranking for each vertex.
	Plot both degree x empirical CDF and degree x ranking, highlighting core vertices.
	Note that the plot is saved as a file only if *both* `save_plot` is true and
	output filename are given.
	Side effects: writes `out-degree`, `out-degree ecdf`, and
	`out-degree rank` vertex property maps onto `net`.
	:param net: The network whose degree distribution we'd like to plot
	:param core: The property map of core vertices
	:param show_plot: Show the plot on the screen?
	:param save_plot: Save the plot as a file?
	:param outfile: Filename to use to save the plot
	:return: The CDF and ranking plots.
	'''
	# Build degree distribution
	# Out degree for every vertex
	out_degrees = [vertex.out_degree() for vertex in net.vertices()]
	# Write them into the graph
	net.vp['out-degree'] = net.new_vertex_property('int', vals = out_degrees)
	# x values: the distinct degrees
	degrees = list(set(out_degrees))
	# Use the ecdf to build the y values
	out_degree_ecdf = ecdf(out_degrees)
	# Use 1-ecdf for legibility when most nodes have degree near 0
	out_degree_dist = [1 - out_degree_ecdf(degree) for degree in degrees]
	# Write 1-ecdf into the graph
	net.vp['out-degree ecdf'] = \
		net.new_vertex_property('float',
			vals = [1 - out_degree_ecdf(net.vp['out-degree'][vertex])
						for vertex in net.vertices()])
	# Rank the vertices by out-degree (rank 1 = highest degree;
	# rankdata ranks ascending, so flip with `len - rank + 1`)
	vertex_ranking = len(out_degrees) - bn.rankdata(out_degrees) + 1
	# Write them into the graph
	# NOTE(review): rankdata assigns average (possibly fractional) ranks
	# to ties; storing into an 'int' property truncates them — confirm intended
	net.vp['out-degree rank'] = net.new_vertex_property('int', vals = vertex_ranking)
	# Map these against `degree`:
	#  for each degree, get the index of its first occurrence in the
	#  vertex-level list `out_degrees`; that index corresponds to the
	#  index in `vertex_ranking`
	ranking = [vertex_ranking[out_degrees.index(degree)]
					for degree in degrees]
	# Combine into a single data frame
	# (local name shadows this function's name; fine since the function
	# object isn't needed again inside the body)
	degree_dist = pd.DataFrame({'degree': degrees,
								'density': out_degree_dist,
								'rank': ranking})
	# Grab the degrees and rankings for the core vertices
	out_degrees_core = [net.vp['out-degree'][vertex] for vertex in core]
	out_degree_dist_core = [net.vp['out-degree ecdf'][vertex] for vertex in core]
	ranking_core = [net.vp['out-degree rank'][vertex] for vertex in core]
	degree_dist_core = \
		pd.DataFrame({'degree': out_degrees_core,
						'density': out_degree_dist_core,
						'rank': ranking_core})
	#print(degree_dist_core)
	print('Summary statistics for core vertex out-degrees:')
	print(pd.DataFrame({k: summary(degree_dist_core[k]) for k in degree_dist_core}))

	# Build the degree x density plot, on log-log axes
	density_plot = ggplot(aes(x = 'degree'),
			data = degree_dist) +\
		geom_area(aes(ymin = 0, ymax = 'density', fill = 'blue'), alpha = .3) +\
		geom_line(aes(y = 'density', color = 'blue'), alpha = .8) +\
		xlab('Out-degree') +\
		ylab('1 - Cumulative probability density') +\
		scale_x_log10() + scale_y_log10() +\
		theme_bw()
	# Add a rug for the core vertices (red '+' markers)
	density_plot = density_plot + \
		geom_point(aes(x = 'degree', y = 'density'),
			shape = '+', size = 250, alpha = .8, color = 'red',
			data = degree_dist_core)
	# If requested, show the plot
	if show_plot:
		print(density_plot)
	# Save to disk
	if outfile is not None and save_plot:
		ggsave(filename = outfile + '.degree_density' + '.pdf', plot = density_plot)
	# Same thing for degree x ranking
	ranking_plot = ggplot(aes(x = 'degree'), data = degree_dist) +\
		geom_area(aes(ymin = 0, ymax = 'rank', fill = 'blue'), alpha = .3) +\
		geom_line(aes(y = 'rank', color = 'blue'), alpha = .8) +\
		xlab('Out-degree') +\
		ylab('Rank') +\
		scale_x_log10() + scale_y_log10() +\
		theme_bw()
	ranking_plot = ranking_plot +\
		geom_point(aes(x = 'degree', y = 'rank'),
			shape = '+', size = 250, alpha = .8, color = 'red',
			data = degree_dist_core)
	if show_plot:
		print(ranking_plot)
	if outfile is not None and save_plot:
		ggsave(filename = outfile + '.degree_rank' + '.pdf', plot = ranking_plot)
	return(density_plot, ranking_plot)
def ev_centrality_dist(net, core, show_plot = False, save_plot = True, outfile = None):
	'''
	Calculate eigenvector centrality, an empirical CDF, and ranking for each vertex.
	Plot both centrality x empirical CDF and centrality x ranking, highlighting core vertices.
	Note that the plot is saved as a file only if *both* `save_plot` is true and
	output filename are given.
	Side effects: writes `evc`, `evc ecdf`, and `evc rank` vertex property
	maps onto `net`.
	:param net: The network whose degree distribution we'd like to plot
	:param core: The property map of core vertices
	:param show_plot: Show the plot on the screen?
	:param save_plot: Save the plot as a file?
	:param outfile: Filename to use to save the plot
	:return: The CDF and ranking plots.
	'''
	# Calculate eigenvector centrality and write it into the graph
	# (loose epsilon trades accuracy for speed on large nets)
	print('Calculating eigenvector centrality')
	net.vp['evc'] = gt.eigenvector(net, epsilon=1e-03)[1]
	print('Done')
	# Extract them into a useful format
	eigen_central = net.vp['evc'].get_array().tolist()
	# x values: the distinct centralities
	centralities = list(set(eigen_central))
	# Use the ecdf to build the y values
	eigen_central_ecdf = ecdf(eigen_central)
	# Use 1-ecdf for legibility when most nodes have centrality near 0
	centrality_distribution = \
		[1 - eigen_central_ecdf(centrality) for centrality in centralities]
	# Write 1-ecdf into the graph
	net.vp['evc ecdf'] = \
		net.new_vertex_property('float',
			vals = [1 - eigen_central_ecdf(net.vp['evc'][vertex])
						for vertex in net.vertices()])
	# Rank the vertices by eigenvector centrality (rank 1 = most central;
	# rankdata ranks ascending, so flip with `len - rank + 1`)
	vertex_ranking = len(eigen_central) - bn.rankdata(eigen_central) + 1
	# Write them into the graph
	# NOTE(review): rankdata assigns average (possibly fractional) ranks
	# to ties; storing into an 'int' property truncates them — confirm intended
	net.vp['evc rank'] = net.new_vertex_property('int', vals = vertex_ranking)
	#print(vertex_ranking)
	print('Mapping rankings to centralities')
	# Map these against `centralities`:
	#  for each degree, get the index of its first occurrence in the
	#  vertex-level list `eigen_central`; that index corresponds to the
	#  index in `vertex_ranking`
	ranking = [vertex_ranking[eigen_central.index(centrality)]
					for centrality in centralities]
	# Combine into a single data frame
	centrality_dist = pd.DataFrame({'centrality': centralities,
								'density': centrality_distribution,
								'rank': ranking})
	#print(centrality_dist.head())
	# Grab centralities and rankings for the core vertices
	centralities_core = [net.vp['evc'][vertex] for vertex in core]
	centrality_distribution_core = [net.vp['evc ecdf'][vertex] for vertex in core]
	ranking_core = [net.vp['evc rank'][vertex] for vertex in core]
	centrality_dist_core = \
		pd.DataFrame({'centrality': centralities_core,
						'density': centrality_distribution_core,
						'rank': ranking_core})
	#print(centrality_dist_core)
	print('Summary statistics for core vertex centralities:')
	print(pd.DataFrame({k: summary(centrality_dist_core[k]) for k in centrality_dist_core}))

	# Build the centrality x density plot, on log-log axes
	density_plot = ggplot(aes(x = 'centrality'), data = centrality_dist) +\
		geom_area(aes(ymin = 0, ymax = 'density', fill = 'blue'), alpha = .3) +\
		geom_line(aes(y = 'density'), color = 'blue', alpha = .8) +\
		xlab('Eigenvector centrality') +\
		ylab('1 - Cumulative probability density') +\
		scale_x_log10() + scale_y_log10() +\
		theme_bw()
	# Add a rug for the core vertices (red '+' markers)
	density_plot = density_plot + \
		geom_point(aes(x = 'centrality', y = 'density'),
			shape = '+', size = 250, alpha = .8, color = 'red',
			data = centrality_dist_core)
	# If requested, show the plot
	if show_plot:
		print(density_plot)
	# Save to disk
	if outfile is not None and save_plot:
		ggsave(filename = outfile + '.evc_density' + '.pdf', plot = density_plot)
	# Same thing for centrality x ranking
	ranking_plot = ggplot(aes(x = 'centrality'), data = centrality_dist) +\
		geom_area(aes(ymin = 0, ymax = 'rank', fill = 'blue'), alpha = .3) +\
		geom_line(aes(y = 'rank'), color = 'blue', alpha = .8) +\
		xlab('Eigenvector centrality') +\
		ylab('Rank') +\
		scale_x_log10() + scale_y_log10() +\
		theme_bw()
	ranking_plot = ranking_plot +\
		geom_point(aes(x = 'centrality', y = 'rank'),
			shape = '+', size = 250, alpha = .8, color = 'red',
			data = centrality_dist_core)
	if show_plot:
		print(ranking_plot)
	if outfile is not None and save_plot:
		ggsave(filename = outfile + '.evc_rank' + '.pdf', plot = ranking_plot)
	return(density_plot, ranking_plot)
def p_sample(samples, observation):
	'''
	Given a list of samples and an actual observation,
	calculate the p-value of the observation against the sample distribution.
	:param samples: A list or numpy array of sample values.
	:param observation: The observation to compare against.
	:return: The p-value. Left/right tail is chosen automatically to minimize p.
	'''
	# Empirical CDF evaluated at `observation`: the fraction of samples
	# that are <= observation.  Computing this directly avoids building
	# a full statsmodels ECDF object (which sorts all of `samples`) just
	# to evaluate the step function at a single point; the value is
	# identical to ecdf(samples)(observation).
	p = np.mean(np.asarray(samples) <= observation)
	# Report the smaller tail
	return(min(p, 1-p))
def plot_sample_dist(samples, observation, stat_label = '$Q$', p_label = None):
	'''
	Given a list of samples and an actual observation,
	build a plot for the sample distribution.
	:param samples: The list or numpy array of samples.
	:param observation: The actual observation to plot against.
	:param stat_label: The string to label the horizontal axis.
	:param p_label: P-value to label on the plot as text.
		Note that text labels are buggy in the current version of ggplot,
		so this does nothing right now.
	:return: The sample distribution plot.
	'''
	# Get the kernel density estimate of the sample distribution
	sample_dist = kde(samples)
	sample_dist.fit()
	# Normalize the density so it sums to 1 over the support grid
	sample_dist.norm = sample_dist.density / sum(sample_dist.density)
	#print(sample_dist.support)
	# Density curve + shaded area, with the observation as a red vline
	sample_dist_plot = \
		ggplot(aes(x = 'x', y = 'density', ymax = 'density', ymin = 0),
			data = pd.DataFrame({
				'density': sample_dist.norm,
				'x': sample_dist.support})) + \
		geom_line(color = 'blue') + \
		geom_area(fill = 'blue', alpha = '.25') + \
		geom_vline(aes(xintercept = observation), color = 'red')
	if p_label is not None:
		# TODO: adding a text label screws up the axes
		pass
#		sample_dist_plot = sample_dist_plot + \
#			geom_text(aes(x = observation, y = .5*max(sample_dist.norm)),
#				label = 'test')
	# Axis labels and theme
	sample_dist_plot = sample_dist_plot + \
		ylab('density') + \
		xlab(stat_label) + \
		theme_bw()
	return(sample_dist_plot)
def modularity_sample_dist(net, n_core, obs_mod, mod_func = gtcomm.modularity,
							n_samples = 500, seed_int = None,
							show_plot = False,
							save_plot = True, outfile = None):
	'''
	Generate a sample distribution for modularity using sets of random nodes
	of the same size as the core, then compare the observed modularity
	against that null distribution.
	:param net: Network of interest
	:param n_core: Number of core vertices
	:param obs_mod: Observed modularity
	:param mod_func: Function used to calculate modularity
	:param n_samples = 1000: Number of samples to draw
	:param seed_int: RNG seed (for reproducibility; None leaves the RNG state alone)
	:param show_plot: Show the plot on the screen?
	:param save_plot: Save the plot to a file?
	:param outfile: Filename to save the plot
	:return: p-value, fold induction of observation against sample
	'''
	# Initialize a container for samples
	samples = []
	# Set a seed
	if seed_int is not None:
		seed(seed_int)
	print('Generating ' + str(n_samples) + ' random partitions')
	while len(samples) < n_samples:
		# Generate a random partition: n_core vertices drawn without replacement
		#print('generating partition')
		temp_part = sample(list(net.vertices()), n_core)
		# `modularity` needs the groups passed as a PropertyMap
		#print('building PropertyMap')
		temp_part_pmap = net.new_vertex_property('bool', val = False)
		for vertex in temp_part:
			temp_part_pmap[vertex] = True
		#print('calculating modularity')
		# Calculate the modularity and save it in `samples`
		samples += [mod_func(net, temp_part_pmap)]
		# Progress report every 100 samples
		if len(samples) % 100 == 0:
			print(len(samples))
	# Calculate p-value of the observation against the sample distribution
	print('Mean sample value: ' + str(np.mean(samples)))
	p = p_sample(samples, obs_mod)
	print('P-value of observed value: ' + str(p))
	# Fold of observation relative to sampling distribution mean
	fold = obs_mod / np.mean(samples)
	print('Fold of observed value: ' + str(fold))
	# Plot the sample distribution
	sample_plot = plot_sample_dist(samples, obs_mod, p_label = p)
	if show_plot:
		print(sample_plot)
	if outfile is not None and save_plot:
		# Skip saving a degenerate (all-identical) sample distribution;
		# presumably the KDE/plot breaks in that case — confirm
		if max(samples) == min(samples):
			pass
		else:
			ggsave(filename = outfile + '.mod_sample' + '.pdf',
				plot = sample_plot)
	return(p, fold)
def optimal_sample_dist(net, obs_mod, obs_ins,
                        n_samples = 500, seed_int = None,
                        show_plot = False,
                        save_plot = True, outfile = None):
    '''
    Generate a sample distribution for modularity using an algorithm that
    tried to optimize modularity.

    Each sample runs a 2-community spin-glass optimization and records both
    the modularity and the insularity of the resulting partition.
    :param net: Network of interest
    :param obs_mod: Observed modularity
    :param obs_ins: Observed insularity
    :param n_samples: Number of samples to draw (default 500)
    :param seed_int: RNG seed
    :param show_plot: Show the plot on the screen?
    :param save_plot: Save the plot to a file?
    :param outfile: Filename to save the plot
    :return: p-value, fold induction of observation against sample
        NOTE(review): `p` and `fold` are overwritten by the insularity pass,
        so the returned values describe insularity, not modularity.
    '''
    # Initialize a container for samples
    samples_mod = []
    samples_ins = []
    # Set a seed
    if seed_int is not None:
        seed(seed_int)
    print('Generating ' + str(n_samples) + ' maximum-modularity partitions')
    while len(samples_mod) < n_samples:
        # Generate an optimal partition
        temp_part_pmap = gtcomm.community_structure(net, n_iter = 50,
                                                    n_spins = 2)
        # Calculate the modularity and save it in `samples_mod`
        samples_mod += [gtcomm.modularity(net, temp_part_pmap)]
        # Likewise with insularities
        samples_ins += [insularity(net, temp_part_pmap)]
        if len(samples_mod) % 25 == 0:
            # Progress report every 25 samples
            print(len(samples_mod))
    # Calculate p-value for modularity
    sample_mean = np.mean(samples_mod)
    print('Mean sample modularity: ' + str(sample_mean))
    p = p_sample(samples_mod, obs_mod)
    print('P-value of modularity: ' + str(p))
    # Fold of observation relative to sampling distribution mean
    fold = obs_mod / sample_mean
    if abs(fold) < 1:
        # Report folds symmetrically: always as a magnitude >= 1
        fold = 1 / fold
    print('Fold of observed modularity: ' + str(fold))
    # Plot the sample distribution
    try:
        sample_plot = plot_sample_dist(samples_mod, obs_mod, p_label = p)
        if show_plot:
            print(sample_plot)
        if outfile is not None and save_plot:
            ggsave(filename = outfile + '.opt_sample' + '.pdf',
                   plot = sample_plot)
    except GgplotError:
        print('Caught `GgplotError`. Skipping plot.')
    # P-value for insularity
    sample_mean = np.mean(samples_ins)
    print('Mean sample insularity: ' + str(sample_mean))
    p = p_sample(samples_ins, obs_ins)
    print('P-value of insularity: ' + str(p))
    # Fold of observation relative to sampling distribution mean
    fold = obs_ins / sample_mean
    if abs(fold) < 1:
        fold = 1 / fold
    print('Fold of observed insularity: ' + str(fold))
    # Plot the sample distribution
    # NOTE(review): this uses the same '.opt_sample.pdf' filename as the
    # modularity plot above, so the second save overwrites the first.
    try:
        sample_plot = plot_sample_dist(samples_ins, obs_ins, p_label = p)
        if show_plot:
            print(sample_plot)
        if outfile is not None and save_plot:
            ggsave(filename = outfile + '.opt_sample' + '.pdf',
                   plot = sample_plot)
    except GgplotError:
        print('Caught `GgplotError`. Skipping plot.')
    return(p, fold)
def run_analysis(netfile, compnet_files):
    '''
    Run the analysis.

    Loads the network, plots it, computes degree/modularity/insularity
    statistics with sampling distributions, runs an information-theoretic
    2-block partition, saves results, and repeats the sampling against the
    comparison networks.
    :param netfile: Filename of the network to analyze (without extension)
    :param compnet_files: List of filenames of the comparison networks, viz.,
        the high-energy physics networks.
    '''
    # Timestamp
    # --------------------
    print(datetime.now())

    # Load the network
    # --------------------
    net, outfile_pre, core_pmap, core_vertices = load_net(netfile + '.graphml',
                                                          core = True,
                                                          filter = True)
    output_folder = 'output/'
    outfile_pre = output_folder + outfile_pre
    # Plotting
    print('Plotting')
    layout = layout_and_plot(net, core_pmap, outfile_pre)
    # Store the layout in the net
    net.vp['layout'] = layout
    # Show only the core vertices
    net.set_vertex_filter(core_pmap)
    layout_and_plot(net, core_pmap, outfile_pre, filename_mod = '.core.net',
                    reverse_colors = True)
    net.set_vertex_filter(None)

    # Vertex statistics
    # --------------------
    # ECDF for out-degree distribution
    degree_dist(net, core_vertices, outfile = outfile_pre,
                show_plot = False, save_plot = True)
    # ECDF for eigenvector centrality
    ## Currently this is causing a segmentation fault
    # ev_centrality_dist(net, core_vertices, outfile = outfile_pre,
    #                    show_plot = False, save_plot = True)

    # Modularity
    # --------------------
    # Calculate modularity, using the core vertices as the partition
    modularity = gtcomm.modularity(net, core_pmap)
    print('Observed modularity: ' + str(modularity))
    obs_ins = insularity(net, core_pmap)
    print('Observed insularity: ' + str(obs_ins))
    # Calculate the number of core vertices
    n_core = len(core_vertices)
    # Construct a sampling distribution for the modularity statistic
    # And use it to calculate a p-value for the modularity
    print('Random sample modularity')
    modularity_sample_dist(net, n_core, modularity,
                           outfile = outfile_pre + '.mod',
                           show_plot = False, save_plot = True)
    print('Random sample insularities')
    modularity_sample_dist(net, n_core, obs_ins,
                           mod_func = insularity,
                           outfile = outfile_pre + '.ins',
                           show_plot = False, save_plot = True)
    # Information-theoretic partitioning
    print('Information-theoretic partitioning')
    # Calculate the partition; fixed seeds keep the block model reproducible
    gt.seed_rng(5678)
    np.random.seed(5678)
    part_block = gt.minimize_blockmodel_dl(net, B_min = 2, B_max = 2,
                                           verbose = True,
                                           overlap = False)
    # Extract the block memberships as a pmap
    net.vp['partition'] = part_block.get_blocks()
    # Calculate the modularity
    block_modularity = gtcomm.modularity(net, net.vp['partition'])
    print('Partion modularity: ' + str(block_modularity))
    print('Partition insularities')
    block_insularities = partition_insularity(net, net.vp['partition'])
    for community in block_insularities:
        print('Community ' + str(community) + ': ' +
              str(block_insularities[community]))
    print('Plotting')
    size_pmap = gt.prop_to_size(core_pmap, mi = 10, ma = 20)
    layout_and_plot(net, net.vp['partition'], outfile_pre,
                    size_pmap = size_pmap, filename_mod = '.partition')
    # Modularity optimization
    optimal_sample_dist(net, modularity, obs_ins,
                        outfile = outfile_pre,
                        show_plot = False, save_plot = True)

    # Save results
    # --------------------
    # The above covers all of the analysis to be written into the output files,
    # so we'll go ahead and save things now.
    print('Saving')
    # Save in graph-tool's binary format
    net.save(outfile_pre + '.out' + '.gt')
    # Replace vector-type properties with strings
    # (graphml cannot represent graph-tool vector properties directly)
    #net.list_properties()
    properties = net.vertex_properties
    for property_key in properties.keys():
        # NOTE(review): `property` shadows the builtin; harmless here but
        # worth renaming eventually.
        property = properties[property_key]
        if 'vector' in property.value_type():
            properties[property_key] = property.copy(value_type = 'string')
    # Save as graphml
    net.save(outfile_pre + '.out' + '.graphml')

    # Comparison networks
    # --------------------
    for compnet_file in compnet_files:
        # Load the comparison network
        compnet, compnet_outfile = load_net(compnet_file)
        # Set it to the same directedness as the network of interest
        compnet.set_directed(net.is_directed())
        # Size of compnet
        n_compnet = compnet.num_vertices()
        # Num vertices in compnet to use in each random partition
        # (scaled so the core fraction matches the network of interest)
        k_compnet = round(n_core / net.num_vertices() * n_compnet)
        # Sample distribution based on random partition
        print('Random sample modularities')
        print('Observed modularity: ' + str(modularity))
        modularity_sample_dist(compnet, k_compnet, modularity,
                               outfile = outfile_pre + '.mod.' + compnet_outfile,
                               show_plot = False, save_plot = True)
        print('Random sample insularities')
        print('Observed insularity: ' + str(obs_ins))
        modularity_sample_dist(compnet, k_compnet, obs_ins,
                               mod_func = insularity,
                               outfile = outfile_pre + '.ins.' + compnet_outfile,
                               show_plot = False, save_plot = True)
        # Sample distribution based on optimizing modularity
        # optimal_sample_dist(compnet, modularity, n_samples = 300,
        #                     outfile = outfile_pre + '.mod.' + compnet_outfile,
        #                     show_plot = False)

    # Timestamp
    # --------------------
    print(datetime.now())
    # Visually separate analyses
    print('-'*40)
if __name__ == '__main__':
    # Networks for analysis (swap in the commented alternatives as needed)
    netfiles = ['citenet0']
    #netfiles = ['autnet0']
    #netfiles = ['autnet1']
    #netfiles = ['autnet1', 'autnet0', 'citenet0']
    # Comparison networks
    #compnet_files = ['phnet.graphml']
    compnet_files = ['phnet.graphml', 'ptnet.graphml']

    # Set up logging: INFO-level console output plus a dated file in output/
    logging.basicConfig(level=logging.INFO, format = '%(message)s')
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler('output/' + str(date.today()) + '.log', 'w'))
    # Rebind the module-level name `print` to logger.info so every print()
    # call in this module is mirrored to the log file as well.
    print = logger.info
    print('-'*40)

    for netfile in netfiles:
        # Fixed seeds so each network's analysis run is reproducible
        seed(24680)
        gt.seed_rng(24680)
        run_analysis(netfile, compnet_files)
        print(datetime.now())
"hicks.daniel.j@gmail.com"
] | hicks.daniel.j@gmail.com |
5d5f3479b821973bba167087ee73016fa0ec3dc2 | 51f855b1baf5945b47f7042a5cb44f62eccf5e56 | /year.py | a1079171b1cfb051274ad657ceafb2f00b6d4332 | [] | no_license | swartzstrange/python3 | 1766940173497072dc2dd7293aba1a4e6f9e231c | 385e860e923e16a7d0158c11e5764ce2131f9780 | refs/heads/main | 2023-02-02T19:33:11.642505 | 2020-12-21T10:33:50 | 2020-12-21T10:33:50 | 319,213,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,119 | py | import tkinter
import os
from tkinter import *
from tkinter.messagebox import *
from tkinter.filedialog import *
class Notepad:
    """A minimal Tkinter Notepad clone: new/open/save files, cut/copy/paste,
    and an About dialog.

    NOTE: the widgets below are created at class-definition time, so merely
    importing this module instantiates a Tk root window.
    """

    __root = Tk()

    # default window width and height
    __thisWidth = 300
    __thisHeight = 300
    __thisTextArea = Text(__root)
    __thisMenuBar = Menu(__root)
    __thisFileMenu = Menu(__thisMenuBar, tearoff=0)
    __thisEditMenu = Menu(__thisMenuBar, tearoff=0)
    __thisHelpMenu = Menu(__thisMenuBar, tearoff=0)

    # To add scrollbar
    __thisScrollBar = Scrollbar(__thisTextArea)
    # Path of the currently open file; None means "Untitled"
    __file = None

    def __init__(self, **kwargs):
        """Build the window; accepts optional `width` and `height` kwargs."""
        # Set icon
        try:
            self.__root.wm_iconbitmap("Notepad.ico")
        except:
            # Icon file missing or unsupported on this platform -- keep going
            pass

        # Set window size (the default is 300x300)
        try:
            self.__thisWidth = kwargs['width']
        except KeyError:
            pass
        try:
            self.__thisHeight = kwargs['height']
        except KeyError:
            pass

        # Set the window text
        self.__root.title("Untitled - Notepad")

        # Center the window
        screenWidth = self.__root.winfo_screenwidth()
        screenHeight = self.__root.winfo_screenheight()
        # For left-alling
        left = (screenWidth / 2) - (self.__thisWidth / 2)
        # For right-allign
        top = (screenHeight / 2) - (self.__thisHeight / 2)
        # For top and bottom
        self.__root.geometry('%dx%d+%d+%d' % (self.__thisWidth,
                                              self.__thisHeight,
                                              left, top))

        # To make the textarea auto resizable
        self.__root.grid_rowconfigure(0, weight=1)
        self.__root.grid_columnconfigure(0, weight=1)

        # Add controls (widget)
        self.__thisTextArea.grid(sticky=N + E + S + W)

        # To open new file
        self.__thisFileMenu.add_command(label="New",
                                        command=self.__newFile)
        # To open a already existing file
        self.__thisFileMenu.add_command(label="Open",
                                        command=self.__openFile)
        # To save current file
        self.__thisFileMenu.add_command(label="Save",
                                        command=self.__saveFile)
        # To create a line in the dialog
        self.__thisFileMenu.add_separator()
        self.__thisFileMenu.add_command(label="Exit",
                                        command=self.__quitApplication)
        self.__thisMenuBar.add_cascade(label="File",
                                       menu=self.__thisFileMenu)

        # To give a feature of cut
        self.__thisEditMenu.add_command(label="Cut",
                                        command=self.__cut)
        # to give a feature of copy
        self.__thisEditMenu.add_command(label="Copy",
                                        command=self.__copy)
        # To give a feature of paste
        self.__thisEditMenu.add_command(label="Paste",
                                        command=self.__paste)
        # To give a feature of editing
        self.__thisMenuBar.add_cascade(label="Edit",
                                       menu=self.__thisEditMenu)

        # To create a feature of description of the notepad
        self.__thisHelpMenu.add_command(label="About Notepad",
                                        command=self.__showAbout)
        self.__thisMenuBar.add_cascade(label="Help",
                                       menu=self.__thisHelpMenu)

        self.__root.config(menu=self.__thisMenuBar)

        self.__thisScrollBar.pack(side=RIGHT, fill=Y)
        # Scrollbar will adjust automatically according to the content
        self.__thisScrollBar.config(command=self.__thisTextArea.yview)
        self.__thisTextArea.config(yscrollcommand=self.__thisScrollBar.set)

    def __quitApplication(self):
        """Destroy the root window, ending the mainloop."""
        self.__root.destroy()
        # exit()

    def __showAbout(self):
        # NOTE(review): showinfo(title, message) -- with a single argument
        # this shows a dialog with no message body; confirm intended text.
        showinfo("Notepad")

    def __openFile(self):
        """Prompt for a file and load its contents into the text area."""
        self.__file = askopenfilename(defaultextension=".txt",
                                      filetypes=[("All Files", "*.*"),
                                                 ("Text Documents", "*.txt")])
        if self.__file == "":
            # no file to open (dialog cancelled)
            self.__file = None
        else:
            # Try to open the file
            # set the window title
            self.__root.title(os.path.basename(self.__file) + " - Notepad")
            self.__thisTextArea.delete(1.0, END)
            file = open(self.__file, "r")
            self.__thisTextArea.insert(1.0, file.read())
            file.close()

    def __newFile(self):
        """Reset to an empty, untitled document."""
        self.__root.title("Untitled - Notepad")
        self.__file = None
        self.__thisTextArea.delete(1.0, END)

    def __saveFile(self):
        """Save to the current file, prompting for a name if untitled."""
        if self.__file == None:
            # Save as new file
            self.__file = asksaveasfilename(initialfile='Untitled.txt',
                                            defaultextension=".txt",
                                            filetypes=[("All Files", "*.*"),
                                                       ("Text Documents", "*.txt")])
            if self.__file == "":
                # Dialog cancelled; stay untitled
                self.__file = None
            else:
                # Try to save the file
                file = open(self.__file, "w")
                file.write(self.__thisTextArea.get(1.0, END))
                file.close()
                # Change the window title
                self.__root.title(os.path.basename(self.__file) + " - Notepad")
        else:
            file = open(self.__file, "w")
            file.write(self.__thisTextArea.get(1.0, END))
            file.close()

    def __cut(self):
        # Delegate to the Text widget's virtual clipboard event
        self.__thisTextArea.event_generate("<<Cut>>")

    def __copy(self):
        self.__thisTextArea.event_generate("<<Copy>>")

    def __paste(self):
        self.__thisTextArea.event_generate("<<Paste>>")

    def run(self):
        # Run main application
        self.__root.mainloop()
# Run main application
# Instantiate a 600x400 notepad window and enter the Tk event loop.
notepad = Notepad(width=600, height=400)
notepad.run()
| [
"noreply@github.com"
] | noreply@github.com |
3fafc5008c7ab7eaf1785a939ad7043423ac6756 | 1fad9fde9da1e5b51997d3f6ae58dd371f43339e | /s3_wrapper/__init__.py | c23781168fffffa3ad1c24480c7e01653b6cb19c | [] | no_license | penmark/s3-wrapper | ca5a3c44dedf0b178bfec834916e9ecc74b40a83 | 0de5e3d50b075e929ae52b2420c815a47821f262 | refs/heads/master | 2021-01-12T10:00:52.443459 | 2017-02-01T13:49:32 | 2017-02-01T13:49:32 | 76,335,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | import boto
from boto.s3.connection import NoHostProvided
def percent_callback(num_bytes, total_bytes):
    """Render an in-place percentage progress indicator on stdout.

    Backspaces over the previously printed figure, then prints the current
    transfer percentage without a trailing newline, flushing immediately.
    """
    percent = num_bytes / total_bytes * 100
    text = '\b' * 10 + '{:.2f}%'.format(percent)
    print(text, end='', flush=True)
class S3(object):
    """Thin convenience wrapper around a boto S3 connection with a default
    bucket and a default ACL policy for uploaded keys."""

    def __init__(self, options):
        # `options` is an attribute-style config object: access_key,
        # secret_key, host, is_secure, calling_format, bucket, and an
        # optional default_policy (defaults to 'public-read').
        if not options.host:
            options.host = NoHostProvided
        if not options.calling_format:
            options.calling_format = 'boto.s3.connection.SubdomainCallingFormat'
        self.conn = boto.connect_s3(
            aws_access_key_id=options.access_key,
            aws_secret_access_key=options.secret_key,
            host=options.host,
            is_secure=options.is_secure,
            calling_format=options.calling_format,
            debug=1)
        self.bucket = self.ensure_bucket(options.bucket)
        self.default_policy = getattr(options, 'default_policy', 'public-read')

    def ensure_bucket(self, bucket=None):
        """Resolve *bucket* (name or bucket object) to a bucket object,
        creating it if a name is given that does not exist yet; fall back to
        the instance's default bucket when *bucket* is falsy."""
        if bucket:
            if not isinstance(bucket, str):
                # Already a bucket object; pass it through unchanged
                return bucket
            b = self.conn.lookup(bucket)
            if not b:
                b = self.conn.create_bucket(bucket)
            return b
        return self.bucket

    def make_key(self, name, bucket=None):
        """Create a new key named *name* in *bucket* (or the default)."""
        bucket = self.ensure_bucket(bucket)
        return bucket.new_key(name)

    def put_filename(self, filename, key_name, bucket=None, metadata=None, **kwargs):
        """Upload a local file under *key_name* (skipping existing keys) and
        return its unauthenticated public URL."""
        bucket = self.ensure_bucket(bucket)
        if not metadata:
            metadata = {}
        key = self.make_key(key_name, bucket)
        for k, v in metadata.items():
            key.set_metadata(k, v)
        if not key.exists():
            key.set_contents_from_filename(filename, policy=self.default_policy, **kwargs)
        return key.generate_url(0, query_auth=False)

    def put_string(self, data, key_name, bucket=None, metadata=None, **kwargs):
        """Upload an in-memory string under *key_name* (skipping existing
        keys) and return its unauthenticated public URL."""
        bucket = self.ensure_bucket(bucket)
        if not metadata:
            metadata = {}
        key = self.make_key(key_name, bucket)
        for k, v in metadata.items():
            key.set_metadata(k, v)
        if not key.exists():
            key.set_contents_from_string(data, policy=self.default_policy, **kwargs)
        return key.generate_url(0, query_auth=False)

    def delete(self, key_name, bucket=None):
        """Delete *key_name* from *bucket* (or the default bucket)."""
        bucket = self.ensure_bucket(bucket)
        key = bucket.get_key(key_name)
        key.delete()

    def list_bucket(self, bucket=None, keys=False):
        """Yield each key in the bucket: key names when *keys* is true,
        otherwise public URLs."""
        bucket = self.ensure_bucket(bucket)
        for key in bucket.list():
            if keys:
                yield key.name
            else:
                yield key.generate_url(0, query_auth=False)

    def copy(self, src_bucket, src_key, dst_bucket, dst_key, move=False):
        """Copy *src_key* to *dst_key*, optionally deleting the source.
        Returns the destination's public URL, or None when the source key
        does not exist."""
        bucket = self.conn.get_bucket(src_bucket)
        # Make sure the destination bucket exists before copying into it
        self.ensure_bucket(dst_bucket)
        key = bucket.get_key(src_key)
        if not key:
            return None
        new_key = key.copy(dst_bucket, dst_key, preserve_acl=True)
        if move and new_key:
            key.delete()
        return new_key.generate_url(0, query_auth=False)

    def move(self, src_bucket, src_key, dst_bucket, dst_key):
        """Copy then delete: relocate a key between buckets."""
        return self.copy(src_bucket, src_key, dst_bucket, dst_key, move=True)
"pontus@wka.se"
] | pontus@wka.se |
7d8521d5854a94d73e095b63e9f8b8424aef96cd | c1ec53e988ca8a091be2e6fdfa0113409e0b8ba4 | /build_cgd_train.py | eb655dd4c6edb87649494938954461e2e20d49bf | [] | no_license | shehancaldera/robot_grasp_prediction | b30ddcb68b3c6a3a8f880434535cfd2796184fa8 | 3c0e697d02902a1ca08a4f70e7e0680a7bc2e678 | refs/heads/master | 2020-03-15T11:26:54.507758 | 2018-05-04T09:45:26 | 2018-05-04T09:45:26 | 132,121,076 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import keras
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.backend import backend as K
base_dir = '/media/baxter/DataDisk/Cornell Grasps Dataset/original'
train_dir = os.path.join(base_dir, 'train')
BASE_FILENAME = 'pcd0'
IMAGE_FILENAME_SUFFIX = 'png'
BBOX_FILENAME_SUFFIX = 'txt'
TARGET_IMAGE_WIDTH = 224
START_INSTANCE = 100
INSTANCE_RANGE = 950
def open_img(instance_num, target_size, base_filename, filename_suffix):
    """Open the RGB image for one dataset instance and resize it to a
    square of *target_size* pixels.

    The filename is built as <train_dir>/<base_filename><instance_num>r.<suffix>
    (the 'r' marks the RGB image in the Cornell Grasps Dataset layout).
    """
    img_filename = os.path.join(train_dir, base_filename + str(instance_num) + "r" + "." + filename_suffix)
    img = Image.open(img_filename)
    img = img.resize((target_size, target_size))
    return img
def img_to_array(img):
    """Flatten an RGB image (width, height, 3) into a 1-D float32 array
    with pixel values scaled from [0, 255] down to [0, 1]."""
    pixels = np.asarray(img, dtype='float32')
    scaled = pixels / 255
    return scaled.reshape(-1)
def img_from_array(img_array, target_img_width):
    # Converts a given 1D array to the image size. Make sure to use the same image
    # width to avoid overlapping or missing of pixel values
    # NOTE(review): the inverse of img_to_array only if the array is rescaled
    # back to [0, 255] first -- this function casts to uint8 as-is.
    img_array = np.reshape(img_array, (target_img_width, target_img_width, 3))
    img_from_array = Image.fromarray(img_array.astype('uint8'), 'RGB')
    return img_from_array
def open_bboxes(instance_num, base_filename, filename_suffix):
    """Read the positive grasp rectangles for one instance as a flat list
    of floats (8 values per rectangle: four x,y corner pairs).

    Reads <train_dir>/<base_filename><instance_num>cpos.<suffix>.
    """
    filename = os.path.join(train_dir, base_filename + str(instance_num) + "cpos" + "." + filename_suffix)
    with open(filename) as f:
        bboxes = list(map(
            lambda coordinate: float(coordinate), f.read().strip().split()))
    return bboxes
def bboxes_to_grasps(box):
    """Convert one 8-value grasp rectangle into a 5-value grasp.

    *box* holds four corner pairs: [x0, y0, x1, y1, x2, y2, x3, y3].
    Returns [x, y, tan, h, w] where (x, y) is the rectangle center rescaled
    by the fixed factors 0.35 / 0.47, and tan/h/w are derived from the
    corner coordinates.
    """
    center_x = box[0] + (box[4] - box[0]) / 2
    center_y = box[1] + (box[5] - box[1]) / 2
    x = center_x * 0.35
    y = center_y * 0.47
    tan = box[3] - box[1]
    h = box[3] + box[1]
    w = box[7] - box[6]
    return [x, y, tan, h, w]
def load_data(start_instance, instance_range):
    """Load training pairs for instances [start_instance, instance_range).

    Returns (x_train, y_train): one flattened image array and one 5-value
    grasp per positive rectangle (so each instance image is repeated once
    per rectangle).
    """
    x_train = []
    y_train = []
    for instance_num in range(start_instance, instance_range):
        bboxes = open_bboxes(instance_num, BASE_FILENAME, BBOX_FILENAME_SUFFIX)
        #print(bboxes)
        # Rectangles are stored as flat runs of 8 floats each
        for box_num in range(0, len(bboxes), 8):
            y_train_temp = bboxes_to_grasps(bboxes[box_num:box_num+8])
            y_train.append(y_train_temp)
            # NOTE(review): the image is re-opened and re-resized once per
            # rectangle; hoisting this out of the inner loop would be faster.
            img = open_img(instance_num, TARGET_IMAGE_WIDTH, BASE_FILENAME, IMAGE_FILENAME_SUFFIX)
            img_array = img_to_array(img)
            #print(img_array.shape)
            x_train.append(img_array)
    return x_train, y_train
def save_data(data_var_name, data_filename):
    """Write a dataset to *data_filename* as an importable Python module.

    The file contains a single assignment `data = <repr of the dataset>`,
    so it can later be loaded with `from <module> import data`.
    """
    print('Saving the data sets... :', data_filename)
    module_body = 'data = %s' % data_var_name
    with open(data_filename, 'w') as handle:
        handle.write(module_body)
    # from file import score as my_list -> importing the saved datasets
def save_data_local():
    """Load the full training split and persist it to two importable
    Python files (x_train_data.py / y_train_data.py)."""
    x_train_data, y_train_data = load_data(START_INSTANCE, INSTANCE_RANGE)
    print('Length of X_DATA', len(x_train_data))
    print('Length of Y_DATA', len(y_train_data))
    print('Saving datasets...')
    save_data(x_train_data, 'x_train_data.py')
    save_data(y_train_data, 'y_train_data.py')
    print('Saving datasets: DONE!')
def read_train_data():
    """Load and return the full training split (images, grasps) using the
    module-level instance range, without caching anything to disk."""
    return load_data(START_INSTANCE, INSTANCE_RANGE)
if __name__ == '__main__':
    # Script entry point: load the training data (discarding the result;
    # useful as a smoke test of the dataset layout).
    read_train_data()
"noreply@github.com"
] | noreply@github.com |
8498743e7bbfaff848a13b6460ad182c482df5ef | 3ca892ab450d54f730dea381d4f2c5c896badc98 | /avalon/vendor/qtawesome/animation.py | a9638d74b0dbe096a6d18b73b7d81d689c932e91 | [
"MIT"
] | permissive | Colorbleed/core | d5fa5746a179ecea9da0c0bdcdd6025f71ab9234 | a6601eef08eb9307cf7bf3c056785282fc377184 | refs/heads/master | 2021-07-04T22:19:49.255251 | 2019-03-08T12:21:58 | 2019-03-08T12:21:58 | 93,861,791 | 6 | 3 | MIT | 2019-12-02T08:44:54 | 2017-06-09T13:32:10 | Python | UTF-8 | Python | false | false | 1,284 | py | from ..Qt import QtCore
class Spin:
def __init__(self, parent_widget, interval=10, step=1):
self.parent_widget = parent_widget
self.interval, self.step = interval, step
self.info = {}
def _update(self, parent_widget):
if self.parent_widget in self.info:
timer, angle, step = self.info[self.parent_widget]
if angle >= 360:
angle = 0
angle += step
self.info[parent_widget] = timer, angle, step
parent_widget.update()
def setup(self, icon_painter, painter, rect):
if self.parent_widget not in self.info:
timer = QtCore.QTimer()
timer.timeout.connect(lambda: self._update(self.parent_widget))
self.info[self.parent_widget] = [timer, 0, self.step]
timer.start(self.interval)
else:
timer, angle, self.step = self.info[self.parent_widget]
x_center = rect.width() * 0.5
y_center = rect.height() * 0.5
painter.translate(x_center, y_center)
painter.rotate(angle)
painter.translate(-x_center, -y_center)
class Pulse(Spin):
    """Spin variant with a coarse 45-degree step every 300 ms, producing a
    stepped 'pulse' rotation rather than a smooth spin."""

    def __init__(self, parent_widget):
        Spin.__init__(self, parent_widget, interval=300, step=45)
| [
"roy_nieterau@hotmail.com"
] | roy_nieterau@hotmail.com |
3541be7710cfb9fd33d4c232c1bb19be30a746fe | bbc2135f8a49088686a4631dd12943c259d91c8e | /model.py | c49ad4fc039666bf55c579d55914424bcd3aec26 | [] | no_license | christinababaya/hb-final-project | e264cb52b5dff889478e45610da2238982f60b37 | 67f466ba00377a34b40cb4ad7d53a4723745c012 | refs/heads/main | 2023-04-02T20:10:20.012907 | 2021-04-11T01:38:51 | 2021-04-11T01:38:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py |
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
    """A user"""

    __tablename__ = 'users'

    # Surrogate primary key
    user_id = db.Column(db.Integer, autoincrement=True,
                        primary_key=True)
    # Login name; must be unique
    user_name = db.Column(db.String, unique=True, nullable=False)
    # NOTE(review): stored as a plain string column; confirm hashing is
    # applied before insert.
    password = db.Column(db.String, nullable=False)
    # Optional profile fields
    user_age = db.Column(db.Integer, nullable=True)
    user_weight = db.Column(db.Integer, nullable=True)
    user_zipcode = db.Column(db.Integer, nullable=True)

    def __repr__(self):
        return f'<User user_id={self.user_id} user_name={self.user_name}>'
class Workout(db.Model):
    """User Workout"""

    __tablename__ = 'workouts'

    workout_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    # Owning user
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
    workout_date = db.Column(db.DateTime, nullable=True)

    # Backref gives User.workouts
    user = db.relationship('User', backref='workouts')

    def __repr__(self):
        return f'<Workout workout_id={self.workout_id} workout_date={self.workout_date}>'
class Workout_exercise(db.Model):
    """Exercise specific for workout (association row between a workout and
    an exercise, carrying sets/reps/weight details)."""

    __tablename__ = 'workout_exercises'

    ##TODO complete table columns/repr
    we_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    workout_id = db.Column(db.Integer, db.ForeignKey('workouts.workout_id'))
    exercise_id = db.Column(db.Integer, db.ForeignKey('exercises.exercise_id'))
    we_sets = db.Column(db.Integer, nullable=False)
    we_reps = db.Column(db.Integer, nullable=False)
    # Units are free-form strings (e.g. rep unit, weight unit)
    we_repunit = db.Column(db.String, nullable=True)
    we_weight = db.Column(db.Integer, nullable=True)
    we_weightunit = db.Column(db.String, nullable=True)
    we_equipment = db.Column(db.String, nullable=True)

    # Backrefs give Workout.workout_exercises and Exercise.workout_exercises
    workout = db.relationship('Workout', backref='workout_exercises')
    exercise = db.relationship('Exercise', backref='workout_exercises')

    def __repr__(self):
        return f'<Workout_exercise we_id={self.we_id} we_sets={self.we_sets} we_reps={self.we_reps}>'
class Exercise(db.Model):
    """Specific exercise details"""

    __tablename__ = 'exercises'

    exercise_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    exercise_name = db.Column(db.String, nullable=False)
    exercise_info = db.Column(db.Text, nullable=False)
    # Identifier from the external exercise API, when sourced from one
    api_id = db.Column(db.Integer, nullable=True)

    def __repr__(self):
        return f'<Exercise exercise_id={self.exercise_id} exercise_name={self.exercise_name}>'
def connect_to_db(flask_app, db_uri='postgresql:///workouts', echo=True):
    """Bind the shared SQLAlchemy instance to *flask_app*.

    :param flask_app: the Flask application to configure
    :param db_uri: database connection URI (defaults to local 'workouts' db)
    :param echo: when True, SQLAlchemy logs every SQL statement it emits
    """
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    flask_app.config['SQLALCHEMY_ECHO'] = echo
    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.app = flask_app
    db.init_app(flask_app)

    print('Connected to the db!')
if __name__ == '__main__':
    # When run directly, attach the Flask app from server.py to the db
    # (import is deferred to avoid a circular import at module load).
    from server import app

    connect_to_db(app)
    # connect_to_db(app, echo=False)
"jessicachap223@gmail.com"
] | jessicachap223@gmail.com |
8b3708abb8c3b40eba5a9ea9072c5346f37edab3 | a5241edc493c0fe8e5743bbdd3f23dc357b8b883 | /third/urllib_error.py | 3f4ce78730a9505b6db09b8c1b41536c5a92f2d9 | [] | no_license | yangyangzijun/learn_pys | 25a06d8c7a4c707d218ff645edca63e56a158c9e | f5f024d172d66ecf3ee907a90f44a99f385b58ad | refs/heads/master | 2020-03-29T02:26:21.966959 | 2018-10-25T14:06:22 | 2018-10-25T14:06:22 | 149,436,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import urllib.request
import urllib.error
try:
data=urllib.request.urlopen("http://www.baidu.com")
print("ok")
except urllib.error.URLError as e:
print(e.code)
print(e.reason)
| [
"811295558@qq.com"
] | 811295558@qq.com |
1b478c8901f32352057ea471bc6af7e5e6f56e34 | 970a9020f2ee7ec81feb6f59afc5f035d84ef784 | /guessingGame.py | 5154dac801192283ab00e3141fdd2de0225d450e | [] | no_license | kml2p3/CS4830-Exploration2 | 0896f3afa3f31dcd050bf306df134515fba63895 | cc99b402b24002f017fe520cd78096bc065ad9fa | refs/heads/master | 2020-03-30T02:42:15.147006 | 2018-09-29T00:46:13 | 2018-09-29T00:46:13 | 150,644,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import random
number = random.randint(1,9)
guess = 0
numOfGuesses = 0
while guess != number and guess != "exit":
guess = input("What is your guess? ")
if guess == "exit":
break
guess = int(guess)
numOfGuesses += 1
if guess < number:
print("Too low!")
elif guess > number:
print("Too high!")
else:
print("Congratulations, you got it!")
print("It took ",numOfGuesses,"tries!") | [
"kml2p3@mail.missouri.edu"
] | kml2p3@mail.missouri.edu |
5d09f348af5df16b53230056d4eb3e6758f688c8 | 9d7a1f61e957c6ba688ba9acbd4810bfc41259bd | /crawling/scrapy/section04_03/section04_03/pipelines.py | d6934d13d6491f2787f92d85948d74fd762da68b | [] | no_license | saanghyuk/data_science_python | 17f4c35b9f4d197991fd0c03eecd06487ceaa9a0 | 7dde1ed2a3570edbdd716a43a4a340e64f7e2bb0 | refs/heads/master | 2023-08-24T10:47:13.478635 | 2021-11-05T15:37:33 | 2021-11-05T15:37:33 | 355,115,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,113 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
import csv
import xlsxwriter
class TestSpiderPipeline:
    """Scrapy item pipeline: keep sites ranked in the top 40 and write them
    to both an Excel workbook and a CSV file; drop everything ranked lower.
    """

    def __init__(self):
        # Excel output
        self.workbook = xlsxwriter.Workbook("./result_excel.xlsx")
        # CSV output ('w' truncates any previous run; switch to 'a' to append)
        self.file_opener = open("./result_excel.csv", 'w')
        self.csv_writer = csv.DictWriter(
            self.file_opener,
            fieldnames=['rank_num', 'site_name', 'daily_time_site',
                        'daily_page_view', 'is_pass'])
        # Worksheet the Excel rows are written into
        self.worksheet = self.workbook.add_worksheet()
        # Next Excel row to write (1-based, A1-style addressing)
        self.rowcount = 1

    def open_spider(self, spider):
        """Called once when the spider starts."""
        spider.logger.info('TestSpider Pipeline Started ')

    def process_item(self, item, spider):
        """Persist items ranked 1-40; raise DropItem for anything lower."""
        if int(item.get('rank_num')) < 41:
            item['is_pass'] = True
            # Excel: one cell per field on the current row
            self.worksheet.write('A%s' % self.rowcount, item.get('rank_num'))
            self.worksheet.write('B%s' % self.rowcount, item.get('site_name'))
            self.worksheet.write('C%s' % self.rowcount, item.get('daily_time_site'))
            self.worksheet.write('D%s' % self.rowcount, item.get('daily_page_view'))
            self.worksheet.write('E%s' % self.rowcount, item.get('is_pass'))
            self.rowcount += 1
            # CSV: same fields in one row
            self.csv_writer.writerow(item)
            return item
        else:
            # BUG FIX: the message used to read item.get('rank_number'), a
            # key that does not exist, so every drop reported rank "None".
            raise DropItem('Dropped Item. Because This Site Rank is {}'.format(
                item.get('rank_num')))

    def close_spider(self, spider):
        """Called once when the spider finishes: flush and close outputs."""
        # Close the Excel workbook (writes the file to disk)
        self.workbook.close()
        # Close the CSV file handle
        self.file_opener.close()
        spider.logger.info('TestSpider Pipeline Closed')
"saanghyuk@gmail.com"
] | saanghyuk@gmail.com |
05926ba3ca184ff6f9aeaa8c26c96fecba2df8dd | 14148656b8c28ea4a28f45b00b0792004a2904db | /message/message_adapter.py | 0645d332ff3f3a6c8321b001ee1a775bf77acf33 | [] | no_license | CaveMike/mercury | 7c2d2bbb1e1352db1faa5ad049fab018ac3410d4 | eedaa52c1e49e91897533d93f2bf85654f80f423 | refs/heads/master | 2021-01-21T12:26:55.861250 | 2011-08-03T15:58:20 | 2011-08-03T15:58:20 | 2,149,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | #!/usr/bin/env python
from iron.dispatcher import Dispatcher
from iron.event import Event
from mercury.core import SipException
from mercury.header.header import SIP_CRLF
from mercury.message.message import Message
from mercury.message.message import MessageEvent
from mercury.message.message_assembler import DatagramReassembler
from mercury.message.message_assembler import StreamReassembler
from mercury.message.message_coder import MessageCoder
from mercury.network.netevent import NetError
from mercury.network.netevent import NetEvent
from mercury.network.network import Network
import logging
class MessageAdapter(object):
    """Adapts between NetEvents and MessageEvents.
    Routes outgoing MessageEvents to the appropriate destination using the
    the appropriate source.

    NOTE: this is Python 2 code (print statements). Per-connection stream
    reassembly state is tracked in self.connections; connectionless traffic
    uses the shared DatagramReassembler in self.default.
    """

    def __init__( self, name, parent ):
        #super( MessageAdapter, self ).__init__( name, parent )
        self.log = logging.getLogger( self.__class__.__name__ )

        # Underlying transport; this adapter listens for its NetEvents.
        self.network = Network( 'net', self )
        self.network.addListener( self )

        # NOTE(review): self.query/self.send/self.notify/self.name are not
        # defined here -- presumably provided by the dispatcher framework;
        # confirm against iron.dispatcher.
        self.coder = MessageCoder( self.query( 'network.encoding' ) )
        self.default = DatagramReassembler()
        self.connections = {}

    def identifyEvent( self, event ):
        # Map an incoming event object to its dispatch id; reject anything
        # that is not a Message/Net event.
        self.log.info( str(event) )

        if isinstance( event, MessageEvent ):
            return event.id
        elif isinstance( event, NetEvent ):
            return event.id
        elif isinstance( event, NetError ):
            return event.id

        raise SipException( '[' + str(self.name) + '] ' + 'Ignoring event ' + str(event) + '.' )

    def onBind( self, event ):
        # Pass through to the underlying network implementation.
        self.send( event, self.network, queued=False )

    def onUnbind( self, event ):
        # Pass through to the underlying network implementation.
        self.send( event, self.network, queued=False )

    def onRxPacket( self, event ):
        # Decode the message and, if the decoding succeeded, pass the MessageEvent up.
        text = self.coder.decode( event.packet )

        if not event.connection:
            # Connectionless (datagram) traffic: one packet == one message.
            message = self.default.parse( text )
        else:
            # Stream traffic: feed the per-connection reassembler.
            #FIXME: handle KeyError.
            message = self.connections[event.connection].parse( text )

        if message != None:
            newEvent = MessageEvent( MessageEvent.EVENT_RX, message, transport=event.transport, localAddress=event.localAddress, localPort=event.localPort, remoteAddress=event.remoteAddress, remotePort=event.remotePort, useragent=self )
            self.notify( newEvent, queued=False )

        event.handled = True

    def __onTxPacket( self, event ):
        # Shared transmit path for requests and responses.
        # Determine the transport, addresses, and ports to use and adjust the
        # SIP message as necessary.
        self.routeMessage( event )

        # Encode the message, and if the encoding succeeded, pass the NetEvent down.
        text = self.coder.encode( event.message )
        newEvent = NetEvent( NetEvent.EVENT_TX_PACKET, event.transport, event.localAddress, event.localPort, event.remoteAddress, event.remotePort, packet=text )
        if newEvent:
            self.send( newEvent, self.network, queued=False )

        event.handled = True

    def onTxRequest( self, event ):
        self.__onTxPacket( event )

    def onTxResponse( self, event ):
        self.__onTxPacket( event )

    def onConnected( self, event ):
        # New stream connection: allocate its reassembler.
        print 'ccc', str(event.connection)
        self.connections[event.connection] = StreamReassembler()
        event.handled = True

    def onDisconnected( self, event ):
        # Connection closed: discard its reassembly state.
        #FIXME: handle KeyError.
        print 'ddd', str(event.connection)
        #print self.connections
        del self.connections[event.connection]
        event.handled = True

    def onNetError( self, event ):
        self.log.error( str(event) )
        #FIXME: Not sure what to do with these events. Should they be sent up as-is?
        # Or converted to MessageEvents?
        self.notify( event, queued=False )
        event.handled = True

    def routeMessage( self, event ):
        """This function determines the address and port to send the message to.
        For requests, this function also determines the transport, address, and port to
        send the message from.
        Request:
        Look up Request-URI host and get remote transport, address and port.
        Modify/set Contact.
        Modify/set Via to local transport, address, and port.
        Response:
        Get the destination from the Via.
        """
        #FIXME:IMPLEMENT.
        pass
| [
"corrigan@gmail.com"
] | corrigan@gmail.com |
ac78f5706a5fa6ab691f744614ebe243eeb0e6e6 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/SimpleXMLRPCServer/SimpleXMLRPCServer_dotted_name.py | 927f913a51fc70c40159c7b5e56b864de61651e3 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
from SimpleXMLRPCServer import SimpleXMLRPCServer
import os
server = SimpleXMLRPCServer(('localhost', 9000), allow_none=True)
server.register_function(os.listdir, 'dir.list')
server.register_function(os.mkdir, 'dir.create')
server.register_function(os.rmdir, 'dir.remove')
try:
print 'Use Control-C to exit'
server.serve_forever()
except KeyboardInterrupt:
print 'Exiting' | [
"350840291@qq.com"
] | 350840291@qq.com |
37e222c25edc77b749c6c4a0af3b961c6cd12de0 | 03dc4293ba7f7d1ab61668d1b82c88a21e50a942 | /src/design/change_card.py | 010acd1a2ded6f1a1dabeb37aabb8179c175f09a | [] | no_license | JabaXNT/YandexProject2 | e0f333374e719c539b09c81466cfc3b6474a245c | d362b5979f37969a37ad0fd796d452f3f7f64749 | refs/heads/master | 2023-02-02T17:24:35.469464 | 2020-12-25T09:53:52 | 2020-12-25T09:53:52 | 321,351,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'change_card.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_2(object):
    """Generated UI form (pyuic5 output from change_card.ui).

    Builds an edit form with line edits for surname, first name,
    patronymic, and birth year, plus a save button. Do not hand-edit the
    layout values; regenerate from the .ui file instead.
    """
    def setupUi(self, Dialog_2):
        # Create all child widgets on Dialog_2 with fixed geometries.
        Dialog_2.setObjectName("Dialog_2")
        Dialog_2.resize(413, 221)
        # Input fields (surname, patronymic, first name, birth year).
        self.sur_new = QtWidgets.QLineEdit(Dialog_2)
        self.sur_new.setGeometry(QtCore.QRect(80, 30, 211, 20))
        self.sur_new.setObjectName("sur_new")
        self.pat_new = QtWidgets.QLineEdit(Dialog_2)
        self.pat_new.setGeometry(QtCore.QRect(200, 90, 211, 20))
        self.pat_new.setObjectName("pat_new")
        self.bday_lbl = QtWidgets.QLabel(Dialog_2)
        self.bday_lbl.setGeometry(QtCore.QRect(0, 120, 201, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.bday_lbl.setFont(font)
        self.bday_lbl.setObjectName("bday_lbl")
        self.sur_lbl = QtWidgets.QLabel(Dialog_2)
        self.sur_lbl.setGeometry(QtCore.QRect(0, 30, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.sur_lbl.setFont(font)
        self.sur_lbl.setLineWidth(1)
        self.sur_lbl.setObjectName("sur_lbl")
        self.name_new = QtWidgets.QLineEdit(Dialog_2)
        self.name_new.setGeometry(QtCore.QRect(80, 60, 211, 20))
        self.name_new.setObjectName("name_new")
        self.name_lbl = QtWidgets.QLabel(Dialog_2)
        self.name_lbl.setGeometry(QtCore.QRect(0, 60, 47, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.name_lbl.setFont(font)
        self.name_lbl.setObjectName("name_lbl")
        self.bday_new = QtWidgets.QLineEdit(Dialog_2)
        self.bday_new.setGeometry(QtCore.QRect(120, 120, 211, 20))
        self.bday_new.setObjectName("bday_new")
        # Header label ("Enter:") in a larger bold font.
        self.Enter_lbl = QtWidgets.QLabel(Dialog_2)
        self.Enter_lbl.setGeometry(QtCore.QRect(0, 0, 191, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.Enter_lbl.setFont(font)
        self.Enter_lbl.setObjectName("Enter_lbl")
        self.fath_lbl = QtWidgets.QLabel(Dialog_2)
        self.fath_lbl.setGeometry(QtCore.QRect(0, 90, 201, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.fath_lbl.setFont(font)
        self.fath_lbl.setObjectName("fath_lbl")
        # Save button; connected to a handler outside this generated class.
        self.save_btn = QtWidgets.QPushButton(Dialog_2)
        self.save_btn.setGeometry(QtCore.QRect(160, 150, 91, 41))
        self.save_btn.setObjectName("save_btn")
        # Status label at the bottom, empty until set at runtime.
        self.label = QtWidgets.QLabel(Dialog_2)
        self.label.setGeometry(QtCore.QRect(0, 190, 411, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setText("")
        self.label.setObjectName("label")
        self.retranslateUi(Dialog_2)
        QtCore.QMetaObject.connectSlotsByName(Dialog_2)
    def retranslateUi(self, Dialog_2):
        # Apply the (Russian) user-visible strings; kept separate so the
        # form can be re-translated at runtime.
        _translate = QtCore.QCoreApplication.translate
        Dialog_2.setWindowTitle(_translate("Dialog_2", "Dialog"))
        self.bday_lbl.setText(_translate("Dialog_2", "Год Рождения:"))
        self.sur_lbl.setText(_translate("Dialog_2", "Фамилию:"))
        self.name_lbl.setText(_translate("Dialog_2", "Имя:"))
        self.Enter_lbl.setText(_translate("Dialog_2", "Введите:"))
        self.fath_lbl.setText(_translate("Dialog_2", "Отчество (При наличии):"))
        self.save_btn.setText(_translate("Dialog_2", "Сохранить"))
| [
"gluhow2015@gmail.com"
] | gluhow2015@gmail.com |
411846624c150abad251688c80a09c1cad1dc3a9 | 3dcc6eaef0ca68b230ed61b9fd2bfaf78f8d1c7d | /todo_app/todos/models/__init__.py | 007b0f8bc1c970fe2f9d07ff26b0dd5391d4d216 | [] | no_license | ivo-bass/ToDo-App | a6f92be6ba8dcb266cd9ab58d50bafc44ce3db9f | 0410fe885f729ef85e83a7779a5e971e42f74479 | refs/heads/main | 2023-05-14T13:28:50.219962 | 2021-06-18T13:14:49 | 2021-06-18T13:14:49 | 373,607,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from .todo import Todo
from .priority import Priority
from .category import Category
| [
"ivailo.ignatoff@gmail.com"
] | ivailo.ignatoff@gmail.com |
970ff15cbbbfb49cf8d755a5cd7dcc7e24404914 | 1b6f7e2b9482cd7db1deebebee9a2f0dbe67c868 | /app/app/urls.py | 0e99ccf47fb4627448a7bd607a6bf694c6ed4784 | [
"MIT"
] | permissive | Yi-Gaoqiao/homework_todo_api | bc1fcb4a0e6056bb39ea164d4851ac445c23226a | b803aea68229e0e1bc084be2f36fd668a2d911e3 | refs/heads/master | 2022-11-19T13:48:40.142377 | 2020-07-12T08:04:11 | 2020-07-12T08:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Delegate api/user/ routes to the user app's URLconf.
    path('api/user/', include('user.urls')),
    # Delegate the remaining api/ routes to the todo app's URLconf.
    path('api/', include('todo.urls')),
]
| [
"2bsttas@gmail.com"
] | 2bsttas@gmail.com |
6005b320155e884dcb8bc9c7994fc6807bcf4c35 | aa7c6a9276a859f75b3c5181a92f71d7c19122a5 | /zvt/domain/quotes/stock/stock_1m_kdata.py | f1378710d90001bd962e69feaa23a05bf88f493e | [
"MIT"
] | permissive | Pengyuyan2/zvt | deef9c5e5bd91c65728ad9bac8c79499707519ee | 9f9c77efcd34c04aaf11b12da0cf483cbe55e297 | refs/heads/master | 2023-07-12T16:55:15.040579 | 2021-08-22T09:41:33 | 2021-08-22T09:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, don't change it
from sqlalchemy.orm import declarative_base
from zvt.contract.register import register_schema
from zvt.domain.quotes import StockKdataCommon
# Dedicated SQLAlchemy declarative base for this kdata schema.
KdataBase = declarative_base()


class Stock1mKdata(KdataBase, StockKdataCommon):
    """1-minute stock kdata (kline) records; columns come from StockKdataCommon."""
    __tablename__ = 'stock_1m_kdata'


# Register the schema with zvt's contract layer for the joinquant provider.
register_schema(providers=['joinquant'], db_name='stock_1m_kdata', schema_base=KdataBase, entity_type='stock')
# the __all__ is generated
__all__ = ['Stock1mKdata']
"5533061@qq.com"
] | 5533061@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.