text stringlengths 8 6.05M |
|---|
import wx
class TestFrame(wx.Frame):
    """'Real World' sizer example: a personal-information entry form.

    Demonstrates nesting BoxSizers with a FlexGridSizer. (The original
    header comment claimed inheritance from PanelFrame; it is a wx.Frame.)
    """

    def __init__(self):
        wx.Frame.__init__(self, None, -1, '"Real World" sizer example')
        panel = wx.Panel(self)

        # 1st create controls
        topLbl = wx.StaticText(panel, -1, "Personal Information")
        topLbl.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
        nameLbl = wx.StaticText(panel, -1, "Name:")
        name = wx.TextCtrl(panel, -1, "")
        addrLbl = wx.StaticText(panel, -1, "Address:")
        addr1 = wx.TextCtrl(panel, -1, "")
        addr2 = wx.TextCtrl(panel, -1, "")
        cstLbl = wx.StaticText(panel, -1, "City, State, Zip:")
        city = wx.TextCtrl(panel, -1, "", size=(150, -1))
        state = wx.TextCtrl(panel, -1, "", size=(50, -1))
        zip_code = wx.TextCtrl(panel, -1, "", size=(70, -1))  # was `zip`: shadowed the builtin
        phoneLbl = wx.StaticText(panel, -1, "Phone:")
        phone = wx.TextCtrl(panel, -1, "")
        emailLbl = wx.StaticText(panel, -1, "Email:")
        email = wx.TextCtrl(panel, -1, "")
        saveBtn = wx.Button(panel, -1, "Save")
        cancelBtn = wx.Button(panel, -1, "Cancel")

        # 2nd create layout; topSizer is the top-level sizer
        topSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer.Add(topLbl, 0, wx.ALL, 5)
        topSizer.Add(wx.StaticLine(panel), 0,
                     wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        labelAlign = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL

        # addrSizer -- grid that holds the address info; column 1 stretches
        addrSizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=5)
        addrSizer.AddGrowableCol(1)
        addrSizer.Add(nameLbl, 0, labelAlign)
        addrSizer.Add(name, 0, wx.EXPAND)
        addrSizer.Add(addrLbl, 0, labelAlign)
        addrSizer.Add(addr1, 0, wx.EXPAND)
        addrSizer.Add((10, 10))  # empty spacer in the label column
        addrSizer.Add(addr2, 0, wx.EXPAND)

        # sub-sizer for the city / state / zip row
        cstSizer = wx.BoxSizer(wx.HORIZONTAL)
        cstSizer.Add(city, 1)
        cstSizer.Add(state, 0, wx.LEFT | wx.RIGHT, 5)
        cstSizer.Add(zip_code)
        addrSizer.Add(cstLbl, 0, labelAlign)
        addrSizer.Add(cstSizer, 0, wx.EXPAND)
        addrSizer.Add(phoneLbl, 0, labelAlign)
        addrSizer.Add(phone, 0, wx.EXPAND)
        addrSizer.Add(emailLbl, 0, labelAlign)
        addrSizer.Add(email, 0, wx.EXPAND)
        topSizer.Add(addrSizer, 0, wx.EXPAND | wx.ALL, 10)

        # buttons in a row with stretchable gaps around them
        btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        btnSizer.Add((20, 20), 1)
        btnSizer.Add(saveBtn)
        btnSizer.Add((20, 20), 1)
        btnSizer.Add(cancelBtn)
        btnSizer.Add((20, 20), 1)
        topSizer.Add(btnSizer, 0, wx.EXPAND | wx.BOTTOM, 10)

        panel.SetSizer(topSizer)
        topSizer.Fit(self)
        topSizer.SetSizeHints(self)  # prevent the frame from getting too small
if __name__ == '__main__':
    # wx.PySimpleApp was deprecated and removed in wxPython 4 (Phoenix);
    # wx.App(False) behaves the same on both classic and Phoenix builds.
    app = wx.App(False)
    TestFrame().Show()
    app.MainLoop()
|
class Solution:
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of *s* without
        repeating characters.

        Sliding window [i, j): grow j while characters are unique; on a
        repeat, evict from the left until the duplicate is gone.

        :type s: str
        :rtype: int
        """
        window = {}   # characters currently inside the window
        best = 0
        i = 0         # left edge (inclusive)
        j = 0         # right edge (exclusive)
        # `i <= j` always holds, so only j needs the bound (the original
        # also tested `i < len(s)`, which is redundant).
        while j < len(s):
            if s[j] in window:  # membership test directly; `.keys()` was redundant
                del window[s[i]]
                i += 1
            else:
                window[s[j]] = 1
                j += 1
                # window only grows here, so this is the only place the
                # maximum can improve
                best = max(j - i, best)
        return best
solution = Solution()
# Same six sample strings as before, printed in the same order.
for sample in ("abcabcbb", "bbbbb", "pwwkew", "", "b", " "):
    print(solution.lengthOfLongestSubstring(sample))
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from ckeditor_uploader.fields import RichTextUploadingField
from base.models import BaseArticle, BaseArticleSection
class News(BaseArticle):
    """News article: a BaseArticle with an extra rich-text teaser."""
    # Rich-text teaser shown in listings; optional.
    excerpt = RichTextUploadingField(_('Анонс'), blank=True, null=True)
    # Register `excerpt` with the project's model-translation machinery
    # alongside the fields BaseArticle already translates.
    translation_fields = BaseArticle.translation_fields + ('excerpt',)

    class Meta:
        verbose_name = _('Новость')
        verbose_name_plural = _('Новости')

    def get_absolute_url(self, lang=settings.DEFAULT_LANGUAGE):
        # NOTE(review): the default is evaluated once at import time, so a
        # later change to settings.DEFAULT_LANGUAGE would not be picked up --
        # confirm this is intended.
        return reverse('press:press_page', kwargs={
            'lang': lang,
            'slug': self.slug
        })
class NewsSection(BaseArticleSection):
    """A content section belonging to a single News article."""
    # on_delete is mandatory on ForeignKey since Django 2.0; CASCADE matches
    # the implicit pre-2.0 default, so behaviour is unchanged for old callers.
    news = models.ForeignKey(News, verbose_name=_('Новость'),
                             related_name='sections',
                             on_delete=models.CASCADE)

    class Meta:
        verbose_name = _('Секция новости')
        # NOTE(review): plural reads the same as the singular; probably meant
        # 'Секции новости' -- confirm with translators before changing.
        verbose_name_plural = _('Секция новости')
|
#!/bin/python3
# This is done with hasty code
letters = [0] * 26
for char in input().strip():
letters[ord(char)-97] += 1
bycount = {}
for i in range(len(letters)):
if letters[i] == 0:
continue
bycount[letters[i]] = bycount.get(letters[i], 0) + 1
if len(bycount) > 2:
print('NO')
elif len(bycount) == 2 and min(list(bycount.values())) > 1:
print('NO')
else:
print('YES') |
"""
There are a total of n courses you have to take, labeled from 0 to n - 1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
For example:
2, [[1,0]]
There are a total of 2 courses to take. To take course 1 you should have finished course 0. So the correct course order is [0,1]
4, [[1,0],[2,0],[3,1],[3,2]]
There are a total of 4 courses to take. To take course 3 you should have finished both courses 1 and 2.
Both courses 1 and 2 should be taken after you finished course 0. So one correct course order is [0,1,2,3].
Another correct ordering is [0,2,1,3].
"""
class Solution(object):
    def findOrder(self, numCourses, prerequisites):
        """Return one valid course ordering, or [] if a cycle makes it
        impossible.

        Runs Kahn's algorithm on the *reversed* prerequisite graph (edges
        course -> prereq); reversing the processing order at the end yields
        a valid schedule for the original graph.

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: List[int]
        """
        pending = [0] * numCourses                         # reversed-graph in-degrees
        follows = {node: [] for node in range(numCourses)}  # reversed adjacency
        for course, prereq in prerequisites:
            pending[prereq] += 1
            follows[course].append(prereq)

        # Seed with every node that nothing points at (in the reversed graph).
        order = [node for node, count in enumerate(pending) if count == 0]
        cursor = 0
        while cursor < len(order):
            for nxt in follows[order[cursor]]:
                pending[nxt] -= 1
                if pending[nxt] == 0:
                    order.append(nxt)
            cursor += 1

        # All nodes processed => DAG; reversed order is a valid schedule.
        return order[::-1] if len(order) == numCourses else []
if __name__ == '__main__':
    # Sanity checks from the problem statement; the expected lists pin the
    # exact order this implementation produces.
    solver = Solution()
    assert (solver.findOrder(2, [[1, 0]]) == [0, 1])
    assert (solver.findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]) == [0, 2, 1, 3])
def read_lines(path):
    """Return the lines of *path* as a list (newline characters stripped).

    Fixes from the original scan: the file handle is closed (with-block),
    the builtin `str` is no longer shadowed, the quadratic character-by-
    character concatenation is gone, and a newline-terminated file no
    longer produces a spurious empty final "line" in the count.
    """
    with open(path) as source:
        return source.read().splitlines()


def main(path="C:/a.txt"):
    """Print a numbered listing of every line in *path*."""
    lines = read_lines(path)
    print("Total Lines in word file : ", len(lines))
    for number, text in enumerate(lines, start=1):
        print(" Line ", number, " : ", text)


if __name__ == "__main__":
    main()
|
from rest_framework.routers import SimpleRouter
from api.banners import views
# Register the banner viewset; SimpleRouter generates list/detail routes.
router = SimpleRouter()
# NOTE(review): the '_?' makes the underscore optional, so both
# 'sitebanners' and 'site_banners' match -- confirm both spellings are meant.
router.register(r'^site_?banners', views.SiteBannerViewSet, basename='site_banners')

urlpatterns = router.urls
|
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
from utils import *
class BaseNet(nn.Module):
    """Minimal training-harness base module.

    Stores the experiment config and lets the training loop attach an
    optimizer, a loss op and an LR scheduler after construction; subclasses
    implement the actual forward computation.
    """

    def __init__(self, config):
        super(BaseNet, self).__init__()
        self.config = config
        # NOTE(review): an embedding layer was sketched here and commented
        # out in the original -- subclasses presumably build their own
        # embeddings from config; confirm before re-adding:
        #   self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        #   self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)

    def add_optimizer(self, optimizer):
        """Attach the optimizer used by the training loop."""
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        """Attach the loss function."""
        self.loss_op = loss_op

    def add_lr_scheduler(self, scheduler):
        """Attach the learning-rate scheduler."""
        self.scheduler = scheduler

    def forward(self, x):
        """Subclasses implement the computation; the base returns None."""
        pass
# -*- coding: utf-8 -*-
# @Author: aaronlai
# @Date: 2016-10-15 01:00:07
# @Last Modified by: AaronLai
# @Last Modified time: 2016-11-12 13:03:00
from unittest import TestCase
from run_VQA import run_VQA
class Test_running(TestCase):
    """Smoke test: the full VQA pipeline should run end to end."""

    def test_VQA(self):
        """Run run_VQA on the training fixtures with answer weight 2."""
        fixtures = ('train_questions', 'train_choices', 'train_captions.json',
                    'train_ans_sol', 'glove_300d.csv')
        run_VQA(*fixtures, weight=2)
|
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_validators import validate_bool
# Connector metadata for Bamboo Relay (a 0x-based decentralized exchange).
CENTRALIZED = False            # DEX: orders settle on-chain
EXAMPLE_PAIR = "ZRX-WETH"
DEFAULT_FEES = [0, 0.00001]    # [maker, taker] -- presumably in FEE_TOKEN units; confirm
USE_ETHEREUM_WALLET = True
FEE_TYPE = "FlatFee"
FEE_TOKEN = "ETH"

# Optional per-connector settings; required_if=lambda: False means the user
# is never prompted automatically -- these are opt-in config keys.
KEYS = {
    "bamboo_relay_use_coordinator":
        ConfigVar(key="bamboo_relay_use_coordinator",
                  prompt="Would you like to use the Bamboo Relay Coordinator? (Yes/No) >>> ",
                  required_if=lambda: False,
                  type_str="bool",
                  default=False,
                  validator=validate_bool),
    "bamboo_relay_pre_emptive_soft_cancels":
        ConfigVar(key="bamboo_relay_pre_emptive_soft_cancels",
                  prompt="Would you like to pre-emptively soft cancel orders? (Yes/No) >>> ",
                  required_if=lambda: False,
                  type_str="bool",
                  default=False,
                  validator=validate_bool),
}
|
# Write a list of movie lists to a CSV file: each inner list becomes one
# row, items separated by commas.
import csv

MOVIE_ROWS = [
    ["Top Gun", "Risky Business", "Minority Report"],
    ["Titanic", "The Revenant", "Inception"],
    ["Training Day", "Man on Fire", "Flight"],
]


def write_rows(path, rows):
    """Write *rows* to *path* as comma-separated CSV.

    newline='' is required by the csv module docs -- without it the writer
    emits a blank line between rows on Windows.
    """
    with open(path, "w", newline="") as out_file:
        csv.writer(out_file, delimiter=",").writerows(rows)


if __name__ == "__main__":
    write_rows("C:\\Users\\debor\\OneDrive\\Documents\\SDE Apprentice\\Python\\test.csv",
               MOVIE_ROWS)
# Pre-declared so the error path at the bottom can reference them even when
# setup fails before they are assigned real values.
debug_mode = 0
screen = None

# pygame is required for any window; bail out early with the import error.
try:
    import pygame
except ImportError as e:
    print(e)
    exit()

# Main error handling: everything else runs under one umbrella try-block so
# unexpected failures can be routed to ErrorScreen (or a traceback in debug).
try:
    import json
    import traceback

    from classes.game_screen import GameScreen
    from classes.start_screen import StartScreen
    from classes.error_screen import ErrorScreen
    import classes.constants as constants

    # Load application settings. Fix: the original used json.load(open(...))
    # and never closed the handle; the with-block closes it promptly.
    SETTINGS_FILE_PATH = 'settings/settings.json'
    with open(SETTINGS_FILE_PATH, 'r') as settings_file:
        settings = json.load(settings_file)

    # Initialize the application window.
    screen_caption = settings['corposnake']['screen']['caption']
    screen_size = (settings['corposnake']['screen']['width'],
                   settings['corposnake']['screen']['height'])
    debug_mode = settings['debug']['mode']
    pygame.init()
    screen = pygame.display.set_mode(screen_size)
    pygame.display.set_caption(screen_caption)

    # Application main loop: alternate start screen and game, carrying score.
    score = constants.FIRST_RUN
    while True:
        game_screen = GameScreen(screen, settings)
        start_screen = StartScreen(screen, settings, game_screen)
        start_screen.show_screen(score)
        score = game_screen.play_game()
except SystemExit:
    pass
except:  # noqa: E722 -- deliberate catch-all umbrella for the whole app.
    # NOTE(review): if the failure happened before `import traceback`
    # succeeded, the debug branch below would raise NameError -- confirm.
    if debug_mode == 1:
        traceback.print_exc()
    else:
        # NOTE(review): ErrorScreen presumably displays itself from
        # __init__ (the instance is never used afterwards); confirm.
        error_screen = ErrorScreen(screen)
|
# Package bootstrap: create the Flask app and load its configuration.
# Fix: the original imported Flask twice; the duplicate line is removed.
from flask import Flask

app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config')

# Imported last (not at the top) so the views module can import `app`.
import criptomonedas.views  # noqa: E402,F401
import sys
class Node(object):
    """
    Abstract base class for AST nodes.
    """

    def children(self):
        """
        A sequence of all children that are Nodes.
        """
        pass

    def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False,
             showcoord=False, _my_node_name=None):
        """
        Pretty-print this Node, its attributes and its children
        (recursively) into *buf*.

        buf:        open IO buffer into which the Node is printed.
        offset:     initial offset (amount of leading spaces).
        attrnames:  True to render attributes as name=value pairs,
                    False to render only the values.
        nodenames:  True to show each node's name within its parent.
        showcoord:  True to append each Node's coordinates.
        """
        indent = ' ' * offset
        if nodenames and _my_node_name is not None:
            buf.write('%s%s <%s>: ' % (indent, self.__class__.__name__,
                                       _my_node_name))
        else:
            buf.write('%s%s: ' % (indent, self.__class__.__name__))

        # attr_names is supplied by concrete subclasses.
        if self.attr_names:
            if attrnames:
                rendered = ', '.join('%s=%s' % (name, getattr(self, name))
                                     for name in self.attr_names)
            else:
                rendered = ', '.join('%s' % getattr(self, name)
                                     for name in self.attr_names)
            buf.write(rendered)

        if showcoord:
            buf.write(' (at %s)' % self.coord)
        buf.write('\n')

        # Children are printed two columns deeper, labelled when requested.
        for child_name, child in self.children():
            child.show(buf,
                       offset=offset + 2,
                       attrnames=attrnames,
                       nodenames=nodenames,
                       showcoord=showcoord,
                       _my_node_name=child_name)
class NodeVisitor(object):
    """
    A base NodeVisitor class for visiting c_ast nodes.

    Subclass it and define visit_XXX methods, where XXX is the class name
    of the node type you want to handle.  For example::

        class ConstantVisitor(NodeVisitor):
            def __init__(self):
                self.values = []

            def visit_Constant(self, node):
                self.values.append(node.value)

        cv = ConstantVisitor()
        cv.visit(node)

    Notes:
        * generic_visit() is called for node types with no visit_XXX.
        * Children of a node handled by a visit_XXX are NOT visited
          automatically -- call NodeVisitor.generic_visit(self, node)
          from the handler when you need that.
        * Modeled after Python's own ast visiting facilities.
    """

    def visit(self, node):
        """
        Visit a node: dispatch to visit_<ClassName> or generic_visit.
        """
        handler = getattr(self, 'visit_' + node.__class__.__name__,
                          self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """
        Called when no explicit visitor function exists for a node;
        implements preorder visiting of the node's children.
        """
        for _child_name, child in node.children():
            self.visit(child)
|
#!/usr/bin/python
# First-commit smoke test: just prints two greeting lines.
print("Hello Git!")
print("Now I'm Git user!")
from django.urls import resolve
from rest_framework import status
from rest_framework.exceptions import ErrorDetail
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIRequestFactory
from cars.models import Car, Manufacturer
from cars.views import CarDeleteView
# Shared request factory for building bare DRF requests.
factory = APIRequestFactory()


class CarDelViewTest(APITestCase):
    """DELETE endpoint tests for CarDeleteView (/cars/<pk>/)."""

    def setUp(self) -> None:
        # Fresh view, URL and DELETE request per test, plus one manufacturer
        # so cars can be created.  The test DB is reset between tests, so
        # auto pks restart (the pk=1 in reverse() relies on that).
        self.view = CarDeleteView.as_view()
        self.reverse_url = reverse("cars:car_del", kwargs={"pk": 1})
        self.base_url = "/cars/"
        self.request = factory.delete(self.base_url)
        self.manufacturer = Manufacturer.objects.create(make="Ford")

    def test_url_reverse(self):
        # The route name resolves to the same view and the expected path.
        found = resolve(self.reverse_url)
        self.assertEqual(found.func.__name__, self.view.__name__)
        self.assertEqual(self.reverse_url, "/cars/1/")

    def test_existing_car_del(self):
        # Deleting an existing car removes the row and returns 204, no body.
        car_id = Car.objects.create(manufacturer=self.manufacturer, model="Mustang").id
        response = self.view(self.request, pk=car_id)
        response.render()
        with self.assertRaises(Car.DoesNotExist):
            Car.objects.get(pk=car_id)
        self.assertEqual(response.data, None)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_non_existing_car_del(self):
        # No car with pk=1 was created here, so the view must 404.
        with self.assertRaises(Car.DoesNotExist):
            Car.objects.get(pk=1)
        response = self.view(self.request, pk=1)
        response.render()
        self.assertEqual(response.data, {'detail': ErrorDetail(string='Not found.', code='not_found')})
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
from django.test import TestCase
from folder.models import Folder
from folder.serializers import FolderSerializer, FolderSerializerWithoutChildren
from user.models import User
from user.serializers import UserSerializer
class FolderSerializerTest(TestCase):
    """Serializer-level tests for Folder: timestamps and shared_among."""

    @classmethod
    def setUpTestData(cls):
        # Two users; the first owns a root folder with one child folder that
        # is shared with the second user.  NOTE(review): shared_among.set([2])
        # and the Folder.objects.get(id=1) lookups below rely on auto pk
        # ordering -- confirm fixtures can't shift these ids.
        user = User.objects.create(
            username='username',
            email='email',
        )
        user.set_password('password')
        user.save()
        user2 = User.objects.create(
            username='username2',
            email='email2',
        )
        user2.set_password('password2')
        user2.save()
        root_folder = Folder.objects.create(
            parent=None, owner=user, name='root folder')
        root_folder.save()
        child_folder = Folder.objects.create(
            parent=root_folder, owner=user, name='test folder')
        child_folder.shared_among.set([2])
        child_folder.save()
        pass

    def setUp(self):
        pass

    def test_get_created_at(self):
        # NOTE(review): expects the literal string "now" -- presumably the
        # serializer humanises fresh timestamps; confirm against
        # FolderSerializerWithoutChildren before relying on this.
        root_folder = Folder.objects.get(id=1)
        data = FolderSerializerWithoutChildren(
            root_folder).data["created_at"]
        self.assertTrue(data == "now")

    def test_get_last_modified(self):
        # Same "now" expectation as created_at.
        root_folder = Folder.objects.get(id=1)
        data = FolderSerializerWithoutChildren(
            root_folder).data["last_modified"]
        self.assertTrue(data == "now")

    def test_get_shared_among(self):
        # The folder serializer must embed users exactly as UserSerializer
        # renders them.
        root_folder = Folder.objects.get(id=1)
        data_folder_serializer = FolderSerializerWithoutChildren(
            root_folder).data["shared_among"]
        data_user_serializer = UserSerializer(
            root_folder.shared_among.all(), many=True).data
        self.assertTrue(data_folder_serializer == data_user_serializer)
|
# Creates a post and then deletes it
from tools.steps_helper import create_post, delete_post, get_latest_post_id, get_api, page_id, post_msg
def test_delete_post():
    # End-to-end: create a post, look up its id, delete it, then confirm the
    # newest remaining post has a different id.
    api = get_api()
    create_post(api, post_msg)
    post_id = get_latest_post_id(api)
    delete_post(api, post_id)
    # NOTE(review): if the page had no other posts, ['data'][0] would raise
    # IndexError instead of failing the assert -- confirm the fixtures
    # guarantee at least one remaining post.
    assert api.request(page_id + '/posts?limit=1')['data'][0]['id'] != post_id, "post was not deleted"
|
def printCalculate(cidr):
    # Pretty-print network info (IP, mask, host range, binary forms) for a
    # CIDR string like "192.168.1.10/24", wrapped in ANSI colour escapes.
    # NOTE(review): duplicates the computation in simpleCalculate() below;
    # consider having this function call simpleCalculate() and only print.
    s_cidr=cidr.split("/")
    ip=s_cidr[0]
    binary_ip=""
    s_ip=ip.split(".")
    # Dotted-binary form of the IP: each octet zero-padded to 8 bits.
    # (The inner loop reuses `i`; harmless, the outer for reassigns it.)
    for i in range(4):
        int_ip=int(s_ip[i])
        str_binary=str(decimalToBinary(int_ip))
        zeros=""
        for i in range(8-len(str_binary)):
            zeros+="0"
        binary_ip+=zeros+str_binary+"."
    binary_ip=binary_ip[:-1]
    binary_mask=""
    binary_wildcard=""
    # Build the mask/wildcard bit strings, inserting a dot every 8 bits.
    for i in range(32):
        if(i%8==0 and i!=0):
            binary_mask+="."
            binary_wildcard+="."
        if(i<int(s_cidr[1])):
            binary_mask+="1"
            binary_wildcard+="0"
        else:
            binary_mask+="0"
            binary_wildcard+="1"
    mask=""
    s_binary_mask=binary_mask.split(".")
    for i in range(4):
        mask+=str(int(s_binary_mask[i], 2))
        if(i<3):
            mask+="."
    aux_binary_min_host=""
    change=False
    changeIndex=0
    # Locate the lowest-order 0 bit of the dotted-binary IP (index 34 is the
    # last character of the 35-char string; scan from the right).
    # NOTE(review): /31 and /32 (no zero bit) are not handled, and this flips
    # a bit of the *given IP* rather than computing network+1 -- confirm the
    # intended scope of inputs.
    for i in range(len(binary_ip)):
        if(binary_ip[34-i]=="0" and change==False):
            change=True
            changeIndex=34-i
            aux_binary_min_host+=binary_ip[i]
        else:
            aux_binary_min_host+=binary_ip[i]
    binary_min_host=""
    # Min host: the located bit forced to 1.
    for i in range(len(aux_binary_min_host)):
        if(i==changeIndex):
            binary_min_host+="1"
        else:
            binary_min_host+=aux_binary_min_host[i]
    binary_max_host=""
    # Max host: host bits set to 1, except the located index stays 0.
    for i in range(len(binary_mask)):
        if(binary_mask[i]=="0"):
            if(i==changeIndex):
                binary_max_host+="0"
            else:
                binary_max_host+="1"
        else:
            binary_max_host+=binary_ip[i]
    min_host=""
    s_binary_min_host=binary_min_host.split(".")
    for i in range(4):
        min_host+=str(int(s_binary_min_host[i], 2))
        if(i<3):
            min_host+="."
    max_host=""
    s_binary_max_host=binary_max_host.split(".")
    for i in range(4):
        max_host+=str(int(s_binary_max_host[i], 2))
        if(i<3):
            max_host+="."
    zero_bits=0
    for i in range(4):
        zero_bits+=s_binary_mask[i].count("0")
    # Usable hosts: 2^host_bits minus network and broadcast addresses.
    total_host=pow(2,zero_bits)-2
    print("""\033[34m+--------------------------------------------------------+\033[0m
\033[32m\033[01mNETWORK:\033[0m """+cidr+"""
\033[32m\033[01mIP:\033[0m """+ip+"""
\033[32m\033[01mMASK:\033[0m """+mask+"""
\033[32m\033[01mRANGE:\033[0m """+min_host+""" / """+max_host+"""
\033[34m+--------------------------------------------------------+\033[0m
\033[36m\033[01mBINARY IP:\033[0m """+binary_ip+"""
\033[36m\033[01mBINARY MASK:\033[0m """+binary_mask+"""
\033[36m\033[01mBINARY WILDCARD:\033[0m """+binary_wildcard+"""
\033[36m\033[01mBINARY MIN HOST:\033[0m """+binary_min_host+"""
\033[36m\033[01mBINARY MAX HOST:\033[0m """+binary_max_host+"""
\033[34m+--------------------------------------------------------+\033[0m
\033[95m\033[01mMIN HOST:\033[0m """+min_host+"""
\033[95m\033[01mMAX HOST:\033[0m """+max_host+"""
\033[95m\033[01mTOTAL NUMBER OF HOSTS:\033[0m """+str(total_host)+"""
\033[34m+--------------------------------------------------------+\033[0m """)
def simpleCalculate(cidr):
    # Same computation as printCalculate(), but returns the results as
    # [cidr, ip, mask, min_host, max_host, total_host] instead of printing.
    # NOTE(review): near-verbatim duplicate of printCalculate(); the shared
    # logic should ideally live in one place.
    s_cidr=cidr.split("/")
    ip=s_cidr[0]
    binary_ip=""
    s_ip=ip.split(".")
    # Dotted-binary form of the IP: each octet zero-padded to 8 bits.
    for i in range(4):
        int_ip=int(s_ip[i])
        str_binary=str(decimalToBinary(int_ip))
        zeros=""
        for i in range(8-len(str_binary)):
            zeros+="0"
        binary_ip+=zeros+str_binary+"."
    binary_ip=binary_ip[:-1]
    binary_mask=""
    binary_wildcard=""
    # Build the mask/wildcard bit strings, inserting a dot every 8 bits.
    for i in range(32):
        if(i%8==0 and i!=0):
            binary_mask+="."
            binary_wildcard+="."
        if(i<int(s_cidr[1])):
            binary_mask+="1"
            binary_wildcard+="0"
        else:
            binary_mask+="0"
            binary_wildcard+="1"
    mask=""
    s_binary_mask=binary_mask.split(".")
    for i in range(4):
        mask+=str(int(s_binary_mask[i], 2))
        if(i<3):
            mask+="."
    aux_binary_min_host=""
    change=False
    changeIndex=0
    # Locate the lowest-order 0 bit (index 34 = last char of the 35-char
    # dotted string).  NOTE(review): /31 and /32 are not handled -- confirm
    # the intended input range.
    for i in range(len(binary_ip)):
        if(binary_ip[34-i]=="0" and change==False):
            change=True
            changeIndex=34-i
            aux_binary_min_host+=binary_ip[i]
        else:
            aux_binary_min_host+=binary_ip[i]
    binary_min_host=""
    # Min host: the located bit forced to 1.
    for i in range(len(aux_binary_min_host)):
        if(i==changeIndex):
            binary_min_host+="1"
        else:
            binary_min_host+=aux_binary_min_host[i]
    binary_max_host=""
    # Max host: host bits set to 1, except the located index stays 0.
    for i in range(len(binary_mask)):
        if(binary_mask[i]=="0"):
            if(i==changeIndex):
                binary_max_host+="0"
            else:
                binary_max_host+="1"
        else:
            binary_max_host+=binary_ip[i]
    min_host=""
    s_binary_min_host=binary_min_host.split(".")
    for i in range(4):
        min_host+=str(int(s_binary_min_host[i], 2))
        if(i<3):
            min_host+="."
    max_host=""
    s_binary_max_host=binary_max_host.split(".")
    for i in range(4):
        max_host+=str(int(s_binary_max_host[i], 2))
        if(i<3):
            max_host+="."
    zero_bits=0
    for i in range(4):
        zero_bits+=s_binary_mask[i].count("0")
    # Usable hosts: 2^host_bits minus network and broadcast addresses.
    total_host=pow(2,zero_bits)-2
    return [cidr,ip,mask,min_host,max_host,total_host]
def decimalToBinary(number):
    """Binary-string form of a non-negative int, built bit by bit.

    Mirrors the original contract exactly: negative input yields the string
    'Not positive', and 0 yields the empty string (not '0').
    """
    if number < 0:
        return 'Not positive'
    digits = []
    position = 0
    while number >> position:
        digits.append('1' if (number >> position) & 1 else '0')
        position += 1
    # Bits were collected LSB-first; reverse for the conventional order.
    return ''.join(reversed(digits))
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 08 16:22:26 2015
@author: tw5n14
"""
from networkx import *
import matplotlib.pyplot as plt
import numpy as np
import csv
import math
import os
G = Graph()      # networkx Graph (star import above)
userps = {}      # user id -> profile dict
gw, cw = [],[]   # global-weight / current-weight values where both present

# Load user profiles from ../data/poi.csv (header row skipped).
file_path = os.sep.join(os.path.dirname(__file__).split(os.sep)[:-1])+'/data/poi.csv'
with open(file_path, 'rt') as f:
    reader = csv.reader(f)
    first_row = next(reader)
    for row in reader:
        # print row
        userp = {}
        # NOTE(review): 'screem_name' is presumably a typo for
        # 'screen_name', but the key may be read elsewhere -- left as is.
        userp['screem_name'] = row[1]
        userp['datetime'] = row[2]
        userp['descrip'] = row[3]
        userp['lan'] = row[6]
        userp['location'] = row[7]
        userp['gender'] = row[8]
        userp['gw'] = row[9]
        userp['cw'] = row[10]
        if row[9]!='' and row[10]!='':
            # print float(row[9]), float(row[10])
            gw.append(float(row[9]))
            cw.append(float(row[10]))
        userps[row[0]] = userp
        del userp

# Load the interaction network; repeated edges accumulate into 'weight'.
file_path = os.sep.join(os.path.dirname(__file__).split(os.sep)[:-1])+'/data/mrredges-no-tweet-no-retweet-poi-counted.csv'
with open(file_path, 'rt') as fo:
    reader = csv.reader(fo)
    first_row = next(reader)
    for row in reader:
        n1 = (row[0])
        n2 = (row[1])
        b_type = row[2]       # interaction type (reply-to / mentioned); unused below
        weightv = int(row[3])
        # reply-to mentioned
        if (G.has_node(n1)) and (G.has_node(n2)) and (G.has_edge(n1, n2)):
            # print n1, n2, G.has_node(n1), G.has_node(n2), G.has_edge(n1,n2), weightv, G[n2]
            G[n1][n2]['weight'] += weightv
        else:
            G.add_edge(n1, n2, weight=weightv)

# Alternative layouts / drawing, kept for reference:
# pos = random_layout(G)
# pos = shell_layout(G)
# pos = spring_layout(G)
#pos = spectral_layout(G)
# draw(G, pos)
# plt.show()
def pearson(x, y):
    """Pearson correlation coefficient of two equal-length sequences.

    Fix: the stray Python-2 debug print of the two means was removed -- it
    was a syntax error under Python 3 and polluted stdout from a pure
    computation.  As before, raises ZeroDivisionError for empty input or
    zero variance.
    """
    n = len(x)
    avg_x = float(sum(x)) / n
    avg_y = float(sum(y)) / n
    diffprod = 0.0
    xdiff2 = 0.0
    ydiff2 = 0.0
    for idx in range(n):
        xdiff = x[idx] - avg_x
        ydiff = y[idx] - avg_y
        diffprod += xdiff * ydiff
        xdiff2 += xdiff * xdiff
        ydiff2 += ydiff * ydiff
    return diffprod / math.sqrt(xdiff2 * ydiff2)
def drop_zeros(list_a):
    """Return only the strictly-positive entries of *list_a*, order kept."""
    return [value for value in list_a if value > 0]
def log_binning(list_x, list_y, bin_count=35):
max_x = np.log10(max(list_x))
max_y = np.log10(max(list_y))
min_x = np.log10(min(drop_zeros(list_x)))
min_y = np.log10(min(drop_zeros(list_y)))
bins_x = np.logspace(min_x, max_x, num=bin_count)
bins_y = np.logspace(min_y, max_y, num=bin_count)
bin_means_x = (np.histogram(list_x, bins_x, weights=list_x))[0] / (np.histogram(list_x, bins_x)[0])
bin_means_y = (np.histogram(list_y, bins_y, weights=list_y))[0] / (np.histogram(list_y, bins_y)[0])
return bin_means_x, bin_means_y
def PD(list_x, bin_count=35):
    """Probability distribution of *list_x* over log-spaced bins.

    Each sample carries weight 1/len(list_x), so the histogram entries are
    per-bin probabilities.  Returns (hist, bin_edges).
    """
    low = np.log10(min(v for v in list_x if v > 0))
    high = np.log10(max(list_x))
    edges = np.logspace(low, high, num=bin_count)
    weights = np.ones_like(list_x) / float(len(list_x))
    hist, bin_edges = np.histogram(list_x, edges, weights=weights)
    return hist, bin_edges
def CPD(list_x, bin_count=35):
    """Complementary cumulative distribution over log-spaced bins.

    Builds the same weighted histogram as PD(), then accumulates it from
    the right so entry k is P(value >= bin k).  Returns (cum, bin_edges).
    """
    low = np.log10(min(v for v in list_x if v > 0))
    high = np.log10(max(list_x))
    edges = np.logspace(low, high, num=bin_count)
    weights = np.ones_like(list_x) / float(len(list_x))
    hist, bin_edges = np.histogram(list_x, edges, weights=weights)
    # Reverse, cumulate, reverse back: right-to-left running sum.
    tail = np.cumsum(hist[::-1])[::-1]
    return tail, bin_edges
# network analysis (Python 2 print statements throughout this script)
# Three equivalent node-count calls, kept as in the original:
print 'The number of nodes: %d' %(G.order())
print 'The number of nodes: %d' %(G.__len__())
print 'The number of nodes: %d' %(G.number_of_nodes())
print 'The number of edges: %d' %(G.size())
print 'The number of self-loop: %d' %(G.number_of_selfloops())

# Earlier cumulative-distribution plots of gw/cw, kept for reference:
#gwhist, gwbins = CPD(gw,50)
#gwbin_centers = (gwbins[1:]+gwbins[:-1])/2.0
#print gwhist, sum(gwhist)
#gwp, = plt.plot(gwbin_centers, gwhist, color='blue', marker='x')
#cwhist, cwbins = CPD(cw,50)
#cwbin_centers = (cwbins[1:]+cwbins[:-1])/2.0
#cwp, = plt.plot(cwbin_centers, cwhist, color='red', marker='.')
#print cwhist, sum(cwhist)
#plt.legend((gwp, cwp), ('Global-Weight','Current-Weight'), loc=4)
#plt.xscale('log')
#plt.yscale('linear')
#plt.xlabel('KG(x)')
#plt.ylabel('P(x)')

print 'The plot of in-degree and out-degree of nodes'
print 'Node \t degree \t Strength'
degree, strength = [],[]
# Per-node degree and weighted degree ("strength").
for node in G.nodes():
    print '%s \t %d \t %d' %(node, G.degree(node), G.degree(node, weight='weight'))
    degree.append(G.degree(node))
    strength.append(G.degree(node, weight='weight'))
# Log-binned degree-vs-strength scatter on log-log axes.
deg, stre = log_binning(degree, strength, 50)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Degree')
plt.ylabel('Strength')
plt.xlim(1, 1e4)
plt.ylim(1, 1e4)
degp = plt.scatter(deg, stre, c='r', marker='s', s=50, alpha=0.5, label='Undirected Graph(p=0.76)')
plt.legend(handles=[degp])
#plt.legend((degp), ('Undirected Graph(p=0.76)'), loc=4)
print 'pearson correlation of instrength and outstrength: %f' %(pearson(degree, strength))

# Degree/strength cumulative distributions, kept for reference:
#indcum, indbin_deges = CPD(degree, 100)
#indbin_centers = (indbin_deges[1:]+indbin_deges[:-1])/2.0
#indegr, = plt.plot(indbin_centers, indcum, color='blue', marker='x')
#
#inscum, insbin_deges = CPD(strength, 100)
#insbin_centers = (insbin_deges[1:]+insbin_deges[:-1])/2.0
#instre, = plt.plot(insbin_centers, inscum, color='red', marker='.')
#
#plt.legend((indegr, instre), ('Degree','Strength'), loc=3)
#plt.xscale('log')
#plt.yscale('log')
#plt.xlabel('k')
#plt.ylabel('P(k)')
|
from selenium import webdriver
import time
# Initialise the Chrome driver.
driver = webdriver.Chrome("C:\\selenium\\chromedriver.exe")
# Switch the browser to full-screen mode.
driver.fullscreen_window()
# Open the Google Mongolian-to-Russian translator.
driver.get("https://translate.google.ru/?hl=ru&tab=TT&authuser=0#mn/ru")
# Bind `mng` to the source-text element (id 'source').
mng = driver.find_element_by_id('source')
# Type 200 'а' characters with a 2-second pause after each keystroke.
for i in range(200):
    mng.send_keys('а')
    time.sleep(2)
mng.clear()  # clear the field
# Type 20 'о' characters with a 2-second pause after each.
for i in range(20):
    mng.send_keys('о')
    time.sleep(2)
mng.send_keys(' моя оборона ')  # type the phrase ' моя оборона '
# Type another 20 'о' characters with a 2-second pause after each.
for i in range(20):
    mng.send_keys('о')
    time.sleep(2)
time.sleep(20)  # wait 20 seconds
mng.clear()  # clear the field
mng.send_keys('эээ блэт туох дьиигин дээ')  # type the final phrase
time.sleep(100)  # wait 100 seconds
driver.close()  # shut down the web driver
|
# PYTHON CODE for Q6: dict keyed by faculty surname -> [degree, title, email].
# (Python 2 script: bare `print` statements and sliceable dict.keys().)
import pandas as pd
import csv

df = pd.read_csv('faculty.csv')
faculty_dict={}
# Normalise the first column into 'name', then keep only the last word (the
# surname).  NOTE(review): `df.name = ...` only updates an *existing* 'name'
# column via attribute assignment -- confirm faculty.csv's first column is
# actually named 'name'.
df.name = df[df.columns[0]]
df.name = df.name.apply(lambda x: x.split(' ')[-1])
# Transpose so each surname maps to its row values as a list.
faculty_dict = df.set_index('name').T.to_dict('list')
print
# Python 2 only: dict.keys() returns a list there, so it can be sliced.
first3kv = {k: faculty_dict[k] for k in faculty_dict.keys()[:3]}
print first3kv
#OUTPUT for Q6:
>>> runfile('/Users/swatisharma/Documents/testdict.py', wdir='/Users/swatisharma/Documents')
{'Putt': [' PhD ScD', 'Professor of Biostatistics', 'mputt@mail.med.upenn.edu'], 'Feng': [' Ph.D', 'Assistant Professor of Biostatistics', 'ruifeng@upenn.edu'], 'Bilker': ['Ph.D.', 'Professor of Biostatistics', 'warren@upenn.edu']}
_______________________________________
# PYTHON CODE for Q7: dict keyed by "First, Last" -> full row values.
# (Python 2 script, same caveats as Q6.)
import pandas as pd
import csv

df = pd.read_csv('faculty.csv')
faculty_dict={}
df.name = df[df.columns[0]]
# NOTE(review): lname/fname/fl are attribute assignments that do NOT create
# DataFrame columns (pandas warns about this); it still works because
# set_index() below is handed the Series object directly.
df.lname = df.name.apply(lambda x: x.split(' ')[-1])
df.fname = df.name.apply(lambda x: x.split(' ')[0])
df.fl = df.fname + ', ' + df.lname
faculty_dict = df.set_index(df.fl).T.to_dict('list')
print
# Python 2 only: dict.keys() returns a sliceable list there.
first3kv = {k: faculty_dict[k] for k in faculty_dict.keys()[:3]}
print first3kv
#OUTPUT for Q7:
>>> runfile('/Users/swatisharma/Documents/q7.py', wdir='/Users/swatisharma/Documents')
{'Sarah, Ratcliffe': ['Sarah Jane Ratcliffe', ' Ph.D.', 'Associate Professor of Biostatistics', 'sratclif@upenn.edu'], 'Yenchih, Hsu': ['Yenchih Hsu', ' Ph.D.', 'Assistant Professor of Biostatistics', 'hsu9@mail.med.upenn.edu'], 'J., Landis': ['J. Richard Landis', ' B.S.Ed. M.S. Ph.D.', 'Professor of Biostatistics', 'jrlandis@mail.med.upenn.edu']}
__________________________________________
# PYTHON CODE for Q8: print faculty rows sorted alphabetically by surname.
# (Python 2 script, same caveats as Q6/Q7; fac_dict is declared but unused.)
import pandas as pd
import csv

df = pd.read_csv('faculty.csv')
faculty_dict={}
fac_dict = {}
df.name = df[df.columns[0]]
# Surname extracted as an attribute Series (not a column; see Q7 note).
df.lname = df.name.apply(lambda x: x.split(' ')[-1])
print
faculty_dict = df.set_index(df.lname).T.to_dict('list')
# sorted() on dict items orders by key, i.e. by surname.
for key, value in sorted(faculty_dict.items()):
    print(key,value)
#OUTPUT for Q8:
>>> runfile('/Users/swatisharma/Documents/q8.py', wdir='/Users/swatisharma/Documents')
('Bellamy', ['Scarlett L. Bellamy', ' Sc.D.', 'Associate Professor of Biostatistics', 'bellamys@mail.med.upenn.edu'])
('Bilker', ['Warren B. Bilker', 'Ph.D.', 'Professor of Biostatistics', 'warren@upenn.edu'])
('Bryan', ['Matthew W Bryan', ' PhD', 'Assistant Professor of Biostatistics', 'bryanma@upenn.edu'])
('Chen', ['Jinbo Chen', ' Ph.D.', 'Associate Professor of Biostatistics', 'jinboche@upenn.edu'])
('Ellenberg', ['Jonas H. Ellenberg', ' Ph.D.', 'Professor of Biostatistics', 'jellenbe@mail.med.upenn.edu'])
('Feng', ['Rui Feng', ' Ph.D', 'Assistant Professor of Biostatistics', 'ruifeng@upenn.edu'])
('French', ['Benjamin C. French', ' PhD', 'Associate Professor of Biostatistics', 'bcfrench@mail.med.upenn.edu'])
('Gimotty', ['Phyllis A. Gimotty', ' Ph.D', 'Professor of Biostatistics', 'pgimotty@upenn.edu'])
('Guo', ['Wensheng Guo', ' Ph.D', 'Professor of Biostatistics', 'wguo@mail.med.upenn.edu'])
('Hsu', ['Yenchih Hsu', ' Ph.D.', 'Assistant Professor of Biostatistics', 'hsu9@mail.med.upenn.edu'])
('Hubbard', ['Rebecca A Hubbard', ' PhD', 'Associate Professor of Biostatistics', 'rhubb@mail.med.upenn.edu'])
('Hwang', ['Wei-Ting Hwang', ' Ph.D.', 'Associate Professor of Biostatistics', 'whwang@mail.med.upenn.edu'])
('Joffe', ['Marshall M. Joffe', ' MD MPH Ph.D', 'Professor of Biostatistics', 'mjoffe@mail.med.upenn.edu'])
('Landis', ['J. Richard Landis', ' B.S.Ed. M.S. Ph.D.', 'Professor of Biostatistics', 'jrlandis@mail.med.upenn.edu'])
('Li', ['Hongzhe Li', ' Ph.D', 'Professor of Biostatistics', 'hongzhe@upenn.edu'])
('Localio', ['A. Russell Localio', ' JD MA MPH MS PhD', 'Associate Professor of Biostatistics', 'rlocalio@upenn.edu'])
('Mitra', ['Nandita Mitra', ' Ph.D.', 'Associate Professor of Biostatistics', 'nanditam@mail.med.upenn.edu'])
('Morales', ['Knashawn H. Morales', ' Sc.D.', 'Associate Professor of Biostatistics', 'knashawn@mail.med.upenn.edu'])
('Propert', ['Kathleen Joy Propert', ' Sc.D.', 'Professor of Biostatistics', 'propert@mail.med.upenn.edu'])
('Putt', ['Mary E. Putt', ' PhD ScD', 'Professor of Biostatistics', 'mputt@mail.med.upenn.edu'])
('Ratcliffe', ['Sarah Jane Ratcliffe', ' Ph.D.', 'Associate Professor of Biostatistics', 'sratclif@upenn.edu'])
('Ross', ['Michelle Elana Ross', ' PhD', 'Assistant Professor is Biostatistics', 'michross@upenn.edu'])
('Roy', ['Jason A. Roy', ' Ph.D.', 'Associate Professor of Biostatistics', 'jaroy@mail.med.upenn.edu'])
('Sammel', ['Mary D. Sammel', ' Sc.D.', 'Professor of Biostatistics', 'msammel@cceb.med.upenn.edu'])
('Shaw', ['Pamela Ann Shaw', ' PhD', 'Assistant Professor of Biostatistics', 'shawp@upenn.edu'])
('Shinohara', ['Russell Takeshi Shinohara', '0', 'Assistant Professor of Biostatistics', 'rshi@mail.med.upenn.edu'])
('Shou', ['Haochang Shou', ' Ph.D.', 'Assistant Professor of Biostatistics', 'hshou@mail.med.upenn.edu'])
('Shults', ['Justine Shults', ' Ph.D.', 'Professor of Biostatistics', 'jshults@mail.med.upenn.edu'])
('Stephens', ['Alisa Jane Stephens', ' Ph.D.', 'Assistant Professor of Biostatistics', 'alisaste@mail.med.upenn.edu'])
('Troxel', ['Andrea Beth Troxel', ' ScD', 'Professor of Biostatistics', 'atroxel@mail.med.upenn.edu'])
('Xiao', ['Rui Xiao', ' PhD', 'Assistant Professor of Biostatistics', 'rxiao@mail.med.upenn.edu'])
('Xie', ['Dawei Xie', ' PhD', 'Assistant Professor of Biostatistics', 'dxie@upenn.edu'])
('Yang', ['Wei (Peter) Yang', ' Ph.D.', 'Assistant Professor of Biostatistics', 'weiyang@mail.med.upenn.edu'])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 03:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``stats`` app (Django 1.11).

    Loosens several fields to allow blank/null values and tightens the
    choice sets on income, education and sexual-orientation fields.
    """

    dependencies = [
        ('stats', '0008_auto_20170728_0242'),
    ]

    operations = [
        migrations.AlterField(
            model_name='abuse',
            name='role_of_abuser',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Role of Abuser'),
        ),
        migrations.AlterField(
            model_name='abuse',
            name='stop_date',
            # Stored as a 4-character year string, not a DateField.
            field=models.CharField(blank=True, max_length=4, verbose_name='Stop Date (Year)'),
        ),
        migrations.AlterField(
            model_name='client',
            name='age',
            field=models.IntegerField(blank=True, default=None, null=True, verbose_name='Age at time of visit'),
        ),
        migrations.AlterField(
            model_name='client',
            name='user',
            # SET_DEFAULT with default=None clears the link if the user is deleted.
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to=settings.AUTH_USER_MODEL, verbose_name='Input by User'),
        ),
        migrations.AlterField(
            model_name='currentsituation',
            name='abuse',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Abuse', verbose_name='Associated Client Number'),
        ),
        migrations.AlterField(
            model_name='currentsituation',
            name='income',
            # NOTE(review): default=django.utils.timezone.now on a CharField is a
            # makemigrations one-off-default artifact; preserve_default=False
            # discards it after this migration runs — confirm it was intentional.
            field=models.CharField(blank=True, choices=[('Less than $20,000', 'Less than $20,000'), ('$20,000 - $34,999', '$20,000 - $34,999'), ('$35,000 - $49,999', '$35,000 - $49,999'), ('$50,000 - $75,999', '$50,000 - $75,999'), ('$75,000 - $99,999', '$75,000 - $99,999'), ('$100,000 - $149,999', '$100,000 - $149,999'), ('$150,000 - $199,999', '$150,000 - $199,999'), ('$200,000 or more', '$200,000 or more')], default=django.utils.timezone.now, max_length=50, verbose_name='Income'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='currentsituation',
            name='level_of_education',
            # NOTE(review): same one-off-default artifact as above.
            field=models.CharField(blank=True, choices=[('Secondary education or less', 'Secondary education or less'), ('Diploma Program', 'Diploma Program'), ('Post-graduate - Bachelors', 'Post-graduate - Bachelors'), ('Post-graduate - Graduate', 'Post-graduate - Graduate'), ('Post-graduate - Professional', 'Post-graduate - Professional'), ('Post-graduate - Doctorate', 'Post-graduate - Doctorate')], default=django.utils.timezone.now, max_length=50, verbose_name='Level of Education'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='currentsituation',
            name='sexual_orientation',
            field=models.CharField(blank=True, choices=[('Heterosexual', 'Heterosexual'), ('Homosexual', 'Homosexual'), ('Bisexual', 'Bisexual'), ('Transexual', 'Transexual'), ('Other', 'Other')], max_length=10, verbose_name='Sexual Orientation'),
        ),
        migrations.AlterField(
            model_name='requestedservice',
            name='group_therapy',
            field=models.BooleanField(default=False, verbose_name='Group Therapy'),
            preserve_default=False,
        ),
    ]
|
'''test class'''
import unittest
import collections
from CsvIO import read_stock_csv, write_stock_csv
class TestCsvIO(unittest.TestCase):
    """TestCsvIO - Tests stock io"""

    def test_readstockcsv(self):
        """test_readstockcsv - should read stocks list from csv file """
        # NOTE(review): depends on an AAPL.csv fixture present in the
        # working directory with exactly 53 data rows.
        stocks = read_stock_csv('AAPL.csv')
        self.assertEqual(53, len(stocks))
        # All values are kept as raw strings, not parsed into numbers.
        self.assertEqual('2016-08-01', stocks[0]["Date"])
        self.assertEqual('104.410004', stocks[0]["Open"])
        self.assertEqual('107.650002', stocks[0]["High"])
        self.assertEqual('104.000000', stocks[0]["Low"])
        self.assertEqual('107.480003', stocks[0]["Close"])
        self.assertEqual('105.460434', stocks[0]["Adj Close"])
        self.assertEqual('170149200', stocks[0]["Volume"])

    def test_write_stock_csv(self):
        """test_write_stock_csv - should write stocks list to csv file"""
        # Round-trip: writing then reading back should preserve rows exactly.
        obj1 = collections.OrderedDict(
            [('a', 'A1'), ('b', 'B1'), ('c', 'C1')])
        obj2 = collections.OrderedDict(
            [('a', 'A2'), ('b', 'B2'), ('c', 'C2')])
        objects1 = [obj1, obj2]
        # NOTE(review): leaves a test.csv artifact in the CWD; consider tempfile.
        write_stock_csv('test.csv', objects1)
        objects2 = read_stock_csv('test.csv')
        self.assertEqual(objects1, objects2)

if __name__ == '__main__':
    # exit=False keeps interactive sessions (e.g. IDLE) alive after the run.
    unittest.main(exit=False)
|
from rest_framework import serializers
from .models import Calculator, Chance
class CalculatorSerializer(serializers.ModelSerializer):
    """Serializes every field of the Calculator model."""
    class Meta:
        model = Calculator
        fields = '__all__'

class ChanceSerializer(serializers.ModelSerializer):
    """Serializes every field of the Chance model."""
    class Meta:
        model = Chance
        fields = '__all__'
|
import requests
import time
# One-shot smoke test: POST a sample temperature reading to the local
# collector service. Values (temp, MAC, sensor id) are hard-coded examples.
requests.post('http://localhost:5000', data = {'temp':'20','mac':'B4:21:8A:F0:13:44','sensor_id':'example_sensor_id'})
#requests.get('http://localhost:5000/data/99')
|
#print("hello world")
import tkinter
############## FUNCTIONS
def _zoomed_window_metric(getter_name):
    """Open a temporary maximized Tk window, read one winfo metric, destroy it.

    *getter_name* is the name of the ``winfo_*`` accessor to call, e.g.
    ``'winfo_width'``.  Factored out because width and height measurement
    were duplicated line-for-line.
    """
    t = tkinter.Tk()  # new throwaway window
    t.update()
    # NOTE(review): state('zoomed') is Windows-specific in Tk; it may raise
    # on some X11 window managers — confirm target platform.
    t.state('zoomed')
    value = getattr(t, getter_name)()
    t.destroy()
    return value

def find_screen_width():
    """Return the usable (zoomed-window) screen width in pixels."""
    return _zoomed_window_metric('winfo_width')

def find_screen_height():
    """Return the usable (zoomed-window) screen height in pixels."""
    return _zoomed_window_metric('winfo_height')
def plan_b():
    """Button callback: append a snarky label to the enemies window."""
    # Relies on the module-global ``enemies_window`` created at module scope.
    label_field = tkinter.Label(enemies_window, text="No, Foo you, but in English")
    label_field.pack()
################# END FUNCTIONS
############################## Window Positioning
# Create 2 windows
# Two independent top-level Tk windows: allies (left) and enemies (right).
allies_window, enemies_window = tkinter.Tk(), tkinter.Tk();
# find screen size
screen_width = find_screen_width()
screen_height = find_screen_height()
# Set allied window to center-left
allies_window.geometry("+0+"+str(int(screen_height/2)))
#set enemies window to center-right
window_width = enemies_window.winfo_reqwidth()
usable_width = str(screen_width - window_width)
enemies_window.geometry("+" + str(int(usable_width))+ "+" + str(int(screen_height/2)))
#################################################################################
label_field = tkinter.Label(allies_window, text="Hello World")
label_field.pack()
entry_field = tkinter.Entry(allies_window)
entry_field.pack()
button = tkinter.Button(allies_window, text="Foo - but in latin", command=plan_b)
button.pack()
#These keep the windows open when outside of python IDLE
# NOTE(review): one mainloop() services the events of both Tk instances.
allies_window.mainloop()#,enemies_window.mainloop();
|
from .metric import Metric
import numpy as np
import mot.utils.box
import mot.utils.debug
class EuclideanMetric(Metric):
    """
    An affinity metric that only considers the euclidean distance between a
    tracklet's box (last detection, or its motion prediction) and each
    detected box.
    """

    def __init__(self, use_prediction=False):
        # Bug fix: the original called ``super(EuclideanMetric).__init__()``,
        # which builds an *unbound* super object and silently skips
        # Metric.__init__ entirely.
        super(EuclideanMetric, self).__init__()
        self.encoding = 'box'
        self.use_prediction = use_prediction

    def __call__(self, tracklets, detection_features):
        """Return a (len(tracklets), len(detection_features)) affinity matrix."""
        matrix = np.zeros([len(tracklets), len(detection_features)])
        for i, tracklet in enumerate(tracklets):
            # The tracklet-side box is invariant over detections — hoist it
            # out of the inner loop.
            if self.use_prediction:
                box = tracklet.prediction.box
            else:
                box = tracklet.last_detection.box
            for j, feature in enumerate(detection_features):
                matrix[i][j] = mot.utils.box._pdist(box, feature[self.encoding])
        mot.utils.debug.log_affinity_matrix(matrix, tracklets, self.encoding)
        return matrix
import random as rd
import math
import matplotlib.pyplot as plt
def base_station(num_base, pos_base):
    """Build the list of base-station coordinates.

    Parameters:
        num_base: number of base stations (all share the same position).
        pos_base: position as a comma-separated string, e.g. ``"0,0"``.

    Returns:
        A list of ``[x, y]`` integer pairs, one per base station.

    Bug fix: the original appended the lazy ``map`` object itself, so
    membership tests like ``[x, y] not in station`` could never match and
    each entry could only be consumed once.
    """
    station = []
    for _ in range(num_base):
        station.append([int(coord) for coord in pos_base.split(',')])
    return station
def random_nodes(width, height, station, set_energy, density):
    """Scatter sensor nodes uniformly on the integer grid.

    Parameters:
        width, height: field size in meters (coordinates drawn in [0, width/height]).
        station: list of [x, y] base-station positions to avoid.
        set_energy: initial energy assigned to every node.
        density: nodes per square meter; node count = ceil(density * area).

    Returns:
        (node_member, len_nodes) where node_member is a list of
        [x, y, energy] entries and len_nodes is the requested count.

    Bug fix: the original uniqueness test compared ``[x, y]`` against stored
    ``[x, y, energy]`` triples, so it never matched and duplicate positions
    were silently accepted.  A coordinate set now enforces uniqueness.
    NOTE(review): loops forever if density asks for more nodes than there
    are free grid positions — confirm caller keeps density small.
    """
    len_nodes = math.ceil(density * (width * height))
    node_member = []
    # Positions already taken by base stations (or previously placed nodes).
    taken = {tuple(pos) for pos in station}
    while len(node_member) != len_nodes:
        random_x, random_y = rd.randint(0, width), rd.randint(0, height)
        if (random_x, random_y) not in taken:
            taken.add((random_x, random_y))
            node_member.append([random_x, random_y, set_energy])
    print('node', len(node_member))
    return node_member, len_nodes
def random_cch(node_member, t_predefine, len_nodes):
    """Randomly promote candidate cluster heads (CCH) out of *node_member*.

    ``ceil(t_predefine * len_nodes)`` nodes are removed from *node_member*
    **in place** and returned as the candidate list.

    Cleanup: dropped the ``count`` variable that was incremented but never
    read; behavior (including the diagnostic prints and the RNG draw
    sequence) is unchanged.
    """
    cch = []
    num_candidate = math.ceil(t_predefine * len_nodes)
    print(num_candidate)
    while len(cch) != num_candidate:
        # Pick a random remaining node and move it into the candidate list.
        c_cluster = node_member[rd.randint(0, len(node_member) - 1)]
        cch.append(c_cluster)
        node_member.remove(c_cluster)
    print('len cch', len(cch))
    return cch
def distance_candidate(cch, pkt_control, elec_tran, elec_rec, \
        fs, mpf, d_threshold, node_member, R1):
    "calculate distance between each candidate then minus energy by send packet control "
    # For each adjacent pair of (sorted) candidates: charge transmit energy
    # on the sender and receive energy on the receiver, then decide whether
    # the pair is far enough apart (> 2*R1) for one of them to be confirmed
    # as a cluster head; otherwise the candidate is demoted back to a plain
    # member node (appended to node_member in place).
    cluster_member = []
    cch.sort()  # lexicographic sort by x, then y, then energy
    for item in range(len(cch)-1):
        distance = 0
        distance = math.sqrt((cch[item][0]-cch[item+1][0])**2 +\
            (cch[item][1]-cch[item+1][1])**2)
        # Radio model: free-space (d^2) below the threshold distance,
        # multi-path fading (d^4) at or above it.
        if distance < d_threshold :#nodes (tranfer nodes)
            cch[item][2] = cch[item][2]-((elec_tran+(fs*(distance**2)))*pkt_control)
        elif distance >= d_threshold : #nodes (tranfer nodes)
            cch[item][2] = cch[item][2]-((elec_tran+(mpf*(distance**4)))*pkt_control)
        # The next candidate pays the cost of receiving the control packet.
        cch[item+1][2] = cch[item+1][2]-(elec_rec*pkt_control)
        if distance > R1+R1 :
            # Non-overlapping coverage radii: keep the pair member with the
            # larger remaining energy as a confirmed cluster head.
            if float(cch[item][2]) <= float(cch[item+1][2]):
                cluster_member.append(cch[item+1])
            else:
                cluster_member.append(cch[item])
        else:
            # Overlapping coverage: demote this candidate back to the pool.
            node_member.append(cch[item])
    print('node',len(node_member))
    print(cluster_member)
    return cluster_member
def cluster_head(node_member, cluster_member, d_threshold, elec_tran,elec_rec ,fs,pkt_control, mpf):
    "cluster head announce in them area then each decide to join"
    # Each plain node finds its nearest cluster head; every head pays the
    # transmit cost of its announcement toward that node and the node pays
    # one receive cost.  Returns [node_index, cluster_index, distance] rows.
    group_node = []
    for node in range(len(node_member)):
        shot_dis = None  # shortest distance found so far
        what_cluster = None  # index of the closest cluster head
        for cluster in range(len(cluster_member)):
            cal_distance = \
                math.sqrt((node_member[node][0] - cluster_member[cluster][0]) ** 2 +
                    (node_member[node][1] - cluster_member[cluster][1]) ** 2)
            if shot_dis is None:
                shot_dis = cal_distance
                what_cluster = cluster
            elif cal_distance < shot_dis:
                shot_dis = cal_distance
                what_cluster = cluster
            # Radio model: free-space (d^2) below threshold, multi-path (d^4)
            # at or above — every head is charged for every node it reaches.
            if cal_distance < d_threshold :#nodes (tranfer nodes)
                cluster_member[cluster][2] = cluster_member[cluster][2]-\
                    ((elec_tran+(fs*(cal_distance**2)))*pkt_control)
            elif cal_distance >= d_threshold : #nodes (tranfer nodes)
                cluster_member[cluster][2] = cluster_member[cluster][2]-\
                    ((elec_tran+(mpf*(cal_distance**4)))*pkt_control)
        node_member[node][2] = node_member[node][2]-(elec_rec*pkt_control) #used energy's cluster
        # NOTE(review): if cluster_member is empty, shot_dis/what_cluster stay
        # None here — downstream code must tolerate that (or it will fail).
        group_node.append([node, what_cluster, shot_dis])
    print(cluster_member)
    return group_node
def plot_graph(cluster_member, node_member, cch, station, group_node):
    """Visualize the topology: cluster heads (red) with coverage circles,
    member nodes (green), and node-to-head assignment lines."""
    # split 2d list to 1d *list* [use with graph only]
    node_x, node_y, energy_node = zip(*node_member)
    # PLOT
    fig, ax = plt.subplots()
    ax.set_aspect('equal', adjustable='datalim')
    for plot in cluster_member:
        plt.plot(plot[0], plot[1], '.', color='red', alpha=0.7)
        # Radius 30 matches the R1 coverage radius used elsewhere.
        ax.add_patch(plt.Circle((plot[0], plot[1]), 30, alpha=0.17))
        # ax.annotate(text, (plot[0][0], plot[0][1]))
    for z in range(len(group_node)):
        # Only draw a link for nodes that joined a head and still have energy.
        if group_node[z][2] != 0 and float(node_member[z][2]) > 0.0:
            plt.plot([node_member[int(group_node[z][0])][0], \
                cluster_member[int(group_node[z][1])][0]],\
                [node_member[int(group_node[z][0])][1], \
                cluster_member[int(group_node[z][1])][1]],\
                color='k', linestyle='-', linewidth=0.1) # Black Line
    plt.plot(node_x[0:], node_y[0:], '.', color='green', alpha=0.7)
    ax.plot() # Causes an auto-scale update.
    plt.show()
def start():
    """Run one full simulation: place base stations and nodes, elect
    candidate cluster heads, confirm heads, and attach member nodes while
    charging the corresponding radio-energy costs.

    Bug fix: the original body only declared the parameters and then called
    ``cluster_head`` with the undefined names ``node_member`` and
    ``cluster_member`` — a guaranteed NameError. The intermediate pipeline
    steps (base_station / random_nodes / random_cch / distance_candidate)
    are now invoked to produce those values.
    """
    # Change Variables Here!!
    width = int(100)  # meter
    height = int(100)  # meter
    density = float(0.0125)
    t_predefine = float(0.1)
    num_base = int(1)
    pos_base = "0,0"
    set_energy = int(3)  # set energy = 1 Joule
    pkt_control = 200  # bit
    pkt_data = 4000  # bit (not used by the pipeline below yet)
    elec_tran = 50 * (10 ** (-9))  # 50 nanoj
    elec_rec = 50 * (10 ** (-9))  # 50 nanoj
    fs = 10 * (10 ** (-12))  # 10 picoj
    mpf = 0.013 * (10 ** (-12))  # 0.013 picoj
    d_threshold = 87  # **********************
    R1 = 30
    # Pipeline: base stations -> random nodes -> candidate heads ->
    # confirmed cluster heads -> member assignment.
    station = base_station(num_base, pos_base)
    node_member, len_nodes = random_nodes(width, height, station, set_energy, density)
    cch = random_cch(node_member, t_predefine, len_nodes)
    cluster_member = distance_candidate(cch, pkt_control, elec_tran, elec_rec, \
        fs, mpf, d_threshold, node_member, R1)
    cluster_head(node_member, cluster_member, d_threshold, \
        elec_tran, elec_rec, fs, pkt_control, mpf)
    # print("t=",t_predefine, "cch=",candidate , 'node=',len_nodes)
start()
|
#!/usr/bin/env python
import sys, os
import subprocess
# Runs every unit-test binary listed in $BUILD_DIR/tests/all_tests.txt,
# streaming its output, and reports overall success/failure (Python 2).
build_dir = sys.argv[1]
try:
    for line in open(build_dir + "/tests/all_tests.txt", "r").readlines():
        print "******************************************************************" \
            "*************"
        print "* RUNNING: %s" % line.strip()
        print "******************************************************************" \
            "*************"
        # run the next unit test
        subprocess.check_call([line.strip(), "--logtostderr"],
            stdout=sys.stdout, stderr=sys.stderr)
    # we actually made it to the end
    print "====================================================================" \
        "===========\n" \
        "ALL UNIT TESTS PASSING :-)"
except subprocess.CalledProcessError as e:
    # A test binary exited non-zero; its own output above explains why.
    print "====================================================================" \
        "===========\n" \
        "UNIT TEST(s) FAILED :-( See above for details. \n" \
        "====================================================================" \
        "===========\n"
except Exception as e:
    # Most likely a missing/unreadable all_tests.txt or test binary.
    print "Failed to run all tests. Check the following: \n" \
        "1) Does all_tests.txt in $BUILD_DIR/tests/ exist?" \
        "2) Do all test binaries listed in all_tests.txt exist and are they " \
        "executable?"
    print e
|
from .quality_metric_list import *
from .quality_metric_calculator import (compute_quality_metrics,
get_quality_metric_list, QualityMetricCalculator, get_default_qm_params)
from .pca_metrics import get_quality_pca_metric_list
|
import torch
from segmentation_models_pytorch.utils.train import TrainEpoch, ValidEpoch
class TrainEpochMultiGPU(TrainEpoch):
    """TrainEpoch variant that wraps the model in ``torch.nn.DataParallel``
    when ``device`` is supplied as a list of GPU ids (e.g. ``[0, 1]``)."""

    def __init__(self, model, **kwargs) -> None:
        super().__init__(model, **kwargs)

    def _to_device(self):
        """Move model, loss and metrics to the configured device(s).

        Cleanup: removed the dead local ``device = self.device`` and fixed
        the typo in the error message ("greter").
        """
        if isinstance(self.device, list) and len(self.device) > 1:
            # Wrap once; repeated calls must not re-wrap an already
            # parallelized model.
            if not isinstance(self.model, torch.nn.DataParallel):
                self.model = torch.nn.DataParallel(self.model, self.device)
            # The first listed GPU becomes the primary device for loss/metrics.
            self.device = f'cuda:{self.device[0]}'
        elif isinstance(self.device, list):
            raise ValueError('list of devices must be greater than 1, otherwise provide a device in string format')
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
class ValidEpochMultiGPU(ValidEpoch):
    """ValidEpoch variant that wraps the model in ``torch.nn.DataParallel``
    when ``device`` is supplied as a list of GPU ids (e.g. ``[0, 1]``)."""

    def __init__(self, model, **kwargs) -> None:
        super().__init__(model, **kwargs)

    def _to_device(self):
        """Move model, loss and metrics to the configured device(s).

        Cleanup: removed the dead local ``device = self.device`` and fixed
        the typo in the error message ("greter").
        """
        if isinstance(self.device, list) and len(self.device) > 1:
            # Wrap once; repeated calls must not re-wrap an already
            # parallelized model.
            if not isinstance(self.model, torch.nn.DataParallel):
                self.model = torch.nn.DataParallel(self.model, self.device)
            # The first listed GPU becomes the primary device for loss/metrics.
            self.device = f'cuda:{self.device[0]}'
        elif isinstance(self.device, list):
            raise ValueError('list of devices must be greater than 1, otherwise provide a device in string format')
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
|
import VRP
import readers
import time
from gurobiHandler import vrpSolver
import csv
import getopt
import sys
import os
import matplotlib.pyplot as plt
import networkx as nx
class vrpRunner:
    """Reads a VRP instance file, builds route configurations and solves the
    set-partitioning IP with Gurobi (Python 2)."""

    def __init__(self,readerType):
        # Only the Solomon benchmark format is supported at the moment.
        if readerType == "solomon":
            self.reader = readers.solomonFileReader()
        else:
            raise Exception("illegal reader type: " + readerType)

    def generateAndSolveInstance(self,fileName,buildParam,runParam,maxConfSize,timeout,setNumTrucks,alphs, capacity = None):
        """Read *fileName*, build configurations, solve, and return a result
        dict with truck count, distance, and build/solve timings.

        If *setNumTrucks* is true, re-solves with an increasing fleet-size
        bound until a feasible solution appears; -2 signals "none found".
        """
        data = self.reader.readFile(fileName)
        if not capacity:
            capacity = data["capacity"]
        vrp = VRP.VRP(data["nTrucks"],capacity , data["targets"])
        t1 = time.time()
        confs = vrp.bfsConfBuilderWrapper(buildParam, runParam, maxConfSize,alphs)
        t2 = time.time()
        if setNumTrucks:
            for nConfs in range(1,vrp.nTargets):
                print "trying with nConfs",nConfs,"..."
                s = vrpSolver(confs, vrp,timeout)
                s.buildIP(nConfs)
                [nVehicles,totalDistance,exitOnTimeOut] = s.solve()
                if nVehicles > -1:
                    break
            if nVehicles == -2:
                # Infeasible for every fleet size tried.
                return {'nTrucks' : "n/a", 'totalDistance':"n/a",'confBuildTime': t2 - t1, 'solverTime': time.time() - t2,"fileName":fileName,\
                    "buildParam":buildParam,"runParam":runParam,"maxConfSize":maxConfSize,"exitOnTimeOut":exitOnTimeOut}
        else:
            s = vrpSolver(confs, vrp,timeout)
            s.buildIP()
            [nVehicles,totalDistance,exitOnTimeOut] = s.solve()
        t3 = time.time()
        return {'nTrucks' : nVehicles, 'totalDistance':totalDistance,'confBuildTime': t2 - t1, 'solverTime': t3 - t2,"fileName":fileName,\
            "buildParam":buildParam,"runParam":runParam,"maxConfSize":maxConfSize,"exitOnTimeOut":exitOnTimeOut}
class filePrinter:
    """Appends VRP result rows to CSV files, writing a header row only when
    the target file is first created.

    Bug fix: two methods ended with ``csvfile.close`` — missing the call
    parentheses — so the handles were never closed.  All methods now use
    ``with open(...)`` which guarantees closure.  Files are opened in text
    append mode ('a') rather than 'ab': csv writers emit str, not bytes.
    """

    def __init__(self):
        pass

    def printHeaderIfNewFile(self, filename, headers):
        """Create *filename* with a header row if it does not exist yet."""
        if not os.path.isfile(filename):
            with open(filename, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=headers)
                writer.writeheader()

    def printSingleRes(self, res, filename, headers):
        """Append a single result dict as one CSV row."""
        with open(filename, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writerow(res)

    def printRes(self, allRes, filename, headers):
        """Append many result dicts, emitting a header only for a new file."""
        writeHeader = not os.path.isfile(filename)
        with open(filename, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            if writeHeader:
                writer.writeheader()
            writer.writerows(allRes)
class graphPrinter:
    """Draws VRP solutions (depot + route cycles) with networkx/matplotlib."""

    def __init__(self):
        pass

    def printGraph(self,vrp,chosenConfs,name):
        """Plot the chosen route configurations of *vrp*; save PNG to *name*."""
        G=nx.Graph()
        # set positions
        pos = {}
        for target in range(len(vrp.targetLocations)):
            pos[target] = vrp.targetLocations[target]
            G.add_node(target)
        # NOTE(review): Graph.add_cycle was removed in networkx >= 2.4
        # (replacement: nx.add_cycle(G, ...)); this assumes an old networkx.
        for c in chosenConfs:
            G.add_cycle([0] + c.targets)  # each route starts/ends at depot 0
        nx.draw_networkx_nodes(G,pos,node_size=5)
        nx.draw_networkx_edges(G,pos)
        plt.axis('off')
        plt.savefig(name) # save as png
        plt.show() # display

    def printGraphFromIlog(self,truck2Targets,targets,name):
        """Plot routes from a truck -> target-list mapping (ILOG output)."""
        G=nx.Graph()
        # add nodes
        for target in targets:
            G.add_node(target)
        G.add_node(0)
        # Depot position is hard-coded; presumably the Solomon depot (35,35)
        # — TODO confirm against the instance files.
        targets[0] = (35,35)
        for t in truck2Targets:
            G.add_cycle([0] + truck2Targets[t])
        nx.draw_networkx_nodes(G,targets,node_size=5)
        nx.draw_networkx_edges(G,targets)
        plt.axis('off')
        plt.savefig(name) # save as png
        plt.show() # display
class optionsHandler:
    """Parses and validates the command-line options of the VRP runner
    (Python 2, getopt-based)."""

    def usage(self):
        """Print the supported command-line flags."""
        print "-r <run param>"
        print "-b <build param>"
        print "-n [25|50|100] <solomon lib>"
        print "-t <gurobi timeout>"
        print "-i <instance name>"
        print "-s [t|f] <iteratively solve with a set num of trucks, default is f>"
        print "-a <alpha*trimParam best confs, the rest will be chosen to maximize variance>"

    def assertOptions(self,runParam,buildParam,solomonLib,timeout,instanceName = None):
        """Exit(2) with a usage message if any mandatory option is missing.

        A value of 0 (or "" for instanceName) means "not supplied".
        """
        message = ""
        if runParam == 0:
            message += "must supply run param"
        if buildParam == 0:
            message += "must supply build param"
        if solomonLib == 0:
            message += "must supply solomonLib"
        if timeout == 0:
            message += "must supply timeout"
        if instanceName == "":
            message += "must supply instance name"
        if message != "":
            print "one or more errors in parsing input:"
            print message
            self.usage()
            sys.exit(2)

    def __init__(self,args):
        # Parse but do not yet interpret; parseOptions() does the typing.
        try:
            opts, _ = getopt.gnu_getopt(args[1:], "r:b:n:t:s:i:a:")
            self.opts = opts
        except getopt.GetoptError as err:
            # print help information and exit:
            print str(err) # will print something like "option -a not recognized"
            self.usage()
            sys.exit(2)

    def parseOptions(self,enforceInstanceName = False):
        """Convert the raw getopt pairs into typed, range-checked values.

        Returns a tuple (runParam, buildParam, solomonLib, timeout,
        setNumTrucks, alpha[, instanceName]); the final element is included
        only when *enforceInstanceName* is true.
        """
        runParam = 0
        buildParam = 0
        solomonLib = 0
        timeout = 0
        alpha = 1.0
        instanceName = ""
        setNumTrucks = False
        for o, a in self.opts:
            if o == "-h":
                self.usage()
                sys.exit(0)
            elif o == "-s":
                if a == "t":
                    setNumTrucks = True
                elif a != "f":
                    print "invalid value for -s", a
                    self.usage()
                    sys.exit(0)
            elif o == "-r":
                runParam = int(a)
                if runParam < 0:
                    print "invalid run param", a
                    sys.exit(0)
            elif o == "-t":
                timeout = int(a)
                if timeout < 0:
                    print "invalid timeout", a
                    sys.exit(0)
            elif o == "-b":
                buildParam = int(a)
                if buildParam < 0:
                    print "invalid build param", a
                    sys.exit(0)
            elif o == "-i":
                instanceName = a
            elif o == "-a":
                alpha = float(a)
                if alpha > 1.0 or alpha < 0.0:
                    print "invalid alpha value", a
                    sys.exit(0)
            elif o == "-n":
                # Solomon benchmark size: 25, 50 or 100 customers.
                solomonLib = int(a)
                if solomonLib not in [25,50,100]:
                    print "invalid solomonLib", a
                    sys.exit(0)
            else:
                assert False, "unhandled option : " + o
        if enforceInstanceName:
            self.assertOptions(runParam,buildParam,solomonLib,timeout,instanceName)
            return (runParam,buildParam,solomonLib,timeout,setNumTrucks,alpha,instanceName)
        else:
            self.assertOptions(runParam,buildParam,solomonLib,timeout)
            return (runParam,buildParam,solomonLib,timeout,setNumTrucks,alpha)
class bestSols:
    """Lookup tables of best-known Solomon benchmark solutions.

    Each entry maps instance name -> [vehicle count, total distance], keyed
    by problem size (25 / 50 / 100 customers) in all25Data / all50Data /
    all100Data.  Sources are cited inline below.
    """

    def __init__(self):
        # data taken from http://www.bernabe.dorronsoro.es/vrp/
        self.all25Data = {"C101":[3,191.3],"C102":[3,190.3],"C103":[3,190.3],"C104":[3,186.9],"C105":[3,191.3],\
            "C106":[3,191.3],"C107":[3,191.3],"C108":[3,191.3],"C109":[3,191.3],\
            "C201":[2,214.7],"C202":[2,214.7],"C203":[2,214.7],"C204":[1,213.1],\
            "C205":[2,214.7],"C206":[2,214.7],"C207":[2,214.5],"C208":[2,214.5],\
            "R101":[8,617.1],"R102":[7,547.1],"R103":[5,454.6],"R104":[4,416.9],"R105":[6,530.5],"R106":[5,465.4],\
            "R107":[4,424.3],"R108":[4,397.3],"R109":[5,441.3],"R110":[4,444.1],"R111":[4,428.8],"R112":[4,393.0],\
            "R201":[4,463.3],"R202":[4,410.5],"R203":[3,391.4],"R204":[2,355.0],"R205":[3,393.0],"R206":[3,374.4],\
            "R207":[3,361.6],"R208":[1,328.2],"R209":[2,370.7],"R210":[3,404.6],"R211":[2,350.9],\
            "RC101":[4,461.1],"RC102":[3,351.8],"RC103":[3,332.8],"RC104":[3,306.6],\
            "RC105":[4,411.3],"RC106":[3,345.5],"RC107":[3,298.3],"RC108":[3,294.5],\
            "RC201":[3,360.2],"RC202":[3,338.0],"RC203":[3,326.9],"RC204":[3,299.7],\
            "RC205":[3,338.0],"RC206":[3,324.0],"RC207":[3,298.3],"RC208":[2,269.1]}
        self.all50Data = {"C101":[5,362.4],"C102":[5,361.4],"C103":[4,361.4],"C104":[5,359.0],"C105":[5,362.4],\
            "C106":[5,362.4],"C107":[5,362.4],"C108":[5,362.4],"C109":[5,362.4],\
            "C201":[3,360.2],"C202":[3,360.2],"C203":[3,359.8],"C204":[2,353.4],\
            "C205":[3,359.8],"C206":[3,359.8],"C207":[3,359.6],"C208":[2,350.5],\
            "R101":[13,1047.0],"R102":[12,944.9],"R103":[9,772.9],"R104":[6,631.2],"R105":[10,906.6],"R106":[8,793.6],\
            "R107":[7,720.4] ,"R108":[6,618.2] ,"R109":[8,803.2],"R110":[8,724.9],"R111":[8,724.9],"R112":[6,651.1],\
            "R201":[6,800.7],"R202":[5,712.2],"R203":[5,606.4],"R204":[2,509.5],"R205":[5,703.3],"R206":[5,647.0],\
            "R207":[4,584.6],"R208":[2,487.7],"R209":[4,600.6],"R210":[5,663.4],"R211":[3,551.3],\
            "RC101":[9,957.9],"RC102":[8,844.3],"RC103":[6,712.6],"RC104":[5,546.5],\
            "RC105":[9,888.9],"RC106":[7,791.9],"RC107":[6,664.5],"RC108":[6,598.1],\
            "RC201":[5,684.8],"RC202":[5,613.6],"RC203":[4,555.3],"RC204":[3,444.2],\
            "RC205":[5,631.0],"RC206":[5,610.0],"RC207":[4,558.6],"RC208":[100,10000]}
            # NOTE(review): RC208 [100,10000] above looks like a sentinel for
            # "no known solution" rather than real data — confirm.
        self.all100Data = {"C101":[10,827.3],"C102":[10,827.3],"C103":[10,826.3],"C104":[10,822.9],"C105":[10,827.3],\
            "C106":[10,827.3],"C107":[10,827.3],"C108":[10,827.3],"C109":[10,827.3],\
            "R101":[20,1637.7],"R102":[18,1466.6],"R103":[14,1208.7],"R104":[10,982.01],"R105":[15,1355.3],"R106":[13,1234.6],\
            "R107":[11,1064.6],"R108":[9,960.88],"R109":[13,1146.9],"R110":[12,1068],"R111":[12,1048.7],"R112":[9,982.14],\
            "RC101":[15,1619.8],"RC102":[14,1457.4],"RC103":[11,1258],"RC104":[10,1135.48],\
            "RC105":[15,1513.7],"RC106":[11,1424.73],"RC107":[11,1230.48],"RC108":[10,1139.82]}
        # data taken from http://w.cba.neu.edu/~msolomon/c1c2solu.htm
        dataToAdd_100 = {"C201":[3,589.1],"C202":[3,589.1],"C203":[3,588.7],"C204":[3,588.1],"C205":[3,586.4],\
            "C206":[3,586.0],"C207":[3,585.8],"C208":[3,585.8]}
        self.all100Data.update(dataToAdd_100)
        # data taken from http://sun.aei.polsl.pl/~zjc/best-solutions-solomon.html
        # (overwrites some of the entries loaded above with newer values)
        dataToAdd_100 = {"R101":[19,1650.79],"R102":[17,1486.85],"R103":[13,1292.67],"R104":[9,1007.31],"R105":[14,1377.11],\
            "R106":[12,1252.03],"R107":[10,1104.65],"R109":[11,1194.73],"R110":[10,1118.83],"R111":[10,1096.73],\
            "R201":[4,1252.37],"R202":[3,1191.70],"R203":[3,939.50],"R204":[2,825.52],"R205":[3,994.43],"R206":[3,906.14],\
            "R207":[2,890.61],"R208":[2,726.82],"R209":[3,909.16],"R210":[3,939.37],"R211":[2,885.71],\
            "RC101":[14,1696.95],"RC102":[12,1554.75],"RC105":[13,1629.44],\
            "RC201":[4,1406.94],"RC202":[3,1365.64],"RC203":[3,1049.62],"RC204":[3,798.46],\
            "RC205":[4,1297.65],"RC206":[3,1146.32],"RC207":[3,1061.14],"RC208":[3,828.14]}
        self.all100Data.update(dataToAdd_100)
|
from flask import Flask, render_template, request, redirect, url_for
from werkzeug import secure_filename
import os
UPLOAD_FOLDER = '/root/portal/upload'
ALLOWED_EXTENSIONS = set(['txt','pdf','png','jpg','jpeg','gif','doc','docx','mp4'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
    """Landing page."""
    return render_template('index.html')

@app.route('/dtc')
def dtc():
    """DTC home page."""
    return render_template('dtchome.html')

@app.route('/about')
def about():
    """About page."""
    return render_template('about.html')
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    Robustness fix: the extension is lower-cased before the lookup so
    uploads like ``photo.JPG`` are accepted (ALLOWED_EXTENSIONS holds only
    lowercase entries); previously those were rejected.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET','POST'])
def upload():
    """Render the upload form (GET) or store an uploaded file (POST).

    Only files whose extension passes allowed_file() are saved to the
    configured UPLOAD_FOLDER; any other request re-renders the form.

    Robustness fix: ``request.files.get('file')`` is used instead of
    ``request.files['file']`` so a POST without the form field falls back
    to the form rather than aborting with a 400 error.
    """
    if request.method == 'POST':
        file = request.files.get('file')
        if file and allowed_file(file.filename):
            # secure_filename strips path components and unsafe characters.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return render_template('uploadsuccessful.html')
    return render_template('upload.html')
if __name__=='__main__':
    # Flask development server — not suitable for production deployment.
    app.run()
|
import unittest
from Calculator import Calculator
from CsvReader import CsvReader
from pprint import pprint
class MyTestCase(unittest.TestCase):
    """Unit tests for the Calculator class (add/minus/multiply/divide/
    square/squareRoot).  Several CSV-driven variants are stubbed out in
    comments pending fixture files."""

    def setUp(self) -> None:
        self.calculator = Calculator()

    def test_instantiate_calculator(self):
        """Calculator() should construct successfully."""
        self.assertIsInstance(self.calculator, Calculator)

    def test_add(self):
        result = Calculator.add(10, 5)
        self.assertEqual(result, 15)
        test_data = CsvReader('src/additionTest.csv').data
        # pprint(test_data)
        # NOTE(review): this loop ignores the CSV row values and just
        # re-checks add(10, 5) once per row — the commented lines below
        # show the intended data-driven assertions.
        for row in test_data:
            result = Calculator.add(10, 5)
            self.assertEqual(result ,15)
            #self.assertEqual(self.calculator.add(row['Value 1'], row['Value 2']), int(row['Result']))
            #self.assertEqual(int(self.calculator.result), int(row['Result']))

    def test_minus(self):
        result = Calculator.minus(5, 3)
        self.assertEqual(result, 2)

    def test_multiply(self):
        # test_data = CsvReader('src/multiplicationTests.csv').data
        #pprint(test_data)
        #for row in test_data:
        #    self.assertEqual(self, Calculator.multiply(row['Value 1'], row['Value 2']), int(row['Result']))
        #    self.assertEqual(self, Calculator.result, int(row['Result']))
        result = Calculator.multiply(5, 3)
        self.assertEqual(result, 15)

    def test_divide(self):
        result = Calculator.divide(3, 2)
        self.assertEqual(result, 1.5)
        # Division by zero must raise rather than return a sentinel.
        with self.assertRaises(ValueError):
            Calculator.divide(10, 0)
        test_data = CsvReader('src/divisionTests.csv').data
        #for row in test_data:
        #self.assertEqual(self.calculator.divide(row['Value 1'], row['Value 2']), int(row['Result']))
        #    self.assertEqual(self.calculator.result, int(row['Result']))

    def test_square(self):
        result = Calculator.square(6)
        self.assertEqual(result, 36)
        #test_data = CsvReader('src/squaringTests.csv').data
        # for row in test_data:
        #     self.assertEqual(self.calculator.square(row['Value 1'], row['Value 2']), int(row['Result']))
        #     self.assertEqual(self.calculator.result, int(row['Result']))

    def test_square_root(self):
        result = Calculator.squareRoot(81)
        self.assertEqual(result, 9)
        #test_data = CsvReader('src/squareRootTests.csv').data
        # for row in test_data:
        #     self.assertEqual(self.calculator.squareRoot(row['Value 1'], row['Value 2']), int(row['Result']))
        #     self.assertEqual(self.calculator.result, int(row['Result']))

if __name__ == '__main__':
    unittest.main()
|
''' Berkeley Deepdrive Segmentation Dataset loader '''
import os
import re
import numpy as np
from matplotlib.image import imread
from PIL import Image
import torch
from torch.utils.data import Dataset
#from dataset.utils import listdir
class BDDSegmentationDataset(Dataset):
    ''' Dataset loader for Berkeley Deepdrive Segmentation dataset '''

    def __init__(self, path, split, transforms=None):
        """Index the image/label file pairs for one dataset split.

        path: dataset root containing seg/images/<split> and seg/labels/<split>.
        split: one of 'train', 'val', 'test'.
        transforms: optional callable applied to the (image, label) tuple.
        """
        assert split in ['train', 'val', 'test'], 'split must be one of: {train, val, test}'
        image_re = re.compile(r'(.*)\.jpg')
        label_re = re.compile(r'(.*)_train_id\.png')
        # ``listdir`` is the module-level helper defined later in this file;
        # it is resolved at call time, so the forward reference is fine.
        images = sorted(listdir(os.path.join(path, 'seg/images', split), image_re))
        labels = sorted(listdir(os.path.join(path, 'seg/labels', split), label_re))
        # Sanity check: every image stem must pair with the matching label stem.
        for (image, label) in zip(images, labels):
            assert (image_re.match(os.path.basename(image)).group(1) ==
                label_re.match(os.path.basename(label)).group(1))
        self.images, self.labels = images, labels
        self.transforms = transforms

    def __len__(self):
        return len(self.images)

    def __getitem__(self, key):
        # NOTE(review): matplotlib's imread returns uint8 arrays for JPEG but
        # float32 in [0,1] for PNG — confirm downstream transforms expect
        # that mix. The label gets an explicit leading channel axis (1, H, W).
        image = imread(self.images[key])
        label = imread(self.labels[key])
        sample = (image,np.expand_dims(label, axis=0))
        #sample = (np.asarray(image.getdata()),np.asarray(label.getdata()))
        if self.transforms:
            sample = self.transforms(sample)
        return sample
def listdir(path, filter_=re.compile(r'.*')):
    '''Enumerate full paths of directory entries whose names match *filter_*.'''
    matched = []
    for name in os.listdir(path):
        if filter_.match(name):
            matched.append(os.path.join(path, name))
    return matched
def bdd_palette(labels):
    ''' Applies a color palette to either a single label
    tensor or a batch of tensors '''
    assert len(labels.shape) in [2, 3], 'Invalid labels shape'
    # 20-entry RGB palette, one row per class id, normalized to [0, 1].
    # pylint: disable=bad-whitespace
    palette = torch.Tensor([
        [128,  67, 125],  # Road
        [247,  48, 227],  # Sidewalk
        [ 72,  72,  72],  # Building
        [101, 103, 153],  # Wall
        [190, 151, 152],  # Fence
        [152, 152, 152],  # Pole
        [254, 167,  56],  # Light
        [221, 217,  55],  # Sign
        [106, 140,  51],  # Vegetation
        [146, 250, 157],  # Terrain
        [ 65, 130, 176],  # Sky
        [224,  20,  64],  # Person
        [255,   0,  25],  # Rider
        [  0,  22, 138],  # Car
        [  0,  11,  70],  # Truck
        [  0,  63,  98],  # Bus
        [  0,  82,  99],  # Train
        [  0,  36, 224],  # Motorcycle
        [121,  17,  38],  # Bicycle
        [  0,   0,   0]   # Other
    ]).to(labels.device) / 255.0
    single_image = len(labels.shape) == 2
    if single_image:
        labels = torch.unsqueeze(labels, 0)
    # Map the ignore index (and anything out of range) onto the final
    # "Other" entry, then gather colors by direct indexing: (n, h, w, 3).
    class_ids = torch.clamp(labels, 0, 20 - 1).long()
    colors = palette[class_ids]
    # Rearrange to channels-first (n, 3, h, w) to match image conventions.
    colors = colors.permute(0, 3, 1, 2)
    if single_image:
        colors = torch.squeeze(colors, 0)
    return colors
# https://www.reddit.com/r/dailyprogrammer/comments/3s4nyq/20151109_challenge_240_easy_typoglycemia/
import re
import random
def typo(sentence):
    """Scramble the interior letters of every word in *sentence*.

    The first and last letters of each word keep their places; interior
    letters are shuffled randomly.  Non-word characters (punctuation and
    original spacing) are dropped and words are re-joined with single
    spaces, leaving a trailing space — matching the original output format.

    Bug fix: one-letter words were emitted doubled ("a" -> "aa") because
    ``word[0]`` and ``word[-1]`` are the same character; they are now
    passed through unchanged.

    Returns the reworked sentence (it is also printed, as before).
    """
    reworkedSentence = ""
    for word in re.findall(r'\w+', sentence):
        if len(word) <= 1:
            reworkedSentence += word + " "
            continue
        middle = list(word[1:-1])
        random.shuffle(middle)
        reworkedSentence += word[0] + "".join(middle) + word[-1] + " "
    print(reworkedSentence)
    return reworkedSentence
# Demo: scramble the classic (apocryphal) "Cambridge University" paragraph.
typo("According to a research team at Cambridge University, " +
    "it doesn't matter in what order the letters in a word are, the only important thing is that the first and last letter be in the right place. " +
    "The rest can be a total mess and you can still read it without a problem." +
    "This is because the human mind does not read every letter by itself, but the word as a whole. " +
    "Such a condition is appropriately called Typoglycemia.")
|
from pkg_resources import EntryPoint, get_distribution
import pytest
from raincoat import match as match_module
@pytest.fixture
def basic_match():
    """A minimal Match pointing at yay.py:12."""
    return match_module.Match(filename="yay.py", lineno=12)

def test_str_match(basic_match):
    # __str__ embeds the filename and line number.
    assert str(basic_match) == "Match in yay.py:12"

def test_match_from_comment(match):
    # A bare "# Raincoat:" comment without a recognised payload must raise.
    # NOTE(review): the ``match`` fixture comes from a conftest not shown here.
    with pytest.raises(match_module.NotMatching):
        assert match_module.match_from_comment(
            "# Raincoat: bla", filename="yay.py", lineno=12) == match

def test_check_matches(mocker, match):
    # check_matches should dispatch to the checker registered for the type
    # and yield whatever that checker returns.
    mocker.patch("raincoat.match.pypi.PyPIChecker.check",
        return_value=[1])
    mocker.patch("raincoat.match.match_types", {"pypi": match.__class__})
    assert list(match_module.check_matches({"pypi": [match]})) == [1]

def test_check_matches_no_checker(mocker):
    # A Match subclass that never defined a checker must raise
    # NotImplementedError when checked.
    class Unfinished(match_module.Match):
        match_type = "unfinished"
    match = Unfinished("yay.py", 12)
    match_module.match_types["unfinished"] = Unfinished
    try:
        with pytest.raises(NotImplementedError):
            list(match_module.check_matches({"unfinished": [match]}))
    finally:
        # Always deregister so other tests see a clean registry.
        match_module.match_types.pop("unfinished")
def test_compute_match_types(mocker):
    """Entry points with distinct names each register their own match class."""
    iep = mocker.patch("raincoat.match.iter_entry_points")
    class MatchFactory(object):
        # Stand-in for a Match subclass loaded from an entry point.
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return self.name
    MatchA, MatchB = MatchFactory("A"), MatchFactory("B")
    entry_a, entry_b = iep.return_value = [
        EntryPoint("a", "aaa"),
        EntryPoint("b", "bbb"),
    ]
    entry_a.load = lambda: MatchA
    entry_b.load = lambda: MatchB
    assert match_module.compute_match_types() == {"a": MatchA, "b": MatchB}
    # compute_match_types stamps each class with its entry-point name.
    assert MatchA.match_type == "a"
    assert MatchB.match_type == "b"

def test_compute_match_types_duplicate(mocker, caplog):
    """Two entry points sharing one name: first wins, a warning is logged."""
    iep = mocker.patch("raincoat.match.iter_entry_points")
    class MatchFactory(object):
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return self.name
    MatchA, MatchB = MatchFactory("A"), MatchFactory("B")
    entry_a, entry_b = iep.return_value = [
        EntryPoint("a", "aaa"),
        EntryPoint("a", "bbb"),
    ]
    entry_a.load = lambda: MatchA
    entry_b.load = lambda: MatchB
    assert match_module.compute_match_types() == {"a": MatchA}
    assert "Several classes registered for the match type a" in caplog.records[0].message
    assert "B will be ignored" in caplog.records[0].message
    assert "A will be used" in caplog.records[0].message
def test_format_line_first(match, color):
    """The first line (index 0) is wrapped in message/neutral color markers.

    NOTE: the expected value uses implicit string concatenation, i.e.
    "message" "haha" "neutral" == "messagehahaneutral".
    """
    assert match.format_line("haha", color, 0) == "message" "haha" "neutral"
def test_format_line_not_first(match, color):
    """Lines after the first (index > 0) are returned uncolored."""
    assert match.format_line("haha", color, 1) == "haha"
def test_format(match, color):
    """format() renders a colored header line followed by the colored message.

    Expected values use implicit string concatenation of adjacent literals.
    """
    assert match.format("haha", color) == (
        "match" "umbrella == 3.2 @ path/to/file.py:MyClass "
        "(from filename:12)" "neutral\n"
        "message" "haha" "neutral\n")
def test_format_empty(match, color):
    """An empty message renders the header line only."""
    assert match.format("", color) == (
        "matchumbrella == 3.2 @ path/to/file.py:MyClass "
        "(from filename:12)" "neutral\n")
def test_format_space(match, color):
    """Whitespace-only message lines are dropped; only the first surviving
    line gets the message/neutral color wrapping."""
    assert match.format("haha\n \nhehe", color) == (
        "match" "umbrella == 3.2 @ path/to/file.py:MyClass "
        "(from filename:12)" "neutral\n"
        "message" "haha" "neutral\n"
        "hehe\n")
|
from keras.models import Sequential
from keras.layers.core import Dense, Activation

# Minimal demo: a single 784 -> 32 dense layer followed by a ReLU.
model = Sequential()
model.add(Dense(units=32, input_shape=(784,)))
model.add(Activation('relu'))
# Model.summary() prints the table itself and returns None, so the original
# print(model.summary()) emitted a stray trailing "None" line.
model.summary()
#!/usr/bin/python3
import re
import time
def getstats(fobj=None):
    """Parse a /proc/net/dev-style table into {iface_name: [int counters]}.

    Parameters
    ----------
    fobj : file-like, optional
        Source to parse.  Defaults to the module-level ``f`` opened in the
        ``__main__`` block below, preserving the original behavior; passing
        an explicit file object makes the function testable and reusable.

    Returns
    -------
    dict mapping interface name to its list of integer counters.
    """
    if fobj is None:
        fobj = f  # module-level handle on /proc/net/dev (opened in __main__)
    rv = {}
    fobj.seek(0)
    # The first two lines of /proc/net/dev are column headers.
    for _ in range(2):
        fobj.readline()
    for line in fobj:
        (name, data) = line.strip().split(":")
        rv[name] = [int(x) for x in data.split()]
    return rv
if __name__ == "__main__":
    f = open("/proc/net/dev")  # kept open; getstats() re-seeks it each poll
    # Negative-lookahead regex: skip virtual interfaces (docker0, lo, veth*,
    # virbr*(-nic), vnet0); everything else matches.
    rx = re.compile("(?!(docker0|lo|veth[0-9a-f]+|virbr[0-9]+(-nic)?|vnet0)$)")
    old = getstats()
    # Poll forever, printing per-interface throughput every 2 seconds.
    while True:
        time.sleep(2)
        new = getstats()
        ifaces = sorted([iface for iface in set(old) & set(new) if rx.match(iface)])
        for iface in ifaces:
            # bytes delta over 2 s -> Mbps: delta * 8 / 2 / 1e6 == delta / 250000
            # columns: [0]=rx bytes, [1]=rx packets, [8]=tx bytes, [9]=tx packets
            print("%12s: RX: %6.2f Mbps (%4d pps) " \
                  "TX: %6.2f Mbps (%4d pps)" % \
                  (iface,
                   (new[iface][0] - old[iface][0]) / 250000.0,
                   (new[iface][1] - old[iface][1]) / 2,
                   (new[iface][8] - old[iface][8]) / 250000.0,
                   (new[iface][9] - old[iface][9]) / 2
                   ))
        old = new
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import platform
import warnings
try:
from setuptools import setup
from setuptools import Extension
from setuptools import find_packages
import setuptools
setuptools_version = setuptools.__version__.split('.')
if int(setuptools_version[0]) >= 50:
warnings.warn('The setuptools version found is >= 50.* '
'This version could lead to ModuleNotFoundError of basic packages '
'(ref. https://github.com/Nico-Curti/rFBP/issues/5). '
'We suggest to temporary downgrade the setuptools version to 49.3.0 to workaround this setuptools issue.', ImportWarning)
from setuptools import dist
dist.Distribution().fetch_build_eggs(['numpy>=1.15', 'Cython>=0.29'])
except ImportError:
from distutils.core import setup
from distutils.core import Extension
from distutils.core import find_packages
import numpy as np
from distutils import sysconfig
from Cython.Distutils import build_ext
from distutils.sysconfig import customize_compiler
from distutils.command.sdist import sdist as _sdist
def get_requires (requirements_filename):
    '''
    What packages are required for this module to be executed?
    Parameters
    ----------
    requirements_filename : str
        filename of requirements (e.g requirements.txt)
    Returns
    -------
    requirements : list
        list of required packages
    '''
    with open(requirements_filename, 'r') as fp:
        content = fp.read()
    # split() already drops empty tokens; the guard mirrors the original filter.
    return [token for token in content.split() if token != '']
def get_ext_filename_without_platform_suffix (filename):
    '''
    Strip the PEP 3149 platform tag (e.g. ".cpython-38-x86_64-linux-gnu")
    from an extension filename, keeping the bare extension (e.g. ".so").
    Returns the filename unchanged when no tag is present.
    '''
    base, ext = os.path.splitext(filename)
    suffix = sysconfig.get_config_var('EXT_SUFFIX')
    if suffix == ext:
        # EXT_SUFFIX carries no platform tag on this interpreter.
        return filename
    # Reduce EXT_SUFFIX to the tag alone, then cut the name at the tag.
    tag = suffix.replace(ext, '')
    pos = base.find(tag)
    return filename if pos == -1 else base[:pos] + ext
class rfbp_build_ext (build_ext):
    '''
    Custom build type
    '''
    def get_ext_filename (self, ext_name):
        # Compute the output filename for an extension, then strip the
        # PEP 3149 platform tag from it.
        if platform.system() == 'Windows':
            # The default EXT_SUFFIX of windows include the PEP 3149 tags of compiled modules
            # In this case I rewrite a custom version of the original distutils.command.build_ext.get_ext_filename function
            ext_path = ext_name.split('.')
            ext_suffix = '.pyd'
            filename = os.path.join(*ext_path) + ext_suffix
        else:
            filename = super().get_ext_filename(ext_name)
        return get_ext_filename_without_platform_suffix(filename)

    def build_extensions (self):
        # Drop '-Wstrict-prototypes': it is a C-only flag that warns/errors
        # on C++ translation units.
        customize_compiler(self.compiler)
        try:
            self.compiler.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            # Compiler has no compiler_so list (e.g. MSVC) or flag absent.
            pass
        build_ext.build_extensions(self)
class sdist(_sdist):
    # Run build_ext before creating the source distribution so generated
    # files (e.g. Cython output) are present in the tarball.
    def run(self):
        self.run_command("build_ext")
        _sdist.run(self)
def read_description (readme_filename):
    '''
    Description package from filename
    Parameters
    ----------
    readme_filename : str
        filename with readme information (e.g README.md)
    Returns
    -------
    description : str
        str with description ('' when the file does not exist)
    '''
    try:
        with open(readme_filename, 'r') as fp:
            description = '\n'
            description += fp.read()
        # BUG FIX: the original fell off the end here and returned None on
        # success; the assembled description was never returned.
        return description
    except FileNotFoundError:
        return ''
def read_dependencies_build (dependencies_filename):
    '''
    Read the json of dependencies
    '''
    with open(dependencies_filename, 'r') as fp:
        return json.load(fp)
def read_version (CMakeLists):
    '''
    Read version from variables set in CMake file
    Parameters
    ----------
    CMakeLists : string
        Main CMakefile filename or path
    Returns
    -------
    version : tuple
        Version as (major, minor, revision) of strings
    '''
    with open(CMakeLists, 'r') as fp:
        cmake = fp.read()
    # One regex per version component; the first occurrence wins, exactly as
    # in the original findall()[0] lookups.
    patterns = (r'set\s+\(RFBP_MAJOR\s+(\d+)\)',
                r'set\s+\(RFBP_MINOR\s+(\d+)\)',
                r'set\s+\(RFBP_REVISION\s+(\d+)\)')
    return tuple(int(re.findall(pattern, cmake)[0]) for pattern in patterns)
def dump_version_file (here, version_filename):
    '''
    Dump the __version__.py file as python script
    Parameters
    ----------
    here : string
        Local path where the CMakeLists.txt file is stored
    version_filename: string
        Filename or path where to save the __version__.py filename
    '''
    # CMakeLists.txt is the single source of truth for the version triple.
    VERSION = read_version(os.path.join(here, './CMakeLists.txt'))
    # NOTE: the template body intentionally sits at column 0 -- it is written
    # verbatim as a standalone Python module.
    script = '''#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = ['Nico Curti', "Daniele Dall'Olio"]
__email__ = ['nico.curti2@unibo.it', 'daniele.dallolio@studio.unibo.it']
__version__ = '{}.{}.{}'
'''.format(*VERSION)
    with open(version_filename, 'w') as fp:
        fp.write(script)
# Repository root; forward slashes so the path is also usable in -DPWD below.
here = os.path.abspath(os.path.dirname(__file__)).replace('\\', '/')
# Package meta-data.
NAME = 'ReplicatedFocusingBeliefPropagation'
DESCRIPTION = 'Replicated Focusing Belief Propagation algorithm.'
EMAIL = 'nico.curti2@unibo.it, daniele.dallolio@studio.unibo.it'
AUTHOR = "Nico Curti, Daniele Dall'Olio"
REQUIRES_PYTHON = '>=3.5'
VERSION = None  # None -> version is read back from the generated __version__.py
KEYWORDS = "belief-propagation deep-neural-networks spin-glass"
# Compiler string of the running interpreter, e.g. "GCC 9.3.0" or "MSC v.1916".
CPP_COMPILER = platform.python_compiler()
README_FILENAME = os.path.join(here, 'README.md')
REQUIREMENTS_FILENAME = os.path.join(here, 'requirements.txt')
DEPENDENCIES_FILENAME = os.path.join(here, 'ReplicatedFocusingBeliefPropagation', 'dependencies.json')
VERSION_FILENAME = os.path.join(here, 'ReplicatedFocusingBeliefPropagation', '__version__.py')
ENABLE_OMP = False   # OpenMP linking toggle (gcc only, see branch below)
BUILD_SCORER = False  # optional scorer include dirs toggle
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    LONG_DESCRIPTION = read_description(README_FILENAME)
except FileNotFoundError:
    LONG_DESCRIPTION = DESCRIPTION
# Expose numpy headers to the C++ compiler when the site-packages layout matches.
current_python = sys.executable.split('/bin')[0]
numpy_dir = current_python + '/lib/python{0}.{1}/site-packages/numpy/core/include'.format(sys.version_info.major, sys.version_info.minor)
if os.path.isdir(numpy_dir):
    os.environ['CFLAGS'] = '-I' + numpy_dir
# Regenerate __version__.py from CMakeLists.txt before reading it back.
dump_version_file(here, VERSION_FILENAME)
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    with open(VERSION_FILENAME) as fp:
        exec(fp.read(), about)
else:
    about['__version__'] = VERSION
# parse version variables and add them to command line as definitions
Version = about['__version__'].split('.')
URL = 'https://github.com/Nico-Curti/rFBP'#/archive/v{}.tar.gz'.format(about['__version__'])
# Read dependecies graph
dependencies = read_dependencies_build(DEPENDENCIES_FILENAME)
# Set compiler variables
define_args = [ '-DMAJOR={}'.format(Version[0]),
                '-DMINOR={}'.format(Version[1]),
                '-DREVISION={}'.format(Version[2]),
                '-DSTATS',
                '-DNDEBUG',
                '-DVERBOSE',
                '-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION',
                '-DPWD="{}"'.format(here)
              ]
if 'GCC' in CPP_COMPILER or 'Clang' in CPP_COMPILER:
    cpp_compiler_args = ['-std=c++1z', '-std=gnu++1z', '-g0']
    compile_args = [ '-Wno-unused-function', # disable unused-function warnings
                     '-Wno-narrowing', # disable narrowing conversion warnings
                     # enable common warnings flags
                     '-Wall',
                     '-Wextra',
                     '-Wno-unused-result',
                     '-Wno-unknown-pragmas',
                     '-Wfatal-errors',
                     '-Wpedantic',
                     '-march=native',
                   ]
    try:
        compiler, compiler_version = CPP_COMPILER.split()
    except ValueError:
        # Compiler string without a version token; assume version '0'.
        compiler, compiler_version = (CPP_COMPILER, '0')
    # NOTE(review): BUILD_SCORER is hard-coded False above, so this branch can
    # never re-enable it; it looks intended to gate the scorer on gcc >= 5.
    if compiler == 'GCC' and BUILD_SCORER:
        BUILD_SCORER = True if int(compiler_version[0]) > 4 else False
    if ENABLE_OMP and compiler == 'GCC':
        linker_args = ['-fopenmp']
    else:
        linker_args = []
        if compiler == 'Clang':
            print('OpenMP support disabled. It can be used only with gcc compiler.', file=sys.stderr)
elif 'MSC' in CPP_COMPILER:
    cpp_compiler_args = ['/std:c++latest', '/Ox']
    compile_args = ['/Wall', '/W3']
    if ENABLE_OMP:
        linker_args = ['/openmp']
    else:
        linker_args = []
else:
    raise ValueError('Unknown c++ compiler arg')
if BUILD_SCORER:
    scorer_include = [os.path.join(os.getcwd(), 'scorer', 'include'),
                      os.path.join(os.getcwd(), 'scorer', 'scorer', 'include'),]
else:
    scorer_include = []
# Flatten every flag group into the single list passed to Extension below.
whole_compiler_args = sum([cpp_compiler_args, compile_args, define_args, linker_args], [])
cmdclass = {'build_ext': rfbp_build_ext,
            'sdist': sdist}
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    maintainer=AUTHOR,
    maintainer_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    install_requires=get_requires(REQUIREMENTS_FILENAME),
    url=URL,
    download_url='{}/archive/v{}.tar.gz'.format(URL, about['__version__']),
    keywords=KEYWORDS,
    setup_requires=[  # Setuptools 18.0 properly handles Cython extensions.
        'setuptools>=18.0',
        # BUG FIX: the original read 'numpy>=1.15' 'Cython>=0.29' with a
        # missing comma; implicit string concatenation turned the pair into
        # the single unsatisfiable requirement 'numpy>=1.15Cython>=0.29'.
        'numpy>=1.15',
        'Cython>=0.29'],
    packages=find_packages(include=['ReplicatedFocusingBeliefPropagation',
                                    'ReplicatedFocusingBeliefPropagation.*'],
                           exclude=('test', 'example')),
    include_package_data=True,
    data_files=[('', ['CMakeLists.txt', 'README.md', 'LICENSE']),
                ('scripts', ['./scripts/download_atanherf.py'])],
    platforms='any',
    classifiers=[
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        #'License :: OSI Approved :: GPL License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    license='MIT',
    cmdclass=cmdclass,
    # One Extension per entry in the dependencies.json graph.
    ext_modules=[
        Extension(name='.'.join(['ReplicatedFocusingBeliefPropagation', 'lib', name]),
                  sources=values['sources'],
                  include_dirs=sum([values['include_dirs'], [np.get_include()]], []),
                  libraries=values['libraries'],
                  library_dirs=[
                      os.path.join(here, 'lib'),
                      os.path.join('usr', 'lib'),
                      os.path.join('usr', 'local', 'lib'),
                  ],  # path to .a or .so file(s)
                  extra_compile_args=whole_compiler_args,
                  extra_link_args=linker_args,
                  language='c++'
                  )
        for name, values in dependencies.items()],
)
|
from base64 import b64encode
from django import template
from LandingPage.models import *
register = template.Library()
@register.filter
def bin_2_img(_bin):
    """Template filter: base64-encode a binary blob (e.g. for inline images).

    Returns None (renders as empty) when the value is None, matching the
    original's implicit fall-through.
    """
    if _bin is None:
        return None
    return b64encode(_bin).decode('utf-8')
@register.filter(name='getVideos')
def getVideos(id):
    """Template filter: fetch the course video whose Vid equals ``id``.

    NOTE(review): ``CourseVdeo`` looks like a typo of ``CourseVideo`` -- it
    must match the actual model name in LandingPage.models; confirm before
    renaming anything.
    """
    video = CourseVdeo.objects.get(Vid=id)
    return video
|
# Generated by Django 3.2.3 on 2021-05-17 18:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the News model with a CASCADE
    # foreign key to main.category.
    dependencies = [
        ('main', '0002_rename_cat_category'),
    ]
    operations = [
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='Title')),
                ('news_img', models.ImageField(upload_to='images/')),
                ('details', models.TextField()),
                ('time', models.DateTimeField(auto_now_add=True)),  # stamped once at creation
                ('cat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.category')),
            ],
        ),
    ]
|
import math
import os
import random
import re
import sys
def squares(a, b):
    """Count the perfect squares x*x lying in the closed interval [a, b].

    The original used ``int(x ** 0.5)`` bounds, which is inexact for large
    values (float sqrt can land one below/above the true integer root) and
    raises for negative ``a`` (``(-k) ** 0.5`` is complex).  ``math.isqrt``
    is exact for arbitrarily large ints, so the count is computed directly.
    """
    if b < a or b < 0:
        return 0  # empty interval, or no square can be <= a negative bound
    lo = max(a, 0)
    hi_root = math.isqrt(b)                              # largest x with x*x <= b
    lo_root = 0 if lo == 0 else math.isqrt(lo - 1) + 1   # smallest x with x*x >= lo
    return max(hi_root - lo_root + 1, 0)
if __name__ == '__main__':
    # HackerRank harness: read q queries "a b" from stdin, write one count
    # per line to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    q = int(input())  # number of queries
    for q_itr in range(q):
        ab = input().split()
        a = int(ab[0])
        b = int(ab[1])
        result = squares(a, b)
        fptr.write(str(result) + '\n')
    fptr.close()
|
# Written By Saurav Paul
from tools.json_manager import JsonManager as JM
# Interaction-channel toggles: which input/output modes the bot uses.
interaction_setting = {
    'voice_reply' : False,
    'text_reply' : True,
    'voice_read+voice_reply' : False,
    'text_read' : True,
}
# Bot identity / voice configuration.
bot = {
    'name' : 'Jarvis',  # You can change the bot name here
    'gender' : 'male',  # Whatever you want ;p
    'Boss' : 'Saurav Paul',  # You can put your name here ;p
    'voice_engine' : 'robotic',  # change to 'gTTS' for a more natural voice (online)
}
START_SCREEN_NAME = bot['name']  # String used for the start-screen banner
def update_bot(orginal_path):
    """Persist the module-level ``bot`` config to <orginal_path>/settings/bot.json."""
    target = orginal_path + '/settings/bot.json'
    JM.json_write(target, bot)
def read_bot(orginal_path):
    """Load and return the bot config from <orginal_path>/settings/bot.json.

    The original printed the dict and returned None, with ``return bot``
    left commented out; the return is restored (callers that ignored the
    previous None result are unaffected) and the debug print dropped.
    """
    f = orginal_path + '/settings/bot.json'
    bot = JM.json_read(f)
    return bot
DEBUG = False  # NOTE(review): module-wide debug flag; its consumers are not visible in this chunk
import numpy
from nltk.sentiment.vader import SentimentIntensityAnalyzer

# Construct the analyzer exactly once: each SentimentIntensityAnalyzer()
# reloads the VADER lexicon from disk, and the original rebuilt it for
# every single line of both files on every threshold step.
analyzer = SentimentIntensityAnalyzer()

# Sweep the compound-score threshold from 0.00 to 0.49 and report accuracy
# on the positive/negative sample files at each step.
for i in numpy.arange(0, 0.5, 0.01):
    pos_count = 0
    pos_correct = 0
    with open("positive.txt", "r") as f:
        for line in f.read().split('\n'):
            sen = analyzer.polarity_scores(line)
            if sen['compound'] >= i:
                pos_correct += 1
            pos_count += 1
    neg_count = 0
    neg_correct = 0
    with open("negative.txt", "r") as f:
        for line in f.read().split('\n'):
            sen = analyzer.polarity_scores(line)
            if sen['compound'] <= -i:
                neg_correct += 1
            neg_count += 1
    print("\nValue = {} ".format(i))
    print("Positive accuracy = {}% via {} samples".format(pos_correct/pos_count*100.0, pos_count))
    print("Negative accuracy = {}% via {} samples".format(neg_correct/neg_count*100.0, neg_count))
    avj = ((pos_correct/pos_count*100.0)+(neg_correct/neg_count*100.0))/2
    print("Average = {} ".format(avj))
|
import networkx as nx
from networkx.exception import NetworkXError
from networkx.algorithms.bipartite import random_graph \
as bipartite_random_graph
from warnings import warn
# from IPython import embed
import numpy as np
#from pathos.multiprocessing import ProcessingPool
import pymp
class SBM(nx.DiGraph):
    '''
    SBM class. The additional arguments of the class are:
    Parameters
    ----------
    p : list of proportions for the node sets of the frame, should sum to 1
    deg : expected degrees of the edge set, dictionary with keys (node1, node2)
          and values degree12
    '''
    # NOTE(review): this class uses Python-2-only syntax (print statements in
    # spectrum/sample/detailed_balance, dict.has_key) and legacy APIs
    # (self.node[...], np.complex, old nx.set_node_attributes signature) --
    # it will not run unmodified on Python 3 / networkx 2.x; confirm target
    # environment before porting.
    #num_threads = 4
    def __init__(self, data=None, p={}, deg={}):
        # NOTE(review): mutable default arguments ({}) are shared across
        # calls; harmless here only because they are never mutated.
        super(SBM,self).__init__(data)
        if set(self.nodes()) != set(p.keys()):
            raise NetworkXError('node set does not match keys in p dict')
        nx.set_node_attributes(self, p, name='p')
        if set(self.edges()) != set(deg.keys()):
            raise NetworkXError('edge set does not match keys in deg dict')
        nx.set_edge_attributes(self, deg, name='deg')
        if not self.detailed_balance():
            raise NetworkXError('detailed balance not satisfied')

    def detailed_balance(self):
        '''
        Check the detailed balance conditions for the given Frame.
        These are that p[u] deg[u,v] == p[v] deg[v,u],
        where p and deg are the node proportion and degree attributes,
        respectively.
        Returns
        -------
        True or False
        '''
        for e in self.edges():
            u = e[0]
            v = e[1]
            k_uv = self.adj[u][v]['deg']
            if self.has_edge(v,u):
                # check detailed balance
                k_vu = self.adj[v][u]['deg']
                pu = self.node[u]['p']
                pv = self.node[v]['p']
                try:
                    np.testing.assert_approx_equal(pu*k_uv,pv*k_vu)
                except AssertionError:
                    print ('detailed balance not satisfied for edges %s and %s '
                           'and possibly others') % (str((u,v)), str((v,u)))
                    return False
            else:
                # it cannot be satisfied if edges aren't reciprocated
                print ('detailed balance cannot be satisfied for '
                       'unreciprocated edges, %s missing') % str((v,u))
                return False
        return True

    def _yfun_iterate(self, y, z, n_replica, alpha):
        '''
        MCMC iteration of the Y equations
        '''
        # Uniformly sample k replica-column indices in [0, n).
        sample_neighbors = lambda k, n: \
            (np.random.uniform(size=k) * n).astype(int)
        def _update_col(self, y, z, n_replica, alpha, edgelist):
            # Recompute one replica column of Y over all directed edges.
            y_col = np.zeros((y.shape[0],1), dtype=np.complex)
            other_col=range(n_replica)
            for idx, e in enumerate(edgelist):
                (u,v) = e
                sumval = 0.0
                for w in self.neighbors(v):
                    widx = edgelist.index((v,w))
                    k_vw = self.adj[v][w]['deg']
                    if w == u:
                        # select excess degree
                        k_samp=_sample_excess_degree(k_vw)
                    else:
                        # select degree
                        k_samp=_sample_degree(k_vw)
                    # select neighbors
                    # neighbor_idx=np.random.choice(other_col,
                    #                               k_samp,
                    #                               replace=True)
                    # neighbor_idx = np.random.multinomial(n_replica,
                    #                                      p=probs,
                    #                                      size=k_samp)
                    neighbor_idx = sample_neighbors(k_samp, n_replica)
                    for col in neighbor_idx:
                        sumval += y[widx,col]
                    sumval += k_samp*alpha
                y_col[idx] = -1.0/(z + sumval)
            return y_col
        assert y.shape == (self.number_of_edges(), n_replica)
        # Serial update of every replica column.
        yout = [_update_col(self, y, z, n_replica, alpha,
                            list(self.edges()))
                for col in range(n_replica)]
        yout = np.hstack(tuple(yout))
        # ## pymp parallel version
        # yout = pymp.shared.array((y.shape[0], n_replica), dtype=np.complex)
        # with pymp.Parallel() as p:
        #     for col in p.xrange(n_replica):
        #         yout[:,col] = _update_col(self, y, z, n_replica, alpha,
        #                                   self.edges()).flatten()
        # ## pathos parallel version
        # pool = ProcessingPool(self.num_threads)
        # yout = pool.map(lambda x:
        #                 _update_col(self, y, z, n_replica, alpha, edgelist),
        #                 range(n_replica))
        # yout = np.hstack(tuple(yout))
        #pool.close()
        return yout

    def _xfun_iterate(self, y, z, n_replica, n_iter, alpha):
        '''
        MCMC iteration of the X equations
        '''
        # Uniformly sample k replica-column indices in [0, n).
        sample_neighbors = lambda k, n: \
            (np.random.uniform(size=k) * n).astype(int)
        def _update_col(self, y, z, n_replica, alpha, nodelist, edgelist):
            # Recompute one replica column of X over all nodes.
            x_col = np.zeros((self.number_of_nodes(),1), dtype=np.complex)
            #other_col=range(n_replica)
            for idx, v in enumerate(nodelist):
                sumval = 0.0
                for w in self.neighbors(v):
                    widx = edgelist.index((v,w))
                    k_vw = self.adj[v][w]['deg']
                    # sample degree
                    k_samp = _sample_degree(k_vw)
                    # select neighbors
                    # neighbor_idx=np.random.choice(other_col,k_samp,
                    #                               replace=True)
                    # neighbor_idx = np.random.multinomial(n_replica,
                    #                                      p=probs,
                    #                                      size=k_samp)
                    neighbor_idx = sample_neighbors(k_samp,n_replica)
                    for col in neighbor_idx:
                        sumval += y[widx,col]
                    sumval += alpha*k_samp
                x_col[idx] = -1.0/(z + sumval)
            return x_col
        assert y.shape == (self.number_of_edges(), n_replica, n_iter)
        # Flatten the (replica, iteration) axes so each X update can draw
        # from every stored Y sample.
        y = np.reshape(y, (self.number_of_edges(), n_replica * n_iter))
        xout = [_update_col(self, y, z, n_replica * n_iter, alpha,
                            self.nodes(), list(self.edges()))
                for col in range(n_replica)]
        xout = np.hstack(tuple(xout))
        # ## pymp parallel version
        # xout = pymp.shared.array((self.number_of_nodes(), n_replica),
        #                          dtype=np.complex)
        # with pymp.Parallel() as p:
        #     for col in p.xrange(n_replica):
        #         xout[:,col] = _update_col(self, y, z, n_replica, alpha,
        #                                   self.nodes(), self.edges()).flatten()
        ## pathos parallel version
        # pool = ProcessingPool(self.num_threads)
        # xout = pool.map(lambda x:
        #                 _update_col(self, y, z, n_replica, alpha,
        #                             nodelist, edgelist),
        #                 range(n_replica))
        # xout = np.hstack(tuple(xout))
        #pool.close()
        return xout

    def spectrum(self, xs, n_replica=100, epsilon=0.01, alpha=0, offset=1.0,
                 y_max_iter=100, y_transient=50,
                 x_max_iter=10, transient_delay=1,
                 parallel=True):
        # Estimate the spectral density at each point of xs via replica MCMC:
        # iterate Y to (approximate) stationarity, average, then solve X and
        # take the imaginary part of p-weighted mean X as the density.
        assert self.detailed_balance(), 'detailed balance not satisfied'
        assert y_max_iter > y_transient, 'y_max_iter should be > y_transient'
        # setup some vars
        N = self.number_of_edges()
        y0 = offset*np.ones((N,1)) + offset*np.ones((N,1))*1.0j
        ps = [d['p'] for n,d in self.nodes(data=True)]
        # parallel compute density at each point x
        density_vec = pymp.shared.array(xs.shape, dtype='float64')
        with pymp.Parallel(if_=parallel) as p:
            for idx in p.xrange(len(xs)):
                # for idx, x in enumerate(xs):
                x = xs[idx]
                y_pop = np.tile(y0,(1,n_replica))
                x_pop = np.zeros((self.number_of_nodes(), n_replica),
                                 dtype=np.complex)
                y_avg = np.zeros((N, n_replica, int(y_max_iter - y_transient)),
                                 dtype=np.complex)
                x_avg = np.zeros((self.number_of_nodes(),
                                  n_replica,
                                  x_max_iter),
                                 dtype=np.complex)
                z = x + epsilon*1.0j  # small imaginary part regularizes the resolvent
                # Burn-in iterations (discarded).
                for i in range(y_transient):
                    y_pop = self._yfun_iterate(y_pop, z, n_replica, alpha)
                # Post-transient iterations are recorded for averaging.
                for i in range(y_max_iter - y_transient):
                    y_pop = self._yfun_iterate(y_pop, z, n_replica, alpha)
                    y_avg[:,:,i] = y_pop
                for i in range(x_max_iter):
                    x_pop = self._xfun_iterate(y_avg, z, n_replica,
                                               y_avg.shape[2], alpha)
                    x_avg[:,:,i] = x_pop
                #x_pop = self._xfun_iterate(y_pop, z, n_replica, alpha)
                #x_pop = self._xfun_iterate(y_avg, z, n_replica, alpha)
                #import pdb; pdb.set_trace()
                x_soln = np.mean(np.reshape(x_avg,
                                            (self.number_of_nodes(),
                                             n_replica * x_max_iter)),
                                 axis=1)
                #x_soln = np.mean(x_pop,axis=1)
                # print "y"
                # print y_pop
                # print "avg x"
                # print x_soln
                density = np.imag(np.dot(ps, x_soln))/np.pi
                print "density(%0.3f) = %f" % (x,density)
                density_vec[idx] = density
        return density_vec

    def base_matrices(self):
        # P: diagonal of node proportions; K: expected-degree adjacency;
        # Q: row-normalized P*K (transition-like matrix).
        P=np.matrix(np.diag([d['p'] for n,d in self.nodes(data=True)]))
        K=nx.linalg.adjacency_matrix(self, weight='deg').todense()
        Q=P*K/np.tile(np.sum(P*K,axis=1),(1,self.number_of_nodes()))
        return P,K,Q

    def sample(self,n):
        '''
        Sample a random graph from the frame family.
        Parameters
        ----------
        n : number of nodes in resulting graph
        Returns
        -------
        nx.Graph
        '''
        def _extract_blocks(A, n1, n2):
            # Split a bipartite adjacency matrix into its off-diagonal blocks.
            X = A[0:n1, n1:n1+n2]
            Xt = A[n1:n1+n2, 0:n1]
            return X, Xt
        adj_mat = np.zeros((n,n))
        n_block = np.zeros(self.number_of_nodes(),dtype=int)
        # check realizability and fill n_block
        for u in self.nodes():
            p = self.node[u]['p']
            try:
                np.testing.assert_almost_equal(p*n, int(p*n))
            except AssertionError:
                print("n*p is not integer for node %d, n=%f" % (u,n))
                raise NetworkXError('frame is not realizable')
            n_block[u] = int(p*n)
        n_blocksum=np.cumsum(n_block)
        # fill in adj_mat
        traversed={}
        for e in self.edges():
            traversed[e]=1
            u = e[0]
            v = e[1]
            k_uv = self.adj[u][v]['deg']
            k_vu = self.adj[v][u]['deg']
            n_u = int(n*self.node[u]['p'])
            n_v = int(n*self.node[v]['p'])
            if u == v:
                # on-diagonal block
                g1 = nx.generators.fast_gnp_random_graph(n_u, float(k_uv)/n_u)
                X = np.array(nx.to_numpy_matrix(g1))
                if u == 0:
                    i_lower=0
                else:
                    i_lower=n_blocksum[u-1]
                adj_mat[i_lower:n_blocksum[u],i_lower:n_blocksum[u]] = X
            elif not traversed.has_key((v,u)):
                # off-diagonal block
                g1 = bipartite_random_graph(n_u, n_v, float(k_uv)/n_v)
                X,Xt = _extract_blocks(nx.to_numpy_matrix(g1), n_u, n_v)
                if u == 0:
                    i_lower=0
                else:
                    i_lower=n_blocksum[u-1]
                if v == 0:
                    j_lower=0
                else:
                    j_lower=n_blocksum[v-1]
                adj_mat[i_lower:n_blocksum[u],j_lower:n_blocksum[v]] = X
                adj_mat[j_lower:n_blocksum[v],i_lower:n_blocksum[u]] = Xt
        adj_mat=np.matrix(adj_mat)
        # adj_mat now filled in, so generate graph
        g = nx.from_numpy_matrix(adj_mat)
        # Annotate every sampled node with the index of its originating block.
        b = dict(zip(range(0,n_blocksum[0]), [0]*n_blocksum[0]))
        for i in range(1,self.number_of_nodes()):
            b.update(dict(zip(range(n_blocksum[i-1],n_blocksum[i]),
                              [i]*n_block[i])))
        nx.set_node_attributes(g, 'block', b)
        return g
def _sample_degree(k):
    """Draw a realized degree for expected degree ``k`` (Poisson model)."""
    return np.random.poisson(k)
def _sample_excess_degree(k):
    """Draw a realized excess degree for expected degree ``k`` (Poisson model;
    identical to _sample_degree in this implementation)."""
    return np.random.poisson(k)
|
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
from PIL import Image
import io
import sys
from matplotlib.pyplot import imshow
from torch import topk
from torch.nn import functional as F
class DenseNet121(nn.Module):
    """DenseNet-121 backbone whose classifier is replaced by a
    Linear(num_features -> out_size) + Sigmoid head.

    NOTE: pretrained=True downloads ImageNet weights on first use; the
    Sigmoid output suggests per-class independent probabilities -- confirm
    the loss used by the training code matches (e.g. BCE, not CrossEntropy).
    """
    def __init__(self, out_size=2):
        super(DenseNet121,self).__init__()
        self.densenet121=torchvision.models.densenet121(pretrained=True)
        # Swap the stock 1000-way classifier for an out_size-way sigmoid head.
        num_features=self.densenet121.classifier.in_features
        self.densenet121.classifier=nn.Sequential(
            nn.Linear(num_features,out_size),
            nn.Sigmoid())

    def forward(self, x):
        # Full backbone + replaced head; returns values in (0, 1).
        x=self.densenet121(x)
        return x
|
from rest_framework import serializers
from rest_framework.utils import model_meta
from .models import Topic, Preference, Medium
class TopicSerializer(serializers.ModelSerializer):
    """Serializes Topic (id, name, description).

    The empty validators list on 'name' disables the default uniqueness
    check so nested writes can reference an existing topic by name.
    """
    class Meta:
        model = Topic
        fields = ('id', 'name', 'description')
        extra_kwargs = {
            'name': {
                'validators': [],
            },
        }
class MediumSerializer(serializers.ModelSerializer):
    """Serializes Medium including its parent preference reference."""
    class Meta:
        model = Medium
        fields = ('id', 'medium', 'medium_value', 'status', 'preference')
class PreferenceMediumSerializer(serializers.ModelSerializer):
    """Medium serializer for nesting inside a Preference (omits 'preference',
    which is implied by the parent)."""
    class Meta:
        model = Medium
        fields = ('id', 'medium', 'medium_value', 'status')
class PreferenceSerializers(serializers.ModelSerializer):
    """Preference serializer with a nested topic (matched by name on write)
    and nested read/write mediums."""
    topic = TopicSerializer()
    mediums = PreferenceMediumSerializer(many=True, required=False)
    #created_at = serializers.SerializerMethodField(method_name='get_created_at')
    #updated_at = serializers.SerializerMethodField(method_name='get_updated_at')

    class Meta:
        model = Preference
        #fields = ('uuid', 'mediums', 'user_ref', 'opted_in', 'entity', 'entity_id', 'topic', 'updated_at', 'created_at')
        fields = ('id', 'user_ref', 'opted_in', 'entity', 'entity_id', 'topic', 'mediums')

    def create(self, validated_data):
        # Resolve the nested topic to an existing row by name, then create
        # the preference and each nested medium under it.
        data = validated_data
        mediums = validated_data.pop('mediums', [])
        topic = validated_data.pop('topic')
        topic_instance = Topic.objects.get(name=topic.get('name'))
        preference = Preference.objects.create(**validated_data, topic=topic_instance)
        for medium in mediums:
            preference.mediums.create(**medium)
        return preference

    def get_created_at(self, instance):
        # Used only by the commented-out SerializerMethodField above.
        return instance.created_at.isoformat()

    def get_updated_at(self, instance):
        # Used only by the commented-out SerializerMethodField above.
        return instance.updated_at.isoformat()

    def update(self, instance, validated_data):
        # NOTE(review): 'mediums' is popped but never applied -- nested
        # medium updates are silently dropped on update; confirm intent.
        topic = validated_data.pop('topic')
        mediums = validated_data.pop('mediums', [])
        topic_instance = Topic.objects.get(name=topic.get('name'))
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.topic = topic_instance
        instance.save()
        return instance
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 18:28:19 2020
@author: Chethan
"""
# Importing libraries
import numpy as np, pandas as pd
import matplotlib.pyplot as plt, seaborn as sb
# Importing Dataset
# Importing Dataset
range1 = [i for i in range(0,2)]  # read only the first two columns
df = pd.read_csv(r"C:/Users/Chethan/Downloads/preprocessed_dataset_final.csv", usecols = range1)
#ds = df.drop(['day'], axis = 1)
col_number = 1  # NOTE(review): only consumed by the commented-out savefig path below
for col in df:
    if col != 'day':
        # Column names look like "<origin>_<destination>" -- TODO confirm.
        splits = str(col).split("_")
        o = splits[0]
        d = splits[1]  # o/d are only used by the commented-out chart title
        # Visualize the trends in data
        sb.set_style('darkgrid')
        # ds[col].plot(kind = 'line', legend = 'reverse', title = 'Flights count between airports ' + o + " and " + d + " over time" )
        # plt.legend(loc = 'upper right', shadow = True, bbox_to_anchor = (1.35, 0.8))
        # plt.show()
        df.plot.line(x='day', y= col)
        plt.show()
        # path = r'C:/Users/Chethan/Desktop/test/'
        # path = path + str(col_number) + "_" + str(col) + ".png"
        # plt.savefig(path,bbox_inches='tight')
        col_number = 1 + col_number
plt.clf()
from BasicGame import *
from pygame import *
from pygame.locals import *
from math import e, pi, cos, sin, sqrt
from random import uniform, randint
import time
#add your classes here
from Player import *
from Projectile import *
# from Alien import *
from Wave import *
from UFO import *
#constants
FPS = 60  # Frames per second (target frame rate for the game loop)
class SpaceInvaders(BasicGame):
def __init__(self):
self.w, self.h = 800, 800
self.bkg_color = (0,0,0)
BasicGame.__init__(self, size=(self.w, self.h))
self.bkgimg = pygame.image.load('images/background.jpg').convert_alpha()
self.bkgimg = pygame.transform.scale(self.bkgimg, (self.w, self.h))
self.goimg = pygame.image.load('images/game-over.jpg').convert_alpha()
self.stimg = pygame.image.load('images/game-menu.png').convert_alpha()
#create walls for the game
self.LEFT_WALL = 0
self.RIGHT_WALL = self.w
self.TOP_WALL = 0
self.BOTTOM_WALL = self.h
self.shooting = False
self.ufo = False
self.count = 0
self.score = 0
self.alienshooting = False
self.playeralive = False
self.startmenu = True
self.gameover = False
self.winmenu = False
#create a single Player
self.Player = Player(self.w / 2, self.h - 75,0,0,5)
self.UFO = UFO(self.LEFT_WALL, self.TOP_WALL+50, 5 , 0 , 'mystery.png')
self.Wave = Wave(5, 10, 50, 100, 125,10,5)
def update(self):
self.keyPoll()
self.draw()
if self.playeralive == True:
self.count += 1
self.Player.move()
self.win()
if self.ufo == True:
self.UFO.move()
if self.count%30 == 0:
self.count = 0
self.Wave.move()
if self.ufo == False:
ufochance = randint(0,10)
if ufochance == 1:
self.ufo = True
self.handle_collisions(self.Player, self.UFO, self.Wave)
if self.shooting == True:
self.PlayerProjectile.move()
alienshotchance = randint(0,10)
if alienshotchance == 1:
if self.alienshooting == False:
self.alienshooting = True
self.Wave.shoot()
if self.alienshooting == True:
self.Wave.AlienProjectile.move()
def handle_collisions(self, Player, UFO, Wave):
if Player.right() > self.RIGHT_WALL:
Player.x = self.LEFT_WALL
elif Player.left() < self.LEFT_WALL:
Player.x = self.RIGHT_WALL-Player.right()
elif Player.top() < self.TOP_WALL:
Player.bounce_vert()
elif Player.bottom() > self.BOTTOM_WALL:
Player.bounce_vert()
if self.shooting == True:
if self.PlayerProjectile.bottom() < self.TOP_WALL:
self.shooting = False
self.PlayerProjectile.hit = False
if self.PlayerProjectile.top() < Wave.bottom() and self.PlayerProjectile.left() < Wave.right() and self.PlayerProjectile.right() > Wave.left():
if Wave.kill((int(((self.PlayerProjectile.left()-self.PlayerProjectile.right())/2)+self.PlayerProjectile.left()),int(self.PlayerProjectile.top()))) == True:
self.shooting = False
self.PlayerProjectile.hit = False
self.score += 2
if UFO.dead == False:
if self.PlayerProjectile.top() < UFO.bottom() and self.PlayerProjectile.left() > UFO.left() and self.PlayerProjectile.right() < UFO.right():
self.shooting = False
self.PlayerProjectile.hit = True
self.ufo = False
UFO.x = self.LEFT_WALL-UFO.right()/2
self.score += randint(3,10)
if self.alienshooting == True:
if Wave.AlienProjectile.bottom() > self.BOTTOM_WALL:
self.alienshooting = False
Wave.AlienProjectile.hit = False
if Wave.AlienProjectile.bottom() > Player.top() and Wave.AlienProjectile.left() < Player.right() and Wave.AlienProjectile.right() > Player.left():
self.playeralive = False
self.alienshooting = False
self.gameover = True
Wave.AlienProjectile.hit = False
if UFO.right() > self.RIGHT_WALL:
self.ufo = False
UFO.x = self.LEFT_WALL
if self.count%30 == 0:
if Wave.right() > self.RIGHT_WALL:
Wave.bounce_horiz()
Wave.move_vert()
elif Wave.left() < self.LEFT_WALL:
Wave.bounce_horiz()
Wave.move_vert()
#add code for the other three walls
def keyPoll(self):
#use this function if you want to handle multiple key presses
#this function must be called in update
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT] and keys[pygame.K_DOWN]:
pass #maybe "right" player moves down and right at same time?
if keys[pygame.K_d] and keys[pygame.K_x]:
pass #maybe "left" player moves down and right
def set_Player_color(self, color):
self.Player.set_color(color)
def keyDown(self, key):
if key == pygame.K_RIGHT:
self.Player.dx = 2
elif key == pygame.K_LEFT:
self.Player.dx = -2
elif key == pygame.K_SPACE:
if self.shooting == False:
self.PlayerProjectile = Projectile(self.Player.imgmid, self.Player.y, 0 , -10 , 'laser.png' )
self.shooting = True
else:
pass
def keyUp(self, key):
    """Handle a key release: stop movement and drive the menu screens."""
    # Releasing any key halts horizontal movement.
    self.Player.dx = 0
    if self.gameover:
        # Short pause so the restart key press is deliberate.
        time.sleep(.5)
        if key == pygame.K_SPACE:
            self.reset()
        return
    if self.startmenu:
        if key == pygame.K_SPACE:
            self.reset()
            self.startmenu = False
            self.playeralive = True
        return
    if self.winmenu:
        time.sleep(.5)
        # TAB restarts from scratch, SPACE keeps the score and continues.
        if key == pygame.K_TAB:
            self.reset()
        if key == pygame.K_SPACE:
            self.gamecontinue()
def mouseUp(self, button, pos):
    """Mouse-button-release hook; currently a stub (left button unused)."""
    # self.Wave.kill(pos)
    if button == 1:
        pass
def mouseDown(self, button, pos):
    """Mouse-button-press hook; currently a stub (left button unused)."""
    if button == 1:
        pass
def mouseMotion(self, buttons, pos, rel):
    """Mouse-move hook; currently a stub (drag with left button unused)."""
    # `buttons` is the (left, middle, right) pressed-state triple.
    left, mid, right = buttons
    if left == 1:
        pass
def reset(self):
    """Return to the start menu and wipe all round state, including score."""
    self.gameover = False
    self.winmenu = False
    self.shooting = False
    self.alienshooting = False
    self.startmenu = True
    self.score = 0
    # Rebuild the alien wave for the next round.
    self.Wave.reset()
def gamecontinue(self):
    """Start the next wave while keeping the current score."""
    self.playeralive = True
    self.startmenu = False
    self.gameover = False
    self.winmenu = False
    self.shooting = False
    self.alienshooting = False
    # Fresh wave; note the score is intentionally NOT cleared here.
    self.Wave.reset()
def scoretext(self, image, text, size, x, y):
    """Render "Score: <text>" centered at (x, y) onto `image`.

    Delegates to text() so the font loading and blitting logic lives in
    exactly one place instead of being duplicated here.
    """
    self.text(image, "Score: " + text, size, x, y)
def text(self, image, text, size, x, y):
    """Render `text` in white, centered at (x, y), onto `image`."""
    font = pygame.font.Font('fonts/space_invaders.ttf', size)
    surface = font.render(text, True, (255,255,255))
    rect = surface.get_rect()
    # midtop centers the text horizontally at x with its top edge at y.
    rect.midtop = (x, y)
    image.blit(surface, rect)
def win(self):
    """Switch to the win screen once the whole wave is dead."""
    if self.Wave.wavedie() != True:
        return
    self.winmenu = True
    self.playeralive = False
def draw(self):
    """Draw the current frame: gameplay, game-over, start, or win screen."""
    if self.playeralive == True:
        # Active gameplay: background, player, HUD, projectiles, wave.
        self.screen.blit(self.bkgimg,(0,0)) #clear screen
        self.Player.draw(self.screen)
        self.scoretext(self.screen, str(self.score), 18, self.RIGHT_WALL/2, 10)
        if self.shooting == True:
            self.PlayerProjectile.draw(self.screen)
        if self.ufo == True:
            self.UFO.draw(self.screen)
        self.Wave.draw(self.screen)
        if self.alienshooting == True:
            self.Wave.AlienProjectile.draw(self.screen)
    elif self.gameover == True:
        # Game-over screen with final score.
        self.screen.fill(self.bkg_color)
        self.text(self.screen, str("Game Over"), 75, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2-100))
        self.text(self.screen, str("Press SPACE to restart"), 25, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2))
        self.scoretext(self.screen, str(self.score), 25, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2 + 50))
    elif self.startmenu == True:
        # Title screen.
        self.screen.blit(self.bkgimg,(0,0))
        self.text(self.screen, str("Space Invaders"), 75, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2-100))
        self.text(self.screen, str("Press SPACE to start"), 25, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2))
    elif self.winmenu == True:
        # Win screen: continue keeps the score, TAB restarts.
        self.screen.fill(self.bkg_color)
        self.text(self.screen, str("You Win"), 75, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2-100))
        self.text(self.screen, str("Press SPACE to continue or press TAB to restart"), 25, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2))
        self.scoretext(self.screen, str(self.score), 25, self.RIGHT_WALL/2, (self.BOTTOM_WALL/2 + 50))
# Run the game only when executed as a script, not when imported.
if __name__ == '__main__':
    s = SpaceInvaders()
    s.mainLoop(FPS)
|
def f(i):
    """Fuel for a module of mass `i`: floor(i / 3) - 2, clamped to 0."""
    # Masses below 6 would need negative fuel, which counts as zero.
    return 0 if i < 6 else i // 3 - 2
# Sanity checks taken from the puzzle statement.
assert f(12) == 2
assert f(14) == 2
assert f(1969) == 654
assert f(100756) == 33583
# Total fuel requirement: sum per-module fuel over every mass in the input.
with open("input.txt", "rt") as fi:
    masses = fi.read().splitlines()
print(sum(f(int(m)) for m in masses))
|
from requests_html import HTMLSession
import requests
import json
from hashlib import md5
import uuid
import datetime
google_api_key = ''
def get_verification_code(username):
    """Return the verification code for `username`.

    The code is defined as the first 16 hex characters of the MD5 digest
    of the UTF-8 encoded username.
    """
    digest = md5(username.encode('utf-8')).hexdigest()
    return digest[:16]
def verfiy_localbitcoins(username, lbc_username):
    """Check that the LocalBitcoins profile of `lbc_username` contains the
    verification code derived from `username`.

    Returns False when the profile does not exist, True/False otherwise
    depending on whether the code is present in the profile text.

    NOTE(review): the function name is misspelled ("verfiy") -- kept as-is
    because external callers may reference it.
    """
    session = HTMLSession()
    # Fake user agent to reduce chance of getting seen as a bot
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0', }
    response = session.request(url=f'https://localbitcoins.com/accounts/profile/{lbc_username}/', method='GET', headers=headers)
    # If the profile is invalid it will redirect to home
    # (presumably response.html.next() exposes the redirect target --
    # TODO confirm against the requests_html documentation).
    if response.html.next() == 'https://localbitcoins.com/':
        return False
    # The code must appear somewhere in the profile's free-text area.
    code_area = response.html.find('.overflow-catch', first=True)
    return code_area.text.find(get_verification_code(username)) != -1
def get_google_autocomplete_locations(location_string, session):
    """Query the Google Places autocomplete API for city predictions.

    Arguments:
        location_string: free-text (partial) location typed by the user.
        session: the user's request.session, used for API session tokens.

    Returns the list of predictions, or None on any HTTP/API failure.
    """
    session_id = _get_google_maps_platform_session_id(session)
    url = 'https://maps.googleapis.com/maps/api/place/autocomplete/json'
    # Pass the query string via `params` so requests URL-encodes it;
    # interpolating raw user text into the URL broke on spaces and
    # reserved characters.
    params = {
        'input': location_string,
        'key': google_api_key,
        'sessiontoken': session_id,
        'types': '(cities)',
    }
    response = requests.get(url, params=params)
    if response.status_code == 200:
        response_body = json.loads(response.content.decode('utf-8'))
        if response_body['status'] == 'OK':
            return response_body['predictions']
    return None
def get_google_location(place_id, session):
    """Fetch geometry and formatted address for a Google place id.

    Returns the 'result' dict from the Places Details API, or None on any
    HTTP/API failure. Always ends the current Maps Platform session.
    """
    session_id = _get_google_maps_platform_session_id(session)
    url = 'https://maps.googleapis.com/maps/api/place/details/json'
    # Let requests URL-encode the query parameters instead of building the
    # URL by string interpolation.
    params = {
        'placeid': place_id,
        'key': google_api_key,
        'sessiontoken': session_id,
        'fields': 'geometry,formatted_address',
    }
    response = requests.get(url, params=params)
    # A session ends after a series of autocomplete queries followed by a details request
    _clear_google_maps_platform_session_id(session)
    if response.status_code == 200:
        response_body = json.loads(response.content.decode('utf-8'))
        if response_body['status'] == 'OK':
            return response_body['result']
    return None
def _get_google_maps_platform_session_id(session):
"""Get a unique session id to use to interact with the google maps platform api
Takes the request.session argument from the user's request
Automatically creates a new session id if the current session timeouts
"""
# Try get google autocomplete session id for the user
# If the user does not have the session id, create it and add an expiration time/start time
# If the user does have the session id, check it hasn't expired
# Before making requests, ensure the user has a google maps platform session that has not expired
if (session.get('google_places_session_id', False)) and (datetime.datetime.now().timestamp() < session.get('google_places_expiration', datetime.datetime(1970,1,1).timestamp())):
session_id = session['google_places_session_id']
# print('using old gmp session')
return session_id
else:
session_id = str(uuid.uuid4())
session['google_places_session_id'] = session_id
# Google maps platform sessions expire after a few minutes
session['google_places_expiration'] = (datetime.datetime.now() + datetime.timedelta(seconds=120)).timestamp()
# print('new gmp session created')
return session_id
def _clear_google_maps_platform_session_id(session):
"""Clear the unique session id
Takes the request.session argument from the user's request
This should be done in order to close the session and ensure that new sessions use new session ids
Otherwise, the old session might be used and result in extra charges
"""
del session['google_places_session_id']
del session['google_places_expiration']
session.modified = True |
# encoding = utf-8
from flask import Flask, render_template, redirect, request, session, url_for
# 为下文session产生一个随机数
import os
from exts import db
# 引入配置文件
import config
from models import Users
from home.home import home_ob
from school.school import school_ob
from techn.techn import techn_ob
from talk.talk import talk_ob
from latter.latter import latter_ob
from about.about import about_ob
from login_register.login_register import login_register_ob
app = Flask(__name__)
# Load settings from the config module.
app.config.from_object(config)
# Bind this app to the shared SQLAlchemy instance from exts.
db.init_app(app)
# Session signing key.
# NOTE(review): os.urandom(24) generates a new key on every process start,
# which invalidates existing sessions on restart and breaks multi-worker
# deployments -- confirm this is acceptable.
app.config['SECRET_KEY'] = os.urandom(24)
app.register_blueprint(home_ob, url_prefix='/')
app.register_blueprint(school_ob, url_prefix='/school')
app.register_blueprint(techn_ob, url_prefix='/techn')
app.register_blueprint(talk_ob, url_prefix='/talk')
app.register_blueprint(latter_ob, url_prefix='/latter')
app.register_blueprint(about_ob, url_prefix='/about')
# NOTE(review): the same blueprint is registered twice under two prefixes;
# newer Flask versions reject duplicate blueprint names -- verify intended.
app.register_blueprint(login_register_ob, url_prefix='/login')
app.register_blueprint(login_register_ob, url_prefix='/register')
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the user (POST)."""
    # GET request: just show the login page.
    if request.method == 'GET':
        return render_template('login.html')
    # POST request: run the login check.
    else:
        # Credentials submitted by the form.
        userid = request.form.get('username')
        password = request.form.get('password')
        # Match username and password against the database.
        # NOTE(review): the password is compared in plain text here --
        # confirm whether Users.password should be a hash instead.
        user_login = Users.query.filter(Users.username == userid, Users.password == password).first()
        # Credentials matched: remember the user in the session.
        if user_login:
            session['session_username'] = userid
            return render_template('index2.html')
        else:
            return '请检测用户名或者密码输入是否正确'
@app.route('/register/', methods=['GET', 'POST'])
def register():
    """Render the signup form (GET) or create a new account (POST)."""
    if request.method == 'GET':
        return render_template('register.html')
    else:
        # Form fields submitted by the user.
        userid = request.form.get('username')
        email = request.form.get('email')
        password1 = request.form.get('password1')
        password2 = request.form.get('password2')
        # Check whether this email is already registered.
        user_register = Users.query.filter(Users.email == email).first()
        # Email already exists: refuse the registration.
        if user_register:
            return '该邮箱已经被注册'
        # Email is free: verify that both password fields match.
        else:
            if password1 != password2:
                return '两次输入的密码不一样,请检查后输入'
            else:
                # Passwords match: insert the new user row.
                # NOTE(review): the password is stored as submitted (no
                # hashing visible here) -- confirm intended.
                user = Users(username=userid, email=email, password=password1)
                # Stage the insert ...
                db.session.add(user)
                # ... and commit it.
                db.session.commit()
                # Send the user to the login page.
                return render_template('login.html')
@app.route('/logout/')
def logout():
    """Drop all session data and send the user back to the login page."""
    login_url = url_for('login')
    session.clear()
    return redirect(login_url)
# Template hook: injects the logged-in user into every rendered template.
@app.context_processor
def my_context_processor():
    """Expose the current Users row to templates as 'user'.

    A context processor must always return a dict, so an empty one is
    returned when nobody is logged in or the user row is missing.
    """
    username = session.get('session_username')
    if not username:
        return {}
    user = Users.query.filter(Users.username == username).first()
    return {'user': user} if user else {}
if __name__ == '__main__':
    # werkzeug.contrib was removed in Werkzeug 1.0; prefer the current
    # location of ProxyFix and fall back for older installations.
    try:
        from werkzeug.middleware.proxy_fix import ProxyFix
    except ImportError:
        from werkzeug.contrib.fixers import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.run(debug=True)
|
class Solution:
    def isPalindromeFiltered(self, s):
        """Alphanumeric palindrome check via a filtered copy (O(n) space).

        NOTE(review): this was originally also named isPalindrome and was
        therefore silently shadowed by the definition below; renamed so
        both implementations remain usable.
        """
        ans = [i.lower() for i in s if i.isalnum()]
        return ans == ans[::-1]

    def isPalindrome(self, s):
        """Alphanumeric palindrome check with two pointers (O(1) space)."""
        l, r = 0, len(s) - 1
        while l < r:
            # Skip non-alphanumeric characters from both ends.
            while l < r and not s[l].isalnum():
                l += 1
            while l < r and not s[r].isalnum():
                r -= 1
            if s[l].lower() != s[r].lower():
                return False
            l += 1
            r -= 1
        return True
if __name__ == '__main__':
    A = Solution()
    # print(x) with a single argument behaves identically on Python 2 and
    # 3; the bare Python 2 print statement was a syntax error under 3.
    print(A.isPalindrome("8V8K;G;K;V;"))
'''
Created on Sep 13, 2016
@author: Dayo
'''
from django.conf.urls import url, include
from .views import *
from .webhooks import *
# Routes for the reports app: report views plus provider webhook callbacks.
urlpatterns = [
    #url(r'^$', Index.as_view(), name='index'),
    url(r'^sms/$', SMSReport.as_view(), name='sms-reports'),
    url(r'^email/$', EmailReport.as_view(), name='email-reports'),
    url(r'^call/$', CallReport.as_view(), name='call-reports'),
    # Webhook endpoints invoked by the external providers.
    url(r'^sms/delivery/infobip/$', infobip_sms_delivery_report_callback, name='infobip-delivery-url'),
    url(r'^email/event/sendgrid/$', sendgrid_report_callback, name='sendgrid-event-url'),
    url(r'^webhook/cdr/$', fs_call_detail_report_callback, name='fs-cdr-url'),
]
# State the modeling assumption for the 5-year survival count.
print(
    "The number of participants that survived beyond 5 years in a cohort"
    " of N participants follows binomial distribution."
    "\nThe parameter is q.")
# -*- coding: utf-8 -*-
'''
Faire des tests sur les dimensions des fonctions, rapide juste un assert pour être sur
'''
import numpy as np
from src.Activation.ReLU import ReLU
from src.Activation.softmax import Softmax
from src.Loss.CESoftMax import CESoftMax
from src.Module.conv1D import Conv1D
from src.Module.flatten import Flatten
from src.Module.linear import Linear
from src.Module.sequential import Sequential
from src.Optim.optim import Optim
from src.Pooling.maxPool1D import MaxPool1D
from src.utils.utils import load_usps, transform_numbers
if __name__ == '__main__':
    # Get the data
    uspsdatatrain = "../data/USPS_train.txt"
    uspsdatatest = "../data/USPS_test.txt"
    alltrainx, alltrainy = load_usps(uspsdatatrain)
    alltestx, alltesty = load_usps(uspsdatatest)
    # One-hot encode the labels over the number of distinct classes.
    alltrainy_proba = transform_numbers(alltrainy, np.unique(alltrainy).shape[0])
    # Add a trailing channel dimension: (n_samples, n_pixels, 1).
    alltrainx = alltrainx.reshape((alltrainx.shape[0], alltrainx.shape[1], 1))
    alltestx = alltestx.reshape((alltestx.shape[0], alltestx.shape[1], 1))
    # Carve a validation split off the front of the test set.
    validation_size = 500
    allvalx = alltestx[:validation_size]
    allvaly = alltesty[:validation_size]
    alltestx = alltestx[validation_size:]
    alltesty = alltesty[validation_size:]
    # Get data values
    length = alltrainx.shape[1]
    # Network parameters
    gradient_step = 1e-3
    iterations = 10
    batch_size = 25
    kernel_size = 3
    chan_input = 1
    chan_output = 32
    stride = 1
    max_pool_stride = 2
    max_pool_kernel = 2
    # loss function
    sftmax = CESoftMax()
    # Network: Conv1D -> MaxPool -> Flatten -> Linear -> ReLU -> Linear.
    # NOTE(review): Linear(4064, ...) assumes 256-pixel USPS inputs:
    # (256 - 3 + 1) = 254 conv outputs, pooled to 127, times 32 channels
    # = 4064 -- confirm against load_usps output shape.
    net = Sequential([Conv1D(kernel_size, chan_input, chan_output, stride=stride),
                      MaxPool1D(max_pool_kernel, max_pool_stride),
                      Flatten(),
                      Linear(4064, 100),
                      ReLU(),
                      Linear(100, 10)
                      ])
    # Train networks
    opt = Optim(net=net, loss=sftmax, eps=gradient_step)
    opt.SGD(alltrainx, alltrainy_proba, batch_size, X_val=allvalx, Y_val=allvaly,
            f_val=lambda x: np.argmax(Softmax().forward(x), axis=1), maxiter=iterations, verbose=2)
    # Evaluate: softmax over logits, argmax as predicted class.
    predict = Softmax().forward(opt.predict(alltestx))
    y_hat = np.argmax(predict, axis=1)
    print("precision:", sum(y_hat == alltesty) / len(alltesty))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import logging.config
import sys
from logging.handlers import RotatingFileHandler
from src.settings import config
class CadastroLogger:
    """Custom logger for keeping track of the libreCatastro scraping.

    Wires three handlers onto one logger: stdout for DEBUG, a rotating
    error file for ERROR and a rotating tracking file for INFO.
    """
    def __init__(self, class_name):
        """Constructor.

        Arguments:
            class_name: name under which the logger is registered.

        NOTE(review): handlers are added on every instantiation; creating
        two instances with the same class_name duplicates output because
        logging.getLogger returns the same logger object -- verify.
        """
        '''default root console logging'''
        self.logger = logging.getLogger(class_name)
        self.logger.setLevel(logging.DEBUG)
        # Each handler pairs setLevel (lower bound) with a `levelno <=`
        # filter (upper bound), so it accepts a single level band.
        # NOTE(review): CRITICAL records (levelno 50) pass none of the
        # three filters and are therefore dropped -- confirm intended.
        '''console file logging'''
        debug_file_handler = logging.StreamHandler(sys.stdout)
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.addFilter(
            type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.DEBUG)}))
        '''error file logging'''
        error_file_handler = RotatingFileHandler(config['error_log_file'], mode='a', maxBytes=5 * 1024 * 1024,
                                                 backupCount=100, encoding='utf-8', delay=0)
        error_file_handler.setLevel(logging.ERROR)
        error_file_handler.addFilter(type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.ERROR)}))
        '''tracking file logging'''
        tracking_file_handler = RotatingFileHandler(config['tracking_log_file'], mode='a', maxBytes=5 * 1024 * 1024,
                                                    backupCount=100, encoding='utf-8', delay=0)
        tracking_file_handler.setLevel(logging.INFO)
        tracking_file_handler.addFilter(
            type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.INFO)}))
        self.logger.addHandler(debug_file_handler)
        self.logger.addHandler(error_file_handler)
        self.logger.addHandler(tracking_file_handler)
|
"""
Utility functions.
"""
import asyncio
import collections
import functools
import inspect
import io
import logging
import os
from typing import Set # noqa
import libnacl
import logbook
import logbook.compat
import logbook.more
# noinspection PyPackageRequirements
import lru
import wrapt
from .key import Key
__all__ = (
'enable_logging',
'disable_logging',
'get_logger',
'read_key_or_key_file',
'raise_server_error',
'randint',
'ViewIOReader',
'ViewIOWriter',
'async_lru_cache',
'aio_run',
'aio_run_decorator',
'aio_run_proxy_decorator',
'AioRunMixin',
)
_logger_group = logbook.LoggerGroup()
_logger_group.disabled = True
_logger_redirect_handler = logbook.compat.RedirectLoggingHandler()
_logger_convert_level_handler = logbook.compat.LoggingHandler()
def _convert_level(logging_level):
    # Map a logbook level to its stdlib `logging` equivalent (used before
    # applying levels to the redirected stdlib loggers).
    return _logger_convert_level_handler.convert_level(logging_level)
def enable_logging(level=logbook.WARNING, asyncio_level=None, aiohttp_level=None):
    """Enable the module's logbook logger group and capture asyncio/aiohttp logs.

    Arguments:
        - `level`: Default logbook level; also the fallback for the two
          optional levels after conversion to the stdlib scale.
        - `asyncio_level` / `aiohttp_level`: Optional levels for the
          redirected stdlib loggers.
    """
    # Determine levels
    level = logbook.lookup_level(level)
    converted_level = _convert_level(level)
    if asyncio_level is None:
        asyncio_level = converted_level
    else:
        asyncio_level = _convert_level(asyncio_level)
    if aiohttp_level is None:
        aiohttp_level = converted_level
    else:
        aiohttp_level = _convert_level(aiohttp_level)
    # Enable logger group
    _logger_group.disabled = False
    # Enable asyncio debug logging
    # NOTE(review): the env var only affects event loops created after
    # this point in the current process -- confirm that is sufficient.
    os.environ['PYTHONASYNCIODEBUG'] = '1'
    # Redirect asyncio logger
    logger = logging.getLogger('asyncio')
    logger.setLevel(asyncio_level)
    logger.addHandler(_logger_redirect_handler)
    # Redirect aiohttp logger
    logger = logging.getLogger('aiohttp')
    logger.setLevel(aiohttp_level)
    logger.addHandler(_logger_redirect_handler)
def disable_logging():
    """Undo enable_logging(): detach redirect handlers and disable the group."""
    # Reset aiohttp logger
    logger = logging.getLogger('aiohttp')
    logger.removeHandler(_logger_redirect_handler)
    logger.setLevel(logging.NOTSET)
    # Reset asyncio logger
    logger = logging.getLogger('asyncio')
    logger.removeHandler(_logger_redirect_handler)
    logger.setLevel(logging.NOTSET)
    # Disable asyncio debug logging
    # NOTE(review): raises KeyError if enable_logging() was never called
    # (the env var would be missing) -- confirm callers always pair them.
    del os.environ['PYTHONASYNCIODEBUG']
    # Disable logger group
    _logger_group.disabled = True
def get_logger(name=None, level=logbook.NOTSET):
    """
    Return a :class:`logbook.Logger`.
    Arguments:
        - `name`: The name of a specific sub-logger.
    """
    base_name = 'threema.gateway'
    full_name = base_name if name is None else '{}.{}'.format(base_name, name)
    # Register with the module's logger group so enable_logging /
    # disable_logging toggle this logger as well.
    logger = logbook.Logger(name=full_name, level=level)
    _logger_group.add_logger(logger)
    return logger
# TODO: Raises
def read_key_or_key_file(key, expected_type):
    """
    Decode a hex-encoded key or read it from a file.
    Arguments:
        - `key`: A hex-encoded key or the name of a file which contains
          a key.
        - `expected_type`: One of the types of :class:`Key.Type`.
    Return a :class:`libnacl.public.SecretKey` or
    :class:`libnacl.public.PublicKey` instance.
    """
    # EAFP: if `key` names a readable file, its first line holds the key;
    # otherwise treat `key` itself as the hex-encoded key.
    try:
        with open(key) as key_file:
            key = key_file.readline().strip()
    except IOError:
        pass
    return Key.decode(key, expected_type)
@asyncio.coroutine
def raise_server_error(response, error):
    """
    Raise a :class:`GatewayServerError` exception from a
    HTTP response. Releases the response before raising.
    Arguments:
        - `response`: A :class:`aiohttp.ClientResponse` instance.
        - `error`: The :class:`GatewayServerError`. to instantiate.
    Always raises :class:`GatewayServerError`.

    NOTE(review): asyncio.coroutine was removed in Python 3.11; this
    generator-based coroutine style requires an older runtime -- confirm
    the supported Python versions.
    """
    # Capture the status before releasing the connection.
    status = response.status
    yield from response.release()
    raise error(status)
def randint(a, b):
    """
    Return a cryptographically secure random integer N such that
    ``a <= N <= b``.
    """
    # randombytes_uniform(x) returns a value in [0, x), so the span must
    # be (b - a + 1) for the result to cover [a, b] inclusive. The
    # previous `randombytes_uniform(b) + a` could yield values above b
    # whenever a > 0, tripping the assertion below.
    n = libnacl.randombytes_uniform(b - a + 1) + a
    assert a <= n <= b
    return n
class ViewIOReader(io.RawIOBase):
    """Read-only, zero-copy reader over bytes or a memoryview.

    All reads return memoryview slices of the underlying buffer.
    """
    def __init__(self, bytes_or_view):
        super().__init__()
        if isinstance(bytes_or_view, bytes):
            bytes_or_view = memoryview(bytes_or_view)
        self._view = bytes_or_view
        self._offset = 0
        self._length = len(self._view)
    # IOBase methods
    def fileno(self):
        raise OSError('No file descriptors used')
    def isatty(self):
        return False
    def readable(self):
        return True
    def readline(self, size=-1):
        raise NotImplementedError
    def readlines(self, hint=-1):
        raise NotImplementedError
    def seek(self, offset, whence=os.SEEK_SET):
        """Move the read offset; returns the new absolute offset.

        NOTE(review): with SEEK_END a *positive* offset counts back from
        the end (offset = length - offset), unlike the io convention of
        negative offsets -- confirm callers rely on this.
        """
        if whence == os.SEEK_SET:
            pass
        elif whence == os.SEEK_CUR:
            offset += self._offset
        elif whence == os.SEEK_END:
            offset = self._length - offset
        else:
            raise ValueError('Invalid whence value')
        # Fix: accept offset 0 (previously `0 < offset` made seek(0) raise).
        if not 0 <= offset <= self._length:
            raise ValueError('Offset is outside the view length')
        self._offset = offset
        return offset
    def seekable(self):
        return True
    def tell(self):
        return self._offset
    def writable(self):
        return False
    # RawIOBase methods
    def read(self, size=-1):
        if size == -1:
            return self.readall()
        elif size < 0:
            raise ValueError('Negative size')
        # Clamp to the remaining bytes; returns a zero-copy slice.
        start, end = self._offset, min(self._offset + size, self._length)
        self._offset = end
        return self._view[start:end]
    def readall(self):
        return self.read(self._length - self._offset)
    def readinto(self, b):
        # NOTE(review): appends to `b` (requires bytearray) instead of
        # overwriting b[:n] as io.RawIOBase documents -- confirm callers
        # depend on the append semantics before changing.
        data = self.readall()
        b.extend(data)
        return len(data)
    # Custom methods
    def __len__(self):
        # Remaining (unread) bytes, not the total length.
        return self._length - self._offset
    def readexactly(self, size):
        """Read exactly `size` bytes or raise asyncio.IncompleteReadError."""
        data = self.read(size)
        if len(data) < size:
            raise asyncio.IncompleteReadError(data, size)
        else:
            return data
# Write-only, zero-copy sibling of ViewIOReader.
class ViewIOWriter(io.RawIOBase):
    """Collects bytes/memoryview chunks without copying until getvalue()."""
    def __init__(self, bytes_or_views=None):
        super().__init__()
        self._views = []
        self._length = 0
        for chunk in (bytes_or_views or []):
            self.writeexactly(chunk)
    # IOBase methods
    def fileno(self):
        raise OSError('No file descriptors used')
    def isatty(self):
        return False
    def readable(self):
        return False
    def seekable(self):
        return False
    def writable(self):
        return True
    # RawIOBase methods
    def write(self, bytes_or_view):
        """Append one chunk and return its length."""
        # Wrap plain bytes in a memoryview so storage stays zero-copy.
        if isinstance(bytes_or_view, bytes):
            bytes_or_view = memoryview(bytes_or_view)
        length = len(bytes_or_view)
        self._views.append(bytes_or_view)
        self._length += length
        return length
    def writelines(self, lines):
        raise NotImplementedError
    # Custom methods
    def __radd__(self, other):
        # `other + self` absorbs other's chunks into this writer.
        self.extend(other)
        return self
    def __len__(self):
        return self._length
    def getvalue(self):
        """Join all collected chunks into a single bytes object."""
        return b''.join(self._views)
    # noinspection PyProtectedMember
    def extend(self, other):
        """Append all chunks of another ViewIOWriter to this one."""
        self._views += other._views
        self._length += other._length
    def writeexactly(self, bytes_or_view):
        return self.write(bytes_or_view)
class _HashedSeq(list):
"""
This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hash_value'
# noinspection PyMissingConstructor
def __init__(self, tuple_):
self[:] = tuple_
self.hash_value = hash(tuple_)
def __hash__(self):
return self.hash_value
# noinspection PyPep8Naming
# Statistics tuple returned by the cache's info() method.
_CacheInfo = collections.namedtuple(
    'CacheInfo', ('hits', 'misses', 'maxsize', 'currsize'))
def _make_key(
    args, kwargs, typed,
    # Mutable defaults are intentional here: they are constants evaluated
    # once and never mutated (same idiom as CPython's functools).
    fast_types={int, str, frozenset, type(None)},
    kwargs_mark=(object(),),
):
    """
    Make a cache key from optionally typed positional and keyword arguments
    The key is constructed in a way that is flat as possible rather than
    as a nested structure that would take more memory.
    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper. This
    saves space and improves lookup speed.
    """
    key = args
    if kwargs:
        sorted_items = sorted(kwargs.items())
        # The unique marker object separates positional from keyword parts.
        key += kwargs_mark
        for item in sorted_items:
            key += item
    else:
        sorted_items = []
    if typed:
        # Append argument types so f(3) and f(3.0) get distinct keys.
        key += tuple(type(v) for v in args)
        if kwargs:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fast_types:
        # Single argument of a hash-caching type: skip the wrapper.
        return key[0]
    return _HashedSeq(key)
class _LRUCacheDict(lru.LRUCacheDict):
    """LRU cache dict that additionally tracks hit/miss statistics."""
    def __init__(self, *args, **kwargs):
        # Counters must exist before the base class initialises.
        self.hits = self.misses = 0
        super().__init__(*args, **kwargs)
    def __len__(self):
        # Delegate to the base class' size() accessor.
        return self.size()
    def info(self):
        """Report cache statistics"""
        return _CacheInfo(self.hits, self.misses, self.max_size, len(self))
    def __getitem__(self, key):
        # Count misses (KeyError propagates) and hits around the lookup.
        try:
            item = super().__getitem__(key)
        except KeyError:
            self.misses += 1
            raise
        else:
            self.hits += 1
            return item
    def clear(self):
        super().clear()
        # Statistics restart together with the cache contents.
        self.hits = self.misses = 0
def async_lru_cache(maxsize=1024, expiration=15 * 60, typed=False):
    """
    Least-recently-used cache decorator for asyncio coroutines.
    If *maxsize* is set to None, the LRU features are disabled and the
    cache can grow without bound.
    If *expiration* is set, cached values will be cleared after
    *expiration* seconds.
    If *typed* is True, arguments of different types will be cached
    separately. For example, f(3.0) and f(3) will be treated as distinct
    calls with distinct results.
    Arguments to the cached function must be hashable.
    View the cache statistics named tuple (hits, misses, maxsize,
    currsize) with f.cache_info(). Clear the cache and statistics
    with f.cache_clear(). Access the underlying function with
    f.__wrapped__.
    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    def decorating_function(func):
        # One cache instance per decorated coroutine.
        cache = _LRUCacheDict(max_size=maxsize, expiration=expiration)
        # NOTE(review): asyncio.coroutine was removed in Python 3.11;
        # this decorator requires an older runtime -- confirm target.
        @asyncio.coroutine
        def wrapper(*args, **kwargs):
            # Make cached key
            key = _make_key(args, kwargs, typed)
            # Get from cache
            try:
                return cache[key]
            except KeyError:
                pass
            # Miss, retrieve from coroutine
            value = yield from func(*args, **kwargs)
            cache[key] = value
            return value
        # Expose the cache and its management API on the wrapper.
        wrapper.cache = cache
        wrapper.cache_info = cache.info
        wrapper.cache_clear = cache.clear
        return functools.update_wrapper(wrapper, func)
    return decorating_function
def aio_run(coroutine, loop=None, close_after_complete=False):
    """
    Decorator to run an asyncio coroutine as a normal blocking
    function.
    Arguments:
        - `coroutine`: The asyncio coroutine or task to be executed.
        - `loop`: An optional :class:`asyncio.AbstractEventLoop`
          subclass instance.
        - `close_after_complete`: Close `loop` after the coroutine
          returned. Defaults to ``False``.
    Returns the result of the asyncio coroutine.
    Example:
    .. code-block::
        @asyncio.coroutine
        def coroutine(timeout):
            yield from asyncio.sleep(timeout)
            return True
        # Call coroutine in a blocking manner
        result = aio_run(coroutine(1.0))
        print(result)
    """
    if loop is not None:
        loop_ = loop
    else:
        # No loop supplied: use the current one, replacing it if it has
        # already been closed.
        loop_ = asyncio.get_event_loop()
        if loop_.is_closed():
            loop_ = asyncio.new_event_loop()
            asyncio.set_event_loop(loop_)
    # Block until the coroutine completes.
    result = loop_.run_until_complete(coroutine)
    if close_after_complete:
        loop_.close()
    return result
def aio_run_decorator(loop=None, close_after_complete=False):
    """
    Decorator to run an asyncio coroutine as a normal blocking
    function.
    Arguments:
        - `loop`: An optional :class:`asyncio.AbstractEventLoop`
          subclass instance.
        - `close_after_complete`: Close `loop` after the coroutine
          returned. Defaults to ``False``.
    Returns a decorator to wrap around an asyncio coroutine.
    Example:
    .. code-block::
        @asyncio.coroutine
        def coroutine(timeout):
            yield from asyncio.sleep(timeout)
            return True
        @aio_run_decorator()
        def helper(*args, **kwargs):
            return coroutine(*args, **kwargs)
        # Call coroutine in a blocking manner
        result = helper(timeout=1.0)
        print(result)
    """
    def _decorator(func):
        # Make it a coroutine if it isn't one already
        # NOTE(review): asyncio.coroutine was removed in Python 3.11 --
        # confirm the supported Python versions.
        if not asyncio.iscoroutinefunction(func):
            func = asyncio.coroutine(func)
        def _wrapper(*args, **kwargs):
            # Each call blocks on a fresh run of the wrapped coroutine.
            return aio_run(
                func(*args, **kwargs),
                loop=loop,
                close_after_complete=close_after_complete,
            )
        return functools.update_wrapper(_wrapper, func)
    return _decorator
def aio_run_proxy_decorator(cls):
    """
    Proxy a publicly accessible class and run all methods marked as
    async inside it (using the class attribute `async_functions`) with
    an event loop to make it appear as a traditional blocking method.
    Arguments:
        - `cls`: A class to be wrapped. The class must inherit
          :class:`AioRunMixin`. The class and all base classes must
          supply a class attribute `async_functions` which is an
          iterable of method names that should appear as traditional
          blocking functions from the outside.
    Returns a class factory.
    .. note:: The `unwrap` property of the resulting instance can be
              used to get the original instance.
    """
    # Ensure each base class has added a class-level iterable of async functions
    async_functions = set()
    # getmro()[:-1] skips `object`, which has no `async_functions`.
    for base_class in inspect.getmro(cls)[:-1]:
        try:
            # set.update(None) raises TypeError when the attribute is absent.
            async_functions.update(base_class.__dict__.get('async_functions', None))
        except TypeError:
            message = "Class {} is missing 'async_functions' iterable"
            raise ValueError(message.format(base_class.__name__))
    # Sanity-check
    if not issubclass(cls, AioRunMixin):
        raise TypeError("Class {} did not inherit 'AioRunMixin'".format(
            cls.__name__))
    class _AioRunProxyDecoratorFactory(wrapt.ObjectProxy):
        def __call__(self, *args, **kwargs):
            # Create instance
            instance = cls(*args, **kwargs)
            # Sanity-check
            if not isinstance(instance, AioRunMixin):
                raise TypeError("Class {} did not inherit 'AioRunMixin'".format(
                    cls.__name__))
            # Wrap with proxy (if required)
            if instance.blocking:
                class _AioRunProxy(wrapt.ObjectProxy):
                    @property
                    def unwrap(self):
                        """
                        Get the wrapped instance.
                        """
                        return self.__wrapped__
                # Wrap all async functions with `aio_run`
                for name in async_functions:
                    def _method(instance_, name_, *args_, **kwargs_):
                        method = aio_run_decorator()(getattr(instance_, name_))
                        return method(*args_, **kwargs_)
                    # functools.partial binds instance and name eagerly,
                    # avoiding the late-binding closure pitfall in the loop.
                    _method = functools.partial(_method, instance, name)
                    setattr(_AioRunProxy, name, _method)
                return _AioRunProxy(instance)
            else:
                # Non-blocking mode: return the raw instance unproxied.
                return instance
    return _AioRunProxyDecoratorFactory(cls)
class AioRunMixin:
    """
    Must be inherited when using :func:`aio_run_proxy_decorator`.
    Arguments:
        - `blocking`: Switch to turn the blocking API on or off.
    """
    # Names of coroutine methods the proxy exposes as blocking calls.
    async_functions = set()  # type: Set[str]
    def __init__(self, blocking=False):
        self.blocking = blocking
    @property
    def unwrap(self):
        """
        Get the wrapped instance.
        """
        # The unproxied object is simply itself.
        return self
|
#-*- coding:utf8 -*-
import time
import datetime
import cStringIO as StringIO
from django.contrib import admin
from django.http import HttpResponse
from shopapp.yunda.models import (ClassifyZone,
BranchZone,
LogisticOrder,
YundaCustomer,
ParentPackageWeight,
TodaySmallPackageWeight,
TodayParentPackageWeight)
from shopback.base.options import DateFieldListFilter
from django.contrib import messages
from .service import YundaService,YundaPackageService,WEIGHT_UPLOAD_LIMIT
from common.utils import gen_cvs_tuple,CSVUnicodeWriter
class ClassifyZoneInline(admin.TabularInline):
    # Tabular inline editor for ClassifyZone rows (used by BranchZoneAdmin).
    model = ClassifyZone
    fields = ('state','city','district')
class ClassifyZoneAdmin(admin.ModelAdmin):
    """Admin list view for classification zones, searchable by location."""
    list_display = ('state','city','district','branch')
    list_display_links = ('state','city',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    search_fields = ['state','city','district']
admin.site.register(ClassifyZone,ClassifyZoneAdmin)
class BranchZoneAdmin(admin.ModelAdmin):
    """Admin for branch zones with an inline zone editor and CSV export."""
    list_display = ('code','name','barcode')
    list_display_links = ('code','name',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    inlines = [ClassifyZoneInline]
    search_fields = ['code','name','barcode']
    def export_branch_zone(self,request,queryset):
        """Export the selected branch zones as a CSV download.

        Uses GBK encoding for Windows clients (per User-Agent), UTF-8
        otherwise.
        """
        is_windows = request.META['HTTP_USER_AGENT'].lower().find('windows') >-1
        # NOTE(review): pcsv is assigned but never used.
        pcsv =[]
        bz_tuple = gen_cvs_tuple(queryset,
                                 fields=['barcode','name','code'],
                                 title=[u'网点条码',u'网点名称',u'网点编号'])
        tmpfile = StringIO.StringIO()
        writer = CSVUnicodeWriter(tmpfile,encoding= is_windows and "gbk" or 'utf8')
        writer.writerows(bz_tuple)
        response = HttpResponse(tmpfile.getvalue(), mimetype='application/octet-stream')
        tmpfile.close()
        # Timestamped filename so repeated exports don't collide.
        response['Content-Disposition'] = 'attachment; filename=branch-zone-%s.csv'%str(int(time.time()))
        return response
    export_branch_zone.short_description = u"导出CSV文件"
    actions = ['export_branch_zone',]
admin.site.register(BranchZone,BranchZoneAdmin)
class YundaCustomerAdmin(admin.ModelAdmin):
    """Admin for Yunda customers; form is grouped into collapsible sections."""
    list_display = ('cus_id','name','code','company_name','qr_id','lanjian_id','ludan_id','sn_code','device_code',
                    'contacter','mobile','on_qrcode','on_lanjian','on_ludan','on_bpkg','status','memo')
    list_display_links = ('name','company_name',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    list_filter = ('status',)
    search_fields = ['cus_id','name','code','sync_addr','company_name','contacter']
    # Page layout: basic info expanded, feature sections collapsed.
    fieldsets =((u'客户基本信息:', {
                    'classes': ('expand',),
                    'fields': (('name','code','company_name')
                               ,('company_trade','cus_id','contacter',)
                               ,('state','city','district')
                               ,('address','zip','mobile',)
                               ,('phone','status','memo')
                               )
                }),
                (u'二维码设置:', {
                    'classes': ('collapse',),
                    'fields': (('qr_id','qr_code','on_qrcode'),)
                }),
                (u'揽件设置:', {
                    'classes': ('collapse',),
                    'fields': (('lanjian_id','lanjian_code','sn_code')
                               ,('device_code','on_lanjian','on_bpkg'))
                }),
                (u'录单设置:', {
                    'classes': ('collapse',),
                    'fields': (('ludan_id','ludan_code','on_ludan'),)
                })
                )
admin.site.register(YundaCustomer,YundaCustomerAdmin)
class ParentPackageWeightAdmin(admin.ModelAdmin):
    """Admin for parent-package weights, filterable by flags and dates."""
    list_display = ('parent_package_id','weight','upload_weight','weighted','uploaded'
                    ,'destinate','is_jzhw','is_charged')
    list_display_links = ('parent_package_id',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    list_filter = ('is_jzhw','is_charged',('weighted',DateFieldListFilter),
                   ('uploaded',DateFieldListFilter))
    search_fields = ['parent_package_id','destinate']
admin.site.register(ParentPackageWeight,ParentPackageWeightAdmin)
class TodaySmallPackageWeightAdmin(admin.ModelAdmin):
    """Admin for today's small-package weights with calc/upload actions.

    NOTE: this module uses Python 2 syntax (`except Exception,exc`).
    """
    list_display = ('package_id','parent_package_id','weight','upload_weight','weighted','is_jzhw')
    list_display_links = ('package_id','parent_package_id',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    list_filter = ('is_jzhw',)
    search_fields = ['package_id','parent_package_id']
    def package_id_link(self, obj):
        """Render the package id as a link; highlight packages over 3 kg.

        NOTE(review): not referenced in list_display above -- confirm
        whether it should replace 'package_id' there.
        """
        if obj.weight and float(obj.weight) > 3:
            return u'<a href="%s/" style="color:blue;background-color:red;">%s</a>'%\
                (obj.package_id,obj.package_id)
        return u'<a href="%s/">%s</a>'%(obj.package_id,obj.package_id)
    package_id_link.allow_tags = True
    package_id_link.short_description = u"运单编号"
    class Media:
        # Extra assets for the CSV-upload dialog on the change list.
        css = {"all": ("admin/css/forms.css","css/admin/dialog.css", "jquery/jquery-ui-1.10.1.css")}
        js = ("script/admin/adminpopup.js","jquery/jquery-ui-1.8.13.min.js",
              "jquery/addons/jquery.upload.js","yunda/js/package.csvfile.upload.js")
    def calcPackageWeightAction(self,request,queryset):
        """Admin action: compute and store the weight of each selected package."""
        package_service = YundaPackageService()
        for tspw in queryset:
            try:
                weight_tuple = package_service.calcSmallPackageWeight(tspw)
            except Exception,exc:
                messages.warning(request, exc.message)
            else:
                # Flag implausibly heavy small packages for manual review.
                if weight_tuple[0] > 10:
                    messages.warning(request,u'小包(%s)重量超过10公斤,请核实!'%tspw.package_id)
                tspw.weight = weight_tuple[0]
                tspw.upload_weight = weight_tuple[1]
                tspw.save()
    calcPackageWeightAction.short_description = u"计算小包重量"
    def uploadPackageWeightAction(self,request,queryset):
        """Admin action: upload the weights of the selected packages."""
        try:
            package_service = YundaPackageService()
            package_service.uploadSmallPackageWeight(queryset)
        except Exception,exc:
            messages.warning(request, exc.message)
        else:
            messages.info(request, u'上传成功!')
    uploadPackageWeightAction.short_description = u"上传小包重量"
    actions = ['calcPackageWeightAction','uploadPackageWeightAction']
admin.site.register(TodaySmallPackageWeight,TodaySmallPackageWeightAdmin)
class TodayParentPackageWeightAdmin(admin.ModelAdmin):
    """Admin for today's parent-package (bulk bag) weights with
    calculate/upload admin actions."""
    list_display = ('parent_package_id','weight','upload_weight','weighted','is_jzhw')
    list_display_links = ('parent_package_id',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    list_filter = ('is_jzhw',)
    search_fields = ['parent_package_id',]

    class Media:
        # Extra assets for the CSV-upload dialog on the change-list page.
        css = {"all": ("admin/css/forms.css","css/admin/dialog.css","jquery/jquery-ui-1.10.1.css")}
        js = ("script/admin/adminpopup.js","jquery/jquery-ui-1.8.13.min.js",
              "jquery/addons/jquery.upload.js","yunda/js/package.csvfile.upload.js")

    def calcPackageWeightAction(self,request,queryset):
        # Admin action: compute (weight, upload_weight) for every selected
        # parent package; report and skip any heavier than 50 kg.
        package_service = YundaPackageService()
        for bpkw in queryset:
            try:
                weight_tuple = package_service.calcParentPackageWeight(bpkw)
            except Exception,exc:
                messages.warning(request, exc.message)
            else:
                if weight_tuple[0] > 50:
                    messages.error(request, u'集包号(%s)重量异常(%s),请核实。'%
                        (bpkw.parent_package_id,weight_tuple[0]))
                    continue
                bpkw.weight = weight_tuple[0]
                bpkw.upload_weight = weight_tuple[1]
                bpkw.save()
    calcPackageWeightAction.short_description = u"计算大包重量"

    def uploadPackageWeightAction(self,request,queryset):
        # Admin action: push the selected parent packages' weights upstream.
        try:
            package_service = YundaPackageService()
            package_service.uploadParentPackageWeight(queryset)
        except Exception,exc:
            messages.warning(request, exc.message)
        else:
            messages.info(request, u'上传成功!')
    uploadPackageWeightAction.short_description = u"上传大包重量"

    actions = ['calcPackageWeightAction','uploadPackageWeightAction']

admin.site.register(TodayParentPackageWeight,TodayParentPackageWeightAdmin)
class LogisticOrderAdmin(admin.ModelAdmin):
    """Admin for logistic orders.

    Includes an action that queues un-charged orders onto today's
    small-package weighing list.
    """
    list_display = ('cus_oid','out_sid','weight','receiver_name','receiver_state','receiver_city',
                    'receiver_mobile','weighted','created','is_charged','sync_addr','status')#,'customer'
    list_display_links = ('out_sid','cus_oid',)
    #date_hierarchy = 'created'
    #ordering = ['created_at']
    list_filter = ('status','is_charged','sync_addr',('weighted',DateFieldListFilter),('created',DateFieldListFilter))
    search_fields = ['cus_oid','out_sid','parent_package_id','receiver_mobile','wave_no']

    class Media:
        # Extra assets for the CSV-upload dialog on the change-list page.
        css = {"all": ("admin/css/forms.css","css/admin/dialog.css", "jquery/jquery-ui-1.10.1.css")}
        js = ("script/admin/adminpopup.js","jquery/jquery-ui-1.8.13.min.js",
              "jquery/addons/jquery.upload.js","yunda/js/yundaorder.csvfile.upload.js")

    # -------- change-form page layout --------
    fieldsets =((u'系统信息:', {
        'classes': ('expand',),
        'fields': (('cus_oid','yd_customer','out_sid','parent_package_id',)
                   ,('weight','upload_weight','weighted','uploaded')
                   ,('valid_code','dc_code','is_charged','sync_addr','status')
                   )
    }),
    (u'包裹地址信息:', {
        'classes': ('expand',),
        'fields': (('receiver_name','receiver_state','receiver_city','receiver_district'),
                   ('receiver_address','receiver_zip','receiver_mobile','receiver_phone','is_jzhw'))
    }),
    )

    def pushPackageWeightAction(self,request,queryset):
        # Admin action: add every un-charged selected order to today's
        # small-package weighing list (creating rows as needed).
        try:
            for package in queryset.filter(is_charged=False):
                tspw,state = TodaySmallPackageWeight.objects.get_or_create(package_id=package.out_sid)
                tspw.is_jzhw = package.isJZHW()
                tspw.save()
        except Exception,exc:
            messages.error(request,'出错信息:%s'%exc.message)
    pushPackageWeightAction.short_description = u"添加到今日小包上传列表"
    actions = ['pushPackageWeightAction',]

admin.site.register(LogisticOrder,LogisticOrderAdmin)
|
# git的使用教程
# git 与svn的区别(git是分布式,而svn是集中式管理)
"""
1.下载git服务器(64位windows)--默认路径programs/git
2.tortoiseGit安装
3.创建一个本地仓库--(创建文件 --git init [或者是点击git bash])
4.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
def regevaluate(t, predict, criterion):
    """Return the regression error between targets *t* and *predict*.

    criterion == 'mse' selects mean squared error; any other value
    falls back to mean absolute error.
    """
    metric = mean_squared_error if criterion == 'mse' else mean_absolute_error
    return metric(t, predict)
# Load the housing dataset: the last column is the regression target,
# all preceding columns are input attributes.
data = pd.read_csv('housing.data', header=None, sep=r"\s+").values
number_of_patterns, number_of_attributes = data.shape
x = data[:, 0:number_of_attributes - 1]
t = data[:, number_of_attributes - 1]

# Best scores / hidden-layer sizes found so far, one pair per criterion.
lowest_mean_mae = 1000.0
lowest_mean_mse = 1000.0
lowest_n_mae = 0
lowest_n_mse = 0
max_epoch = 50000

for n in [5, 10, 20, 30, 40, 50]:
    # Average each criterion over 9 random 90/10 train/test splits.
    mean_mae = 0.0
    mean_mse = 0.0
    for i in range(9):
        xtrain, xtest, ttrain, ttest = train_test_split(x, t, test_size=0.1)
        model = MLPRegressor(hidden_layer_sizes=n, activation='relu', solver='adam', learning_rate='constant', max_iter=max_epoch, learning_rate_init=0.01)
        model.fit(xtrain, ttrain)
        y = model.predict(xtest)
        mean_mae += regevaluate(ttest, y, 'mae')
        mean_mse += regevaluate(ttest, y, 'mse')
    mean_mae /= 9
    mean_mse /= 9
    if lowest_mean_mae > mean_mae:
        lowest_mean_mae = mean_mae
        lowest_n_mae = n
    if lowest_mean_mse > mean_mse:
        lowest_mean_mse = mean_mse
        lowest_n_mse = n

print("Mean MAE: {}, N: {}".format(lowest_mean_mae, lowest_n_mae))
print("Mean MSE: {}, N: {}".format(lowest_mean_mse, lowest_n_mse))

# Retrain once with the winning layer size and plot predictions vs targets.
# BUG FIX: the original passed lowest_mean_mse (the error VALUE, a float)
# as hidden_layer_sizes; the selected size lowest_n_mse was intended.
xtrain, xtest, ttrain, ttest = train_test_split(x, t, test_size=0.1)
model = MLPRegressor(hidden_layer_sizes=lowest_n_mse, activation='relu', solver='adam', learning_rate='constant', max_iter=max_epoch, learning_rate_init=0.01)
model.fit(xtrain, ttrain)
y = model.predict(xtest)
plt.subplot(1, 1, 1)
plt.plot(y, 'r.', markersize=6)     # predictions
plt.plot(ttest, 'b-', markersize=6) # ground truth
plt.show()
|
def showsite(siteurl):
    """Scrape one Michelin Guide restaurant page and print its details."""
    soup = BeautifulSoup(requests.get(siteurl).text, 'html.parser')
    header_details = soup.select(".content-header-desc__detail")
    kind = header_details[1].text.strip()    # category
    area = header_details[2].text.strip()    # district
    desc_blocks = soup.select(".location-item .location-item__desc")
    name = desc_blocks[0].select("p")[0].text
    # The hero image URL lives inside the inline style attribute.
    imgurl = soup.find('div', {'class': 'images-featured-big-slider'}).get('style').split("'")[1]
    map_node = soup.select("#js-location-map")[0]
    lat = map_node["data-lat"]               # latitude
    lng = map_node["data-lon"]               # longitude
    tel = desc_blocks[2].text.strip()
    addr = desc_blocks[0].select("p")[1].text.replace(" ", "").replace("\n", "").strip()
    desc = soup.select(".restaurant-desc")[0].text.strip()
    working_hours = desc_blocks[1].text.strip()
    print("分類:", kind)
    print("地區:", area)
    print("店名:", name)
    print("網址:", siteurl)
    print("圖片名稱:", imgurl)
    print("緯度:", lat)
    print("經度:", lng)
    print("電話:", tel)
    print("地址:", addr)
    print("說明:", desc)
    print("營業時間:", working_hours + "\n")
def getpageurl(page, url):
    """Print every restaurant on one listing page.

    Increments the global counter *n* and, on the very first restaurant,
    reads the global *totpages* from the page's pagination widget.
    """
    global n, totpages
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    items = soup.select('.grid-restaurants__item__inner')
    print("第" + str(page) + "頁,共有" + str(len(items)) + "間")
    for entry in items:
        n += 1
        print("n=", n)
        detail_path = entry.select('.resto-inner-title a')[0]['href']
        showsite(rooturl + detail_path)  # print this restaurant's details
        if n == 1:
            # Total page count is stored on the pagination input element.
            totpages = int(soup.find("input", {"class": "form-control"})['data-max_page'])
# Main program: crawl the Michelin Guide Taipei listing and print every
# restaurant's details, page by page.
import requests
from bs4 import BeautifulSoup
n = 0  # running count of restaurants seen
homeurl = 'https://guide.michelin.com/tw/taipei/restaurants?max=30&sort=relevance'
rooturl = 'https://guide.michelin.com'
getpageurl(1, homeurl)  # first page; also sets the global totpages
for page in range(2, totpages + 1):
    # NOTE(review): homeurl is refetched each iteration and the "next page"
    # arrow is always read from page 1; only the page number in the URL is
    # swapped below, so this appears intentional — confirm.
    html = requests.get(homeurl).text
    soup = BeautifulSoup(html, 'html.parser')
    path = soup.find("a", {"class": "page-arrow"})  # the ">" next-page button
    fullurl = path["href"]  # href of the next-page link
    # Split on "?", drop the trailing page digit, insert the target page
    # number, then re-attach the query string.
    url = rooturl + fullurl.split("?")[0][:-1] + str(page) + "?" + fullurl.split("?")[1]
    getpageurl(page, url)
print("\n總共有", n, "間")
from get_fish_info import get_fish_info
import pandas as pd
from pathlib import Path
import pylab as pl
import pickle
import numpy as np
# Root folder holding one sub-folder of raw behavioural data per experiment.
root_path = Path("/n/home10/abahl/engert_storage_armin/ariel_paper/free_swimming_behavior_data/dot_motion_coherence")

for experiment in ["chrna2a",
                   "disc1_hetinx",
                   "scn1lab_NIBR_20200708",
                   "scn1lab_zirc_20200710"]:
    # Genotype spreadsheets differ per experiment: the scn1lab sheets have a
    # header row and an extra pre-genotype column (the post genotype is the
    # correct one); the other two are headerless two-column files.
    if experiment == "chrna2a":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=None)
        fish_data.columns = ['fish_ID', "genotype"]
    if experiment == "disc1_hetinx":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=None)
        fish_data.columns = ['fish_ID', "genotype"]
    if experiment == "scn1lab_NIBR_20200708":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=0)
        fish_data.columns = ['fish_ID', "pre_genotype", "genotype"]  # the post genotype is the correct one
    if experiment == "scn1lab_zirc_20200710":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=0)
        fish_data.columns = ['fish_ID', "pre_genotype", "genotype"]
    print(fish_data)

    all_data = []
    numtrials = 30
    for i in range(len(fish_data)):
        fish_ID = fish_data.iloc[i]["fish_ID"]
        genotype = fish_data.iloc[i]["genotype"]
        # Normalise genotype spellings to wt/het/hom.
        if genotype == "wt" or "+/+" in genotype:
            genotype = 'wt'
        elif genotype == "ht" or "+/-" in genotype:
            genotype = 'het'
        elif genotype == "hm" or "-/-" in genotype:
            genotype = 'hom'
        else:
            print(fish_ID, genotype, "unknown genotype. Skipping.")
            # BUG FIX: the original fell through and still processed this
            # fish despite announcing the skip; actually skip it.
            continue
        for trial in range(0, numtrials):
            print(experiment, fish_ID, genotype, trial)
            try:
                # BUG FIX: use a context manager so the file handle is closed
                # even when pickle.load fails (the original leaked it).
                with open(root_path / experiment / fish_ID / "raw_data" / f"trial{trial:03d}.dat", 'rb') as f:
                    data = pickle.load(f)
            except Exception:
                # Missing/corrupt trial file: this fish has no further trials.
                break
            for stim in range(8):
                bout_times = data[f"bouts_start_stimulus_{stim:03d}"]["timestamp"]
                bout_xs = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_x"]
                bout_ys = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_y"]
                bout_start_fish_accumulated_orientation = data[f"bouts_start_stimulus_{stim:03d}"][
                    "fish_accumulated_orientation"]
                bout_end_fish_accumulated_orientation = data[f"bouts_end_stimulus_{stim:03d}"][
                    "fish_accumulated_orientation"]
                heading_angle_changes = bout_end_fish_accumulated_orientation - bout_start_fish_accumulated_orientation
                # Mirror responses to left-ward motion so all stimuli share
                # one sign convention.
                if stim in [0, 1, 2, 3]:
                    heading_angle_changes = -heading_angle_changes
                # Start at 1: every row needs the previous bout for the
                # inter-bout interval.  (Renamed from the original's "i",
                # which shadowed the outer per-fish loop variable.)
                for k in range(1, len(bout_times)):
                    all_data.append([fish_ID,
                                     genotype,
                                     trial,
                                     stim % 4,
                                     bout_times[k],
                                     bout_xs[k],
                                     bout_ys[k],
                                     bout_times[k] - bout_times[k - 1],
                                     heading_angle_changes[k],
                                     np.sign(heading_angle_changes[k]) == np.sign(heading_angle_changes[k - 1])])

    # One row per bout (except the first bout of each stimulus block).
    df = pd.DataFrame(all_data, columns=["fish_ID",
                                         "genotype",
                                         "trial",
                                         "stim",
                                         "bout_time",
                                         "bout_x",
                                         "bout_y",
                                         "inter_bout_interval",
                                         "heading_angle_change",
                                         "same_as_previous"]).astype(dtype={"trial": "int64",
                                                                            "stim": "int64",
                                                                            "same_as_previous": "bool"}, copy=False)
    df.set_index(['fish_ID', "genotype", 'trial', 'stim'], inplace=True)
    df.sort_index(inplace=True)
    df.to_hdf(root_path / experiment / "all_data.h5", key="all_bouts", complevel=9)

    # Extract behavioural features and store every table in the same HDF5 file.
    df_extracted_features, df_extracted_binned_features, \
        df_extracted_binned_features_same_direction, \
        df_extracted_binned_features_heading_angle_change_histograms, \
        df_extracted_binned_features_inter_bout_interval_histograms, \
        df_gmm_fitting_results = get_fish_info(df)
    df_extracted_features.to_hdf(root_path / experiment / "all_data.h5", key="extracted_features", complevel=9)
    df_extracted_binned_features.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features", complevel=9)
    df_extracted_binned_features_same_direction.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_same_direction", complevel=9)
    df_extracted_binned_features_heading_angle_change_histograms.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_heading_angle_change_histograms", complevel=9)
    df_extracted_binned_features_inter_bout_interval_histograms.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_inter_bout_interval_histograms", complevel=9)
    df_gmm_fitting_results.to_hdf(root_path / experiment / "all_data.h5", key="gmm_fitting_results", complevel=9)
|
#functional programming means that you're allowed to pass functions around just as if they were variables or values.
#an anonymous function is a function that we simply do no define
#a named function:
def by_three(x):
    """Return True when *x* is an integer multiple of three."""
    return not x % 3
# an anonymous function that does the same thing as by_three above:
lambda x: x % 3 == 0
# we can then use the anonymous function with 'filter':
my_list = range(16)
print filter(lambda x: x % 3 == 0, my_list)
# the above filters my_list through the lambda (second argument is the iterable)
# filter is similar to JS: it runs the function and keeps values that test true
# if you plan on using the predicate repeatedly, define a named function instead
# another example:
languages = ["HTML", "JavaScript", "Python", "Ruby"]
print filter(lambda x: x == "Python", languages)
# the above will return ["Python"]
# so it filtered through the list, returning all elements that met the condition
squares = [x**2 for x in range(1, 11)]
print filter(lambda x: 30 <= x <= 70, squares)
# [36, 49, 64]
# remember, filter works on any iterable, not just lists!
garbled = "IXXX aXXmX aXXXnXoXXXXXtXhXeXXXXrX sXXXXeXcXXXrXeXt mXXeXsXXXsXaXXXXXXgXeX!XX"
message = filter(lambda x: x != "X", garbled)
print message
# [I am another secret message!]
|
import web

# Template directory used by the index handler below.
render = web.template.render('templates/')

# URL routing: the site root is served by class ``index``.
urls = (
    '/', 'index'
)

# BUG FIX: the database name must be passed as ``db=``; the original used
# the typo'd keyword ``audb=``, which web.database() would pass through to
# the driver and fail on connect.
db = web.database(dbn='postgres', user='dave', password='password', db='mydb')


class index:
    def GET(self):
        """Render the index template with every row of the ``todo`` table."""
        todos = db.select('todo')
        return render.index(todos)


if __name__ == "__main__":
    # web.application(list with the url mappings, the namespace where the
    # classes are)
    app = web.application(urls, globals())
    app.run()
# run this server with: python this.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from models import drfOpsClass, drfOpsClassBase, session
import uuid
import itchat
import re
def to_dict(self):
    """Map every column name of *self*'s SQLAlchemy table to its current
    attribute value (None when the attribute is unset)."""
    result = {}
    for column in self.__table__.columns:
        result[column.name] = getattr(self, column.name, None)
    return result
'''test_msg_s = '#S06:00 1740'
test_msg_e = '#E18:00 1859'
'''
# Template record pair shared by the message handler below: the dicts are
# filled in by a start ('#S') message and completed by an end ('#E') message.
classStart = drfOpsClass()
classBaseStart = drfOpsClassBase()
dictStart = to_dict(classStart)
dictBaseSrart = to_dict(classBaseStart)
@itchat.msg_register(itchat.content.TEXT)
def print_content(msg):
    """WeChat text-message handler for drilling-shift reports.

    Messages look like '#S06:00 1740' (shift start: time, well depth) or
    '#E18:00 1859' (shift end).  A start message fills the shared module
    dicts; an end message completes them and persists both records.
    """
    a = msg['Text']
    if a[0] == '#':
        # NOTE(review): ``list`` shadows the builtin; holds [time, depth].
        list = a[2:].split()
        if a[1] == 'S' or a[1] == 's':
            # Shift start: generate record ids and store hand-over readings.
            cuuid = str(uuid.uuid1())
            duuid = str(uuid.uuid1())
            dictStart['class_id'] = re.sub("-", "", cuuid)
            dictStart['task_id'] = '123456789'
            dictStart['work_date'] = '2018-01-05'
            dictStart['class_order'] = '一班'
            dictStart['create_org_id'] = 'C1000000800073'
            dictStart['creator_id'] = '8a2b82db4b4d1a36014b6d39c3da0e72'
            dictStart['rflag'] = '0'
            dictStart['isondata'] = '2'
            dictBaseSrart['base_id'] = re.sub("-","", duuid)
            dictBaseSrart['class_id'] = dictStart['class_id']
            dictBaseSrart['receive_time'] = list[0]
            dictBaseSrart['receive_well_depth'] = float(list[1])
            dictBaseSrart['create_org_id'] = 'C1000000800073'
            dictBaseSrart['creator_id'] = '8a2b82db4b4d1a36014b6d39c3da0e72'
            dictBaseSrart['rflag'] = '0'
            # NOTE(review): key is 'isodata' here but 'isondata' above —
            # confirm which spelling each table actually uses.
            dictBaseSrart['isodata'] = '2'
        if a[1] == 'E' or a[1] == 'e':
            # Shift end: compute footage drilled this shift and persist both rows.
            drfOpsClass1 = drfOpsClass()
            drfOpsClassBase1 = drfOpsClassBase()
            dictBaseSrart['send_time'] = list[0]
            dictBaseSrart['send_well_depth'] = float(list[1])
            dictBaseSrart['drilled_footage'] = float(dictBaseSrart['send_well_depth']-dictBaseSrart['receive_well_depth'])
            # Copy the accumulated dict values onto fresh ORM instances.
            for dOClass in drfOpsClass().__table__.columns:
                setattr(drfOpsClass1, dOClass.name, dictStart[dOClass.name])
            for dOClassBase in drfOpsClassBase().__table__.columns:
                setattr(drfOpsClassBase1, dOClassBase.name, dictBaseSrart[dOClassBase.name])
            session.add(drfOpsClass1)
            session.add(drfOpsClassBase1)
            session.commit()
# Log in (reusing the cached WeChat session if possible) and start the
# blocking message loop.
itchat.auto_login(hotReload=True)
itchat.run()
|
# Board side length, then the board itself: one input line of cells per row.
n = int(input())
board_size = [list(input()) for _ in range(n)]
def search_board(board, search):
    """Return the (row, col) of the first cell equal to *search*, scanning
    row by row; returns None when the character is absent."""
    return next(
        ((row_idx, col_idx)
         for row_idx, row in enumerate(board)
         for col_idx, cell in enumerate(row)
         if cell == search),
        None,
    )
# Locate the snake's starting cell and initialise the game state.
snake_pos = search_board(board_size, "S")
game_over = False
eaten_food = 0
def move(dy, dx):
    """Advance the snake one cell by (dy, dx), updating the global state.

    Handles wall collisions (game over), 'B' burrow teleportation, '*'
    food consumption and the 10-food win condition.
    """
    global snake_pos, game_over, eaten_food
    y, x = snake_pos
    board_size[y][x] = "."  # vacate the current cell
    new_y = y + dy
    new_x = x + dx
    # Leaving the board ends the game.
    if new_y > (n - 1) or new_y < 0 or new_x > (n - 1) or new_x < 0:
        game_over = True
        print("Game over!")
        print(f"Food eaten: {eaten_food}")
        return
    pos = board_size[new_y][new_x]
    if pos == "B":
        # Entering a burrow: clear it, then re-emerge at the other 'B'.
        board_size[new_y][new_x] = "."
        new_y, new_x = search_board(board_size, "B")
    elif pos == "*":
        board_size[new_y][new_x] = "."
        eaten_food += 1
        if eaten_food == 10:
            print("You won! You fed the snake.")
            print(f"Food eaten: {eaten_food}")
            game_over = True
    board_size[new_y][new_x] = "S"
    snake_pos = (new_y, new_x)
def print_board():
    """Print the current board, one row per line."""
    print("\n".join(map("".join, board_size)))
# Command dispatch: map input words to movement deltas (dy, dx).
movement = {
    "up": lambda: move(-1, 0),
    "down": lambda: move(1, 0),
    "right": lambda: move(0, 1),
    "left": lambda: move(0, -1),
}
# Main loop: read a direction, apply it, redraw, until the game ends.
while not game_over:
    cmd = input()
    fn_move = movement[cmd]
    fn_move()
    print_board()
|
# Swap two variables without a temporary, shown two ways.
# Method 1 (tuple unpacking):
# a=10
# b=20
# print('交换前:',a,b)
# a,b=b,a
# print('交换后:',a,b)
# Method 2 (arithmetic sum/difference):
a=20
b=10
print('交换前:',a,b)  # "before swap"
a=a+b
b=a-b
a=a-b
print('交换后:',a,b)  # "after swap"
|
import json

# One JSON graph per line; tally nodes whose 'anchors' list has more than
# one entry, not counting the first such node of each graph.
with open('quishpi_org.mrp') as f:
    counter = 0
    for l in f:
        found_in_this = False
        graph = json.loads(l)
        for node in graph['nodes']:
            if 'anchors' in node:
                if len(node['anchors']) > 1:
                    # NOTE(review): counter only increments from the SECOND
                    # multi-anchor node per graph onward; if the intent was to
                    # count graphs containing any multi-anchor node, the flag
                    # check and increment order are inverted — confirm.
                    if found_in_this:
                        counter += 1
                    found_in_this = True
    print(counter)
|
def sign_up():
    '''Member sign-up function.'''
    # NOTE(review): the body unconditionally calls sign_up() again, so this
    # recurses until RecursionError (which neither except clause catches);
    # the actual registration logic appears to be missing.  BadUserName and
    # PasswordNotMatched are also not defined/imported in this view — confirm.
    try:
        sign_up()
    except BadUserName:
        # "Input cannot be used as a name."
        print('이름으로 사용할 수 없는 입력입니다.')
    except PasswordNotMatched:
        # "The entered passwords do not match."
        print('입력한 패스워드가 서로 일치하지 않습니다.')
from bs4 import BeautifulSoup
import urllib3
# Letter and number fragments combined into candidate ICD-10 code prefixes.
A = ['A','B','C']
B = ['01', '02', '03']
C = ['0', '1', '2', '3','4']
D = ['.0', '.1', ]
# Every letter+number prefix, e.g. 'A01', 'A02', ..., 'C03'.
code = [letter + number for letter in A for number in B]
# Filled later with prefix+decimal sub-codes, e.g. 'A01.0'.
dd = []
def check_code(idc):
    """Look up ICD-10 code *idc* on icd10data.com and print the matching
    disease name.

    Propagates AttributeError (a ``find`` returning None) when the code
    has no search result.
    """
    url = "https://www.icd10data.com/search?s={}&codebook=icd10cm".format(idc)
    response = urllib3.PoolManager().request('GET', url)
    page = BeautifulSoup(response.data, "html.parser")
    result_row = page.find('div', class_='container vp').find('div', class_='row')
    disease = result_row.find('div', class_='searchPadded').text
    print(idc, disease)
# For each prefix: verify it exists, then generate and verify its decimal
# sub-codes.  A failed lookup raises AttributeError inside check_code.
for i in code:
    try:
        check_code(i)
        for l in D:
            dd.append(i+l)
        # NOTE(review): dd accumulates across prefixes, so every earlier
        # sub-code is re-checked here for each new prefix (duplicate HTTP
        # requests).  If unintended, dd should be reset per prefix — confirm.
        for q in dd:
            check_code(q)
    except AttributeError:
        # No search result page for this code; just report the prefix.
        print(i)
|
from bs4 import BeautifulSoup
from companies_matcher.config import config
from .abc import ParserABC
import aiohttp
class MarketwatchParser(ParserABC):
    """Scrapes a MarketWatch financials report table for selected topics.

    The report page for each ticker is fetched asynchronously and parsed
    into a mapping of {period: {topic: values}}.
    """
    _url = config['marketwatch']['url']
    _endpoint = config['marketwatch']['endpoint']
    _headers = {'User-Agent': config['service']['userAgent']}

    def __init__(self, report: str, tickers: list, topics: list):
        # report: report path segment; tickers: symbols to fetch;
        # topics: row labels to extract from the table.
        self._tickers = tickers
        self._topics = topics
        self._report = report

    @staticmethod
    def _parse_period(soup: BeautifulSoup):
        """Return the reporting-period column headers of the report table."""
        row = soup.find('thead', class_='table__header')
        # Skip the first two label cells and the trailing trend cell.
        return [i.text for i in row.find_all('div', attrs={'class': 'cell__content'})[2:-1]]

    @staticmethod
    def _combine_data_with_period(data: dict, period: list):
        """Pivot {topic: values-by-period} into {period: {topic: value}}."""
        result = {key: {} for key in period}
        for n, item in enumerate(period):
            for key in data.keys():
                values = data[key][n]
                result[item].update({key: values})
        return result

    async def _request_html(self, ticker: str):
        """Fetch the raw financials report page for *ticker*."""
        url = self._url + f'{ticker}/{self._endpoint}/{self._report}'
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self._headers) as resp:
                return await resp.text()

    def _parse_html(self, html: str):
        """Extract the requested topics' per-period values from report HTML."""
        data = dict()
        soup = BeautifulSoup(html, "html.parser")
        rows = soup.find_all('tr', attrs={'class': 'table__row'})
        for row in rows:
            if cells := row.find_all('div', class_='cell__content'):
                topic = cells[0].text
                if topic in self._topics:
                    # Trim the same leading/trailing cells as _parse_period
                    # so values stay aligned with the period headers.
                    values = [i.text for i in cells[2:-1]]
                    data[topic] = values
        period = self._parse_period(soup)
        result = self._combine_data_with_period(data, period)
        return result
|
# Python program to check whether a user-supplied integer is prime by
# counting its divisors: a prime has exactly two (1 and itself).
n=int(input("Enter an integer number"))
divisor_count = sum(1 for candidate in range(1, n + 1) if n % candidate == 0)
if divisor_count == 2:
    print(n,"is a prime number")
else:
    print(n,"is not a prime number")
print("End of the program")
|
from unittest import TestCase, main
from ... import UndirectedGraph
class TestGetEdgeWeight(TestCase):
    """Checks UndirectedGraph.get_edge_weight for both edge orientations
    and for invalid edges."""

    def setUp(self) -> None:
        self.g = UndirectedGraph(edges={("a", "b"): 1, ("b", "c"): 2, ("e", "f"): 3})

    def test_get_weight(self) -> None:
        # Every known edge reports the same weight in either orientation.
        for (u, v), weight in [(("a", "b"), 1), (("b", "c"), 2), (("e", "f"), 3)]:
            self.assertEqual(
                self.g.get_edge_weight((u, v)), weight, "Should get edge weight."
            )
            self.assertEqual(
                self.g.get_edge_weight((v, u)), weight, "Should get edge weight."
            )

    def test_invalid(self) -> None:
        # Unknown endpoints and self-loops are both rejected.
        for bad_edge in [("b", "g"), ("b", "b")]:
            with self.assertRaises(
                ValueError, msg="Should throw exception if edge is invalid."
            ):
                self.g.get_edge_weight(bad_edge)


if __name__ == "__main__":
    main()
|
from replit import clear
#HINT: You can call clear() to clear the output in the console.
logo = '''
___________
\ /
)_______(
|"""""""|_.-._,.---------.,_.-._
| | | | | | ''-.
| |_| |_ _| |_..-'
|_______| '-' `'---------'` '-'
)"""""""(
/_________\\
.-------------.
/_______________\\
'''
# Greet the user, then collect sealed bids until no bidders remain; the
# console is cleared after each bidder so bids stay secret.
print(logo)
print("Welcome to Secret auction Program")
bid = {}  # bidder name -> bid amount
bidders = "yes"
while bidders == "yes":
    name = str(input("What is your name? : "))
    bid_amt = int(input("What is your bid? : "))
    bid.update({name: bid_amt})
    bidders = str(input("Are they any other bidders? : ")).lower()
    clear()
# Highest bid wins; a tie resolves to the first maximum encountered.
winner = max(bid,key=bid.get)
print(f"the winner is {winner} with bid amount ${bid[winner]}")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
def batch_cosine_sim(u, v, epsilon=1e-6):
    """
    u: content_key: [batch_size x num_heads x mem_wid]
    v: memory: [batch_size x mem_hei x mem_wid]
    k: similarity: [batch_size x num_heads x mem_hei]

    epsilon keeps the norms (and the final division) away from zero.
    """
    assert u.dim() == 3 and v.dim() == 3
    # Pairwise dot products between every head key and every memory row.
    dot = torch.bmm(u, v.transpose(1, 2))
    # Outer product of the (regularised) squared row norms, then sqrt,
    # giving ||u_i|| * ||v_j|| for every pair.
    u_sq = u.norm(2, 2, keepdim=True).pow(2) + epsilon
    v_sq = v.norm(2, 2, keepdim=True).pow(2) + epsilon
    norm_prod = torch.sqrt(torch.bmm(u_sq, v_sq.transpose(1, 2)))
    return dot / (norm_prod + epsilon)
# batch_size = 3
# num_heads = 2
# mem_hei = 5
# mem_wid = 7
# u = torch.ones(batch_size, num_heads, mem_wid)
# u[0][0][4] = 0
# u[1][0][4] = 10
# u[1][1][6] = 10
#
# v = torch.ones(batch_size, mem_hei, mem_wid)
# v[0] = v[0] * 2
# v[1][0][0] = 0
# v[1][0][1] = 1
# v[1][0][2] = 2
# v[1][0][3] = 3
# v[1][0][4] = 4
# v[1][1][4] = 0
# print(u)
# print(v)
#
# batch_cosine_sim(Variable(u), Variable(v))
|
import os
import abc
from . import utils
class BaseProcessor:
    """Hook interface for project-template post-processing steps.

    Subclasses can act before files are copied (precoppy), after they are
    copied (postcoppy) and after templates are rendered (postrender).
    """
    def __init__(self):
        # Directory holding the processor's bundled data files; set later
        # via set_data_dir.
        self.data_dir = None

    def set_data_dir(self, data_dir):
        self.data_dir = data_dir

    @abc.abstractmethod
    def precoppy(self, prop, value):
        pass

    @abc.abstractmethod
    def postcoppy(self, prop, value, project_directory):
        pass

    @abc.abstractmethod
    def postrender(self, prop, value, project_directory):
        pass
class License(BaseProcessor):
    """Copies the chosen license's text into a new project as LICENSE.txt."""

    # Lowercase license key -> bundled template file name.
    license_files = {
        'bsd2': 'LICENSE_BSD2.txt',
        'bsd3': 'LICENSE_BSD3.txt',
        'gpl2': 'LICENSE_GPL2.txt',
        'gpl3': 'LICENSE_GPL3.txt',
        'mit': 'LICENSE_MIT.txt',
        # NOTE(review): no .txt suffix here, unlike the others — confirm
        # this matches the actual data file name.
        'apache2': 'LICENSE_APACHE2'
    }

    def __init__(self):
        BaseProcessor.__init__(self)

    def precoppy(self, prop, value):
        """No pre-copy work is needed for licenses."""
        pass

    def postrender(self, prop, value, project_directory):
        """No post-render work is needed for licenses."""
        pass

    def postcoppy(self, prop, value, project_directory):
        """After copying, drop the selected license file into the project
        root as LICENSE.txt.  Only reacts to the 'license' property."""
        if prop != "license":
            return
        source = os.path.join(self.data_dir, self.license_files[value.lower()])
        target = os.path.join(project_directory, 'LICENSE.txt')
        utils.copyfile(source, target)
|
"""Keep your requirements.txt files in sync with Pipfile or Pipfile.lock files."""
from .pipfile_requirements import PipfileRequirementsManager
|
import datetime
import time
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.models import AnonymousUser
import graphene
import graphql_jwt
from graphene_django import DjangoObjectType
from graphql_jwt.decorators import login_required
from graphql_jwt.utils import get_payload
from django_redis import get_redis_connection
from redis import exceptions
class UserType(DjangoObjectType):
    """GraphQL object type mirroring the project's user model."""
    class Meta:
        model = get_user_model()
class CreateUser(graphene.Mutation):
    """Mutation that registers a new user with a hashed password."""
    ok = graphene.Boolean()

    class Arguments:
        username = graphene.String(required=True)
        password = graphene.String(required=True)
        email = graphene.String(required=True)

    def mutate(self, info, username, password, email):
        user = get_user_model()(username=username, email=email)
        # set_password stores the hash, never the raw password.
        user.set_password(password)
        user.save()
        ok = True
        return CreateUser(ok=ok)
class Logout(graphene.Mutation):
    """Mutation that logs the current user out by blacklisting their JWT.

    The active token key ``token_<uid>`` is renamed to ``zombie_<uid>`` in
    Redis and given a TTL equal to the token's remaining lifetime, so it
    stays blacklisted only for as long as it would have been valid.
    """
    ok = graphene.Boolean()

    @login_required
    def mutate(self, info):
        token = getattr(info.context, 'jwt')
        payload = get_payload(token)
        # Flag the request so the response layer deletes the jwt set-cookie.
        setattr(info.context, 'logout', True)
        used = 'token_{}'.format(info.context.user.id)
        zombie = 'zombie_{}'.format(info.context.user.id)
        info.context.user = AnonymousUser()
        con = get_redis_connection()
        try:
            con.rename(used, zombie)
            # Remaining lifetime in seconds, taken from the JWT exp claim.
            exp_datetime = datetime.datetime.fromtimestamp(time.mktime(time.gmtime(payload['exp'])))
            now_datetime = datetime.datetime.utcnow()
            dod = (exp_datetime-now_datetime).total_seconds()
            con.expire(zombie, dod)
            # logout(info.context)
        except exceptions.ResponseError:
            # rename fails when the token key is already gone; treat the
            # user as logged out.
            pass
        return Logout(ok=True)
class Query(graphene.ObjectType):
    """Root GraphQL query exposing the logged-in user's profile."""
    User_profile = graphene.List(UserType)

    @login_required
    def resolve_User_profile(self, info, **kwargs):
        """Resolve the ``User_profile`` field.

        BUG FIX: the resolver was named ``resolve_users`` so graphene never
        bound it to the ``User_profile`` field, and it returned a single
        model instance for a List field; return a queryset restricted to
        the authenticated user instead.
        """
        return get_user_model().objects.filter(id=info.context.user.id)
class Mutation(graphene.ObjectType):
    """Root GraphQL mutation wiring for account management."""
    Create_user = CreateUser.Field()
    Log_in = graphql_jwt.ObtainJSONWebToken.Field()
    Log_out = Logout.Field()
|
"""
This file defines class CorrectClassifiedReward.
@author: Clemens Rosenbaum :: cgbr@cs.umass.edu
@created: 6/8/18
"""
from .BaseReward import BaseReward
class CorrectClassifiedReward(BaseReward):
    """
    Class CorrectClassifiedReward defines the +1 reward for correct classification, and -1 otherwise.
    """

    def __init__(self, *args, **kwargs):
        BaseReward.__init__(self, *args, **kwargs)

    def forward(self, loss, yest, ytrue):
        """Map predictions/targets (index or one-hot form) to +/-1 rewards."""
        # A tensor whose element count equals its first dimension already
        # holds class indices; otherwise reduce one-hot rows with argmax.
        pred_idx = yest if yest.numel() == yest.size(0) else yest.max(dim=1)[1]
        true_idx = ytrue if ytrue.numel() == ytrue.size(0) else ytrue.max(dim=1)[1]
        matches = (pred_idx.squeeze() == true_idx.squeeze()).float()
        return 2. * matches - 1.
from django.db import models
from django.utils.regex_helper import Choice
from django.utils.timezone import now
# Create your models here.
class cart_item(models.Model):
    """One line item in a shopping cart: product, quantity and price totals."""
    Img = models.ImageField(upload_to='pics')
    Product = models.CharField(max_length=255)
    Quantity = models.IntegerField()
    Price = models.FloatField()
    Total = models.FloatField()

    def __str__(self):
        return self.Product
class Modify(models.Model):
    """Editable site content: section images, contact number and address."""
    title = models.CharField(max_length=50)
    # des = models.CharField(max_length=500)
    Lab_img = models.ImageField(upload_to='pics')
    Pharmacy_img = models.ImageField(upload_to='pics')
    equipments_img1 = models.ImageField(upload_to='pics')
    equipments_img2 = models.ImageField(upload_to='pics')
    number = models.BigIntegerField()
    add = models.CharField(max_length=200)
class appointment(models.Model):
    """A patient's appointment request with contact details and timestamps."""
    name = models.CharField(max_length=50)
    Phone_Number = models.BigIntegerField()
    problem = models.TextField()
    # appointment_Date is stamped automatically on creation; Sent_Time
    # defaults to submission time but remains editable.
    appointment_Date = models.DateTimeField(auto_now_add=True,auto_now=False,blank=True,null=True)
    Sent_Time = models.DateTimeField(default=now,blank=True)
    # def __str__(self):
    #     return self.name
class pharmacy_order(models.Model):
    """A medicine order with a delivery address limited to the listed
    Andhra Pradesh districts."""
    # (stored value, display label) pairs for the District dropdown.
    DISTRICT_CHOICE = (
        (1,'Anantapur'),
        (2,'Chittoor'),
        (3,'East Godavari'),
        (4,'Guntur'),
        (5,'YSR Kadapa'),
        (6,'Krishna'),
        (7,'Kurnool'),
        (8,'Nellore'),
        (9,'Prakasam'),
        (10,'Srikakulam'),
        (11,'Vijayanagaram'),
        (12,'Visakapatnam'),
        (13,'West Godavari'),
    )
    District = models.CharField(max_length=50,choices=DISTRICT_CHOICE)
    First_Name = models.CharField(max_length=50)
    Last_Name = models.CharField(max_length=50)
    Address = models.CharField(max_length=500)
    pincode = models.IntegerField()
    Phone_Number = models.PositiveBigIntegerField()
    order = models.TextField()

    def __str__(self):
        return self.First_Name
import tkinter as tk
from PIL import Image, ImageTk
from player import Player, COLORS
from utils import *
from game_state import GameState
import pieces
from pieces.special_moves import *
from game_rules import *
from timer import *
from ai import *
class Board(tk.Frame):
def __init__(self, parent, rows=8, columns=8, size=32, color1="light steel blue", color2="steel blue"):
    """Build the chess board UI: canvas, squares, players, timers and AI hook.

    size is the pixel size of one square; color1/color2 are the two
    alternating square colours.
    """
    self.rows = rows
    self.columns = columns
    self.size = size
    self.color1 = color1
    self.color2 = color2
    self.squares = {}
    # Module-wide sprite list shared with add_piece / the AI.
    global sprites
    sprites = []
    self.lock = False
    self.selsquare = []
    global special_moves
    special_moves = SpecialMoves()
    self.populate_grid()
    canvas_width = columns * size
    canvas_height = rows * size
    tk.Frame.__init__(self, parent)
    self.canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0,
                            width=canvas_width, height=canvas_height, background="black")
    self.canvas.pack(side="top", fill="both", expand=True, padx=2, pady=2)
    # Redraw on resize; route clicks to the game's input handler.
    self.canvas.bind("<Configure>", self.refresh)
    self.canvas.bind("<Button-1>", self.click_event_handler)
    self.contadorPosPieces = 0
    self.state = GameState(self, [Player(0), Player(1)])
    # Per-player countdown clocks, shared module-wide with place_piece.
    global timerp1
    global timerp2
    LabelC1 = tk.LabelFrame(self, text="player2", height=100, width=150)
    LabelC1.pack()
    LabelC1.place(x=600, y=5)
    timerp2 = Countdown(LabelC1)
    timerp2.pack(padx=30, pady=10)
    LabelC2 = tk.LabelFrame(self, text="player1", height=100, width=150)
    LabelC2.pack()
    LabelC2.place(x=600, y=450)
    timerp1 = Countdown(LabelC2)
    timerp1.pack(padx=30, pady=10)
    timerp1.start_timer()
    self.ai = None
    # BUG FIX: ``command=self.mode("black")`` invoked mode() immediately at
    # construction time and bound the button to its return value (None).
    # Defer the call with a lambda so it runs when the button is clicked.
    black = tk.Button(self, text="black", command=lambda: self.mode("black"))
def populate_grid(self):
for i in range(8):
for j in range(8):
square_info = {'piece': None, 'coord':(i, j),'selected':None,'gamerule':None,'aicoord':None}
self.squares[(i,j)] = square_info
def add_piece(self, piece, row=0, column=0):
    """Load a piece's sprite, draw it on the canvas and place it on a square."""
    sprites.append(tk.PhotoImage(file = piece.sprite_dir))
    # Remember where this sprite lives in the global sprites list.
    piece.sprite_ID = len(sprites)-1
    self.canvas.create_image(row, column, image=sprites[piece.sprite_ID], tags=(piece.name, "piece_name"), anchor="c")
    self.place_piece(piece, row, column)
def place_piece(self, piece, row, column):
self.contadorPosPieces += 1
self.squares[(row, column)]['piece'] = piece
x0 = (column * self.size) + int(self.size/2)
y0 = (row * self.size) + int(self.size/2)
# #####print(self.squares[(row, column)])
self.canvas.coords(piece.name, x0, y0)
if(GameState.first_move and piece.color == "white" and self.contadorPosPieces == 65):
timerp1.start_timer()
timerp2.stop_timer()
elif(self.contadorPosPieces >64):
if(piece.color == "white"):
timerp2.start_timer()
timerp1.stop_timer()
else:
timerp2.stop_timer()
timerp1.start_timer()
def refresh(self, event):
xsize = int((event.width-1) / self.columns)
ysize = int((event.height-1) / self.rows)
self.size = min(xsize, ysize)
self.canvas.delete("square")
color = self.color2
for row in range(self.rows):
color = self.color1 if color == self.color2 else self.color2
for col in range(self.columns):
x1 = (col * self.size)
y1 = (row * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill=color, tags="square")
color = self.color1 if color == self.color2 else self.color2
for item in self.squares.items(): #iterates through the self.squares dict, attributing key to item[0] and value to item[1]
if item[1]['piece']: #if statement is only true if square isn't empty
piece = item[1]['piece']
self.place_piece(piece, item[0][0], item[0][1])
# puts piece over the square
self.canvas.tag_raise("piece")
self.canvas.tag_lower("square")
def mode(self, str):
self.ai = Ai(str,self.squares,self,sprites,special_moves)
if (GameState.turn(self.ai.color)):
self.ai.board=self
self.ai.ai_move()
GameState.switch()
def clear(self):
pieces_1 = self.state.players[0].pieces
pieces_2 = self.state.players[1].pieces
GameState.player = 'white'
GameState.blackcoord = (0,4)
GameState.whitecoord = (7,4)
for i in range (len(pieces_1)):
self.canvas.delete(pieces_1[i].name)
for i in range (len(pieces_2)):
self.canvas.delete(pieces_2[i].name)
timerp1.restart()
timerp1.seconds_left += 15
timerp2.restart()
timerp2.seconds_left += 15
self.squares = {}
self.populate_grid()
self.state = GameState(self, [Player(0), Player(1)])
timerp1.start_timer()
timerp2.stop_timer()
self.ai = None
self.mode("")
def position_pieces(self, player):
first_line = 0
second_line = 1
if (player.color != 0):
first_line = 7
second_line = 6
self.add_piece(player.pieces[0], first_line, 4)
self.add_piece(player.pieces[1], first_line, 3)
rooks = player.pieces[2:4]
bishops = player.pieces[4:6]
knights = player.pieces[6:8]
pawns = player.pieces[8:16]
for i in range (2):
self.add_piece(rooks[i], first_line, i*7)
self.add_piece(bishops[i], first_line, 2 + 3*i)
self.add_piece(knights[i], first_line, 1 + 5*i)
for i in range(8):
self.add_piece(pawns[i], second_line, i)
def add_square(self, piece, coord): # trava a movimentacao no tabuleiro
piece.selected = True # e encaminha os possiveis movimentos para o desenho
self.lock = True
vec = piece.get_possible_moves(coord,self.squares)
list_aux = []
if(piece.color == 'white'):
list_aux = check_all(self.squares, GameState.whitecoord, piece.color)
else:
list_aux = check_all(self.squares, GameState.blackcoord, piece.color)
if(list_aux):
vec = list(set(vec) & set(list_aux))
aux = 0
for i in range(8):
aux = 0
for j in range(8):
piece_aux = self.squares[(i,j)]['piece']
if(piece_aux is not None and piece.color != piece_aux.color):
aux = 2
if(piece_aux is not None and piece.color == piece_aux.color):
vec_aux = list(set(piece_aux.get_possible_moves((i,j), self.squares)) & set(list_aux))
if(vec_aux or (get_piece_type(piece_aux.name) == 'king' and piece_aux.get_possible_moves((i,j), self.squares))):
# #print("TO NÂO")
aux = 1
break
if(aux == 1):
break
if(aux == 0 or aux == 2):
stri = "Xeque-Mate"
tk.messagebox.showinfo("Xeque-Mate", stri)
self.clear()
else:
aux = 0
for i in range(1,8):
aux = 0
for j in range(1,8):
piece_aux = self.squares[(i,j)]['piece']
if(piece_aux is not None and piece.color != piece_aux.color):
aux = 2
if(piece_aux is not None and piece.color == piece_aux.color):
vec_aux = piece_aux.get_possible_moves((i,j), self.squares)
#print(vec_aux)
if(vec_aux):
aux = 1
break
if(aux == 1):
break
if(aux == 0 or aux == 2):
stri = "Afogamento"
tk.messagebox.showinfo("Empate por afogamento", stri)
piece.selected = False
self.lock = False
self.canvas.delete("square_selected")
self.after(200, self.clear())
if(get_piece_type(piece.name) == 'king'):
vec = piece.get_possible_moves(coord, self.squares)
if(not(vec)):# se nao tem movimentos libera a selecao de outras pecas
piece.selected = False
self.lock = False
self.draw_square(vec,coord)
def draw_square(self, vec, coord): # desenha os possiveis movimentos na tela
for i in range (len(vec)):
self.squares[(vec[i][0],vec[i][1])]['selected']=coord
self.squares[(vec[i][0],vec[i][1])]['gamerule']=vec[i][2]
x1 = (vec[i][0] * self.size)
y1 = (vec[i][1] * self.size)
x2 = x1 + self.size*0.8
y2 = y1 + self.size*0.8
if(vec[i][2]=='mov'):
self.selsquare.append(self.canvas.create_oval(y1+self.size*0.2, x1+self.size*0.2, y2, x2,outline="",fill="black",stipple="gray50", tags="square_selected"))
else:
self.selsquare.append(self.canvas.create_oval(y1+self.size*0.2, x1+self.size*0.2, y2, x2,outline="",fill="green",stipple="gray50", tags="square_selected"))
def clear_square(self,piece,selected): # libera da tela e do dicionarios os possiveis movimentos e destrava o tabuleiro
piece.selected = False
self.lock = False
for i in range(len(self.selsquare)):# libera da tela os quadrados referentes aos possiveis movimentos
self.canvas.delete(self.selsquare[i])
for i in range(len(selected)): # libera do dicionario as referencias a peca selecionada
self.squares[(selected[i][0], selected[i][1])]['selected']=None
self.selsquare = []
def capture_piece(self, coord):
capturedPiece = self.squares[coord]['piece']
if capturedPiece:
aicoord=self.squares[coord]['aicoord']
if aicoord:
self.squares[coord]['aicoord']=None
self.ai.update(aicoord)
self.canvas.delete(capturedPiece.name)
def move_piece(self, piece, ref, coord):
selected = piece.get_possible_moves(self.squares[ref]['coord'],self.squares)
color = piece.color
if (not piece.was_moved_before):
if (get_piece_type(piece.name) == "pawn"):
if (abs(ref[0]-coord[0]) == 2 and ((coord[1] < 7 and self.squares[(coord[0], coord[1]+1)]['piece']) or (coord[1]>0 and self.squares[(coord[0], coord[1]-1)]['piece']))):
GameState.possible_en_passant = coord
piece.was_moved_before = True
self.clear_square(piece,selected)
self.capture_piece(coord)
self.place_piece(self.squares[ref]['piece'],coord[0],coord[1]) # move a peca
self.squares[ref]['piece'] = None
if(color == 'white'):
timerp2.start_timer()
timerp1.stop_timer()
else:
timerp2.stop_timer()
timerp1.start_timer()
def click_event_handler(self, event): # encaminha funcoes dependendo do click do mouse
for row in range(self.rows):
for col in range(self.columns):
if(self.click_is_valid(row, col, event)): # tratamento do click mouse
piece = self.squares[(col,row)]['piece'] # guarda se o quadrado clicado eh uma peca
if(piece):
color = piece.color
ref = self.squares[(col,row)]['selected']
gr = self.squares[(col,row)]['gamerule']
if piece and GameState.turn(color): # clicou na peca
self.handle_board_lock(piece, row, col)
if ref: # clicou no quadrado vermelho
piece = self.squares[ref]['piece']
self.handle_piece_movimentation(piece, col, row, ref)
GameState.switch() # troca a cor do turno
if(gr!='mov'):
special_moves.mov_roque(self,gr,(col,row))
if (self.ai and GameState.turn(self.ai.color)):
self.ai.special_moves=special_moves
self.ai.board=self
self.ai.ai_move()
GameState.switch()
def click_is_valid(self, row, col, event):
return (row*self.size<event.x<=(row+1)*self.size) and (col*self.size<event.y<=(col+1)*self.size)
def handle_board_lock(self, piece, row, col):
if(not(self.lock) and not(piece.selected)):
self.add_square(piece,(col,row))
elif(self.lock and piece.selected):
self.clear_square(piece, piece.get_possible_moves(self.squares[(col,row)]['coord'],self.squares))
def handle_piece_movimentation(self, piece, row, col, ref):
if(piece is not None and piece.selected):
if (get_piece_type(piece.name)=='pawn'):
if (abs(col-ref[1])==1) and not self.squares[(row, col)]['piece']:
special_moves.en_passant(self)
else:
GameState.possible_en_passant = None
GameState.first_move = False
self.move_piece(piece,ref,(row,col))
if (get_piece_type(piece.name)=='pawn' and row in [0,7]):
self.lock=True
player = self.state.players[select_player(piece.color)]
special_moves.pawn_promotion(self, piece, row, col, sprites, player)
elif (get_piece_type(piece.name)=='king'):
if (piece.color == 'white'):
GameState.whitecoord = (row, col)
else:
GameState.blackcoord = (row, col)
def reset_timer(self): # consertar
timerp1.restart()
timerp2.restart()
def click_is_valid(self, row, col, event):
return (row*self.size<event.x<=(row+1)*self.size) and (col*self.size<event.y<=(col+1)*self.size)
|
# -*- coding:utf-8 -*-
import os
import sys
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
import base64
import logging
reload(sys)
sys.setdefaultencoding('utf8')
# Get an instance of a logger
logger = logging.getLogger("django")
class EMail:
    """Small SMTP-over-SSL mail sender with optional file attachments.

    NOTE: this module targets Python 2 (``unicode``, ``reload(sys)``),
    so Python 2 compatible syntax is kept throughout.
    """

    def __init__(self, smtp_server, smtp_port, user, password, receiver):
        # *receiver* may be a comma-separated list of addresses.
        self.smtp_server = smtp_server
        self.smtp_port = smtp_port
        self.user = user
        self.password = password
        self.receiver = receiver

    def send_email(self, subject='', content='', attachment=None):
        """Send *content* as plain text with the files in *attachment* attached.

        Attachment files are deleted from disk after a successful send.
        Returns True on success, False on any failure (which is logged).
        """
        # BUG FIX: the original used a mutable default ``attachment=[]``,
        # which is shared between calls; use None and normalise here.
        file_names = attachment if attachment is not None else []
        msg = MIMEMultipart()
        if not isinstance(subject, unicode):
            subject = unicode(subject)
        msg["Subject"] = subject
        msg["From"] = self.user
        msg["To"] = self.receiver
        content = content.encode('utf-8')
        msg.attach(MIMEText(content, 'plain', 'utf-8'))
        for i in file_names:
            # BUG FIX: close the attachment file handle (was left open).
            with open(i, 'rb') as fh:
                part = MIMEApplication(fh.read())
            # RFC 2047 encoded-word so non-ASCII file names survive transport.
            part.add_header('Content-Disposition', 'attachment',
                            filename='=?utf-8?b?' + base64.b64encode(i.encode('UTF-8')) + '?=')
            msg.attach(part)
        # Connect to the SMTP server over SSL.
        s = smtplib.SMTP_SSL(self.smtp_server, port=self.smtp_port, timeout=30)
        try:
            s.login(self.user, self.password)
            s.sendmail(self.user, self.receiver.split(','), msg.as_string())
            logger.info("Email query data %s send successfully" % file_names)
            for i in file_names:
                os.remove(i)
            return True
        except Exception as e:
            # Login/send failures are reported as False instead of raising,
            # matching the original best-effort contract.
            logger.info(e)
            return False
        finally:
            # BUG FIX: always release the connection; the original only
            # closed it on the success path.
            s.close()
|
# Enclosing-function (closure) scope demo.
# Nested functions ("Funciones anidadas").
name = "This is a global name"
def greet():
    """Closure demo: the nested function reads ``name`` from greet's scope."""
    name = "Sammy"

    def say_hello():
        # `name` resolves to the enclosing (greet) scope, not the global one.
        print("Hello " + name)

    say_hello()
greet()  # prints "Hello Sammy" -- the closure sees greet's local `name`
################################
# Global/local scope demo: func() below rebinds its own local x.
x = 50
def func():
    """Return 1000 from a local binding; the global ``x`` is not touched here."""
    local_value = 1000
    return local_value
print("Before function call, x is:", x)  # still the global value (50)
# Rebinding the global x with func()'s return value.
x = func()
print("After function call, x is: ", x)  # now 1000
|
# -*- coding:utf-8 -*-
"""
机器人启动index
"""
import datetime
import json
import sys
import time
import itchat
from config import oss_url_2, oss_url_1, sms_msg_1, add_friend_msg
from itchat.content import *
from robot.service import service_handle
from robot.util.redis_conf import predis
from robot.util.oss import upload_file_images_to_oss, itchat_upload_images_to_oss, get_random_name
from tuling_jiqiren.tuling_1 import robot_reply
from robot import constants
reload(sys)
sys.setdefaultencoding('utf8')
# Dedicated itchat instance for this bot (kept separate from the default one).
new_instance_b = itchat.new_instance()
def get_login_robot():
    """Return (NickName, Uin) of the currently logged-in robot account."""
    me = new_instance_b.get_friends(update=True)[0]
    return me["NickName"], me["Uin"]
@new_instance_b.msg_register([TEXT, MAP, CARD, NOTE, SHARING])
def text_reply(msg):
    # Echo handler: debug-print the message, then send its text back to the
    # sender.  NOTE(review): text_reply_text below also registers for TEXT;
    # confirm which registration itchat keeps.
    print "66666666666666666666666", msg['Type']
    print msg
    new_instance_b.send('%s' % (msg['Text']), msg['FromUserName'])
@new_instance_b.msg_register([SHARING], isGroupChat=True)
def text_reply(msg):
    # Group SHARING handler: tag the message with the robot identity and
    # persist it via service_handle.
    print "===============分享======================="
    print msg
    msg["jiqiren_name"], msg["jiqiren_uin"] = get_login_robot()
    service_handle.q_into_text(new_instance_b, msg)
    # new_instance_b.send('%s: %s' % (msg['Type'], msg['Text']), msg['FromUserName'])
# For these four message types, msg['Text'] holds a callable that downloads
# the message content (it can optionally take a target file path).
@new_instance_b.msg_register([PICTURE, RECORDING, ATTACHMENT, VIDEO])
def download_files(msg):
    # 1:1 file/picture/recording/video handler: upload the content to OSS,
    # store the chat record, then forward short-link notices.
    print "===========download_files==========="
    print msg
    jqr = dict()
    jqr["jqr_name"], jqr["jqr_uin"] = get_login_robot()
    code = "look_msg"
    from_user_name = msg['FromUserName']
    to_user_name = msg['ToUserName']
    msg_type = msg['Type']  # file type
    """上传文件存储到阿里云"""
    x = msg['Text']()  # calling Text downloads the raw content
    #file_name = msg["FileName"]
    file_type = (msg["FileName"]).split('.')[1]
    file_name = get_random_name(file_type)
    url_name = oss_url_2 + file_name
    url_name_1 = oss_url_1 + file_name
    x = upload_file_images_to_oss(x, url_name)
    print url_name_1
    """查找发文件人信息"""
    search_friend = service_handle.search_friends(new_instance_b, userName=from_user_name)
    """存储好友聊天信息"""
    friend_data = service_handle.friend_into_text2(jqr=jqr, content=url_name_1, to_user_name=to_user_name,
                                                   search_friend=search_friend, msg_type=msg_type)
    """从缓存不断获取发送消息, 指导输入结束,结束本次文字加图片的发送"""
    redis_content = predis.get(name=msg['FromUserName'])
    if redis_content:
        # Multi-message session in progress: append this upload's URL.
        """多条消息连在一起"""
        redis_content = json.loads(redis_content)
        redis_content[1] += "{{"+url_name_1+"}}"
        redis_content = json.dumps(redis_content)
        predis.set(name=msg['FromUserName'], value=redis_content)
        new_instance_b.send("收到,完成后发送'结束'.", msg['FromUserName'])
    else:
        # Single-message path: store and notify immediately.
        """发送单条消息"""
        service_handle.friend_into_text2(jqr=jqr, content=url_name_1, to_user_name=to_user_name, search_friend=search_friend,
                                         msg_type=msg_type)
        if "KeyWord" in search_friend:
            KeyWord = search_friend["KeyWord"]
        else:
            KeyWord = ""
        Alias = search_friend["Alias"] if search_friend["Alias"] else "not_Alias_to_key_word"
        user_list = service_handle.sql_search_user(Alias=Alias, keyword=KeyWord)
        if not user_list:
            user_list = service_handle.sql_search_user(userName=msg['FromUserName'])
        print "##user_list##", user_list
        notice_info_list = service_handle.into_edu_school_notice(content="{{" + url_name_1 + "}}", to_user_name=to_user_name,
                                                                 user_list=user_list, content_class_name="")
        print "###notice_info_list###", notice_info_list
        for notice_info in notice_info_list:
            print notice_info
            """生成短链接"""
            msg_url = service_handle.handel_send_url(notice_info, code)
            if code == "class_msg":
                msg_url = notice_info["school_class_name"] + msg_url
                new_instance_b.send(msg_url, msg['FromUserName'])
            else:
                if notice_info["school_class_user_name"]:
                    msg_url = sms_msg_1.format(notice_info["user_displayname"],str(datetime.datetime.now())[:16]) + msg_url
                    new_instance_b.send(msg_url, notice_info["school_class_user_name"])
                else:
                    new_instance_b.send(u"获取班级信息错误,请在微信群里@机器人。", msg['FromUserName'])
    # new_instance_b.send('@%s@%s' % ('img' if msg['Type'] == 'Picture' else 'fil', msg['FileName']), msg['FromUserName'])
    # return '%s received' % msg['Type']
    # msg['Text'](msg['FileName'])
    # #print '@%s@%s' % ({'Picture': 'img', 'Video': 'vid'}.get(msg['Type'], 'fil'), msg['FileName'])
    # return '@%s@%s' % ({'Picture': 'img', 'Video': 'vid'}.get(msg['Type'], 'fil'), msg['FileName'])
# For these four message types, msg['Text'] holds a callable that downloads
# the message content (it can optionally take a target file path).
@new_instance_b.msg_register([PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat=True)
def download_files(msg):
    # Group file handler: when enabled by config, upload group pictures to
    # OSS and store the chat record with the OSS URL as content.
    print "======群图片信息=========="
    msg["jiqiren_name"], msg["jiqiren_uin"] = get_login_robot()
    import config
    if config.OPEN_ROOM_FILE:
        if msg["Type"] == 'Picture':
            img_io = msg['Text']()  # download the picture bytes
            # show_url = itchat_upload_images_to_oss(img_io)
            file_type = (msg["FileName"]).split('.')[1]
            file_name = get_random_name(file_type)
            url_name = oss_url_2 + file_name
            show_url = oss_url_1 + file_name
            x = upload_file_images_to_oss(img_io, url_name)
            print show_url
            msg['Content'] = show_url  # replace content with the public URL
            """存储群聊天信息"""
            print "=================存储群聊天信息========================="
            service_handle.q_into_text(new_instance_b, msg)
# Friend (1:1) TEXT chat handler.
@new_instance_b.msg_register(TEXT)
def text_reply_text(msg):
    # Command dispatcher for 1:1 text: sync friends/rooms, multi-message
    # sessions buffered in redis, help link, chatbot fallback, and sending
    # short-link notices to class contacts.
    print "===============朋友text 聊天处理=====text_reply_text================="
    content = msg["Content"].lstrip(' ')
    print "##content##", content
    from_user_name = msg['FromUserName']
    to_user_name = msg['ToUserName']
    code = "look_msg"
    content_class_name = ""
    content_cmd = content
    msg_type = "text"
    jqr = dict()
    jqr["jqr_name"], jqr["jqr_uin"] = get_login_robot()
    print "==============name, uin=================="
    if content.strip() == constants.UPDATE_FRIENDS_KEY:
        # Manual command: sync friend info to the database.
        """手动输入同步好友信息"""
        print "===========itchat===开始同步好友信息========================="
        friends = new_instance_b.get_friends(update=True)[1:]
        begin_time_now = datetime.datetime.now()
        service_handle.sync_friends(new_instance_b, friends, jqr)
        end_time_now = datetime.datetime.now()
        last_time = end_time_now - begin_time_now
        print "===同步好友信息完成,开始时间:", begin_time_now
        print "===结束时间:", end_time_now
        print "===总共花费时间:", last_time
    if content.strip() == constants.UPDATE_ROOMS_KEY:
        # Manual command: sync chatroom info to the database.
        """手动输入同步群信息"""
        print "===========itchat===开始同步群信息========================="
        rooms = new_instance_b.get_chatrooms(update=True)
        begin_time_now = datetime.datetime.now()
        service_handle.sync_rooms(new_instance_b, rooms, jqr)
        end_time_now = datetime.datetime.now()
        last_time = end_time_now - begin_time_now
        print "===同步群信息完成,开始时间:", begin_time_now
        print "===结束时间:", end_time_now
        print "===总共花费时间:", last_time
    search_friend = service_handle.search_friends(new_instance_b, userName=from_user_name)
    redis_content = predis.get(name=msg['FromUserName'])
    if redis_content and content_cmd != constants.END_MESSAGES:
        # Multi-message session in progress: buffer this text in redis.
        redis_content = json.loads(redis_content)
        redis_content[1] += "{{"+content_cmd+"}}"
        redis_content = json.dumps(redis_content)
        predis.set(name=msg['FromUserName'], value=redis_content)
        return new_instance_b.send("收到,完成后发送'结束'.", msg['FromUserName'])
    if content.strip() == constants.HELP:
        # Help command: reply with a shortened help URL.
        """输入使用说明, 返回帮助链接 """
        help_url = service_handle.xin_lang_convert_short_url(constants.HELP_URL)
        return new_instance_b.send('%s: %s' % ("点滴机器人使用说明", help_url), msg['FromUserName'])
    flag = False
    if constants.AT_SEND_MESSAGES in content or constants.AT_MANAGE_CLASS_KEY in content:
        # "<class>@<command>" form targets a single group.
        """判断是否给单个群发信息, @发消息, @班级管理"""
        content_list = content.split("@")
        if len(content_list) == 2:
            content_class_name = content_list[0]
            content_cmd = content_list[1]
            flag = True
            print "=============@发消息=============", content_cmd
    elif not content_cmd in constants.CMD_DATA_LIST and not content_cmd.startswith(constants.SEND_MESSAGES) \
            and content_cmd != constants.SEND_MESSAGES:
        # Not a command: fall back to the Tuling chatbot reply.
        """跟机器人聊天返回信息"""
        jiqiren_msg = robot_reply(content_cmd, uid=msg['FromUserName'])
        """存聊天信息"""
        friend_data = service_handle.friend_into_text2(jqr=jqr, content=content_cmd, to_user_name=to_user_name,
                                                       search_friend=search_friend, msg_type=msg_type)
        return new_instance_b.send(jiqiren_msg, msg['FromUserName'])
    if content_cmd == constants.MANAGE_CLASS_KEY:
        """班级管理"""
        code = "class_msg"
    if content_cmd == constants.SEND_MESSAGES:
        # Start a multi-message session (buffer keyed by the sender).
        """开始输入发送多条消息"""
        predis.set(name=msg['FromUserName'], value=json.dumps([content_class_name, ""]))
        return new_instance_b.send("请输入文字内容:", msg['FromUserName'])
    if content_cmd == constants.END_MESSAGES:
        # Finish the multi-message session and flush the redis buffer.
        """输入结束, 完成一次发送多条消息"""
        if redis_content:
            redis_content = json.loads(redis_content)
            content_class_name = redis_content[0]
            content_cmd = redis_content[1]
            predis.delete(msg['FromUserName'])
            new_instance_b.send("完成。", msg['FromUserName'])
    if content.startswith(constants.SEND_MESSAGES) and content != constants.SEND_MESSAGES:
        content_cmd = content.lstrip(constants.SEND_MESSAGES).lstrip(':').lstrip(':').strip()
        content_cmd = "{{"+content_cmd+"}}"
    friend_data = service_handle.friend_into_text2(jqr=jqr, content=content_cmd, to_user_name=to_user_name,
                                                   search_friend=search_friend, msg_type=msg_type)
    print "=================search_friend=================", search_friend
    if "KeyWord" in search_friend:
        KeyWord = search_friend["KeyWord"]
    else:
        KeyWord = ""
    Alias = search_friend["Alias"] if search_friend["Alias"] else "not_Alias_to_key_word"
    user_list = service_handle.sql_search_user(Alias=Alias, keyword=KeyWord)
    if not user_list:
        user_list = service_handle.sql_search_user(userName=msg['FromUserName'])
    print "=================user_list==================="
    print user_list
    """如果flag为true, 则单个群发消息"""
    if flag:
        print "==================flag===========", flag
        content_cmd = content_cmd.lstrip(constants.SEND_MESSAGES).lstrip(':').lstrip(':').strip()
        content_cmd = "{{"+content_cmd+"}}"
    """存入消息表"""
    notice_info_list = service_handle.into_edu_school_notice(content=content_cmd, to_user_name=to_user_name,
                                                             user_list=user_list, content_class_name=content_class_name)
    for notice_info in notice_info_list:
        # Convert each notice into a short link and deliver it.
        """把消息处理成短链接发送"""
        msg_url = service_handle.handel_send_url(notice_info, code)
        print "=================发送短链接拉====================="
        if code == "class_msg":
            print "===================class-msg===================="
            msg_url = notice_info["school_class_name"] + msg_url
            """回复老师的问题"""
            new_instance_b.send(msg_url, msg['FromUserName'])
        else:
            if notice_info["school_class_user_name"] and not flag:
                print "==========66666666666666666666666666"
                msg_url = sms_msg_1.format(notice_info["user_displayname"], str(datetime.datetime.now())[:16]) + msg_url
                new_instance_b.send(msg_url, notice_info["school_class_user_name"])
            elif notice_info["school_class_name"] == content_class_name and flag:
                msg_url = sms_msg_1.format(notice_info["user_displayname"], str(datetime.datetime.now())[:16]) + msg_url
                new_instance_b.send(msg_url, notice_info["school_class_user_name"])
            else:
                print "============77777777777777777========================="
                new_instance_b.send(u"获取班级信息错误,请在微信群里@机器人。", msg['FromUserName'])
# Automatically accept incoming friend requests.
@new_instance_b.msg_register(FRIENDS)
def add_friend(msg):
    print "##add_friend##", msg
    new_instance_b.add_friend(**msg['Text'])  # registers the new friend; no contact reload needed
    new_instance_b.send(add_friend_msg, msg['RecommendInfo']['UserName'])  # greet the new friend
# Group TEXT message handler.
@new_instance_b.msg_register(TEXT, isGroupChat=True)
def groupchat_reply(msg):
    # Group text: serve help / growth-log links, otherwise store the chat
    # (isAt messages additionally update group-member info).
    print "=======================群 text消息处理 groupchat_reply===================="
    """
    在注册时增加isGroupChat=True将判定为群聊回复
    [PICTURE, RECORDING, ATTACHMENT, VIDEO]
    isAt: 判断是否@本号,ActualNickName: 实际NickName,Content: 实际Content
    """
    content = msg["Content"].lstrip(' ')
    q_username = msg['FromUserName']
    chat_room = new_instance_b.search_chatrooms(userName=q_username)
    room_uin = chat_room["Uin"]
    room_nick_name = chat_room["NickName"]
    print "====room_uin======room_nick_name==========="
    print room_uin, room_nick_name
    if content.strip() == constants.HELP:
        # Help command: reply with a shortened help URL.
        """输入使用说明, 返回帮助链接 """
        help_url = service_handle.xin_lang_convert_short_url(constants.HELP_URL)
        return new_instance_b.send('%s: %s' % ("点滴机器人使用说明", help_url), msg['FromUserName'])
    if content.strip() == constants.GROW:
        # Growth-log command: look the room up (by Uin, else by NickName)
        # and reply with a shortened log URL.
        """输入成长日志, 返回成长日志链接 """
        print "========成长日志从这里开始=========="
        if room_uin:
            c_room = service_handle.service_search_room_id_by_uin(room_uin)
        else:
            c_room = service_handle.service_search_room_id_by_nick_name(room_nick_name)
        print "=============成长日志 room=============="
        print c_room
        if c_room:
            room_id = str(c_room.id)
            print "===成长日志 room id==", room_id
            r_url = "http://jx.diandiyun.com/wx/robots/rooms/"+room_id+"/robotlog"
            print "====发送加密前的===成才日志===url==="
            print r_url
            room_url = service_handle.xin_lang_convert_short_url(r_url)
            print "====发送加密后的===成才日志===url==="
            print room_url
            return new_instance_b.send('%s: %s' % ("点击查看成长日志", room_url), msg['FromUserName'])
        error_msg = u'暂无日志查看'
        print "=========================成长日志, 没有日志可看============="
        return new_instance_b.send(error_msg, msg['FromUserName'])
    msg["jiqiren_name"], msg["jiqiren_uin"] = get_login_robot()
    if msg['isAt']:
        print "============begin isAt============================"
        print msg
        print "============end isAt==================================="
        """更新群成员信息"""
        service_handle.q_isat_into(new_instance_b, msg)
    else:
        print "===================begin not isAt====================="
        """存储群聊天信息"""
        service_handle.q_into_text(new_instance_b, msg)
# # 群 text消息处理
# @new_instance_b.msg_register(TEXT, isGroupChat=True)
# def groupchat_reply(msg):
# from_user_name = msg['FromUserName']
# memberlist = new_instance_b.update_chatroom(from_user_name, detailedMember=True)
# print "####memberlist####",memberlist
@new_instance_b.msg_register(SYSTEM)
def get_uin(msg):
    """Handle login-time SYSTEM 'uins' updates: sync the robot's own record
    plus friend/chatroom/official-account info to the database.

    :param msg: itchat SYSTEM message; msg['Text'] lists updated UserNames.
    :return: None
    """
    if msg['SystemInfo'] != 'uins':
        return
    else:
        pass
    """登录者本人信息"""
    myself_info = new_instance_b.get_friends(update=True)[0]
    print "======robot_info============"
    my_user = new_instance_b.search_friends(userName=myself_info["UserName"])
    recod_id = service_handle.add_robot_self(my_user["NickName"], my_user["Uin"])
    print my_user["Uin"], recod_id
    print "======robot_info============"
    ins = new_instance_b.instanceList[0]
    fullContact = ins.memberList + ins.chatroomList + ins.mpList
    print('** Uin Updated **')
    for username in msg['Text']:
        member = new_instance_b.utils.search_dict_list(fullContact, 'UserName', username)
        nick_name = member.get('NickName', '')
        uin = member['Uin']
        alias = member['Alias']
        user_name = member['UserName']
        EncryChatRoomId = member["EncryChatRoomId"]
        """更新班级信息, 即群信息"""
        # NOTE(review): the substring checks below imply uin is a string
        # here -- confirm against itchat's contact representation.
        if "@chatroom" in uin:
            xcv = new_instance_b.update_chatroom(userName=EncryChatRoomId)
            service_handle.handel_class_update_sys(uin=uin, nick_name=nick_name, user_name=user_name,
                                                   KeyWord=xcv["KeyWord"])
        elif "gh_" in uin:
            print "公众号", uin
        else:
            """更新好友信息"""
            print "好友", uin, alias, nick_name
            if uin and not alias:
                alias = uin  # fall back to the uin when no alias exists
            service_handle.friend_update_sys(uin=uin, nick_name=nick_name, alias=alias, user_name=user_name)
def init_robot_data():
    # Kick off the asynchronous (.delay, presumably celery) robot-data sync.
    service_handle.sync_robot_data.delay()
def run_index(key="default"):
    """Log the robot in (terminal QR on linux) and block on the message loop."""
    cmd_qr = 2 if "linux" in sys.platform else False
    """初始化机器人数据库表"""
    pkl_file = "pkl/robot_" + str(key) + ".pkl"
    new_instance_b.auto_login(hotReload=True, enableCmdQR=cmd_qr, picDir=None,
                              qrCallback=None, loginCallback=None,
                              exitCallback=None, statusStorageDir=pkl_file)
    new_instance_b.run(debug=True, blockThread=True)
if __name__ == '__main__':
    # Terminal QR code (value 2) on linux servers; GUI QR elsewhere.
    if "linux" in sys.platform:
        enable_cmd_qr = 2
    else:
        enable_cmd_qr = False
    new_instance_b.auto_login(hotReload=True, enableCmdQR=enable_cmd_qr, picDir=None, qrCallback=None, loginCallback=None,
                              exitCallback=None, statusStorageDir='pkl/robot_default.pkl')
    new_instance_b.run(debug=True, blockThread=True)
|
import logging
import os
import random
import string
from kubeflow.testing import argo_build_util
# The name of the NFS volume claim to use for test files.
NFS_VOLUME_CLAIM = "nfs-external"
# The name to use for the volume to use to contain test data
DATA_VOLUME = "kubeflow-test-volume"
# Names of the main and exit-handler DAG templates inside the workflow.
E2E_DAG_NAME = "e2e"
EXIT_DAG_NAME = "exit-handler"
# When "True" (string, from the environment) skip cloud credentials and
# registry pushes so the workflow can be tried out locally.
LOCAL_TESTING = os.getenv("LOCAL_TESTING", "False")
# Volume/mount pair exposing the docker-config ConfigMap to kaniko.
DOCKER_CONFIG_VOLUME = {"name": "docker-config",
                        "configMap": {"name": "docker-config"}}
DOCKER_CONFIG_MOUNT = {"name": "docker-config",
                       "mountPath": "/kaniko/.docker/"}
# Volume/mount pair exposing AWS credentials from the aws-secret Secret.
AWS_CREDENTIALS_VOLUME = {"name": "aws-secret",
                          "secret": {"secretName": "aws-secret"}}
AWS_CREDENTIALS_MOUNT = {"mountPath": "/root/.aws/",
                         "name": "aws-secret"}
# Default worker image used for all Argo task containers.
AWS_WORKER_IMAGE = "public.ecr.aws/j1r0q0g6/kubeflow-testing:latest"
class ArgoTestBuilder:
def __init__(self, name=None, namespace=None, bucket=None,
             test_target_name=None, release=False,
             **kwargs):
    """Hold common config and the directory layout for Argo test workflows.

    Args:
      name: Name of the workflow (also used to scope test directories).
      namespace: Namespace to launch the Argo workflow in.
      bucket: Bucket for the test artifacts/output.
      test_target_name: Optional test target name, exported to every task
        as the TEST_TARGET_NAME environment variable.
      release: Whether this is a release build (affects kaniko image tags).
    """
    self.name = name
    self.namespace = namespace
    self.bucket = bucket
    self.template_label = "argo_test"
    self.test_target_name = test_target_name
    self.mkdir_task_name = "make-artifacts-dir"
    self.release = release
    # *********************************************************************
    #
    # Define directory locations
    #
    # *********************************************************************
    # mount_path is the directory where the volume to store the test data
    # should be mounted.
    self.mount_path = "/mnt/" + "test-data-volume"
    # test_dir is the root directory for all data for a particular test
    # run.
    self.test_dir = self.mount_path + "/" + self.name
    # output_dir is the directory to sync to GCS to contain the output for
    # this job.
    self.output_dir = self.test_dir + "/output"
    self.artifacts_dir = "%s/artifacts/junit_%s" % (self.output_dir, name)
    # source directory where all repos should be checked out
    self.src_root_dir = "%s/src" % self.test_dir
    # The directory containing the kubeflow/kubeflow repo
    self.src_dir = "%s/kubeflow/kubeflow" % self.src_root_dir
    # Root of testing repo.
    self.testing_src_dir = os.path.join(self.src_root_dir,
                                        "kubeflow/testing")
    # Top level directories for python code
    self.kubeflow_py = self.src_dir
    # The directory within the kubeflow_testing submodule containing
    # py scripts to use.
    self.kubeflow_testing_py = "%s/kubeflow/testing/py" % self.src_root_dir
    self.go_path = self.test_dir
def _build_workflow(self, exit_dag=True):
    """Create a scaffolding CR for the Argo workflow.

    Returns a Workflow dict with an empty e2e DAG template (and, when
    *exit_dag* is True, an empty exit-handler DAG wired via onExit).
    """
    volumes = [
        {
            "name": DATA_VOLUME,
            "persistentVolumeClaim": {"claimName": NFS_VOLUME_CLAIM},
        },
    ]
    # Only mount credentials when not running locally.
    if LOCAL_TESTING == "False":
        volumes += [AWS_CREDENTIALS_VOLUME, DOCKER_CONFIG_VOLUME]

    labels = argo_build_util.add_dicts([
        {
            "workflow": self.name,
            "workflow_template": self.template_label,
        },
        argo_build_util.get_prow_labels(),
    ])

    # Have argo garbage collect old workflows otherwise we overload
    # the API server.
    spec = {
        "entrypoint": E2E_DAG_NAME,
        "volumes": volumes,
        "templates": [
            {"dag": {"tasks": []}, "name": E2E_DAG_NAME},
        ],
    }
    workflow = {
        "apiVersion": "argoproj.io/v1alpha1",
        "kind": "Workflow",
        "metadata": {
            "name": self.name,
            "namespace": self.namespace,
            "labels": labels,
        },
        "spec": spec,
    }
    if exit_dag:
        spec["onExit"] = EXIT_DAG_NAME
        spec["templates"].append({"dag": {"tasks": []}, "name": EXIT_DAG_NAME})
    return workflow
def build_task_template(self, mem_override=None, deadline_override=None):
    """Return a template for all the tasks.

    Args:
      mem_override: Optional memory limit overriding the 4Gi default.
      deadline_override: Optional activeDeadlineSeconds overriding 3000.
    """
    mounts = [{"mountPath": "/mnt/test-data-volume", "name": DATA_VOLUME}]
    # Credentials are only mounted for non-local runs.
    if LOCAL_TESTING == "False":
        mounts += [AWS_CREDENTIALS_MOUNT, DOCKER_CONFIG_MOUNT]

    task_template = {
        "activeDeadlineSeconds": deadline_override if deadline_override else 3000,
        "container": {
            "command": [],
            "env": [],
            "image": AWS_WORKER_IMAGE,
            "imagePullPolicy": "Always",
            "name": "",
            "resources": {
                "limits": {
                    "cpu": "4",
                    "memory": mem_override if mem_override else "4Gi",
                },
                "requests": {
                    "cpu": "1",
                    "memory": "1536Mi",
                },
            },
            "volumeMounts": mounts,
        },
        "metadata": {
            "labels": {
                "workflow_template": self.template_label,
            }
        },
        "outputs": {},
    }

    # Common environment variables added to all steps.
    env = task_template["container"]["env"]
    env.append({
        "name": "PYTHONPATH",
        "value": ":".join([self.kubeflow_py, self.kubeflow_testing_py]),
    })
    env.append({"name": "GOPATH", "value": self.go_path})
    if self.test_target_name:
        env.append({
            "name": "TEST_TARGET_NAME",
            "value": self.test_target_name,
        })
    return argo_build_util.add_prow_env(task_template)
# Common tasks
def create_install_modules_task(self, task_template, workingDir):
    """Common task: run `npm ci` in *workingDir* to install node modules."""
    task = argo_build_util.deep_copy(task_template)
    task["name"] = "npm-modules-install"
    container = task["container"]
    container["image"] = "node:12.20.1-stretch-slim"
    container["command"] = ["npm"]
    container["args"] = ["ci"]
    container["workingDir"] = workingDir
    return task
def create_format_typescript_task(self, task_template, workingDir):
    """Task that checks frontend formatting via `npm run format:check`."""
    task = argo_build_util.deep_copy(task_template)
    task["name"] = "check-frontend-formatting"
    container = task["container"]
    container["image"] = "node:12.20.1-stretch-slim"
    container["command"] = ["npm"]
    container["args"] = ["run", "format:check"]
    container["workingDir"] = workingDir
    return task
def create_format_python_task(self, task_template, workingDir):
    """Task that lints python formatting with flake8 in *workingDir*."""
    task = argo_build_util.deep_copy(task_template)
    task["name"] = "check-python-formatting"
    container = task["container"]
    container["image"] = "python:3.7-slim-buster"
    container["command"] = ["/bin/sh", "-c"]
    container["args"] = ["pip install flake8 && flake8 ."]
    container["workingDir"] = workingDir
    return task
def create_kaniko_task(self, task_template, dockerfile, context,
                       destination, no_push=False):
    """Return a task that builds (and optionally pushes) an image with Kaniko.

    When testing the produced workflow locally no images are pushed, which
    makes it easier to try out and extend the code.
    """
    task = argo_build_util.deep_copy(task_template)
    if ":" not in destination:
        # No explicit tag given: derive one from the release VERSION file,
        # or from the prow base ref and a short commit sha.
        if self.release:
            version_file = os.path.join("/src/kubeflow/kubeflow",
                                        "releasing/version/VERSION")
            with open(version_file) as f:
                destination = "%s:%s" % (destination, f.read().strip())
        else:
            sha = os.getenv("PULL_BASE_SHA", "12341234kanikotest")
            base = os.getenv("PULL_BASE_REF", "master")
            destination = "%s:%s-%s" % (destination, base, sha[:8])
    # Make the step name unique with a short random suffix.
    charset = string.ascii_lowercase + string.digits
    suffix = "".join(random.choices(charset, k=8))
    task["name"] = "kaniko-build-push-" + suffix
    container = task["container"]
    container["image"] = "gcr.io/kaniko-project/executor:v1.5.0"
    container["command"] = ["/kaniko/executor"]
    container["args"] = ["--dockerfile=%s" % dockerfile,
                        "--context=%s" % context,
                        "--destination=%s" % destination]
    # Skip the registry push when running the workflow yaml locally.
    if LOCAL_TESTING == "True" or no_push:
        container["args"].append("--no-push")
    return task
def _create_checkout_task(self, task_template):
    """Return a task that checks out the kubeflow/testing and kubeflow/kubeflow code."""
    repo = argo_build_util.get_repo_from_prow_env()
    if not repo:
        logging.info("Prow environment variables for repo not set")
        repo = "kubeflow/testing@HEAD"
    logging.info("Main repository: %s", repo)
    task = argo_build_util.deep_copy(task_template)
    task["name"] = "checkout"
    task["container"]["command"] = [
        "/usr/local/bin/checkout_repos.sh",
        "--repos=" + ",".join([repo]),
        "--src_dir=" + self.src_root_dir,
    ]
    return task
def _create_make_dir_task(self, task_template):
    """Return a task that pre-creates the per-task artifacts directory.

    (jlewi) pytest was failing when it called makedirs itself — most likely
    two steps racing to create the same directory and clashing — so the
    directory is created up-front in its own step.
    """
    task = argo_build_util.deep_copy(task_template)
    task["name"] = self.mkdir_task_name
    task["container"]["command"] = ["mkdir", "-p", self.artifacts_dir]
    return task
def build_init_workflow(self, exit_dag=True):
    """Build the Argo workflow graph: checkout, then mkdir for artifacts."""
    workflow = self._build_workflow(exit_dag)
    template = self.build_task_template()
    # Check out the source first; everything else depends on it.
    checkout = self._create_checkout_task(template)
    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, checkout, [])
    # Artifacts directory is created once the code is in place.
    mkdir = self._create_make_dir_task(template)
    argo_build_util.add_task_to_dag(workflow, E2E_DAG_NAME, mkdir,
                                    [checkout["name"]])
    return workflow
# the following methods should be implemented from the test cases
def build(self):
    """Build the Argo Workflow for this test (must be overridden)."""
    raise NotImplementedError("Subclasses should implement this!")
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
    # NOTE(review): declared without `self` although it sits among methods —
    # presumably meant to be overridden as a module-level factory in each
    # test file; confirm whether it should be a @staticmethod here.
    """Return the final dict with the Argo Workflow to be submitted"""
    raise NotImplementedError("Subclasses should implement this!")
|
import webapp2
import cgi
import os
import re
from google.appengine.ext import db
import jinja2
import random
from string import letters
import hashlib
import hmac
# Jinja environment: templates live in ./templates next to this file;
# autoescape protects rendered pages against HTML injection.
jinja_env = jinja2.Environment(autoescape=True,
    loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
# Validation patterns for the signup form.
# Raw strings so backslash escapes like \S reach the regex engine verbatim;
# the non-raw "\S" form is an invalid string escape (DeprecationWarning on
# modern Python).  Patterns themselves are unchanged.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")   # 3-20 of letters/digits/_/-
PASS_RE = re.compile(r"^.{3,20}$")               # any 3-20 characters
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")   # loose local@domain.tld check
# user crypotolgy used in login
def make_salt(length = 5):
    """Return a random salt of `length` letters for password hashing."""
    chars = [random.choice(letters) for _ in xrange(length)]
    return ''.join(chars)
def make_pw_hash(name, pw, salt = None):
    """Return 'salt,sha256hex' of name+pw+salt; a salt is made when omitted."""
    if salt is None or salt == '':
        salt = make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s,%s' % (salt, digest)
def valid_pw(name, password, h):
    """True when hashing the password with the salt stored in h reproduces h."""
    salt = h.partition(',')[0]
    return make_pw_hash(name, password, salt) == h
# user crypotolgy used in login end
# Cookie signing: HMAC the value under a server-side secret so the username
# can be trusted when it comes back from the client.
SECRET = 'I LOVE JINGJING'
def hash_str(s):
    # Keyed HMAC of s under SECRET (digest left at the library default).
    return hmac.new(SECRET, s).hexdigest()
def make_secure_val(s):
    """Return 'value|signature', a tamper-evident form for cookie storage."""
    return '|'.join([s, hash_str(s)])
def check_secure_val(h):
    """Return the embedded value when its signature verifies, else None."""
    val = h.partition('|')[0]
    if make_secure_val(val) == h:
        return val
# private end
# Form-field validators: each returns a truthy value on success.
def valid_username(username):
    """Username must match USER_RE (3-20 of [a-zA-Z0-9_-])."""
    return USER_RE.match(username)
def valid_password(password):
    """Password must be 3-20 characters long."""
    return PASS_RE.match(password)
def valid_email(email):
    """Empty email is allowed; otherwise it must look like an address."""
    return True if email == "" else EMAIL_RE.match(email)
# make a database class:User
class User(db.Model):
    """Datastore entity for a registered user."""
    name = db.StringProperty(required = True)
    # Stored as "salt,sha256hex" (see make_pw_hash).
    pw_hash = db.StringProperty(required = True)
    email = db.StringProperty()
    @classmethod
    def by_id(cls, uid):
        """Fetch a user by datastore id.

        FIX: this was an instance method whose `self` was ignored; as a
        classmethod both `User.by_id(uid)` and `u.by_id(uid)` work, matching
        the lookup style of by_name below.
        """
        return cls.get_by_id(uid)
    @staticmethod
    def by_name(name):
        """Fetch a user by unique username, or None."""
        u = User.all().filter('name =', name).get()
        return u
    @staticmethod
    def login(name, pw, email=None):
        """Return the user when name/password check out, else None.

        NOTE(review): `email` is accepted but unused — kept for caller
        compatibility.
        """
        u = User.all().filter('name =', name).get()
        if u and valid_pw(name, pw, u.pw_hash):
            return u
class BlogHandler(webapp2.RequestHandler):
    """Base handler: template rendering plus the signed-cookie helper."""
    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        """Render the named Jinja template to a string."""
        return jinja_env.get_template(template).render(params)
    def render(self, template, **kw):
        """Render a template straight into the response body."""
        self.write(self.render_str(template, **kw))
    def make_cookie(self, username):
        """Attach a signed `username` cookie to the response."""
        signed = make_secure_val(username)
        self.response.headers.add_header('Set-Cookie',
                                         "username=%s; Path=/" % str(signed))
class Signup(BlogHandler):
    """Signup form: GET renders the page, POST validates and creates a user."""
    def output(self,err_user="",err_pass="",err_veri="",err_email="",username="",email=""):
        # Re-render the form, echoing entered values plus any error messages.
        self.render("signup.html",err_user=err_user, err_pass = err_pass, err_veri=err_veri, err_email=err_email,username=username,email=email)
    def get(self):
        self.output()
    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")
        verify = self.request.get("verify")
        email = self.request.get("email")
        # (Removed the parallel boolean flags — the message strings already
        # carry the same information.)
        err_user_text, err_pass_text, err_veri_text, err_email_text = "", "", "", ""
        if not valid_username(username):
            err_user_text = "That's not a valid username."
        if not valid_password(password):
            err_pass_text = "That wasn't a valid password."
        elif password != verify:
            err_veri_text = "Your passwords didn't match."
        if not valid_email(email):
            err_email_text = "That's not a valid email."
        if err_user_text or err_pass_text or err_veri_text or err_email_text:
            self.output(err_user_text, err_pass_text, err_veri_text,
                        err_email_text, username, email)
        else:
            # The request value is not a plain str; the model stores it as-is
            # and make_cookie str()s it for the header.
            u = User(name = username,
                     pw_hash = make_pw_hash(username, password),
                     email = email)
            u.put()
            # BUG FIX: the cookie used to be set twice (once via a manual
            # add_header, once via make_cookie), emitting two Set-Cookie
            # headers.  make_cookie alone is sufficient.
            self.make_cookie(username)
            self.redirect("/welcome")
class Login(BlogHandler):
    """Login form: GET renders, POST authenticates and sets the cookie."""
    def get(self):
        self.render("login.html")
    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")
        # User.login returns the entity on success, None otherwise.
        user = User.login(username, password)
        if not user:
            self.render("login.html", error = "User or Password invalid!")
            return
        self.make_cookie(username)
        self.redirect("/welcome")
class Welcome(BlogHandler):
    """Greets the signed-in user after verifying the signed username cookie."""
    def get(self):
        cookie = self.request.cookies.get('username', "0|0")
        # check_secure_val yields None when the signature does not verify.
        self.render("welcome_signup.html", username = check_secure_val(cookie))
class Logout(BlogHandler):
    """Clears the username cookie and redirects to the signup page."""
    def get(self):
        # (Removed a dead read of the cookie value — logout never used it.)
        # The cookie is cleared two equivalent ways, kept from the original,
        # which demonstrated both techniques:
        # 1) overwrite with an empty value
        self.response.headers.add_header('Set-Cookie', 'username=; Path=/')
        # 2) webapp2's dedicated helper
        self.response.delete_cookie("username", path='/')
        self.redirect('/signup')
# URL routing table for the App Engine WSGI application.
app = webapp2.WSGIApplication([ ('/login', Login),
                                ('/signup',Signup),
                                ('/welcome',Welcome),
                                ('/logout',Logout)],
                              debug=True)
# Demo: conditions, truthiness, equality vs identity.
user = 'modi'
lang = 'python'
# A non-empty string is truthy, so `not user` is False and 'bad' prints.
print('correct user' if not user else 'bad')
a = [1, 2, 3]
b = [1, 2, 3]
# Two separate list objects: equal by value, distinct by identity.
print(id(a))
print(id(b))
print(a == b)   # True
print(a is b)   # False
cond = 'modi'
# Truthy string selects the first branch.
print('true' if cond else 'False')
# @Title: 统计位数为偶数的数字 (Find Numbers with Even Number of Digits)
# @Author: 2464512446@qq.com
# @Date: 2020-01-03 11:59:50
# @Runtime: 64 ms
# @Memory: 12.4 MB
class Solution:
    """LeetCode 1295: count how many numbers have an even number of digits."""
    def findNumbers(self, nums: "List[int]") -> int:
        # FIX: the annotation is now a string — the original bare List[int]
        # was evaluated at def-time and raised NameError because the file
        # never imported List from typing.
        return sum(1 for n in nums if len(str(n)) % 2 == 0)
|
# Count how many list elements are even and how many are odd.
elements = [23, 14, 56, 12, 19, 9, 15, 25, 31, 42, 43]
even_count = 0
odd_count = 0
for value in elements:
    if value % 2 == 0:
        even_count += 1
    else:
        odd_count += 1
print("even_count", even_count)
print("odd_count", odd_count)
|
from google.appengine.ext import db
class Word(db.Model):
    # Datastore entity pairing one word in two languages (a translation pair).
    langA = db.StringProperty()  # word in the first language
    langB = db.StringProperty()  # word in the second language
|
# Bisection method
def bisection(f, a, b, tol=1e-5):
    """Find a root of f in [a, b] by bisection.

    Args:
        f: continuous function of one variable.
        a, b: bracket endpoints; f(a) and f(b) must have opposite signs
            (an endpoint that is exactly a root is returned directly).
        tol: stop once |f(midpoint)| <= tol.

    Returns:
        An approximation of the root.

    Raises:
        ValueError: when f(a) and f(b) do not bracket a sign change.
            (The original printed a message and silently returned None.)
    """
    fa, fb = f(a), f(b)
    # Exact roots at the endpoints short-circuit the search; previously the
    # >= 0 test rejected them with the "won't work" message.
    if fa == 0:
        return a
    if fb == 0:
        return b
    if fa * fb > 0:
        raise ValueError("Method won't work. Needs opposite signs.")
    lower, upper = a, b
    middle = 0.5 * (lower + upper)
    while abs(f(middle)) > tol:
        if f(middle) * f(upper) < 0:
            lower = middle          # sign change lies in [middle, upper]
        else:
            upper = middle          # sign change lies in [lower, middle]
        middle = 0.5 * (lower + upper)
    return middle
|
from ..connecting import connect2MySQL
connect2MySql=connect2MySQL.connect2MySql
# from connect2MySql.connecting.connect2MySQL import connect2MySql
import xlsxwriter
import pandas as pd
import os
class createScript(connect2MySql):
    """Execute a SQL script on an open connection and expose its results."""
    def __init__(self, script, conObj):
        # The script runs immediately on construction.
        self.script = script
        self.conn = conObj
        self.conn.createCursor().execute(self.script)
    def showAll(self):
        """Return every row of the result set."""
        return connect2MySql.showCursor(self.conn).fetchall()
    def showMany(self, number):
        """Return up to `number` rows of the result set."""
        return connect2MySql.showCursor(self.conn).fetchmany(number)
    def to_xlsx(self):
        """Write the result set to result_query.xlsx: header row, then data.

        FIX: removed unused locals (`amplitude`, row count) and flattened the
        one-shot nested functions; the workbook is closed only after every
        row has been written so the file is fully flushed.
        """
        rows = connect2MySql.showCursor(self.conn).fetchall()
        column_names = connect2MySql.showCursor(self.conn).column_names
        workbook = xlsxwriter.Workbook('result_query.xlsx',
                                       {'nan_inf_to_errors': True})
        worksheet = workbook.add_worksheet()
        # Header occupies row 0.
        for col, name in enumerate(column_names):
            worksheet.write(0, col, name)
        # Data rows start at row 1.
        for row, record in enumerate(rows, start=1):
            for col, value in enumerate(record):
                worksheet.write(row, col, value)
        workbook.close()
|
# Read N values and print, for each value in input order, its rank among the
# distinct values: 0 for the smallest, 1 for the next distinct value, etc.
n = int(input())
values = [int(input()) for _ in range(n)]
rank_of = {v: r for r, v in enumerate(sorted(set(values)))}
for v in values:
    print(rank_of[v])
|
from typing import Union
from datetime import datetime
from csv import DictReader
from operator import gt, lt
import bisect
from dateutil.parser import parse, ParserError
def _insort_reverse(a, x, lo=0, hi=None):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x > a[mid]:
hi = mid
else:
lo = mid+1
a.insert(lo, x)
class ColumnException(Exception):
    """Raised when requested column names are not present in the dataset."""
class Row:
    """One dataset row: a row id plus {column name: parsed value}.

    Raw values are coerced, in order, to int, float, datetime (via
    dateutil's parse), or left as str.
    """
    def __init__(self, row_id: "Union[int, str]",
                 data: "dict[str, Union[int, float, str, datetime]]"):
        # FIX: the original annotated `data` with a dict *literal*
        # ({str: Union[...]}), which is not valid typing syntax; string
        # annotations document the intent without being evaluated.
        self.row_id = row_id
        self.data = self.__parse_data(data)
    def get_column_names(self):
        """Return a view of this row's column names."""
        return self.data.keys()
    def __str__(self):
        return str((self.row_id, self.data))
    @classmethod
    def __parse_data(cls, data):
        # Coerce every raw value; keys pass through unchanged.
        return {key: cls.__parse_value(value) for key, value in data.items()}
    @staticmethod
    def __parse_value(value):
        """Best-effort coercion: int, then float, then datetime, else str."""
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return float(value)
        except ValueError:
            pass
        try:
            return parse(value)
        except ParserError:
            return value
class DataSet:
    """A collection of Rows with min/max/sort utilities over named columns."""
    def __init__(self, rows: "list[Row]", column_names: "list[str]"):
        # FIX: the original annotations were list *literals* ([Row], [str]),
        # which are not valid type hints; string annotations document the
        # intent without being evaluated.
        self.rows = rows
        self.column_names = column_names
    @staticmethod
    def from_csv(csv_path: str):
        """Build a DataSet from a CSV file with a header row.

        NOTE(review): an empty CSV (no data rows) raises IndexError on
        rows[0]; that behavior is kept as-is.
        """
        with open(csv_path) as data_file:
            rows = [Row(row_id=i, data=record)
                    for i, record in enumerate(DictReader(data_file))]
        column_names = rows[0].get_column_names()
        return DataSet(rows, column_names)
    def min(self, column_names: "list[str]" = None):
        """Per-column minimum over all rows (all columns when None)."""
        return self.__min_max(lt, column_names)
    def max(self, column_names: "list[str]" = None):
        """Per-column maximum over all rows (all columns when None)."""
        return self.__min_max(gt, column_names)
    def sort(self, column_names: "list[str]" = None, reverse: bool = False):
        """Return {column: sorted values}, descending when reverse=True."""
        if column_names is None:
            column_names = self.column_names
        else:
            self.__validate_column_names(column_names)
        insort = _insort_reverse if reverse else bisect.insort
        columns = {name: [] for name in column_names}
        for row in self.rows:
            for name in column_names:
                insort(columns[name], row.data.get(name))
        return columns
    def __min_max(self, compare, column_names: "list[str]" = None):
        # `compare` is operator.lt for min, operator.gt for max.
        # FIX: it was annotated Union[gt, lt] — functions are not types.
        if column_names is None:
            column_names = self.column_names
        else:
            self.__validate_column_names(column_names)
        result = {}
        for row in self.rows:
            for name in column_names:
                value = row.data.get(name)
                if result.get(name) is None or compare(value, result[name]):
                    result[name] = value
        return result
    def __validate_column_names(self, column_names: "list[str]" = None):
        """Raise ColumnException when any requested column is unknown."""
        if column_names is not None and not set(self.column_names).issuperset(set(column_names)):
            raise ColumnException(f'Column names {set(column_names) - set(self.column_names)} not found')
        return column_names
    def __str__(self):
        return str([str(row) for row in self.rows])
|
# Generated by Django 2.2.3 on 2019-09-09 17:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an integer `times` column
    # (default 1) to the tracks app's exam_result model.
    dependencies = [
        ('tracks', '0017_exam_result_case'),
    ]
    operations = [
        migrations.AddField(
            model_name='exam_result',
            name='times',
            field=models.IntegerField(default=1),
        ),
    ]
|
from flask import Blueprint,render_template,url_for
from pybo.models import Question, Answer, User
from datetime import datetime
from pybo import db
from werkzeug.utils import redirect
# Blueprint mounted at the site root; the view functions below register on it.
bp = Blueprint('main', __name__, url_prefix='/')
@bp.route('/test')
def test():
    """Bulk-insert 100 dummy questions, then return to the main list."""
    for i in range(100):
        q = Question(subject='테스트 데이터 [%03d]' % i, content='내용무',
                     create_date=datetime.now())
        db.session.add(q)
    # Single commit after the loop — presumably the original intent;
    # TODO(review): confirm the commit was not meant per-iteration.
    db.session.commit()
    return redirect(url_for('main.index'))
@bp.route('/hello')
def hello_pybo():
    """Seed ten sample questions into the database and return a greeting.

    FIX: the ten near-identical `q`..`q9` constructions and add() calls are
    now data-driven; a large batch of commented-out ORM experiments
    (query/filter/update/delete/answer examples) was removed for readability.
    """
    seed = [
        ('pybo가 무엇인가요?', 'pybo에 대해서 알고싶습니다.'),
        ('pybo가 힘들 땐 어떻게 해야하나요?', '도움받을 만할 소스를 알고 싶습니다'),
        ('테이블 확인은 어디서하나요?', '무슨프로그램으로 확인하나요?'),
        ('migrate는 무엇인가요?', 'flask db migrate는 어떤 명령어인가요?'),
        ('주소를 더 추가하려면 어떻게 하나요?', '주소를 더 많이 집어넣고 싶습니다.'),
        ('날짜를 추가하려면 어떻게 하나요?', '현재 날짜를 추가하고 싶어요.'),
        ('오류가 나요', '에러 원인을 찾는 법을 알고싶습니다.'),
        ('파이썬 활용법은?', '파이썬으로 무엇을 할 수 있나요?'),
        ('수정된 시간도 추가가 가능한가요?', '질문 수정 시간을 추가하고 싶습니다.'),
        ('flask란 무엇인가요?', 'flask에 대해서 알고싶습니다.'),
    ]
    for subject, content in seed:
        db.session.add(Question(subject=subject, content=content,
                                create_date=datetime.now()))
    db.session.commit()
    return 'Hello, Pybo!'
#@bp.route('/')
#def index():
# question_list= Question.query.order_by(Question.create_date.desc())
# return render_template('question/question_list.html',question_list=question_list)
@bp.route('/')
def index():
    # Root URL: delegate straight to the question blueprint's list view.
    return redirect(url_for('question._list'))
|
"""
@Author : Laura
@File    : __init__.py
@Time : 2020/4/16 18:24
""" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.