| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a56ecad786f4d182bac09142f6f96074b6ef6f3
|
74cec3aefc021447b79bd681901d085f207b1e13
|
/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training_status/utils.py
|
823889e7a4ec6abccf9e01e03cadc8c7dcc6daf3
|
[
"MIT"
] |
permissive
|
timlawless/azure-intelligent-edge-patterns
|
83900c7d9cfa0ddd07131fb503e6837fb81f6cf4
|
d72c3c1c2d56a762b74a72cd3befd076dc77b8ac
|
refs/heads/master
| 2022-12-17T06:16:54.677327
| 2020-09-29T23:52:10
| 2020-09-29T23:52:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
"""
App Utilities
"""
import logging
from .models import TrainingStatus
logger = logging.getLogger(__name__)
def upcreate_training_status(project_id,
status: str,
log: str,
performance: str = "{}",
need_to_send_notification: bool = False):
"""upcreate_training_status.
Consider using constants.PROGRESS_X to replace status and log.
e.g.
upcreate_training_status(project_id=project_id,
need_to_send_notification=True,
**constants.PROGRESS_X)
Args:
project_id:
status (str): status
log (str): log
performance (str): performance
need_to_send_notification (bool): need_to_send_notification
"""
logger.info("Updating Training Status :%s", status)
logger.info("Updating Training Log :%s", log)
logger.info("need_to_send_notification :%s", need_to_send_notification)
obj, created = TrainingStatus.objects.update_or_create(
project_id=project_id,
defaults={
"status": status,
"log": log.capitalize(),
"performance": performance,
"need_to_send_notification": need_to_send_notification,
},
)
return obj, created
|
[
"peteeelol@gmail.com"
] |
peteeelol@gmail.com
|
a83f515a19f82123dc8b48be43644a6b9ec26ddb
|
4f27ee76eeca2422e84d29d483f1f9570d26b6b4
|
/Finance/Binomial/MultiStep.py
|
b5678f789a7533f5522d869379ddfcba4a60d868
|
[] |
no_license
|
dsocaciu/Projects
|
5a52b98600400d66f4a4f00bb59676bed70ad602
|
aa722562534408f6c84f6ca2c0b928d038c74cb3
|
refs/heads/master
| 2021-01-19T13:01:22.288458
| 2017-06-14T19:25:59
| 2017-06-14T19:25:59
| 82,357,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
#Step 2
from fractions import Fraction
class Node:
def __init__(self,Level=None,Price=None,Up=None, Down=None):
self.Level = Level
self.Price = Price
self.Up = Up
self.Down = Down
def getLevel(self):
return self.Level
def CalculateNextLevel(node):
currentLevel = node.getLevel()
UpNode = Node(currentLevel+1,node.Price*u)
DownNode = Node(currentLevel+1,node.Price*d)
node.Up = UpNode
node.Down = DownNode
return UpNode,DownNode
root = Node(0,Fraction(100,1))
u = Fraction(107,100)
d = Fraction(1,u)
upNode, downNode = CalculateNextLevel(root)
#print(str((100*d)*u))
#Level 0 - Root
print(str(root.Level)+ " " * root.Level + str(float(root.Price)))
#Level 1
print(str(upNode.Level) + " " * upNode.Level + str(float(upNode.Price)))
print(str(downNode.Level) + " " * downNode.Level + str(float(downNode.Price)))
#Extensions beyond one step
#Level 2
upupNode, updownNode = CalculateNextLevel(upNode)
print(str(upupNode.Level) + " " * upupNode.Level + str(float(upupNode.Price)))
print(str(updownNode.Level) + " " * updownNode.Level + str(float(updownNode.Price)))
downupNode, downdownNode = CalculateNextLevel(downNode)
print(str(downupNode.Level) + " " * downupNode.Level + str(float(downupNode.Price)))
print(str(downdownNode.Level) + " " * downdownNode.Level + str(float(downdownNode.Price)))
# Level 3
upupupNode, upupdownNode= CalculateNextLevel(upupNode)
print(str(upupupNode.Level) + " " * upupupNode.Level + str(float(upupupNode.Price)))
print(str(upupdownNode.Level) + " " * upupdownNode.Level + str(float(upupdownNode.Price)))
updownupNode, updowndownNode = CalculateNextLevel(updownNode)
print(str(updownupNode.Level) + " " * updownupNode.Level+ str(float(updownupNode.Price)))
print(str(updowndownNode.Level) + " " * updowndownNode.Level+ str(float(updowndownNode.Price)))
downupupNode, downupdownNode= CalculateNextLevel(downupNode)
print(str(downupupNode.Level) + " " * downupupNode.Level + str(float(downupupNode.Price)))
print(str(downupdownNode.Level) + " " * downupdownNode.Level + str(float(downupdownNode.Price)))
downdownupNode, downdowndownNode = CalculateNextLevel(downdownNode)
print(str(downdownupNode.Level) + " " * downdownupNode.Level+ str(float(downdownupNode.Price)))
print(str(downdowndownNode.Level) + " " * downdowndownNode.Level+ str(float(downdowndownNode.Price)))
|
[
"dan.socaciu@gmail.com"
] |
dan.socaciu@gmail.com
|
ff700f7d454463df58dd34ca5c24e3f7e49d6e50
|
299daa2b824700c36dde9b192edb44027511da13
|
/App/models.py
|
ee6a7ccf9b7e9b689d1e6073ac48223820e15efa
|
[] |
no_license
|
jacvon/UAV
|
44d7a2d3a84a3522c11f2bcd3f3eeedf7e61b9d3
|
9d817607777f01b3564f7723d220102d9cad3ac7
|
refs/heads/master
| 2023-02-07T16:52:05.065107
| 2020-05-12T08:48:57
| 2020-05-12T08:48:57
| 249,330,733
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
# this isn't used; too lazy to delete it
class Book(models.Model):
b_name = models.CharField(max_length=16, blank=True, null=True)
class Meta:
managed = False
db_table = 'Book'
class UserModel(models.Model):
u_name = models.CharField(max_length=16)
# upload_to is a relative path, relative to MEDIA_ROOT (the media root directory)
u_icon = models.ImageField(upload_to='%Y/%m/%d/icons')
u_predict = models.IntegerField(default=0)
|
[
"jacvon_wan@outlook.com"
] |
jacvon_wan@outlook.com
|
3abdcc5b572a90e3383aecc6e94d1a98320216ed
|
4577d8169613b1620d70e3c2f50b6f36e6c46993
|
/students/1803699/homework03/program02.py
|
344ec0651031695a307c244a2e1dc1133f3d6acc
|
[] |
no_license
|
Fondamenti18/fondamenti-di-programmazione
|
cbaf31810a17b5bd2afaa430c4bf85d05b597bf0
|
031ec9761acb1a425fcc4a18b07884b45154516b
|
refs/heads/master
| 2020-03-24T03:25:58.222060
| 2018-08-01T17:52:06
| 2018-08-01T17:52:06
| 142,419,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
from immagini import *
def cammino(fname, fname1):
scacchiera=load(fname)
posx=0
posy=0
angolo=0
fstring=''
spins=0
white=(255,255,255)
black=(0,0,0)
paint(scacchiera,0,0,(0,255,0))
while spins<4:
angolo=fixangle(posx,posy,angolo)
sqcoord=findnext(posx,posy,angolo)
nextx=sqcoord[0]
nexty=sqcoord[1]
sqcolor=scacchiera[nexty][nextx]
if sqcolor==black or sqcolor==white:
posx=nextx
posy=nexty
scacchiera=paint(scacchiera,posx,posy,(0,255,0))
spins=0
fstring+=str(angolo)
else:
angolo+=1
if angolo>3:
angolo-=4
spins+=1
scacchiera=paint(scacchiera,posx,posy,(0,0,255))
save(scacchiera,fname1)
return fstring
def findnext(x,y,angolo):
if angolo==0:
nextsq=(x+40,y)
elif angolo==1:
nextsq=(x,y+40)
elif angolo==2:
nextsq=(x-40,y)
else:
nextsq=(x,y-40)
return nextsq
def fixangle(x,y,angolo):
if x==0:
if y==0 and (angolo==2 or angolo==3):
angolo=0
elif angolo==2:
angolo=3
elif x==560:
if y==560 and (angolo==0 or angolo==1):
angolo=2
elif angolo==0:
angolo=1
elif y==0 and angolo==3:
angolo=0
elif y==560 and angolo==1:
angolo=2
return angolo
def paint(img,x,y,color):
for i in range(0,40):
for t in range(0,40):
img[y+t][x+i]=color
return img
|
[
"a.sterbini@gmail.com"
] |
a.sterbini@gmail.com
|
08ea22b6c1395c1e364e91c3f0876d13382790d2
|
8dde96b47fa3ec6a8bcbae938da73adc555d4471
|
/User.py
|
2f2b5fd491209a276f54dfcc75261711efc908a8
|
[] |
no_license
|
carmolim/instagram_likes
|
73c6d2c29452c6b5537777c7003e4bafe7bdde26
|
7101424e2ae71fdb137cccdc781f80b05e229ee9
|
refs/heads/master
| 2021-01-17T09:49:25.801116
| 2016-03-24T13:06:30
| 2016-03-24T13:06:30
| 35,011,422
| 11
| 0
| null | 2015-07-12T23:43:51
| 2015-05-04T03:10:42
|
Python
|
UTF-8
|
Python
| false
| false
| 597
|
py
|
class User( object ):
def __init__( self, client_user, client_id, access_token, client_secret ):
self.client_user = client_user
self.client_id = client_id
self.access_token = access_token
self.client_secret = client_secret
print 'User %s created' % self.client_user
print ''
def get_client_user ( self ):
return self.client_user
def get_client_id ( self ):
return self.client_id
def get_access_token( self ):
return self.access_token
def get_client_secret ( self ):
return self.client_secret
def get_user_redirect_uri ( self ):
return self.redirect_uri
|
[
"carmolim@gmail.com"
] |
carmolim@gmail.com
|
e8022aab29808323257b230b2cecb0de277cc4cd
|
5b9bce9fdfc13848b6bacc73741f6e8fc5a4ae99
|
/testing/pcechoclient.py
|
d6c4f82a21900e1ac79cde66f76604954c82a49e
|
[] |
no_license
|
ContinuumBridge/bridge_admin
|
4a5a036f4e0cb4e96366a85524aef0c33e82a7ff
|
efd4148a55221f74cb8a11139a8416d1af453408
|
refs/heads/master
| 2022-03-20T15:00:01.422221
| 2020-01-04T11:08:17
| 2020-01-04T11:08:17
| 17,435,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
import sys
class EchoClient(LineReceiver):
end="Bye-bye!"
def connectionMade(self):
self.sendLine("Hello, world!")
self.sendLine("What a fine day it is.")
self.sendLine(self.end)
def lineReceived(self, line):
print "receive:", line
if line==self.end:
self.transport.loseConnection()
class EchoClientFactory(ClientFactory):
protocol = EchoClient
def clientConnectionFailed(self, connector, reason):
print 'connection failed:', reason.getErrorMessage()
reactor.stop()
def clientConnectionLost(self, connector, reason):
print 'connection lost:', reason.getErrorMessage()
reactor.stop()
def main():
factory = EchoClientFactory()
reactor.connectUNIX("/tmp/pcsocket", factory, timeout=10)
reactor.run()
if __name__ == '__main__':
main()
|
[
"peter.claydon@continuumbridge.com"
] |
peter.claydon@continuumbridge.com
|
802a35b2dd53c20416e66ef7eec5d95d5042c007
|
3fb26a1204611e5e2ab906ce73c4d047eccfd2ef
|
/__init__.py
|
b1adf3f14778b65f0f0d7fea72d2d03b8f9a7c10
|
[] |
no_license
|
Trebolium/nadineVD
|
e97294a9de36fba2816d98b34f43d24c6c615352
|
d2e26f84ba2abe85f41abe0ed2a22f8b47791e58
|
refs/heads/master
| 2022-02-26T10:57:48.162946
| 2019-07-25T22:46:59
| 2019-07-25T22:46:59
| 198,650,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
from vocalDetector import *
|
[
"brendan242424@gmail.com"
] |
brendan242424@gmail.com
|
138d4fa9febc3f2b3792bf0a311777ffb4e59fcb
|
5a696135f54c120fab988e4f1198a9ac41cae5b8
|
/portfolioapi/views.py
|
41034fc0b0470c8cf64b5c01432dd21d40d8391d
|
[] |
no_license
|
armstrongsouljah/codemeapi
|
f1d98ed2035c8131ba5e7611faf3d3eb831fd5f8
|
21afc2a75b1429bb920023b3cbba5aecc310293d
|
refs/heads/master
| 2023-08-17T02:53:03.695526
| 2020-05-16T07:49:53
| 2020-05-16T07:49:53
| 264,371,074
| 0
| 0
| null | 2021-09-22T19:07:11
| 2020-05-16T06:05:01
|
Python
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
from django.shortcuts import render
from rest_framework import generics as g
from .serializers import ProjectSerializer
from .models import Project
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
class ProjectListView(g.ListAPIView):
queryset = Project.objects.all()
permission_classes = [AllowAny, ]
serializer_class = ProjectSerializer
class ProjectAddView(g.CreateAPIView):
queryset = Project.objects.all()
permission_classes = [IsAuthenticated, ]
serializer_class = ProjectSerializer
|
[
"armstrongsouljah@gmail.com"
] |
armstrongsouljah@gmail.com
|
11b0479a27c5ab90d81cee838e888ab9e14fa8bf
|
7b4abf7507c6b34244c7d15808f35a0bdd360831
|
/src/app/channels/channels.py
|
29e64c47b4ec5c2de4ea0b45908fc921d3412267
|
[
"MIT"
] |
permissive
|
aldomatus/python_sqlalchemy_mysql_docker_many-to-many
|
763ab78703cce42e12e20c9c1e58ca6290477590
|
d3e61761208eb8c604c4e2218ac6effeddd6291b
|
refs/heads/main
| 2023-07-12T18:59:47.521556
| 2021-08-11T23:04:26
| 2021-08-11T23:04:26
| 394,622,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
from flask import request, jsonify, Blueprint
channels = Blueprint('channels', __name__)
@channels.route('/channels', methods=['GET'])
def users_regards():
return jsonify({'message': 'Welcome!'})
@channels.route('/channel', methods=['POST'])
def create_user():
from src.main import db
from src.app.models import Channel, channel_schema
# Receive requests
if request.method == 'POST':
name = request.json['name']
new_channel= Channel(name)
db.session.add(new_channel)
db.session.commit()
return channel_schema.jsonify(new_channel)
|
[
"aldo.matus@outlook.com"
] |
aldo.matus@outlook.com
|
2316b3a02099a1a4c192eadd8ae8fd79893c2aa0
|
88dd43c9d2acba579c9319332a8ebf26aa2ccd3a
|
/tests/test_helpers.py
|
aa16d6dd07d8a9bbabb7a83ed655d657bc903d0e
|
[
"MIT"
] |
permissive
|
dcramer/exam
|
063637270d414198dda56b4fccaf1dd0103014af
|
548d3b3b0711badda67b1c42d84ff1d9b5365212
|
refs/heads/master
| 2021-01-16T17:40:14.817038
| 2012-12-06T01:44:53
| 2012-12-06T01:44:53
| 7,027,986
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
from unittest2 import TestCase
from mock import patch, Mock
from exam.helpers import rm_f, track, mock_import
from exam.decorators import fixture
from describe import expect
@patch('exam.helpers.shutil')
class TestRmrf(TestCase):
path = '/path/to/folder'
def test_calls_shutil_rmtreee(self, shutil):
rm_f(self.path)
shutil.rmtree.assert_called_once_with(self.path, ignore_errors=True)
@patch('exam.helpers.os')
def test_on_os_errors_calls_os_remove(self, os, shutil):
shutil.rmtree.side_effect = OSError
rm_f(self.path)
os.remove.assert_called_once_with(self.path)
class TestTrack(TestCase):
@fixture
def foo_mock(self):
return Mock()
@fixture
def bar_mock(self):
return Mock()
def test_makes_new_mock_and_attaches_each_kwarg_to_it(self):
tracker = track(foo=self.foo_mock, bar=self.bar_mock)
expect(tracker.foo).to == self.foo_mock
expect(tracker.bar).to == self.bar_mock
class TestMockImport(TestCase):
def test_is_a_context_manager_that_yields_patched_import(self):
with mock_import('foo') as mock_foo:
import foo
expect(foo).to == mock_foo
def test_mocks_import_for_packages(self):
with mock_import('foo.bar.baz') as mock_baz:
import foo.bar.baz
expect(foo.bar.baz).to == mock_baz
@mock_import('foo')
def test_can_be_used_as_a_decorator_too(self, mock_foo):
import foo
expect(foo).to == mock_foo
|
[
"jeff.pollard@gmail.com"
] |
jeff.pollard@gmail.com
|
6ba9c9abe2cbbb4ac9f451e51b7f6fc3c3f86ed8
|
267b937afa1e1e3ee0907cdd319327e8f7946746
|
/wrfchem_v415_ext/scripts/components/old2/old/produce_plots_args_rain_higherres.py
|
4facbcc9b4464b20b66892d54e08d95e4ab9f623
|
[] |
no_license
|
eiliscoleman/StreamAIR-AQ
|
3ed13aaa75f9c3cd9788104ebf794f90b0b7f57b
|
928f016572036cf7d1ac1ff220281aae314446cb
|
refs/heads/master
| 2023-01-23T09:08:52.109971
| 2020-11-27T09:47:00
| 2020-11-27T09:47:00
| 313,920,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
from __future__ import print_function, unicode_literals
import json
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.colors as mc
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import xarray as xr
import xarray.ufuncs as xu
import seaborn as sns
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
#import mercantile as mti
import argparse
from argparse import RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description = "read in args", formatter_class = RawDescriptionHelpFormatter)
parser.add_argument('filein', help = 'file to plot')
parser.add_argument('oproot', help = 'Path for output')
args = parser.parse_args()
tilesize = 768
dpi = 288
dir_path=args.oproot
print(dir_path)
png_out_dir=args.oproot
fid=args.filein
f_path=(png_out_dir+'/'+fid)
print(f_path)
print('this is the file to be plotted', args.filein)
data = xr.open_dataset(f_path)
LON, LAT=np.meshgrid(data.lon.values, data.lat.values)
print ('RAIN and pressure')
out_path=(png_out_dir+'/test_rain_psl.png')
plt.figure(figsize=(tilesize/dpi, tilesize/dpi), dpi=dpi)
plt.subplots_adjust(bottom=0, left=0, right=1, top=1)
plt.axis('off')
lev_range=np.arange(0,50,1)
levels=lev_range
cs=plt.contourf(LON, LAT, data.rain.values[0,:,:], levels, cmap=plt.cm.Blues, extend="both")
cs2=plt.contour(LON, LAT, data.p_sl[0,:,:]*1e-2, levels_n=20, linewidths=0.5, colors='k')
levels_n=np.arange(1000,1040,1)
cs2=plt.contour(LON, LAT, data.p_sl[0,:,:]*1e-2, levels_n, linewidths=0.2, colors='k')
plt.clabel(cs2, inline=1, fmt='%.0f', fontsize=2)
plt.savefig(out_path, dpi=1152, tilesize=768)
plt.close()
|
[
"eiliscoleman@gmail.com"
] |
eiliscoleman@gmail.com
|
0ee12d9738e4a2948bf9e76868469b53a28b568c
|
0884bc5f5dca38f239ea0d7d31232a67a9411c27
|
/format.py
|
166496d572d670a7e31f70bf6fd83cd5f4bd023d
|
[] |
no_license
|
d0v34/My-Python
|
cb6e9e2da266a08e6b948358675e80bcc8bf08dd
|
321a9545d1cfa4137c8547dc1159779cc8a0f30b
|
refs/heads/master
| 2021-08-12T04:15:58.727625
| 2021-08-05T06:09:07
| 2021-08-05T06:09:07
| 212,036,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
szer = 42
print("-" * szer)
print("| Czas | Zawodnik | Data |")
print("*" * szer)
print("| {:06.3f} | {:16s} | {:10s} |" .format(9.58, "Usain Bolt", "16.08.2009"))
print("| {:6.3f} | {:16s} | {:10s} |" .format(9.69, "Tyson Gay", "20.09.2009"))
print("| {:6.3f} | {:16s} | {:10s} |" .format(9.69, "Yohan Blake", "23.09.2012"))
print("| {:6.3f} | {:16s} | {:10s} |" .format(9.74, "Asafa Powell", "2.09.2008"))
print("-" * szer)
|
[
"fiat126p@gmail.com"
] |
fiat126p@gmail.com
|
304f7e9f5f5d929203246c25461e28f6a6b175af
|
ead61cfb84bf9383e3a51cfdd8d01aee05a43008
|
/networks/blocks/inception_block.py
|
169b04a0a866b0685f22379567637744056e7f22
|
[
"Apache-2.0"
] |
permissive
|
whq-hqw/omni_torch
|
67263676c5d649426c982d148b3816fef6c73e94
|
9bd654387619c0cbc6aee9e91482ecc9200138ef
|
refs/heads/master
| 2022-03-28T20:30:44.293548
| 2020-01-03T04:03:24
| 2020-01-03T04:03:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
"""
# Copyright (c) 2019 Wang Hanqin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .conv_block import conv_block
class InceptionBlock(nn.Module):
def __init__(self, input, filters, kernel_sizes, stride, padding, groups=1, name=None,
dilation=1, bias=True, activation=nn.ReLU(), batch_norm=nn.BatchNorm2d,
dropout=0, inner_maxout= None, maxout=None):
def concatenate_blocks(block1, block2):
list_of_block1 = list(block1.children())
list_of_block1.extend(list(block2.children()))
return nn.Sequential(*list_of_block1)
"""
:param input: int
:param filters: in the form of [[...], [...], ... , [...]], each cell represent a stream in the network
:param kernel_sizes: in the form of [[...], [...], ... , [...]]
:param stride: in the form of [[...], [...], ... , [...]]
:param padding: in the form of [[...], [...], ... , [...]]
"""
assert max([len(filters), len(kernel_sizes), len(stride), len(padding)]) is \
min([len(filters), len(kernel_sizes), len(stride), len(padding)])
inner_groups = len(filters)
super().__init__()
if inner_maxout is None:
inner_maxout = inner_groups * [None]
inner_blocks = []
for i in range(inner_groups):
if inner_maxout[i]:
ops = nn.Sequential(inner_maxout[i])
ops = concatenate_blocks(ops, conv_block(input, filters[i], kernel_sizes[i], stride[i], padding[i],
name="incep_" + str(i), activation=activation,
batch_norm=batch_norm, dropout=dropout, dilation=dilation,
bias=bias, groups=groups))
else:
ops = conv_block(input, filters[i], kernel_sizes[i], stride[i], padding[i],
name="incep_" + str(i), activation=activation, batch_norm=batch_norm,
dropout=dropout, dilation=dilation, bias=bias, groups=groups)
inner_blocks.append(ops)
if maxout:
inner_blocks.append(maxout)
self.inner_blocks = nn.ModuleList(inner_blocks)
def forward(self, x):
out = [block(x) for block in self.inner_blocks]
return torch.cat(out, dim=1)
|
[
"loveorchidsdavid@gmail.com"
] |
loveorchidsdavid@gmail.com
|
421fe091b6410369a00caf0601b89fb067b34c02
|
b6b2be9866fd16699ad5c30a21bbcb70755f1e57
|
/Experiments/_Legacy/CoralBleaching/WindowBasedClassifier/windowbasedclassifier_train_THEN_predict.py
|
883e47e8f8961fd4fb0902b783a4bf205434aca2
|
[] |
no_license
|
simonhughes22/PythonNlpResearch
|
24a482c7036c568b063ec099176b393d45a0a86b
|
2bc2914ce93fcef6dbd26f8097eec20b7d0e476d
|
refs/heads/master
| 2022-12-08T17:39:18.332177
| 2019-10-26T12:48:33
| 2019-10-26T12:48:33
| 16,458,105
| 17
| 7
| null | 2022-12-07T23:38:17
| 2014-02-02T16:36:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,156
|
py
|
from featureextractortransformer import FeatureExtractorTransformer
from sent_feats_for_stacking import *
from load_data import load_process_essays, extract_features
from featurevectorizer import FeatureVectorizer
from featureextractionfunctions import *
from CrossValidation import cross_validation
from wordtagginghelper import *
from IterableFP import flatten
# Classifiers
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from window_based_tagger_config import get_config
from model_store import ModelStore
from predictions_to_file import predictions_to_file
# END Classifiers
import Settings
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger()
# Create persister (mongo client) - fail fast if mongo service not initialized
# not hashed as don't affect persistence of feature processing
SPARSE_WD_FEATS = True
SPARSE_SENT_FEATS = True
MIN_FEAT_FREQ = 5 # 5 best so far
CV_FOLDS = 5
MIN_TAG_FREQ = 5
LOOK_BACK = 0 # how many sentences to look back when predicting tags
# end not hashed
# construct unique key using settings for pickling
settings = Settings.Settings()
model_store = ModelStore()
""" PETER - CHANGE THESE FILE PATHS """
folder = settings.data_directory + "CoralBleaching/BrattData/EBA1415_Merged/" # Location where the training data is, use EBA_Pre and Post test essays preferably
test_folder= settings.data_directory + "CoralBleaching/BrattData/Merged/" # Location where the new essays to tag are located
out_predictions_file = settings.data_directory + "CoralBleaching/Results/predictions.txt" # File to dump the predictions to
config = get_config(folder)
""" FEATURE EXTRACTION """
offset = (config["window_size"] - 1) / 2
unigram_window_stemmed = fact_extract_positional_word_features_stemmed(offset)
biigram_window_stemmed = fact_extract_ngram_features_stemmed(offset, 2)
#pos_tag_window = fact_extract_positional_POS_features(offset)
#pos_tag_plus_wd_window = fact_extract_positional_POS_features_plus_word(offset)
#head_wd_window = fact_extract_positional_head_word_features(offset)
#pos_dep_vecs = fact_extract_positional_dependency_vectors(offset)
extractors = [unigram_window_stemmed, biigram_window_stemmed]
feat_config = dict(config.items() + [("extractors", extractors)])
""" LOAD DATA """
tagged_essays = load_process_essays( **config )
logger.info("Essays loaded")
# most params below exist ONLY for the purposes of the hashing to and from disk
feature_extractor = FeatureExtractorTransformer(extractors)
essay_feats = feature_extractor.transform(tagged_essays)
logger.info("Features loaded")
""" DEFINE TAGS """
_, lst_all_tags = flatten_to_wordlevel_feat_tags(essay_feats)
regular_tags = list(set((t for t in flatten(lst_all_tags) if t[0].isdigit())))
CAUSE_TAGS = ["Causer", "Result", "explicit"]
CAUSAL_REL_TAGS = [CAUSAL_REL, CAUSE_RESULT, RESULT_REL]# + ["explicit"]
""" works best with all the pair-wise causal relation codes """
wd_train_tags = regular_tags + CAUSE_TAGS
wd_test_tags = regular_tags + CAUSE_TAGS
# tags from tagging model used to train the stacked model
sent_input_feat_tags = wd_train_tags
# find interactions between these predicted tags from the word tagger to feed to the sentence tagger
sent_input_interaction_tags = wd_train_tags
# tags to train (as output) for the sentence based classifier
sent_output_train_test_tags = list(set(regular_tags + CAUSE_TAGS + CAUSAL_REL_TAGS))
assert set(CAUSE_TAGS).issubset(set(sent_input_feat_tags)), "To extract causal relations, we need Causer tags"
# tags to evaluate against
""" CLASSIFIERS """
""" Log Reg + Log Reg is best!!! """
fn_create_wd_cls = lambda: LogisticRegression() # C=1, dual = False seems optimal
#fn_create_wd_cls = lambda : LinearSVC(C=1.0)
#fn_create_sent_cls = lambda : LinearSVC(C=1.0)
fn_create_sent_cls = lambda : LogisticRegression(dual=True) # C around 1.0 seems pretty optimal
# NOTE - GBT is stochastic in the SPLITS, and so you will get non-deterministic results
#fn_create_sent_cls = lambda : GradientBoostingClassifier() #F1 = 0.5312 on numeric + 5b + casual codes for sentences
if type(fn_create_sent_cls()) == GradientBoostingClassifier:
SPARSE_SENT_FEATS = False
#TODO Parallelize
essays_TD = essay_feats
# TD and VD are lists of Essay objects. The sentences are lists
# of featureextractortransformer.Word objects
print "Training Tagging Model"
""" Data Partitioning and Training """
td_feats, td_tags = flatten_to_wordlevel_feat_tags(essays_TD)
feature_transformer = FeatureVectorizer(min_feature_freq=MIN_FEAT_FREQ, sparse=SPARSE_WD_FEATS)
td_X = feature_transformer.fit_transform(td_feats)
wd_td_ys_bytag = get_wordlevel_ys_by_code(td_tags, wd_train_tags)
""" TRAIN Tagger """
tag2word_classifier = train_classifier_per_code(td_X, wd_td_ys_bytag, fn_create_wd_cls, wd_train_tags)
print "\nTraining Sentence Model"
""" SENTENCE LEVEL PREDICTIONS FROM STACKING """
sent_td_xs, sent_td_ys_bycode = get_sent_feature_for_stacking_from_tagging_model(sent_input_feat_tags, sent_input_interaction_tags, essays_TD, td_X, wd_td_ys_bytag, tag2word_classifier, SPARSE_SENT_FEATS, LOOK_BACK)
""" Train Stacked Classifier """
tag2sent_classifier = train_classifier_per_code(sent_td_xs, sent_td_ys_bycode , fn_create_sent_cls, sent_output_train_test_tags)
""" END TRAINING """
test_config = get_config(test_folder)
test_tagged_essays = load_process_essays(**test_config)
test_essay_feats = feature_extractor.transform(test_tagged_essays)
cv_wd_td_ys_by_tag, cv_wd_td_predictions_by_tag = defaultdict(list), defaultdict(list)
# TD and VD are lists of Essay objects. The sentences are lists
# of featureextractortransformer.Word objects
print "Running Tagging Model"
""" Data Partitioning and Training """
test_feats, _ = flatten_to_wordlevel_feat_tags(test_essay_feats)
test_x = feature_transformer.transform(test_feats)
""" TEST Tagger """
td_wd_predictions_by_code = test_classifier_per_code(test_x, tag2word_classifier, wd_test_tags)
print "\nRunning Sentence Model"
""" SENTENCE LEVEL PREDICTIONS FROM STACKING """
dummy_wd_td_ys_bytag = defaultdict(lambda : np.asarray([0.0] * test_x.shape[0]))
sent_test_xs, sent_test_ys_bycode = get_sent_feature_for_stacking_from_tagging_model(sent_input_feat_tags, sent_input_interaction_tags, test_essay_feats, test_x, dummy_wd_td_ys_bytag, tag2word_classifier, SPARSE_SENT_FEATS, LOOK_BACK)
""" Test Stack Classifier """
test_sent_predictions_by_code \
= test_classifier_per_code(sent_test_xs, tag2sent_classifier, sent_output_train_test_tags )
merge_dictionaries(td_wd_predictions_by_code, cv_wd_td_predictions_by_tag)
with open(out_predictions_file, "w+") as f_output_file:
f_output_file.write("Essay|Sent Number|Processed Sentence|Concept Codes|Predictions\n")
predictions_to_file(f_output_file, sent_test_ys_bycode, test_sent_predictions_by_code, test_essay_feats, regular_tags + CAUSE_TAGS + CAUSAL_REL_TAGS)
# print results for each code
print out_predictions_file
|
[
"simon.hughes@dice.com"
] |
simon.hughes@dice.com
|
19abf40fe4e150fdff4ad72dd8e5d5eca00779f5
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/configs/mmdet/detection/detection_tensorrt_static-800x1344.py
|
737054533c6d1350e5cf1c3ba8e1c1c7ea026a7c
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 56
|
py
|
_base_ = ['../_base_/base_tensorrt_static-800x1344.py']
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
6aeb18de3effe94c35785f743188edb5b1833d15
|
89e65f7e2e0196a36f5dfced567a67cab3cba24d
|
/backend/users/migrations/0002_auto_20210324_0135.py
|
5236ed70f5577443707f2b8d206f27b9ff10c506
|
[] |
no_license
|
crowdbotics-apps/l0c4l-25232
|
e90bab9fd6c44f9bd6d7a8f8fa90684d509840a0
|
1271c3c84e4465634bcb9d0d46e02c9b794bfdbb
|
refs/heads/master
| 2023-04-02T20:27:31.769165
| 2021-03-24T11:29:42
| 2021-03-24T11:29:42
| 350,907,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# Generated by Django 2.2.19 on 2021-03-24 01:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='timestamp_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ab5c4a7cc0555180e525b35e0720ede50721f0d0
|
1c2ed80f77782ebee5b90480dbfc74a2d145d53f
|
/python-base/src/learn/shell/system_zip4.py
|
8f4f688b4d50233a9b9278681aa2ed1e5f75ab9f
|
[] |
no_license
|
icesx/IPython
|
970dfe7260906d85706b7117044510b5929e9098
|
452c63e17c6f05cb0540974f7c01c1e73f9836fe
|
refs/heads/master
| 2021-07-05T02:30:29.956369
| 2021-02-02T07:09:58
| 2021-02-02T07:09:58
| 220,771,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
#!/usr/bin/python
# Filename: system_zip4.py
import os
import time
# 1. The files and directories to be backed up are specified in a list.
source = ['/home/swaroop/byte','/home/swaroop/bin']
# If you are using Windows, use source = [r'C:\Documents', r'D:\Work'] or something like that
# 2. The backup must be stored in a main backup directory
target_dir = '/mnt/e/backup/' # Remember to change this to what you will be using
# 3. The files are backed up into a zip file.
# 4. The current day is the name of the subdirectory in the main directory
today = target_dir + time.strftime('%Y%m%d')
# The current time is the name of the zip archive
now = time.strftime('%H%M%S')
# Take a comment from the user to create the name of the zip file
comment = input('Enter a comment --> ')
if len(comment) == 0: # check if a comment was entered
target = today + os.sep + now + '.zip'
else:
target = today + os.sep + now + '_' + \
comment.replace(' ','_') + '.zip'
# Notice the backslash!
# Create the subdirectory if it isn't already there
if not os.path.exists(today):
os.mkdir(today) # make directory
print('Successfully created directory',today)
# 5. We use the zip command (in Unix/Linux) to put the files in a zip archive
zip_command = "zip -qr '%s' %s" % (target,' '.join(source))
# Run the backup
if os.system(zip_command) == 0:
print('Successful backup to',target)
else:
print('Backup FAILED')
|
[
"icesxrun@gmail.com"
] |
icesxrun@gmail.com
|
64eabaa4935dd78df0c5e852ab634f5e19a533f4
|
e3794217c22e3baed4738b1b8b1815b9148355fe
|
/learning_logs/migrations/0003_auto_20210426_0040.py
|
ccec2ee7040851ba67afb8831617f3e258be20d8
|
[] |
no_license
|
aiswfs/learning_log
|
1de83ea4ce861290a8237822c22ae059f53a648b
|
05b07ecb75e14c29bcde509e27651bae3120e0b7
|
refs/heads/master
| 2023-04-19T22:58:18.661997
| 2021-04-27T03:36:25
| 2021-04-27T03:36:25
| 361,594,549
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
# Generated by Django 2.2.20 on 2021-04-25 16:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('learning_logs', '0002_entry'),
]
operations = [
migrations.AddField(
model_name='topic',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='entry',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='topic',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"1024766276@qq.com"
] |
1024766276@qq.com
|
904e45682fa277c8e3d4b3e9352e0595318d3aa1
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/neutron/objects/qos/policy.py
|
dfb84b85d748d70d43b8ae4236a8a20cb12169b9
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330
| 2018-10-15T11:39:20
| 2018-10-15T11:39:20
| 152,568,204
| 0
| 3
| null | 2022-11-19T17:38:49
| 2018-10-11T09:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 10,103
|
py
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_utils import versionutils
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
from neutron._i18n import _
from neutron.common import exceptions
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron.db.qos import api as qos_db_api
from neutron.db.qos import models as qos_db_model
from neutron.db.rbac_db_models import QosPolicyRBAC
from neutron.objects import common_types
from neutron.objects.db import api as obj_db_api
from neutron.objects.qos import rule as rule_obj_impl
from neutron.objects import rbac_db
@obj_base.VersionedObjectRegistry.register
class QosPolicy(rbac_db.NeutronRbacObject):
# Version 1.0: Initial version
# Version 1.1: QosDscpMarkingRule introduced
# Version 1.2: Added QosMinimumBandwidthRule
# Version 1.3: Added standard attributes (created_at, revision, etc)
# Version 1.4: Changed tenant_id to project_id
VERSION = '1.4'
# required by RbacNeutronMetaclass
rbac_db_model = QosPolicyRBAC
db_model = qos_db_model.QosPolicy
port_binding_model = qos_db_model.QosPortPolicyBinding
network_binding_model = qos_db_model.QosNetworkPolicyBinding
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'name': obj_fields.StringField(),
'shared': obj_fields.BooleanField(default=False),
'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
}
fields_no_update = ['id', 'project_id']
synthetic_fields = ['rules']
binding_models = {'network': network_binding_model,
'port': port_binding_model}
def obj_load_attr(self, attrname):
if attrname == 'project_id':
return super(QosPolicy, self).obj_load_attr(attrname)
if attrname != 'rules':
raise exceptions.ObjectActionError(
action='obj_load_attr',
reason=_('unable to load %s') % attrname)
if not hasattr(self, attrname):
self.reload_rules()
def reload_rules(self):
rules = rule_obj_impl.get_rules(self.obj_context, self.id)
setattr(self, 'rules', rules)
self.obj_reset_changes(['rules'])
def get_rule_by_id(self, rule_id):
"""Return rule specified by rule_id.
@raise QosRuleNotFound: if there is no such rule in the policy.
"""
for rule in self.rules:
if rule_id == rule.id:
return rule
raise exceptions.QosRuleNotFound(policy_id=self.id,
rule_id=rule_id)
@classmethod
def get_object(cls, context, **kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with db_api.autonested_transaction(admin_context.session):
policy_obj = super(QosPolicy, cls).get_object(admin_context,
**kwargs)
if (not policy_obj or
not cls.is_accessible(context, policy_obj)):
return
policy_obj.reload_rules()
return policy_obj
@classmethod
def get_objects(cls, context, _pager=None, validate_filters=True,
**kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with db_api.autonested_transaction(admin_context.session):
objs = super(QosPolicy, cls).get_objects(admin_context, _pager,
validate_filters,
**kwargs)
result = []
for obj in objs:
if not cls.is_accessible(context, obj):
continue
obj.reload_rules()
result.append(obj)
return result
@classmethod
def _get_object_policy(cls, context, model, **kwargs):
with db_api.autonested_transaction(context.session):
binding_db_obj = obj_db_api.get_object(context, model, **kwargs)
if binding_db_obj:
return cls.get_object(context, id=binding_db_obj['policy_id'])
@classmethod
def get_network_policy(cls, context, network_id):
return cls._get_object_policy(context, cls.network_binding_model,
network_id=network_id)
@classmethod
def get_port_policy(cls, context, port_id):
return cls._get_object_policy(context, cls.port_binding_model,
port_id=port_id)
# TODO(QoS): Consider extending base to trigger registered methods for us
def create(self):
with db_api.autonested_transaction(self.obj_context.session):
super(QosPolicy, self).create()
self.reload_rules()
def delete(self):
with db_api.autonested_transaction(self.obj_context.session):
for object_type, model in self.binding_models.items():
binding_db_obj = obj_db_api.get_object(self.obj_context, model,
policy_id=self.id)
if binding_db_obj:
raise exceptions.QosPolicyInUse(
policy_id=self.id,
object_type=object_type,
object_id=binding_db_obj['%s_id' % object_type])
super(QosPolicy, self).delete()
def attach_network(self, network_id):
qos_db_api.create_policy_network_binding(self.obj_context,
policy_id=self.id,
network_id=network_id)
def attach_port(self, port_id):
qos_db_api.create_policy_port_binding(self.obj_context,
policy_id=self.id,
port_id=port_id)
def detach_network(self, network_id):
qos_db_api.delete_policy_network_binding(self.obj_context,
policy_id=self.id,
network_id=network_id)
def detach_port(self, port_id):
qos_db_api.delete_policy_port_binding(self.obj_context,
policy_id=self.id,
port_id=port_id)
def get_bound_networks(self):
return qos_db_api.get_network_ids_by_network_policy_binding(
self.obj_context, self.id)
def get_bound_ports(self):
return qos_db_api.get_port_ids_by_port_policy_binding(
self.obj_context, self.id)
@classmethod
def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
binding_db_id_column, policy_id):
return list(itertools.chain.from_iterable(
session.query(bound_db.tenant_id).join(
binding_db, bound_db.id == binding_db_id_column).filter(
binding_db.policy_id == policy_id).all()))
@classmethod
def get_bound_tenant_ids(cls, context, policy_id):
"""Implements RbacNeutronObject.get_bound_tenant_ids.
:returns: set -- a set of tenants' ids dependant on QosPolicy.
"""
net = models_v2.Network
qosnet = qos_db_model.QosNetworkPolicyBinding
port = models_v2.Port
qosport = qos_db_model.QosPortPolicyBinding
bound_tenants = []
with db_api.autonested_transaction(context.session):
bound_tenants.extend(cls._get_bound_tenant_ids(
context.session, qosnet, net, qosnet.network_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosport, port,
qosport.port_id, policy_id))
return set(bound_tenants)
def obj_make_compatible(self, primitive, target_version):
def filter_rules(obj_names, rules):
return [rule for rule in rules if
rule['versioned_object.name'] in obj_names]
_target_version = versionutils.convert_version_to_tuple(target_version)
names = []
if _target_version >= (1, 0):
names.append(rule_obj_impl.QosBandwidthLimitRule.obj_name())
if _target_version >= (1, 1):
names.append(rule_obj_impl.QosDscpMarkingRule.obj_name())
if _target_version >= (1, 2):
names.append(rule_obj_impl.QosMinimumBandwidthRule.obj_name())
if 'rules' in primitive and names:
primitive['rules'] = filter_rules(names, primitive['rules'])
if _target_version < (1, 3):
standard_fields = ['revision_number', 'created_at', 'updated_at']
for f in standard_fields:
primitive.pop(f)
if primitive['description'] is None:
# description was not nullable before
raise exception.IncompatibleObjectVersion(
objver=target_version, objname='QoSPolicy')
if _target_version < (1, 4):
primitive['tenant_id'] = primitive.pop('project_id')
|
[
"jcdiaztorres96@gmail.com"
] |
jcdiaztorres96@gmail.com
|
79afb0e2319f1027af485ded125bd8ba9a51eb44
|
48a3bdc4f63cadd3414665e58d8fb8ab208100ed
|
/Prediction_file.py
|
a7e8ff33e63a1bf946b2db5fe6e9c836b3ec6c3a
|
[] |
no_license
|
HPYC1305KABU/-Voice-Based-Speech-Recognition-System-
|
f086f3cb07f4e0f84e6948368e0469d06b515d9a
|
54e70b34307ee72e136201671dd70e3ad6b73276
|
refs/heads/main
| 2023-02-28T09:07:36.531766
| 2021-02-08T19:58:03
| 2021-02-08T19:58:03
| 337,192,896
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,214
|
py
|
import numpy as np
import pandas as pd
#import seaborn as sns
#import matplotlib.pyplot as plt
#from sklearn.preprocessing import StandardScaler
#import scipy as sp
#import sklearn
#import random
#import time
from sklearn.preprocessing import MinMaxScaler
#from sklearn import preprocessing, model_selection
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
from sklearn.utils import shuffle
from keras.models import model_from_json
data = pd.read_csv('Main_file.csv')
data = shuffle(data)
#for test data------------------------------------------
data_test = pd.read_csv('Test_Data.csv')
data_test = shuffle(data_test)
#--------------------------------------------
i = 200
#data_to_predict = data_test[:i].reset_index(drop = True)
#predict_name = data_to_predict.name
#predict_name = np.array(predict_name)
#prediction = np.array(data_to_predict.drop(['name'],axis= 1))
#data = data.reset_index(drop = True)
#X = data.drop(['name'], axis = 1)
#scaler = MinMaxScaler(feature_range=(0, 1))
#X = scaler.fit_transform(X)
#X = pd.DataFrame(X)
#X = np.array(X)
Y = data['name']
# Transform name species into numerical values
encoder = LabelEncoder()
encoder.fit(Y)
Y = encoder.transform(Y)
Y = np_utils.to_categorical(Y)
#print(Y)
#for test data-----------------------------------------------------
X_test = data_test.drop(['name'], axis = 1)
#X_test = data.dropna(axis = 0, how ='any')
scaler = MinMaxScaler(feature_range=(0, 1))
X_test = scaler.fit_transform(X_test)
X_test = pd.DataFrame(X_test)
X_test = np.array(X_test)
#Y_test = data_test['name']
# Transform name species into numerical values
#encoder = LabelEncoder()
#encoder.fit(Y_test)
#Y_test = encoder.transform(Y_test)
#Y_test = np_utils.to_categorical(Y_test)
#print(Y)
# later...
# load json and create model
json_file = open('xyz.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("xyz.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
#loaded_model.compile(loss = 'categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'] )
#score = loaded_model.evaluate(X, Y, verbose=0)
#print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
for j in range(i):
print('-------------------------------------------------------')
predictions = loaded_model.predict_classes(X_test)
#prediction_ = np.argmax(to_categorical(predictions), axis=1)
#prediction_ = np.argsort(predictions, axis=-1, kind='quicksort', order=None)
prediction_ = np.argsort(to_categorical(predictions[j]))[:-9:-1]
prediction_ = encoder.inverse_transform(prediction_)
#print(prediction_)
## print( " the nn predict {}, and the brand to find is {}".format(i,j))
print("----------------------------------------------------------------------------------------------")
pred = loaded_model.predict_proba(X_test)
dfe = pred[j]*100
wer = np.sort(pred[j]*100)[:-9:-1]
abc = dict(zip(prediction_,wer))
print(abc)
#print(wer)
|
[
"noreply@github.com"
] |
HPYC1305KABU.noreply@github.com
|
ff4baa4f91bf4f0f7a02f3ffb5c1b128d0847c97
|
1ec04220c0528d0068988c0e8e5d2aceb23e4860
|
/conf/__init__.py
|
7deef2edc4d30e6c35705dbf5f9da04855a00301
|
[
"Apache-2.0"
] |
permissive
|
circleJ/owncloud_client
|
ae0010f79e89604d8169d5db8cdb77b4baaccec2
|
d650a73938b2fdadf7517344bf773d551d78c993
|
refs/heads/master
| 2020-07-01T10:38:55.520797
| 2019-08-08T01:03:49
| 2019-08-08T01:03:49
| 201,149,781
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
|
[
"noreply@github.com"
] |
circleJ.noreply@github.com
|
c87d5fbee9a277cdd039d9486a6344c9cc7a09a4
|
671a4ebbd64e1d1784e0821d8ade887d143cde0d
|
/BB8/sep2/interface.py
|
59b75f79a192ba8d5d1e1df0ac7b4fb8a2a3eb12
|
[] |
no_license
|
smithenator5000/bb8
|
1d08afa7ff104f69ccda4184433e98ead578551c
|
2dd9be449c7ea4068a2444298c607592e48b212d
|
refs/heads/master
| 2020-04-14T19:15:47.629214
| 2019-01-04T03:27:32
| 2019-01-04T03:27:32
| 164,051,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,418
|
py
|
import bluepy.btle as bluetooth
import threading
import struct
COMMAND = dict(
CMD_PING = [0x00, 0x01],
CMD_VERSION = [0x00, 0x02],
CMD_CONTROL_UART_TX = [0x00, 0x03],
CMD_SET_BT_NAME = [0x00, 0x10],
CMD_GET_BT_NAME = [0x00, 0x11],
CMD_SET_AUTO_RECONNECT = [0x00, 0x12],
CMD_GET_AUTO_RECONNECT = [0x00, 0x13],
CMD_GET_PWR_STATE = [0x00, 0x20],
CMD_SET_PWR_NOTIFY = [0x00, 0x21],
CMD_SLEEP = [0x00, 0x22],
GET_POWER_TRIPS = [0x00, 0x23],
SET_POWER_TRIPS = [0x00, 0x24],
SET_INACTIVE_TIMER = [0x00, 0x25],
CMD_GOTO_BL = [0x00, 0x30],
CMD_RUN_L1_DIAGS = [0x00, 0x40],
CMD_RUN_L2_DIAGS = [0x00, 0x41],
CMD_CLEAR_COUNTERS = [0x00, 0x42],
CMD_ASSIGN_TIME = [0x00, 0x50],
CMD_POLL_TIMES = [0x00, 0x51],
BEGIN_REFLASH = [0x01, 0x02],
HERE_IS_PAGE = [0x01, 0x03],
LEAVE_BOOTLOADER = [0x01, 0x04],
IS_PAGE_BLANK = [0x01, 0x05],
CMD_ERASE_USER_CONFIG = [0x01, 0x06],
CMD_SET_CAL = [0x02, 0x01],
CMD_SET_STABILIZ = [0x02, 0x02],
CMD_SET_ROTATION_RATE = [0x02, 0x03],
CMD_SET_CREATION_DATE = [0x02, 0x04],
CMD_REENABLE_DEMO = [0x02, 0x06],
CMD_GET_CHASSIS_ID = [0x02, 0x07],
CMD_SET_CHASSIS_ID = [0x02, 0x08],
CMD_SELF_LEVEL = [0x02, 0x09],
CMD_SET_VDL = [0x02, 0x0A],
CMD_SET_DATA_STREAMING = [0x02, 0x11],
CMD_SET_COLLISION_DET = [0x02, 0x12],
CMD_LOCATOR = [0x02, 0x13],
CMD_SET_ACCELERO = [0x02, 0x14],
CMD_READ_LOCATOR = [0x02, 0x15],
CMD_SET_RGB_LED = [0x02, 0x20],
CMD_SET_BACK_LED = [0x02, 0x21],
CMD_GET_RGB_LED = [0x02, 0x22],
CMD_ROLL = [0x02, 0x30],
CMD_BOOST = [0x02, 0x31],
CMD_MOVE = [0x02, 0x32],
CMD_SET_RAW_MOTORS = [0x02, 0x33],
CMD_SET_MOTION_TO = [0x02, 0x34],
CMD_SET_OPTIONS_FLAG = [0x02, 0x35],
CMD_GET_OPTIONS_FLAG = [0x02, 0x36],
CMD_SET_TEMP_OPTIONS_FLAG = [0x02, 0x37],
CMD_GET_TEMP_OPTIONS_FLAG = [0x02, 0x38],
CMD_GET_CONFIG_BLK = [0x02, 0x40],
CMD_SET_SSB_PARAMS = [0x02, 0x41],
CMD_SET_DEVICE_MODE = [0x02, 0x42],
CMD_SET_CFG_BLOCK = [0x02, 0x43],
CMD_GET_DEVICE_MODE = [0x02, 0x44],
CMD_GET_SSB = [0x02, 0x46],
CMD_SET_SSB = [0x02, 0x47],
CMD_SSB_REFILL = [0x02, 0x48],
CMD_SSD_BUY = [0x02, 0x49],
CMD_SSB_USE_CONSUMEABLE = [0x02, 0x4A],
CMD_SSB_GRANT_CORES = [0x02, 0x4B],
CMD_SSB_ADD_XP = [0x02, 0x4C],
CMD_SSB_LEVEL_UP_ATTR = [0x02, 0x4D],
CMD_GET_PW_SEED = [0x02, 0x4E],
CMD_SSB_ENABLE_ASYNC = [0x02, 0x4F],
CMD_RUN_MACRO = [0x02, 0x50],
CMD_SAVE_TEMP_MACRO = [0x02, 0x51],
CMD_SAVE_MACRO = [0x02, 0x52],
CMD_INIT_MACRO_EXECUTIVE = [0x02, 0x54],
CMD_ABORT_MACRO = [0x02, 0x55],
CMD_MACRO_STATUS = [0x02, 0x56],
CMD_SET_MACRO_PARAM = [0x02, 0x57],
CMD_APPEND_TEMO_MACRO_CHUNK = [0x02, 0x58],
CMD_ERASE_ORBBAS = [0x02, 0x60],
CMD_APPEND_FRAG = [0x02, 0x61],
CMD_EXEC_ORBBAS = [0x02, 0x62],
CMD_ABORT_ORBBAS = [0x02, 0x63],
CMD_ANSWER_INPUT = [0x02, 0x64],
CMD_COMMIT_TO_FLASH = [0x02, 0x65])
ASYNC = dict(
POWER = 0x01,
DIAGNOSTICS = 0x02,
SENSE = 0x03,
CONTENTS = 0x04,
PRESLEEP = 0X05,
MARKERS = 0x06,
COLLISION = 0x07,
OBPRINT = 0x08,
OBERRASC = 0x09,
OBERRBIN = 0x0a,
SELFLEVEL = 0x0b,
GYROLIM = 0x0c,
SOUL = 0x0d,
LEVELUP = 0x0e,
SHIELD = 0x0f,
XP = 0x10,
BOOST = 0x11)
MRSC = dict(
ORBOTIX_RSP_CODE_OK = 0x00,
ORBOTIX_RSP_CODE_EGEN = 0x01,
ORBOTIX_RSP_CODE_ECHKSUM = 0x02,
ORBOTIX_RSP_CODE_EFRAG = 0x03,
ORBOTIX_RSP_CODE_EBAD_CMD = 0x04,
ORBOTIX_RSP_CODE_EUNSUPP = 0x05,
ORBOTIX_RSP_CODE_EBAD_MSG = 0x06,
ORBOTIX_RSP_CODE_EPARAM = 0x07,
ORBOTIX_RSP_CODE_EEXEC = 0x08,
ORBOTIX_RSP_CODE_EBAD_DID = 0x09,
ORBOTIX_RSP_CODE_MEM_BUSY = 0x0A,
ORBOTIX_RSP_CODE_BAD_PASSWORD = 0x0B,
ORBOTIX_RSP_CODE_POWER_NOGOOD = 0x31,
ORBOTIX_RSP_CODE_PAGE_ILLEGAL = 0x32,
ORBOTIX_RSP_CODE_FLASH_FAIL = 0x33,
ORBOTIX_RSP_CODE_MA_CORRUPT = 0x34,
ORBOTIX_RSP_CODE_MSG_TIMEOUT = 0x35)
class Comm(bluetooth.DefaultDelegate, threading.Thread): #class dealing with sending and receiving Sphero commands
device = None
per = None
msg = []
handle = []
async = []
end = None
address = None
antidos = None
wakecpu = None
txpower = None
main = None
refresh = None
pending = None
def __init__(self, device, refresh):
threading.Thread.__init__(self)
self.device = device
self.address = device.address
self.per = bluetooth.Peripheral(self.address, addrType = bluetooth.ADDR_TYPE_RANDOM)
self.per.setDelegate(self)
self.antidos = self.per.getCharacteristics(uuid = "22bb746f2bbd75542d6f726568705327")[0]
self.wakecpu = self.per.getCharacteristics(uuid = "22bb746f2bbf75542d6f726568705327")[0]
self.txpower = self.per.getCharacteristics(uuid = "22bb746f2bb275542d6f726568705327")[0]
self.main = self.per.getCharacteristics(uuid = "22bb746f2ba175542d6f726568705327")[0]
self.notify = self.per.getCharacteristics(uuid = "22bb746f2ba675542d6f726568705327")[0]
self.antidos.write("011i3", withResponse = True)
self.txpower.write("\x0007", withResponse = True)
self.wakecpu.write("\x01", withResponse = True)
self.end = threading.Event()
self.refresh = refresh
self.pending = False
def addMessage(self, msgk, handler):
self.msg.append([msgk.seq, msgk])
if(handler is not None): self.handle.append([msgk.seq, handler])
else: self.handle.append([msgk.seq, self.dummy])
def dummy(self, x):
i = 1
def addRegime(self, asyncID, handler):
self.async.append([ASYNC[asyncID], handler])
def send(self):
if(len(self.msg) == 0): return
msgk = self.msg[0][1]
#print("Sending {}".format(msgk.construct().encode("hex")))
self.main.write(msgk.construct(), withResponse = msgk.response)
if(not msgk.response): self.msg = self.msg[1:][:]
def run(self):
while(not self.end.isSet()):
self.send()
self.per.waitForNotifications(self.refresh)
def handleNotification(self, cHandle, data):
responseStr = data.encode("hex")
#print(responseStr)
if("ff" not in responseStr): return
while(responseStr[0:2] != "ff"): responseStr = responseStr[1:]
if(len(responseStr) < 12): return
if(responseStr[2:4] not in ["ff", "fe"]): return
if(responseStr[2:4] == "ff"):
seq = int(responseStr[6:8], 16)
try:
a = self.msg[:][0].index(seq)
self.msg = self.msg[0:a][:] + self.msg[(a+1):][:]
except: print("Ah, well")
if(len(self.handle) > 0):
if(seq in self.handle[:][0]):
k = self.handle[:][0].index(seq)
self.handle[k][1](responseStr)
self.handle = self.handle[0:k][:] + self.handle[(k+1):][:]
elif(responseStr[2:4] == "fe"):
j = int(responseStr[4:6], 16)
if(len(self.async) > 0):
if(j in self.async[:][0]):
k = self.async[:][0].index(j)
self.async[k][1](responseStr)
class Device: #class dealing with user control of Sphero
adress = None
comm = None
seq = None
def __init__(self, address, refresh):
self.address = address
self.comm = Comm(self, refresh)
self.seq = 1
def inc_seq(self):
self.seq = self.seq + 1
if(self.seq > 0xff): self.seq = 1
def ping(self, response, handler = None):
msg = Message(response, "CMD_PING", self.seq, [])
self.comm.addMessage(msg, handler)
self.inc_seq()
def set_rgb_led(self, response, red, green, blue, custom, handler = None):
msg = Message(response, "CMD_SET_RGB_LED", self.seq, [red, green, blue, custom])
self.comm.addMessage(msg, handler)
self.inc_seq()
def get_rgb_led(self, response, handler = None):
msg = Message(response, "CMD_GET_RGB_LED", self.seq, [])
self.comm.addMessage(msg, handler)
self.inc_seq()
def roll(self, response, speed, heading, mode, handler = None):
heading = self.split(heading, 2)
msg = Message(response, "CMD_ROLL", self.seq, [speed] + heading + [mode])
self.comm.addMessage(msg, handler)
self.inc_seq()
def set_data_streaming(self, response, N, M, MASK, COUNT, MASK2 = None, handler = None):
N = self.split(N, 2)
M = self.split(M, 2)
MASK = self.split(MASK, 4)
data = N + M + MASK + [COUNT]
if(MASK2 is not None):
MASK2 = self.split(MASK2, 4)
data = data + MASK2
msg = Message(response, "CMD_SET_DATA_STREAMING", self.seq, data)
self.comm.addMessage(msg, handler)
self.inc_seq()
def execute_ob(self, response, area, start, handler = None):
start = self.split(start, 2)
msg = Message(response, "CMD_EXEC_ORBBAS", self.seq, [area] + start)
print(msg.construct().encode("hex"))
self.comm.addMessage(msg, handler)
self.inc_seq()
def set_sensor_handler(self, handler):
self.comm.addRegime("SENSE", handler)
def begin(self): #begins comm thread
self.comm.start()
def end(self): #ends comm thread
self.comm.end.set()
self.comm.join()
def disconnect(self):
self.comm.per.disconnect()
def split(self, num, units):
num = hex(num)[2:]
while(len(num) < 2*units): num = "0" + num
res = []
for i in range(units):
b = int(num[2*i:2*(i + 1)], 16)
res.append(b)
return res
class Message: #class representing standard messages
response = None
command = None
data = None
seq = None
def __init__(self, response, command, seq, data):
self.response = response
self.command = command
self.data = data
self.seq = seq
def construct(self):
if(self.response): output = COMMAND[self.command] + [self.seq, len(self.data) + 1] + self.data
else: output = COMMAND[self.command] + [0, len(self.data) + 1] + self.data
chksum = ~ sum(output) % 256
output = output + [chksum]
if(self.response): output = [0xff, 0xff] + output
else: output = [0xff, 0xfe] + output
msg = "".join(struct.pack("B", x) for x in output)
return msg
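
# --- Added usage sketch (not part of the original module) ---
# A minimal example of driving the Device API above. The MAC address and the
# 1-second refresh are placeholders; real Sphero hardware is required, so the
# calls are left commented out.
#
# if __name__ == "__main__":
#     def on_ping(resp):
#         print("ping response: " + resp)
#     sphero = Device("AA:BB:CC:DD:EE:FF", 1.0)
#     sphero.begin()                                  # start the Comm thread
#     sphero.ping(True, handler=on_ping)              # ping with a response callback
#     sphero.set_rgb_led(False, 0xff, 0x00, 0x00, 0)  # LED red, no response requested
#     sphero.roll(False, 0x40, 90, 1)                 # speed 0x40, heading 90 degrees
#     sphero.end()
#     sphero.disconnect()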
|
[
"noreply@github.com"
] |
smithenator5000.noreply@github.com
|
3d860526ef201ce9fd12ae22e5039f7d84358fea
|
5b8a945adc0ea9e2e355ffc873db5a3760de6133
|
/proConf/settings.py
|
010f3289d966cd24fd7ba54af8ca5e27a3d2ff2d
|
[] |
no_license
|
Shirhussain/realestate
|
49d6dfc108823e774a4c7ade56f1895be9436616
|
07746368145fc5a70f4753b9510821911ce5f275
|
refs/heads/main
| 2023-02-25T07:32:35.077623
| 2020-10-30T16:23:04
| 2020-10-30T16:23:04
| 308,142,611
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,830
|
py
|
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6(6$q1glgy@bm9oycpr*w8gji2d&wdxxm&=4-8catlj!0aiu7m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#myapp
'realstate.apps.RealstateConfig',
'account.apps.AccountConfig',
'contact.apps.ContactConfig',
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    #for humanizing numbers (e.g. prices separated by commas) the following app is needed
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proConf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / "templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proConf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'realstatedb',
'USER': 'postgres',
'PASSWORD' : 'king',
'HOST' : 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
#the static root is where collectstatic gathers all static files into one root named 'static'
#for this purpose you have to run collectstatic
STATIC_ROOT = BASE_DIR / "static"
MEDIA_ROOT = BASE_DIR / "media"
#the following setting is used to serve static files in development
#from the specific app directories listed below
STATICFILES_DIRS = [
BASE_DIR / "realstate/static",
]
#django messages
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
#django email config
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'yourReal@gmail.com'
EMAIL_HOST_PASSWORD = 'yourPasswordOfGamil'
EMAIL_USE_TLS = True
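
# --- Added illustration (not part of the original settings) ---
# A minimal sketch of how the SMTP settings above are typically consumed, for
# example from the contact app. The recipient address is a placeholder; the
# snippet is commented out because settings.py must not send mail on import.
#
# from django.core.mail import send_mail
# send_mail(
#     'Listing inquiry',                     # subject
#     'Someone is interested in a listing',  # message body
#     EMAIL_HOST_USER,                       # from address configured above
#     ['owner@example.com'],                 # placeholder recipient list
#     fail_silently=False,
# )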
|
[
"sh.danishyar@gmail.com"
] |
sh.danishyar@gmail.com
|
170118c48959a608c2eeb55ce9ba0a772d5ef117
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/mne-tools_mne-python/mne-python-master/mne/io/eeglab/tests/test_eeglab.py
|
dd69dfd60790326e22307a681e1f7daed0599aef
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,518
|
py
|
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Mikolaj Magnuski <mmagnuski@swps.edu.pl>
#
# License: BSD (3-clause)
import os.path as op
import shutil
import warnings
from nose.tools import assert_raises, assert_equal, assert_true
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from mne import write_events, read_epochs_eeglab, Epochs, find_events
from mne.io import read_raw_eeglab
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.eeglab.eeglab import _read_eeglab_events
from mne.datasets import testing
from mne.utils import _TempDir, run_tests_if_main, requires_version
base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
raw_fname = op.join(base_dir, 'test_raw.set')
raw_fname_onefile = op.join(base_dir, 'test_raw_onefile.set')
epochs_fname = op.join(base_dir, 'test_epochs.set')
epochs_fname_onefile = op.join(base_dir, 'test_epochs_onefile.set')
montage = op.join(base_dir, 'test_chans.locs')
warnings.simplefilter('always') # enable b/c these tests throw warnings
@requires_version('scipy', '0.12')
@testing.requires_testing_data
def test_io_set():
"""Test importing EEGLAB .set files"""
from scipy import io
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# main tests, and test missing event_id
_test_raw_reader(read_raw_eeglab, input_fname=raw_fname,
montage=montage)
_test_raw_reader(read_raw_eeglab, input_fname=raw_fname_onefile,
montage=montage)
for want in ('Events like', 'consist entirely', 'could not be mapped',
'string preload is not supported'):
assert_true(any(want in str(ww.message) for ww in w))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# test finding events in continuous data
event_id = {'rt': 1, 'square': 2}
raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
event_id=event_id, preload=True)
raw1 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
event_id=event_id, preload=False)
raw2 = read_raw_eeglab(input_fname=raw_fname_onefile, montage=montage,
event_id=event_id)
raw3 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
event_id=event_id)
raw4 = read_raw_eeglab(input_fname=raw_fname, montage=montage)
Epochs(raw0, find_events(raw0), event_id)
epochs = Epochs(raw1, find_events(raw1), event_id)
assert_equal(len(find_events(raw4)), 0) # no events without event_id
assert_equal(epochs["square"].average().nave, 80) # 80 with
        # assert_array_equal compares only its first two arguments, so check each reader against raw0
        for other in (raw1, raw2, raw3):
            assert_array_equal(raw0[:][0], other[:][0])
            assert_array_equal(raw0[:][-1], other[:][-1])
assert_equal(len(w), 4)
# 1 for preload=False / str with fname_onefile, 3 for dropped events
raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto',
phase='zero') # test that preloading works
# test that using uint16_codec does not break stuff
raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
event_id=event_id, preload=False,
uint16_codec='ascii')
# test old EEGLAB version event import
eeg = io.loadmat(raw_fname, struct_as_record=False,
squeeze_me=True)['EEG']
for event in eeg.event: # old version allows integer events
event.type = 1
assert_equal(_read_eeglab_events(eeg)[-1, -1], 1)
eeg.event = eeg.event[0] # single event
assert_equal(_read_eeglab_events(eeg)[-1, -1], 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = read_epochs_eeglab(epochs_fname)
epochs2 = read_epochs_eeglab(epochs_fname_onefile)
# one warning for each read_epochs_eeglab because both files have epochs
# associated with multiple events
assert_equal(len(w), 2)
assert_array_equal(epochs.get_data(), epochs2.get_data())
# test different combinations of events and event_ids
temp_dir = _TempDir()
out_fname = op.join(temp_dir, 'test-eve.fif')
write_events(out_fname, epochs.events)
event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3}
epochs = read_epochs_eeglab(epochs_fname, epochs.events, event_id)
assert_equal(len(epochs.events), 4)
assert_true(epochs.preload)
assert_true(epochs._bad_dropped)
epochs = read_epochs_eeglab(epochs_fname, out_fname, event_id)
assert_raises(ValueError, read_epochs_eeglab, epochs_fname,
None, event_id)
assert_raises(ValueError, read_epochs_eeglab, epochs_fname,
epochs.events, None)
# test reading file with one event
eeg = io.loadmat(raw_fname, struct_as_record=False,
squeeze_me=True)['EEG']
one_event_fname = op.join(temp_dir, 'test_one_event.set')
io.savemat(one_event_fname, {'EEG':
{'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': 'test_one_event.fdt',
'epoch': eeg.epoch, 'event': eeg.event[0],
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}})
shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
op.join(temp_dir, 'test_one_event.fdt'))
event_id = {eeg.event[0].type: 1}
read_raw_eeglab(input_fname=one_event_fname, montage=montage,
event_id=event_id, preload=True)
# test reading file with one channel
one_chan_fname = op.join(temp_dir, 'test_one_channel.set')
io.savemat(one_chan_fname, {'EEG':
{'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': 1, 'data': np.random.random((1, 3)),
'epoch': eeg.epoch, 'event': eeg.epoch,
'chanlocs': {'labels': 'E1', 'Y': -6.6069,
'X': 6.3023, 'Z': -2.9423},
'times': eeg.times[:3], 'pnts': 3}})
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
read_raw_eeglab(input_fname=one_chan_fname, preload=True)
# no warning for 'no events found'
assert_equal(len(w), 0)
# test reading file with 3 channels - one without position information
# first, create chanlocs structured array
ch_names = ['F3', 'unknown', 'FPz']
x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
chanlocs = np.zeros((3,), dtype=dt)
for ind, vals in enumerate(zip(ch_names, x, y, z)):
for fld in range(4):
chanlocs[ind][dt[fld][0]] = vals[fld]
# save set file
one_chanpos_fname = op.join(temp_dir, 'test_chanpos.set')
io.savemat(one_chanpos_fname, {'EEG':
{'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': 3, 'data': np.random.random((3, 3)),
'epoch': eeg.epoch, 'event': eeg.epoch,
'chanlocs': chanlocs, 'times': eeg.times[:3], 'pnts': 3}})
# load it
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
# one warning because some channels are not found in Montage
assert_equal(len(w), 1)
# position should be present for first two channels
for i in range(2):
assert_array_equal(raw.info['chs'][i]['loc'][:3],
np.array([-chanlocs[i]['Y'],
chanlocs[i]['X'],
chanlocs[i]['Z']]))
# position of the last channel should be zero
assert_array_equal(raw.info['chs'][-1]['loc'][:3], np.array([0., 0., 0.]))
# test reading channel names from set and positions from montage
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True,
montage=montage)
# one warning because some channels are not found in Montage
assert_equal(len(w), 1)
# when montage was passed - channel positions should be taken from there
correct_pos = [[-0.56705965, 0.67706631, 0.46906776], [0., 0., 0.],
[0., 0.99977915, -0.02101571]]
for ch_ind in range(3):
assert_array_almost_equal(raw.info['chs'][ch_ind]['loc'][:3],
np.array(correct_pos[ch_ind]))
# test reading channel names but not positions when there is no X (only Z)
# field in the EEG.chanlocs structure
nopos_chanlocs = chanlocs[['labels', 'Z']]
nopos_fname = op.join(temp_dir, 'test_no_chanpos.set')
io.savemat(nopos_fname, {'EEG':
{'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3,
'data': np.random.random((3, 2)), 'epoch': eeg.epoch,
'event': eeg.epoch, 'chanlocs': nopos_chanlocs,
'times': eeg.times[:2], 'pnts': 2}})
# load the file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
# test that channel names have been loaded but not channel positions
for i in range(3):
assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
assert_array_equal(raw.info['chs'][i]['loc'][:3],
np.array([0., 0., 0.]))
# test if .dat file raises an error
eeg = io.loadmat(epochs_fname, struct_as_record=False,
squeeze_me=True)['EEG']
eeg.data = 'epochs_fname.dat'
bad_epochs_fname = op.join(temp_dir, 'test_epochs.set')
io.savemat(bad_epochs_fname, {'EEG':
{'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': eeg.data,
'epoch': eeg.epoch, 'event': eeg.event,
'chanlocs': eeg.chanlocs}})
shutil.copyfile(op.join(base_dir, 'test_epochs.fdt'),
op.join(temp_dir, 'test_epochs.dat'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_raises(NotImplementedError, read_epochs_eeglab,
bad_epochs_fname)
assert_equal(len(w), 1)
run_tests_if_main()
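
# --- Added illustration (not part of the original test module) ---
# A minimal sketch of the reader workflow these tests exercise, assuming a
# local EEGLAB dataset at the hypothetical path 'subject1.set':
#
# from mne import Epochs, find_events
# from mne.io import read_raw_eeglab
#
# raw = read_raw_eeglab('subject1.set', event_id={'rt': 1, 'square': 2},
#                       preload=True)
# epochs = Epochs(raw, find_events(raw), event_id={'rt': 1, 'square': 2})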
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
f8e0f9ae6ad7f697e748d4e0c96525e5b1d065d9
|
812007abf3fcc55b71018e0e1d0be24dde8245cf
|
/analyze_image.py
|
aabfe565524c539e828fa39cf8e8b6ec73d8890c
|
[] |
no_license
|
SoftwareDeveloper007/Face-Emotion-Recognition
|
b7b71a93e5202b45582e780ca45845aba653307d
|
5d6314d0f3b96e7a1121c25e504e74baa4d43341
|
refs/heads/master
| 2021-06-23T00:23:40.147779
| 2017-08-18T19:38:30
| 2017-08-18T19:38:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,491
|
py
|
import json
try:
import http.client, urllib.request, urllib.parse, urllib.error, base64, sys
class analyze_image():
def __init__(self, API_KEY, IMAGE_URL):
self.api_key = API_KEY
self.image_url = IMAGE_URL
self.get_analyzed_data()
def get_analyzed_data(self):
headers = {
# Request headers. Replace the placeholder key below with your subscription key.
'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': self.api_key,
}
body = ""
# load image
#filename = 'D:/9_Github/3_Github Samples/2_Scraping/microsoft-emotion-recognition/chris_young.jpg'
filename = self.image_url
f = open(filename, "rb")
body = f.read()
f.close()
params = urllib.parse.urlencode({
})
# Replace the example URL below with the URL of the image you want to analyze.
#body = "{ 'url': '" + self.image_url + "' }"
try:
# NOTE: You must use the same region in your REST call as you used to obtain your subscription keys.
# For example, if you obtained your subscription keys from westcentralus, replace "westus" in the
# URL below with "westcentralus".
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
conn.request("POST", "/emotion/v1.0/recognize?%s" % params, body, headers)
response = conn.getresponse().read()
self.data = json.loads(response)[0]
print(self.data)
conn.close()
except Exception as e:
print(e.args)
except ImportError:  # fall back to the Python 2 standard library modules
import httplib, urllib, base64
class analyze_image():
def __init__(self, API_KEY, IMAGE_URL):
self.api_key = API_KEY
self.image_url = IMAGE_URL
self.get_analyzed_data()
def get_analyzed_data(self):
headers = {
# Request headers. Replace the placeholder key below with your subscription key.
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': self.api_key,
}
            params = urllib.urlencode({  # Python 2: urlencode lives directly on urllib
})
# Replace the example URL below with the URL of the image you want to analyze.
body = "{ 'url': '" + self.image_url + "' }"
try:
# NOTE: You must use the same region in your REST call as you used to obtain your subscription keys.
# For example, if you obtained your subscription keys from westcentralus, replace "westus" in the
# URL below with "westcentralus".
conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')
conn.request("POST", "/emotion/v1.0/recognize?%s" % params, body, headers)
response = conn.getresponse().read()
self.data = json.loads(response)[0]
print(self.data)
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
if __name__ == '__main__':
API_KEY = '1b897276f50843f78412b3185b80afcd'
IMAGE_URL = 'https://jbf-media.s3.amazonaws.com/production/event/2016/10/3/del_coro_ben1.jpg'
app = analyze_image(API_KEY, IMAGE_URL)
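    # --- Added note (not part of the original script) ---
    # The Emotion API v1.0 response parsed above is expected to be a list of
    # faces, each with a 'scores' dict; self.data keeps only the first face.
    # Also note that the Python 3 branch reads IMAGE_URL as a local file path,
    # so a local image such as 'face.jpg' (hypothetical) may be needed there
    # instead of a URL.
    #
    # scores = app.data.get('scores', {})
    # if scores:
    #     top_emotion = max(scores, key=scores.get)
    #     print('Dominant emotion: %s (%.2f)' % (top_emotion, scores[top_emotion]))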
|
[
"28940651+YellowBackCorn@users.noreply.github.com"
] |
28940651+YellowBackCorn@users.noreply.github.com
|
775caf758d235a747c663d12580455c9e9952cb6
|
78db5bc74181173f2d00bea409997a64b4682adf
|
/venv/lib/python3.9/site-packages/pip/_vendor/html5lib/serializer.py
|
f82ea6e5ea48975e1835be992b5f3570968b824f
|
[
"MIT"
] |
permissive
|
CiscoDevNet/meraki-code
|
dfe680f077ebd053a3b663f1434f648f5a91b541
|
d031aab82e3fa5ce7cf57b257fef8c9a4c63d71e
|
refs/heads/master
| 2023-05-28T18:43:28.848983
| 2022-04-11T19:45:19
| 2022-04-11T19:45:19
| 188,288,487
| 67
| 60
|
MIT
| 2023-05-23T00:51:58
| 2019-05-23T18:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 16,168
|
py
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
"""Serializes the input token stream using the specified treewalker
:arg input: the token stream to serialize
:arg tree: the treewalker to use
:arg encoding: the encoding to use
:arg serializer_opts: any options to pass to the
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
:returns: the tree serialized as a string
Example:
>>> from html5lib.html5parser import parse
>>> from html5lib.serializer import serialize
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
>>> serialize(token_stream, omit_optional_tags=False)
'<html><head></head><body><p>Hi!</p></body></html>'
"""
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer
:arg inject_meta_charset: Whether or not to inject the meta charset.
Defaults to ``True``.
:arg quote_attr_values: Whether to quote attribute values that don't
require quoting per legacy browser behavior (``"legacy"``), when
required by the standard (``"spec"``), or always (``"always"``).
Defaults to ``"legacy"``.
:arg quote_char: Use given quote character for attribute quoting.
Defaults to ``"`` which will use double quotes unless attribute
value contains a double quote, in which case single quotes are
used.
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
values.
Defaults to ``False``.
:arg escape_rcdata: Whether to escape characters that need to be
escaped within normal elements within rcdata elements such as
style.
Defaults to ``False``.
:arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.
Defaults to ``True``.
:arg strip_whitespace: Whether to remove semantically meaningless
whitespace. (This compresses all whitespace to a single space
except within ``pre``.)
Defaults to ``False``.
:arg minimize_boolean_attributes: Shortens boolean attributes to give
just the attribute value, for example::
<input disabled="disabled">
becomes::
<input disabled>
Defaults to ``True``.
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
start tag of void elements (empty elements whose end tag is
forbidden). E.g. ``<hr/>``.
Defaults to ``False``.
:arg space_before_trailing_solidus: Places a space immediately before
the closing slash in a tag using a trailing solidus. E.g.
``<hr />``. Requires ``use_trailing_solidus=True``.
Defaults to ``True``.
:arg sanitize: Strip all unsafe or unknown constructs from output.
See :py:class:`html5lib.filters.sanitizer.Filter`.
Defaults to ``False``.
:arg omit_optional_tags: Omit start/end tags that are optional.
Defaults to ``True``.
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
Defaults to ``False``.
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifier contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
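
# --- Added usage sketch (not part of the original module) ---
# A small example of driving the serializer directly, mirroring the doctests
# above; commented out because this vendored module is imported by pip and
# should not execute code at import time.
#
# from pip._vendor.html5lib import parse, getTreeWalker
#
# token_stream = parse('<p class=note>Hi & bye</p>')
# walker = getTreeWalker('etree')
# serializer = HTMLSerializer(omit_optional_tags=False,
#                             quote_attr_values='always')
# print(serializer.render(walker(token_stream)))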
|
[
"agentle@cisco.com"
] |
agentle@cisco.com
|
233538cdcf672154b505c27a6f83b26578ee481e
|
9e765b38a03c2996e221a42c2a0dbc0fe02824cb
|
/cracking_the_coding_interview_qs/17.24/get_max_sum_submatrix.py
|
ee27a1e94867610e61ea9c58ec5ebcf2e73f322a
|
[
"Apache-2.0"
] |
permissive
|
angelusualle/algorithms
|
f709b4ae0c3275cece204d5fb56fd6ec34b4683b
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
refs/heads/main
| 2023-07-02T19:25:11.720114
| 2021-08-12T16:33:00
| 2021-08-12T16:33:00
| 269,791,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# O(n^3) time for an n x n matrix: fix a top row, extend the bottom row while
# maintaining per-column partial sums, then run Kadane's algorithm over the columns
def get_max_sum_submatrix(matrix):
num_rows = len(matrix)
num_cols = len(matrix[0])
best_one = None
for i in range(len(matrix)):
partial_sum = [0 for z in range(num_cols)]
for j in range(i, num_rows):
for z in range(num_cols):
partial_sum[z] += matrix[j][z]
start = 0
sum_ = 0
best = None
for y in range(num_cols):
sum_ += partial_sum[y]
if best is None or sum_ > best[0]:
best = (sum_, start, y)
if sum_ < 0:
start = y + 1
sum_ = 0
if best_one is None or best[0] > best_one[0]:
best_one = (best[0], i, best[1], j, best[2])
return best_one
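
# Added usage example (not part of the original solution). The returned tuple
# is (max_sum, top_row, left_col, bottom_row, right_col).
if __name__ == '__main__':
    grid = [[1, -2],
            [-3, 4]]
    # best submatrix is the single cell grid[1][1], so this prints (4, 1, 1, 1, 1)
    print(get_max_sum_submatrix(grid))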
|
[
"angelusualle@gmail.com"
] |
angelusualle@gmail.com
|
18bf59837660c6a5c2032b5ffb154da986ab663a
|
03710012371432e0dbb9668043fefa02b378616f
|
/lists/migrations/0004_item_list.py
|
6889d68cdec22f8c01908e1a5cc087cf62221f02
|
[] |
no_license
|
cgoodfred/tdd-book-python
|
abfa52a2fe5b9ad4a6e6165a78aa22c22ea78592
|
a24c2aabf8e5704f4cd874fa75548b249b2571fc
|
refs/heads/master
| 2023-05-07T19:55:47.309845
| 2020-06-06T11:57:13
| 2020-06-06T11:57:13
| 268,067,356
| 0
| 0
| null | 2021-06-10T22:58:29
| 2020-05-30T11:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-29 21:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('lists', '0003_list'),
]
operations = [
migrations.AddField(
model_name='item',
name='list',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='lists.List'),
),
]
|
[
"corey.goodfred@gmail.com"
] |
corey.goodfred@gmail.com
|
8b2349a6bf1ef11db5175235e32c85faa61bb3c4
|
9a60375964655de04b8a0b66ffdd68b04885d414
|
/gcloud/tests/apigw/test_apigw.py
|
d645315895c2e13be24a06577d38b4b1ea0aa24a
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kellyyk/work-sops
|
1e9696981f8f2474b1e4273a844277c7881cc722
|
b114db1b21c14b8454c34dff0453cb4fd8885c2e
|
refs/heads/master
| 2022-09-22T23:02:11.154540
| 2019-07-02T08:25:04
| 2019-07-02T08:25:04
| 194,818,709
| 0
| 0
|
NOASSERTION
| 2022-09-16T18:24:36
| 2019-07-02T08:11:08
|
Python
|
UTF-8
|
Python
| false
| false
| 56,933
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import absolute_import
import copy
import json
import logging
import jsonschema
from django.test import TestCase, Client
from pipeline.exceptions import PipelineException
from gcloud.core.utils import strftime_with_timezone
from gcloud.tasktmpl3.models import TaskTemplate
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.commons.template.models import CommonTemplate
from gcloud.periodictask.models import PeriodicTask
from gcloud.tests.mock import * # noqa
from gcloud.tests.mock_settings import * # noqa
logger = logging.getLogger('root')
def dummy_params_wrapper(perm):
def inner_dummy_wrapper(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return inner_dummy_wrapper
def dummy_wrapper(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
TEST_BIZ_CC_ID = '123' # do not change this to non number
TEST_BIZ_CC_NAME = 'biz name'
TEST_APP_CODE = 'app_code'
TEST_TEMPLATE_ID = '1' # do not change this to non number
TEST_TASKFLOW_ID = '2' # do not change this to non number
TEST_TASKFLOW_URL = 'url'
TEST_TASKFLOW_PIPELINE_TREE = 'pipeline_tree'
TEST_PERIODIC_TASK_ID = '3'  # do not change this to non number
TEST_DATA = 'data'
TEST_NODE_ID = 'node_id'
TEST_CALLBACK_DATA = 'callback_data'
TEST_COMPONENT_CODE = 'component_code'
TEST_SUBPROCESS_STACK = '[1, 2, 3]'
class APITest(TestCase):
@classmethod
def setUpClass(cls):
cls.GET_TEMPLATE_LIST_URL = '/apigw/get_template_list/{biz_cc_id}/'
cls.GET_TEMPLATE_INFO_URL = '/apigw/get_template_info/{template_id}/{bk_biz_id}/'
cls.CREATE_TASK_URL = '/apigw/create_task/{template_id}/{bk_biz_id}/'
cls.START_TASK_URL = '/apigw/start_task/{task_id}/{bk_biz_id}/'
cls.OPERATE_TASK_URL = '/apigw/operate_task/{task_id}/{bk_biz_id}/'
cls.GET_TASK_STATUS_URL = '/apigw/get_task_status/{task_id}/{bk_biz_id}/'
cls.QUERY_TASK_COUNT_URL = '/apigw/query_task_count/{bk_biz_id}/'
cls.GET_PERIODIC_TASK_LIST_URL = '/apigw/get_periodic_task_list/{bk_biz_id}/'
cls.GET_PERIODIC_TASK_INFO_URL = '/apigw/get_periodic_task_info/{task_id}/{bk_biz_id}/'
cls.CREATE_PERIODIC_TASK_URL = '/apigw/create_periodic_task/{template_id}/{bk_biz_id}/'
cls.SET_PERIODIC_TASK_ENABLED_URL = '/apigw/set_periodic_task_enabled/{task_id}/{bk_biz_id}/'
cls.MODIFY_PERIODIC_TASK_CRON_URL = '/apigw/modify_cron_for_periodic_task/{task_id}/{bk_biz_id}/'
cls.MODIFY_PERIODIC_TASK_CONSTANTS_URL = '/apigw/modify_constants_for_periodic_task/{task_id}/{bk_biz_id}/'
cls.GET_TASK_DETAIL = '/apigw/get_task_detail/{task_id}/{bk_biz_id}/'
cls.GET_TASK_NODE_DETAIL = '/apigw/get_task_node_detail/{task_id}/{bk_biz_id}/'
cls.NODE_CALLBACK = '/apigw/node_callback/{task_id}/{bk_biz_id}/'
cls.IMPORT_COMMON_FLOW = '/apigw/import_common_template/'
super(APITest, cls).setUpClass()
def setUp(self):
self.white_list_patcher = mock.patch(APIGW_DECORATOR_CHECK_WHITE_LIST, MagicMock(return_value=True))
self.dummy_user = MagicMock()
self.dummy_user.username = ''
self.user_cls = MagicMock()
self.user_cls.objects = MagicMock()
self.user_cls.objects.get_or_create = MagicMock(return_value=(self.dummy_user, False))
self.get_user_model_patcher = mock.patch(APIGW_DECORATOR_GET_USER_MODEL, MagicMock(return_value=self.user_cls))
self.prepare_user_business_patcher = mock.patch(APIGW_DECORATOR_PREPARE_USER_BUSINESS, MagicMock())
self.business_exist_patcher = mock.patch(APIGW_DECORATOR_BUSINESS_EXIST, MagicMock(return_value=True))
self.white_list_patcher.start()
self.get_user_model_patcher.start()
self.prepare_user_business_patcher.start()
self.business_exist_patcher.start()
self.client = Client()
def tearDown(self):
self.white_list_patcher.stop()
self.get_user_model_patcher.stop()
self.prepare_user_business_patcher.stop()
self.business_exist_patcher.stop()
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_list__for_business_template(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
pt2 = MockPipelineTemplate(id=2,
name='pt2')
task_tmpl1 = MockTaskTemplate(id=1, pipeline_template=pt1)
task_tmpl2 = MockTaskTemplate(id=2, pipeline_template=pt2)
task_templates = [task_tmpl1, task_tmpl2]
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(filter_result=task_templates))):
assert_data = [
{
'id': tmpl.id,
'name': tmpl.pipeline_template.name,
'creator': tmpl.pipeline_template.creator,
'create_time': strftime_with_timezone(tmpl.pipeline_template.create_time),
'editor': tmpl.pipeline_template.editor,
'edit_time': strftime_with_timezone(tmpl.pipeline_template.edit_time),
'category': tmpl.category,
'bk_biz_id': TEST_BIZ_CC_ID,
'bk_biz_name': TEST_BIZ_CC_NAME
} for tmpl in task_templates
]
response = self.client.get(path=self.GET_TEMPLATE_LIST_URL.format(biz_cc_id=TEST_BIZ_CC_ID))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(filter_result=[]))):
assert_data = []
response = self.client.get(path=self.GET_TEMPLATE_LIST_URL.format(biz_cc_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_list__for_common_template(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
pt2 = MockPipelineTemplate(id=2,
name='pt2')
task_tmpl1 = MockCommonTemplate(id=1, pipeline_template=pt1)
task_tmpl2 = MockCommonTemplate(id=2, pipeline_template=pt2)
task_templates = [task_tmpl1, task_tmpl2]
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(
return_value=MockQuerySet(filter_result=task_templates))):
assert_data = [
{
'id': tmpl.id,
'name': tmpl.pipeline_template.name,
'creator': tmpl.pipeline_template.creator,
'create_time': strftime_with_timezone(tmpl.pipeline_template.create_time),
'editor': tmpl.pipeline_template.editor,
'edit_time': strftime_with_timezone(tmpl.pipeline_template.edit_time),
'category': tmpl.category,
'bk_biz_id': TEST_BIZ_CC_ID,
'bk_biz_name': TEST_BIZ_CC_NAME
} for tmpl in task_templates
]
response = self.client.get(path=self.GET_TEMPLATE_LIST_URL.format(biz_cc_id=TEST_BIZ_CC_ID),
data={'template_source': 'common'})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(filter_result=[]))):
assert_data = []
response = self.client.get(path=self.GET_TEMPLATE_LIST_URL.format(biz_cc_id=TEST_BIZ_CC_ID),
data={'template_source': 'common'})
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_info__for_business_template(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockTaskTemplate(id=1, pipeline_template=pt1)
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
pipeline_tree = copy.deepcopy(tmpl.pipeline_tree)
pipeline_tree.pop('line')
pipeline_tree.pop('location')
assert_data = {
'id': tmpl.id,
'name': tmpl.pipeline_template.name,
'creator': tmpl.pipeline_template.creator,
'create_time': strftime_with_timezone(tmpl.pipeline_template.create_time),
'editor': tmpl.pipeline_template.editor,
'edit_time': strftime_with_timezone(tmpl.pipeline_template.edit_time),
'category': tmpl.category,
'bk_biz_id': TEST_BIZ_CC_ID,
'bk_biz_name': TEST_BIZ_CC_NAME,
'pipeline_tree': pipeline_tree
}
response = self.client.get(path=self.GET_TEMPLATE_INFO_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(assert_data, data['data'])
@mock.patch(TASKTEMPLATE_SELECT_RELATE,
MagicMock(return_value=MockQuerySet(get_raise=TaskTemplate.DoesNotExist())))
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_info__for_business_template_does_not_exists(self):
response = self.client.get(path=self.GET_TEMPLATE_INFO_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID), )
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_info__for_common_template(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockCommonTemplate(id=1, pipeline_template=pt1)
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
pipeline_tree = copy.deepcopy(tmpl.pipeline_tree)
pipeline_tree.pop('line')
pipeline_tree.pop('location')
assert_data = {
'id': tmpl.id,
'name': tmpl.pipeline_template.name,
'creator': tmpl.pipeline_template.creator,
'create_time': strftime_with_timezone(tmpl.pipeline_template.create_time),
'editor': tmpl.pipeline_template.editor,
'edit_time': strftime_with_timezone(tmpl.pipeline_template.edit_time),
'category': tmpl.category,
'bk_biz_id': TEST_BIZ_CC_ID,
'bk_biz_name': TEST_BIZ_CC_NAME,
'pipeline_tree': pipeline_tree
}
response = self.client.get(path=self.GET_TEMPLATE_INFO_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data={'template_source': 'common'})
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(assert_data, data['data'])
@mock.patch(COMMONTEMPLATE_SELECT_RELATE,
MagicMock(return_value=MockQuerySet(get_raise=CommonTemplate.DoesNotExist())))
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
def test_get_template_info__for_common_template_does_not_exists(self):
response = self.client.get(path=self.GET_TEMPLATE_INFO_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data={'template_source': 'common'})
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKINSTANCE_CREATE_PIPELINE, MagicMock(return_value=(True, TEST_DATA)))
@mock.patch(TASKINSTANCE_CREATE, MagicMock(return_value=MockTaskFlowInstance(id=TEST_TASKFLOW_ID)))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
def test_create_task__success(self):
pt1 = MockPipelineTemplate(id=1, name='pt1')
tmpl = MockTaskTemplate(id=1, pipeline_template=pt1)
biz = MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
assert_data = {'task_id': TEST_TASKFLOW_ID,
'task_url': TEST_TASKFLOW_URL,
'pipeline_tree': TEST_TASKFLOW_PIPELINE_TREE}
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_nodes_id': 'exclude_task_nodes_id',
'flow_type': 'common'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
TaskFlowInstance.objects.create_pipeline_instance_exclude_task_nodes.assert_called_once_with(
tmpl,
{'name': 'name', 'creator': ''},
'constants',
'exclude_task_nodes_id')
TaskFlowInstance.objects.create.assert_called_once_with(
business=biz,
category=tmpl.category,
pipeline_instance=TEST_DATA,
template_id=TEST_TEMPLATE_ID,
create_method='api',
create_info=TEST_APP_CODE,
flow_type='common',
current_flow='execute_task'
)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
TaskFlowInstance.objects.create_pipeline_instance_exclude_task_nodes.reset_mock()
TaskFlowInstance.objects.create.reset_mock()
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockCommonTemplate(id=1, pipeline_template=pt1)
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
assert_data = {'task_id': TEST_TASKFLOW_ID,
'task_url': TEST_TASKFLOW_URL,
'pipeline_tree': TEST_TASKFLOW_PIPELINE_TREE}
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_nodes_id': 'exclude_task_nodes_id',
'template_source': 'common',
'flow_type': 'common'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
TaskFlowInstance.objects.create_pipeline_instance_exclude_task_nodes.assert_called_once_with(
tmpl,
{'name': 'name', 'creator': ''},
'constants',
'exclude_task_nodes_id')
TaskFlowInstance.objects.create.assert_called_once_with(
business=biz,
category=tmpl.category,
pipeline_instance=TEST_DATA,
template_id=TEST_TEMPLATE_ID,
create_method='api',
create_info=TEST_APP_CODE,
flow_type='common',
current_flow='execute_task'
)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet()))
@mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock(side_effect=jsonschema.ValidationError('')))
def test_create_task__validate_fail(self):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id'}),
content_type="application/json")
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id',
'template_source': 'common'}),
content_type="application/json")
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet()))
@mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
def test_create_task__without_app_code(self):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id'}),
content_type="application/json")
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id',
'template_source': 'common'}),
content_type="application/json")
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKINSTANCE_CREATE_PIPELINE, MagicMock(side_effect=PipelineException()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
def test_create_task__create_pipeline_raise(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockTaskTemplate(id=1, pipeline_template=pt1)
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockCommonTemplate(id=1, pipeline_template=pt1)
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id',
'template_source': 'common'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKINSTANCE_CREATE_PIPELINE, MagicMock(return_value=(False, '')))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
def test_create_task__create_pipeline_fail(self):
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockTaskTemplate(id=1, pipeline_template=pt1)
with mock.patch(TASKTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
pt1 = MockPipelineTemplate(id=1,
name='pt1')
tmpl = MockCommonTemplate(id=1, pipeline_template=pt1)
with mock.patch(COMMONTEMPLATE_SELECT_RELATE, MagicMock(return_value=MockQuerySet(get_result=tmpl))):
response = self.client.post(path=self.CREATE_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'constants': 'constants',
'exclude_task_node_id': 'exclude_task_node_id',
'template_source': 'common'}),
content_type="application/json",
HTTP_BK_APP_CODE=TEST_APP_CODE)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_start_task(self):
assert_return = {'result': True}
task = MockTaskFlowInstance(task_action_return=assert_return)
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
response = self.client.post(path=self.START_TASK_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
task.task_action.assert_called_once_with('start', '')
data = json.loads(response.content)
self.assertEqual(data, assert_return)
def test_operate_task(self):
assert_return = {'result': True}
assert_action = 'any_action'
task = MockTaskFlowInstance(task_action_return=assert_return)
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
response = self.client.post(path=self.OPERATE_TASK_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'action': assert_action}),
content_type='application/json')
task.task_action.assert_called_once_with(assert_action, '')
data = json.loads(response.content)
self.assertEqual(data, assert_return)
def test_get_task_status__success(self):
task = MockTaskFlowInstance(get_status_return=TEST_DATA)
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
response = self.client.get(path=self.GET_TASK_STATUS_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], TEST_DATA)
def test_get_task_status__raise(self):
task = MockTaskFlowInstance(get_status_raise=Exception())
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
response = self.client.get(path=self.GET_TASK_STATUS_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKINSTANCE_FORMAT_STATUS, MagicMock())
@mock.patch(APIGW_VIEW_PIPELINE_API_GET_STATUS_TREE, MagicMock(return_value=TEST_DATA))
def test_get_task_status__is_subprocess(self):
task = MockTaskFlowInstance(get_status_raise=TaskFlowInstance.DoesNotExist())
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
response = self.client.get(path=self.GET_TASK_STATUS_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
TaskFlowInstance.format_pipeline_status.assert_called_once_with(TEST_DATA)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], TEST_DATA)
@mock.patch(APIGW_VIEW_PIPELINE_API_GET_STATUS_TREE, MagicMock(return_value=TEST_DATA))
def test_get_task_status__is_subprocess_raise(self):
task = MockTaskFlowInstance(get_status_raise=TaskFlowInstance.DoesNotExist())
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=task)):
with mock.patch(APIGW_VIEW_PIPELINE_API_GET_STATUS_TREE, MagicMock(side_effect=Exception())):
response = self.client.get(path=self.GET_TASK_STATUS_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
with mock.patch(TASKINSTANCE_FORMAT_STATUS, MagicMock(side_effect=Exception())):
response = self.client.get(path=self.GET_TASK_STATUS_URL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKINSTANCE_EXTEN_CLASSIFIED_COUNT, MagicMock(return_value=(True, TEST_DATA)))
def test_query_task_count__success(self):
response = self.client.post(path=self.QUERY_TASK_COUNT_URL.format(bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'group_by': 'category'}),
content_type='application/json')
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], TEST_DATA)
def test_query_task_count__conditions_is_not_dict(self):
response = self.client.post(path=self.QUERY_TASK_COUNT_URL.format(bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'conditions': []}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_query_task_count__group_by_is_not_valid(self):
response = self.client.post(path=self.QUERY_TASK_COUNT_URL.format(bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'group_by': 'invalid_value'}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKINSTANCE_EXTEN_CLASSIFIED_COUNT, MagicMock(return_value=(False, '')))
def test_query_task_count__extend_classified_count_fail(self):
response = self.client.post(path=self.QUERY_TASK_COUNT_URL.format(bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'group_by': 'category'}),
content_type='application/json')
TaskFlowInstance.objects.extend_classified_count.assert_called_once_with('category',
{'business__cc_id': TEST_BIZ_CC_ID,
'is_deleted': False})
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_get_periodic_task_list(self):
pt1 = MockPeriodicTask(id='1')
pt2 = MockPeriodicTask(id='2')
pt3 = MockPeriodicTask(id='3')
periodic_tasks = [pt1, pt2, pt3]
assert_data = [{
'id': task.id,
'name': task.name,
'template_id': task.template_id,
'creator': task.creator,
'cron': task.cron,
'enabled': task.enabled,
'last_run_at': strftime_with_timezone(task.last_run_at),
'total_run_count': task.total_run_count,
} for task in periodic_tasks]
with mock.patch(PERIODIC_TASK_FILTER, MagicMock(return_value=periodic_tasks)):
response = self.client.get(path=self.GET_PERIODIC_TASK_LIST_URL.format(bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
def test_get_periodic_task_info__success(self):
task = MockPeriodicTask()
assert_data = {
'id': task.id,
'name': task.name,
'template_id': task.template_id,
'creator': task.creator,
'cron': task.cron,
'enabled': task.enabled,
'last_run_at': strftime_with_timezone(task.last_run_at),
'total_run_count': task.total_run_count,
'form': task.form,
'pipeline_tree': task.pipeline_tree
}
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.get(path=self.GET_PERIODIC_TASK_INFO_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(PERIODIC_TASK_GET, MagicMock(side_effect=PeriodicTask.DoesNotExist))
def test_periodic_task_info__task_does_not_exist(self):
response = self.client.get(path=self.GET_PERIODIC_TASK_INFO_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKINSTANCE_PREVIEW_TREE, MagicMock())
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
def test_create_periodic_task__success(self):
task = MockPeriodicTask()
assert_data = {
'id': task.id,
'name': task.name,
'template_id': task.template_id,
'creator': task.creator,
'cron': task.cron,
'enabled': task.enabled,
'last_run_at': strftime_with_timezone(task.last_run_at),
'total_run_count': task.total_run_count,
'form': task.form,
'pipeline_tree': task.pipeline_tree
}
biz = MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)
template = MockTaskTemplate()
replace_template_id_mock = MagicMock()
with mock.patch(TASKTEMPLATE_GET, MagicMock(return_value=template)):
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(PERIODIC_TASK_CREATE, MagicMock(return_value=task)):
with mock.patch(APIGW_REPLACE_TEMPLATE_ID, replace_template_id_mock):
response = self.client.post(
path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': task.name,
'cron': task.cron,
'exclude_task_nodes_id': 'exclude_task_nodes_id'}),
content_type='application/json')
TaskFlowInstance.objects.preview_pipeline_tree_exclude_task_nodes.assert_called_with(
template.pipeline_tree,
'exclude_task_nodes_id'
)
PeriodicTask.objects.create.assert_called_once_with(
business=biz,
template=template,
name=task.name,
cron=task.cron,
pipeline_tree=template.pipeline_tree,
creator=''
)
data = json.loads(response.content)
replace_template_id_mock.assert_called_once_with(TaskTemplate, template.pipeline_tree)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(TASKTEMPLATE_GET, MagicMock(side_effect=TaskTemplate.DoesNotExist()))
def test_create_periodic_task__template_does_not_exist(self):
response = self.client.post(path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKTEMPLATE_GET, MagicMock(return_value=MockTaskTemplate()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock(side_effect=jsonschema.ValidationError('')))
def test_create_periodic_task__params_validate_fail(self):
response = self.client.post(path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(TASKTEMPLATE_GET, MagicMock(return_value=MockTaskTemplate()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
@mock.patch(TASKINSTANCE_PREVIEW_TREE, MagicMock(side_effect=Exception()))
def test_create_periodic_task__preview_pipeline_fail(self):
response = self.client.post(path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKTEMPLATE_GET, MagicMock(return_value=MockTaskTemplate()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
@mock.patch(TASKINSTANCE_PREVIEW_TREE, MagicMock())
@mock.patch(APIGW_REPLACE_TEMPLATE_ID, MagicMock(side_effect=Exception))
def test_create_periodic_task__replace_template_id_fail(self):
response = self.client.post(path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'cron': 'cron'}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness(cc_id=TEST_BIZ_CC_ID, cc_name=TEST_BIZ_CC_NAME)))
@mock.patch(TASKTEMPLATE_GET, MagicMock(return_value=MockTaskTemplate()))
@mock.patch(APIGW_VIEW_JSON_SCHEMA_VALIDATE, MagicMock())
@mock.patch(TASKINSTANCE_PREVIEW_TREE, MagicMock())
@mock.patch(PERIODIC_TASK_CREATE, MagicMock(side_effect=Exception()))
@mock.patch(APIGW_REPLACE_TEMPLATE_ID, MagicMock())
def test_create_periodic_task__periodic_task_create_fail(self):
response = self.client.post(path=self.CREATE_PERIODIC_TASK_URL.format(template_id=TEST_TEMPLATE_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'name': 'name',
'cron': 'cron'}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness()))
def test_set_periodic_task_enabled__success(self):
task = MockPeriodicTask()
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.post(path=self.SET_PERIODIC_TASK_ENABLED_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'enabled': True}),
content_type='application/json')
task.set_enabled.assert_called_once_with(True)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], {
'enabled': task.enabled
})
@mock.patch(PERIODIC_TASK_GET, MagicMock(side_effect=PeriodicTask.DoesNotExist))
def test_set_periodic_task_enabled__task_does_not_exist(self):
response = self.client.post(path=self.SET_PERIODIC_TASK_ENABLED_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'enabled': True}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_modify_cron_for_periodic_task__success(self):
biz = MockBusiness()
task = MockPeriodicTask()
cron = {'minute': '*/1'}
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.post(
path=self.MODIFY_PERIODIC_TASK_CRON_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'cron': cron}),
content_type='application/json')
task.modify_cron.assert_called_once_with(cron, biz.time_zone)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], {'cron': task.cron})
@mock.patch(BUSINESS_GET, MagicMock(return_value=MockBusiness()))
@mock.patch(PERIODIC_TASK_GET, MagicMock(side_effect=PeriodicTask.DoesNotExist))
def test_modify_cron_for_periodic_task__task_does_not_exist(self):
response = self.client.post(path=self.MODIFY_PERIODIC_TASK_CRON_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'enabled': True}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_modify_cron_for_periodic_task__modify_raise(self):
biz = MockBusiness()
task = MockPeriodicTask()
task.modify_cron = MagicMock(side_effect=Exception())
cron = {'minute': '*/1'}
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.post(
path=self.MODIFY_PERIODIC_TASK_CRON_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'cron': cron}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_modify_constants_for_periodic_task__success(self):
biz = MockBusiness()
task = MockPeriodicTask()
constants = {'k': 'v'}
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.post(
path=self.MODIFY_PERIODIC_TASK_CONSTANTS_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({'constants': constants}),
content_type='application/json')
task.modify_constants.assert_called_once_with(constants)
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], task.modify_constants.return_value)
@mock.patch(PERIODIC_TASK_GET, MagicMock(side_effect=PeriodicTask.DoesNotExist))
def test_modify_constants_for_periodic_task__task_does_not_exist(self):
response = self.client.post(path=self.MODIFY_PERIODIC_TASK_CONSTANTS_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_modify_constants_for_periodic_task__modify_constants_raise(self):
biz = MockBusiness()
task = MockPeriodicTask()
task.modify_constants = MagicMock(side_effect=Exception())
with mock.patch(BUSINESS_GET, MagicMock(return_value=biz)):
with mock.patch(PERIODIC_TASK_GET, MagicMock(return_value=task)):
response = self.client.post(
path=self.MODIFY_PERIODIC_TASK_CONSTANTS_URL.format(task_id=TEST_PERIODIC_TASK_ID,
bk_biz_id=TEST_BIZ_CC_ID),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_get_task_detail__success(self):
mock_taskflow = MockTaskFlowInstance(get_task_detail_return=TEST_DATA)
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=mock_taskflow)):
assert_data = TEST_DATA
response = self.client.get(path=self.GET_TASK_DETAIL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
@mock.patch(TASKINSTANCE_GET, MagicMock(side_effect=TaskFlowInstance.DoesNotExist()))
    def test_get_task_detail__taskflow_does_not_exist(self):
response = self.client.get(path=self.GET_TASK_DETAIL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID))
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_get_task_node_detail__success(self):
mock_taskflow = MockTaskFlowInstance(get_node_detail_return={'result': True, 'data': TEST_DATA})
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=mock_taskflow)):
assert_data = TEST_DATA
response = self.client.get(path=self.GET_TASK_NODE_DETAIL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data={'node_id': TEST_NODE_ID,
'component_code': TEST_COMPONENT_CODE,
'subprocess_stack': TEST_SUBPROCESS_STACK})
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['data'], assert_data)
mock_taskflow.get_node_detail.assert_called_once_with(TEST_NODE_ID,
TEST_COMPONENT_CODE,
json.loads(TEST_SUBPROCESS_STACK))
@mock.patch(TASKINSTANCE_GET, MagicMock(side_effect=TaskFlowInstance.DoesNotExist()))
    def test_get_task_node_detail__taskflow_does_not_exist(self):
response = self.client.get(path=self.GET_TASK_NODE_DETAIL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data={'node_id': TEST_NODE_ID,
'component_code': TEST_COMPONENT_CODE,
'subprocess_stack': TEST_SUBPROCESS_STACK})
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_get_task_node_detail__with_invalid_subprocess_stack(self):
response = self.client.get(path=self.GET_TASK_NODE_DETAIL.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data={'node_id': TEST_NODE_ID,
'component_code': TEST_COMPONENT_CODE,
'subprocess_stack': 'abcdefg'})
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
def test_node_callback__success(self):
mock_instance = MockTaskFlowInstance()
with mock.patch(TASKINSTANCE_GET, MagicMock(return_value=mock_instance)):
response = self.client.post(path=self.NODE_CALLBACK.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({
'node_id': TEST_NODE_ID,
'callback_data': TEST_CALLBACK_DATA
}),
content_type='application/json')
data = json.loads(response.content)
self.assertTrue(data['result'])
mock_instance.callback.assert_called_once_with(TEST_NODE_ID, TEST_CALLBACK_DATA)
@mock.patch(TASKINSTANCE_GET, MagicMock(side_effect=TaskFlowInstance.DoesNotExist()))
def test_node_callback__taskflow_does_not_exists(self):
response = self.client.post(path=self.NODE_CALLBACK.format(task_id=TEST_TASKFLOW_ID,
bk_biz_id=TEST_BIZ_CC_ID),
data=json.dumps({
'node_id': TEST_NODE_ID,
'callback_data': TEST_CALLBACK_DATA
}),
content_type='application/json')
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(APIGW_VIEW_CHECK_WHITE_LIST, MagicMock(return_value=False))
@mock.patch(APIGW_READ_TEMPLATE_DATA_FILE, MagicMock())
def test_import_common_template__app_has_no_permission(self):
response = self.client.post(path=self.IMPORT_COMMON_FLOW)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
from gcloud.apigw.views import read_template_data_file
read_template_data_file.assert_not_called()
@mock.patch(APIGW_VIEW_CHECK_WHITE_LIST, MagicMock(return_value=True))
@mock.patch(APIGW_READ_TEMPLATE_DATA_FILE, MagicMock(return_value={'result': False, 'message': 'token'}))
def test_import_common_template__read_template_data_file_error(self):
response = self.client.post(path=self.IMPORT_COMMON_FLOW)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertEqual(data['message'], 'token')
@mock.patch(APIGW_VIEW_CHECK_WHITE_LIST, MagicMock(return_value=True))
@mock.patch(APIGW_READ_TEMPLATE_DATA_FILE, MagicMock(return_value={'result': True,
'data': {'template_data': 'token'}}))
@mock.patch(COMMONTEMPLATE_IMPORT_TEMPLATES, MagicMock(side_effect=Exception()))
def test_import_common_template__import_templates_error(self):
response = self.client.post(path=self.IMPORT_COMMON_FLOW)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertTrue('message' in data)
@mock.patch(APIGW_VIEW_CHECK_WHITE_LIST, MagicMock(return_value=True))
@mock.patch(APIGW_READ_TEMPLATE_DATA_FILE, MagicMock(return_value={'result': True,
'data': {'template_data': 'token'}}))
@mock.patch(COMMONTEMPLATE_IMPORT_TEMPLATES, MagicMock(return_value={'result': False, 'message': 'token'}))
def test_import_common_template__import_templates_fail(self):
response = self.client.post(path=self.IMPORT_COMMON_FLOW)
data = json.loads(response.content)
self.assertFalse(data['result'])
self.assertEqual(data['message'], 'token')
@mock.patch(APIGW_VIEW_CHECK_WHITE_LIST, MagicMock(return_value=True))
@mock.patch(APIGW_READ_TEMPLATE_DATA_FILE, MagicMock(return_value={'result': True,
'data': {'template_data': 'token'}}))
@mock.patch(COMMONTEMPLATE_IMPORT_TEMPLATES, MagicMock(return_value={'result': True, 'message': 'token'}))
def test_import_common_template__success(self):
response = self.client.post(path=self.IMPORT_COMMON_FLOW, data={'override': True})
data = json.loads(response.content)
self.assertTrue(data['result'])
self.assertEqual(data['message'], 'token')
CommonTemplate.objects.import_templates.assert_called_once_with('token', True)
|
[
"yanyukai@xinxindai.com"
] |
yanyukai@xinxindai.com
|
af5d0b52140865dff7f3de167c9c6ff52505eabf
|
454cb7ce13e0e359b978bc34830f83fdb7426630
|
/python/d086.py
|
ce3ddf6249653a79301be63120ae14f8f48af78d
|
[] |
no_license
|
scorpio-su/zerojudge
|
d662925b1769e8f2b7cba1db291642b44365e3e8
|
4897d1626147d432d21577c330d489c6dd576cd6
|
refs/heads/master
| 2022-12-06T00:59:20.648107
| 2020-08-20T13:16:56
| 2020-08-20T13:16:56
| 279,103,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 09:46:45 2020
@author: Username
"""
dic={'hardwork':98,'knowledge':96,'attitude':100}
for s in [input()]:
s=s.replace('\n','').replace(' ','')
#if s[-1] == '':s=s[:-1]
if s == '0': break
num=0
s=s.lower()
for i in range(len(s)):
k=''
if s[i]=='a' or s[i]=='h': k=s[i:i+8]
if s[i]=='k': k=s[i:i+9]
#print(k)
if dic.get(k):num+=dic.get(k)
if num!=0: print(num)
else: print('Fail')
#hardwork KNOWLEDGE aTtitUdE C++
#hardworkKNOWLEDGEaTtitUdEC++
|
[
"aa891119@gmail.com"
] |
aa891119@gmail.com
|
f108e824a0b3288d1e3d3d6a86db62ae697df0a5
|
2c22736309a50968896b4724df4a7a1d1a150d88
|
/0x0C-python-almost_a_circle/models/square.py
|
b9a88f03d6e1fb2b7f14438e3604fc0ce2382063
|
[] |
no_license
|
gcifuentess/holbertonschool-higher_level_programming
|
ce9f263c0eef07facc1e02b719a8ae7193233d6d
|
75e405ec7f1aa9138aa54e86f7b41aa08ead7f2a
|
refs/heads/master
| 2023-06-18T08:36:22.580908
| 2021-07-18T20:46:40
| 2021-07-18T20:46:40
| 291,871,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
#!/usr/bin/python3
"""Square module"""
from models.rectangle import Rectangle
class Square(Rectangle):
"""Square class as a subclass of Rectangle"""
symbol = '#'
'''symbol used to print the square'''
def __init__(self, size, x=0, y=0, id=None):
"""Square constructor"""
self.size = size
super().__init__(self.size, self.size, x, y, id)
@property
def size(self):
return self.width
@size.setter
def size(self, value):
self.width = value
self.height = value
def __str__(self):
"""returns the string to be printed when print() invoked"""
return "[Square] ({}) {}/{} - {}".format(self.id, self.x,
self.y, self.width)
def update(self, *args, **kwargs):
"""Updates Square's attributes (id, size, x and y)"""
i = 0
for arg in args:
i += 1
if i == 1:
self.id = arg
elif i == 2:
self.size = arg
elif i == 3:
self.x = arg
elif i == 4:
self.y = arg
else:
break
if i == 0:
for key, value in kwargs.items():
if key == 'size':
self.size = value
elif key == 'x':
self.x = value
elif key == 'y':
self.y = value
elif key == 'id':
self.id = value
else:
break
def to_dictionary(self):
"""retunrs a dictionary with the attributes of the class"""
self.new_dict = {}
self.new_dict['size'] = self.size
self.new_dict['x'] = self.x
self.new_dict['y'] = self.y
self.new_dict['id'] = self.id
return self.new_dict
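# A minimal usage sketch for the Square class above, assuming the Rectangle base
# class validates and stores width/height/x/y as usual in this exercise:
if __name__ == "__main__":
    s = Square(3, x=1, y=2, id=12)
    print(s)                    # [Square] (12) 1/2 - 3
    s.update(size=5, y=0)
    print(s.to_dictionary())    # {'size': 5, 'x': 1, 'y': 0, 'id': 12}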
|
[
"1795@holbertonschool.com"
] |
1795@holbertonschool.com
|
0e9fbc0615ec3aaf544e09a19d1b254749955762
|
5352abcba8d8c2f334e8258fa8b67435ab33aff2
|
/marriage_my_id/settings.py
|
e374ae53a5f6979d057a36587f3094a7e4d3c134
|
[] |
no_license
|
hendpraz/marriage_my_id
|
71440bef7b99794941b82e2ae5340374de82edc6
|
ed96358d088390a09a206c99991b688fbac14339
|
refs/heads/master
| 2023-08-05T17:17:41.564580
| 2020-01-09T14:34:33
| 2020-01-09T14:34:33
| 227,720,334
| 0
| 0
| null | 2021-09-22T18:18:34
| 2019-12-13T00:15:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,436
|
py
|
"""
Django settings for marriage_my_id project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&h*lqtrc85uzu@9c@i-d!%d7+99+fpo^qylr^)8v8#4-abjp5_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'marriage.apps.MarriageConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'marriage_my_id.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'marriage_my_id.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': str(os.getenv("MYSQL_DATABASE_NAME")),
'USER': str(os.getenv("MYSQL_USER")),
'PASSWORD': str(os.getenv("MYSQL_PASSWORD")),
'HOST': str(os.getenv("MYSQL_HOST")),
'OPTIONS': {'charset': 'utf8mb4'},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = str(os.getenv("STATIC_URL_TANIA_DIKA"))
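# A minimal sketch of the .env file these settings expect. The variable names come
# from the os.getenv() calls above; the values shown are placeholders only:
#
#   MYSQL_DATABASE_NAME=marriage_db
#   MYSQL_USER=marriage_user
#   MYSQL_PASSWORD=change-me
#   MYSQL_HOST=127.0.0.1
#   STATIC_URL_TANIA_DIKA=/static/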
|
[
"mhendryp99@gmail.com"
] |
mhendryp99@gmail.com
|
65374bc87c697aa10acae04ee679f3524117e722
|
c475fdc75bf4d7e40cb86182bb79c6468678cd08
|
/djrhr/users/migrations/0003_remove_customuser_username.py
|
b41faefe06e701a76dd4d08d6fe7576c4d190910
|
[] |
no_license
|
matthewmridha/react-django-boilerplate
|
a2c9a1e1230399fb391ea94c3165e4a4b1cb57d0
|
36fb350e055f1d64086f53ca2e5adceb414c43ca
|
refs/heads/main
| 2023-07-08T16:53:26.399561
| 2021-08-14T05:23:30
| 2021-08-14T05:23:30
| 382,972,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# Generated by Django 3.2.3 on 2021-05-30 05:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_alter_customuser_managers'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='username',
),
]
|
[
"matthewmridha@gmail.com"
] |
matthewmridha@gmail.com
|
fc8196b320bf8b85d4fd7007703d75f76858aa28
|
fb025fc7b94ef915b6e837e163ad836d6bf4056a
|
/dtype/dtype-ft2.py
|
1a8553eb1bdea43913a35961120f537a789ea636
|
[] |
no_license
|
Richard5127/Python
|
e50ad205dc2acaf5e1a115f3d443e90ecfeaef6b
|
55385b3dfbb126f2fec2924bd079afd6f02781d1
|
refs/heads/main
| 2023-01-12T23:12:57.081241
| 2020-11-03T17:05:32
| 2020-11-03T17:05:32
| 309,753,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
#!/usr/bin/python
# coding: utf-8
x = 16.7
y = int(x) + 218
print(y, type(y))
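# A small follow-up sketch: int() truncates toward zero while round() rounds to the
# nearest integer, so the two give different results for x = 16.7.
z = round(x) + 218
print(z, type(z))  # 235 <class 'int'>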
|
[
"noreply@github.com"
] |
Richard5127.noreply@github.com
|
4e93ae0dfb5d711647264f20366ac5a8b8fe52cf
|
4fe57e7ed6c937e77e777394b119769e24e0e7e0
|
/ver1/my_debugger_defines.py
|
51996bde67b4fb90a3c30c8f90fb42a91231982e
|
[] |
no_license
|
keko5342/study_reverse_engineering_python
|
74ee86777ac2c6b6af43176890271fa2ce5f0ed4
|
1bfc5c2e938f2c301d92d2084eb9f73cfeba526e
|
refs/heads/master
| 2020-04-07T05:25:15.126111
| 2018-11-22T04:18:01
| 2018-11-22T04:18:01
| 158,095,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
from ctypes import *
WORD = c_ushort
DWORD = c_ulong
LPBYTE = POINTER(c_ubyte)
LPTSTR = POINTER(c_char)
HANDLE = c_void_p
DEBUG_PROCESS = 0x00000001
CREATE_NEW_CONSOLE = 0x00000010
class STARTUPINFO(Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPTSTR),
("lpDesktop", LPTSTR),
("lpTitle", LPTSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
class PROCESS_INFORMATION(Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
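# A minimal sketch of how these definitions are typically consumed (the classic
# Gray Hat Python load() pattern). Windows-only; windll/byref/sizeof come from the
# wildcard ctypes import above, and the executable path is just a placeholder.
def _example_load(path_to_exe=b"C:\\Windows\\System32\\calc.exe"):
    kernel32 = windll.kernel32
    startupinfo = STARTUPINFO()
    process_information = PROCESS_INFORMATION()
    startupinfo.dwFlags = 0x1          # STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = 0x0
    startupinfo.cb = sizeof(startupinfo)
    if kernel32.CreateProcessA(path_to_exe, None, None, None, None,
                               DEBUG_PROCESS, None, None,
                               byref(startupinfo), byref(process_information)):
        print("Launched process with PID", process_information.dwProcessId)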
|
[
"kekosute@gmail.com"
] |
kekosute@gmail.com
|
e5ebc43703c0fa54cda0b5fdaa1a140684a919df
|
c9f12789125de168977c3c33764c4262eacb2429
|
/Solver/fast_solve.py
|
53ca08c96514ee1624f291abffaac8faa7ace35c
|
[] |
no_license
|
Sami-AlEsh/Sudoku-Solver
|
3da8596848130c7408b9cf2d5f52a2b58485e1db
|
a47ac5810cefe20a6566486c8b6fd50a96f9056b
|
refs/heads/master
| 2023-02-15T19:26:48.741716
| 2021-01-11T08:00:12
| 2021-01-11T08:00:12
| 328,574,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
# written by Nathan Esau, Aug 2020
import copy
def get_possible_values(board, row, col):
possible_values = [1,2,3,4,5,6,7,8,9]
tl_row = row - row % 3
tl_col = col - col % 3
for i in range(9):
if len(board[row][i]) == 1: # entry already solved
if board[row][i][0] in possible_values:
possible_values.remove(board[row][i][0])
if len(board[i][col]) == 1: # entry already solved
if board[i][col][0] in possible_values:
possible_values.remove(board[i][col][0])
if len(board[tl_row+i//3][tl_col+i%3]) == 1: # entry already solved
if board[tl_row+i//3][tl_col+i%3][0] in possible_values:
possible_values.remove(board[tl_row+i//3][tl_col+i%3][0])
return possible_values
def fill_missing_entries(board, box):
row_start, row_end = (0,2) if box in [0,1,2] else (3,5) if box in [3,4,5] else (6,8)
col_start, col_end = (0,2) if box in [0,3,6] else (3,5) if box in [1,4,7] else (6,8)
for row in range(row_start, row_end + 1):
for col in range(col_start, col_end + 1):
if len(board[row][col]) == 1: # entry already solved
continue
board[row][col] = get_possible_values(board, row, col)
def solve_missing_entries(board, box):
row_start, row_end = (0,2) if box in [0,1,2] else (3,5) if box in [3,4,5] else (6,8)
col_start, col_end = (0,2) if box in [0,3,6] else (3,5) if box in [1,4,7] else (6,8)
possible_squares = dict((i, []) for i in range(1, 10, 1))
for row in range(row_start, row_end + 1):
for col in range(col_start, col_end + 1):
for e in board[row][col]:
possible_squares[e].append((row, col))
for (k, v) in possible_squares.items():
if len(v) == 1:
row, col = v[0]
if len(board[row][col]) != 1: # solve entry
board[row][col] = [k]
def solve_strategy(board):
for _ in range(25): # max_iter = 25
initial_board = copy.deepcopy(board)
for box in range(9):
fill_missing_entries(board, box)
solve_missing_entries(board, box)
if board == initial_board:
return "stuck"
solved = True
for i in range(9):
for j in range(9):
if len(board[i][j]) == 0:
return "failed"
if len(board[i][j]) != 1:
solved = False
if solved:
return "solved"
def get_guess(board):
solved_count = {}
for i in range(9): # row i, col i, box i
rc, cc, bc = 0, 0, 0
for j in range(9):
if len(board[i][j]) == 1:
rc += 1
if len(board[j][i]) == 1:
cc += 1
if len(board[i//3*3 + j//3][i%3*3 + j%3]) == 1:
bc += 1
if rc < 9: solved_count["r"+str(i)] = rc
if cc < 9: solved_count["c"+str(i)] = cc
if bc < 9: solved_count["b"+str(i)] = bc
rcb = max(solved_count, key=solved_count.get)
square = None
options = None
t, i = rcb[0], int(rcb[1])
for j in range(9):
if t == 'r' and len(board[i][j]) > 1:
square, options = [i,j], board[i][j]
break
if t == 'c' and len(board[j][i]) > 1:
square, options = [j,i], board[j][i]
break
if t == 'b' and len(board[i//3*3+j//3][i%3*3+j%3]) > 1:
square, options = [i//3*3+j//3, i%3*3+j%3], board[i//3*3+j//3][i%3*3+j%3]
break
return {"rcb": rcb, "square": square, "options": options}
def apply_guess(board, guess, value):
square = guess["square"]
board[square[0]][square[1]] = [value]
def solve(initial_board): # return solved board
board = copy.deepcopy(initial_board)
root = {"board":board,"parent":None,"child":None,"depth":0,"guess":None,"value":None}
node = root
while True:
state = solve_strategy(board)
if state == "solved":
return board
if state == "stuck":
node["board"] = copy.deepcopy(board)
node["child"] = {"board": board, "parent": node, "depth": root["depth"] + 1}
node = node["child"]
node["guess"] = get_guess(board)
node["value"] = node["guess"]["options"][0]
apply_guess(board, node["guess"], node["value"])
if state == "failed": # backtrack - change guess
while len(node["guess"]["options"]) <= 1:
node = node["parent"]
board = copy.deepcopy(node["parent"]["board"])
node["board"] = copy.deepcopy(board)
node["guess"]["options"] = node["guess"]["options"][1:]
node["value"] = node["guess"]["options"][0]
apply_guess(board, node["guess"], node["value"])
def print_board(board):
for i in range(9):
for j in range(9):
if len(board[i][j]) == 1:
print(board[i][j][0], end= " ")
else:
print("X", end=" ")
print("")
|
[
"sami-esh@hotmail.com"
] |
sami-esh@hotmail.com
|
ba71267f3a8c3e627a0cdf019698551924086c3d
|
03f00f93b2ad0fe9825617eb9451aa3230465c08
|
/04-Decision-Science/03-Linear-Regression/02-Sellers/tests/test_seller.py
|
ca50c662d51e3af83adfd391f42b6245fe46a761
|
[] |
no_license
|
grinbea/data-challenges-clean
|
775e2f094f65745956473bdf4fea1016da854e63
|
8126abd45728e5c64a6b302dd182d8e497376fe3
|
refs/heads/master
| 2023-08-15T04:37:35.388965
| 2021-10-09T19:00:01
| 2021-10-09T19:00:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
from nbresult import ChallengeResultTestCase
class TestSeller(ChallengeResultTestCase):
def test_shape(self):
self.assertEqual(self.result.shape, (2970, 14))
def test_columns(self):
columns = ['date_first_sale',
'date_last_sale',
'delay_to_carrier',
'n_orders',
'quantity',
'quantity_per_order',
'review_score',
'sales',
'seller_city',
'seller_id',
'seller_state',
'share_of_five_stars',
'share_of_one_stars',
'wait_time']
self.assertEqual(self.result.columns, columns)
def test_average_review_score(self):
self.assertEqual(self.result.avg_review_score, 4)
def test_unique_state(self):
states = ['AM',
'BA',
'CE',
'DF',
'ES',
'GO',
'MA',
'MG',
'MS',
'MT',
'PA',
'PB',
'PE',
'PI',
'PR',
'RJ',
'RN',
'RO',
'RS',
'SC',
'SE',
'SP']
self.assertEqual(self.result.unique_state, states)
def test_wait_time(self):
self.assertEqual(self.result.min_wait_time, 1.21)
self.assertEqual(self.result.max_wait_time, 189)
self.assertEqual(self.result.avg_wait_time, 12)
def test_average_delay_carrier(self):
self.assertLess(self.result.avg_delay_carrier, 0.6)
self.assertGreater(self.result.avg_delay_carrier, 0.3)
def test_quantity(self):
self.assertIn(self.result.avg_quantity, (37, 38))
self.assertLess(self.result.max_quantity, 2040)
self.assertGreater(self.result.max_quantity, 2030)
self.assertEqual(self.result.min_quantity, 1)
def test_average_sales(self):
self.assertEqual(self.result.avg_sales, 4566)
|
[
"pedemonte.david@gmail.com"
] |
pedemonte.david@gmail.com
|
6984beeeacc8c0e02d2294688d39c4eb33c50793
|
865ecabe443e00ed85a2a8ab8ef031807441bcf5
|
/robot-view-ctrl.py
|
011b4b31dce3bb2406731bc394a8adfa387faa16
|
[] |
no_license
|
8HoSsEiN8/HW5
|
745e889d1be2698a99c31245ef0535571b2eb37c
|
e3063b9c83f36a76e726063dc399e08f6e46fe3f
|
refs/heads/master
| 2021-01-22T14:25:44.056608
| 2014-10-15T07:45:12
| 2014-10-15T07:45:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,270
|
py
|
#!/usr/bin/env python
# /* -*- indent-tabs-mode:t; tab-width: 8; c-basic-offset: 8 -*- */
# /*
# Copyright (c) 2014, Daniel M. Lofaro <dan (at) danLofaro (dot) com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# */
import diff_drive
import ach
import sys
import time
from ctypes import *
import socket
import cv2.cv as cv
import cv2
import numpy as np
from myFunctions import setVelocity
import actuator_sim as ser
dd = diff_drive
ref = dd.H_REF()
tim = dd.H_TIME()
ROBOT_DIFF_DRIVE_CHAN = 'robot-diff-drive'
ROBOT_CHAN_VIEW = 'robot-vid-chan'
ROBOT_TIME_CHAN = 'robot-time'
# CV setup
cv.NamedWindow("wctrl", cv.CV_WINDOW_AUTOSIZE)
#capture = cv.CaptureFromCAM(0)
#capture = cv2.VideoCapture(0)
# added
##sock.connect((MCAST_GRP, MCAST_PORT))
newx = 320
newy = 240
nx = 640
ny = 480
r = ach.Channel(ROBOT_DIFF_DRIVE_CHAN)
r.flush()
v = ach.Channel(ROBOT_CHAN_VIEW)
v.flush()
t = ach.Channel(ROBOT_TIME_CHAN)
t.flush()
i=0
print '======================================'
print '============= Robot-View ============='
print '========== Daniel M. Lofaro =========='
print '========= dan@danLofaro.com =========='
print '======================================'
while True:
# Get Frame
img = np.zeros((newx,newy,3), np.uint8)
c_image = img.copy()
vid = cv2.resize(c_image,(newx,newy))
[status, framesize] = v.get(vid, wait=False, last=True)
if status == ach.ACH_OK or status == ach.ACH_MISSED_FRAME or status == ach.ACH_STALE_FRAMES:
vid2 = cv2.resize(vid,(nx,ny))
img = cv2.cvtColor(vid2,cv2.COLOR_BGR2RGB)
cv2.imshow("wctrl", img)
cv2.waitKey(10)
else:
raise ach.AchException( v.result_string(status) )
[status, framesize] = t.get(tim, wait=False, last=True)
if status == ach.ACH_OK or status == ach.ACH_MISSED_FRAME or status == ach.ACH_STALE_FRAMES:
pass
#print 'Sim Time = ', tim.sim[0]
else:
raise ach.AchException( v.result_string(status) )
#-----------------------------------------------------
#--------[ Do not edit above ]------------------------
#-----------------------------------------------------
# Def:
# ref.ref[0] = Right Wheel Velos
# ref.ref[1] = Left Wheel Velos
# tim.sim[0] = Sim Time
# img = cv image in BGR format
# Convert RGB to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# Define upper and lower range of green color in HSV
lower_green = np.array([50, 50, 50], dtype=np.uint8)
upper_green = np.array([70,255,255], dtype=np.uint8)
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
# Use findContours to get the boundry of the green blob
contours,hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Look through all the seperate contours and highlight the boundry and centroid
for cnt in contours:
# Calculate moments
moments = cv2.moments(cnt)
if moments['m00']!=0:
x = int(moments['m10']/moments['m00'])
y = int(moments['m01']/moments['m00'])
print 'Center of Mass = ', '(', x, ', ', y, ')'
# draw contours
cv2.drawContours(img,[cnt],0,(0,0,255),1)
# draw centroids in red
cv2.circle(img,(x,y),10,(0,0,255),-1)
cv2.imshow('wctrl',img)
cv2.waitKey(10)
s = 57
buff = setVelocity(0, 1, s)
ref = ser.serial_sim(r,ref,buff)
buff = setVelocity(1, 0, s)
ref = ser.serial_sim(r,ref,buff)
print 'Sim Time = ', tim.sim[0]
# Sleeps
time.sleep(0.1)
#-----------------------------------------------------
#--------[ Do not edit below ]------------------------
#-----------------------------------------------------
|
[
"h.ghaffarinik@gmail.com"
] |
h.ghaffarinik@gmail.com
|
9f623eedb506f29db43ca466d6b9b966618e24ae
|
09301742234cb74438145d7d80f9df43511d0264
|
/statsmodels practice.py
|
eed8da2b0958bd905df00dbdc09fb3f434923966
|
[] |
no_license
|
Siphra/udacityprojects
|
ddfe3b0336325ee95603ef2fe9734fa417220664
|
f2b7df5c18579acd7a94c5aa429cc108cf4d2ce1
|
refs/heads/main
| 2023-02-16T07:15:21.323421
| 2021-01-15T16:58:50
| 2021-01-15T16:58:50
| 318,332,489
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
import pandas as pd
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sb
import math
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, accuracy_score, confusion_matrix
# lesson 15 multiple linear regression question 4 has a mistake in its grading
df = pd.read_csv(r'I:\Python\PycharmProjects\udacityproject1\csv from classes\house_prices.csv')
df['intercept'] = 1
dfs = pd.get_dummies(df['style'])
dfn = pd.get_dummies(df['neighborhood'])
df_new = df.join(dfn)
df_new = df_new.join(dfs)
lm = sm.OLS(df_new.price, df_new[['intercept','bathrooms','bedrooms','area']]) #simple linear model
results = lm.fit()
y, X = dmatrices(' price ~ area + bedrooms + bathrooms', df, return_type='dataframe')
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features"]=X.columns
df['bedrooms_squared'] = df.bedrooms * df.bedrooms
lm = sm.OLS(df.price, df[['intercept','bedrooms','bedrooms_squared']]) #simple linear model
results = lm.fit()
# Below is logistic regression testing above is for linear regression
df = pd.read_csv(r'I:\Python\PycharmProjects\udacityproject1\csv from classes\fraud_dataset.csv')
df[['weekday','weekend']] = pd.get_dummies(df.day)
df[['no-fraud','fraud']] = pd.get_dummies(df.fraud)
df['intercept'] = 1
lm = sm.Logit(df['fraud'],df[['intercept','duration']])
results = lm.fit()
lm = sm.Logit(df['fraud'],df[['intercept','weekday','duration']])
results = lm.fit()
df = pd.read_csv(r'I:\Python\PycharmProjects\udacityproject1\csv from classes\admissions.csv')
df[['1', '2', '3', '4']] = pd.get_dummies(df.prestige)
df['intercept'] = 1
#df.drop(['1'], axis=1, inplace=True)
log_mod = sm.Logit(df.admit, df[['intercept', 'gre', 'gpa', '2', '3', '4']])
results = log_mod.fit()
gre_eb = math.exp(.0022)
gpa_eb = math.exp(.7793)
pre_eb = math.exp(-1.3387)
gpa_ebi = 1/gpa_eb
pre_ebi = 1/pre_eb
y = df['admit']
X = df[['gre', 'gpa', '1', '2', '3']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
log_mod = LogisticRegression()
log_mod.fit(X_train, y_train)
y_pred = log_mod.predict(X_test)
print(df.head())
print(precision_score(y_test, y_pred))
print(recall_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
#below is project code
df = pd.read_csv(r'I:\Python\PycharmProjects\udacityproject1\csv from classes\ab_data.csv')
print(df.head())
print(df.info())
print(df.nunique())
print(sum(df.converted)/df.user_id.nunique())
dfchk = df[df['group'] == 'treatment']
dfchkop = df[df['group'] == 'control']
dfchk = dfchk[dfchk['landing_page']== 'old_page']
dfchkop = dfchkop[dfchkop['landing_page'] == 'new_page']
dfchk = dfchk.append(dfchkop)
print(dfchk.info())
df2 = df[~df.isin(dfchk)].dropna()
print(df2.head())
print(df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0])
print(df2.nunique())
dups = df2[df2.user_id.duplicated(keep = False)==True]
print(dups)
df2.drop(2893, axis=0, inplace=True)
print(df2.shape)
conv = sum(df2.converted)/df2.shape[0]
print(conv)
dfc = df2[df2.group == 'control']
print(sum(dfc.converted)/dfc.shape[0])
dft = df2[df2.group == 'treatment']
print(sum(dft.converted)/dft.shape[0])
print(sum(df2.group == 'treatment')/df2.shape[0])
print(conv, conv, dft.shape[0], dfc.shape[0])
new_page_converted = np.random.choice([0,1], dft.shape[0],replace= True, p=[conv,1-conv])
old_page_converted = np.random.choice([0,1], dfc.shape[0], replace= True, p=[conv, 1-conv])
plt.hist(new_page_converted, color='green')
plt.hist(old_page_converted, color='red')
plt.show()
|
[
"47285212+Siphra@users.noreply.github.com"
] |
47285212+Siphra@users.noreply.github.com
|
7a29554c47f64f44a779fbd3cf62cd4b9c870afe
|
c5959b7e4fc5b752b54a6352449c1bb0d28d9115
|
/bab/bab-7/readlines.py
|
80ab2cde68be77c52c9d38e68bb31da2fecf5efc
|
[] |
no_license
|
romanbatavi/kickstarter-python
|
f5592a371740b28c045ef99dd510d1c6a92ff8d1
|
ed3eb692e09a3f44fd3e0b16ab7b042ee2658db6
|
refs/heads/master
| 2023-03-29T11:34:23.774873
| 2021-04-04T09:11:28
| 2021-04-04T09:11:28
| 354,500,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
######################################################
# File name: readlines.py
######################################################
def main():
    # open the file
f = open("data.txt", "r")
    # read all the lines and store them
    # in a list object
data = f.readlines()
print(data)
    # close the file
f.close()
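# An equivalent sketch using a context manager (hypothetical alternative name),
# which closes the file automatically even if reading raises an exception.
def main_with_context_manager():
    with open("data.txt", "r") as f:
        data = f.readlines()
    print(data)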
if __name__ == "__main__":
main()
|
[
"romanbatavi98@gmail.com"
] |
romanbatavi98@gmail.com
|
6f68e207244df25bb479e209f3172b7c2c682d62
|
b91fb7f75909a38eeaf87b8b36880a861ebf2380
|
/pympack/pympack/settings.py
|
73015f688290a4d680abf7bf406acb0bda838b5f
|
[] |
no_license
|
WalterCM/QuickSurvey
|
a2154051412b2966e40bc39b7a8988603b0acf2f
|
b4217400d4b664976234f411a0512c2969ee23bb
|
refs/heads/master
| 2022-12-12T18:50:59.990264
| 2018-09-03T22:11:30
| 2018-09-03T22:11:30
| 147,231,901
| 0
| 0
| null | 2022-12-08T02:51:41
| 2018-09-03T16:56:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
"""
Django settings for pympack project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-bu)uam##lz3x!p&h@3m^r@7xijzr=1^0pqb-h9t=ur0hd!02u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["192.168.0.4"]
# Application definition
INSTALLED_APPS = [
'survey',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pympack.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pympack.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'es-PE'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
from django.urls import reverse_lazy
LOGIN_REDIRECT_URL = reverse_lazy('results')
LOGIN_URL = reverse_lazy('login')
LOGOUT_URL = reverse_lazy('logout')
|
[
"walterc316@gmail.com"
] |
walterc316@gmail.com
|
7802429f9b84282a88de994e58ed34b0925e0b40
|
6c37be61fb4da574f85d359ff077a07ea5332bcd
|
/ChatBot/ChatBot/ChatterBot Demo/Basic Version.py
|
e4a1de5763e65309dac92a3070ae5b755148a643
|
[] |
no_license
|
ArvinRoad/Artificial-Intelligence-study
|
8c8ec59d8988a4caaec28e89ffb85387ffffdafe
|
cd32d0096749a2dac3cf2b027502f86a776c6859
|
refs/heads/main
| 2023-08-17T22:45:36.708501
| 2021-10-21T18:13:10
| 2021-10-21T18:13:10
| 356,546,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
# Build the ChatBot and specify its storage and logic adapters
bot = ChatBot(
'Default Response Example Bot',
storage_adapter='chatterbot.storage.JsonFileStorageAdapter',
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch'
},
{
'import_path': 'chatterbot.logic.LowConfidenceAdapter',
'threshold': 0.65,
'default_response': 'I am sorry, but I do not understand.'
}
],
trainer='chatterbot.trainers.ListTrainer'
)
# Manually provide a small corpus for training
bot.train([
'How can I help you?',
'I want to create a chat bot',
'Have you read the documentation?',
'No, I have not',
'This should help get you started: http://chatterbot.rtfd.org/en/latest/quickstart.html'
])
# Ask a question and fetch the response
question = 'How do I make an omelette?'
print(question)
response = bot.get_response(question)
print(response)
print("\n")
question = 'how to make a chat bot?'
print(question)
response = bot.get_response(question)
print(response)
|
[
"53329456+ArvinRoad@users.noreply.github.com"
] |
53329456+ArvinRoad@users.noreply.github.com
|
ccbc9274839f4d29a2a6306498aa6a3beb9e8ef3
|
1289329a4b29d88b32e5fcb40dbf01ff97f497e9
|
/main.py
|
eb40487f28d59c2c797f2b144e4791ad3ebc67c7
|
[] |
no_license
|
chingchengWan/segmentation_of_word
|
e6951d22263b67070d5311a4651d5b367651362d
|
c7f5fffb515e2d40de1ff2c08adf61096d612f2b
|
refs/heads/master
| 2020-07-31T10:55:42.975103
| 2019-12-29T16:16:14
| 2019-12-29T16:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
from flask import Flask, request, render_template, redirect, url_for
import re
import json
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def search():
if request.method == 'POST':
target = request.values['keyword']
file = open('/Users/wan/segmentation_of_word/ans_file.txt','r', encoding = 'utf-8')
text = file.readlines()
#retString = "<h1>Here is the results of {}</h1>".format(target)
word = []
for lines in text:
if target in lines:
word.append(lines)
# for _i in word:
# retString += _i
# retString += "<br><br>"
#return word
sents = word
return render_template('hello.html', sents=sents, keyword=target)
'''
# SPA, single page application
with open("./templates/index.html", "r") as fp:
retpage = fp.readlines()
return ''.join(retpage)
'''
return render_template('index.html')
if __name__ == '__main__':
app.debug = True
app.run()
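# A minimal sketch exercising the search view with Flask's built-in test client
# (assumes ans_file.txt and the referenced templates exist on the machine running it;
# the keyword value is just an example):
#
# with app.test_client() as client:
#     resp = client.post('/', data={'keyword': 'example'})
#     print(resp.status_code)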
|
[
"noreply@github.com"
] |
chingchengWan.noreply@github.com
|
83d1e632543bd97a4fb84fc4b236b76ed95377cb
|
97bf4b0653f3c3de13e23ac914c11ec6abf204a2
|
/Etcd_py/adn_readSspvm.py
|
4086a39ed0f6b5e46f2e943c7b89a226fc987f72
|
[] |
no_license
|
riliangxing/Etcd_py
|
431d1fd3fe81ada500efa61be50f28c1145964d9
|
acc4c59acb32d7648db62f5f3fcb92a23f37d180
|
refs/heads/master
| 2021-01-12T10:42:03.993536
| 2016-11-02T11:37:23
| 2016-11-02T11:37:23
| 72,633,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
from adn_checkSsp import _checkSsp
from adn_checkSsp import _getSspVm
from adn_checkSsp import dir
from adn_checkSsp import sspnameArray
from adn_checkSsp import _getSsp
def _getSspVms():
SspVms = {}
if(len(sspnameArray) > 0):
if(sspnameArray[0] == "all"):
ssps = _getSsp(dir)
for ssp in ssps:
SspVms[ssp] = _readVm(ssp)
else:
for sspname in sspnameArray:
SspVms[sspname] = _readVm(sspname)
return SspVms
def _readVm(ssp):
signSspvm = {}
sspvms = _getSspVm(ssp)
for sspvm in sspvms:
vmpath = dir + "/" + ssp + "/" + sspvm
f = open(vmpath, "r")
content = f.read()
f.close()
signSspvm[sspvm] = content
return signSspvm
|
[
"wb-xrl232180@alibaba-inc.com"
] |
wb-xrl232180@alibaba-inc.com
|
f975d0b27270d50058e8a7590da45e5eef35b251
|
c24298b258055df330336aad5b92785a12a120e2
|
/data/rawDataToCsv.py
|
a03c90d6e467b2b26bb46cfeeda28e40ef3485d3
|
[] |
no_license
|
sahilmgandhi/FreeThrowClassifier
|
b1879fee43e97490969cdf7815375d144ebc215c
|
41da3cbbb088463dd032eabbaae23c3824ffc2ac
|
refs/heads/master
| 2020-03-17T07:50:16.959580
| 2018-06-17T20:22:32
| 2018-06-17T20:22:32
| 133,414,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
import numpy as np
import matplotlib as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
import pandas as pd
# Change the name here
sheetMap = pd.read_excel('normal_shot.xlsx', sheet_name=None)
# Now you can list all sheets in the file
# print(sheetMap.keys())
wristData = {}
elbowData = {}
shoulderData = {}
zeroArr = np.zeros(500)
oneArr = np.ones(500)
for key in sheetMap:
# Change this from zeroArr to oneArr
sheetMap[key]['GoodShot?'] = oneArr
sheetMap[key] = sheetMap[key].drop(
sheetMap[key].columns[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]], axis=1)
if "wrist" in key:
wristArr = sheetMap[key].as_matrix()
for i in range(0, 50):
if str(round(wristArr[i][0], 2)) in wristData:
wristData[str(round(wristArr[i][0], 2))
].append(wristArr[i][1:])
else:
wristData[str(round(wristArr[i][0], 2))] = []
wristData[str(round(wristArr[i][0], 2))
].append(wristArr[i][1:])
elif "elbow" in key:
elbowArr = sheetMap[key].as_matrix()
for i in range(0, 50):
if str(round(elbowArr[i][0], 2)) in elbowData:
elbowData[str(round(elbowArr[i][0], 2))
].append(elbowArr[i][1:])
else:
elbowData[str(round(elbowArr[i][0], 2))] = []
elbowData[str(round(elbowArr[i][0], 2))
].append(elbowArr[i][1:])
elif "should" in key:
shoulderArr = sheetMap[key].as_matrix()
for i in range(0, 50):
if str(round(shoulderArr[i][0], 2)) in shoulderData:
shoulderData[str(round(shoulderArr[i][0], 2))
].append(shoulderArr[i][1:])
else:
shoulderData[str(round(shoulderArr[i][0], 2))] = []
shoulderData[str(round(shoulderArr[i][0], 2))
].append(shoulderArr[i][1:])
for key in wristData:
for arr in wristData[key]:
aa = [arr]
a = np.asarray(aa)
fileName = 'wrist'+str(key)+'.csv'
with open(fileName, 'a') as f:
np.savetxt(f, a, delimiter=",")
for key in elbowData:
for arr in elbowData[key]:
aa = [arr]
a = np.asarray(aa)
fileName = 'elbow'+str(key)+'.csv'
with open(fileName, 'a') as f:
np.savetxt(f, a, delimiter=",")
for key in shoulderData:
for arr in shoulderData[key]:
aa = [arr]
a = np.asarray(aa)
fileName = 'shoulder'+str(key)+'.csv'
with open(fileName, 'a') as f:
np.savetxt(f, a, delimiter=",")
|
[
"sahilmgandhi@gmail.com"
] |
sahilmgandhi@gmail.com
|
cc888f0656c9c2e5a9dd9321ccbfd11f5faf1447
|
1c099518cfa5843928763854e6c231d435fc25f4
|
/deepfake/line_sampler.py
|
06ca1ce73a4e68885557c2bf30dc3197a429ce1a
|
[
"CC-BY-2.0"
] |
permissive
|
poke53280/ml_mercari
|
b3cdda6d53fc4e0f2fca93d9a0ea0231f205ad69
|
f01ff6c1ca3f341e57c769e06abb136a044c9f74
|
refs/heads/master
| 2021-06-02T06:21:57.211262
| 2020-10-11T12:08:58
| 2020-10-11T12:08:58
| 114,643,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
from mp4_frames import get_output_dir
from mp4_frames import get_part_dir
from mp4_frames import get_video_path_from_stem_and_ipart
from mp4_frames import read_video
from image_grid import _get_bb_from_centers_3D
from image_grid import GetSubVolume3D
import numpy as np
import pandas as pd
import cv2
from multiprocessing import Pool
####################################################################################
#
# get_line
#
#
def get_line(p0, p1):
dp = p1 - p0
dp = np.abs(dp)
num_steps = np.max(dp)
# t element of [0, 1]
step_size = 1 / num_steps
ai = np.arange(start = 0, stop = 1 + step_size, step = step_size)
ai_t = np.tile(ai, 3).reshape(-1, ai.shape[0])
p = (p1 - p0).reshape(3, -1) * ai_t
p = p + p0.reshape(3, -1)
p = np.round(p)
return p
####################################################################################
#
# load_sample_cubes
#
def load_sample_cubes(original, l_fakes, l_ac, nCubeSize, iPart):
l_bb = _get_bb_from_centers_3D(l_ac, nCubeSize)
l_video_file = []
l_video_file.append(original)
l_video_file.extend(l_fakes)
d = nCubeSize // 2
d_cubes = []
for x in l_video_file:
print(f"Creating cubes from {x}...")
video = read_video_from_stem_and_ipart(x, iPart)
l_cubes = []
for bb in l_bb:
cube = GetSubVolume3D(video, bb)
assert cube.shape == (nCubeSize, nCubeSize, nCubeSize, 3)
l_cubes.append(cube)
d_cubes.append(l_cubes)
"""c"""
return d_cubes
####################################################################################
#
# rasterize_lines
#
def rasterize_lines(p, nLength):
l_l = []
for x in p:
l = get_line(x[::2], x[1::2])
assert l.shape[1] >= nLength, f"Line is short: {l.shape[1]}"
l = np.swapaxes(l, 0, 1)
l = l[:nLength]
l = l.astype(np.int32)
l_l.append(l)
anLines = np.stack(l_l)
return anLines
####################################################################################
#
# sample_cube
#
def sample_cube(r, anLines):
l_sample = []
for l in anLines:
l_x = l[:, 0]
l_y = l[:, 1]
l_z = l[:, 2]
r_sample = r[l_z, l_y, l_x]
l_sample.append(r_sample)
anSamples = np.stack(l_sample)
return anSamples
"""c"""
|
[
"anders.topper@gmail.com"
] |
anders.topper@gmail.com
|
001a991cc4ff599180b6490c8b889c8714b25142
|
5e47615e6b7f5105b0a1cf0c0f29734d5f6b1be1
|
/src/make_moves.py
|
7653fbb7b4b691201754df1ac8055a043ee4a3e3
|
[] |
no_license
|
palenz/maze_solver
|
dc0cd08ca9d81cedffb1a3d77afd367c0a92bb48
|
f49114fb6c34ff2e21d70d32be5adfafc58498cc
|
refs/heads/main
| 2023-04-10T04:38:36.409642
| 2021-04-21T10:45:56
| 2021-04-21T10:45:56
| 360,132,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,298
|
py
|
import requests
import json
from game import Game, url
# Returns true if domo is one or two steps ahead
def domo_ahead(game, first_move, second_move):
next_block = position_calculator(game, first_move, game.pony_position)
two_ahead = position_calculator(game, second_move, next_block)
return game.domokun_position == next_block or game.domokun_position == two_ahead
def position_calculator(game, move, position):
current_position = position
if move == 'east':
current_position += 1
elif move == 'west':
current_position -= 1
elif move == 'south':
current_position += game.width
elif move == 'north':
current_position -= game.width
return current_position
# Returns a new list of moves with the last intersection list turned into a simple move
def last_intersection(moves):
indexes = []
new_moves = None
for move in moves:
if isinstance(move, list):
indexes.append(moves.index(move))
i = max(indexes)
new_moves = moves[:i]
new_moves.append(moves[i][2])
return new_moves
# Returns the block number of the last intersection
def last_intersection_position(moves):
indexes = []
new_pos = None
for move in moves:
if isinstance(move, list):
indexes.append(moves.index(move))
i = max(indexes)
new_pos = moves[i][0]
return new_pos
# Checks if there is a south wall
def south_wall(game, point):
if (game.width*game.height) > point >= ((game.width*game.height)-game.width):
return True
else:
south_block = point + game.width
return 'north' in game.walls[south_block]
# Checks if there is an east wall
def east_wall(game, point):
if (point+1) % game.width == 0:
return True
else:
east_block = point + 1
return 'west' in game.walls[east_block]
# Returns the available moves for any given block number
def available_moves(game, position):
moves = []
if not('north' in game.walls[position]):
moves.append('north')
if not('west' in game.walls[position]):
moves.append('west')
if not(south_wall(game, position)):
moves.append('south')
if not(east_wall(game, position)):
moves.append('east')
return moves
# Calculates the opposite move
def opposite(direction):
if direction == 'north':
return 'south'
elif direction == 'south':
return 'north'
elif direction == 'east':
return 'west'
elif direction == 'west':
return 'east'
# Formats the final path to remove intersection lists
def clean_path(path):
clean_path = path
for move in clean_path:
if type(move) == list:
to_insert = move[1]
i = clean_path.index(move)
clean_path.remove(move)
clean_path.insert(i, to_insert)
return clean_path
# Tries possible options until it finds the path to the exit point
# Path will look like this [[13, right, left], north, south, south]
def find_path(game):
path = []
position = game.pony_position
while position != game.end_point_position:
if len(path) == 0:
start_moves = available_moves(game, position)
new_position = position_calculator(game, start_moves[0], position)
path.append([position])
path[0].extend(start_moves)
position = new_position
elif len(available_moves(game, position)) == 2:
if type(path[-1]) == str:
last_move = path[-1]
elif type(path[-1]) == list:
last_move = path[-1][1]
lm_opposite = opposite(last_move)
moves = available_moves(game, position)
moves.remove(lm_opposite)
new_position = position_calculator(game, moves[0], position)
path.append(moves[0])
position = new_position
elif len(available_moves(game, position)) == 3:
if type(path[-1]) == str:
last_move = path[-1]
elif type(path[-1]) == list:
last_move = path[-1][1]
lm_opposite = opposite(last_move)
moves = available_moves(game, position)
moves.remove(lm_opposite)
new_position = position_calculator(game, moves[0], position)
path.append([position])
path[-1].extend(moves)
position = new_position
elif len(available_moves(game, position)) == 1:
last_int_position = last_intersection_position(path)
new_path = last_intersection(path)
path = new_path
position = position_calculator(game, new_path[-1], last_int_position)
return clean_path(path)
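# Note (added commentary, not in the original module): find_path walks forward while
# exactly two moves are open (a corridor), records an entry [position, first, second]
# whenever three moves are open (an intersection), and on a dead end (only the way
# back is open) rewinds via last_intersection()/last_intersection_position() to retry
# the untried branch. clean_path() then flattens each remaining intersection entry to
# the move that was actually taken.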
# Makes the next move post request (also added game.print_game() to view the game in the terminal)
def make_move(game, move):
game.print_game()
move_params = {
"direction": move
}
res = requests.post(url + "/" + game.id, json=move_params)
move_response = json.loads(res.text)
game.status = move_response['state']
game.status_message = move_response['state-result']
# Follows the path and prints the end result.
def solve_maze(game):
if game.status == 'Active':
vpath = find_path(game)
for index, move in enumerate(vpath):
i_next_move = index + 1
i_previous_move = index - 1
previous_move = vpath[i_previous_move]
run = opposite(previous_move)
next_move = vpath[i_next_move]
if game.status == 'over':
break
if domo_ahead(game, move, next_move):
# domo_threat = True
make_move(game, run)
else:
# if domo_threat == True:
# make_move(game, previous_move)
# make_move(game, move)
# domo_threat = False
# else:
make_move(game, move)
print(game.status_message)
elif game.status == 'over' or game.status == 'won':
print(game.status_message)
# Initialise and call the create_game() and solve_maze() functions below
# Example
game1 = Game("Applejack", 15, 15, 1)
game1.create_game()
solve_maze(game1)
|
[
"jpalenzuela@outlook.com"
] |
jpalenzuela@outlook.com
|
d81f4f6abd7994a713c18cfc3e930cacbd78ec3f
|
bd555f64088b9698a8335bb0d66149f02c843b16
|
/modules/migrations/0001_initial.py
|
02b2e2ffe92281b357756bc4aaf1be693893b3d2
|
[] |
no_license
|
gretkierewicz/dissertation
|
eabd0ac10d1c8cd102ceb49d6c4ff94ad45a9291
|
57545b82b0e4d9b16334eaf16c2f0e1bdb6f3ece
|
refs/heads/master
| 2023-06-04T10:39:05.182020
| 2021-06-23T20:10:46
| 2021-06-23T20:10:46
| 311,950,776
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
# Generated by Django 3.1.3 on 2021-06-23 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('schedules', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Modules',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module_code', models.SlugField(max_length=45)),
('name', models.CharField(max_length=256)),
('examination', models.BooleanField(default=False)),
('language', models.CharField(default='pl', max_length=2)),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='modules', to='schedules.schedules')),
],
options={
'ordering': ['module_code'],
'unique_together': {('module_code', 'schedule')},
},
),
migrations.CreateModel(
name='Classes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('classes_hours', models.PositiveIntegerField()),
('students_limit_per_group', models.PositiveIntegerField(null=True)),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_of_classes', to='modules.modules')),
],
options={
'ordering': ['module', 'name'],
'unique_together': {('module', 'name')},
},
),
]
|
[
"piotr.gretkierewicz@gmail.com"
] |
piotr.gretkierewicz@gmail.com
|
68ee63b1b259a3d9777bde8e104791dc49f52f81
|
214216dbf7d84cc7b1d50e281faec14435d17708
|
/PyEMD/EMD.py
|
de0af69b39aefd048af2a0d988596a7068ccd491
|
[] |
no_license
|
hedgefair/PyEMD
|
9e84d123dd81444c9e449454111f70cf240116d1
|
6248f85a12e26d59147a34524fec48e84f6822e4
|
refs/heads/master
| 2020-12-03T00:34:44.460321
| 2017-06-13T05:35:00
| 2017-06-13T05:35:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34,454
|
py
|
#!/usr/bin/python
# coding: UTF-8
#
# Author: Dawid Laszuk
# Contact: laszukdawid@gmail.com
#
# Edited: 07/06/2017
#
# Feel free to contact for any information.
from __future__ import division, print_function
import logging
import numpy as np
import os
from scipy.interpolate import interp1d
from PyEMD.splines import *
class EMD:
"""
**Empirical Mode Decomposition**
Method of decomposing signal into Intrinsic Mode Functions (IMFs)
based on algorithm presented in Huang et al. [Huang1998]_.
Algorithm was validated with Rilling et al. [Rilling2003]_ Matlab's version from 3.2007.
Parameters
----------
spline_kind : string, (default: 'cubic')
Defines type of spline, which connects extrema.
Possible: cubic, akima, slinear.
nbsym : int, (default: 2)
Number of extrema used in boundary mirroring.
extrema_detection : string, (default: 'simple')
How extrema are defined.
* *simple* - Ext point is one above/below neighbours.
* *parabol* - Ext point is a peak of a parabola.
References
----------
.. [Huang1998] N. E. Huang et al., "The empirical mode decomposition and the
Hilbert spectrum for non-linear and non stationary time series
analysis", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998
.. [Rilling2003] G. Rilling, P. Flandrin and P. Goncalves, "On Empirical Mode
Decomposition and its algorithms", IEEE-EURASIP Workshop on
Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003
Examples
--------
>>> import numpy as np
>>> T = np.linspace(0, 1, 100)
>>> S = np.sin(2*2*np.pi*T)
>>> emd = EMD()
>>> emd.extrema_detection = "parabol"
>>> IMFs = emd.emd(S)
>>> IMFs.shape
(1, 100)
"""
logger = logging.getLogger(__name__)
def __init__(self, spline_kind='cubic', nbsym=2, **kwargs):
# Declare constants
self.std_thr = 0.2
self.svar_thr = 0.001
self.power_thr = -5
self.total_power_thr = 0.01
self.range_thr = 0.001
self.nbsym = nbsym
self.reduce_scale = 1.
self.scale_factor = 1.
self.PLOT = 0
self.INTERACTIVE = 0
self.plotPath = 'splineTest'
self.spline_kind = spline_kind
self.extrema_detection = 'simple' # simple, parabol
self.DTYPE = np.float64
self.FIXE = 0
self.FIXE_H = 0
self.MAX_ITERATION = 1000
# Update based on options
for key in kwargs.keys():
if key in self.__dict__.keys():
self.__dict__[key] = kwargs[key]
if self.PLOT:
import pylab as plt
def extract_max_min_spline(self, T, S):
"""
Extracts top and bottom envelopes based on the signal,
which are constructed based on maxima and minima, respectively.
Parameters
----------
T : numpy array
Position or time array.
S : numpy array
Input data S(T).
Returns
-------
max_spline : numpy array
Spline spanned on S maxima.
min_spline : numpy array
Spline spanned on S minima.
"""
# Get indexes of extrema
max_pos, max_val, min_pos, min_val, indzer = self.find_extrema(T, S)
if max_pos.dtype!=self.DTYPE:
self.logger.error('max_pos.dtype: '+str(max_pos.dtype))
if max_val.dtype!=self.DTYPE:
self.logger.error('max_val.dtype: '+str(max_val.dtype))
if min_pos.dtype!=self.DTYPE:
self.logger.error('min_pos.dtype: '+str(min_pos.dtype))
if min_val.dtype!=self.DTYPE:
self.logger.error('min_val.dtype: '+str(min_val.dtype))
if len(max_pos) + len(min_pos) < 3: return [-1]*4
#########################################
# Extrapolation of signal (over boundaries)
pp_res = self.prepare_points(T, S, max_pos, max_val, min_pos, min_val)
max_extrema, min_extrema = pp_res
max_t_spline, max_spline = self.spline_points(T, max_extrema)
min_t_spline, min_spline = self.spline_points(T, min_extrema)
if max_extrema.dtype!=self.DTYPE:
self.logger.error('max_extrema.dtype: '+str(max_extrema.dtype))
if max_spline.dtype!=self.DTYPE:
self.logger.error('max_spline.dtype: '+str(max_spline.dtype))
if max_t_spline.dtype!=self.DTYPE:
self.logger.error('maxTSline.dtype: '+str(max_t_spline.dtype))
return max_spline, min_spline, max_extrema, min_extrema
def prepare_points(self, T, S, max_pos, max_val, min_pos, min_val):
"""
Performs extrapolation on edges by adding extra extrema, also known
as mirroring signal. The number of added points depends on *nbsym*
variable.
Input
-----
S : numpy array
Input signal.
T : numpy array
Position or time array.
max_pos : iterable
Sorted time positions of maxima.
max_vali : iterable
Signal values at max_pos positions.
min_pos : iterable
Sorted time positions of minima.
min_val : iterable
Signal values at min_pos positions.
Returns
-------
min_extrema : numpy array (2 rows)
Position (1st row) and values (2nd row) of minima.
min_extrema : numpy array (2 rows)
Position (1st row) and values (2nd row) of maxima.
"""
if self.extrema_detection=="parabol":
return self._prepare_points_parabol(T, S, max_pos, max_val, min_pos, min_val)
elif self.extrema_detection=="simple":
return self._prepare_points_simple(T, S, max_pos, max_val, min_pos, min_val)
else:
msg = "Incorrect extrema detection type. Please try: "
msg+= "'simple' or 'parabol'."
raise ValueError(msg)
def _prepare_points_parabol(self, T, S, max_pos, max_val, min_pos, min_val):
"""
Performs mirroring on signal which extrema do not necessarily
belong on the position array.
See self.prepare_points().
"""
# Need at least two extrema to perform mirroring
max_extrema = np.zeros((2,len(max_pos)), dtype=self.DTYPE)
min_extrema = np.zeros((2,len(min_pos)), dtype=self.DTYPE)
max_extrema[0], min_extrema[0] = max_pos, min_pos
max_extrema[1], min_extrema[1] = max_val, min_val
# Local variables
nbsym = self.nbsym
end_min, end_max = len(min_pos), len(max_pos)
####################################
# Left bound
dPos = max_pos[0] - min_pos[0]
leftExtType = ["min", "max"][dPos<0]
if (leftExtType == "max"):
if (S[0]>min_val[0]) and (np.abs(dPos)>(max_pos[0]-T[0])):
# mirror signal to first extrema
expand_left_max_pos = 2*max_pos[0] - max_pos[1:nbsym+1]
expand_left_min_pos = 2*max_pos[0] - min_pos[0:nbsym]
expand_left_max_val = max_val[1:nbsym+1]
expand_left_min_val = min_val[0:nbsym]
else:
# mirror signal to beginning
expand_left_max_pos = 2*T[0] - max_pos[0:nbsym]
expand_left_min_pos = 2*T[0] - np.append(T[0], min_pos[0:nbsym-1])
expand_left_max_val = max_val[0:nbsym]
expand_left_min_val = np.append(S[0], min_val[0:nbsym-1])
elif (leftExtType == "min"):
if (S[0] < max_val[0]) and (np.abs(dPos)>(min_pos[0]-T[0])):
# mirror signal to first extrema
expand_left_max_pos = 2*min_pos[0] - max_pos[0:nbsym]
expand_left_min_pos = 2*min_pos[0] - min_pos[1:nbsym+1]
expand_left_max_val = max_val[0:nbsym]
expand_left_min_val = min_val[1:nbsym+1]
else:
# mirror signal to beginning
expand_left_max_pos = 2*T[0] - np.append(T[0], max_pos[0:nbsym-1])
expand_left_min_pos = 2*T[0] - min_pos[0:nbsym]
expand_left_max_val = np.append(S[0], max_val[0:nbsym-1])
expand_left_min_val = min_val[0:nbsym]
if not expand_left_min_pos.shape:
expand_left_min_pos, expand_left_min_val = min_pos, min_val
if not expand_left_max_pos.shape:
expand_left_max_pos, expand_left_max_val = max_pos, max_val
expand_left_min = np.vstack((expand_left_min_pos[::-1], expand_left_min_val[::-1]))
expand_left_max = np.vstack((expand_left_max_pos[::-1], expand_left_max_val[::-1]))
####################################
# Right bound
dPos = max_pos[-1] - min_pos[-1]
rightExtType = ["min","max"][dPos>0]
if (rightExtType == "min"):
if (S[-1] < max_val[-1]) and (np.abs(dPos)>(T[-1]-min_pos[-1])):
# mirror signal to last extrema
idx_max = max(0, end_max-nbsym)
idxMin = max(0, end_min-nbsym-1)
expand_right_maxPos = 2*min_pos[-1] - max_pos[idx_max:]
expand_right_min_pos = 2*min_pos[-1] - min_pos[idxMin:-1]
expand_right_max_val = max_val[idx_max:]
expand_right_min_val = min_val[idxMin:-1]
else:
# mirror signal to end
idx_max = max(0, end_max-nbsym+1)
idxMin = max(0, end_min-nbsym)
expand_right_maxPos = 2*T[-1] - np.append(max_pos[idx_max:], T[-1])
expand_right_min_pos = 2*T[-1] - min_pos[idxMin:]
expand_right_max_val = np.append(max_val[idx_max:],S[-1])
expand_right_min_val = min_val[idxMin:]
elif (rightExtType == "max"):
if (S[-1] > min_val[-1]) and len(max_pos)>1 and (np.abs(dPos)>(T[-1]-max_pos[-1])):
# mirror signal to last extremum
idx_max = max(0, end_max-nbsym-1)
idxMin = max(0, end_min-nbsym)
expand_right_maxPos = 2*max_pos[-1] - max_pos[idx_max:-1]
expand_right_min_pos = 2*max_pos[-1] - min_pos[idxMin:]
expand_right_max_val = max_val[idx_max:-1]
expand_right_min_val = min_val[idxMin:]
else:
# mirror signal to end
idx_max = max(0, end_max-nbsym)
idxMin = max(0, end_min-nbsym+1)
expand_right_maxPos = 2*T[-1] - max_pos[idx_max:]
expand_right_min_pos = 2*T[-1] - np.append(min_pos[idxMin:], T[-1])
expand_right_max_val = max_val[idx_max:]
expand_right_min_val = np.append(min_val[idxMin:], S[-1])
if not expand_right_min_pos.shape:
expand_right_min_pos, expand_right_min_val = min_pos, min_val
if not expand_right_maxPos.shape:
expand_right_maxPos, expand_right_max_val = max_pos, max_val
expand_right_min = np.vstack((expand_right_min_pos[::-1], expand_right_min_val[::-1]))
expand_right_max = np.vstack((expand_right_maxPos[::-1], expand_right_max_val[::-1]))
max_extrema = np.hstack((expand_left_max, max_extrema, expand_right_max))
min_extrema = np.hstack((expand_left_min, min_extrema, expand_right_min))
return max_extrema, min_extrema
def _prepare_points_simple(self, T, S, max_pos, max_val, min_pos, min_val):
"""
Performs mirroring on signal which extrema can be indexed on
the position array.
See self.prepare_points().
"""
# Find indexes of pass
indmin = np.array([np.nonzero(T==t)[0] for t in min_pos]).flatten()
indmax = np.array([np.nonzero(T==t)[0] for t in max_pos]).flatten()
if S.dtype != self.DTYPE:
self.logger.error('S.dtype: '+str(S.dtype))
if T.dtype != self.DTYPE:
self.logger.error('T.dtype: '+str(T.dtype))
# Local variables
nbsym = self.nbsym
end_min, end_max = len(min_pos), len(max_pos)
####################################
# Left bound - mirror nbsym points to the left
if indmax[0] < indmin[0]:
if S[0] > S[indmin[0]]:
lmax = indmax[1:min(end_max,nbsym+1)][::-1]
lmin = indmin[0:min(end_min,nbsym+0)][::-1]
lsym = indmax[0]
else:
lmax = indmax[0:min(end_max,nbsym)][::-1]
lmin = np.append(indmin[0:min(end_min,nbsym-1)][::-1],0)
lsym = 0
else:
if S[0] < S[indmax[0]]:
lmax = indmax[0:min(end_max,nbsym+0)][::-1]
lmin = indmin[1:min(end_min,nbsym+1)][::-1]
lsym = indmin[0]
else:
lmax = np.append(indmax[0:min(end_max,nbsym-1)][::-1],0)
lmin = indmin[0:min(end_min,nbsym)][::-1]
lsym = 0
####################################
# Right bound - mirror nbsym points to the right
if indmax[-1] < indmin[-1]:
if S[-1] < S[indmax[-1]]:
rmax = indmax[max(end_max-nbsym,0):][::-1]
rmin = indmin[max(end_min-nbsym-1,0):-1][::-1]
rsym = indmin[-1]
else:
rmax = np.append(indmax[max(end_max-nbsym+1,0):], len(S)-1)[::-1]
rmin = indmin[max(end_min-nbsym,0):][::-1]
rsym = len(S)-1
else:
if S[-1] > S[indmin[-1]]:
rmax = indmax[max(end_max-nbsym-1,0):-1][::-1]
rmin = indmin[max(end_min-nbsym,0):][::-1]
rsym = indmax[-1]
else:
rmax = indmax[max(end_max-nbsym,0):][::-1]
rmin = np.append(indmin[max(end_min-nbsym+1,0):], len(S)-1)[::-1]
rsym = len(S)-1
# In case any array missing
if not lmin.size: lmin = indmin
if not rmin.size: rmin = indmin
if not lmax.size: lmax = indmax
if not rmax.size: rmax = indmax
# Mirror points
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
# If mirrored points are not outside passed time range.
if tlmin[0] > T[0] or tlmax[0] > T[0]:
if lsym == indmax[0]:
lmax = indmax[0:min(end_max,nbsym)][::-1]
else:
lmin = indmin[0:min(end_min,nbsym)][::-1]
if lsym == 0:
raise Exception('Left edge BUG')
lsym = 0
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
if trmin[-1] < T[-1] or trmax[-1] < T[-1]:
if rsym == indmax[-1]:
rmax = indmax[max(end_max-nbsym,0):][::-1]
else:
rmin = indmin[max(end_min-nbsym,0):][::-1]
if rsym == len(S)-1:
raise Exception('Right edge BUG')
rsym = len(S)-1
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
zlmax = S[lmax]
zlmin = S[lmin]
zrmax = S[rmax]
zrmin = S[rmin]
tmin = np.append(tlmin, np.append(T[indmin], trmin))
tmax = np.append(tlmax, np.append(T[indmax], trmax))
zmin = np.append(zlmin, np.append(S[indmin], zrmin))
zmax = np.append(zlmax, np.append(S[indmax], zrmax))
max_extrema = np.array([tmax, zmax])
min_extrema = np.array([tmin, zmin])
if max_extrema.dtype != self.DTYPE:
self.logger.error('max_extrema.dtype: '+str(max_extrema.dtype))
# Make double sure, that each extremum is significant
max_dup_idx = np.where(max_extrema[0,1:]==max_extrema[0,:-1])
max_extrema = np.delete(max_extrema, max_dup_idx, axis=1)
min_dup_idx = np.where(min_extrema[0,1:]==min_extrema[0,:-1])
min_extrema = np.delete(min_extrema, min_dup_idx, axis=1)
return max_extrema, min_extrema
def spline_points(self, T, extrema):
"""
Constructs spline over given points.
Parameters
----------
T : numpy array
Position or time array.
extrema : numpy array
Position (1st row) and values (2nd row) of points.
Returns
-------
T : numpy array
Position array (same as input).
spline : numpy array
Spline array over given positions T.
"""
kind = self.spline_kind.lower()
t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]]
if t.dtype != self.DTYPE:
self.logger.error('t.dtype: '+str(t.dtype))
if extrema.dtype != self.DTYPE:
self.logger.error('extrema.dtype: '+str(extrema.dtype))
if kind == "akima":
return t, akima(extrema[0], extrema[1], t)
elif kind == 'cubic':
if extrema.shape[1]>3:
return t, interp1d(extrema[0], extrema[1], kind=kind)(t)
else:
return cubic_spline_3pts(extrema[0], extrema[1], t)
elif kind in ['slinear', 'quadratic', 'linear']:
return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)
else:
raise ValueError("No such interpolation method!")
def _not_duplicate(self, S):
"""
Returns indices for not repeating values, where there is no extremum.
Example
-------
>>> S = [0, 1, 1, 1, 2, 3]
>>> idx = self._not_duplicate(S)
[0, 1, 3, 4, 5]
"""
idx = [0]
for i in range(1,len(S)-1):
if (S[i] == S[i+1] and S[i] == S[i-1]):
pass
else: idx.append(i)
idx.append(len(S)-1)
return idx
def find_extrema(self, T, S):
"""
Returns extrema (minima and maxima) for given signal S.
Detection and definition of the extrema depends on
**extrema_detection** variable, set on initiation of EMD.
Parameters
----------
T : numpy array
Position or time array.
S : numpy array
Input data S(T).
Returns
-------
local_max_pos : numpy array
Position of local maxima.
local_max_val : numpy array
Values of local maxima.
local_min_pos : numpy array
Position of local minima.
local_min_val : numpy array
Values of local minima.
"""
if self.extrema_detection=="parabol":
return self._find_extrema_parabol(T, S)
elif self.extrema_detection=="simple":
return self._find_extrema_simple(T, S)
else:
msg = "Incorrect extrema detection type. Please try: "
msg+= "'simple' or 'parabol'."
raise ValueError(msg)
def _find_extrema_parabol(self, T, S):
"""
Performs parabol estimation of extremum, i.e. an extremum is a peak
of parabol spanned on 3 consecutive points, where the mid point is
the closest.
See `self.find_extrema()`.
"""
# Finds indexes of zero-crossings
S1, S2 = S[:-1], S[1:]
indzer = np.nonzero(S1*S2<0)[0]
if np.any(S == 0):
iz = np.nonzero(S==0)[0]
indz = []
if np.any(np.diff(iz)==1):
zer = S == 0
dz = np.diff(np.append(np.append(0, zer), 0))
debz = np.nonzero(dz == 1)[0]
finz = np.nonzero(dz == -1)[0]-1
indz = np.round((debz+finz)/2.)
else:
indz = iz
indzer = np.sort(np.append(indzer, indz))
dt = float(T[1]-T[0])
scale = 2.*dt*dt
idx = self._not_duplicate(S)
T = T[idx]
S = S[idx]
# p - previous
# 0 - current
# n - next
Tp, T0, Tn = T[:-2], T[1:-1], T[2:]
Sp, S0, Sn = S[:-2], S[1:-1], S[2:]
#~ a = Sn + Sp - 2*S0
#~ b = 2*(Tn+Tp)*S0 - ((Tn+T0)*Sp+(T0+Tp)*Sn)
#~ c = Sp*T0*Tn -2*Tp*S0*Tn + Tp*T0*Sn
TnTp, T0Tn, TpT0 = Tn-Tp, T0-Tn, Tp-T0
scale = Tp*Tn*Tn + Tp*Tp*T0 + T0*T0*Tn - Tp*Tp*Tn - Tp*T0*T0 - T0*Tn*Tn
a = T0Tn*Sp + TnTp*S0 + TpT0*Sn
b = (S0-Sn)*Tp**2 + (Sn-Sp)*T0**2 + (Sp-S0)*Tn**2
c = T0*Tn*T0Tn*Sp + Tn*Tp*TnTp*S0 + Tp*T0*TpT0*Sn
a = a/scale
b = b/scale
c = c/scale
a[a==0] = 1e-14 #TODO: bad hack for zero div
tVertex = -0.5*b/a
idx = np.r_[tVertex<T0+0.5*(Tn-T0)] & np.r_[tVertex>=T0-0.5*(T0-Tp)]
a, b, c = a[idx], b[idx], c[idx]
tVertex = tVertex[idx]
_T, _S = T0[idx], S0[idx]
#~ sVertex = a*(tVertex+_T)*(tVertex-_T) + b*(tVertex-_T) + _S
sVertex = a*tVertex*tVertex + b*tVertex + c
local_max_pos, local_max_val = tVertex[a<0], sVertex[a<0]
local_min_pos, local_min_val = tVertex[a>0], sVertex[a>0]
return local_max_pos, local_max_val, local_min_pos, local_min_val, indzer
def _find_extrema_simple(self, T, S):
"""
Performs extrema detection, where extremum is defined as a point,
that is above/below its neighbours.
See `self.find_extrema()`.
"""
# Finds indexes of zero-crossings
S1, S2 = S[:-1], S[1:]
indzer = np.nonzero(S1*S2<0)[0]
if np.any(S==0):
iz = np.nonzero(S==0)[0]
indz = []
if np.any(np.diff(iz)==1):
zer = (S==0)
dz = np.diff(np.append(np.append(0, zer), 0))
debz = np.nonzero(dz==1)[0]
finz = np.nonzero(dz==-1)[0]-1
indz = np.round((debz+finz)/2.)
else:
indz = iz
indzer = np.sort(np.append(indzer, indz))
# Finds local extrema
d = np.diff(S)
d1, d2 = d[:-1], d[1:]
indmin = np.nonzero(np.r_[d1*d2<0] & np.r_[d1<0])[0]+1
indmax = np.nonzero(np.r_[d1*d2<0] & np.r_[d1>0])[0]+1
# When two or more points have the same value
if np.any(d==0):
imax, imin = [], []
bad = (d==0)
dd = np.diff(np.append(np.append(0, bad), 0))
debs = np.nonzero(dd==1)[0]
fins = np.nonzero(dd==-1)[0]
if debs[0] == 1:
if len(debs)>1:
debs, fins = debs[1:], fins[1:]
else:
debs, fins = [], []
if len(debs) > 0:
if fins[-1] == len(S)-1:
if len(debs) > 1:
debs, fins = debs[:-1], fins[:-1]
else:
debs, fins = [], []
lc = len(debs)
if lc > 0:
for k in range(lc):
if d[debs[k]-1] > 0:
if d[fins[k]] < 0:
imax.append(np.round((fins[k]+debs[k])/2.))
else:
if d[fins[k]] > 0:
imin.append(np.round((fins[k]+debs[k])/2.))
if len(imax) > 0:
indmax = indmax.tolist()
for x in imax: indmax.append(int(x))
indmax.sort()
if len(imin) > 0:
indmin = indmin.tolist()
for x in imin: indmin.append(int(x))
indmin.sort()
local_max_pos = T[indmax]
local_max_val = S[indmax]
local_min_pos = T[indmin]
local_min_val = S[indmin]
return local_max_pos, local_max_val, local_min_pos, local_min_val, indzer
def end_condition(self, S, IMF):
"""Tests for end condition of whole EMD. The procedure will stop if:
* Absolute amplitude (max - min) is below *range_thr* threshold, or
* Metric L1 (mean absolute difference) is below *total_power_thr* threshold.
Parameters
----------
S : numpy array
Original signal on which EMD was performed.
IMF : numpy 2D array
Set of IMFs where each row is IMF. Their order is not important.
Returns
-------
end : bool
Is this the end?
"""
# When to stop EMD
tmp = S.copy() - np.sum(IMF, axis=0)
# # Power is enough
# if np.log10(np.abs(tmp).sum()/np.abs(Res).sum()) < self.power_thr:
# self.logger.info("FINISHED -- POWER RATIO")
# return True
if np.max(tmp) - np.min(tmp) < self.range_thr:
self.logger.info("FINISHED -- RANGE")
return True
if np.sum(np.abs(tmp)) < self.total_power_thr:
self.logger.info("FINISHED -- SUM POWER")
return True
return False
def check_imf(self, imf_new, imf_old, eMax, eMin, mean):
"""
Huang criteria for **IMF** (similar to Cauchy convergence test).
Signal is an IMF if consecutive siftings do not affect signal
in a significant manner.
"""
# local max are >0 and local min are <0
if np.any(eMax[1]<0) or np.any(eMin[1]>0):
return False
# Convergence
if np.sum(imf_new**2) < 1e-10: return False
# Scaled variance test
svar = np.sum((imf_new-imf_old)**2)/(max(imf_old)-min(imf_old))
if svar < self.svar_thr:
self.logger.info("Scaled variance -- PASSED")
return True
# Standard deviation test
std = np.sum(((imf_new-imf_old)/imf_new)**2)
if std < self.std_thr:
self.logger.info("Standard deviation -- PASSED")
return True
return False
def _common_dtype(self, x, y):
"""Determines common numpy DTYPE for arrays."""
dtype = np.find_common_type([x.dtype, y.dtype], [])
if x.dtype != dtype: x = x.astype(dtype)
if y.dtype != dtype: y = y.astype(dtype)
return x, y
def emd(self, S, T=None, max_imf=None):
"""
Performs Empirical Mode Decomposition on signal S.
The decomposition is limited to *max_imf* imfs.
Returns IMF functions in numpy array format.
Parameters
----------
S : numpy array,
Input signal.
T : numpy array, (default: None)
Position or time array. If None passed numpy arange is created.
max_imf : int, (default: -1)
IMF number to which decomposition should be performed.
Negative value means *all*.
Returns
-------
IMF : numpy array
Set of IMFs producesed from input signal.
"""
if T is None: T = np.arange(len(S), dtype=S.dtype)
if max_imf is None: max_imf = -1
# Make sure same types are dealt
S, T = self._common_dtype(S, T)
self.DTYPE = S.dtype
scale = 1.
Res = S.astype(self.DTYPE)
Res, scaledS = Res/scale, S/scale
imf = np.zeros(len(S), dtype=self.DTYPE)
imf_old = Res.copy()
N = len(S)
if Res.dtype!=self.DTYPE:
self.logger.error('Res.dtype: '+str(Res.dtype))
if scaledS.dtype!=self.DTYPE:
self.logger.error('scaledS.dtype: '+str(scaledS.dtype))
if imf.dtype!=self.DTYPE:
self.logger.error('imf.dtype: '+str(imf.dtype))
if imf_old.dtype!=self.DTYPE:
self.logger.error('imf_old.dtype: '+str(imf_old.dtype))
if T.dtype!=self.DTYPE:
self.logger.error('T.dtype: '+str(T.dtype))
if S.shape != T.shape:
info = "Position or time array should be the same size as signal."
raise ValueError(info)
# Create arrays
imfNo = 0
IMF = np.empty((imfNo, N)) # Numpy container for IMF
notFinish = True
while(notFinish):
self.logger.debug('IMF -- '+str(imfNo))
Res = scaledS - np.sum(IMF[:imfNo], axis=0)
imf = Res.copy()
mean = np.zeros(len(S), dtype=self.DTYPE)
# Counters
n = 0 # All iterations for current imf.
n_h = 0 # counts when |#zero - #ext| <=1
# Start on-screen displaying
if self.PLOT and self.INTERACTIVE:
plt.ion()
while(n<self.MAX_ITERATION):
n += 1
self.logger.debug("Iteration: "+str(n))
max_pos, max_val, min_pos, min_val, indzer = self.find_extrema(T, imf)
extNo = len(min_pos)+len(max_pos)
nzm = len(indzer)
if extNo > 2:
# Plotting. Either into file, or on-screen display.
if n>1 and self.PLOT:
plt.clf()
plt.plot(T, imf*scale, 'g')
plt.plot(T, max_env*scale, 'b')
plt.plot(T, min_env*scale, 'r')
plt.plot(T, mean*scale, 'k--')
plt.title("imf{}_{:02}".format(imfNo, n-1))
if self.INTERACTIVE:
plt.draw()
else:
fName = "imf{}_{:02}".format(imfNo, n-1)
plt.savefig(os.path.join(self.plotPath,fName))
imf_old = imf.copy()
imf = imf - self.reduce_scale*mean
max_env, min_env, eMax, eMin = self.extract_max_min_spline(T, imf)
if type(max_env) == type(-1):
notFinish = True
break
mean = 0.5*(max_env+min_env)
if max_env.dtype!=self.DTYPE:
self.logger.error('max_env.dtype: '+str(max_env.dtype))
if min_env.dtype!=self.DTYPE:
self.logger.error('min_env.dtype: '+str(min_env.dtype))
if imf.dtype!=self.DTYPE:
self.logger.error('imf.dtype: '+str(imf.dtype))
if mean.dtype!=self.DTYPE:
self.logger.error('mean.dtype: '+str(mean.dtype))
# Fix number of iterations
if self.FIXE:
if n>=self.FIXE+1: break
# Fix number of iterations after number of zero-crossings
# and extrema differ at most by one.
elif self.FIXE_H:
res = self.find_extrema(T, imf)
max_pos, max_val, min_pos, min_val, ind_zer = res
extNo = len(max_pos)+len(min_pos)
nzm = len(ind_zer)
if n == 1: continue
if abs(extNo-nzm)>1: n_h = 0
else: n_h += 1
#if np.all(max_val>0) and np.all(min_val<0):
# n_h += 1
#else:
# n_h = 0
# STOP
if n_h >= self.FIXE_H: break
# Stops after default stopping criteria are met
else:
ext_res = self.find_extrema(T, imf)
max_pos, max_val, min_pos, min_val, ind_zer = ext_res
extNo = len(max_pos) + len(min_pos)
nzm = len(ind_zer)
f1 = self.check_imf(imf, imf_old, eMax, eMin, mean)
#f2 = np.all(max_val>0) and np.all(min_val<0)
f2 = abs(extNo - nzm)<2
# STOP
if f1 and f2: break
else:
notFinish = False
break
IMF = np.vstack((IMF, imf.copy()))
IMF = IMF*scale
imfNo += 1
if self.end_condition(scaledS, IMF) or imfNo==max_imf:
notFinish = False
break
#~ # Saving residuum
#~ Res = Res - imf
#~ #Res = scaledS - np.sum([IMF[i] for i in range(imfNo)],axis=0)
#~ IMF[imfNo] = Res
#~ imfNo += 1
return IMF
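####################################
# Hedged illustration (not part of the original module): a minimal sketch of
# driving the class above with a fixed number of sifting iterations per IMF.
# The test signal and all parameter values below are arbitrary examples, not
# recommendations; see the __main__ block further down for the author's own run.
def _demo_emd_quickstart():
    t = np.linspace(0, 1, 200)
    s = np.sin(2 * np.pi * 5 * t) + 0.5 * t
    emd = EMD(spline_kind='cubic', nbsym=2)
    emd.FIXE = 10                       # fixed number of sifting iterations per IMF
    return emd.emd(s, t, max_imf=2)     # decompose into at most 2 IMFs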
###################################################
## Beginning of program
if __name__ == "__main__":
import pylab as plt
# Logging options
logging.basicConfig(level=logging.DEBUG)
# EMD options
max_imf = -1
DTYPE = np.float64
# Signal options
N = 400
tMin, tMax = 0, 2*np.pi
T = np.linspace(tMin, tMax, N, dtype=DTYPE)
S = np.sin(20*T*(1+0.2*T)) + T**2 + np.sin(13*T)
S = S.astype(DTYPE)
print("Input S.dtype: "+str(S.dtype))
# Prepare and run EMD
emd = EMD()
emd.PLOT = 0
emd.FIXE_H = 5
emd.nbsym = 2
emd.spline_kind = 'cubic'
emd.DTYPE = DTYPE
nIMF = emd.emd(S, T, max_imf)
imfNo = nIMF.shape[0]
# Plot results
c = 1
r = np.ceil((imfNo+1)/c)
plt.ioff()
plt.subplot(r,c,1)
plt.plot(T, S, 'r')
plt.xlim((tMin, tMax))
plt.title("Original signal")
for num in range(imfNo):
plt.subplot(r,c,num+2)
plt.plot(T, nIMF[num],'g')
plt.xlim((tMin, tMax))
plt.ylabel("Imf "+str(num+1))
plt.tight_layout()
plt.show()
|
[
"laszukdawid@gmail.com"
] |
laszukdawid@gmail.com
|
df8f72f59e79a29fcf54fef95a406b4bea46673c
|
7a43aaec2bd9d5253140b0f72d9497d39bef0b64
|
/tests/__init__.py
|
fac8036076a11c0395797da95b332a29b09b7338
|
[
"MIT"
] |
permissive
|
gummybuns/cerami
|
92785dc7cd4c802c73c38cad0895896cd2e22099
|
e97c0baa42c4bdfb10bbe3b4b859873e3d50aa3a
|
refs/heads/master
| 2023-08-29T22:22:28.909170
| 2021-10-17T17:25:00
| 2021-10-17T17:25:00
| 246,976,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
from .cerami import *
|
[
"zmb1391@gmail.com"
] |
zmb1391@gmail.com
|
4b56ab7cc00d775efc6d4bee863ef43a526f7760
|
7290df6b7b374e8eba076f9f221e1bd397408a06
|
/photoboard/tests/test.py
|
b0ce5df3fb7c71f4fde1e8e6183a323e68381800
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
teriyakichild/photoboard
|
ab7ee65db03194749723ae8e9b57f7045174e494
|
837d69875a775b765ea01269a9ff4e6b76956fb5
|
refs/heads/master
| 2020-05-04T21:52:10.146597
| 2014-06-11T01:45:55
| 2014-06-11T01:45:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from backend import photos, boards
p = photos()
#print p.new('asdf',1,1)
print p.get(1)
b = boards()
print p.all(1)
print b.get(1)
|
[
"tony.rogers@rackspace.com"
] |
tony.rogers@rackspace.com
|
d1c0283dbbd07a685621aecc3bcb1b520e23bc80
|
ffad717edc7ab2c25d5397d46e3fcd3975ec845f
|
/Python/pyesri 2/EXAMPLES/sequence_operators.py
|
805cd4fd7825ef7dbbf784c7fcaae6de25d43cbb
|
[] |
no_license
|
shaunakv1/esri-developer-conference-2015-training
|
2f74caea97aa6333aa38fb29183e12a802bd8f90
|
68b0a19aac0f9755202ef4354ad629ebd8fde6ba
|
refs/heads/master
| 2021-01-01T20:35:48.543254
| 2015-03-09T22:13:14
| 2015-03-09T22:13:14
| 31,855,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
#!/usr/bin/python
colors = ["red", "blue", "green", "yellow", "brown", "black"]
months = (
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
)
print "yellow in colors: ",("yellow" in colors)
print "pink in colors: ",("pink" in colors)
print "colors:", ",".join(colors)
del colors[4] # remove brown
print "removed 'brown':", ",".join(colors)
colors.remove('green')
print "removed 'green':", ",".join(colors)
sum_of_lists = [True] + [True] + [False]
print "sum of lists:",sum_of_lists
product = [True] * 5
print "product of lists:",product
|
[
"shaunakv1@gmail.com"
] |
shaunakv1@gmail.com
|
1c6bb1ac916d6d185e810dbe859a21cf287afe47
|
801125b61d11a4c1d519cbc77dd96f478678f372
|
/Parlett-Reid.py
|
e81ae3f70aecf955fc6166c7053a1e840c6117af
|
[] |
no_license
|
alex9609/Analisis-Numerico-Copy-
|
0956f594e489ef07ecf969b4dfcda10c0ecf83f5
|
2a3cdc771c39471e22b7cec1aed49d5fb45c862c
|
refs/heads/master
| 2023-03-23T23:32:05.310942
| 2020-06-05T07:40:52
| 2020-06-05T07:40:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,780
|
py
|
# numpy library
import numpy as np
from time import*
import math
import os
import csv
def mulVector(A,b,n):
v=[]
for i in range(n):
v.append(sum(A[i][j]*b[j] for j in range(n)))
return v
def mulMatrices(A, B, n):
M = [[0 for f in range(n)] for c in range(n)]
for i in range(n):
for j in range(n):
for k in range(n):
M[i][j] += A[i][k]*B[k][j]
return M
def inversa(M,n):
I = []
for i in range (n):
I.append([0]*(n))
for i in range (n):
I[i][i] = 1
mayor = 0
Q = []
Q1 = []
for s in range (n):
Q.append(0)
Q1.append(0)
for i in range (n):
j = i+1
if M[i][i] == 0:
p = i+1
mayor = M[j][i]
for j in range (i+2,n):
if(abs(mayor) < abs(M[j][i])):
mayor= M[j][i]
p = j
Q=M[i]
M[i]=M[p]
M[p]=Q
Q1=I[i]
I[i]=I[p]
I[p]=Q1
for j in range (0,i):
w = M[j][i]*(M[i][i])**(-1)
for k in range (i,n):
M[j][k] =M[j][k] - (w*M[i][k])
M[j][i]=0
for k in range (n):
I[j][k] =I[j][k] - (w*I[i][k])
for j in range (i+1,n):
w = M[j][i]*(M[i][i])**(-1)
for k in range (i,n):
M[j][k] =M[j][k] - (w*M[i][k])
M[j][i]=0
for k in range (n):
I[j][k] =I[j][k] - (w*I[i][k])
for i in range (n):
I[i][i]=I[i][i]*(M[i][i])**(-1)
return I
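# Note (added commentary, not in the original script): inversa() builds the inverse by
# Gauss-Jordan elimination, applying the same row operations (including the row swaps
# used when a diagonal pivot is zero) to an identity matrix I alongside M.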
def transMatriz(M,n):
return [[ M[i][j] for i in range(n)] for j in range(n)]
return M
def esSimetrica(A,n):
for i in range(n):
for j in range(i+1,n):
if A[i][j] != A[j][i]:
return False
return True
'''def imprimeMatriz(A):
for i in range(len(A)):
text = " |"
for j in range(len(A[i])):
if(j==len(A)):
text = text +str("%8.3f"%A[i][j])
else:
text = text +str("%8.3f"%A[i][j])
print (text+"| ")
print
'''
def metodoParlet(aa,bb,nn):
global A,a,b,n
a = aa[:]
b = bb[:]
n = nn
A=[]
for i in range(n):
A.append([0]*(n+1))
for i in range(n):
for j in range(n):
A[i][j]=a[i][j]
for i in range(n):
A[i][n]=b[i]
I,P,M,L,U,T,G,F,S,PP,W,PPT,b=[],[],[],[],[],[],[],[],[],[],[],[],[]
for i in range (n):
I.append([0]*(n))
PPT.append([0]*(n))
P.append([0]*(n))
W.append([0]*(n))
M.append([0]*(n))
L.append([0]*(n))
PP.append([0]*(n))
U.append([0]*(n+1))
T.append([0]*(n+1))
S.append([0]*(n+1))
G.append(0)
F.append(0)
b.append(0)
P[i][i]=1
PP[i][i]=1
I[i][i]=1
W[i][i]=1
for i in range(n):
L[i][i]=1
for i in range(n):
for j in range(n+1):
T[i][j]=A[i][j]
# algorithm #
for i in range(n-2):
P=[]
for j in range (n):
P.append([0]*(n))
P[j][j]=1
for j in range(n):
for k in range(n):
S[j][k] = I[j][k]
#Pivoteo#
p = i+1
mayor = abs(T[i+1][i])
for j in range (i+2,n):
if mayor < abs(T[j][i]):
mayor = abs(T[j][i])
p = j
P[i+1] = S[p]
P[p] = S[i+1]
#PAPt#
F=T[i+1]
T[i+1] = T[p]
T[p] = F
for j in range(n):
for k in range(n+1):
U[j][k] = T[j][k]
for j in range (n):
for k in range (n):
suma = 0
for l in range (n):
suma = suma + U[j][l]*P[k][l]
T[j][k] = suma
#Gauss#
for j in range (n):
G[j]=0
U.append([0]*(n+1))
for j in range (i+2,n):
G[j] = U[j][i]*(U[i+1][i]**(-1))
# Gauss matrix #
for j in range (n):
for l in range (n):
M[j][l] = S[j][l] - G[j]*I[i+1][l]
#MPAPTMT#
for j in range(n):
for k in range(n+1):
U[j][k] = T[j][k]
for j in range(n):
for k in range(n):
suma = 0
for l in range(n):
suma = suma + M[j][l]*U[l][k]
T[j][k] = suma
for j in range(n):
for k in range(n+1):
U[j][k] = T[j][k]
for j in range(n):
for k in range(n):
suma = 0
for l in range(n):
suma = suma + U[j][l]*M[k][l]
T[j][k] = suma
# total P #
for j in range(n):
for k in range(n):
suma = 0
for l in range(n):
suma = suma + P[j][l]*W[l][k]
PP[j][k] = suma
for j in range (n):
for k in range(n):
W[j][k]=PP[j][k]
#M2P2M1P1#
L= mulMatrices(mulMatrices(M,P,n),L,n)
for j in range(n):
for k in range(n):
PPT[j][k]=PP[k][j]
L=mulMatrices(L,PPT,n)
L=inversa(L,n)
Q=[]
for s in range (n+1):
Q.append(0)
Lt=[]
for s in range (n):
Lt.append([0]*n)
z=[]
for i in range (n):
z.append(0)
z[0]=T[0][n]*(L[0][0]**(-1))
for j in range (1,n):
suma=0
for k in range (j):
suma+= L[j][k]*z[k]
z[j]=(T[j][n]-suma)*(L[j][j]**(-1))
matT=[]
for i in range(n):
matT.append([])
for j in range(n):
matT[i].append(T[i][j])
for i in range (n):
j=i+1
if T[i][i]==0:
p=i+1
mayor = T[j][i]
for j in range (i+2,n):
if(abs(mayor) < abs(T[j][i])):
mayor= T[j][i]
p = j
Q=T[i]
T[i]=T[p]
T[p]=Q
k=z[i]
z[i]=z[p]
z[p]=k
for j in range (i+1,n):
w = T[j][i]*(T[i][i])**(-1)
for k in range (i,n+1):
T[j][k] = T[j][k] - (w*T[i][k])
T[j][i]=0
z[j] = z[j] - (w*z[i])
W=[]
for i in range (n):
W.append(0)
W[n-1]=z[n-1]/T[n-1][n-1]
for j in range (n-2,-1,-1):
suma=0
for k in range (j+1,n):
suma+= T[j][k]*W[k]
W[j]=(z[j]-suma)/T[j][j]
for i in range(n):
for j in range (n):
Lt[i][j]= L[j][i]
y=[]
for i in range (n):
y.append(0)
y[n-1]=W[n-1]/Lt[n-1][n-1]
for j in range (n-2,-1,-1):
suma=0
for k in range (j+1,n):
suma+= Lt[j][k]*y[k]
y[j]=(W[j]-suma)*(Lt[j][j]**(-1))
x=[]
for i in range (n):
x.append(0)
for j in range(n):
suma = 0
for l in range(n):
suma=suma+PPT[j][l]*y[l]
x[j]=suma
print('PARLETT-REID METHOD',end="\n\n")
print("This method pre- and post-multiplies the matrix A by elementary matrices to build the factorization PAPT = LTLT\n")
L = np.array(L,float)
PP = np.array(PP,float)
matT = np.array(matT,float)
print ("\nMatrix L:")
print(L.round(7))
print ("\nMatrix P:")
print(PP.round(7))
print ("\nMatrix T:")
print(matT.round(7))
print ("\nSolving A.x = b via P.A.Pt = L.T.Lt:")
return x
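# Note (added commentary, not in the original script): once P.A.Pt = L.T.Lt is built,
# the solve above proceeds in stages: forward-substitute with L to get z, eliminate on
# the (permuted, tridiagonal-like) T to get W, back-substitute with Lt to get y, and
# finally undo the permutation with x = Pt.y.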
# Set the dimension of matrix A
n = 4
# Matrix to use:
A = np.array([[1,1,1,1],
[8,4,2,1],
[3,2,1,0],
[12,2,0,0]],float)
A1 = np.array([[1,1,1,1],
[8,4,2,1],
[3,2,1,0],
[12,2,0,0]],float)
# Set the right-hand-side vector b
b=np.array([2,6,5,-6],float)
b1=np.array([2,6,5,-6],float)
if esSimetrica(A,n):
t1=perf_counter(); # Record algorithm start time
x= metodoParlet(A,b,n)
t2=perf_counter(); # Record algorithm end time
x = np.array(x,float)
print ("Solution 'x' is: "+str(x.round(7)))
print("\nExecution time: "+str(t2-t1))
R = x - np.linalg.solve(A1,b1)
print("\nSolution quality:\n")
print(str((np.linalg.norm(R,np.inf)/np.linalg.norm(b1,np.inf))*(1/np.linalg.cond(A1,np.inf)))+" ≤ ||E||∞/||x||∞ ≤ "+str((np.linalg.norm(R,np.inf)/np.linalg.norm(b1,np.inf))*np.linalg.cond(A1,np.inf)))
else:
At = transMatriz(A,n)
C = mulMatrices(At, A, n)
d = mulVector(At, b, n)
t1=perf_counter(); # Record algorithm start time
x= metodoParlet(C,d,n)
t2=perf_counter(); # Record algorithm end time
x = np.array(x,float)
print ("Solution 'x' is: "+str(x.round(7)))
print("\nExecution time: "+str(t2-t1))
R = x - np.linalg.solve(A1,b1)
print("\nSolution quality:\n")
print(str((np.linalg.norm(R,np.inf)/np.linalg.norm(b1,np.inf))*(1/np.linalg.cond(A1,np.inf)))+" ≤ ||E||∞/||x||∞ ≤ "+str((np.linalg.norm(R,np.inf)/np.linalg.norm(b1,np.inf))*np.linalg.cond(A1,np.inf)))
|
[
"noreply@github.com"
] |
alex9609.noreply@github.com
|
f19b9dea2cdbf9d1ae985214ba886a130f688479
|
ccc717f01d8ef6fcdbd16933213ad15f2d72d60c
|
/app/models.py
|
0375fc6c9a7f81f5e9825dcf3c1b375beb2bb435
|
[] |
no_license
|
benpmeredith/NSSD-app
|
b910fad3e86fa3baa56686de322567498e247da4
|
6c9f35bfd180025059521d43beb0e0693e5c26f0
|
refs/heads/master
| 2020-04-14T16:47:24.517308
| 2018-02-14T06:53:15
| 2018-02-14T06:53:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
"""Define models for storing user data."""
from app import db
import datetime
class Search(db.Model):
"""Search persistence model."""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.String())
search_string = db.Column(db.String())
datetime = db.Column(
db.DateTime, default=datetime.datetime.utcnow)
def __init__(self, user_id, search_string):
"""Initialize search model."""
self.user_id = user_id
self.search_string = search_string
def __repr__(self):
"""Return string representation of search model."""
return '<{}: {}>'.format(self.user_id, self.search_string)
|
[
"ryantlee9@gmail.com"
] |
ryantlee9@gmail.com
|
89892adafba422b3323e71df969dc9751b91ce37
|
2e893c59ad8adc85d3421a4a20b23ed3bb8e748e
|
/mobet/mobet_app/migrations/0004_auto_20200512_0205.py
|
f3f81ad41b5bae65c5fb4306c7caa60895c0e3a5
|
[] |
no_license
|
dlwlgjs132/mobet_server
|
b73e8775ebdcc0bac0eb07e1268eaacbb28e1fc2
|
2ac1d0b006e5f674821b9e91f2de2d6bfd3c5d1a
|
refs/heads/master
| 2022-09-11T15:11:47.628055
| 2020-06-01T09:38:16
| 2020-06-01T09:38:16
| 268,480,263
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Generated by Django 3.0.5 on 2020-05-12 02:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mobet_app', '0003_remove_user_imei'),
]
operations = [
migrations.AlterField(
model_name='user',
name='PHONENUM',
field=models.CharField(default='', max_length=20, unique=True),
),
]
|
[
"dlwlgjs132@naver.com"
] |
dlwlgjs132@naver.com
|
dfcd5824ecd123f52783b26c5ed7ada99ea3bb2a
|
a803c82161b0d74780e9adf2257e6b56f7ad2c14
|
/POO/encapsulamento/main.py
|
69de886891be4bf4e2632130dc23c1145b678c1e
|
[] |
no_license
|
ArondevIta/curso-python
|
83b9c7e998b753ff8a74bb2ccd1893e356aacf67
|
5d5519654f166d8e7b27a1f71d48acac6bdd723e
|
refs/heads/master
| 2022-12-25T14:02:47.669450
| 2020-09-17T23:30:02
| 2020-09-17T23:30:02
| 295,863,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
class BaseDeDados:
def __init__(self):
self.__dados = {}
@property
def dados(self):
return self.__dados
def inserir_cliente(self, id, nome):
if 'clientes' not in self.__dados:
self.__dados['clientes'] = {id: nome}
else:
self.__dados['clientes'].update({id: nome})
def lista_clientes(self):
for id, nome in self.__dados['clientes'].items():
print(id, nome)
def apaga_cliente(self, id):
del self.__dados['clientes'][id]
bd = BaseDeDados()
bd.inserir_cliente(1, 'Otavio')
bd.inserir_cliente(2, 'Zaine')
bd.inserir_cliente(3, 'João')
bd.lista_clientes()
print(bd.dados)
# _ protected
# __ private
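# Note (added commentary, not in the original file): a double-underscore attribute like
# __dados is only name-mangled, not truly hidden; it remains reachable from outside as
# bd._BaseDeDados__dados, which is why the read-only @property is the conventional access.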
|
[
"aronamaral@gmail.com"
] |
aronamaral@gmail.com
|
733a87559ba5236c55ce878f99afa55fac739206
|
e8ea4a4af4c5b2e237c372dc56b14026a3d0fa8c
|
/stack.py
|
b0f208e91384256fa605dbb889a62f9c51085f26
|
[] |
no_license
|
ProArif/Python-Data-Structures
|
ca5233687a17a7c0deb59015a1efba37c5f2cbcd
|
e2c72a118f72372335a322978bbab6a4ca68fcb4
|
refs/heads/master
| 2020-05-01T05:08:28.877351
| 2019-03-27T17:57:55
| 2019-03-27T17:57:55
| 177,293,167
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
class Stack:
def __init__(self):
self.stack = []
def add(self,value):
#add value in the stack
if value not in self.stack:
self.stack.append(value)
return True
else:
return False
def peek(self):
#View element at the top of the stack (the most recently added element)
return self.stack[-1]
def remove(self):
#remove element from the stack
if len(self.stack) <=0:
return "There is no element in the stack"
else:
return self.stack.pop()
def size(self):
return len(self.stack)
Astack = Stack()
Astack.add("Sat")
Astack.add("Sun")
Astack.add('Mon')
print(Astack.peek())
print()
print(Astack.size())
Astack.add('Tue')
print(Astack.remove())
print(Astack.remove())
print()
print(Astack.peek())
print()
print(Astack.remove())
|
[
"noreply@github.com"
] |
ProArif.noreply@github.com
|
bebed8fe4197ea43a2d1a8fd55cf0e5235c8326a
|
41f1094515257458849228ac5b026b34c4a7814b
|
/bind/python/example.py
|
61181ed59d1a9501a71829e25b58d6c62ee16931
|
[] |
no_license
|
srsem/fix_parser
|
db98f4adb9edf96f75cf07555ce3b89f490184e2
|
91b19abf724c854bf2f7f5b03b412f6c7c69a536
|
refs/heads/master
| 2023-06-05T05:27:29.223559
| 2021-06-26T01:51:30
| 2021-06-26T01:51:30
| 380,391,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
from fix_parser import *
from fix_fields import *
p = FixParser(b"fix.4.4.xml", PARSER_FLAG_CHECK_ALL)
print("PROT VERSION = {0}".format(p.getProtocolVer()))
# create Exec Report
m = p.createMsg("8")
# check its params
print("MSG TYPE = {0}".format(m.getType()))
print("MSG NAME = {0}".format(m.getName()))
# fill message
m.setFieldAsString(FIXFieldTag_SenderCompID, b"QWERTY_12345678")
m.setFieldAsString(FIXFieldTag_TargetCompID, b"ABCQWE_XYZ")
m.setFieldAsInt32(FIXFieldTag_MsgSeqNum, 34)
m.setFieldAsString(FIXFieldTag_TargetSubID, "bsrv-ivanov_ii1")
m.setFieldAsString(FIXFieldTag_SendingTime, b"20120716-06:00:16.230")
m.setFieldAsString(FIXFieldTag_OrderID, b"1")
m.setFieldAsString(FIXFieldTag_ClOrdID, b"CL_ORD_ID_1234567")
m.setFieldAsString(FIXFieldTag_ExecID, b"FE_1_9494_1")
m.setFieldAsChar(FIXFieldTag_ExecType, b"0")
m.setFieldAsChar(FIXFieldTag_OrdStatus, b"1")
m.setFieldAsString(FIXFieldTag_Account, b"ZUM")
m.setFieldAsString(FIXFieldTag_Symbol, b"RTS-12.12")
m.setFieldAsChar(FIXFieldTag_Side, b"1")
m.setFieldAsDouble(FIXFieldTag_OrderQty, 25.0)
m.setFieldAsDouble(FIXFieldTag_Price, 135155.0)
m.setFieldAsChar(FIXFieldTag_TimeInForce, b"0")
m.setFieldAsDouble(FIXFieldTag_LastQty, 0)
m.setFieldAsDouble(FIXFieldTag_LastPx, 0)
m.setFieldAsDouble(FIXFieldTag_LeavesQty, 25.0)
m.setFieldAsDouble(FIXFieldTag_CumQty, 0)
m.setFieldAsDouble(FIXFieldTag_AvgPx, 0)
m.setFieldAsChar(FIXFieldTag_HandlInst, b"1")
m.setFieldAsString(FIXFieldTag_Text, b"COMMENT12")
# convert msg to string, \1 delimiter replaced with '|', just for pretty printing. '|' code is 124
str = m.toString(124)
print("MSG = {0}".format(str))
# parse input string and create FIX message
m1 = p.parse(str, 124)
# just print several fields of m1, to make sure str parsed ok
print("MSG1 TYPE = {0}".format(m1.getType()))
print("MSG1 NAME = {0}".format(m1.getName()))
print("SenderCompID = {0}".format(m1.getFieldAsString(FIXFieldTag_SenderCompID)))
print("TargetCompID = {0}".format(m1.getFieldAsString(FIXFieldTag_TargetCompID)))
|
[
"dmitryme@gmail.com"
] |
dmitryme@gmail.com
|
5065cf76c38d0a632bff95fee81618ee568bade6
|
0ec65a65935e877dec4fe97ff9b9422eee1b0d74
|
/installer.py
|
468708a0518c911dfce8d2f35196f4101398872b
|
[] |
no_license
|
kuifye/-python-
|
48f438a9a5bac340175212810e2e8a7c89b6e5ec
|
26c2d2793901c611c498fe475d0e7af67e71de46
|
refs/heads/master
| 2022-11-02T21:45:19.345657
| 2022-10-18T12:30:00
| 2022-10-18T12:30:00
| 266,112,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyInstaller.__main__ import run
# -F: bundle everything into a single EXE file
# -w: windowed mode, no console window
# --paths: paths to dependency packages
# --icon: icon file
# --noupx: do not compress with UPX
# --clean: clean up temporary files
if __name__ == '__main__':
opts = ['-F',
#'--paths=D:\\Program Files\\Python\\Lib\\site-packages\\PyQt5\\Qt\\bin',
#'--paths=D:\\Program Files\\Python\\Lib\\site-packages\\jpype',
#'--noupx',
#'--clean',
#'--hidden-import=numpy',
'Minions.py']
run(opts)
|
[
"noreply@github.com"
] |
kuifye.noreply@github.com
|
7917878c65d45f8c8695a604d021cbc8f6a8cbd8
|
68a52ad1df836c9f6d922515b2f896b6928ce6a0
|
/SafetyProductionSystem/SafetyProductionSystem/urls.py
|
8b569622a45638f563c0605df75fb3b870cccf20
|
[] |
no_license
|
Chuazhen0/SafetyProductionSystem
|
1141f845e04b032ff2a230c8def26066f061600c
|
442d5df3818d43aebb9830f2456c73018aae2acf
|
refs/heads/master
| 2020-05-20T12:47:46.365020
| 2019-05-08T09:56:01
| 2019-05-08T09:56:01
| 185,579,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
"""SafetyProductionSystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include
from django.contrib import admin
from systemsettings import views
from django.conf.urls import url
from django.views.static import serve
from . import settings
urlpatterns = [
url(r'^$', views.mylogin,name='login'), # System settings - login
url(r'^admin/', admin.site.urls),
url(r'^netstructure/', include('netstructure.urls')), # Network structure information
url(r'^netstaff/', include('netstaff.urls')), # Network structure information
url(r'^staff_qua/', include('staff_qua.urls')), # Supervision network staff qualification information
url(r'^mon_plan_sum/', include('mon_plan_sum.urls')), # Monthly plan and summary
url(r'^monworkexe/', include('monworkexe.urls')), # Monthly work execution
url(r'^yearplan/', include('yearplan.urls')), # Annual plan
url(r'^yearsum/', include('yearsum.urls')), # Annual summary
url(r'^warning/', include('warning.urls')), # Alarm notification form
url(r'^warningre/', include('warningre.urls')), # Alarm acknowledgement form
url(r'^standard/', include('standard.urls')), # Indicator management
url(r'^systemsettings/', include('systemsettings.urls')), # System settings
url(r'^weekworkplan/', include('weekworkplan.urls')), # Periodic inspection plan
url(r'^weekworktask/', include('weekworktask.urls')), # Periodic inspection task
url(r'^qua25/', include('qua25.urls')), # 25 key anti-accident measures -- qualification management
url(r'^quatype/', include('quatype.urls')), # Qualification type management
url(r'^regularworkplan/', include('regularworkplan.urls')), # Regular work standard
url(r'^regularworktask/', include('regularworktask.urls')), # Regular work task
####################################### Workflow ##############################################
# url(r'^wf/list/', RedirectView.as_view(url='/wf/list/'), name='home'),
url(r'^wf/', include('myworkflow.urls')),
url(r'^attachment/', include('lbattachment.urls')),
url(r'^myform/', include('myform.urls')),
url(r'media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
]
|
[
"Caohuazhenrn@163.com"
] |
Caohuazhenrn@163.com
|
9804f5a0d87e2460671de352627e37ed3849b111
|
9b32e4f0364d7299d44dde82dae2c785cea4accd
|
/python/q10.py
|
927bf2ae7a116d6e629dfce05fbc613df9f790fd
|
[] |
no_license
|
kyamaguchi/nlp100
|
59dc92b1f8900fb335565d7e37713995e3e78dd9
|
4b91454f5c20d3eef11ba48e4bf3a66cdcc7bf9a
|
refs/heads/master
| 2020-05-25T07:06:58.531070
| 2019-06-05T07:29:47
| 2019-06-05T08:20:07
| 187,678,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!/usr/bin/env python
def question():
print("10. 行数のカウント")
print("行数をカウントせよ.確認にはwcコマンドを用いよ.")
import subprocess
res = subprocess.check_output('cat hightemp.txt | wc -l', shell=True, universal_newlines=True)
def main():
print(len(open('hightemp.txt').readlines()))
print(res.strip())
if __name__ == '__main__':
question()
main()
|
[
"kzh.yap@gmail.com"
] |
kzh.yap@gmail.com
|
3ec0c53be633c875cc471cd7a3a2712128f74189
|
893925888bff43d4a2681d8def70474234a05f59
|
/Scans/MACrossOver/Day100SMACrossDay200SMAFromBelow.py
|
3ded375a175fe4e87ed4c494de09d279de61e095
|
[] |
no_license
|
webclinic017/Burrito
|
05fbfade564a2855a08f7f7975b7094ed1bdc491
|
086d4d4632e53695e1f43b4efe61c318704936cc
|
refs/heads/main
| 2023-07-12T08:38:54.847707
| 2021-08-15T07:40:40
| 2021-08-15T07:40:40
| 460,657,475
| 1
| 0
| null | 2022-02-18T00:38:57
| 2022-02-18T00:38:56
| null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
import os.path
import start
import pandas as pd
from Scans.MACrossOver.MACrossOverScan import isMACrossOver
from Scans.Scan import Scan
import Constants
projectPath = Constants.ProjectPath
class Day100SMACrossDay200SMAFromBelow(Scan):
def __init__(self):
pass
def isCriteriaMet(self, symbol, candleSize="DAILY", apiProvider=None, timeSeries=None, interval=None):
if timeSeries is None:
if os.path.exists(projectPath + "/resources/" + symbol + "/" + candleSize):
timeSeries = pd.read_json(
projectPath + "/resources/" + symbol + "/" + candleSize + "/" + symbol + ".json",
convert_dates=True)
else:
timeSeries = start.getSeries(apiProvider, symbol, candleSize)
if timeSeries is not None:
return self.isCriteriaMet(symbol, candleSize, timeSeries=timeSeries, interval=interval)
else:
return False
if interval is not None:
df_mask = (interval[0] <= timeSeries['timestamp']) & (timeSeries['timestamp'] <= interval[1])
timeSeries = timeSeries[df_mask]
if timeSeries is not None:
return isMACrossOver(symbol, [100,200], "SMA", candleSize=candleSize, timeSeries=timeSeries)
|
[
"dhariwal.mohit9@gmail.com"
] |
dhariwal.mohit9@gmail.com
|
0ba14538001baa0239a52b99643b728a1603f9bf
|
c2b8adb8b4062a14bfc7d8c8fa2938359530e028
|
/mfes/evaluate_function/eval_sys_mv.py
|
9e758009dcb744cd3338767496cf3339a1ee76f9
|
[] |
no_license
|
thomas-young-2013/hp-tuner
|
1e7d277f3c0135b9032884e3f20b050f19012918
|
e606569719a14d8445633e42aedc8296a63a577a
|
refs/heads/master
| 2023-04-15T08:41:02.514912
| 2020-09-14T13:23:55
| 2020-09-14T13:23:55
| 225,173,361
| 0
| 2
| null | 2023-03-24T22:31:25
| 2019-12-01T14:17:29
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
from __future__ import division, print_function, absolute_import
import os
import sys
from functools import partial
sys.path.append(os.getcwd())
from solnml.datasets.utils import load_train_test_data
from mfes.evaluate_function.sys.combined_evaluator import train as _train
train_node, test_node = load_train_test_data('mv', data_dir='./', task_type=0)
train = partial(_train, data_node=train_node)
|
[
"459240868@qq.com"
] |
459240868@qq.com
|
476ef2de7ac6276e12e24a4f0a01f5f5313822c1
|
949d8f2b9c91194ef75864fd1759902ef76ce770
|
/aa.py
|
8bc89b358efd5c841a23155c5ad28f25a961ff67
|
[] |
no_license
|
panchengl/yolov3_prune
|
8c4508b8b74fbd0f9a43743acc8bf75c687f99c2
|
9016e5efa63ffe17801c4e8c0892743f8cb48834
|
refs/heads/master
| 2020-08-22T06:09:11.719678
| 2020-03-31T07:23:02
| 2020-03-31T07:23:02
| 216,334,325
| 14
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,603
|
py
|
# a =[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 18, 22]
# b = 0
# new_shortcut_list = [0, 0, 0, 0, 0]
# new_shortcut_list_2 = [0, 0, 0, 0, 0]
#
# if 0 in a:
# new_shortcut_list[0] += 1
# if 1 in a:
# new_shortcut_list[1] += 1
# if 2 in a:
# new_shortcut_list[1] += 1
# if 3 in a:
# new_shortcut_list[2] += 1
# if 4 in a:
# new_shortcut_list[2] += 1
# if 5 in a:
# new_shortcut_list[2] += 1
# if 6 in a:
# new_shortcut_list[2] += 1
# if 7 in a:
# new_shortcut_list[2] += 1
# if 8 in a:
# new_shortcut_list[2] += 1
# if 9 in a:
# new_shortcut_list[2] += 1
# if 10 in a:
# new_shortcut_list[2] += 1
# if 11 in a:
# new_shortcut_list[3] += 1
# if 12 in a:
# new_shortcut_list[3] += 1
# if 13 in a:
# new_shortcut_list[3] += 1
# if 14 in a:
# new_shortcut_list[3] += 1
# if 15 in a:
# new_shortcut_list[3] += 1
# if 16 in a:
# new_shortcut_list[3] += 1
# if 17 in a:
# new_shortcut_list[3] += 1
# if 18 in a:
# new_shortcut_list[3] += 1
# if 19 in a:
# new_shortcut_list[4] += 1
# if 20 in a:
# new_shortcut_list[4] += 1
# if 21 in a:
# new_shortcut_list[4] += 1
# if 22 in a:
# new_shortcut_list[4] += 1
# print(new_shortcut_list)
#
# for i in range(23):
# if i in a:
# if i == 0:
# new_shortcut_list_2[0] += 1
# if 1 <= i and i < 3:
# new_shortcut_list_2[1] += 1
# if 3 <= i and i < 11:
# new_shortcut_list_2[2] += 1
# if 11 <= i and i < 19:
# new_shortcut_list_2[3] += 1
# if 19 <= i and i < 23:
# new_shortcut_list_2[4]+= 1
# print(new_shortcut_list_2)
# layer_prune_name = []
# prune_darknet_layer = [2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30,
# 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51]
# for i in prune_darknet_layer:
# if i == 0:
# layer_prune_name.append('yolov3/darknet53_body/Conv/weights:0')
# else:
# layer_prune_name.append('yolov3/darknet53_body/Conv_' + str(i) + '/weights:0')
# a = [11, 19, 21, 12, 20]
# b = []
# print(layer_prune_name)
#
# for prune_id in range(5):
# print('prune layer ')
# c = a[prune_id]
# b.append((prune_darknet_layer[2*c] ))
# b.append((prune_darknet_layer[2*c+1] ))
# print((prune_darknet_layer[2*c] ))
# print((prune_darknet_layer[2*c + 1]) )
#
# # first = dict()
# # second = dict()
# # third = dict()
# # fourth = dict()
# # last = dict()
# first = []
# second = []
# third = []
# fourth = []
# last = []
# import copy
# # for i in b:
# # layer_prune_name.remove('yolov3/darknet53_body/Conv_' + str(i) + '/weights:0')
# first = copy.deepcopy(layer_prune_name)
# first.remove('yolov3/darknet53_body/Conv_' + str(27) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(28) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(44) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(45) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(48) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(49) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(29) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(30) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(46) + '/weights:0')
# first.remove('yolov3/darknet53_body/Conv_' + str(47) + '/weights:0')
# # layer_prune_name.remove('yolov3/darknet53_body/Conv_' + str(27) + '/weights:0')
# for i, j in enumerate(layer_prune_name):
# print(str(j).split('/')[2][5:])
# # first[i] = j
# # if int(str(j).split('/')[2][5:]) >= b[0]:
# # first[i] = 'yolov3/darknet53_body/Conv_' + str(int(str(j).split('/')[2][5:]) -1) + '/weights:0'
# # if int(str(j).split('/')[2][5:]) >= b[1]:
# # first[i] = 'yolov3/darknet53_body/Conv_' + str(int(str(j).split('/')[2][5:]) - 1) + '/weights:0'
# # second = copy.deepcopy(first)
# # for i, j in enumerate(first):
# # print(str(j).split('/')[2][5:])
# # first[i] = j
# # if int(str(j).split('/')[2][5:]) >= b[0]:
# # first[i] = 'yolov3/darknet53_body/Conv_' + str(int(str(j).split('/')[2][5:]) -1) + '/weights:0'
#
# print(layer_prune_name)
# print(first)
#
# c = set(layer_prune_name)
# d = c.difference(first)
# print(d)
a =[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 18, 22]
b = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 18, 22]
import numpy as np
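# Element-wise product of the two (identical) index lists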
print(np.multiply(np.array(a), np.array(b)))
# print(a*b)
|
[
"2943499076@qq.com"
] |
2943499076@qq.com
|
839021b7c49f18913e28af75bad62f784ba5507c
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/tests/unit/aiplatform/test_pipeline_based_service.py
|
f7516714623b2b495b8d567b850c7b62006ddd59
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 21,933
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import pytest
from unittest import mock
from google.auth import credentials as auth_credentials
from google.protobuf import json_format
from google.cloud import storage
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform.metadata import constants
from google.cloud.aiplatform.utils import gcs_utils
from google.cloud.aiplatform_v1.services.pipeline_service import (
client as pipeline_service_client_v1,
)
from google.cloud.aiplatform_v1.types import (
pipeline_job as gca_pipeline_job_v1,
)
from google.cloud.aiplatform_v1.types import (
pipeline_state as gca_pipeline_state_v1,
)
from google.cloud.aiplatform._pipeline_based_service import (
pipeline_based_service,
)
from google.cloud.aiplatform_v1 import Execution as GapicExecution
from google.cloud.aiplatform_v1 import MetadataServiceClient
# pipeline job
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_PIPELINE_JOB_DISPLAY_NAME = "sample-pipeline-job-display-name"
_TEST_PIPELINE_JOB_ID = "sample-test-pipeline-202111111"
_TEST_GCS_BUCKET_NAME = "my-bucket"
_TEST_CREDENTIALS = auth_credentials.AnonymousCredentials()
_TEST_SERVICE_ACCOUNT = "abcde@my-project.iam.gserviceaccount.com"
_TEST_COMPONENT_IDENTIFIER = "fake-pipeline-based-service"
_TEST_PIPELINE_NAME_IDENTIFIER = "my-pipeline"
_TEST_INVALID_PIPELINE_NAME_IDENTIFIER = "not-a-valid-pipeline-name"
_TEST_PIPELINE_CREATE_TIME = datetime.datetime.now()
_TEST_TEMPLATE_PATH = f"gs://{_TEST_GCS_BUCKET_NAME}/job_spec.json"
_TEST_TEMPLATE_REF = {"test_pipeline_type": _TEST_TEMPLATE_PATH}
_TEST_PIPELINE_ROOT = f"gs://{_TEST_GCS_BUCKET_NAME}/pipeline_root"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
_TEST_NETWORK = f"projects/{_TEST_PROJECT}/global/networks/{_TEST_PIPELINE_JOB_ID}"
_TEST_PIPELINE_JOB_NAME = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/pipelineJobs/{_TEST_PIPELINE_JOB_ID}"
_TEST_INVALID_PIPELINE_JOB_NAME = (
f"prj/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/{_TEST_PIPELINE_JOB_ID}"
)
# executions: this is used in test_list_pipeline_based_service
_TEST_EXECUTION_PARENT = (
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default"
)
_TEST_RUN = "run-1"
_TEST_OTHER_RUN = "run-2"
_TEST_EXPERIMENT = "test-experiment"
_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_RUN}"
_TEST_EXECUTION_NAME = f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_EXECUTION_ID}"
_TEST_OTHER_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_OTHER_RUN}"
_TEST_OTHER_EXECUTION_NAME = (
f"{_TEST_EXECUTION_PARENT}/executions/{_TEST_OTHER_EXECUTION_ID}"
)
# execution metadata parameters: used in test_list_pipeline_based_service
_TEST_PARAM_KEY_1 = "learning_rate"
_TEST_PARAM_KEY_2 = "dropout"
_TEST_PIPELINE_PARAM_KEY = "pipeline_job_resource_name"
_TEST_PARAMS = {
_TEST_PARAM_KEY_1: 0.01,
_TEST_PARAM_KEY_2: 0.2,
_TEST_PIPELINE_PARAM_KEY: _TEST_PIPELINE_JOB_NAME,
}
_TEST_OTHER_PARAMS = {_TEST_PARAM_KEY_1: 0.02, _TEST_PARAM_KEY_2: 0.3}
# pipeline based service template json
_TEST_PIPELINE_PARAMETER_VALUES = {
"string_param": "hello world",
"bool_param": True,
"double_param": 12.34,
"int_param": 5678,
"list_int_param": [123, 456, 789],
"list_string_param": ["lorem", "ipsum"],
"struct_param": {"key1": 12345, "key2": 67890},
}
_TEST_PIPELINE_SPEC_JSON = json.dumps(
{
"pipelineInfo": {"name": "my-pipeline"},
"root": {
"dag": {"tasks": {}},
"inputDefinitions": {
"parameters": {
"string_param": {"parameterType": "STRING"},
"bool_param": {"parameterType": "BOOLEAN"},
"double_param": {"parameterType": "NUMBER_DOUBLE"},
"int_param": {"parameterType": "NUMBER_INTEGER"},
"list_int_param": {"parameterType": "LIST"},
"list_string_param": {"parameterType": "LIST"},
"struct_param": {"parameterType": "STRUCT"},
}
},
},
"schemaVersion": "2.1.0",
"components": {},
}
)
_TEST_PIPELINE_JOB = json.dumps(
{
"runtimeConfig": {"parameterValues": {}},
"pipelineSpec": json.loads(_TEST_PIPELINE_SPEC_JSON),
}
)
def make_pipeline_job(state):
return gca_pipeline_job_v1.PipelineJob(
name=_TEST_PIPELINE_JOB_NAME,
state=state,
create_time=_TEST_PIPELINE_CREATE_TIME,
service_account=_TEST_SERVICE_ACCOUNT,
network=_TEST_NETWORK,
pipeline_spec=json.loads(_TEST_PIPELINE_SPEC_JSON),
job_detail=gca_pipeline_job_v1.PipelineJobDetail(
task_details=[
gca_pipeline_job_v1.PipelineTaskDetail(
task_id=123,
execution=GapicExecution(
name=_TEST_EXECUTION_NAME,
display_name=_TEST_RUN,
schema_title=constants.SYSTEM_RUN,
schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
),
),
],
),
)
@pytest.fixture
def mock_pipeline_service_create():
with mock.patch.object(
pipeline_service_client_v1.PipelineServiceClient, "create_pipeline_job"
) as mock_create_pipeline_job:
mock_create_pipeline_job.return_value = make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
)
yield mock_create_pipeline_job
@pytest.fixture
def mock_pipeline_job_get():
with mock.patch.object(
pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
) as mock_get_pipeline_job:
mock_get_pipeline_job.side_effect = [
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
),
]
yield mock_get_pipeline_job
@pytest.fixture
def mock_pipeline_service_get_with_fail():
with mock.patch.object(
pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
) as mock_get_pipeline_job:
mock_get_pipeline_job.side_effect = [
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
),
make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED
),
]
yield mock_get_pipeline_job
@pytest.fixture
def mock_load_yaml_and_json(job_spec_json):
with mock.patch.object(
storage.Blob, "download_as_bytes"
) as mock_load_yaml_and_json:
mock_load_yaml_and_json.return_value = job_spec_json.encode()
yield mock_load_yaml_and_json
@pytest.fixture
def mock_pipeline_based_service_get():
with mock.patch.object(
pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
) as mock_get_pipeline_based_service:
mock_get_pipeline_based_service.return_value = make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
)
yield mock_get_pipeline_based_service
@pytest.fixture
def get_execution_mock():
with mock.patch.object(
MetadataServiceClient, "get_execution"
) as get_execution_mock:
get_execution_mock.return_value = GapicExecution(
name=_TEST_EXECUTION_NAME,
display_name=_TEST_RUN,
schema_title=constants.SYSTEM_RUN,
schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
metadata={"component_type": _TEST_COMPONENT_IDENTIFIER},
)
yield get_execution_mock
@pytest.fixture
def list_executions_mock():
with mock.patch.object(
MetadataServiceClient, "list_executions"
) as list_executions_mock:
list_executions_mock.return_value = [
GapicExecution(
name=_TEST_EXECUTION_NAME,
display_name=_TEST_RUN,
schema_title=constants.SYSTEM_RUN,
schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
metadata=_TEST_PARAMS,
),
GapicExecution(
name=_TEST_OTHER_EXECUTION_NAME,
display_name=_TEST_OTHER_RUN,
schema_title=constants.SYSTEM_RUN,
schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_RUN],
metadata=_TEST_OTHER_PARAMS,
),
]
yield list_executions_mock
@pytest.fixture
def mock_pipeline_bucket_exists():
def mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist(
output_artifacts_gcs_dir=None,
service_account=None,
project=None,
location=None,
credentials=None,
):
output_artifacts_gcs_dir = (
output_artifacts_gcs_dir
or gcs_utils.generate_gcs_directory_for_pipeline_artifacts(
project=project,
location=location,
)
)
return output_artifacts_gcs_dir
with mock.patch(
"google.cloud.aiplatform.utils.gcs_utils.create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist",
wraps=mock_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist,
) as mock_context:
yield mock_context
@pytest.mark.usefixtures("google_auth_mock")
class TestPipelineBasedService:
class FakePipelineBasedService(
pipeline_based_service._VertexAiPipelineBasedService
):
_template_ref = _TEST_TEMPLATE_REF
_metadata_output_artifact = "TODO"
_creation_log_message = (
"Created PipelineJob for your fake PipelineBasedService."
)
_component_identifier = _TEST_COMPONENT_IDENTIFIER
_template_name_identifier = None
@classmethod
def submit(cls) -> pipeline_based_service._VertexAiPipelineBasedService:
return cls._create_and_submit_pipeline_job(
template_params={}, template_path=_TEST_TEMPLATE_PATH
)
@pytest.mark.parametrize(
"job_spec_json",
[_TEST_PIPELINE_JOB],
)
@pytest.mark.parametrize(
"pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
)
def test_init_pipeline_based_service(
self,
pipeline_name,
mock_pipeline_job_get,
mock_pipeline_based_service_get,
mock_load_yaml_and_json,
job_spec_json,
mock_pipeline_service_create,
get_execution_mock,
mock_pipeline_bucket_exists,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
staging_bucket=_TEST_GCS_BUCKET_NAME,
)
pipeline_service = self.FakePipelineBasedService(
pipeline_job_name=pipeline_name
)
mock_pipeline_based_service_get.assert_called_with(
name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
)
assert get_execution_mock.call_count == 1
# There are 2 get requests made for each item: 1 in the constructor and
# 1 in the validation method
assert mock_pipeline_based_service_get.call_count == 2
assert not mock_pipeline_service_create.called
assert pipeline_service.backing_pipeline_job._gca_resource == make_pipeline_job(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.parametrize(
"job_spec_json",
[_TEST_PIPELINE_JOB],
)
@pytest.mark.parametrize(
"pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
)
def test_init_pipeline_based_service_with_template_name_identifier(
self,
pipeline_name,
mock_pipeline_job_get,
mock_pipeline_based_service_get,
mock_load_yaml_and_json,
job_spec_json,
mock_pipeline_service_create,
get_execution_mock,
mock_pipeline_bucket_exists,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
staging_bucket=_TEST_GCS_BUCKET_NAME,
)
self.FakePipelineBasedService._template_name_identifier = (
_TEST_PIPELINE_NAME_IDENTIFIER
)
self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
mock_pipeline_based_service_get.assert_called_with(
name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
)
@pytest.mark.parametrize(
"job_spec_json",
[_TEST_PIPELINE_JOB],
)
@pytest.mark.parametrize(
"pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
)
def test_init_pipeline_based_service_with_invalid_template_name_identifier_raises(
self,
pipeline_name,
mock_pipeline_job_get,
mock_pipeline_based_service_get,
mock_load_yaml_and_json,
job_spec_json,
mock_pipeline_service_create,
get_execution_mock,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
)
self.FakePipelineBasedService._template_name_identifier = (
_TEST_INVALID_PIPELINE_NAME_IDENTIFIER
)
with pytest.raises(ValueError):
self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
@pytest.mark.parametrize(
"job_spec_json",
[_TEST_PIPELINE_JOB],
)
@pytest.mark.parametrize(
"pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
)
def test_init_pipeline_based_service_with_failed_pipeline_run(
self,
pipeline_name,
mock_pipeline_service_get_with_fail,
mock_load_yaml_and_json,
job_spec_json,
get_execution_mock,
mock_pipeline_bucket_exists,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
staging_bucket=_TEST_GCS_BUCKET_NAME,
)
self.FakePipelineBasedService._template_name_identifier = None
self.FakePipelineBasedService(pipeline_job_name=_TEST_PIPELINE_JOB_ID)
mock_pipeline_service_get_with_fail.assert_called_with(
name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
)
assert get_execution_mock.call_count == 1
@pytest.mark.parametrize(
"pipeline_name", [_TEST_PIPELINE_JOB_ID, _TEST_PIPELINE_JOB_NAME]
)
def test_init_pipeline_based_service_without_template_ref_raises(
self,
pipeline_name,
mock_pipeline_job_get,
mock_pipeline_service_create,
):
"""Raises TypeError since abstract properties are not set.
_VertexAiPipelineBasedService class should only be instantiated
through a child class.
"""
with pytest.raises(TypeError):
pipeline_based_service._VertexAiPipelineBasedService(
pipeline_job_id=pipeline_name,
)
def test_init_pipeline_based_service_with_invalid_pipeline_run_id_raises(
self,
mock_pipeline_job_get,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
)
with pytest.raises(ValueError):
self.FakePipelineBasedService(
pipeline_job_name=_TEST_INVALID_PIPELINE_JOB_NAME,
)
@pytest.mark.parametrize(
"job_spec_json",
[_TEST_PIPELINE_JOB],
)
def test_create_and_submit_pipeline_job(
self,
mock_pipeline_job_get,
mock_pipeline_service_create,
mock_load_yaml_and_json,
job_spec_json,
mock_pipeline_bucket_exists,
):
import yaml
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
staging_bucket=_TEST_GCS_BUCKET_NAME,
)
self.FakePipelineBasedService._template_name_identifier = None
test_pipeline_service = (
self.FakePipelineBasedService._create_and_submit_pipeline_job(
job_id=_TEST_PIPELINE_JOB_ID,
template_params=_TEST_PIPELINE_PARAMETER_VALUES,
template_path=_TEST_TEMPLATE_PATH,
pipeline_root=_TEST_PIPELINE_ROOT,
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
service_account=_TEST_SERVICE_ACCOUNT,
network=_TEST_NETWORK,
)
)
expected_runtime_config_dict = {
"gcsOutputDirectory": _TEST_PIPELINE_ROOT,
"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
}
runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec_json = yaml.safe_load(job_spec_json)
pipeline_spec = job_spec_json.get("pipelineSpec") or job_spec_json
# Construct expected request
expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
"pipelineInfo": pipeline_spec["pipelineInfo"],
"root": pipeline_spec["root"],
"schemaVersion": "2.1.0",
},
runtime_config=runtime_config,
service_account=_TEST_SERVICE_ACCOUNT,
network=_TEST_NETWORK,
)
mock_pipeline_service_create.assert_called_once_with(
parent=_TEST_PARENT,
pipeline_job=expected_gapic_pipeline_job,
pipeline_job_id=_TEST_PIPELINE_JOB_ID,
timeout=None,
)
assert mock_pipeline_service_create.call_count == 1
test_backing_pipeline_job = test_pipeline_service.backing_pipeline_job
assert mock_pipeline_job_get.call_count == 1
assert (
test_pipeline_service.gca_resource.name
== test_backing_pipeline_job.resource_name
)
def test_list_pipeline_based_service(
self,
mock_pipeline_based_service_get,
get_execution_mock,
list_executions_mock,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
)
test_list_request = self.FakePipelineBasedService.list()
list_executions_mock.assert_called_once_with(
request={
"parent": _TEST_EXECUTION_PARENT,
"filter": f"metadata.component_type.string_value={self.FakePipelineBasedService._component_identifier}",
}
)
assert isinstance(
test_list_request[0], pipeline_based_service._VertexAiPipelineBasedService
)
assert (
test_list_request[0]._template_ref
== self.FakePipelineBasedService._template_ref
)
# only 1 of the 2 executions in list_executions_mock matches the
# properties of FakePipelineBasedService
assert len(test_list_request) == 1
def test_list_pipeline_based_service_with_template_name_identifier(
self,
mock_pipeline_based_service_get,
get_execution_mock,
list_executions_mock,
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
)
self.FakePipelineBasedService._template_name_identifier = (
_TEST_INVALID_PIPELINE_NAME_IDENTIFIER
)
test_list_request = self.FakePipelineBasedService.list()
# None of the mock pipelines match the `_template_name_identifier`
# set above, so the returned list should be empty
assert len(test_list_request) == 0
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
a143f17f146e06bb8ade6dc3f16b63d24cb99ff9
|
07214da96a1dfa6bca6d84a4621efcd8a7ffa6ab
|
/venv/Scripts/easy_install-3.6-script.py
|
7bf61d6c077b4df38e305c86c56a7c13b70b2fde
|
[] |
no_license
|
jonys1994/scripts
|
027a45a5672ff3d8e1888f434f1a4a52699cb3f2
|
cf934e45df248892362f3b326225a0d7955bafd9
|
refs/heads/master
| 2020-03-23T19:06:33.206784
| 2019-01-18T11:49:31
| 2019-01-18T11:49:31
| 141,953,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
#!C:\Users\ShineMo\PycharmProjects\scripts\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.0.1','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.0.1', 'console_scripts', 'easy_install-3.6')()
)
|
[
"xiangp@shinemo.com"
] |
xiangp@shinemo.com
|
3faa1755d2e00628d1dd9f0b9f8cdd32cd5462a2
|
f15d66277991bbb1c5adaf2387570426b02d2aa6
|
/codingEncryption.py
|
c6c29babd6812448ad237a919011b5850ecd7e40
|
[] |
no_license
|
nataliaEscobar/Girls_Who_Code
|
2e0e8b803a746844779bc29bc975c92582220c19
|
d2136758d32f57b91b5d8992874b4562683e95ab
|
refs/heads/master
| 2020-12-03T03:59:44.434355
| 2017-07-14T22:45:27
| 2017-07-14T22:45:27
| 95,800,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
#GLOBAL VARIABLES
codeWords = []
realWords = []
#FUNCTIONS
def createCodeWords():
    codingWords = True
    while codingWords:
        print("Would you like to add a word to your code? (y/n)")
        answer = input().lower()
        if(answer == "y" or answer == "yes"):
            print("What is the real word you would like to add?")
            real = input().lower()
            realWords.append(real)
            print("What would your code word be?")
            code = input().lower()
            codeWords.append(code)
        elif(answer == "n" or answer == "no"):
            print("Your code has been saved!")
            codingWords = False
            print("Your code words are")
            print(codeWords)
            print("They correspond to")
            print(realWords)
        else:
            print("Security break! Abort mission")
            exit()
def encryptMessage():
    print()
    print("______________________")
    print()
    print("What is your message that you would like encrypted?")
    message = input().lower()
    wordList = message.split()
    codedMessage = ""
    for word in wordList:
        # Swap each real word for its matching code word; leave every other word unchanged
        if word in realWords:
            print("MATCH FOUND!")
            codedMessage = codedMessage + codeWords[realWords.index(word)] + " "
        else:
            codedMessage = codedMessage + word + " "
    print("Your encrypted message is:")
    print(codedMessage)
#RUNNING CODE
createCodeWords()
encryptMessage()
|
[
"noreply@github.com"
] |
nataliaEscobar.noreply@github.com
|
a341e8165f551618582e8bd6730337355be1684b
|
049911b1272653a3f44f0bfbd21c3ec5883775a9
|
/exercicios/2-6.py
|
5724d40dbd045950d8a18b198f1a5b07b8e5166f
|
[] |
no_license
|
jhoonb/III-JTI-MS
|
da0470404d8e167df9a44c51d1218e086478ede2
|
cfcedd95348b44d45f26294a2d63327bd5edc89c
|
refs/heads/master
| 2016-09-06T06:49:06.649975
| 2014-11-13T18:00:38
| 2014-11-13T18:00:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
# 6) Write a program that simulates a simple calculator:
# ask which operation to perform (+, /, %, -, *),
# then read two values and return the result of that operation.
def soma(a, b):
return a+b
def sub(a, b):
return a-b
def mul(a, b):
return a*b
def div(a, b):
if b != 0:
return a/b
else:
return "divisao por zero"
def rest(a, b):
if b != 0:
return a%b
else:
return "divisao por zero"
#----------------------------------------------
def calculadora(op, a, b):
if op == "*":
return mul(a, b)
elif op == "/":
return div(a, b)
elif op == "%":
return rest(a, b)
elif op == "-":
return sub(a, b)
elif op == "+":
return soma(a, b)
else:
return "comando inválido"
#-------------------------------------------------
op = input("informa a operação (+, /, %, -, *): ")
v1 = float(input("informe o valor 1: "))
v2 = float(input("informe o valor 2: "))
print("resultado: ", calculadora(op, v1, v2))
|
[
"jpbanczek@gmail.com"
] |
jpbanczek@gmail.com
|
e4d17180e87356c886b83e79634455b68f5b9bfe
|
54bc958af18564f07498e6603a0ea875c938b587
|
/Advent_of_Code_2016/12.py
|
85a75bc46863d48fd6271ed472722da266414e15
|
[] |
no_license
|
drathke924/userscripts
|
8a07657f81974567b2776fe004f506bb5abcea6c
|
6de664a3becdf9846b5293fefde7e11d03bc6252
|
refs/heads/master
| 2022-12-24T14:38:13.580792
| 2022-12-15T23:25:57
| 2022-12-15T23:25:57
| 76,170,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
from time import time
start_time = time()
with open("12.txt", "r") as f:
data = f.read().splitlines()
def findReg(reg):
i = 0
while i < len(data):
line = data[i].split()
if line[0] == "cpy":
try:
reg[line[2]] = int(line[1])
except ValueError:
reg[line[2]] = reg[line[1]]
elif line[0] == "jnz":
if line[1] in reg.keys():
if reg[line[1]] != 0:
i += int(line[2])
continue
elif int(line[1]) != 0:
i += int(line[2])
continue
elif line[0] == "inc":
reg[line[1]] += 1
elif line[0] == "dec":
reg[line[1]] -= 1
i += 1
return reg["a"]
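# First run (part one): every register starts at 0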
registry = {"a" : 0, "b" : 0, "c" : 0, "d" : 0}
print(findReg(registry))
print("Run time: %s" % (time() - start_time))
start_time = time()
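# Second run (part two): register c starts at 1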
registry = {"a" : 0, "b" : 0, "c" : 1, "d" : 0}
print(findReg(registry))
print("Run time: %s" % (time() - start_time))
|
[
"alpha.zero924@gmail.com"
] |
alpha.zero924@gmail.com
|
d86ef06603d9b162c2f8c9f7d23458b983808203
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/persistent_es/plot_toy_regression.py
|
e7888a7de3c7a5d525314fe775497b91f0625aa7
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 5,719
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plot loss curves from saved CSV files for the toy regression experiment.
Example:
--------
python plot_toy_regression.py
"""
import os
import csv
import ipdb
import pickle as pkl
from collections import defaultdict
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
sns.set_style('white')
sns.set_palette('bright')
# Darker colors
flatui = ["#E00072", "#00830B", "#2B1A7F", "#E06111", "#02D4F9", "#4F4C4B",]
sns.set_palette(flatui)
sns.palplot(sns.color_palette())
# Plotting from saved CSV files
def load_log(exp_dir, log_filename='train_log.csv'):
result_dict = defaultdict(list)
with open(os.path.join(exp_dir, log_filename), newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for key in row:
try:
if key in ['global_iteration', 'iteration', 'epoch']:
result_dict[key].append(int(row[key]))
else:
result_dict[key].append(float(row[key]))
except:
pass
return result_dict
def plot_heatmap(pkl_path,
xlabel,
ylabel,
smoothed=False,
sigma=5.0,
cmap=plt.cm.viridis,
colorbar=True,
figsize=(10,8)):
with open(pkl_path, 'rb') as f:
heatmap_data = pkl.load(f)
if smoothed:
smoothed_F_grid = scipy.ndimage.gaussian_filter(heatmap_data['L_grid'], sigma=sigma)
best_smoothed_theta = np.unravel_index(smoothed_F_grid.argmin(), smoothed_F_grid.shape)
best_smoothed_x = heatmap_data['xv'][best_smoothed_theta]
best_smoothed_y = heatmap_data['yv'][best_smoothed_theta]
plt.figure(figsize=figsize)
plt.pcolormesh(heatmap_data['xv'], heatmap_data['yv'], smoothed_F_grid, norm=colors.LogNorm(), cmap=cmap)
if colorbar:
plt.colorbar()
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(xlabel, fontsize=22)
plt.ylabel(ylabel, fontsize=22)
else:
plt.figure(figsize=figsize)
plt.pcolormesh(heatmap_data['xv'], heatmap_data['yv'], heatmap_data['L_grid'], norm=colors.LogNorm(), cmap=cmap)
if colorbar:
plt.colorbar()
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(xlabel, fontsize=22)
plt.ylabel(ylabel, fontsize=22)
if not os.path.exists('figures'):
os.makedirs('figures')
tbptt_k10 = load_log('saves/toy_regression/tbptt-s:linear-optim:adam-lr:0.01-T:100-K:10-N:100-sigma:1.0-seed:1', 'iteration.csv')
rtrl_k10 = load_log('saves/toy_regression/rtrl-s:linear-optim:adam-lr:0.01-T:100-K:10-N:100-sigma:1.0-seed:1', 'iteration.csv')
uoro_k10 = load_log('saves/toy_regression/uoro-s:linear-optim:adam-lr:0.01-T:100-K:10-N:100-sigma:1.0-seed:1', 'iteration.csv')
es_k10 = load_log('saves/toy_regression/es-s:linear-optim:adam-lr:0.01-T:100-K:10-N:100-sigma:1.0-seed:1', 'iteration.csv')
pes_k10 = load_log('saves/toy_regression/pes-s:linear-optim:adam-lr:0.01-T:100-K:10-N:100-sigma:1.0-seed:1', 'iteration.csv')
plot_heatmap('saves/toy_regression/sgd_lr:linear_sum_T_100_N_400_grid.pkl',
xlabel='Initial LR',
ylabel='Final LR',
smoothed=False,
cmap=plt.cm.Purples_r,
colorbar=False,
figsize=(7,5))
plt.plot(np.array(tbptt_k10['theta0']), np.array(tbptt_k10['theta1']), linewidth=3, label='TBPTT')
plt.plot(np.array(uoro_k10['theta0']), np.array(uoro_k10['theta1']), linewidth=3, label='UORO')
plt.plot(np.array(rtrl_k10['theta0']), np.array(rtrl_k10['theta1']), linewidth=3, label='RTRL')
plt.plot(np.array(es_k10['theta0']), np.array(es_k10['theta1']), linewidth=3, label='ES')
plt.plot(np.array(pes_k10['theta0']), np.array(pes_k10['theta1']), linewidth=3, label='PES')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Initial LR', fontsize=24)
plt.ylabel('Final LR', fontsize=24)
plt.legend(fontsize=20, fancybox=True, framealpha=0.7)
plt.savefig('figures/toy_regression_heatmap.png', bbox_inches='tight', pad_inches=0, dpi=300)
# ================================================================================================
plt.figure(figsize=(6,4))
plt.plot(tbptt_k10['inner_problem_steps'], tbptt_k10['L'], linewidth=3, label='TBPTT')
plt.plot(uoro_k10['inner_problem_steps'], uoro_k10['L'], linewidth=3, label='UORO')
plt.plot(rtrl_k10['inner_problem_steps'], rtrl_k10['L'], linewidth=3, label='RTRL')
plt.plot(es_k10['inner_problem_steps'], es_k10['L'], linewidth=3, label='ES')
plt.plot(pes_k10['inner_problem_steps'], pes_k10['L'], linewidth=3, label='PES')
plt.xscale('log')
plt.xticks(fontsize=18)
plt.yticks([500, 1000, 1500, 2000, 2500], fontsize=18)
plt.xlabel('Inner Iterations', fontsize=20)
plt.ylabel('Meta Objective', fontsize=20)
plt.legend(fontsize=18, fancybox=True, framealpha=0.3)
sns.despine()
plt.savefig('figures/toy_regression_meta_obj.pdf', bbox_inches='tight', pad_inches=0)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
276475239f152aff88af8db3c953385c75d8eec9
|
4bbfd93fd76e68cfd69e8908d17f275fb9416a76
|
/test/common.py
|
d51fe06cc8433a16f4c802d5f596fa91159f3548
|
[
"Apache-2.0"
] |
permissive
|
yunsukim86/language_model_sockeye
|
4aecc9521f674e24b3b7bc432311ed2bae5325e3
|
74883db1e5b45ae37a97401fa16634ab0f94698d
|
refs/heads/master
| 2022-01-05T08:51:59.264969
| 2018-06-18T07:27:16
| 2018-06-18T07:27:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,700
|
py
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import os
import random
import sys
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import List, Optional, Tuple
from unittest.mock import patch
import mxnet as mx
import numpy as np
import sockeye.average
import sockeye.constants as C
import sockeye.evaluate
import sockeye.lexicon
import sockeye.prepare_data
import sockeye.train
import sockeye.translate
import sockeye.utils
from sockeye.evaluate import raw_corpus_bleu, raw_corpus_chrf
logger = logging.getLogger(__name__)
def gaussian_vector(shape, return_symbol=False):
"""
Generates random normal tensors (diagonal covariance)
:param shape: shape of the tensor.
    :param return_symbol: True if the result should be a Symbol, False if it should be a NumPy array.
:return: A gaussian tensor.
"""
return mx.sym.random_normal(shape=shape) if return_symbol else np.random.normal(size=shape)
def integer_vector(shape, max_value, min_value=1, return_symbol=False):
"""
Generates a random positive integer tensor
:param shape: shape of the tensor.
:param max_value: maximum integer value.
:param min_value: minimum integer value.
    :param return_symbol: True if the result should be a Symbol, False if it should be a NumPy array.
:return: A random integer tensor.
"""
return mx.sym.round(mx.sym.random.uniform(low=min_value, high=max_value, shape=shape)) if return_symbol \
else np.random.randint(low=min_value, high=max_value, size=shape)
def uniform_vector(shape, min_value=0, max_value=1, return_symbol=False):
"""
Generates a uniformly random tensor
:param shape: shape of the tensor
:param min_value: minimum possible value
:param max_value: maximum possible value (exclusive)
:param return_symbol: True if the result should be a mx.sym.Symbol, False if it should be a Numpy array
:return:
"""
return mx.sym.random.uniform(low=min_value, high=max_value, shape=shape) if return_symbol \
else np.random.uniform(low=min_value, high=max_value, size=shape)
def generate_random_sentence(vocab_size, max_len):
"""
Generates a random "sentence" as a list of integers.
:param vocab_size: Number of words in the "vocabulary". Note that due to
the inclusion of special words (BOS, EOS, UNK) this does *not*
correspond to the maximum possible value.
:param max_len: maximum sentence length.
"""
length = random.randint(1, max_len)
# Due to the special words, the actual words start at index 3 and go up to vocab_size+2
return [random.randint(3, vocab_size + 2) for _ in range(length)]
_DIGITS = "0123456789"
_MID = 5
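# Digits below _MID are labelled "l" (low) and the rest "h" (high) when generating source factor files.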
def generate_digits_file(source_path: str,
target_path: str,
line_count: int = 100,
line_length: int = 9,
sort_target: bool = False,
line_count_empty: int = 0,
seed=13):
assert line_count_empty <= line_count
random_gen = random.Random(seed)
with open(source_path, "w") as source_out, open(target_path, "w") as target_out:
all_digits = []
for _ in range(line_count - line_count_empty):
digits = [random_gen.choice(_DIGITS) for _ in range(random_gen.randint(1, line_length))]
all_digits.append(digits)
for _ in range(line_count_empty):
all_digits.append([])
random_gen.shuffle(all_digits)
for digits in all_digits:
print(" ".join(digits), file=source_out)
if sort_target:
digits.sort()
print(" ".join(digits), file=target_out)
def generate_low_high_factors(source_path: str,
output_path: str):
"""
Writes low/high factor file given a source file of digit sequences.
"""
with open(source_path, 'r') as fin, open(output_path, 'w') as fout:
for line in fin:
digits = map(int, line.rstrip().split())
factors = ["l" if digit < _MID else "h" for digit in digits]
print(" ".join(factors), file=fout)
def generate_fast_align_lex(lex_path: str):
"""
Generate a fast_align format lex table for digits.
:param lex_path: Path to write lex table.
"""
with open(lex_path, "w") as lex_out:
for digit in _DIGITS:
print("{0}\t{0}\t0".format(digit), file=lex_out)
_LEXICON_PARAMS_COMMON = "-i {input} -m {model} -k 1 -o {json} {quiet}"
@contextmanager
def tmp_digits_dataset(prefix: str,
train_line_count: int, train_max_length: int,
dev_line_count: int, dev_max_length: int,
test_line_count: int, test_line_count_empty: int, test_max_length: int,
sort_target: bool = False,
seed_train: int = 13, seed_dev: int = 13,
with_source_factors: bool = False):
with TemporaryDirectory(prefix=prefix) as work_dir:
# Simple digits files for train/dev data
train_source_path = os.path.join(work_dir, "train.src")
train_target_path = os.path.join(work_dir, "train.tgt")
dev_source_path = os.path.join(work_dir, "dev.src")
dev_target_path = os.path.join(work_dir, "dev.tgt")
test_source_path = os.path.join(work_dir, "test.src")
test_target_path = os.path.join(work_dir, "test.tgt")
generate_digits_file(train_source_path, train_target_path, train_line_count,
train_max_length, sort_target=sort_target, seed=seed_train)
generate_digits_file(dev_source_path, dev_target_path, dev_line_count, dev_max_length, sort_target=sort_target,
seed=seed_dev)
generate_digits_file(test_source_path, test_target_path, test_line_count, test_max_length,
line_count_empty=test_line_count_empty, sort_target=sort_target, seed=seed_dev)
data = {'work_dir': work_dir,
'source': train_source_path,
'target': train_target_path,
'validation_source': dev_source_path,
'validation_target': dev_target_path,
'test_source': test_source_path,
'test_target': test_target_path}
if with_source_factors:
train_factor_path = train_source_path + ".factors"
dev_factor_path = dev_source_path + ".factors"
test_factor_path = test_source_path + ".factors"
generate_low_high_factors(train_source_path, train_factor_path)
generate_low_high_factors(dev_source_path, dev_factor_path)
generate_low_high_factors(test_source_path, test_factor_path)
data['train_source_factors'] = [train_factor_path]
data['dev_source_factors'] = [dev_factor_path]
data['test_source_factors'] = [test_factor_path]
yield data
_TRAIN_PARAMS_COMMON = "--use-cpu --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --validation-source {dev_source} --validation-target {dev_target} --output {model} {quiet}" \
" --seed {seed}"
_PREPARE_DATA_COMMON = " --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --output {output} {quiet}"
_TRAIN_WITH_FACTORS_COMMON = " --source-factors {source_factors}"
_DEV_WITH_FACTORS_COMMON = " --validation-source-factors {dev_source_factors}"
_TRAIN_PARAMS_PREPARED_DATA_COMMON = "--use-cpu --max-seq-len {max_len} --prepared-data {prepared_data}" \
" --validation-source {dev_source} --validation-target {dev_target} " \
"--output {model} {quiet}"
_TRANSLATE_PARAMS_COMMON = "--use-cpu --models {model} --input {input} --output {output} {quiet}"
_TRANSLATE_WITH_FACTORS_COMMON = " --input-factors {input_factors}"
_TRANSLATE_PARAMS_RESTRICT = "--restrict-lexicon {json}"
_EVAL_PARAMS_COMMON = "--hypotheses {hypotheses} --references {references} --metrics {metrics} {quiet}"
def run_train_translate(train_params: str,
translate_params: str,
translate_params_equiv: Optional[str],
train_source_path: str,
train_target_path: str,
dev_source_path: str,
dev_target_path: str,
test_source_path: str,
test_target_path: str,
train_source_factor_paths: Optional[List[str]] = None,
dev_source_factor_paths: Optional[List[str]] = None,
test_source_factor_paths: Optional[List[str]] = None,
use_prepared_data: bool = False,
max_seq_len: int = 10,
restrict_lexicon: bool = False,
work_dir: Optional[str] = None,
seed: int = 13,
quiet: bool = False) -> Tuple[float, float, float, float]:
"""
Train a model and translate a dev set. Report validation perplexity and BLEU.
:param train_params: Command line args for model training.
:param translate_params: First command line args for translation.
:param translate_params_equiv: Second command line args for translation. Should produce the same outputs
:param train_source_path: Path to the source file.
:param train_target_path: Path to the target file.
:param dev_source_path: Path to the development source file.
:param dev_target_path: Path to the development target file.
:param test_source_path: Path to the test source file.
:param test_target_path: Path to the test target file.
:param train_source_factor_paths: Optional list of paths to training source factor files.
:param dev_source_factor_paths: Optional list of paths to dev source factor files.
:param test_source_factor_paths: Optional list of paths to test source factor files.
:param use_prepared_data: Whether to use the prepared data functionality.
:param max_seq_len: The maximum sequence length.
:param restrict_lexicon: Additional translation run with top-k lexicon-based vocabulary restriction.
:param work_dir: The directory to store the model and other outputs in.
:param seed: The seed used for training.
:param quiet: Suppress the console output of training and decoding.
:return: A tuple containing perplexity, bleu scores for standard and reduced vocab decoding, chrf score.
"""
if quiet:
quiet_arg = "--quiet"
else:
quiet_arg = ""
with TemporaryDirectory(dir=work_dir, prefix="test_train_translate.") as work_dir:
# Optionally create prepared data directory
if use_prepared_data:
prepared_data_path = os.path.join(work_dir, "prepared_data")
params = "{} {}".format(sockeye.prepare_data.__file__,
_PREPARE_DATA_COMMON.format(train_source=train_source_path,
train_target=train_target_path,
output=prepared_data_path,
max_len=max_seq_len,
quiet=quiet_arg))
if train_source_factor_paths is not None:
params += _TRAIN_WITH_FACTORS_COMMON.format(source_factors=" ".join(train_source_factor_paths))
logger.info("Creating prepared data folder.")
with patch.object(sys, "argv", params.split()):
sockeye.prepare_data.main()
# Train model
model_path = os.path.join(work_dir, "model")
params = "{} {} {}".format(sockeye.train.__file__,
_TRAIN_PARAMS_PREPARED_DATA_COMMON.format(prepared_data=prepared_data_path,
dev_source=dev_source_path,
dev_target=dev_target_path,
model=model_path,
max_len=max_seq_len,
quiet=quiet_arg),
train_params)
if dev_source_factor_paths is not None:
params += _DEV_WITH_FACTORS_COMMON.format(dev_source_factors=" ".join(dev_source_factor_paths))
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
else:
# Train model
model_path = os.path.join(work_dir, "model")
params = "{} {} {}".format(sockeye.train.__file__,
_TRAIN_PARAMS_COMMON.format(train_source=train_source_path,
train_target=train_target_path,
dev_source=dev_source_path,
dev_target=dev_target_path,
model=model_path,
max_len=max_seq_len,
seed=seed,
quiet=quiet_arg),
train_params)
if train_source_factor_paths is not None:
params += _TRAIN_WITH_FACTORS_COMMON.format(source_factors=" ".join(train_source_factor_paths))
if dev_source_factor_paths is not None:
params += _DEV_WITH_FACTORS_COMMON.format(dev_source_factors=" ".join(dev_source_factor_paths))
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
logger.info("Translating with parameters %s.", translate_params)
# Translate corpus with the 1st params
out_path = os.path.join(work_dir, "out.txt")
params = "{} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_path,
quiet=quiet_arg),
translate_params)
if test_source_factor_paths is not None:
params += _TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=" ".join(test_source_factor_paths))
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# Translate corpus with the 2nd params
if translate_params_equiv is not None:
out_path_equiv = os.path.join(work_dir, "out_equiv.txt")
params = "{} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_path_equiv,
quiet=quiet_arg),
translate_params_equiv)
if test_source_factor_paths is not None:
params += _TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=" ".join(test_source_factor_paths))
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# read-in both outputs, ensure they are the same
with open(out_path, 'rt') as f:
lines = f.readlines()
with open(out_path_equiv, 'rt') as f:
lines_equiv = f.readlines()
assert all(a == b for a, b in zip(lines, lines_equiv))
# Test restrict-lexicon
out_restrict_path = os.path.join(work_dir, "out-restrict.txt")
if restrict_lexicon:
# fast_align lex table
lex_path = os.path.join(work_dir, "lex")
generate_fast_align_lex(lex_path)
# Top-K JSON
json_path = os.path.join(work_dir, "json")
params = "{} {}".format(sockeye.lexicon.__file__,
_LEXICON_PARAMS_COMMON.format(input=lex_path,
model=model_path,
json=json_path,
quiet=quiet_arg))
with patch.object(sys, "argv", params.split()):
sockeye.lexicon.main()
# Translate corpus with restrict-lexicon
params = "{} {} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_restrict_path,
quiet=quiet_arg),
translate_params,
_TRANSLATE_PARAMS_RESTRICT.format(json=json_path))
if test_source_factor_paths is not None:
params += _TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=" ".join(test_source_factor_paths))
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# test averaging
points = sockeye.average.find_checkpoints(model_path=model_path,
size=1,
strategy='best',
metric=C.PERPLEXITY)
assert len(points) > 0
averaged_params = sockeye.average.average(points)
assert averaged_params
# get best validation perplexity
metrics = sockeye.utils.read_metrics_file(path=os.path.join(model_path, C.METRICS_NAME))
perplexity = min(m[C.PERPLEXITY + '-val'] for m in metrics)
hypotheses = open(out_path, "r").readlines()
references = open(test_target_path, "r").readlines()
assert len(hypotheses) == len(references)
# compute metrics
bleu = raw_corpus_bleu(hypotheses=hypotheses, references=references, offset=0.01)
chrf = raw_corpus_chrf(hypotheses=hypotheses, references=references)
bleu_restrict = None
if restrict_lexicon:
bleu_restrict = raw_corpus_bleu(hypotheses=hypotheses, references=references, offset=0.01)
# Run BLEU cli
eval_params = "{} {} ".format(sockeye.evaluate.__file__,
_EVAL_PARAMS_COMMON.format(hypotheses=out_path,
references=test_target_path,
metrics="bleu chrf",
quiet=quiet_arg), )
with patch.object(sys, "argv", eval_params.split()):
sockeye.evaluate.main()
return perplexity, bleu, bleu_restrict, chrf
|
[
"tran@i6.informatik.rwth-aachen.de"
] |
tran@i6.informatik.rwth-aachen.de
|
60a2ec58deba6a1dc051a8bc0b9be5886e4c99a7
|
64d1211404c89da4e09d77d859f2cdf6609a057e
|
/models/official/nlp/train_ctl_continuous_finetune_test.py
|
45aa308403b8bb2e3eb86771ad459312e88c0916
|
[
"Apache-2.0"
] |
permissive
|
Nerfertili/Deep_learning_learning_udemy
|
f375209e0675ab8f4da9551d8a5bdee4f2948ed8
|
0fe6c1f36019b29151acb17a1f248b34d6089aeb
|
refs/heads/master
| 2023-02-17T10:10:52.536426
| 2021-01-19T02:48:23
| 2021-01-19T02:48:23
| 330,823,730
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,196
|
py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import tensorflow as tf
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.nlp import train_ctl_continuous_finetune
FLAGS = flags.FLAGS
tfm_flags.define_flags()
class ContinuousFinetuneTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
@parameterized.parameters(None, 1)
def testTrainCtl(self, pretrain_steps):
src_model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode='continuous_train_and_eval',
model_dir=self._model_dir,
params_override={
'task': {
'init_checkpoint': src_model_dir,
},
'trainer': {
'continuous_eval_timeout': 1,
'steps_per_loop': 1,
'train_steps': 1,
'validation_steps': 1,
'best_checkpoint_export_subdir': 'best_ckpt',
'best_checkpoint_eval_metric': 'acc',
'optimizer_config': {
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
}
}
})
with flagsaver.flagsaver(**flags_dict):
# Train and save some checkpoints.
params = train_utils.parse_configuration(flags.FLAGS)
distribution_strategy = tf.distribute.get_strategy()
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=src_model_dir)
_ = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train',
params=params,
model_dir=src_model_dir)
params = train_utils.parse_configuration(FLAGS)
eval_metrics = train_ctl_continuous_finetune.run_continuous_finetune(
FLAGS.mode,
params,
FLAGS.model_dir,
run_post_eval=True,
pretrain_steps=pretrain_steps)
self.assertIn('best_acc', eval_metrics)
if __name__ == '__main__':
tf.test.main()
|
[
"leal.afonso@outlook.com"
] |
leal.afonso@outlook.com
|
bf52ca5f4007c17ab8c7804bf69009ab397da25b
|
e888ca363d84f58ac7128d4ad8a1c54b1ff8555c
|
/tools/c7n_kube/c7n_kube/resources/core/volume.py
|
a14c868dc2893d4084518afcd8215a75694e3e58
|
[
"Apache-2.0"
] |
permissive
|
ksteigerwald/cloud-custodian
|
9f455e0a401f91e2d56302a4b95d173ead4b1b82
|
b5af4d66fd303579250214082d7ed3e7f4f5ab79
|
refs/heads/master
| 2020-03-27T19:30:20.544001
| 2019-02-26T11:05:37
| 2019-02-26T11:05:37
| 146,993,589
| 1
| 0
|
Apache-2.0
| 2018-09-01T11:42:20
| 2018-09-01T11:42:20
| null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
# Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from c7n_kube.query import QueryResourceManager, TypeInfo
from c7n_kube.provider import resources
@resources.register('volume')
class PersistentVolume(QueryResourceManager):
class resource_type(TypeInfo):
group = 'Core'
version = 'V1'
enum_spec = ('list_persistent_volume', 'items', None)
@resources.register('volume-claim')
class PersistentVolumeClaim(QueryResourceManager):
class resource_type(TypeInfo):
group = 'Core'
version = 'V1'
enum_spec = ('list_persistent_volume_claim_for_all_namespaces', 'items', None)
|
[
"noreply@github.com"
] |
ksteigerwald.noreply@github.com
|
c3827c6deb502162f773d367c141c37b57bbb1df
|
d03aba5cdefa68cc375cd7c5ad5214bb1f41328e
|
/python/searchCrawler/App.py
|
310576152c11735869d03abcd0ecea3366d06826
|
[
"Apache-2.0"
] |
permissive
|
TeamTitanz/Word2vecLegalDocumentRetrieval
|
75a2a06d4c456f40dc6603d5c4cf0b5f5c896c51
|
d7b8c88cc42f3fdce8926abee0c662632f9597f9
|
refs/heads/master
| 2021-01-25T06:49:12.580150
| 2017-09-22T09:08:46
| 2017-09-22T09:08:46
| 93,614,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,102
|
py
|
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException, StaleElementReferenceException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from bs4 import BeautifulSoup
import config
import time
driver = None
sleep_time = 1
caseURL = []
caseNames = []
def loadDriver():
global driver
try:
profile = webdriver.FirefoxProfile()
profile.accept_untrusted_certs = True
profile.set_preference('permissions.default.stylesheet', 2)
profile.set_preference('permissions.default.image', 2)
profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
firefox_capabilities = DesiredCapabilities.FIREFOX
firefox_capabilities['marionette'] = True
driver = webdriver.Firefox(executable_path=config.GECKO_DRIVER_PATH)
driver.set_window_size(1124, 850)
except WebDriverException:
print("Web driver error")
return False
def getUrl(url):
global driver, sleep_time
# Navigate to the hostel form
#'http://lawcrawler.findlaw.com/LCsearch.html?restrict=lp&client=lp&entry=harvey+v.+veneman'
driver.get(url)
time.sleep(1)
result = driver.page_source
soup = BeautifulSoup(result, 'html.parser')
return soup.find(id="gsa_n_1").find("div", class_="gsa_result_url").getText()
def getCaseURL(name):
oldCharList = [" ", "'", ",", "$", "&", ":", "/", "?"]
newCharList = ["+", "%27", "%2C", "%24", "%26", "%3A", "%2F", "%3F"]
searchURL = "http://lawcrawler.findlaw.com/LCsearch.html?restrict=lp&client=lp&entry="
for char in name:
if(char in oldCharList):
searchURL += newCharList[oldCharList.index(char)]
else:
searchURL += char
#print searchURL
return getUrl(searchURL)
def uniqueID(temID):
temID = str(temID)
prefixLen = 10 - len(temID)
prefix = '0' * prefixLen
return prefix + temID
def createGraph():
global caseURL
loadDriver()
with open("NameGraph.csv", "r") as ins:
for line in ins:
caseURL.append(line.split("_=r=_")[0])
caseNames.append(line.split("_=;=_")[-1])
newCf = open('newCases.txt','w')
graphf = open('obGraph.txt','w')
index = 0
for mentionNames in caseNames:
graphf.write(uniqueID(index)+':')
plist = []
temName = mentionNames.split("_=,=_")
for name in temName:
try:
newURL = getCaseURL(name)
except AttributeError:
continue
            print(newURL)
if(newURL in caseURL):
temID = uniqueID(caseURL.index(newURL))
if(temID not in plist):
plist.append(temID)
else:
newCf.write(newURL+'\n')
temID = uniqueID(len(caseURL))
plist.append(temID)
caseURL.append(newURL)
graphf.write(",".join(plist)+'\n')
index += 1
newCf.close()
graphf.close()
createGraph()
|
[
"buddhiayesha.13@cse.mrt.ac.lk"
] |
buddhiayesha.13@cse.mrt.ac.lk
|
093732790e4b2c8635b724e49bb6bdab7dceb469
|
3f9da25e18b9728ec832dc51ac6477d52cd65f58
|
/blog/migrations/0001_initial.py
|
f90aca9c6c7dddfebe0896073486eabc6b7d463d
|
[] |
no_license
|
rachelconnor/my-first-blog
|
53651d57ca2460552888c533052839668c9fd2a2
|
72d56c7ebe467e29192a5ae26c57fb538ca517c6
|
refs/heads/master
| 2021-01-23T13:16:44.593151
| 2017-06-03T14:38:02
| 2017-06-03T14:38:02
| 93,239,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-03 13:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"rachconnor@hotmail.com"
] |
rachconnor@hotmail.com
|
333b9e5c7b90c01830816f1cadd469fbec6bcaec
|
7235deb273d66561d409338d00f6e404c67a8a16
|
/checkout/views.py
|
03486986f63d8932c1b0d168fc64ee291262bc4f
|
[] |
no_license
|
redlik/django_sportclub_m4
|
b4839e33b3ebf79c7e89f09aa00e2bc79ca6b003
|
24dda251821a0138a6bcf3325335c65b676008e7
|
refs/heads/master
| 2023-02-07T00:34:08.501911
| 2020-12-31T07:36:17
| 2020-12-31T07:36:17
| 285,994,067
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,727
|
py
|
from django.shortcuts import render, redirect, reverse, get_object_or_404, HttpResponse
from django.views.decorators.http import require_POST
from django.contrib import messages
from django.conf import settings
import stripe
import json
from .forms import OrderForm
from .models import Order, OrderLineItem
from profiles.models import UserProfile
from profiles.forms import UserProfileForm
from products.models import Product
from basket.context import basket_contents
@require_POST
def cache_checkout_data(request):
try:
pid = request.POST.get('client_secret').split('_secret')[0]
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.PaymentIntent.modify(pid, metadata={
'basket': json.dumps(request.session.get('basket', {})),
'save_info': request.POST.get('save_info'),
'username': request.user,
})
return HttpResponse(status=200)
except Exception as e:
messages.error(request, 'Sorry, your payment cannot be \
processed right now. Please try again later.')
return HttpResponse(content=e, status=400)
def checkout(request):
""" Function to show checkout page and process the payment """
stripe_public_key = settings.STRIPE_PUBLIC_KEY
stripe_secret_key = settings.STRIPE_SECRET_KEY
if request.method == 'POST':
basket = request.session.get('basket', {})
form_data = {
'full_name': request.POST['full_name'],
'email': request.POST['email'],
'phone_number': request.POST['phone_number'],
'country': request.POST['country'],
'postcode': request.POST['postcode'],
'city': request.POST['city'],
'address1': request.POST['address1'],
'address2': request.POST['address2'],
}
order_form = OrderForm(form_data)
if order_form.is_valid():
order = order_form.save()
for product_id, item_data in basket.items():
try:
product = Product.objects.get(id=product_id)
if isinstance(item_data, int):
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=item_data,
)
order_line_item.save()
else:
for size, quantity in item_data['products_by_size'].items():
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=quantity,
product_size=size,
)
order_line_item.save()
except Product.DoesNotExist:
messages.error(request, (
"One of the products in your basket wasn't found in our database. "
"Please call us for assistance!")
)
order.delete()
return redirect(reverse('view_basket'))
request.session['save_info'] = 'save-info' in request.POST
return redirect(reverse('checkout_success', args=[order.order_number]))
else:
messages.error(request, 'There was an error with your form. \
Please double check your information.')
else:
basket = request.session.get('basket', {})
if not basket:
messages.error(request, "The basket is empty at the moment")
return redirect(reverse('shop:all_products'))
current_basket = basket_contents(request)
total = current_basket['grand_total']
stripe_total = round(total * 100)
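        # Editorial note: Stripe expects the amount in the smallest currency unit
        # (e.g. pence/cents), hence the rounding of total * 100 above before the
        # PaymentIntent is created below.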
stripe.api_key = stripe_secret_key
intent = stripe.PaymentIntent.create(
amount=stripe_total,
currency=settings.STRIPE_CURRENCY,
)
# Attempt to prefill the form with any info the user maintains in their profile
if request.user.is_authenticated:
try:
profile = UserProfile.objects.get(user=request.user)
order_form = OrderForm(initial={
'full_name': profile.user.get_full_name(),
'email': profile.user.email,
'phone_number': profile.default_phone_number,
'country': profile.default_country,
'postcode': profile.default_postcode,
'city': profile.default_city,
'address1': profile.default_address1,
'address2': profile.default_address2,
})
except UserProfile.DoesNotExist:
order_form = OrderForm()
else:
order_form = OrderForm()
if not stripe_public_key:
messages.warning(request, 'Stripe public key is missing. \
Did you forget to set it in your environment?')
context = {
'order_form': order_form,
'stripe_public_key': stripe_public_key,
'client_secret': intent.client_secret,
}
return render(request, 'checkout/checkout.html', context)
def checkout_success(request, order_number):
"""
Handle successful checkouts
"""
save_info = request.session.get('save_info')
order = get_object_or_404(Order, order_number=order_number)
if request.user.is_authenticated:
profile = UserProfile.objects.get(user=request.user)
# Attach the user's profile to the order
order.user_profile = profile
order.save()
# Save the user's info
if save_info:
profile_data = {
'default_phone_number': order.phone_number,
'default_country': order.country,
'default_postcode': order.postcode,
'default_city': order.city,
'default_address1': order.address1,
'default_address2': order.address2,
}
user_profile_form = UserProfileForm(profile_data, instance=profile)
if user_profile_form.is_valid():
user_profile_form.save()
messages.success(request, f'Order successfully processed! \
Your order number is {order_number}. A confirmation \
email will be sent to {order.email}.')
if 'basket' in request.session:
del request.session['basket']
template = 'checkout/checkout_success.html'
context = {
'order': order,
}
return render(request, template, context)
|
[
"ralphr@outlook.com"
] |
ralphr@outlook.com
|
cdba20bfe148ff4fafae0bdf0cd76f543d590cb8
|
50b2f16f0fcd2d0f1b54471dcc9604e87b401b25
|
/core/lib/datetime_converters.py
|
f1627c6c8cfc4f6ce7bb39fa811df0832cabee5b
|
[] |
no_license
|
nickmatsnev/CRMCars
|
cf5ca47f5adce8e951386f2bad5d58305a638443
|
f99ccfa392ac5769b707239ac73ab0fdeb010d8a
|
refs/heads/master
| 2023-08-03T14:15:36.253308
| 2019-08-23T21:02:47
| 2019-08-23T21:02:47
| 411,224,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from datetime import datetime
from core.lib.constants import BASE_DATE
from core.lib.constants import BASE_DATETIME
def datetime_converter(input):
if input is None:
my_datetime = BASE_DATETIME
else:
my_datetime = input
ret = datetime.strptime(my_datetime, '%Y-%m-%d %H:%M:%S')
return ret.__str__()
def date_converter(input):
if input is None:
date = BASE_DATE
else:
date = input
try:
ret = datetime.strptime(date, '%Y-%m-%d').date()
except:
try:
ret = datetime.strptime(date, '%d.%m.%Y').date()
except:
ret = datetime.strptime(input, '%Y-%m-%d %H:%M:%S')
return ret.__str__()
|
[
"moalexv@mail.ru"
] |
moalexv@mail.ru
|
2e69daa7c245f25b1a0ca826480e3e6a788c0129
|
3ca33805ab72db091c27372d7e6e7f0d982d43cd
|
/argux_server/trigger/__init__.py
|
aa0eaa86d1e64a3ac54e60da716c73c2ff98f6ef
|
[
"Apache-2.0"
] |
permissive
|
gitter-badger/server-4
|
f239abf489c0d4d716b269104abaf0af2f07a130
|
07b6d4d4c5f58f8259179426feac6bf8b40dea52
|
refs/heads/master
| 2021-01-21T03:22:26.550105
| 2016-05-25T10:32:38
| 2016-05-25T10:32:38
| 59,810,905
| 0
| 0
| null | 2016-05-27T06:47:33
| 2016-05-27T06:47:33
| null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
"""Trigger Module for Worker-Class."""
from threading import (
Thread
)
import time
from sqlalchemy.orm import (
sessionmaker
)
from argux_server.dao import DAO
class TriggerWorker(Thread):
"""
TriggerWorker class.
Evaluates all triggers and creates alert objects.
"""
def __init__(self):
super(TriggerWorker, self).__init__()
self.daemon = True
def run(self):
Session = sessionmaker()
session = Session()
dao = DAO(session)
"""Thread body."""
while True:
# Run once a minute.
triggers = dao.trigger_dao.get_all_triggers()
for trigger in triggers:
dao.trigger_dao.evaluate_trigger(trigger)
session.flush()
session.commit()
try:
time.sleep(10)
            except KeyboardInterrupt:
                break
session.close()
|
[
"stephan@xfce.org"
] |
stephan@xfce.org
|
336ed3e2e152a0f3a8a294dc2ecd0844ae2d4408
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/bps_cs22891-188/sdB_bps_cs22891-188_coadd.py
|
07aa8963d07237f7852743287c0b29cf1dfe6704
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[293.160958,-60.760281], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_bps_cs22891-188/sdB_bps_cs22891-188_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_bps_cs22891-188/sdB_bps_cs22891-188_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
da5074200b2a4fd42fd0f6dbb7664cb97e9e57cc
|
fd12e9ad5a78be7cc10776ab4beb88c6c2cc37a8
|
/testproject/testproject/settings.py
|
088ec853dd549f523fd7c0218cfe62f9355f1866
|
[
"BSD-3-Clause"
] |
permissive
|
squarepegsys/django-sass
|
5467752c70ef8c088a4609dc50d193390a27a769
|
3e5b59a5f7f79a283efa77ba1d6d189ce4b1abba
|
refs/heads/master
| 2022-11-27T05:18:49.491716
| 2020-08-01T19:31:16
| 2020-08-01T19:31:16
| 284,320,948
| 0
| 0
| null | 2020-08-01T18:55:48
| 2020-08-01T18:55:47
| null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
"""
Django settings for testproject project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-_wl=tq26(*wyvfza+ncg_436c53pu81d=07j62+vm5y2pc)f^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app1',
'app2',
'django_sass',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"salvino@coderedcorp.com"
] |
salvino@coderedcorp.com
|
98cc6ae66252911a9e33c16162428f6939d9e972
|
5705ea1ee285738a7176831d08707039977a1283
|
/mockserver/settings.py
|
3681ff27df14e5aca76a90e5416f542f39793c0d
|
[] |
no_license
|
officina/vortice-mock
|
70d1c87734529210df5c30876d551a2c7e81f346
|
03cfdb68f17196abfa786e996c02bbbcb0171fac
|
refs/heads/master
| 2020-03-17T11:56:21.833831
| 2018-06-20T16:16:44
| 2018-06-20T16:16:44
| 133,569,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Django settings for mockserver project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from devicesim.apps import DevicesimConfig
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e=cj_2*sxs8h5q*ebodak=!3kl+jv^qz5d76nj5es%3c1iy)-s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'devicesim.apps.DevicesimConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mockserver.urls'
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mockserver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"andrea.maschio@gmail.com"
] |
andrea.maschio@gmail.com
|
5d78979a0c009d521bcec91337e3895092584f07
|
48e58e60489ddaef6aa40c7ade5f5891269babe6
|
/lib/plotting.py
|
944e9e2cc5ed6b6ba4515babd5cfa41ce65d8c9c
|
[] |
no_license
|
jsonbao/adults-dataset-machinelearning
|
7a552fd9f77074434380f1da4bbcfbec061aae3d
|
dd87d0a83fe357bd1474c8ea8160f0a77df42693
|
refs/heads/master
| 2020-12-29T00:59:10.237349
| 2016-05-02T04:24:20
| 2016-05-02T04:24:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,145
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce  # reduce is used below and is not a builtin in Python 3
def bar_graph(title, name1,name2,name3, x_data,y_data, maxy=None,ylabel=None):
#Create values and labels for bar chart
values = y_data
inds = x_data
labels = [name1,name2,name3]
#Plot a bar chart
plt.figure(1, figsize=(6,4)) #6x4 is the aspect ratio for the plot
plt.bar(inds, values, align='center') #This plots the data
plt.grid(True) #Turn the grid on
plt.ylabel(ylabel) #Y-axis label
plt.xlabel("Method") #X-axis label
plt.title(ylabel +" vs Method - " +title) #Plot title
plt.xlim(0.5,3.5) #set x axis range
plt.ylim(0,maxy) #Set yaxis range
#Set the bar labels
plt.gca().set_xticks(inds) #label locations
plt.gca().set_xticklabels(labels) #label values
#Make sure labels and titles are inside plot area
plt.tight_layout()
#Save the chart
plt.savefig("./Figures/"+title+ylabel+"_bar_chart.pdf")
#Displays the charts.
#You must close the plot window for the code following each show()
#to continue to run
#plt.show()
    ##clear graph for next set
plt.clf()
def line_graph_alpha_error(title, name1,name2,name3, x_data,y_data,maxy=None,ylabel=None):
#Create values and labels for line graphs
values = y_data
inds = x_data
labels =[name1,name2,name3]
flatteny = reduce(list.__add__, (list(mi) for mi in y_data))
#Plot a line graph
plt.figure(2, figsize=(6,4)) #6x4 is the aspect ratio for the plot
plt.plot(inds,values[0],'or-', linewidth=3) #Plot the first series in red with circle marker
    plt.plot(inds,values[1],'sb-', linewidth=3) #Plot the second series in blue with square marker
    plt.plot(inds,values[2],'^g-', linewidth=3) #Plot the third series in green with ^ marker
#This plots the data
plt.grid(True) #Turn the grid on
plt.ylabel("Error") #Y-axis label
plt.xlabel("alpha Values") #X-axis label
plt.title("Error vs alpha Value - " +title) #Plot title
plt.xlim(-1,max(x_data)*1.1) #set x axis range
plt.ticklabel_format(style='sci', axis='x')
plt.ylim(0,max(flatteny)*1.1) #Set yaxis range
plt.legend(labels,loc="best")
#Make sure labels and titles are inside plot area
plt.tight_layout()
#Save the chart
plt.savefig("./Figures/"+title+"_line_plot.pdf")
#Displays the plots.
#You must close the plot window for the code following each show()
#to continue to run
##plt.show()
    ##clear graph for next set
plt.clf()
def plot_img_array(B,patch_size,grey=False):
#This function displays the first 100 elements of an image patch bases.
#B is expected to have shape (N,Q) where Q = H*W*3, where H = patch_size[0], and
#W=patch_size[1]. Each row of B is converted to a (HxWx3) array, scaled to be positive,
#and then displayed as an image.
S = min(10,np.ceil(np.sqrt(B.shape[0])))
N = min(100,B.shape[0])
for i, comp in enumerate(B[:N]):
plt.subplot(S, S, i + 1)
comp=comp-min(comp.flatten())
comp=comp/max(comp.flatten())
if(grey==False):
plt.imshow(comp.reshape((patch_size[0],patch_size[1],3)),interpolation="nearest")
else:
plt.imshow(comp.reshape((patch_size[0],patch_size[1])),interpolation="nearest",cmap='gray')
plt.xticks(())
plt.yticks(())
|
[
"yongbin999@gmail.com"
] |
yongbin999@gmail.com
|
1dbbb51e92472953d51cab51bc50828513d980ab
|
7a3c7778855cca805aebd56bf9d4599ebcc3afb6
|
/overhead.py
|
a4e06d036fff72e2ba7b4bb7357d68790d64e794
|
[
"MIT"
] |
permissive
|
hcooper/overhead
|
b95ed1afc86ba47c059bcdce82aef3c5183fcf1f
|
ddd73acd71ae375fb2b9206760fdf6da1e527852
|
refs/heads/master
| 2020-03-20T08:48:54.598092
| 2019-12-17T07:54:33
| 2019-12-17T07:54:33
| 137,319,394
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,356
|
py
|
#!/usr/bin/env python3
import csv
import json
import re
from typing import List
import requests
class Aircraft(object):
""" Object to hold details of a particular aircraft """
def __init__(self, track, model, orig, dest, flight_no):
self.track = track
self.model_code = model
self.orig = orig
self.dest = dest
self.flight_no = flight_no
@property
def orig_speech(self):
return airports.get(self.orig, " ".join(self.orig)).replace(
" International Airport", ""
)
@property
def dest_speech(self):
return airports.get(self.dest, " ".join(self.dest)).replace(
" International Airport", ""
)
@property
def airline(self):
return airlines.get(self.airline_code, " ".join(self.airline_code))
@property
def airline_code(self):
return self.flight_no[:2]
@property
def model_name(self):
return models.get(self.model_code, self.model_code)
@property
def direction(self):
return track_to_direction(self.track)
@property
def model_speech(self):
return model_to_speech(self.model_name)
@property
def direction_arrow(self):
return track_to_direction(self.track, True)
def track_to_direction(track, arrow=False):
""" For a heading in degrees, return a compass direction """
if track >= 338 or track <= 22:
return "↑" if arrow else "north"
if track >= 23 and track <= 67:
return "↗" if arrow else "north-east"
if track >= 68 and track <= 112:
return "→" if arrow else "east"
if track >= 113 and track <= 157:
return "↘" if arrow else "south-east"
if track >= 158 and track <= 202:
return "↓" if arrow else "south"
if track >= 203 and track <= 247:
return "↙" if arrow else "south-west"
if track >= 248 and track <= 292:
return "←" if arrow else "west"
if track >= 293 and track <= 337:
return "↖" if arrow else "north-west"
def model_to_speech(model: str) -> str:
""" Covert the model name into a format which makes text-to-speech engines
pronounce it realisticly, e.g.
Boeing 737-400 --> Boeing 7 3 7 400
Airbus A319 --> Airbus A 3 19
"""
if model == "unmatched":
return "Unmatched aircraft"
if model == "De Havilland Canada DHC-8-400 Dash 8Q":
return "Bombardier Dash 8 Q400"
res = re.match("(.*) A?(.{3})-?(.{3})?", model)
if res:
if res.group(1) == "Boeing":
resp = "A Boeing " + " ".join(res.group(2))
if res.group(3):
resp += " " + res.group(3)
return resp
if res.group(1) == "Airbus":
resp = "An Airbus A 3 " + res.group(2)[1:]
if res.group(3):
resp += " " + res.group(3)
return resp
return model # it's a model we don't have a custom lexicon for
def get_aircrafts(bounds: str) -> List[Aircraft]:
url = (
"https://data-live.flightradar24.com/zones/fcgi/feed.js?bounds={}"
"&faa=1&mlat=1&flarm=1&adsb=1&gnd=1&air=1&vehicles=1&estimated=1&maxage=14400&gliders=1&stats=1"
).format(bounds)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0"
}
data = json.loads(requests.get(url, headers=headers).text)
for f in ["full_count", "version", "stats"]:
data.pop(f) # strip unwanted fields
return [Aircraft(v[3], v[8], v[11], v[12], v[13]) for v in data.values()]
# There's bound to be a better way to do this than using globals.
global models
models = {id2: name for name, id1, id2 in list(csv.reader(open("planes.dat")))}
global airports
airports = {a["iata"]: a["name"] for a in json.loads(open("airports.json").read())}
global airlines
airlines = {a["iata"]: a["name"] for a in json.loads(open("airlines.json").read())}
if __name__ == "__main__":
bounds = "51.72,51.44,-0.59,0.34" # Central London
for aircraft in get_aircrafts(bounds):
print(
"{:<3}->{:<3} {:<6} {:<1} ({})".format(
aircraft.orig,
aircraft.dest,
aircraft.flight_no,
aircraft.direction_arrow,
aircraft.model_name,
)
)
|
[
"coops@fawk.eu"
] |
coops@fawk.eu
|
63dc625a4015936f64d30995ee0d8ba96e695ffb
|
ff8bd1967aeb66ffec17c3ae78102c168414761a
|
/PythonIntroduction/FlowControl/For/sumofitems.py
|
a8ccfad115b90247fc45b425f5fb140b3c8e07b3
|
[] |
no_license
|
charan2108/Pythondirectories
|
d5cbec41db0685bbfc41a3135edc3e41fd223474
|
30b4a9f9171fe2295efbf12cbf9cbcf88bdf29b3
|
refs/heads/master
| 2023-03-28T03:29:25.039491
| 2021-03-29T02:45:19
| 2021-03-29T02:45:19
| 352,484,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
product_cost = [1000, 3000, 5000]
sum = 0
for i in product_cost:
sum =(sum + i)
print(sum)
|
[
"sumacharan.adabala@gmail.com"
] |
sumacharan.adabala@gmail.com
|
1d13978884bc27abab748785e5b1509cb1d641ab
|
b0b904a2ac11f1e3e3e9c7f1832a262be4bf1120
|
/Raspberry Pi/Tests/test_obd2_stats.py
|
0e1aad24c3a4d76e09f955cb0080b41f276862eb
|
[] |
no_license
|
aschuldhaus/SmartAVL
|
8e3ab6de852ecbae8529b3751f977974d32fd7de
|
6f2e3e5acc2a7cad890d97228d91f2affd8f21d7
|
refs/heads/master
| 2020-04-19T13:32:11.609947
| 2019-04-12T21:06:59
| 2019-04-12T21:06:59
| 168,220,068
| 0
| 1
| null | 2019-04-11T05:27:13
| 2019-01-29T20:04:34
|
Java
|
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
# Unit tests for obd2_stats.py
import unittest
import can
import sys
sys.path.append("..")
from obd2_stats import *
class TestOBD2Stats(unittest.TestCase):
def test_extract_speed(self):
msg = can.Message(arbitration_id=OBD2_RESPONSE_ID,
data=[0x06, 0x41, PID_SPEED, 0x19, 0x00, 0x00, 0x00, 0x00])
self.assertEqual(extract_speed(msg), 0x19)
def test_extract_rpm(self):
msg = can.Message(arbitration_id=OBD2_RESPONSE_ID,
data=[0x06, 0x41, PID_RPM, 0x0c, 0xda, 0x00, 0x00, 0x00])
self.assertEqual(extract_rpm(msg), 0xcda / 4)
def test_extract_distance_since_clear(self):
msg = can.Message(arbitration_id=OBD2_RESPONSE_ID,
data=[0x06, 0x41, PID_DISTANCE_SINCE_CLEAR, 0xff, 0x3a, 0x00, 0x00, 0x00])
self.assertEqual(extract_distance_since_clear(msg), 0xff3a)
def test_extract_monitor_status_light_off(self):
msg = can.Message(arbitration_id=OBD2_RESPONSE_ID,
data=[0x06, 0x41, PID_MONITOR_STATUS, 0x00, 0x07, 0xe5, 0x00, 0x00])
self.assertEqual(extract_monitor_status(msg), 0)
def test_extract_monitor_status_light_on(self):
msg = can.Message(arbitration_id=OBD2_RESPONSE_ID,
data=[0x06, 0x41, PID_MONITOR_STATUS, 0x80, 0x07, 0xe5, 0x00, 0x00])
self.assertEqual(extract_monitor_status(msg), 1)
if __name__ == "__main__":
unittest.main()
|
[
"snickerless1@shaw.ca"
] |
snickerless1@shaw.ca
|
883acf8289e01f3d452d04feeb7202efdfcc7732
|
147b34b565341789530ece3a953dbdc16cbd6f66
|
/note/migrations/0010_auto_20191124_1026.py
|
76c8c1462de994e382477d67ed4bdbebb38da87f
|
[] |
no_license
|
robakrobak/NOTE_APP
|
8bfdfb291fb092e4033190e21f7799ad3e88239d
|
e95c1ecb8c3e2bc3364cb83b17f7295aee7aa307
|
refs/heads/master
| 2023-08-09T10:46:19.440507
| 2020-03-14T19:09:04
| 2020-03-14T19:09:04
| 216,201,521
| 1
| 2
| null | 2023-07-22T19:12:38
| 2019-10-19T12:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Generated by Django 2.2 on 2019-11-24 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('note', '0009_auto_20191123_1513'),
]
operations = [
migrations.AlterField(
model_name='note',
name='title',
field=models.CharField(max_length=128),
),
]
|
[
"robakrobak@protonmail.com"
] |
robakrobak@protonmail.com
|
39d4d774d2f72b80c2fc6dc86b1a82206de45e67
|
99f053c883bc610031c037fc1b038f41408acc7d
|
/cryptolite/keys.py
|
a6e7254bc563a764b716c3c03365e8027d4d1f3f
|
[
"MIT"
] |
permissive
|
davidcarboni/cryptolite-python
|
55755f4d8ee31475670b92e2c3e741790e84cded
|
e40ce2f9584fdd9015caad9bf443a50201df743a
|
refs/heads/master
| 2020-03-14T17:58:35.238920
| 2018-05-02T17:37:28
| 2018-05-02T17:37:28
| 131,732,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,070
|
py
|
"""
Generates cryptographic keys.
Key types
---------
* Secret keys (either randomly generated or deterministic, based on a password).
* Public-Private key pairs.
How to use keys
---------------
* Secret keys are used for encryption (see Crypto).
* Secret keys are also used to secure other secret keys and private keys (see KeyWrapper)
* Public-Private keys are used for digital signatures (see DigitalSignature).
* Public-Private keys are also used for key exchange (see KeyExchange).
Managing encryption keys
------------------------
A good applied cryptography design is all about how you manage secrets: keys and passwords.
Assuming you're using primitives correctly (that's what Cryptolite does for you)
then it'll be all about your key management design.
Here are some examples, based on using secret keys to encrypt user data,
to give you a primer on the things you'll want to consider when designing with encryption.
In these examples, we're choosing between random and deterministic (password-based) keys.
Deterministic key design
------------------------
Deterministic keys are the easiest to manage as you don't need to store the key itself.
Providing the password used to generate the key is properly managed and is available
when you need access to the key, the key can be reliably regenerated each time.
The drawback is that if you want to generate more than one key you'll need more than one password.
However, if you do only need one key, this approach can be ideal as you could use, say, the user's
plaintext password to generate the key. You never store a user's plaintext password (see
``password.hash``) so the right key can only be generated when the user logs in.
Bear in mind however that if the user changes (or resets) their password this will generate a
different key, so you'll need a plan for recovering data encrypted with the old key and
re-encrypting it with the new one.
Random key design
-----------------
Random keys are simple to generate, but need to be stored because there's no way
to regenerate the same key.
To store a key you can use ``key_wrapper.wrapSecretKey()``.
This encrypts the key which means it can be safely stored in, for example,
a database or configuration value.
The benefit of the ``key_wrapper`` approach is that
when a user changes their password you'll only need to re-encrypt the stored keys using a new
``key_wrapper`` initialised with the new password, rather than have to re-encrypt all
data that was encrypted with a key generated based on the user's password
(as in a deterministic design).
Password recovery and reset
---------------------------
In both designs, when a user changes their password you will have the old and the new plaintext
passwords, meaning you can decrypt with the old and re-encrypt with the new.
The difficulty comes when you need to reset a password, because it's not possible to recover
the old password, so you can't recover the encryption key either. In this case you'll either
need a backup way to recover the encryption key, or you'll need to be clear that data cannot
be recovered at all.
Whatever your solution, remember that storing someone's password in any recoverable form is not OK,
so you'll need to put some thought into the recovery process.
"""
import os
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptolite import byte_array
__author__ = "David Carboni"
backend = default_backend()
# Please treat the following values as constants.
# They are implemented as variables just in case you do need to alter them.
# These are the settings that provide "right" cryptography so you'll need to
# know what you're doing if you want to alter them.
"""The secret key algorithm."""
SYMMETRIC_ALGORITHM = "AES"
"""The key size for secret keys."""
SYMMETRIC_KEY_SIZE = 256
"""The algorithm to use to generate password-based secret keys."""
SYMMETRIC_PASSWORD_ALGORITHM = "PBKDF2WithHmacSHA1"
"""The number of iteration rounds to use for password-based secret keys."""
SYMMETRIC_PASSWORD_ITERATIONS = 1024
"""The public-private key pair algorithm."""
ASYMMETRIC_ALGORITHM = "RSA"
"""The key size for public-private key pairs."""
ASYMMETRIC_KEY_SIZE = 4096
def new_secret_key():
"""
Generates a new secret (also known as symmetric) key for use with AES.
The key size is determined by SYMMETRIC_KEY_SIZE.
:return: A new, randomly generated secret key.
"""
# FYI: AES keys are just random bytes from a strong source of randomness.
return os.urandom(SYMMETRIC_KEY_SIZE // 8)
def generate_secret_key(password, salt):
"""
Generates a new secret (or symmetric) key for use with AES using the given password and salt values.
Given the same password and salt, this method will always (re)generate the same key.
:param password: The starting point to use in generating the key. This can be a password, or any
suitably secret string. It's worth noting that, if a user's plaintext password is
used, this makes key derivation secure, but means the key can never be recovered
if a user forgets their password. If a different value, such as a password hash is
used, this is not really secure, but does mean the key can be recovered if a user
forgets their password. It's all about risk, right?
:param salt: A value for this parameter can be generated by calling
``generate.salt()``. You'll need to store the salt value (this is ok to do
because salt isn't particularly sensitive) and use the same salt each time in
order to always generate the same key. Using salt is good practice as it ensures
that keys generated from the same password will be different - i.e. if two users
                 use the same password, having a salt value avoids the generated keys being
                 identical, which might give away someone's password.
:return: A deterministic secret key, defined by the given password and salt
"""
if password is None:
return None
salt_bytes = bytes(byte_array.from_base64(salt))
key_generator = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=SYMMETRIC_KEY_SIZE // 8,
salt=salt_bytes,
iterations=SYMMETRIC_PASSWORD_ITERATIONS,
backend=backend
)
password_bytes = password.encode("utf-8")
return key_generator.derive(password_bytes)
def new_key_pair():
"""
Generates a new public-private (or asymmetric) key pair for use with ASYMMETRIC_ALGORITHM.
The key size will be ASYMMETRIC_KEY_SIZE bits.
:return: A new, randomly generated asymmetric key pair.
"""
return rsa.generate_private_key(
public_exponent=65537,
key_size=ASYMMETRIC_KEY_SIZE,
backend=default_backend()
)
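# --- Editorial usage sketch (not part of the original module) ---
# A minimal illustration of the "deterministic key design" described in the module
# docstring, assuming this file is importable as cryptolite.keys and that
# byte_array.from_base64 accepts a standard base64-encoded salt string.
if __name__ == "__main__":
    import base64
    salt = base64.b64encode(os.urandom(16)).decode("ascii")  # store this alongside the user record
    key_one = generate_secret_key("a user's plaintext password", salt)
    key_two = generate_secret_key("a user's plaintext password", salt)
    assert key_one == key_two  # same password + same salt always regenerates the same key
    print("derived a %d-bit AES key deterministically" % (len(key_one) * 8))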
|
[
"david@carboni.io"
] |
david@carboni.io
|
3538b2cbe981187259b70a05a0484c14cde4e479
|
f04eed5e6c4499d22fb8e339667267aa59c8dfc7
|
/6.00.1x Files/ssss.py
|
557a28808dc8539dcba5391d3ddb6bb6e2877169
|
[
"Giftware"
] |
permissive
|
shanjgit/previous-work
|
4ca7e29e231498891752307ba4b04c9726f0eb67
|
664cc40bd0b97e3adc10f551e18a4a7a62e5a760
|
refs/heads/master
| 2021-01-19T20:02:57.744302
| 2017-08-24T17:57:55
| 2017-08-24T17:57:55
| 101,217,213
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
s = 'cqbauathqiqwovzvfx'
a = 'abcdefghijklmnopqrstuvwxyz'
l1, M = "", ""
for i in range(len(s)-1):
alph = s[i]
beta = s[i+1]
if l1 =='':
l1 = l1+alph
if a.index(alph)<=a.index(beta):
l1 = l1+beta
if i == len(s)-2 and len(l1)>len(M):
M = l1
elif a.index(alph)>a.index(beta):
if len(l1)>len(M):
M = l1
l1 = ''
print 'Longest substring in alphabetical order is:',M
|
[
"noreply@github.com"
] |
shanjgit.noreply@github.com
|
3e778c892e35ddad7a0a39bc1c5cfc6b2b2aecfa
|
9228193701e92d841e2f2b6f56418cf561ecf159
|
/python/system.py
|
cecb157dc9094fa91594217db01890f9ee704237
|
[] |
no_license
|
tberrueta/MPPI_Rat
|
6ff4f7302d6458ff7490478334edd7f0df66fba1
|
ccc6814c8fcd497bc3fc34629fba504c09003231
|
refs/heads/master
| 2023-01-01T01:43:26.801829
| 2020-10-24T15:15:49
| 2020-10-24T15:15:49
| 306,904,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
from utils import *
def diff_drive(x,u):
xvel = [u[0]*np.cos(x[2]),
u[0]*np.sin(x[2]),
u[1]]
return np.array(xvel).flatten()
def single_int(x,u):
xvel = [u[0],
u[1]]
return np.array(xvel).flatten()
def quadratic_objective(xvec,uvec,xdes=None,Q=None,R=None):
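    # Editorial note: returns the accumulated quadratic cost
    # sum_t (x_t - x_des)^T Q (x_t - x_des) + u_t^T R u_t, where Q and R default to
    # identity matrices and x_des defaults to the zero state.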
if Q is None:
Q = np.eye(xvec.shape[0])
if R is None:
R = np.eye(uvec.shape[0])
if xdes is None:
xd = np.zeros(xvec.shape)
elif len(xdes.shape) == 1:
xd = np.repeat(xdes.reshape(-1,1),xvec.shape[1],axis=1)
c = 0
for i in range(xvec.shape[1]):
c+=(xvec[:,i]-xd[:,i]).dot(Q).dot((xvec[:,i]-xd[:,i]).T) + uvec[:,i].dot(R).dot(uvec[:,i].T)
return c
def rattling_objective(xvec, uvec, dt=0.05, w1=1, w2=1, coord_fun=None, w_sz=20, ov=1, xdes=None, Q=None, R=None):
c = w1*quadratic_objective(xvec,uvec,xdes,Q,R)
if coord_fun is None:
r = rattling_windows(xvec.T, dt, w_sz, ov)[0]
c += w2*np.mean(r)
else:
r = rattling_windows(coord_fun(xvec).T, dt, w_sz, ov)[0]
c += w2*np.mean(r)
return c
|
[
"tberrueta@github.com"
] |
tberrueta@github.com
|
9101fca3a8ea3a3c2c454d385594c8825fe66392
|
da865ba713a95ca0d4d900c974d6952661c5d24e
|
/exercises/spark/logistic_regression.py
|
3a90b948dddc24ab03ee9588287350719d152b08
|
[] |
no_license
|
tonellotto/cloud-computing
|
7befe5f28ec7e6a61a00dec41c1c4168b79a0c9b
|
71d566f9a5ba18575f7de60638f9a2f6b5ba2bf7
|
refs/heads/master
| 2022-08-21T18:50:35.381207
| 2021-05-04T12:14:40
| 2021-05-04T12:14:40
| 253,494,101
| 14
| 15
| null | 2022-08-11T21:16:57
| 2020-04-06T12:41:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
"""
A logistic regression implementation that uses NumPy (http://www.numpy.org)
to act on batches of input data using efficient matrix operations.
"""
import sys
from random import random
from operator import add
import numpy as np ### We need to install numpy on all nodes with sudo!!!!
from pyspark import SparkContext
D = 10 # Number of dimensions
def readPoint(line):
return np.fromstring(line, dtype=np.float32, sep=' ')
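# Editorial note: each non-empty input line is expected to hold D+1 space-separated
# floats - the label y first, followed by the D feature values - for example
# "1.0 0.25 -0.5 0.1 0.9 -0.3 0.7 0.0 0.4 -0.8 0.6". readPoint parses such a line
# into a NumPy array; gradient() below treats point[0] as y and point[1:] as x.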
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: logistic_regression <file> <iterations>", file=sys.stderr)
sys.exit(-1)
master = "yarn"
sc = SparkContext(master, "LogisticRegression")
points = sc.textFile(sys.argv[1]).filter(lambda line: len(line) > 0).map(readPoint).cache()
iterations = int(sys.argv[2])
# Initialize w to a random value
w = 2 * np.random.ranf(size=D) - 1
print("Initial w: " + str(w))
def gradient(point):
y = point[0]
x = point[1:]
# For each point (x, y), compute gradient function, then sum these up
return ((1.0 / (1.0 + np.exp(-y * x.dot(w))) - 1.0) * y * x.T)
def add(x, y):
x += y
return x
for i in range(iterations):
print("On iteration %i" % (i + 1))
w -= points.map(lambda point: gradient(point)).reduce(add)
print("Final w: " + str(w))
|
[
"nicola.tonellotto@gmail.com"
] |
nicola.tonellotto@gmail.com
|
9b6b75c1c908ce3bb942074888acb287dbebe9ed
|
21f9090b4f6d6d73ccfd17f3982b75870bee4a57
|
/center_sys/account/form.py
|
58cb1e76110acaadcc79e508e952d7164d6fa96b
|
[] |
no_license
|
tmars/DS_CW
|
a41aaf181518d4ba066fe33d90eaad4c84df6bcf
|
36ae1da8319c51988ad16975762d2a5d211ffeb2
|
refs/heads/master
| 2016-09-05T22:59:03.075915
| 2014-05-29T20:55:10
| 2014-05-29T20:55:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
#coding=utf8
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(max_length=50, label='Имя пользователя')
password = forms.CharField(max_length=50, label='Пароль')
username.widget.attrs['class'] = 'form-control'
password.widget.attrs['class'] = 'form-control'
|
[
"mtalipov@DD_MOS_IT_23.hcredit.local"
] |
mtalipov@DD_MOS_IT_23.hcredit.local
|
e6ece6f7e93e27a090427560e839962053487eb3
|
15c5419733fd45a7798bf74b59e461da7866b391
|
/tutes-and-labs/week5lab-text_server.py
|
3bcb95abc1b5289e3be69bc751963e47f1778152
|
[] |
no_license
|
Kennyhcto/cs1010-21t2
|
a7314632ed8b9924cb831a419c792e64fd1ea6cc
|
fae8ecc99a45bfcb08c943bcf4222fdcf974c7e1
|
refs/heads/main
| 2023-07-16T05:21:20.031026
| 2021-08-20T02:26:07
| 2021-08-20T02:26:07
| 398,131,492
| 0
| 0
| null | 2021-08-20T02:25:41
| 2021-08-20T02:25:40
| null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
from flask import Flask, request
from pyhtml import html, title, body, label, p, table, tr, td, strong, head, form, input_
import string
app = Flask(__name__)
@app.route('/')
def main():
code = html(
head(
title("Text Server")
),
body(
form(action="analyse")
(
input_(type="text", name="text"),
input_(type="Submit", value="Analyse Text")
)
)
)
return str(code)
@app.route('/analyse', methods=["POST"])
def analyse_text():
#print(f"{request.form=}")
#print(f"all punctuation characters: {string.punctuation}")
text = request.form['text']
num_characters = len(text)
num_punc = 0
for character in text:
if character in string.punctuation or \
character in string.whitespace:
num_punc += 1
num_letters = num_characters - num_punc
words = text.split()
code = html(
head(
title("First PyHTML Program")
),
body(
p(f"Your text: {text}"),
p(f"Number of characters: {num_characters}"),
p(f"Number of letters: {num_letters}"),
p(f"Number of words: {len(words)}")
)
)
return str(code)
if __name__ == "__main__":
app.run(debug=True)
|
[
"s.mautner@unsw.edu.au"
] |
s.mautner@unsw.edu.au
|
9f68b1dfa8525979e01314b634254a27ed0f300d
|
87fb1775654e4367820e7f421ab4e8e151b15a68
|
/utils/config_utils.py
|
b5e6f502f6b5f47f3e95e43481e6188eb910911a
|
[] |
no_license
|
Walleclipse/AGPC
|
20dab082f9d4e30c2f06e6b06b6e8235561bea45
|
126bc75ca5dae1d60590d48ee4d071e7cdbd5077
|
refs/heads/master
| 2023-03-31T22:20:58.437731
| 2019-12-17T14:45:11
| 2019-12-17T14:45:11
| 189,249,804
| 35
| 7
| null | 2023-03-24T22:30:16
| 2019-05-29T15:13:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
def get_pcgn_model_config(config):
encode_num_layers = config["encoder"]["num_layers"]
encode_num_units = config["encoder"]["num_units"]
encode_cell_type = config["encoder"]["cell_type"]
encode_bidir = config["encoder"]["bidirectional"]
attn_num_units = config["decoder"]["attn_num_units"]
decode_num_layers = config["decoder"]["num_layers"]
decode_num_units = config["decoder"]["num_units"]
decode_cell_type = config["decoder"]["cell_type"]
use_user_feat = config["user_profile"]["use_user_feat"]
use_gate_memory = config["user_profile"]["use_gate_memory"]
use_user_desc = config["user_profile"]["use_user_desc"]
use_blog_user_coattn = config["user_profile"]["use_blog_user_coattn"]
use_external_desc_express = config["user_profile"]["use_external_desc_express"]
use_external_feat_express = config["user_profile"]["use_external_feat_express"]
user_feat_dim = config["user_profile"]["user_feat_dim"]
user_feat_unit = config["user_profile"]["user_feat_unit"]
user_feat_mem_unit = config["user_profile"]["user_feat_mem_unit"]
desc_rnn_unit = config["user_profile"]["desc_rnn_unit"]
desc_attn_num_units = config["user_profile"]["desc_attn_num_units"]
user_map_unit = config["user_profile"]["user_map_unit"]
return (encode_num_layers, encode_num_units, encode_cell_type, encode_bidir,
attn_num_units, decode_num_layers, decode_num_units, decode_cell_type,
use_user_feat,use_gate_memory,use_user_desc,use_blog_user_coattn,
use_external_desc_express,use_external_feat_express,
user_feat_dim,user_feat_unit,user_feat_mem_unit,
desc_rnn_unit,desc_attn_num_units,user_map_unit,
)
def get_pcgn_training_config(config):
train_file = config["training"]["train_file"]
dev_file = config["training"]["dev_file"]
source_max_length = config["training"]["source_max_length"]
target_max_length = config["training"]["target_max_length"]
desc_max_length = config["training"]["desc_max_length"]
gpu_fraction = config["training"]["gpu_fraction"]
gpu_id = config["training"]["gpu_id"]
    train_steps = config["training"]["train_steps"] # maximum number of training steps
    checkpoint_every = config["training"]["checkpoint_every"] # save a checkpoint every this many steps
    print_every = config["training"]["print_every"] # print/log progress every this many steps
batch_size = config["training"]["batch_size"]
is_beam_search = False
beam_size = 1
infer_max_iter = config["training"]["infer_max_iter"]
l2_regularize = config["training"]["l2_regularize"]
learning_rate = config["training"]["learning_rate"]
    max_checkpoints = config["training"]["max_checkpoints"] # maximum number of checkpoints to keep
    max_gradient_norm = config["training"]["max_gradient_norm"] # maximum gradient norm for clipping
return (train_file, dev_file,
source_max_length, target_max_length, desc_max_length,
gpu_fraction, gpu_id, train_steps, checkpoint_every, print_every,
batch_size,is_beam_search,beam_size,infer_max_iter,
l2_regularize,learning_rate,max_checkpoints,max_gradient_norm,
)
def get_pcgn_infer_config(config):
is_beam_search = config["inference"]["is_beam_search"]
beam_size = config["inference"]["beam_size"]
batch_size = config["inference"]["infer_batch_size"]
infer_file = config["inference"]["infer_file"]
infer_source_max_length = config["inference"]["infer_source_max_length"]
infer_target_max_length = config["inference"]["infer_target_max_length"]
infer_desc_max_length = config["inference"]["infer_desc_max_length"]
infer_max_iter = config["inference"]["infer_max_iter"]
output_path = config["inference"]["output_path"]
gpu_fraction = config["training"]["gpu_fraction"]
gpu_id = config["training"]["gpu_id"]
return (infer_file, batch_size,is_beam_search, beam_size,
infer_source_max_length, infer_target_max_length,infer_desc_max_length,infer_max_iter,
output_path, gpu_fraction, gpu_id)
|
[
"abduwali54@163.com"
] |
abduwali54@163.com
|
c9c2a1f4c8d2769981c22ebce87a57a84cb69346
|
50d74779d4a89db7ed0cc0d08452a65a76f84e93
|
/directory_analyzer.py
|
2aee995593eb8413598c451b3d8b96b2f7e4419b
|
[
"MIT"
] |
permissive
|
sayanm-10/py-modules
|
1db83f66a64b2127fa03c5756af7a42f57df2715
|
67a55f7650752bbe7b145510363b45cbb2b8f9cb
|
refs/heads/master
| 2020-04-02T22:43:14.140376
| 2018-10-26T20:30:38
| 2018-10-26T20:30:38
| 154,842,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
import os
from prettytable import PrettyTable
def print_dir_summary(dir_path):
''' given a directory name, searches that directory for Python files and summarises
- the file name
- the total number of lines in the file
- the total number of characters in the file
- the number of Python functions (lines that begin with 'def ') - you should include class methods in the number of functions
- the number of Python classes (lines that begin with 'class ') '''
try:
py_files = [f for f in os.listdir(path=dir_path) if f.endswith('.py')]
except FileNotFoundError:
print("System cannot find the path specified", dir_path)
else:
if len(py_files) > 0:
print("\nSummary for", dir_path)
pt = PrettyTable(field_names=['File Name', 'Classes', 'Functions', 'Lines', 'Characters'])
for f in py_files:
try:
                    fp = open(os.path.join(dir_path, f), 'r')
except PermissionError:
pt.add_row([f, 'NA', 'NA', 'NA', 'NA'])
else:
classes, functions, lines, chars = analyze_file(fp)
pt.add_row([f, classes, functions, lines, chars])
print(pt)
print("\n* NA denotes that file could not be read due to permission error.")
else:
print("No .py files found in", dir_path)
def analyze_file(fp):
''' given a filepointer returns a list containing
number of classes, number of functions,
number of lines and number of chars in the file'''
with fp:
line_count = 0
char_count = 0
function_count = 0
class_count = 0
for line in fp:
line_count += 1
char_count += len(line)
if line.lstrip().startswith('def '):
function_count += 1
if line.lstrip().startswith('class '):
class_count += 1
return class_count, function_count, line_count, char_count
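# --- Editorial usage sketch (not part of the original module) ---
# Summarise the Python files in the current working directory; any existing
# directory path could be passed instead of ".".
if __name__ == "__main__":
    print_dir_summary(".")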
|
[
"sayan.mukherjee108@gmail.com"
] |
sayan.mukherjee108@gmail.com
|
beac57a4cdfe2c9cfb783294e1d1516bb67d8896
|
1b66eaff887c73b69a0ae6a0c498f211fb3dc44d
|
/DeepLearningNLP/tensorflowRNN.py
|
9bf917f40593dc529493eb16ec779b9352c374da
|
[] |
no_license
|
oguzhanbbcn/DataScience
|
70f7f2bc61c25d8d9d3bfa4be1fdbc093e7fb45f
|
3ecb5b2507b263cf70f04e91256b52ebeed11226
|
refs/heads/master
| 2020-04-13T23:27:39.453786
| 2018-06-09T03:31:46
| 2018-06-09T03:31:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,494
|
py
|
from data_helpers2 import *
import numpy as np
from sklearn.model_selection import train_test_split
import nltk
from nltk.stem import WordNetLemmatizer
import tensorflow as tf
# from tensorflow.python.ops import rnn as rnn_module
from tensorflow.python.ops.rnn import rnn as get_rnn_output
from tensorflow.python.ops.rnn_cell import BasicRNNCell, GRUCell
from sklearn.utils import shuffle
# from util import init_weight, all_parity_pairs_with_sequence_labels, all_parity_pairs
def init_weight(Mi, Mo):
return np.random.randn(Mi, Mo) / np.sqrt(Mi + Mo)
class SimpleRNN:
def __init__(self, M):
self.M = M # hidden layer size
def fit(self, X, Y, batch_sz=20, learning_rate=10e-1, mu=0.99, activation=tf.nn.sigmoid, epochs=100, show_fig=False):
D = 1
N, T = X.shape # X is of size N x T(n) x D
K = len(set(Y.flatten()))
M = self.M
self.f = activation
# initial weights
# note: Wx, Wh, bh are all part of the RNN unit and will be created
# by BasicRNNCell
Wo = init_weight(M, K).astype(np.float32)
bo = np.zeros(K, dtype=np.float32)
# make them tf variables
self.Wo = tf.Variable(Wo)
self.bo = tf.Variable(bo)
# tf Graph input
tfX = tf.placeholder(tf.float32, shape=(batch_sz, T, D), name='inputs')
tfY = tf.placeholder(tf.int64, shape=(batch_sz, T), name='targets')
# turn tfX into a sequence, e.g. T tensors all of size (batch_sz, D)
sequenceX = x2sequence(tfX, T, D, batch_sz)
# create the simple rnn unit
rnn_unit = BasicRNNCell(num_units=self.M, activation=self.f)
# Get rnn cell output
# outputs, states = rnn_module.rnn(rnn_unit, sequenceX, dtype=tf.float32)
outputs, states = get_rnn_output(rnn_unit, sequenceX, dtype=tf.float32)
# outputs are now of size (T, batch_sz, M)
# so make it (batch_sz, T, M)
outputs = tf.transpose(outputs, (1, 0, 2))
outputs = tf.reshape(outputs, (T*batch_sz, M))
# Linear activation, using rnn inner loop last output
logits = tf.matmul(outputs, self.Wo) + self.bo
predict_op = tf.argmax(logits, 1)
targets = tf.reshape(tfY, (T*batch_sz,))
cost_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets))
train_op = tf.train.MomentumOptimizer(learning_rate, momentum=mu).minimize(cost_op)
costs = []
n_batches = N / batch_sz
init = tf.initialize_all_variables()
with tf.Session() as session:
session.run(init)
for i in xrange(epochs):
X, Y = shuffle(X, Y)
n_correct = 0
cost = 0
for j in xrange(n_batches):
Xbatch = X[j*batch_sz:(j+1)*batch_sz]
Ybatch = Y[j*batch_sz:(j+1)*batch_sz]
_, c, p = session.run([train_op, cost_op, predict_op], feed_dict={tfX: Xbatch, tfY: Ybatch})
cost += c
for b in xrange(batch_sz):
idx = (b + 1)*T - 1
n_correct += (p[idx] == Ybatch[b][-1])
if i % 10 == 0:
print "i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N)
if n_correct == N:
print "i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N)
break
costs.append(cost)
if show_fig:
plt.plot(costs)
plt.show()
if __name__ == '__main__':
X, y, vocabulary, vocabulary_inv = load_data()
rnn = SimpleRNN(4)
rnn.fit(X, y,
batch_sz=10,
learning_rate=0.001,
epochs=2,
activation=tf.nn.sigmoid,
show_fig=False
)
|
[
"mevanoff24@gmail.com"
] |
mevanoff24@gmail.com
|
39ee95e61f896ac11d87bc34295ff6fd9acef9e7
|
44209d963ee6f6fd87cbd558e9e3743c14e369b2
|
/voice2age.py
|
207a6731daa6c809da0003eb777e0109a2612b5d
|
[] |
no_license
|
eseku/voice-to-age
|
0de01b8de025f2a1fa73ebc421deaf93891df8db
|
76f97594df2c0912918927d3eb2c55e367ee9eb7
|
refs/heads/master
| 2022-08-02T19:01:59.058927
| 2020-05-26T01:34:29
| 2020-05-26T01:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
import numpy as np
import sys
import os
import sox
import preprocess
import scipy.io.wavfile as wav
from mfcc import *
from keras.models import load_model
from keras import backend as K
from keras.models import Model
from collections import Counter
def compute_mel_log(file_name):
print (file_name)
rate, data = wav.read(file_name);
mfcc = MFCC(nfilt = 40, ncep = 13, samprate = rate,
wlen = 0.0256, frate = 100,
lowerf=133.33334, upperf=6855.4976)
mel_log = mfcc.sig2logspec(data)
return mel_log
if __name__ == "__main__":
argv = sys.argv[1:]
if(len(argv) != 1):
print("Usage: python voice2age.py <full path to wav file>")
sys.exit()
else:
model = load_model("age_range_classifier.h5")
tfm = sox.Transformer()
tfm.convert(samplerate=16000)
tfm.build(argv[0],'downsampled.wav')
mel_log = compute_mel_log('downsampled.wav')
os.remove('downsampled.wav')
preprocessed_x = preprocess.preprocess(mel_log,9)
age = model.predict(preprocessed_x)
print(age)
|
[
"joojoquartey11@gmail.com"
] |
joojoquartey11@gmail.com
|
309c3e487ead949217892accd9a0b2e14f2e9918
|
f33b24766d1d2f4af54e9a0215eca81b4bc2d7db
|
/Implementation/divisible-sum-pairs.py
|
f69ac2e26b31c73a11160463d81d66c8f7272268
|
[] |
no_license
|
Brkgng/HackerRank_Solutions
|
c5f07662ee7ef512fc9ad22ddb03fd9b25a8a7b2
|
23dcd7d7f8123cddea053460152186c206b7a8b0
|
refs/heads/master
| 2023-08-11T07:10:42.863443
| 2021-10-16T17:52:54
| 2021-10-16T17:52:54
| 383,604,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
#
# See the problem
# https://www.hackerrank.com/challenges/divisible-sum-pairs/problem
#
#
def divisibleSumPairs(n, k, ar):
count = 0
for i in range(n - 1):
for j in range(i+1, n):
if (ar[i] + ar[j]) % k == 0:
count += 1
return count
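# Worked example (editorial note): for n=6, k=3, ar=[1, 3, 2, 6, 1, 2] there are five
# index pairs i<j with (ar[i]+ar[j]) % 3 == 0, so divisibleSumPairs(6, 3, ar) == 5.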
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
ar = list(map(int, input().rstrip().split()))
result = divisibleSumPairs(n, k, ar)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"berkergng457@gmail.com"
] |
berkergng457@gmail.com
|
67315c8bfa6224070d3c979ce9a8d29c7689d65c
|
905c3fafca1e3d44c50b812da87bbfd0fd340021
|
/ThresholdAdaptativo/threshold.py
|
aeb85233b76ba327d5cfb24626ef22b8ec2a02d2
|
[] |
no_license
|
JCarlosSL/ComputacionGrafica
|
fce675a278e7b8c9b6e03123aad6fe66beb71e12
|
1f1984f7cb2ca6045b60394b6f7fbb67e5bde568
|
refs/heads/master
| 2022-11-09T18:11:21.767551
| 2020-06-30T06:46:24
| 2020-06-30T06:46:24
| 262,100,015
| 0
| 2
| null | 2020-05-22T06:46:04
| 2020-05-07T16:18:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
import numpy as np
import cv2 as cv
vecinos = [[-1,-1],[-1,0],[-1,+1],
           [0,-1],[0,+1],[+1,-1],[+1,0],[+1,+1]]
"""
vecinos=[[-1,-1],[-1,0],[-1,+1],
[0,-1],[0,+1],
[+1,-1],[+1,0],[+1,+1],
[-2,-2],[-2,-1],[-2,0],[-2,+1],[-2,+2],
[-1,-2],[-1,+2],
[0,-2],[0,+2],
[+1,-2],[+1,+2],
[+2,-2],[+2,-1],[+2,0],[+2,+1],[+2,+2]]
"""
size_vecinos=len(vecinos)
class ThresholdAdaptativo:
def __init__(self,_img):
self.img=_img;
self.rows = _img.shape[0]
self.cols = _img.shape[1]
def Threshold(self,c):
new_Matrix= [[] for i in range(self.rows)]
for i in range(self.rows):
for j in range(self.cols):
                new_Matrix[i].append(self.change(self.promedio(i,j)-c,self.img[i,j]))
return np.array(new_Matrix)
def promedio(self,h,k):
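        # Editorial note: returns the mean of the pixel at (h, k) and its in-bounds
        # neighbours listed in `vecinos`; Threshold() then marks a pixel white (255)
        # when this local mean minus the constant c is below the pixel value.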
suma=0
s=0
x=self.rows
y=self.cols
for i in range(size_vecinos):
if(0<=h+vecinos[i][0] < x and 0<=k+vecinos[i][1]< y):
suma+=self.img[h+vecinos[i][0],k+vecinos[i][1]]
s+=1
prom=(suma + self.img[h,k])/(s+1)
return prom
def change(self,prom, limite):
if(prom<limite):
return 255
else:
return 0
img = cv.imread('paper6.jpg',0)
th = ThresholdAdaptativo(img)
new = th.Threshold(5)
cv.imwrite('newpapere.png',new)
|
[
"jsonccolu@unsa.edu.pe"
] |
jsonccolu@unsa.edu.pe
|
9f423f0d26cdec42b948eb0f8833483b203737d2
|
7e8592bb05c935472253146dedde903e350b0e73
|
/samples/polybench/deriche.py
|
8505bec3720f8ffa25aff05d4a3dc5088e25bf8a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
1C4nfaN/dace
|
71d492c56a1947e9365e9294ee773963740966e0
|
4d65e0951c112160fe783766404a806b6043b521
|
refs/heads/master
| 2022-12-05T20:16:28.516525
| 2020-08-24T07:19:41
| 2020-08-24T07:19:41
| 290,365,799
| 1
| 0
|
BSD-3-Clause
| 2020-08-26T01:39:37
| 2020-08-26T01:39:36
| null |
UTF-8
|
Python
| false
| false
| 5,371
|
py
|
import math
import dace
import polybench
W = dace.symbol('W')
H = dace.symbol('H')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float32
# Dataset sizes
sizes = [{
W: 64,
H: 64,
}, {
W: 192,
H: 128,
}, {
W: 720,
H: 480,
}, {
W: 4096,
H: 2160,
}, {
W: 7680,
H: 4320,
}]
args = [
([W, H], datatype),
([W, H], datatype),
]
# Constants
alpha = datatype(0.25)
k = (datatype(1.0) - math.exp(-alpha)) * (datatype(1.0) - math.exp(-alpha)) / (
datatype(1.0) + datatype(2.0) * alpha * math.exp(-alpha) -
math.exp(datatype(2.0) * alpha))
a1 = a5 = k
a2 = a6 = k * math.exp(-alpha) * (alpha - datatype(1.0))
a3 = a7 = k * math.exp(-alpha) * (alpha + datatype(1.0))
a4 = a8 = -k * math.exp(datatype(-2.0) * alpha)
b1 = math.pow(datatype(2.0), -alpha)
b2 = -math.exp(datatype(-2.0) * alpha)
c1 = c2 = 1
def init_array(imgIn, imgOut):
w = W.get()
h = H.get()
for i in range(w):
for j in range(h):
imgIn[i, j] = datatype((313 * i + 991 * j) % 65536) / 65535.0
@dace.program(datatype[W, H], datatype[W, H])
def deriche(imgIn, imgOut):
y1 = dace.define_local([W, H], dtype=datatype)
y2 = dace.define_local([W, H], dtype=datatype)
ym1 = dace.define_local([1], datatype)
ym2 = dace.define_local([1], datatype)
xm1 = dace.define_local([1], datatype)
tm1 = dace.define_local([1], datatype)
yp1 = dace.define_local([1], datatype)
yp2 = dace.define_local([1], datatype)
xp1 = dace.define_local([1], datatype)
xp2 = dace.define_local([1], datatype)
tp1 = dace.define_local([1], datatype)
tp2 = dace.define_local([1], datatype)
for i in range(W):
@dace.tasklet
def reset():
in_ym1 >> ym1
in_ym2 >> ym2
in_xm1 >> xm1
in_ym1 = 0
in_ym2 = 0
in_xm1 = 0
for j in range(H):
@dace.tasklet
def comp_y1():
in_img << imgIn[i, j]
in_xm1 << xm1
in_ym1 << ym1
in_ym2 << ym2
out_y1 >> y1[i, j]
out_xm1 >> xm1
out_ym1 >> ym1
out_ym2 >> ym2
out_y1 = a1 * in_img + a2 * in_xm1 + b1 * in_ym1 + b2 * in_ym2
out_xm1 = in_img
out_ym2 = in_ym1
out_ym1 = out_y1
for i in range(W):
@dace.tasklet
def reset2():
in_yp1 >> yp1
in_yp2 >> yp2
in_xp1 >> xp1
in_xp2 >> xp2
in_yp1 = 0
in_yp2 = 0
in_xp1 = 0
in_xp2 = 0
for j in range(H - 1, -1, -1):
@dace.tasklet
def comp_y2():
in_img << imgIn[i, j]
in_xp1 << xp1
in_xp2 << xp2
in_yp1 << yp1
in_yp2 << yp2
out_y2 >> y2[i, j]
out_xp1 >> xp1
out_xp2 >> xp2
out_yp1 >> yp1
out_yp2 >> yp2
out_y2 = a3 * in_xp1 + a4 * in_xp2 + b1 * in_yp1 + b2 * in_yp2
out_xp2 = in_xp1
out_xp1 = in_img
out_yp2 = in_yp1
out_yp1 = out_y2
@dace.map
def comp_iout(i: _[0:W], j: _[0:H]):
in_y1 << y1[i, j]
in_y2 << y2[i, j]
out_img >> imgOut[i, j]
out_img = c1 * (in_y1 + in_y2)
for j in range(H):
@dace.tasklet
def reset3():
in_ym1 >> ym1
in_ym2 >> ym2
in_tm1 >> tm1
in_ym1 = 0
in_ym2 = 0
in_tm1 = 0
for i in range(W):
@dace.tasklet
def comp_y12():
in_img << imgOut[i, j]
in_tm1 << tm1
in_ym1 << ym1
in_ym2 << ym2
out_y1 >> y1[i, j]
out_tm1 >> tm1
out_ym1 >> ym1
out_ym2 >> ym2
out_y1 = a5 * in_img + a6 * in_tm1 + b1 * in_ym1 + b2 * in_ym2
out_tm1 = in_img
out_ym2 = in_ym1
out_ym1 = out_y1
for j in range(H):
@dace.tasklet
def reset4():
in_yp1 >> yp1
in_yp2 >> yp2
in_tp1 >> tp1
in_tp2 >> tp2
in_yp1 = 0
in_yp2 = 0
in_tp1 = 0
in_tp2 = 0
for i in range(W - 1, -1, -1):
@dace.tasklet
def comp_y22():
in_img << imgOut[i, j]
in_tp1 << tp1
in_tp2 << tp2
in_yp1 << yp1
in_yp2 << yp2
out_y2 >> y2[i, j]
out_tp1 >> tp1
out_tp2 >> tp2
out_yp1 >> yp1
out_yp2 >> yp2
out_y2 = a7 * in_tp1 + a8 * in_tp2 + b1 * in_yp1 + b2 * in_yp2
out_tp2 = in_tp1
out_tp1 = in_img
out_yp2 = in_yp1
out_yp1 = out_y2
@dace.map
def comp_iout2(i: _[0:W], j: _[0:H]):
in_y1 << y1[i, j]
in_y2 << y2[i, j]
out_img >> imgOut[i, j]
out_img = c1 * (in_y1 + in_y2)
if __name__ == '__main__':
polybench.main(sizes, args, [(1, 'imgOut')], init_array, deriche)
|
[
"talbn@inf.ethz.ch"
] |
talbn@inf.ethz.ch
|
742a2b870de99651b62e2152aad6c01ba7a5846a
|
51153c560c0e0443415e08bd2be45419cd97b2c6
|
/manage.py
|
8f3a11fbcad74fa8207272abfe6997a6f7385bb9
|
[
"MIT"
] |
permissive
|
gregschmit/django-pufsim
|
3b9c91bf87ba577e1739f48a3e52416e2365b37a
|
5578cdbd2ced5b897868ca6a84217f0843c1269b
|
refs/heads/master
| 2021-06-25T21:43:20.428504
| 2019-06-19T19:53:05
| 2019-06-19T19:53:05
| 145,260,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#!/usr/bin/env python3
"""
Proxy python3 script for the ``manage.py`` in the ``pufsim`` package.
"""
import os
import subprocess
import sys
if __name__ == "__main__":
# get this directory
d = os.path.dirname(os.path.abspath(__file__))
# set the dev directory env variable
os.environ["DJANGO_PUFSIM_DEVPATH"] = d
# spawn the app manage.py
args = sys.argv.copy()
args[0] = os.path.join(d, 'pufsim/manage.py')
args.insert(0, sys.executable)
print("REPO LEVEL EXECUTION\ncwd: {}".format(d))
print("argv: {}\nspawning...".format(str(args)))
r = subprocess.run(
args,
stdout=sys.stdout.fileno(),
stderr=sys.stderr.fileno(),
stdin=sys.stdin.fileno(),
cwd=d,
)
|
[
"schmitgreg@gmail.com"
] |
schmitgreg@gmail.com
|