| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 2-616) | content_id (string, length 40) | detected_licenses (list, length 0-69) | license_type (string, 2 classes) | repo_name (string, length 5-118) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, length 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, length 2-10.3M) | authors (list, length 1) | author_id (string, length 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b7228c351689314206f06bf6e3f4bbc36f1dac00
|
2777bf7a60bb92d45ad1c707d572d85d7b30f8fa
|
/__main__.py
|
37184c7d2c483856226401ceb819245cf1079e30
|
[
"Python-2.0"
] |
permissive
|
mitsuhiko/python-modernize
|
59c61885506f207dd9bf551ff0dbe5ac9ebc77ca
|
162161a439eee9ba124edac8b19c399020d0b227
|
refs/heads/master
| 2023-06-08T17:51:39.679335
| 2020-07-22T11:35:26
| 2020-07-22T11:35:26
| 3,354,059
| 306
| 27
|
NOASSERTION
| 2020-07-19T01:30:23
| 2012-02-04T17:18:11
|
Python
|
UTF-8
|
Python
| false
| false
| 114
|
py
|
from __future__ import absolute_import
from libmodernize import main
if __name__ == '__main__':
main.main()
|
[
"brett@python.org"
] |
brett@python.org
|
b67e7413ebd454a0df9bebc99db9a2fd9f8592d7
|
b79a6c3a72a0cb23978e9b7d49a96a51a1870ce5
|
/grid.py
|
9d4d5441dfe4ae10433ec9f209df492abda2be00
|
[] |
no_license
|
Keyril/Pathfinder
|
81d6efdd0f65f3af39e16236029718bff83cee6c
|
c207da209dad39bfd387109ecb997acf405f4f8c
|
refs/heads/master
| 2020-12-21T11:47:20.339558
| 2020-01-30T20:20:30
| 2020-01-30T20:20:30
| 236,421,661
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,722
|
py
|
import time
from tkinter import *
import random
import bfs
import aStar
import aStarNums
import dijktra
window = Tk()
window.title('PathFinder')
boardFrame = Frame(window)
doubleFrame = Frame(window)
def createBoard(m, n):
global running
global startBox
global endBox
global seFlag
global board
global startX
global startY
global endX
global endY
global walls
global numFlag
numFlag = 0
running = False
startBox = endBox = startX = startY = endX = endY = None
seFlag = 0
walls = []
# clear board filling
for widget in boardFrame.winfo_children():
widget.destroy()
board = [[Label(boardFrame) for _ in range(m)] for _ in range(n)]
for i in range(0, len(board)):
for j in range(0, len(board[0])):
board[i][j] = Label(boardFrame, width=2, height=1, relief='groove')
board[i][j].grid(row=i + 5, column=j, sticky=NSEW)
board[i][j].bind("<Button-1>", lambda _, x=i, y=j: select(board, x, y))
boardFrame.pack(side=BOTTOM)
def createWithValues(n, m):
global running
global startBox
global endBox
global seFlag
global numFlag
global board
global startX
global startY
global endX
global endY
global walls
running = False
startBox = endBox = startX = startY = endX = endY = None
seFlag = 0
walls = []
numFlag = 1
# clear board filling
for widget in boardFrame.winfo_children():
widget.destroy()
board = [[Label(boardFrame) for _ in range(m)] for _ in range(n)]
for i in range(0, len(board)):
for j in range(0, len(board[0])):
num = random.randint(1, 50)
board[i][j] = Label(boardFrame, width=2, height=1, text=str(num), relief='groove')
board[i][j].grid(row=i, column=j, sticky=NSEW)
board[i][j].bind("<Button-1>", lambda _, x=i, y=j: select(board, x, y))
boardFrame.pack(side=BOTTOM)
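# Left-click handler: in wall mode it toggles a grey wall on the clicked cell;
# otherwise it places (or moves) the blue start cell and the red end cell,
# alternating between the two via seFlag.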
def select(b, x, y):
global running
global startBox
global startX
global startY
global endBox
global endX
global endY
global seFlag
global wallFlag
global walls
if running:
return
if wallFlag:
if [x, y] not in walls and b[x][y] is not startBox and b[x][y] is not endBox:
walls.append([x, y])
b[x][y].config(bg='grey')
else:
walls.remove([x, y])
b[x][y].config(bg='white')
else:
if startBox is None and [x, y] not in walls:
startBox = b[x][y]
startX = x
startY = y
startBox.config(bg="blue")
seFlag = 1
elif endBox is None and b[x][y] is not startBox and [x, y] not in walls:
endBox = b[x][y]
endX = x
endY = y
endBox.config(bg="red")
seFlag = 0
elif b[x][y] is not startBox and b[x][y] is not endBox and seFlag == 0 and [x, y] not in walls:
startBox.config(bg="white")
startBox = b[x][y]
startX = x
startY = y
b[x][y].config(bg="blue")
seFlag = 1
elif b[x][y] is not startBox and b[x][y] is not endBox and seFlag == 1 and [x, y] not in walls:
endBox.config(bg="white")
endBox = b[x][y]
endX = x
endY = y
b[x][y].config(bg="red")
seFlag = 0
def bfSearch():
global endX
global endY
grid, x, y = buildGrid()
path, seen = bfs.bfs(grid, (x, y))
path.remove(path[0])
path.remove(path[-1])
seen.remove(seen[0])
seen.remove((endX, endY))
colourBoard(seen, path)
def aStarSearch():
global endX
global endY
grid, x, y = buildGrid()
seen, path = aStar.aStar(grid, (x, y), (endX, endY))
path.remove(path[-1])
seen.remove(seen[0])
colourBoard(seen, path)
def aStarNumsSearch():
global endX
global endY
grid, x, y = buildGrid()
seen, path = aStarNums.aStar(grid, (x, y), (endX, endY))
path.remove(path[-1])
seen.remove(seen[0])
colourBoard(seen, path)
def dijkstraSearch():
global endX
global endY
grid, x, y = buildGrid()
seen, path = dijktra.dijkstra(grid, (x, y), (endX, endY))
path.remove(path[-1])
seen.remove(seen[0])
seen.remove([endX, endY])
colourBoard(seen, path)
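# buildGrid flattens the Tk Label board into a plain 2D list for the search
# modules: numeric cell weights from the label text when numFlag is set
# (0 otherwise), 'S' for the start cell, 'X' for the end cell and 'W' for each
# wall; it only returns (grid, startX, startY) once both endpoints are chosen.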
def buildGrid():
global board
global startX
global startY
global endX
global endY
global walls
global numFlag
grid = [[0] * len(board[0]) for _ in range(len(board))]
if numFlag:
for i in range(len(board)):
for j in range(len(board[0])):
grid[i][j] = (int(board[i][j].cget('text')))
if startX is not None and startY is not None:
grid[startX][startY] = 'S'
if endX is not None and endY is not None:
grid[endX][endY] = 'X'
for w, z in walls:
grid[w][z] = 'W'
if startX is not None and startY is not None and endX is not None and endY is not None:
return grid, startX, startY
def colourBoard(seen, path):
global running
running = True
for s1, s2 in seen:
boardFrame.update()
time.sleep(.05)
board[s1][s2].config(bg="orange")
for p1, p2 in path:
boardFrame.update()
time.sleep(.100)
board[p1][p2].config(bg="green")
running = False
def SetWalls():
global wallFlag
if wallFlag:
wallFlag = False
else:
wallFlag = True
seFlag = wallFlag = running = numFlag = 0
startBox = startX = startY = endBox = endX = endY = board = None
walls = []
inputFrame = Frame(window)
l1 = Label(inputFrame, text='Choose Board Dimensions').grid(row=0, column=0)
e1 = Entry(inputFrame, width=5)
e2 = Entry(inputFrame, width=5)
e1.insert(0, 2)
e2.insert(0, 2)
el1 = Label(inputFrame, text='x', width=1)
e1.grid(row=0, column=1)
el1.grid(row=0, column=2)
e2.grid(row=0, column=3)
b1 = Button(inputFrame, text="Make Board", command=lambda: createBoard(int(e1.get()), int(e2.get()))) \
.grid(row=0, column=4)
b2 = Button(inputFrame, text="Walls", command=lambda: SetWalls()).grid(row=1, column=5)
b3 = Button(inputFrame, text="BFS", command=lambda: bfSearch()).grid(row=1, column=1)
b4 = Button(inputFrame, text="A*", command=lambda: aStarSearch()).grid(row=1, column=2)
b5 = Button(inputFrame, text="A*Nums", command=lambda: aStarNumsSearch()).grid(row=1, column=3)
b6 = Button(inputFrame, text="Dijkstra", command=lambda: dijkstraSearch()).grid(row=1, column=4)
b7 = Button(inputFrame, text="Board with Values", command=lambda: createWithValues(int(e1.get()), int(e2.get()))) \
.grid(row=0, column=6)
inputFrame.pack(side=TOP)
window.mainloop()
|
[
"noreply@github.com"
] |
Keyril.noreply@github.com
|
2fbb9672c4956b8618fca557af5eb6f8f2734dac
|
61f140ba963a9ea803917e1c2ef87fe3f9475877
|
/gr/gbi-client/app/geobox/web/views/tasks.py
|
c972e020bc5b5b9f991323104d839740b7966e52
|
[
"Apache-2.0"
] |
permissive
|
Web-Dev-Collaborative/Github-Based-Projects
|
41db5fa30157cd8810d2dfd7f457e61306e2871f
|
7f8f7189337dfcc351c3215d1b2a7b5e8252c9f0
|
refs/heads/master
| 2023-07-03T10:50:11.475717
| 2021-08-04T20:53:17
| 2021-08-04T20:53:17
| 392,795,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
# This file is part of the GBI project.
# Copyright (C) 2012 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint, render_template, abort, g, flash
from flaskext.babel import _
from ..helper import redirect_back
from ...model.tasks import Task
tasks_view = Blueprint("tasks", __name__)
@tasks_view.route("/tasks")
def list():
query = g.db.query(Task).with_polymorphic("*").group_by(Task.project)
tasks = query.all()
unprojected_tasks = []
projects = []
for task in tasks:
if task.project_id:
if task.project not in projects:
projects.append(task.project)
else:
unprojected_tasks.append(task)
return render_template(
"tasks/task_list.html", tasks=unprojected_tasks, projects=projects
)
@tasks_view.route("/task/<int:id>")
def detail(id):
query = g.db.query(Task).with_polymorphic("*")
task = query.get(id)
if not task:
abort(404)
return render_template("tasks/detail.html", task=task)
@tasks_view.route("/task/<int:id>/pause", methods=["POST"])
def pause(id):
query = g.db.query(Task)
task = query.get(id)
if not task:
abort(404)
# the task process handles is_active/is_running
task.is_paused = True
g.db.commit()
flash(_("paused task successful"))
return redirect_back(".list")
@tasks_view.route("/task/<int:id>/start", methods=["POST"])
def start(id):
query = g.db.query(Task)
task = query.get(id)
if not task:
abort(404)
task.is_paused = False
g.db.commit()
flash(_("start task successful"))
return redirect_back(".list")
@tasks_view.route("/task/<int:id>/remove", methods=["POST"])
def remove(id):
task = g.db.query(Task).with_polymorphic("*").filter_by(id=id).first()
if not task:
abort(404)
g.db.delete(task)
g.db.commit()
flash(_("delete task successful"))
return redirect_back(".list")
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
2dcb9548eb9fa671ec9f09da36d38eac661f678e
|
2cdbbf8ffbe787c5e91b0c2176d853897e286dfb
|
/blog/migrations/0002_permissionlist_rolelist_user.py
|
7cf01c60293ba1bd9309d79aef2380d57db5538f
|
[] |
no_license
|
liying1993/lyblog
|
43134d9b7ecd462c119ea90438d7e297a1dbfcac
|
e719d985cc2b257dc3fdc0b9f5cd75aeb3ac3b38
|
refs/heads/master
| 2021-01-21T12:01:25.070305
| 2017-05-19T06:34:09
| 2017-05-19T06:34:09
| 91,772,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-17 08:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('url', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='RoleList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('permission', models.ManyToManyField(blank=True, null=True, to='blog.PermissionList')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(db_index=True, max_length=40, unique=True)),
('email', models.EmailField(max_length=255)),
('is_active', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('nickname', models.CharField(max_length=64, null=True)),
('sex', models.CharField(max_length=2, null=True)),
('role', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.RoleList')),
],
options={
'abstract': False,
},
),
]
|
[
"13040871401@163.com"
] |
13040871401@163.com
|
dd0d49ae1d1f4977d218d13bb5e40ba88e66955e
|
8f011c1bcc2702d2d3a98a11900efbc95f0c9fc8
|
/tests/test_authresource.py
|
df7f06a2a4bb1025f34532cccc95587a3b0569d2
|
[
"Apache-2.0"
] |
permissive
|
apigram/HospitalWaiterAuthService
|
c66b3dc07732116dc51c8634682c8cfbcba1e9f3
|
9fcff5c215f3ec99658ab2b2d300dd6f511d52fc
|
refs/heads/master
| 2020-03-18T13:42:36.336916
| 2018-11-08T11:07:48
| 2018-11-08T11:07:48
| 134,802,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import os
import tempfile
import pytest
from app import app
@pytest.fixture
def client():
db_fd, app.config['DATABASE'] = tempfile.mkstemp()
app.config['TESTING'] = True
client = app.test_client()
with app.app_context():
app.init_db()
yield client
os.close(db_fd)
os.unlink(app.app.config['DATABASE'])
def test_empty_db(client):
"""Start with a blank database."""
rv = client.get('/')
assert b'No entries here so far' in rv.data
|
[
"andrew.pigram@toukanlabs.com"
] |
andrew.pigram@toukanlabs.com
|
c0db3478511e45f7764b489091567f96ebd70cff
|
03ee40240b2a44e55ab2982e962c37436dfa2afc
|
/crawler/ongoing/lotter/lecai/spider.py
|
af09250b6c528d6acad001a107abc14ea1b2da7a
|
[] |
no_license
|
535521469/fetch_sth
|
81583a19de42bf933d83f08b33036ef83fe21dd3
|
a74bd3697d96559ff345e2d481c5ee2dab0ef3db
|
refs/heads/master
| 2021-01-25T07:34:48.128448
| 2013-03-24T13:58:08
| 2013-03-24T13:58:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
'''
Created on 2013-3-20
@author: corleone
'''
from scrapy.spider import BaseSpider
class LeCaiHomeSpider(BaseSpider):
home_url = u"http://www.lecai.com"
|
[
"535521469@qq.com"
] |
535521469@qq.com
|
3183352386e354fae0c15f240db4fece609a5626
|
1f6d8f78d8b898816cbeca667a4e08b0931d3611
|
/split_and_join.py
|
011566c5115d7d0d654b172fa52c7aee26010c40
|
[] |
no_license
|
scottwedge/HackerRank
|
392293db64d0f25d74a984dd5303881caecb6f34
|
a4095622c8f4c112219906c85e4a18e738ed33bc
|
refs/heads/master
| 2022-05-12T04:49:34.902408
| 2022-05-12T02:57:23
| 2022-05-12T02:57:23
| 242,226,409
| 0
| 0
| null | 2020-02-21T20:38:32
| 2020-02-21T20:38:31
| null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
#!/usr/bin/env python3
# In Python, a string can be split on a delimiter.
# Example:
# >>> a = "this is a string"
# >>> a = a.split(" ") # a is converted to a list of strings.
# >>> print a
# ['this', 'is', 'a', 'string']
#
# Joining a string is simple:
# >>> a = "-".join(a)
# >>> print a
# this-is-a-string
#
# Task
# You are given a string. Split the string on a " " (space) delimiter and join using a - hyphen.
#
# Function Description
# Complete the split_and_join function in the editor below.
#
# split_and_join has the following parameters:
# string line: a string of space-separated words
#
# Returns
# string: the resulting string
#
# Input Format
# The one line contains a string consisting of space separated words.
#
# Sample Input: this is a string
# Sample Output: this-is-a-string
import pytest
def test1():
assert split_and_join("This is a line") == "This-is-a-line"
def split_and_join(line):
result = line.split(sep = " ")
result = "-".join(result)
return result
if __name__ == "__main__":
line = "This is a line"
res = split_and_join(line)
print(res)
|
[
"scott.wedge@gmail.com"
] |
scott.wedge@gmail.com
|
84e834cf23653a27be4d8a915b1552f2c7d35a09
|
1bc322d9d8d787eaae079ef39a86809661fee47e
|
/scribble/diagrams.py
|
4954097423ce063d57462e0834aebaa35c1e6892
|
[
"Apache-2.0"
] |
permissive
|
seanmao/scribble
|
46179f566d430ac95d18e8ebf77486897c81a6a6
|
82647f852e191717169f212be8886a568a4a29f5
|
refs/heads/master
| 2022-11-30T10:58:28.894513
| 2020-08-14T20:24:07
| 2020-08-14T20:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,259
|
py
|
# Copyright 2019 SiFive, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You should have received a copy of LICENSE.Apache2 along with
# this software. If not, you may obtain a copy at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import html
from typing import List, Union
from pathlib import Path
from base64 import b64encode
import subprocess
import json
from scribble.exceptions import DocumentException
from scribble.section import Text, text
from scribble.path_interpolation import pathLookup
@text
def Figure(
img: Union[str, bytes], *, suffix="", title="", id="", width="", **kwargs
) -> Text:
"""
Include an image file or image bytes in the document.
This routine "hides" a bunch of details
- is the image inlined?
- how are the title and id formatted?
- sets the width if requested
"""
# Generate title and reference ids if requested.
if id:
yield f"[[{id}]]\n"
if title:
yield f".{title}\n"
# Generate the asciidoc image call.
yield from Image(img, alt=title, width=width, suffix=suffix, **kwargs)
def Image(img: Union[str, bytes], *, alt="", width="", suffix="", **kwargs) -> Text:
"""
A document section consisting solely if an image.
"""
# Create a data source url from the image data
if isinstance(img, str):
src = dataURL(img, **kwargs)
elif isinstance(img, bytes) and suffix:
src = dataUrlFromBytes(img, suffix)
else:
raise DocumentException(
f"Image - must pass file name or bytes+suffix id={id} alt={alt}"
)
# Create the asciidoc "image::...." with an optional width.
w = f", width={width}" if width else "" # optional width
img = f"image::{src}[{alt}{w}]"
return Text(img)
def dataURL(file: str, _file_=None, **kwargs) -> str:
"""
Convert an image file into an inlined data url.
"""
path = Path(pathLookup(file, _file_))
if not path.exists():
raise DocumentException(f"Image file {file} can't be found")
return dataUrlFromBytes(path.read_bytes(), path.suffix)
mimeHeader = {".svg": "svg+xml;base64", ".png": "png;base64"}
def dataUrlFromBytes(img: bytes, suffix: str) -> str:
"""
Convert a sequence of image bytes into an inlined data url
"""
if suffix not in mimeHeader:
raise DocumentException("embedImg: Unable to embed images of type {type")
src = f"data:image/{mimeHeader[suffix]},{base64(img)}"
return src
def base64(b: bytes) -> str:
"""
Convert a sequence of bytes into a base64 string
"""
b64 = b64encode(b)
s = b64.decode() # as a string
return s
# ###################
#
# The following routines run an external program, sending it data via stdin and reading back stdout.
# They are contained in Diagrams.py because they are only used for generating diagrams.
# When others have need for them, they should be refactored to a different
# scribble/xxx source file.
#
# ###################
def pipe_json_to_str(cmd: List[str], **params) -> str:
"""
Run an external command, passing it json and reading back characters.
"""
return pipe_json_to_bytes(cmd, **params).decode()
def pipe_json_to_bytes(cmd: List[str], **params) -> bytes:
"""
Run an external command, passing it JSON and returning binary bytes.
"""
return pipe(cmd, toJSON(params).encode())
def pipe(cmd: List[str], input: bytes) -> bytes:
"""
Run an external command, passing it bytes and returning bytes.
"""
result = subprocess.run(args=cmd, input=input, check=True, stdout=subprocess.PIPE)
output = result.stdout
return output
def toJSON(obj) -> str:
# Convert HTML escapes to unicode before sending.
return html.unescape(json.dumps(obj))
def fromJSON(data: str) -> any:
return json.loads(data)
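# Minimal round-trip sketch (added for illustration, not part of the original
# module): toJSON serializes and then unescapes HTML entities, so entity-encoded
# text comes back as plain characters after fromJSON.
if __name__ == "__main__":
    payload = {"label": "Fan &amp; Pump"}
    encoded = toJSON(payload)             # '{"label": "Fan & Pump"}'
    print(fromJSON(encoded)["label"])     # prints: Fan & Pump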
|
[
"rxia@sifive.com"
] |
rxia@sifive.com
|
c854845081d775436dd9babafb974f875d309733
|
846febdd0d6762eae6f954c3d60e4073ec152fa8
|
/hw5/hw5/hw5_submit/analysis.py
|
4e3c25cae091602b672a6d0eaf9cdc4c6bbe1a7b
|
[] |
no_license
|
budiryan/CS4641
|
a29c28c9b0c9e2f04d9ddafd604499e07b664128
|
58fbfe5aa1a6892eeaaeac261c3080dd3d4cf4f6
|
refs/heads/master
| 2021-06-10T11:25:19.109764
| 2017-01-18T11:11:39
| 2017-01-18T11:11:39
| 78,998,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
# analysis.py
# -----------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
######################
# ANALYSIS QUESTIONS #
######################
# Change these default values to obtain the specified policies through
# value iteration.
def question2():
answerDiscount = 0.9
answerNoise = 0.001
return answerDiscount, answerNoise
def question3a():
answerDiscount = 0.01
answerNoise = 0.0
answerLivingReward = -0.1
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3b():
answerDiscount = 0.5
answerNoise = 0.1
answerLivingReward = -1
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3c():
answerDiscount = 0.9
answerNoise = 0.0
answerLivingReward = -0.8
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3d():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3e():
answerDiscount = 0.9
answerNoise = 0.0
answerLivingReward = 10
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question6():
answerEpsilon = None
answerLearningRate = None
return 'NOT POSSIBLE'
# If not possible, return 'NOT POSSIBLE'
if __name__ == '__main__':
print 'Answers to analysis questions:'
import analysis
for q in [q for q in dir(analysis) if q.startswith('question')]:
response = getattr(analysis, q)()
print ' Question %s:\t%s' % (q, str(response))
|
[
"budiryan@github.com"
] |
budiryan@github.com
|
961e8c2e8a8b9573defc0ddab31c2e169b9834a8
|
27089a27296b4be2d1e192d30c5132dc00e9afb0
|
/pysgrid/tests/test_sgrid_variable_wrf.py
|
d9ae63bd69bca88a4785964f046eef884f9bbc86
|
[
"BSD-3-Clause"
] |
permissive
|
sgrid/pysgrid
|
8090dd82696ff6fc92c8d8a6ed2d0eaf272f4db2
|
4eb947aec7a807cd2f9a74e20884dc0e06a96d2b
|
refs/heads/master
| 2020-05-21T22:37:55.613523
| 2017-05-25T02:02:13
| 2017-05-25T02:02:13
| 30,425,660
| 11
| 16
| null | 2017-04-19T19:35:01
| 2015-02-06T18:02:49
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
"""
Test SGrid Variables WRF.
Created on Apr 15, 2015
@author: ayan
"""
from __future__ import (absolute_import, division, print_function)
import pytest
from ..sgrid import SGrid
from ..utils import GridPadding
from ..variables import SGridVariable
from .write_nc_test_files import wrf_sgrid
@pytest.fixture
def sgrid_var_wrf(wrf_sgrid):
face_padding = [GridPadding(mesh_topology_var=u'grid',
face_dim=u'west_east',
node_dim=u'west_east_stag',
padding=u'none'),
GridPadding(mesh_topology_var=u'grid',
face_dim=u'south_north',
node_dim=u'south_north_stag',
padding=u'none')]
node_dimensions = 'west_east_stag south_north_stag'
return dict(sgrid=SGrid(face_padding=face_padding,
node_dimensions=node_dimensions),
test_var_1=wrf_sgrid.variables['SNOW'],
test_var_2=wrf_sgrid.variables['FAKE_U'])
def test_face_location_inference1(sgrid_var_wrf):
sg_var = SGridVariable.create_variable(sgrid_var_wrf['test_var_1'],
sgrid_var_wrf['sgrid'])
sg_var_location = sg_var.location
expected_location = 'face'
assert sg_var_location == expected_location
def test_edge_location_inference2(sgrid_var_wrf):
sg_var = SGridVariable.create_variable(sgrid_var_wrf['test_var_2'],
sgrid_var_wrf['sgrid'])
sg_var_location = sg_var.location
expected_location = 'edge1'
assert sg_var_location == expected_location
|
[
"ocefpaf@gmail.com"
] |
ocefpaf@gmail.com
|
73f1d72b0f3787d740c6dd4e4332008d055d261a
|
d468d517c28a7e964a989730b0a87888a27b4478
|
/test/test_trim.py
|
439e97300c68a4b11b0b3d3ff462a095f1f1a67b
|
[] |
no_license
|
connorjclark/zquest-data
|
b86aa612fcaedc57017df6fc3a1d7a41d854f9fc
|
a52b5d19cb73439137ef281110c8831774000c5e
|
refs/heads/master
| 2023-07-25T21:48:31.787397
| 2023-07-06T20:04:19
| 2023-07-06T20:20:44
| 199,337,465
| 0
| 0
| null | 2022-11-22T06:43:54
| 2019-07-28T21:09:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,592
|
py
|
import os
import unittest
import hashlib
from pathlib import Path
from zquest.extract import ZeldaClassicReader
from examples.trim_sections import trim_qst
from zquest.section_utils import SECTION_IDS
os.makedirs('.tmp', exist_ok=True)
def get_section_sizes(reader):
return {id: header.size for (id, header) in reader.section_headers.items()}
def hash_file(file):
return hashlib.md5(Path(file).read_bytes()).hexdigest()
class TestTrim(unittest.TestCase):
def get_hash(self, sections):
trim_qst('test_data/1st.qst', '.tmp/1st-trim-test.qst', sections)
return hash_file('.tmp/1st-trim-test.qst')
def test_trim_qst(self):
expected_title = 'Original NES 1st Quest\x00st Quest'
self.assertEqual(self.get_hash([]), hash_file('test_data/1st.qst'))
self.assertEqual(self.get_hash(
[SECTION_IDS.TILES]), '7196e5c6fcb9e6c0ef7dc2f1fd31a1d9')
reader = ZeldaClassicReader('.tmp/1st-trim-test.qst', {
'only_sections': [SECTION_IDS.HEADER, SECTION_IDS.MAPS],
})
reader.read_qst()
self.assertEqual(reader.header.title, expected_title)
self.assertEqual(len(reader.maps), 3)
self.assertEqual(reader.tiles, None)
self.assertEqual(get_section_sizes(reader), {
b'HDR ': 2240, b'RULE': 20, b'STR ': 5978, b'DOOR': 607, b'DMAP': 122882,
b'MISC': 1732, b'MCLR': 35, b'ICON': 8, b'ITEM': 45826, b'WPN ': 18178, b'MAP ': 555914,
b'CMBO': 2722, b'CMBA': 18432, b'CSET': 330498, b'MIDI': 32, b'CHT ': 169, b'INIT': 1034,
b'GUY ': 97792, b'LINK': 165, b'SUBS': 14074, b'FFSC': 18592, b'SFX ': 32, b'DROP': 1380, b'FAVS': 804,
})
self.assertEqual(self.get_hash(
[SECTION_IDS.TILES, SECTION_IDS.MAPS]), '21623d5f9cefbe238d3d9c94de82d0ae')
reader = ZeldaClassicReader('.tmp/1st-trim-test.qst', {
'only_sections': [SECTION_IDS.HEADER],
})
reader.read_qst()
self.assertEqual(reader.header.title, expected_title)
self.assertEqual(reader.maps, None)
self.assertEqual(get_section_sizes(reader), {
b'HDR ': 2240, b'RULE': 20, b'STR ': 5978, b'DOOR': 607, b'DMAP': 122882,
b'MISC': 1732, b'MCLR': 35, b'ICON': 8, b'ITEM': 45826, b'WPN ': 18178,
b'CMBO': 2722, b'CMBA': 18432, b'CSET': 330498, b'MIDI': 32, b'CHT ': 169, b'INIT': 1034,
b'GUY ': 97792, b'LINK': 165, b'SUBS': 14074, b'FFSC': 18592, b'SFX ': 32, b'DROP': 1380, b'FAVS': 804,
})
if __name__ == '__main__':
unittest.main()
|
[
"cjamcl@gmail.com"
] |
cjamcl@gmail.com
|
65519143a0bce015ee013f6f3fa2166c5884894f
|
4e22d14f05d0dd0cdb653aea65dc7c6b66a242da
|
/catnapping.py
|
732e47e6081332fdd9960dff43b59cce86458763
|
[] |
no_license
|
ZeroPaul/automate-python
|
1d58f4f94fc400af146734fdfdf1af418a715e37
|
64f67f2504856a2b0dd22436c9a624f7b2977635
|
refs/heads/master
| 2020-04-12T04:20:40.595759
| 2019-05-03T21:28:53
| 2019-05-03T21:28:53
| 162,292,910
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
print('''Dear Xo,
Eve's cat has arrested for catnapping, cat burglary, and extortion.
Sincelery,
Paul''')
print('Dear Xo,\n\nEve\'s cat has been arrested for catnapping, cat burglary, and extortion.\n\nSincelery,\nPaul')
|
[
"landerspaulzero@gmail.com"
] |
landerspaulzero@gmail.com
|
e42b0da7b51a142ec3db9fbb310be57cd2a9913c
|
d411f12b4fdaa89a5f3b7bb33f9ab5dd38927840
|
/hello.py
|
823c44f84ea2900a7eaf3d2d433cb7628f1f92d4
|
[] |
no_license
|
JohnDS4/CM1101
|
2cbab84551f77120788591553c5d793e1755d3d6
|
4e953e5203bdb90b0e3df5a2274953556582d46a
|
refs/heads/master
| 2020-03-31T21:35:30.953625
| 2018-10-11T12:46:48
| 2018-10-11T12:46:48
| 152,586,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
print("Hello")
print("Changes in another")
print("More changes")
print("Still more changes")
|
[
"DisandoloJJ@cardiff.ac.uk"
] |
DisandoloJJ@cardiff.ac.uk
|
e75ffdaf41a4507709644c55e7ac2e10365b0c25
|
e6b458c9962ddf4d376d240fb240340a513615f3
|
/0711/특정 웹툰 제목 회차 불러오기.py
|
440e08d5ad0b6df7029c8860da0a4347f4b0f088
|
[] |
no_license
|
inchangsung/Web_Crawling
|
05c67b6c2ed9d083dcc634f7bb279d8fd78268f7
|
d1c1a55fd8f42075800d08483ae1b7f9515a391e
|
refs/heads/main
| 2023-06-16T22:01:07.792271
| 2021-07-11T17:32:47
| 2021-07-11T17:32:47
| 377,426,095
| 0
| 0
| null | 2021-07-11T17:19:46
| 2021-06-16T08:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
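# Fetches the episode-list page of one Naver webtoon (titleId=736277) and
# prints the text of every title cell in its "viewList" table.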
import requests
from bs4 import BeautifulSoup
res = requests.get("https://comic.naver.com/webtoon/list?titleId=736277&weekday=sun")
soup = BeautifulSoup(res.text,"html.parser")
webtoons = soup.find("table",attrs ={"class":"viewList"}).find_all("td",attrs = {"class":"title"})
for webtoon in webtoons:
print(webtoon.get_text())
|
[
"inchangsung@naver.com"
] |
inchangsung@naver.com
|
e874eb748ccd66c9efe51246c9da4bfde08897ed
|
984b8a6648f2b38005bcb447fe23b1e6529a6691
|
/Leet_Code/724Find Pivot Index.py
|
97b0e3a72a6c185c34b030423a2367dfc77fe601
|
[] |
no_license
|
shalakatakale/Python_leetcode
|
c6eaff3d9e934a35bfa03238b23195c6b4b37b03
|
8c2ab2edf241b018336bd39c0fdcec91a3c594e8
|
refs/heads/main
| 2023-05-01T15:15:24.570869
| 2021-05-15T15:44:21
| 2021-05-15T15:44:21
| 349,558,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
# 724. Find Pivot Index
# complexity O(n)
# Space Complexity: O(1) to store leftsum
class Solution(object):
def pivotIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
leftsum = 0
for i, x in enumerate(nums):
if leftsum == sum(nums) - leftsum - x:
return i
leftsum += x
return -1
# Same as above but explained using weighing balance scale
from typing import List  # the List[int] annotation below needs this import outside LeetCode's judge
class Solution:
def pivotIndex(self, nums: List[int]) -> int:
# Initialization:
# Left hand side be empty, and
# Right hand side holds all weights.
total_weight_on_left, total_weight_on_right = 0, sum(nums)
for idx, current_weight in enumerate(nums):
total_weight_on_right -= current_weight
if total_weight_on_left == total_weight_on_right:
# balance is met on both sides
# i.e., sum( nums[ :idx] ) == sum( nums[idx+1: ] )
return idx
total_weight_on_left += current_weight
return -1
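# Usage sketch, added for illustration (not part of the original solution file):
if __name__ == "__main__":
    print(Solution().pivotIndex([1, 7, 3, 6, 5, 6]))  # -> 3, left and right sums are both 11
    print(Solution().pivotIndex([1, 2, 3]))           # -> -1, no pivot index exists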
|
[
"shalakatakale27@gmail.com"
] |
shalakatakale27@gmail.com
|
e21346c75c4066ba079d7264127a62e1cdf231d6
|
237602f537c32524dfba55db443229e0cbaf62d6
|
/setup/tests.py
|
28d0292a8f239c071919d6534d9d9626cd5e3dac
|
[] |
no_license
|
6abi/tdd_busca_animal
|
ddaba6afc568d1df3f7a95caad9898960b308daf
|
38adc0423189e608c8df87303eb64bf279fb7db0
|
refs/heads/master
| 2023-06-09T15:52:33.078695
| 2021-07-02T03:19:32
| 2021-07-02T03:19:32
| 381,556,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
from django.test import LiveServerTestCase
from selenium import webdriver
import time
from animais.models import Animal
class AnimaisTestCase(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Chrome('E:\\tdd_busca_animal\\chromedriver.exe')
self.animal = Animal.objects.create(
nome_animal = 'leão',
predador = 'Não',
venenoso = 'Não',
domestico = 'Não'
)
def tearDown(self):
self.browser.quit()
def test_buscanco_um_novo_animal(self):
"""Teste se um usuário encontra um animal pesquisando"""
home_page = self.browser.get(self.live_server_url + '/')
brand_element = self.browser.find_element_by_css_selector('.navbar')
self.assertEqual('Busca animal', brand_element.text)
buscar_animal_input = self.browser.find_element_by_css_selector('input#buscar-animal')
self.assertEqual(buscar_animal_input.get_attribute('placeholder'), 'Exemplo: leão, urso ...')
buscar_animal_input.send_keys('leão')
# time.sleep(2)
self.browser.find_element_by_css_selector('form button').click()
caracteristicas = self.browser.find_elements_by_css_selector('.result-description')
self.assertGreater(len(caracteristicas), 3)
|
[
"6abi.cardoso@gmail.com"
] |
6abi.cardoso@gmail.com
|
0035c145c171aa5878a816b91da572d9c60413f6
|
a4e06ffffadca0f924bc074b581ade682cff639f
|
/Form/testbox/urls.py
|
d4c40f882bc69be296b8bcd84bd3a10ee4cc09c2
|
[] |
no_license
|
adminq80/djangoexamples
|
98ed7673190b1e7e482cc4a89bb365f5bfe98a50
|
ab1d4e8221fcd238b0b3f74e1df17ce127ada72f
|
refs/heads/master
| 2021-01-17T14:06:24.938927
| 2016-01-13T15:23:51
| 2016-01-13T15:23:51
| 49,582,042
| 0
| 0
| null | 2016-01-13T15:20:39
| 2016-01-13T15:20:38
| null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index,),
url(r'^test/$', views.test,),
]
|
[
"parki06780678@gmail.com"
] |
parki06780678@gmail.com
|
3e8a512ce275320132e9ff525f640ae46e265dc7
|
9ba21becafcf9b6d8908392fddd7c5bc56f81b8c
|
/CollapsingIntervals.py
|
a0d8a1c83d4dd1d58c8459c7e61db37d03d9de4b
|
[] |
no_license
|
aliceliang22/Data-Structures
|
bb8c6ab03fa0a3c73cf695a757df1e05a95eb485
|
be0bbcd558ff63ca1cd7f3c5a9947c8afc7e0f99
|
refs/heads/main
| 2023-06-01T08:33:55.486376
| 2021-06-29T19:19:14
| 2021-06-29T19:19:14
| 381,469,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,388
|
py
|
# Name: Alice Liang
# EID: axl84
# Unique Section Number: 84825
# Assignment: #3
# Date: 6/19/20
# Input: tuples_list is an unsorted list of tuples denoting intervals
# Output: a list of merged tuples sorted by the lower number of the
# interval
def merge_tuples(tuples_list):
tuples_list.sort(key=lambda x: x[0])
newTupList = []
for i in range(len(tuples_list)):
tup = tuples_list[i]
overlap = True
if len(newTupList) == 0:
newTupList.append(tup)
else:
for j in range(len(newTupList)):
newTup = newTupList[j]
if tup[1] < newTup[0]:
newTupList.insert(j, tup)
overlap = False
break
# check if tup and newTup overlap
overlap = check_overlap(tup, newTup)
if overlap == True:
newInterval = collapse_interval(tup, newTup)
newTupList.remove(newTup)
newTupList.insert(j, newInterval)
break
if j == len(newTupList) - 1:
newTupList.append(tup)
return newTupList
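# Helpers for merge_tuples: two closed intervals overlap unless one ends before
# the other starts; collapsing an overlapping pair keeps the smaller lower
# bound and the larger upper bound.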
def check_overlap(tup1, tup2):
if tup1[0] > tup2[1] or tup1[1] < tup2[0]:
return False
else:
return True
def collapse_interval(tup1, tup2):
min = tup1[0]
max = tup1[1]
if tup2[0] < min:
min = tup2[0]
if tup2[1] > max:
max = tup2[1]
return (min, max)
# Input: tuples_list is a list of tuples denoting intervals
# Output: a list of tuples sorted by ascending order of the size of
# the interval
# if two intervals have the same size then it will sort by the
# lower number in the interval
def sort_by_interval_size(tuples_list):
tuples_list.sort(key=lambda x: x[0])
newTupList = []
for i in range(len(tuples_list)):
tup = tuples_list[i]
if len(newTupList) == 0:
newTupList.append(tup)
else:
for j in range(len(newTupList)):
newTup = newTupList[j]
if tup[1] - tup[0] < newTup[1] - newTup[0]:
newTupList.insert(j, tup)
break
if j == len(newTupList) - 1:
newTupList.append(tup)
return newTupList
# Input: no input
# Output: a string denoting all test cases have passed
def test_cases():
# write your own test cases
assert merge_tuples([(1, 2)]) == [(1, 2)]
assert merge_tuples([(14, 17), (-8, -5), (26, 29), (-20, -15), (12, 15), (2, 3), (-10, -7), (25, 30), (2, 4), (-21, -16),
(13, 18), (22, 27), (-6, -3), (3, 6), (-25, -14)]) == [(-25, -14), (-10, -3), (2, 6), (12, 18), (22, 30)]
assert merge_tuples([(12, 15), (-10, -3), (6, 7), (25, 40), (4, 6),
(9, 12), (13, 14)]) == [(-10, -3), (4, 7), (9, 15), (25, 40)]
assert merge_tuples([(-9, -3), (-10, -5), (1, 2), (1, 3),
(4, 6)]) == [(-10, -3), (1, 3), (4, 6)]
# write your own test cases
assert sort_by_interval_size([(1, 3), (4, 5)]) == [(4, 5), (1, 3)]
assert sort_by_interval_size([(-25, -14), (-10, -3), (2, 6), (12, 18), (22, 30)]) == [
(2, 6), (12, 18), (-10, -3), (22, 30), (-25, -14)]
assert sort_by_interval_size(
[(-10, -3), (4, 7), (9, 15), (25, 40)]) == [(4, 7), (9, 15), (-10, -3), (25, 40)]
assert sort_by_interval_size(
[(-10, -3), (1, 3), (4, 6)]) == [(1, 3), (4, 6), (-10, -3)]
return "all test cases passed"
def main():
# open file intervals.in and read the data and create a list of tuples
infile = open("intervals.in", "r")
n = int(infile.readline().strip())
if n < 1 or n > 100:
return "invalid N: 1 <= N <= 100"
tupList = []
for i in range(n):
num = infile.readline().strip()
tmp = num.split()
try:
tupList.append((int(tmp[0]), int(tmp[1])))
except:
pass
# merge the list of tuples
mergeList = merge_tuples(tupList)
# sort the list of tuples according to the size of the interval
sort_by_interval_size(mergeList)
# run your test cases
print(test_cases())
# open file intervals.out and write the output list of tuples from the two functions
if __name__ == "__main__":
main()
|
[
"aliceliang8888@gmail.com"
] |
aliceliang8888@gmail.com
|
99e87bda0171e916ca08493d0d6c76e8fba36454
|
66bed0443c65f7879a09ccc797db0997618fb303
|
/core/migrations/0008_comment_comment_time.py
|
4fa205b0791ac2119c40cafe45d4f6b7f1423193
|
[] |
no_license
|
momentum-cohort-2019-02/w6-apile-tablescapes
|
bc15e7d028130bfeb2a321d45fb8846c299c80b9
|
d06fe6edc42677de1a5ae6e9049cef0dbfd9644a
|
refs/heads/master
| 2020-04-29T18:36:12.427498
| 2019-04-01T20:48:44
| 2019-04-01T20:48:44
| 176,328,918
| 0
| 0
| null | 2019-03-24T17:11:48
| 2019-03-18T16:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
# Generated by Django 2.1.7 on 2019-03-24 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20190324_1154'),
]
operations = [
migrations.AddField(
model_name='comment',
name='comment_time',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
[
"bjgribb@gmail.com"
] |
bjgribb@gmail.com
|
9be8174c34a1f443330cb0de64cfd5e310f1a722
|
22c91ce11e19cc5d80074639e60a715939b0bd91
|
/pyPiFiles/auth.py
|
cc5fc773b667d41c7d5ec37d2b084f42b1cf958e
|
[] |
no_license
|
ViniciusRuiz/Gotcha
|
10781caa79ad187b6836aaf3c3ae594b22e0e7e5
|
9ffbf037cca6e638eadef04661dff8f71dd22c81
|
refs/heads/master
| 2022-03-15T15:04:48.950229
| 2019-11-21T08:54:57
| 2019-11-21T08:54:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,324
|
py
|
#!/usr/bin/env python
import face_recognition
import numpy
import subprocess
import glob
import time
from google.cloud import firestore
from google.cloud import storage
from PIL import Image
authorized_encodings = []
# updates local face encodings from remote images in google cloud platform bucket resource
def update_local_faces():
update_start = time.time()
# remove all faces from /home/pi/Desktop/faces
remove_faces = 'rm -f face_*.jpg'
p_faces = subprocess.Popen(remove_faces, shell=True, stdout=subprocess.PIPE)
p_faces.wait()
print('All local authorized face images removed')
# pull images from firebase storage, exclude *test*.jpg
download_blob('gotcha-233622.appspot.com', 'face_')
# Get all images from test directory (no test images)
pics = glob.glob('/home/pi/Desktop/faces/face_*')
# clear face encodings list
authorized_encodings.clear()
i = 0 # track image number in blob
# crop the faces and create new encodings for each image, append to auth array
for image in pics:
# load image into user_face
user_face = face_recognition.load_image_file(image)
# recognize faces: should only be a single face, asserted in test image
# all pi photos may contain more than one face
face_locations = face_recognition.face_locations(user_face)
# TODO: create single crop function - third use
# crop image
top,right,bottom,left = face_locations[0]
# error if adding 50 pixel perimeter around face detected by library # use standard?
face_image = user_face[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
# save image
path = 'face_{}.jpg'.format(i)
pil_image.save(path)
# process image
print('Encoding: %s ' % pics[i])
cropped = face_recognition.load_image_file(path)
# create encoding
encoding = face_recognition.face_encodings(cropped)[0]
# append encoding to global auth list
authorized_encodings.append(encoding)
#print(authorized_encodings)
i += 1
print('Editing and Encoding Update Cost: %s seconds' % (time.time() - update_start))
# downloads all pictures in bucket to local pi storage
def download_blob(bucket_name, prefix_):
client = storage.Client()
# Retrieve all blobs with a prefix matching the file.
bucket = client.get_bucket(bucket_name)
bucket_name = 'gotcha-233622.appspot.com'
folder='/home/pi/Desktop/faces'
delimiter='/'
# List only face_*.jpg images in bucket
blobs=bucket.list_blobs(prefix=prefix_, delimiter=delimiter)
for blob in blobs:
# get name of resident
name = (blob.name)[len(prefix_):]
#print(name)
dest_path = '{}/{}'.format(folder, blob.name)
blob.download_to_filename(dest_path)
#print('{}\'s face downloaded to {}.'.format(name, dest_path))
# tests the temporary picture in firebase, update firebase field
def picture_test():
# get test picture from blob, filter with 'test' prefix
download_blob('gotcha-233622.appspot.com', 'test')
# Get test image from faces folder on disk
img = glob.glob('/home/pi/Desktop/faces/test.*')
# process image
test_image = face_recognition.load_image_file(img[0])
face_locations = face_recognition.face_locations(test_image)
# assert single face in test image
if(len(face_locations) == 1):
# crop image
top,right,bottom,left = face_locations[0]
face_image = test_image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
# save image
path = 'test_img.jpg'
pil_image.save(path)
#load
cropped = face_recognition.load_image_file(path)
# create encoding of test face
test_encoding = face_recognition.face_encodings(cropped)[0]
# test against authorized face encodings
auth_name = is_auth(test_encoding)
# remove test image
#remove_faces = 'rm -f test_img.jpg'
#p_faces = subprocess.Popen(remove_faces, shell=True, stdout=subprocess.PIPE)
#p_faces.wait()
else:
update_document('image_test', 'testResult', 'Error: Multiple Faces')
# update db with test_name
if(not 'unknown' in auth_name):
update_document('image_test', 'hasTested', 'true')
update_document('image_test', 'testResult', '{}'.format(auth_name))
else:
update_document('image_test', 'hasTested', 'true')
update_document('image_test', 'testResult', 'unknown')
# determines if user at door is authentic
def is_auth(encoding):
# compares encoding of face with authorized encodings on disk
auth_name = ''
# check global list
for auth_encoding in authorized_encodings:
match = face_recognition.compare_faces(auth_encoding, encoding)
if match:
# TODO
# extract the name of the person from the face encoding
return 'authorized'
# not authenticated if still empty at end of testing
if auth_name == '':
return 'unknown'
update_local_faces()
picture_test()
|
[
"ldm437@mst.edu"
] |
ldm437@mst.edu
|
6e23428edb1530ddc25330249ae15f45e1b5be4f
|
d33da395d3f091a1009f1dd7f8e50ca0e39d5489
|
/venv/lib/python3.6/site-packages/weasyprint/tests/test_boxes.py
|
bed7b30e6b2e9b057ef2f1de69d6ccef99a0bb1b
|
[] |
no_license
|
kid-kodi/bio-strain
|
cf9206d7309946e5522b682be8807629724eef65
|
94712f0549cecabee87980ad5a62f9c778a1dba1
|
refs/heads/master
| 2022-11-27T11:27:53.845217
| 2019-02-01T11:18:58
| 2019-02-01T11:18:58
| 167,825,680
| 0
| 0
| null | 2022-11-22T02:55:27
| 2019-01-27T15:41:52
|
Python
|
UTF-8
|
Python
| false
| false
| 52,928
|
py
|
# coding: utf-8
"""
weasyprint.tests.test_boxes
---------------------------
Test that the "before layout" box tree is correctly constructed.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import functools
from .. import images
from ..css import PageType, get_all_computed_styles
from ..formatting_structure import boxes, build, counters
from ..layout.pages import set_page_type_computed_styles
from ..urls import path2url
from .testing_utils import (
FakeHTML, assert_no_logs, capture_logs, resource_filename)
PROPER_CHILDREN = dict((key, tuple(map(tuple, value))) for key, value in {
# Children can be of *any* type in *one* of the lists.
boxes.BlockContainerBox: [[boxes.BlockLevelBox], [boxes.LineBox]],
boxes.LineBox: [[boxes.InlineLevelBox]],
boxes.InlineBox: [[boxes.InlineLevelBox]],
boxes.TableBox: [[boxes.TableCaptionBox,
boxes.TableColumnGroupBox, boxes.TableColumnBox,
boxes.TableRowGroupBox, boxes.TableRowBox]],
boxes.InlineTableBox: [[boxes.TableCaptionBox,
boxes.TableColumnGroupBox, boxes.TableColumnBox,
boxes.TableRowGroupBox, boxes.TableRowBox]],
boxes.TableColumnGroupBox: [[boxes.TableColumnBox]],
boxes.TableRowGroupBox: [[boxes.TableRowBox]],
boxes.TableRowBox: [[boxes.TableCellBox]],
}.items())
# Dummy filename, but in the right directory.
BASE_URL = path2url(resource_filename('<test>'))
def serialize(box_list):
"""Transform a box list into a structure easier to compare for testing."""
return [
(box.element_tag,
('Anon' if (box.style.anonymous and
type(box) not in (boxes.TextBox, boxes.LineBox))
else '') + type(box).__name__[:-3],
# All concrete boxes are either text, replaced, column or parent.
(box.text if isinstance(box, boxes.TextBox)
else '<replaced>' if isinstance(box, boxes.ReplacedBox)
else serialize(
getattr(box, 'column_groups', ()) + tuple(box.children))))
for box in box_list]
def unwrap_html_body(box):
"""Test that the box tree starts with a ``<html>`` and a ``<body>`` blocks.
Remove them to simplify further tests. These are always at the root
of HTML documents.
"""
assert box.element_tag == 'html'
assert isinstance(box, boxes.BlockBox)
assert len(box.children) == 1
box = box.children[0]
assert isinstance(box, boxes.BlockBox)
assert box.element_tag == 'body'
return box.children
def to_lists(box_tree):
"""Serialize and unwrap ``<html>`` and ``<body>``."""
return serialize(unwrap_html_body(box_tree))
def _parse_base(html_content, base_url=BASE_URL):
document = FakeHTML(string=html_content, base_url=base_url)
style_for, _, _ = get_all_computed_styles(document)
get_image_from_uri = functools.partial(
images.get_image_from_uri, {}, document.url_fetcher)
return document.etree_element, style_for, get_image_from_uri, base_url
def parse(html_content):
"""Parse some HTML, apply stylesheets and transform to boxes."""
box, = build.element_to_box(*_parse_base(html_content))
return box
def parse_all(html_content, base_url=BASE_URL):
"""Like parse() but also run all corrections on boxes."""
box = build.build_formatting_structure(*_parse_base(
html_content, base_url))
sanity_checks(box)
return box
def render_pages(html_content):
"""Lay out a document and return a list of PageBox objects."""
return [p._page_box for p in FakeHTML(
string=html_content, base_url=BASE_URL
).render(enable_hinting=True).pages]
def assert_tree(box, expected):
"""Check the box tree equality.
The obtained result is prettified in the message in case of failure.
box: a Box object, starting with <html> and <body> blocks.
expected: a list of serialized <body> children as returned by to_lists().
"""
lists = to_lists(box)
if lists != expected:
assert lists == expected
def sanity_checks(box):
"""Check that the rules regarding boxes are met.
This is not required and only helps debugging.
- A block container can contain either only block-level boxes or
only line boxes;
- Line boxes and inline boxes can only contain inline-level boxes.
"""
if not isinstance(box, boxes.ParentBox):
return
acceptable_types_lists = None # raises when iterated
for class_ in type(box).mro():
if class_ in PROPER_CHILDREN:
acceptable_types_lists = PROPER_CHILDREN[class_]
break
assert any(
all(isinstance(child, acceptable_types) or
not child.is_in_normal_flow()
for child in box.children)
for acceptable_types in acceptable_types_lists
), (box, box.children)
for child in box.children:
sanity_checks(child)
@assert_no_logs
def test_box_tree():
"""Test the creation of trees from HTML strings."""
assert_tree(parse('<p>'), [('p', 'Block', [])])
assert_tree(parse(
'''
<style>
span { display: inline-block }
</style>
<p>Hello <em>World <images src="pattern.png"><span>L</span></em>!</p>'''),
[('p', 'Block', [
('p', 'Text', 'Hello '),
('em', 'Inline', [
('em', 'Text', 'World '),
('images', 'InlineReplaced', '<replaced>'),
('span', 'InlineBlock', [
('span', 'Text', 'L')])]),
('p', 'Text', '!')])])
@assert_no_logs
def test_html_entities():
"""Test the management of HTML entities."""
    for quote in ['"', '&quot;', '&#x22;', '&#34;']:
assert_tree(parse('<p>{0}abc{1}'.format(quote, quote)), [
('p', 'Block', [
('p', 'Text', '"abc"')])])
@assert_no_logs
def test_inline_in_block():
"""Test the management of inline boxes in block boxes."""
source = '<div>Hello, <em>World</em>!\n<p>Lipsum.</p></div>'
expected = [
('div', 'Block', [
('div', 'AnonBlock', [
('div', 'Line', [
('div', 'Text', 'Hello, '),
('em', 'Inline', [
('em', 'Text', 'World')]),
('div', 'Text', '!\n')])]),
('p', 'Block', [
('p', 'Line', [
('p', 'Text', 'Lipsum.')])])])]
box = parse(source)
box = build.inline_in_block(box)
assert_tree(box, expected)
source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\n</div>'
expected = [
('div', 'Block', [
('p', 'Block', [
('p', 'Line', [
('p', 'Text', 'Lipsum.')])]),
('div', 'AnonBlock', [
('div', 'Line', [
('div', 'Text', 'Hello, '),
('em', 'Inline', [
('em', 'Text', 'World')]),
('div', 'Text', '!\n')])])])]
box = parse(source)
box = build.inline_in_block(box)
assert_tree(box, expected)
# Absolutes are left in the lines to get their static position later.
source = '''<p>Hello <em style="position:absolute;
display: block">World</em>!</p>'''
expected = [
('p', 'Block', [
('p', 'Line', [
('p', 'Text', 'Hello '),
('em', 'Block', [
('em', 'Line', [
('em', 'Text', 'World')])]),
('p', 'Text', '!')])])]
box = parse(source)
box = build.inline_in_block(box)
assert_tree(box, expected)
box = build.block_in_inline(box)
assert_tree(box, expected)
# Floats are pull to the top of their containing blocks
source = '<p>Hello <em style="float: left">World</em>!</p>'
box = parse(source)
box = build.inline_in_block(box)
box = build.block_in_inline(box)
assert_tree(box, [
('p', 'Block', [
('p', 'Line', [
('p', 'Text', 'Hello '),
('em', 'Block', [
('em', 'Line', [
('em', 'Text', 'World')])]),
('p', 'Text', '!')])])])
@assert_no_logs
def test_block_in_inline():
"""Test the management of block boxes in inline boxes."""
box = parse('''
<style>
p { display: inline-block; }
span, i { display: block; }
</style>
<p>Lorem <em>ipsum <strong>dolor <span>sit</span>
<span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')
box = build.inline_in_block(box)
assert_tree(box, [
('body', 'Line', [
('p', 'InlineBlock', [
('p', 'Line', [
('p', 'Text', 'Lorem '),
('em', 'Inline', [
('em', 'Text', 'ipsum '),
('strong', 'Inline', [
('strong', 'Text', 'dolor '),
('span', 'Block', [ # This block is "pulled up"
('span', 'Line', [
('span', 'Text', 'sit')])]),
# No whitespace processing here.
('strong', 'Text', '\n '),
('span', 'Block', [ # This block is "pulled up"
('span', 'Line', [
('span', 'Text', 'amet,')])])]),
('span', 'Block', [ # This block is "pulled up"
('span', 'Line', [
('em', 'Inline', [
('em', 'Text', 'conse'),
('i', 'Block', [])])])])])])])])])
box = build.block_in_inline(box)
assert_tree(box, [
('body', 'Line', [
('p', 'InlineBlock', [
('p', 'AnonBlock', [
('p', 'Line', [
('p', 'Text', 'Lorem '),
('em', 'Inline', [
('em', 'Text', 'ipsum '),
('strong', 'Inline', [
('strong', 'Text', 'dolor ')])])])]),
('span', 'Block', [
('span', 'Line', [
('span', 'Text', 'sit')])]),
('p', 'AnonBlock', [
('p', 'Line', [
('em', 'Inline', [
('strong', 'Inline', [
# Whitespace processing not done yet.
('strong', 'Text', '\n ')])])])]),
('span', 'Block', [
('span', 'Line', [
('span', 'Text', 'amet,')])]),
('p', 'AnonBlock', [
('p', 'Line', [
('em', 'Inline', [
('strong', 'Inline', [])])])]),
('span', 'Block', [
('span', 'AnonBlock', [
('span', 'Line', [
('em', 'Inline', [
('em', 'Text', 'conse')])])]),
('i', 'Block', []),
('span', 'AnonBlock', [
('span', 'Line', [
('em', 'Inline', [])])])]),
('p', 'AnonBlock', [
('p', 'Line', [
('em', 'Inline', [])])])])])])
@assert_no_logs
def test_styles():
"""Test the application of CSS to HTML."""
box = parse('''
<style>
span { display: block; }
* { margin: 42px }
html { color: blue }
</style>
<p>Lorem <em>ipsum <strong>dolor <span>sit</span>
<span>amet,</span></strong><span>consectetur</span></em></p>''')
box = build.inline_in_block(box)
box = build.block_in_inline(box)
descendants = list(box.descendants())
assert len(descendants) == 31
assert descendants[0] == box
for child in descendants:
# All boxes inherit the color
assert child.style.color == (0, 0, 1, 1) # blue
# Only non-anonymous boxes have margins
if child.style.anonymous:
assert child.style.margin_top == (0, 'px')
else:
assert child.style.margin_top == (42, 'px')
@assert_no_logs
def test_whitespace():
"""Test the management of white spaces."""
# TODO: test more cases
# http://www.w3.org/TR/CSS21/text.html#white-space-model
assert_tree(parse_all('''
<p>Lorem \t\r\n ipsum\t<strong> dolor
<images src=pattern.png> sit
<span style="position: absolute"></span> <em> amet </em>
consectetur</strong>.</p>
<pre>\t foo\n</pre>
<pre style="white-space: pre-wrap">\t foo\n</pre>
<pre style="white-space: pre-line">\t foo\n</pre>
'''), [
('p', 'Block', [
('p', 'Line', [
('p', 'Text', 'Lorem ipsum '),
('strong', 'Inline', [
('strong', 'Text', 'dolor '),
('images', 'InlineReplaced', '<replaced>'),
('strong', 'Text', ' sit '),
('span', 'Block', []),
('em', 'Inline', [
('em', 'Text', 'amet ')]),
('strong', 'Text', 'consectetur')]),
('p', 'Text', '.')])]),
('pre', 'Block', [
('pre', 'Line', [
# pre
('pre', 'Text', '\t foo\n')])]),
('pre', 'Block', [
('pre', 'Line', [
# pre-wrap
('pre', 'Text', '\t foo\n')])]),
('pre', 'Block', [
('pre', 'Line', [
# pre-line
('pre', 'Text', ' foo\n')])])])
@assert_no_logs
def test_page_style():
"""Test the management of page styles."""
document = FakeHTML(string='''
<style>
@page { margin: 3px }
@page :first { margin-top: 20px }
@page :right { margin-right: 10px; margin-top: 10px }
@page :left { margin-left: 10px; margin-top: 10px }
</style>
''')
style_for, cascaded_styles, computed_styles = get_all_computed_styles(
document)
def assert_page_margins(page_type, top, right, bottom, left):
"""Check the page margin values."""
style = style_for(page_type)
assert style.margin_top == (top, 'px')
assert style.margin_right == (right, 'px')
assert style.margin_bottom == (bottom, 'px')
assert style.margin_left == (left, 'px')
# Force the generation of the style for all possible page types as it's
# generally only done during the rendering for needed page types.
standard_page_type = PageType(
side=None, blank=False, first=False, name=None)
set_page_type_computed_styles(
standard_page_type, cascaded_styles, computed_styles, document)
assert_page_margins(
PageType(side='left', first=True, blank=False, name=None),
top=20, right=3, bottom=3, left=10)
assert_page_margins(
PageType(side='right', first=True, blank=False, name=None),
top=20, right=10, bottom=3, left=3)
assert_page_margins(
PageType(side='left', first=False, blank=False, name=None),
top=10, right=3, bottom=3, left=10)
assert_page_margins(
PageType(side='right', first=False, blank=False, name=None),
top=10, right=10, bottom=3, left=3)
@assert_no_logs
def test_images():
"""Test images that may or may not be available."""
with capture_logs() as logs:
result = parse_all('''
<p><images src=pattern.png
/><images alt="No src"
/><images src=inexistent.jpg alt="Inexistent src" /></p>
''')
assert len(logs) == 1
assert 'ERROR: Failed to load image' in logs[0]
assert 'inexistent.jpg' in logs[0]
assert_tree(result, [
('p', 'Block', [
('p', 'Line', [
('images', 'InlineReplaced', '<replaced>'),
('images', 'Inline', [
('images', 'Text', 'No src')]),
('images', 'Inline', [
('images', 'Text', 'Inexistent src')])])])])
with capture_logs() as logs:
result = parse_all('<p><images src=pattern.png alt="No base_url">',
base_url=None)
assert len(logs) == 1
assert 'ERROR: Relative URI reference without a base URI' in logs[0]
assert_tree(result, [
('p', 'Block', [
('p', 'Line', [
('images', 'Inline', [
('images', 'Text', 'No base_url')])])])])
@assert_no_logs
def test_tables():
# Rules in http://www.w3.org/TR/CSS21/tables.html#anonymous-boxes
# Rule 1.3
# Also table model: http://www.w3.org/TR/CSS21/tables.html#model
assert_tree(parse_all('''
<x-table>
<x-tr>
<x-th>foo</x-th>
<x-th>bar</x-th>
</x-tr>
<x-tfoot></x-tfoot>
<x-thead><x-th></x-th></x-thead>
<x-caption style="caption-side: bottom"></x-caption>
<x-thead></x-thead>
<x-col></x-col>
<x-caption>top caption</x-caption>
<x-tr>
<x-td>baz</x-td>
</x-tr>
</x-table>
'''), [
('x-table', 'AnonBlock', [
('x-caption', 'TableCaption', [
('x-caption', 'Line', [
('x-caption', 'Text', 'top caption')])]),
('x-table', 'Table', [
('x-table', 'AnonTableColumnGroup', [
('x-col', 'TableColumn', [])]),
('x-thead', 'TableRowGroup', [
('x-thead', 'AnonTableRow', [
('x-th', 'TableCell', [])])]),
('x-table', 'AnonTableRowGroup', [
('x-tr', 'TableRow', [
('x-th', 'TableCell', [
('x-th', 'Line', [
('x-th', 'Text', 'foo')])]),
('x-th', 'TableCell', [
('x-th', 'Line', [
('x-th', 'Text', 'bar')])])])]),
('x-thead', 'TableRowGroup', []),
('x-table', 'AnonTableRowGroup', [
('x-tr', 'TableRow', [
('x-td', 'TableCell', [
('x-td', 'Line', [
('x-td', 'Text', 'baz')])])])]),
('x-tfoot', 'TableRowGroup', [])]),
('x-caption', 'TableCaption', [])])])
# Rules 1.4 and 3.1
assert_tree(parse_all('''
<span style="display: table-cell">foo</span>
<span style="display: table-cell">bar</span>
'''), [
('body', 'AnonBlock', [
('body', 'AnonTable', [
('body', 'AnonTableRowGroup', [
('body', 'AnonTableRow', [
('span', 'TableCell', [
('span', 'Line', [
('span', 'Text', 'foo')])]),
('span', 'TableCell', [
('span', 'Line', [
('span', 'Text', 'bar')])])])])])])])
# http://www.w3.org/TR/CSS21/tables.html#anonymous-boxes
# Rules 1.1 and 1.2
# Rule XXX (not in the spec): column groups have at least one column child
assert_tree(parse_all('''
<span style="display: table-column-group">
1
<em style="display: table-column">
2
<strong>3</strong>
</em>
<strong>4</strong>
</span>
<ins style="display: table-column-group"></ins>
'''), [
('body', 'AnonBlock', [
('body', 'AnonTable', [
('span', 'TableColumnGroup', [
('em', 'TableColumn', [])]),
('ins', 'TableColumnGroup', [
('ins', 'AnonTableColumn', [])])])])])
# Rules 2.1 then 2.3
assert_tree(parse_all('<x-table>foo <div></div></x-table>'), [
('x-table', 'AnonBlock', [
('x-table', 'Table', [
('x-table', 'AnonTableRowGroup', [
('x-table', 'AnonTableRow', [
('x-table', 'AnonTableCell', [
('x-table', 'AnonBlock', [
('x-table', 'Line', [
('x-table', 'Text', 'foo ')])]),
('div', 'Block', [])])])])])])])
# Rule 2.2
assert_tree(parse_all('<x-thead style="display: table-header-group">'
'<div></div><x-td></x-td></x-thead>'), [
('body', 'AnonBlock', [
('body', 'AnonTable', [
('x-thead', 'TableRowGroup', [
('x-thead', 'AnonTableRow', [
('x-thead', 'AnonTableCell', [
('div', 'Block', [])]),
('x-td', 'TableCell', [])])])])])])
# TODO: re-enable this once we support inline-table
# Rule 3.2
assert_tree(parse_all('<span><x-tr></x-tr></span>'), [
('body', 'Line', [
('span', 'Inline', [
('span', 'AnonInlineBlock', [
('span', 'AnonInlineTable', [
('span', 'AnonTableRowGroup', [
('x-tr', 'TableRow', [])])])])])])])
# Rule 3.1
# Also, rule 1.3 does not apply: whitespace before and after is preserved
assert_tree(parse_all('''
<span>
<em style="display: table-cell"></em>
<em style="display: table-cell"></em>
</span>
'''), [
('body', 'Line', [
('span', 'Inline', [
# Whitespace is preserved in table handling, then collapsed
# into a single space.
('span', 'Text', ' '),
('span', 'AnonInlineBlock', [
('span', 'AnonInlineTable', [
('span', 'AnonTableRowGroup', [
('span', 'AnonTableRow', [
('em', 'TableCell', []),
('em', 'TableCell', [])])])])]),
('span', 'Text', ' ')])])])
# Rule 3.2
assert_tree(parse_all('<x-tr></x-tr>\t<x-tr></x-tr>'), [
('body', 'AnonBlock', [
('body', 'AnonTable', [
('body', 'AnonTableRowGroup', [
('x-tr', 'TableRow', []),
('x-tr', 'TableRow', [])])])])])
assert_tree(parse_all('<x-col></x-col>\n<x-colgroup></x-colgroup>'), [
('body', 'AnonBlock', [
('body', 'AnonTable', [
('body', 'AnonTableColumnGroup', [
('x-col', 'TableColumn', [])]),
('x-colgroup', 'TableColumnGroup', [
('x-colgroup', 'AnonTableColumn', [])])])])])
@assert_no_logs
def test_table_style():
html = parse_all('<table style="margin: 1px; padding: 2px"></table>')
body, = html.children
wrapper, = body.children
table, = wrapper.children
assert isinstance(wrapper, boxes.BlockBox)
assert isinstance(table, boxes.TableBox)
assert wrapper.style.margin_top == (1, 'px')
assert wrapper.style.padding_top == (0, 'px')
assert table.style.margin_top == (0, 'px')
assert table.style.padding_top == (2, 'px')
@assert_no_logs
def test_column_style():
html = parse_all('''
<table>
<col span=3 style="width: 10px"></col>
<col span=2></col>
</table>
''')
body, = html.children
wrapper, = body.children
table, = wrapper.children
colgroup, = table.column_groups
widths = [col.style.width for col in colgroup.children]
assert widths == [(10, 'px'), (10, 'px'), (10, 'px'), 'auto', 'auto']
assert [col.grid_x for col in colgroup.children] == [0, 1, 2, 3, 4]
# copies, not the same box object
assert colgroup.children[0] is not colgroup.children[1]
@assert_no_logs
def test_nested_grid_x():
html = parse_all('''
<table>
<col span=2></col>
<colgroup span=2></colgroup>
<colgroup>
<col></col>
<col span=2></col>
</colgroup>
<col></col>
</table>
''')
body, = html.children
wrapper, = body.children
table, = wrapper.children
grid = [(colgroup.grid_x, [col.grid_x for col in colgroup.children])
for colgroup in table.column_groups]
assert grid == [(0, [0, 1]), (2, [2, 3]), (4, [4, 5, 6]), (7, [7])]
@assert_no_logs
def test_colspan_rowspan():
"""
+---+---+---+
| A | B | C | #
+---+---+---+
| D | E | #
+---+---+ +---+
| F ...| | | <-- overlap
+---+---+---+ +
| H | # # | G |
+---+---+ + +
| I | J | # | |
+---+---+ +---+
# empty cells
"""
html = parse_all('''
<table>
<tr>
<td>A <td>B <td>C
</tr>
<tr>
<td>D <td colspan=2 rowspan=2>E
</tr>
<tr>
<td colspan=2>F <td rowspan=0>G
</tr>
<tr>
<td>H
</tr>
<tr>
<td>I <td>J
</tr>
</table>
''')
body, = html.children
wrapper, = body.children
table, = wrapper.children
group, = table.children
assert [[c.grid_x for c in row.children] for row in group.children] == [
[0, 1, 2],
[0, 1],
[0, 3],
[0],
[0, 1],
]
assert [[c.colspan for c in row.children] for row in group.children] == [
[1, 1, 1],
[1, 2],
[2, 1],
[1],
[1, 1],
]
assert [[c.rowspan for c in row.children] for row in group.children] == [
[1, 1, 1],
[1, 2],
[1, 3],
[1],
[1, 1],
]
# A cell box cannot extend beyond the last row box of a table.
html = parse_all('''
<table>
<tr>
<td rowspan=5></td>
<td></td>
</tr>
<tr>
<td></td>
</tr>
</table>
''')
body, = html.children
wrapper, = body.children
table, = wrapper.children
group, = table.children
assert [[c.grid_x for c in row.children] for row in group.children] == [
[0, 1],
[1],
]
assert [[c.colspan for c in row.children] for row in group.children] == [
[1, 1],
[1],
]
assert [[c.rowspan for c in row.children] for row in group.children] == [
[2, 1], # Not 5
[1],
]
@assert_no_logs
def test_before_after():
"""Test the ::before and ::after pseudo-elements."""
assert_tree(parse_all('''
<style>
p:before { content: normal }
div:before { content: none }
section::before { color: black }
</style>
<p></p>
<div></div>
<section></section>
'''), [
# No content in pseudo-element, no box generated
('p', 'Block', []),
('div', 'Block', []),
('section', 'Block', [])])
assert_tree(parse_all('''
<style>
p:before { content: 'a' 'b' }
p::after { content: 'd' 'e' }
</style>
<p>
c
</p>
'''), [
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', 'ab')]),
('p', 'Text', ' c '),
('p::after', 'Inline', [
('p::after', 'Text', 'de')])])])])
assert_tree(parse_all('''
<style>
a[href]:before { content: '[' attr(href) '] ' }
</style>
<p><a href="some url">some text</a></p>
'''), [
('p', 'Block', [
('p', 'Line', [
('a', 'Inline', [
('a::before', 'Inline', [
('a::before', 'Text', '[some url] ')]),
('a', 'Text', 'some text')])])])])
assert_tree(parse_all('''
<style>
body { quotes: '«' '»' '“' '”' }
q:before { content: open-quote ' '}
q:after { content: ' ' close-quote }
</style>
<p><q>Lorem ipsum <q>dolor</q> sit amet</q></p>
'''), [
('p', 'Block', [
('p', 'Line', [
('q', 'Inline', [
('q::before', 'Inline', [
('q::before', 'Text', '« ')]),
('q', 'Text', 'Lorem ipsum '),
('q', 'Inline', [
('q::before', 'Inline', [
('q::before', 'Text', '“ ')]),
('q', 'Text', 'dolor'),
('q::after', 'Inline', [
('q::after', 'Text', ' ”')])]),
('q', 'Text', ' sit amet'),
('q::after', 'Inline', [
('q::after', 'Text', ' »')])])])])])
with capture_logs() as logs:
assert_tree(parse_all('''
<style>
p:before {
content: 'a' url(pattern.png) 'b';
/* Invalid, ignored in favor of the one above.
Regression test: this used to crash: */
content: some-function(nested-function(something));
}
</style>
<p>c</p>
'''), [
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', 'a'),
('p::before', 'AnonInlineReplaced', '<replaced>'),
('p::before', 'Text', 'b')]),
('p', 'Text', 'c')])])])
assert len(logs) == 1
assert 'nested-function(' in logs[0]
assert 'invalid value' in logs[0]
@assert_no_logs
def test_counters():
"""Test counter-reset, counter-increment, content: counter() counters()"""
assert_tree(parse_all('''
<style>
p { counter-increment: p 2 }
p:before { content: counter(p); }
p:nth-child(1) { counter-increment: none; }
p:nth-child(2) { counter-increment: p; }
</style>
<p></p>
<p></p>
<p></p>
<p style="counter-reset: p 117 p"></p>
<p></p>
<p></p>
<p style="counter-reset: p -13"></p>
<p></p>
<p></p>
<p style="counter-reset: p 42"></p>
<p></p>
<p></p>'''), [
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', counter)])])])
for counter in '0 1 3 2 4 6 -11 -9 -7 44 46 48'.split()])
assert_tree(parse_all('''
<ol style="list-style-position: inside">
<li></li>
<li></li>
<li></li>
<li><ol>
<li></li>
<li style="counter-increment: none"></li>
<li></li>
</ol></li>
<li></li>
</ol>'''), [
('ol', 'Block', [
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '1.')])]),
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '2.')])]),
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '3.')])]),
('li', 'Block', [
('li', 'AnonBlock', [
('li', 'Line', [
('li::marker', 'Text', '4.')])]),
('ol', 'Block', [
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '1.')])]),
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '1.')])]),
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '2.')])])])]),
('li', 'Block', [
('li', 'Line', [
('li::marker', 'Text', '5.')])])])])
assert_tree(parse_all('''
<style>
p { display: list-item; list-style: inside decimal }
</style>
<div>
<p></p>
<p></p>
<p style="counter-reset: list-item 7 list-item -56"></p>
</div>
<p></p>'''), [
('div', 'Block', [
('p', 'Block', [
('p', 'Line', [
('p::marker', 'Text', '1.')])]),
('p', 'Block', [
('p', 'Line', [
('p::marker', 'Text', '2.')])]),
('p', 'Block', [
('p', 'Line', [
('p::marker', 'Text', '-55.')])])]),
('p', 'Block', [
('p', 'Line', [
('p::marker', 'Text', '1.')])])])
assert_tree(parse_all('''
<style>
section:before { counter-reset: h; content: '' }
h1:before { counter-increment: h; content: counters(h, '.') }
</style>
<body>
<section><h1></h1>
<h1></h1>
<section><h1></h1>
<h1></h1>
</section>
<h1></h1>
</section>
</body>'''), [
('section', 'Block', [
('section', 'AnonBlock', [
('section', 'Line', [
('section::before', 'Inline', [])])]),
('h1', 'Block', [
('h1', 'Line', [
('h1::before', 'Inline', [
('h1::before', 'Text', '1')])])]),
('h1', 'Block', [
('h1', 'Line', [
('h1::before', 'Inline', [
('h1::before', 'Text', '2')])])]),
('section', 'Block', [
('section', 'AnonBlock', [
('section', 'Line', [
('section::before', 'Inline', [])])]),
('h1', 'Block', [
('h1', 'Line', [
('h1::before', 'Inline', [
('h1::before', 'Text', '2.1')])])]),
('h1', 'Block', [
('h1', 'Line', [
('h1::before', 'Inline', [
('h1::before', 'Text', '2.2')])])])]),
('h1', 'Block', [
('h1', 'Line', [
('h1::before', 'Inline', [
('h1::before', 'Text', '3')])])])])])
assert_tree(parse_all('''
<style>
p:before { content: counter(c) }
</style>
<div>
<span style="counter-reset: c">
Scope created now, deleted after the div
</span>
</div>
<p></p>'''), [
('div', 'Block', [
('div', 'Line', [
('span', 'Inline', [
('span', 'Text',
'Scope created now, deleted after the div ')])])]),
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', '0')])])])])
# counter-increment may interfere with display: list-item
assert_tree(parse_all('''
<p style="counter-increment: c;
display: list-item; list-style: inside decimal">'''), [
('p', 'Block', [
('p', 'Line', [
('p::marker', 'Text', '0.')])])])
@assert_no_logs
def test_counter_styles():
"""Test the various counter styles."""
assert_tree(parse_all('''
<style>
body { counter-reset: p -12 }
p { counter-increment: p }
p:nth-child(1):before { content: '-' counter(p, none) '-'; }
p:nth-child(2):before { content: counter(p, disc); }
p:nth-child(3):before { content: counter(p, circle); }
p:nth-child(4):before { content: counter(p, square); }
p:nth-child(5):before { content: counter(p); }
</style>
<p></p>
<p></p>
<p></p>
<p></p>
<p></p>
'''), [
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', counter)])])])
for counter in '-- • ◦ ▪ -7'.split()])
assert_tree(parse_all('''
<style>
p { counter-increment: p }
p::before { content: counter(p, decimal-leading-zero); }
</style>
<p style="counter-reset: p -1987"></p>
<p></p>
<p style="counter-reset: p -12"></p>
<p></p>
<p></p>
<p></p>
<p style="counter-reset: p -2"></p>
<p></p>
<p></p>
<p></p>
<p style="counter-reset: p 8"></p>
<p></p>
<p></p>
<p style="counter-reset: p 98"></p>
<p></p>
<p></p>
<p style="counter-reset: p 4134"></p>
<p></p>
'''), [
('p', 'Block', [
('p', 'Line', [
('p::before', 'Inline', [
('p::before', 'Text', counter)])])])
for counter in '''-1986 -1985 -11 -10 -09 -08 -01 00 01 02 09 10 11
99 100 101 4135 4136'''.split()])
# Same test as above, but short-circuit HTML and boxes
assert [counters.format(value, 'decimal-leading-zero') for value in [
-1986, -1985,
-11, -10, -9, -8,
-1, 0, 1, 2,
9, 10, 11,
99, 100, 101,
4135, 4136
]] == '''
-1986 -1985 -11 -10 -09 -08 -01 00 01 02 09 10 11
99 100 101 4135 4136
'''.split()
# Now that we’re confident that they do the same, use the shorter form.
# http://test.csswg.org/suites/css2.1/20110323/html4/content-counter-007.htm
assert [counters.format(value, 'lower-roman') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
49, 50,
389, 390,
3489, 3490, 3491,
4999, 5000, 5001
]] == '''
-1986 -1985 -1 0 i ii iii iv v vi vii viii ix x xi xii
xlix l ccclxxxix cccxc mmmcdlxxxix mmmcdxc mmmcdxci
mmmmcmxcix 5000 5001
'''.split()
# http://test.csswg.org/suites/css2.1/20110323/html4/content-counter-008.htm
assert [counters.format(value, 'upper-roman') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
49, 50,
389, 390,
3489, 3490, 3491,
4999, 5000, 5001
]] == '''
-1986 -1985 -1 0 I II III IV V VI VII VIII IX X XI XII
XLIX L CCCLXXXIX CCCXC MMMCDLXXXIX MMMCDXC MMMCDXCI
MMMMCMXCIX 5000 5001
'''.split()
assert [counters.format(value, 'lower-alpha') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4,
25, 26, 27, 28, 29,
2002, 2003
]] == '''
-1986 -1985 -1 0 a b c d y z aa ab ac bxz bya
'''.split()
assert [counters.format(value, 'upper-alpha') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4,
25, 26, 27, 28, 29,
2002, 2003
]] == '''
-1986 -1985 -1 0 A B C D Y Z AA AB AC BXZ BYA
'''.split()
assert [counters.format(value, 'lower-latin') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4,
25, 26, 27, 28, 29,
2002, 2003
]] == '''
-1986 -1985 -1 0 a b c d y z aa ab ac bxz bya
'''.split()
assert [counters.format(value, 'upper-latin') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4,
25, 26, 27, 28, 29,
2002, 2003
]] == '''
-1986 -1985 -1 0 A B C D Y Z AA AB AC BXZ BYA
'''.split()
# http://test.csswg.org/suites/css2.1/20110323/html4/content-counter-009.htm
assert [counters.format(value, 'georgian') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
20, 30, 40, 50, 60, 70, 80, 90, 100,
200, 300, 400, 500, 600, 700, 800, 900, 1000,
2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
19999, 20000, 20001
]] == '''
-1986 -1985 -1 0 ა
ბ გ დ ე ვ ზ ჱ თ ი ია იბ
კ ლ მ ნ ჲ ო პ ჟ რ
ს ტ ჳ ფ ქ ღ ყ შ ჩ
ც ძ წ ჭ ხ ჴ ჯ ჰ ჵ
ჵჰშჟთ 20000 20001
'''.split()
# http://test.csswg.org/suites/css2.1/20110323/html4/content-counter-010.htm
assert [counters.format(value, 'armenian') for value in [
-1986, -1985,
-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
20, 30, 40, 50, 60, 70, 80, 90, 100,
200, 300, 400, 500, 600, 700, 800, 900, 1000,
2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
9999, 10000, 10001
]] == '''
-1986 -1985 -1 0 Ա
Բ Գ Դ Ե Զ Է Ը Թ Ժ ԺԱ ԺԲ
Ի Լ Խ Ծ Կ Հ Ձ Ղ Ճ
Մ Յ Ն Շ Ո Չ Պ Ջ Ռ
Ս Վ Տ Ր Ց Ւ Փ Ք
ՔՋՂԹ 10000 10001
'''.split()
@assert_no_logs
def test_margin_boxes():
"""
Test that the correct margin boxes are created.
"""
page_1, page_2 = render_pages('''
<style>
@page {
/* Make the page content area only 10px high and wide,
             so every word in <p> ends up on a page of its own. */
size: 30px;
margin: 10px;
@top-center { content: "Title" }
}
@page :first {
@bottom-left { content: "foo" }
@bottom-left-corner { content: "baz" }
}
</style>
<p>lorem ipsum
''')
assert page_1.children[0].element_tag == 'html'
assert page_2.children[0].element_tag == 'html'
margin_boxes_1 = [box.at_keyword for box in page_1.children[1:]]
margin_boxes_2 = [box.at_keyword for box in page_2.children[1:]]
assert margin_boxes_1 == ['@top-center', '@bottom-left',
'@bottom-left-corner']
assert margin_boxes_2 == ['@top-center']
html, top_center = page_2.children
line_box, = top_center.children
text_box, = line_box.children
assert text_box.text == 'Title'
@assert_no_logs
def test_margin_box_string_set():
"""Test string-set / string() in margin boxes."""
# Test that both pages get string in the `bottom-center` margin box
page_1, page_2 = render_pages('''
<style>
@page {
@bottom-center { content: string(text_header); }
}
p{
string-set: text_header content();
}
.page{
page-break-before: always;
}
</style>
<p>first assignment</p>
<div class="page"></div>
''')
html, bottom_center = page_2.children
line_box, = bottom_center.children
text_box, = line_box.children
assert text_box.text == 'first assignment'
html, bottom_center = page_1.children
line_box, = bottom_center.children
text_box, = line_box.children
assert text_box.text == 'first assignment'
def simple_string_set_test(content_val, extra_style=""):
page_1, = render_pages('''
<style>
@page {
@top-center { content: string(text_header); }
}
p{
string-set: text_header content(%(content_val)s);
}
%(extra_style)s
</style>
<p>first assignment</p>
''' % dict(content_val=content_val, extra_style=extra_style))
html, top_center = page_1.children
line_box, = top_center.children
text_box, = line_box.children
if content_val in ('before', 'after'):
assert text_box.text == 'pseudo'
else:
assert text_box.text == 'first assignment'
    # Test each accepted value of `content()` as an argument to `string-set`
for value in ('', 'text', 'before', 'after'):
if value in ('before', 'after'):
extra_style = "p:%s{content: 'pseudo'}" % value
simple_string_set_test(value, extra_style)
else:
simple_string_set_test(value)
# Test `first` (default value) ie. use the first assignment on the page
page_1, = render_pages('''
<style>
@page {
@top-center { content: string(text_header, first); }
}
p{
string-set: text_header content();
}
</style>
<p>first assignment</p>
<p>Second assignment</p>
''')
html, top_center = page_1.children
line_box, = top_center.children
text_box, = line_box.children
assert text_box.text == 'first assignment'
# test `first-except` ie. exclude from page on which value is assigned
page_1, page_2 = render_pages('''
<style>
@page {
@top-center { content: string(header_nofirst, first-except); }
}
p{
string-set: header_nofirst content();
}
.page{
page-break-before: always;
}
</style>
<p>first_excepted</p>
<div class="page"></div>
''')
html, top_center = page_1.children
assert len(top_center.children) == 0
html, top_center = page_2.children
line_box, = top_center.children
text_box, = line_box.children
assert text_box.text == 'first_excepted'
# Test `last` ie. use the most-recent assignment
page_1, = render_pages('''
<style>
@page {
@top-center { content: string(header_last, last); }
}
p{
string-set: header_last content();
}
</style>
<p>String set</p>
<p>Second assignment</p>
''')
html, top_center = page_1.children[:2]
line_box, = top_center.children
text_box, = line_box.children
assert text_box.text == 'Second assignment'
# Test multiple complex string-set values
page_1, = render_pages('''
<style>
@page {
@top-center { content: string(text_header, first); }
@bottom-center { content: string(text_footer, last); }
}
html { counter-reset: a }
body { counter-increment: a }
ul { counter-reset: b }
li {
counter-increment: b;
string-set:
text_header content(before) "-" content() "-" content(after)
counter(a, upper-roman) '.' counters(b, '|'),
text_footer content(before) '-' attr(class)
counters(b, '|') "/" counter(a, upper-roman);
}
li:before { content: 'before!' }
li:after { content: 'after!' }
li:last-child:before { content: 'before!last' }
li:last-child:after { content: 'after!last' }
</style>
<ul>
<li class="firstclass">first
<li>
<ul>
<li class="secondclass">second
''')
html, top_center, bottom_center = page_1.children
top_line_box, = top_center.children
top_text_box, = top_line_box.children
assert top_text_box.text == 'before!-first-after!I.1'
bottom_line_box, = bottom_center.children
bottom_text_box, = bottom_line_box.children
assert bottom_text_box.text == 'before!last-secondclass2|1/I'
@assert_no_logs
def test_page_counters():
"""Test page-based counters."""
pages = render_pages('''
<style>
@page {
/* Make the page content area only 10px high and wide,
             so every word in <p> ends up on a page of its own. */
size: 30px;
margin: 10px;
@bottom-center {
content: "Page " counter(page) " of " counter(pages) ".";
}
}
</style>
<p>lorem ipsum dolor
''')
for page_number, page in enumerate(pages, 1):
html, bottom_center = page.children
line_box, = bottom_center.children
text_box, = line_box.children
assert text_box.text == 'Page {0} of 3.'.format(page_number)
@assert_no_logs
def test_border_collapse():
html = parse_all('<table></table>')
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
assert isinstance(table, boxes.TableBox)
assert not hasattr(table, 'collapsed_border_grid')
def get_grid(html):
html = parse_all(html)
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
return tuple(
[[(style, width, color) if width else None
for _score, (style, width, color) in column]
for column in grid]
for grid in table.collapsed_border_grid)
grid = get_grid('<table style="border-collapse: collapse"></table>')
assert grid == ([], [])
black = (0, 0, 0, 1)
red = (1, 0, 0, 1)
green = (0, 1, 0, 1) # lime in CSS
blue = (0, 0, 1, 1)
yellow = (1, 1, 0, 1)
vertical_borders, horizontal_borders = get_grid('''
<style>td { border: 1px solid red }</style>
<table style="border-collapse: collapse; border: 3px solid black">
<tr> <td>A</td> <td>B</td> </tr>
<tr> <td>C</td> <td>D</td> </tr>
</table>
''')
black_3 = ('solid', 3, black)
red_1 = ('solid', 1, red)
assert vertical_borders == [
[black_3, red_1, black_3],
[black_3, red_1, black_3],
]
assert horizontal_borders == [
[black_3, black_3],
[red_1, red_1],
[black_3, black_3],
]
# hidden vs. none
vertical_borders, horizontal_borders = get_grid('''
<style>table, td { border: 3px solid }</style>
<table style="border-collapse: collapse">
<tr> <td>A</td> <td style="border-style: hidden">B</td> </tr>
<tr> <td>C</td> <td style="border-style: none">D</td> </tr>
</table>
''')
assert vertical_borders == [
[black_3, None, None],
[black_3, black_3, black_3],
]
assert horizontal_borders == [
[black_3, None],
[black_3, None],
[black_3, black_3],
]
yellow_5 = ('solid', 5, yellow)
green_5 = ('solid', 5, green)
dashed_blue_5 = ('dashed', 5, blue)
vertical_borders, horizontal_borders = get_grid('''
<style>td { border: 1px solid red }</style>
<table style="border-collapse: collapse; border: 5px solid yellow">
<col style="border: 3px solid black" />
<tr> <td></td> <td></td> <td></td> </tr>
<tr> <td></td> <td style="border: 5px dashed blue"></td>
<td style="border: 5px solid lime"></td> </tr>
<tr> <td></td> <td></td> <td></td> </tr>
<tr> <td></td> <td></td> <td></td> </tr>
</table>
''')
assert vertical_borders == [
[yellow_5, black_3, red_1, yellow_5],
[yellow_5, dashed_blue_5, green_5, green_5],
[yellow_5, black_3, red_1, yellow_5],
[yellow_5, black_3, red_1, yellow_5],
]
assert horizontal_borders == [
[yellow_5, yellow_5, yellow_5],
[red_1, dashed_blue_5, green_5],
[red_1, dashed_blue_5, green_5],
[red_1, red_1, red_1],
[yellow_5, yellow_5, yellow_5],
]
# rowspan and colspan
vertical_borders, horizontal_borders = get_grid('''
<style>col, tr { border: 3px solid }</style>
<table style="border-collapse: collapse">
<col /><col /><col />
<tr> <td rowspan=2></td> <td></td> <td></td> </tr>
<tr> <td colspan=2></td> </tr>
</table>
''')
assert vertical_borders == [
[black_3, black_3, black_3, black_3],
[black_3, black_3, None, black_3],
]
assert horizontal_borders == [
[black_3, black_3, black_3],
[None, black_3, black_3],
[black_3, black_3, black_3],
]
|
[
"konedangui@gmail.com"
] |
konedangui@gmail.com
|
559d1b2d86cb1efa7b683844ed1ce138556acad7
|
623b83f99501313c3bf3f9fcd78f35d9c0c5836c
|
/MAIN/normalplanes.py
|
afa090ce7653f8c0c6d8cd13edfccfa0b6316dd0
|
[] |
no_license
|
CompVis-LV/3D-LV
|
2d9ed915054885e5333d5a1aeec204071320283d
|
0ec4cdc1eb4061713cdf22702dd71f03c6d65a38
|
refs/heads/master
| 2021-01-01T15:25:28.057745
| 2020-06-17T20:50:10
| 2020-06-17T20:50:10
| 239,329,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
import sys
sys.path.append('C:\\Users\\Jared\\Documents\\ECTE458\\3D-LV\\Python\\Tests')
import numpy as np
import argparse
import cv2
import signal
from createMask import create_mask
from functools import wraps
import errno
import os
import copy
def most_frequent(List):
dict = {}
count, itm = 0, ''
for item in reversed(List):
dict[item] = dict.get(item, 0) + 1
if dict[item] >= count :
count, itm = dict[item], item
return(itm)
def extract_normals(d_im):
d_im = d_im.astype("float64")
normals = np.array(d_im, dtype="float32")
h,w,d = d_im.shape
shape = [h, w, d]
print("%s%s%s" % (h, w, d))
dict = {}
count, itm = 0, ''
for i in range(1,w-1):
for j in range(1,h-1):
            # pixel above (t), pixel to the left (f) and centre pixel (c),
            # each as (x, y, depth)
            t = np.array([i,j-1,d_im[j-1,i,0]],dtype="float64")
            f = np.array([i-1,j,d_im[j,i-1,0]],dtype="float64")
            c = np.array([i,j,d_im[j,i,0]], dtype="float64")
            # surface normal = normalised cross product of the two tangent vectors
            d = np.cross(f-c,t-c)
            n = d / np.sqrt(np.sum(d**2))
            normals[j,i,:] = n
#print(n)
item = ("%s%s%s" % (n[0], n[1], n[2]))
#print(item)
dict[item] = dict.get(item, 0) + 1
if dict[item] >= count :
count, itm = dict[item], item
print(itm)
return normals, shape, itm
def remove_common_noise(normals, shape, itm):
for i in range(1,shape[1]-1):
for j in range(1,shape[0]-1):
if ("%s%s%s" % (normals[j,i,0], normals[j,i,1], normals[j,i,2])) == itm:
normals[j,i,:] = np.nan
#print("nannafide")
return normals
# normals = normals[1:h-1, 1:w-1, :]
# h,w,d = normals.shape
# print("%s%s%s" % (h, w, d))
def main():
imagePath = "C:\\Users\\Jared\\Documents\\ECTE458\\3D-LV\\Datasets\\user\\0_depth.png"
image = cv2.imread(imagePath)
shape = np.empty([1,3])
#normals = image.copy()
print("extracting normals")
(normals, shape, itm) = extract_normals(image)
cv2.imshow("normals", normals)
cv2.waitKey(0)
print(itm)
print("Normals extracted, removing noise")
normals = remove_common_noise(normals, shape, itm)
    # save image to be passed and opened with plt
#create correct masking
#cv2.imwrite('temp.png', normals)
print("Noise removed, create a mask")
# fromCenter = False
# mask3d = create_mask(normals)
# print(r[0], r[1], r[2], r[3])
# print(a)
# extract = normals[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]
# print(extract.shape)
# cv2.imshow("extract", extract)
# cv2.waitKey(0)
# extract[:,:,1] = np.nanmean(extract)
# extract[:,:,2] = np.nanmean(extract)
# extract[:,:,0] = np.nanmean(extract)
# cv2.imshow("extract", extract)
# cv2.waitKey(0)
# print(normals[0,0,:])
# #cv2.imwrite("normal.jpg",normals*255)
# cv2.imshow("normals", normals)
# cv2.waitKey(0)
if __name__ == "__main__":
main()
|
[
"jared.bellingham@gmail.com"
] |
jared.bellingham@gmail.com
|
edaad73f7db304cbbed0a72749453d01bdfe33ec
|
b20685cacbacf4f574e6c0d5fd90c1686e284901
|
/__main__.py
|
986a842d83307253e0f359a29432ec1e243d2ed9
|
[] |
no_license
|
aaronjanse/interlocking-braces
|
92f29be0eefd7ea72f6f7a9698f706aada4df6f8
|
a71daa0d2daf613bb1e8b850f3dbacc3edd87e42
|
refs/heads/master
| 2021-08-24T10:35:17.663445
| 2017-09-12T02:00:52
| 2017-09-12T02:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
import fileinput
from interpret import Interpreter
from callbacks import DefualtCallbacks
def preprocess_string_literals(program):
output = ''
buffer_txt = ''
in_string = False
invert = False
for char in list(program):
if char == 'i' and not in_string:
invert = True
elif char == '"':
if in_string:
in_string = False
if invert:
buffer_txt = buffer_txt[::-1]
output += ''.join([str(ord(char))+'_' for char in list(buffer_txt)])
else:
in_string = True
buffer_txt = ''
continue
elif not in_string:
invert = False
if in_string:
buffer_txt += char
else:
output += char
return output
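# Worked example (derived from the code above): string literals become
# underscore-terminated character codes, and an `i` just before a literal reverses it:
#   preprocess_string_literals('"ab"')  -> '97_98_'
#   preprocess_string_literals('i"ab"') -> 'i98_97_'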
program = ''.join([line for line in fileinput.input()])
program = preprocess_string_literals(program)
print(program)
Interpreter(program, DefualtCallbacks).run()
print()
|
[
"aaron.janse@gmail.com"
] |
aaron.janse@gmail.com
|
a16e690cf4695cac7fe4065391cd02a75f3df873
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2464487_0/Python/74th/small.py
|
83f5ac658df9d59b6e3e55720766377c63ef1dde
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
testcase = input()
DEBUG = False
def p(string):
if DEBUG :
print string
def p2(string1,string2):
if DEBUG :
print str(string1) + ":" + str(string2)
def ans(q,string):
print "Case #" + str(q+1) + ": " + str(string)
for q in xrange(testcase) :
line = raw_input().split(' ')
shiro = int(line[0])
ink = int(line[1])
flg = True
useink = 0
#First
shiro_hankei = shiro
kuro_hankei = shiro_hankei + 1
    # number of rings painted so far
    nutta = 0
    while flg :
        # white area
        shiro_menseki = shiro_hankei * shiro_hankei
        p2("white", shiro_menseki)
        # black area
        kuro_menseki = kuro_hankei * kuro_hankei
        p2("black", kuro_menseki)
        use = kuro_menseki - shiro_menseki
        useink = useink + use
        p2("ink used", useink)
if useink > ink :
ans(q,nutta)
break
nutta += 1
shiro_hankei += 2
kuro_hankei += 2
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
fa195aff45a180a759d0566986c666f0905602b3
|
b26a9fe238654d3fea04ebea6f2fc3898200d382
|
/autoPyTorch/utils/modules.py
|
36504e0f5b177f8f800d3e31dade500771eb8e78
|
[
"Apache-2.0"
] |
permissive
|
ArlindKadra/Auto-PyTorch
|
1d02c088ad943e2dd42bfc492b692e1426c90655
|
6e72d5ba088981b89371f29773d243a211a4d068
|
refs/heads/master
| 2021-11-18T21:31:42.705180
| 2021-08-05T18:58:15
| 2021-08-05T18:58:15
| 248,235,869
| 1
| 0
|
Apache-2.0
| 2020-11-13T00:03:39
| 2020-03-18T13:14:30
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
import torch.nn as nn
class Reshape(nn.Module):
def __init__(self, size):
super(Reshape, self).__init__()
self.size = size
def forward(self, x):
# import logging
# l = logging.getLogger('autonet')
# l.debug(x.shape)
# l.debug((x.reshape(-1, self.size)).shape)
return x.reshape(-1, self.size)
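# Usage sketch (hypothetical shapes): Reshape(64 * 4 * 4) flattens a (N, 64, 4, 4)
# activation into (N, 1024), e.g. between a conv stack and a fully-connected head.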
|
[
"marius.rks@googlemail.com"
] |
marius.rks@googlemail.com
|
79eace9e04cf5acc075464305933d5bd9d983ee2
|
5dd47abf7061201d9378e73e51f08fbb314ba2fd
|
/envdsys/envdsys/routing.py
|
ef1367eb35ff1bf237f5794f8d8c02faa85f8355
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/envDataSystem
|
4d264ae5209015e4faee648f37608d68a4461d0a
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
refs/heads/master
| 2023-02-23T22:33:14.334737
| 2021-07-22T01:09:16
| 2021-07-22T01:09:16
| 191,809,007
| 1
| 0
|
Unlicense
| 2023-02-08T00:45:54
| 2019-06-13T17:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 447
|
py
|
# envdsys/routing.py
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import envdaq.routing
# import envnet.routing
application = ProtocolTypeRouter(
{
# (http->django views is added by default)
"websocket": AuthMiddlewareStack(
URLRouter(envdaq.routing.websocket_urlpatterns)
# URLRouter(envnet.routing.websocket_urlpatterns)
),
}
)
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
73f83ae285b913d794a31a92ba1cdd3cf5063e4b
|
972be801215790ba193804d2eca0fee95f67ca34
|
/codechef/beginner/solved/FRK.py
|
219087195478f38219ce6e95ba5d50c8e5f2b427
|
[] |
no_license
|
rikithreddy/codeinpy
|
6f4a998e707a83349a19ed4e014eebcafbe4f6d0
|
4ec9668dd15d9ab501e7fd43946474638ac64cd0
|
refs/heads/master
| 2021-06-29T22:41:25.249869
| 2020-10-01T11:18:56
| 2020-10-01T11:18:56
| 164,993,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
n = int(input())
subs = ['ch','he','ef', 'che' , 'hef', 'chef']
count = 0
for _ in range(n):
name = input().strip()
for x in subs:
if x in name:
count+=1
break
print(count)
|
[
"rikith.legend@gmail.com"
] |
rikith.legend@gmail.com
|
6c64ea02c75bb92916378d0cda343e8105d77116
|
ffe569cdf01cf0a85f01dc810418c9b43cf3bc2c
|
/pyrl/agents/uct.py
|
85b25fa2ff70dd87ccce0d006e2a017cc154e4bb
|
[] |
no_license
|
strin/curriculum-deep-RL
|
46dad178af7dd875961fc6b7738054c4e0e8f09d
|
b4ca89e4cb37c77c8ecb8428f64b7c1fe7436e8f
|
refs/heads/master
| 2020-04-04T15:54:07.919881
| 2016-05-18T09:36:20
| 2016-05-18T09:36:20
| 43,320,487
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
# upper-confidence table for the agent to measure uncertainty.
import dill as pickle
from pyrl.agents.agent import StateTable
class UCT(object):
def __init__(self):
self.state_table = StateTable()
def visit(self, state, action):
curr_action_counts = self.state_table[state]
if not curr_action_counts:
curr_action_counts = {}
self.state_table[state] = curr_action_counts
if action not in curr_action_counts:
curr_action_counts[action] = 0.
curr_action_counts[action] += 1.
def count_sa(self, state, action):
curr_action_counts = self.state_table[state]
if not curr_action_counts:
return 0.
if action not in curr_action_counts:
return 0.
return curr_action_counts[action]
def count_s(self, state):
curr_action_counts = self.state_table[state]
if not curr_action_counts:
return 0.
return sum(curr_action_counts.values())
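# Minimal usage sketch (assumes states and actions are hashable, as StateTable implies):
#   uct = UCT()
#   uct.visit(state, action)            # record one visit of (state, action)
#   n_sa = uct.count_sa(state, action)  # -> 1.0 after the visit above
#   n_s = uct.count_s(state)            # total visits of `state` over all actions
# These counts typically feed a UCB-style bonus such as c * sqrt(log(n_s) / n_sa).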
|
[
"tianlins@stanford.edu"
] |
tianlins@stanford.edu
|
5aaed2a6ad865e0b79721558178467921183db45
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_179/ch79_2020_04_06_14_35_13_363818.py
|
ef752554d1d2ac997c9c02e2ec540329c84c45e5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def monta_dicionario(l1, l2):
    dicionario = {}
    # dict entries are assigned by key, not with append()
    dicionario[l1[0]] = l1[1]
    dicionario[l2[0]] = l2[1]
    return dicionario
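# Example of the assumed intent: each argument is a [key, value] pair, e.g.
# monta_dicionario(['a', 1], ['b', 2]) -> {'a': 1, 'b': 2}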
|
[
"you@example.com"
] |
you@example.com
|
ca2f3757aa406e821a20d55b6fddb629179c8d43
|
6909de83dd90ee1169d6c453c327ab2ce2687485
|
/lab13/tests/split-at.py
|
2929639eb68ca1204f2ffb7d0ac4c356a2c97a4d
|
[] |
no_license
|
dantefung/cs61a-2021-summer
|
730cb0b9ab7327c32c619779d71882531bf328dd
|
4f22f20fcfddfb5bf121081919310413209da1b2
|
refs/heads/master
| 2023-08-19T14:51:27.380738
| 2021-11-01T06:54:33
| 2021-11-01T06:54:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
test = {
"name": "split-at",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (car (split-at '(1 2 3 4 5) 3))
(1 2 3)
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (cdr (split-at '(1 2 3 4 5) 3))
(4 5)
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (car (split-at '(1 2 3 4 5) 10))
(1 2 3 4 5)
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (cdr (split-at '(1 2 3 4 5) 10))
()
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (car (split-at '(0 1 1 2 3) 0))
()
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (cdr (split-at '(0 1 1 2 3) 0))
(0 1 1 2 3)
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": r"""
scm> (load-all ".")
""",
"teardown": "",
"type": "scheme",
}
],
}
|
[
"wuyudi1109@gmail.com"
] |
wuyudi1109@gmail.com
|
c84e6668ba81af4bbb0bf2f25af89817f6da2860
|
5a711a4a49f94ae1497a723e29c981facf1d5f37
|
/app/sports/infrastructure/filters.py
|
a5d210155279f673773341e42550f869c20f57a4
|
[
"MIT"
] |
permissive
|
fabien-roy/glo-2005-sportsapp
|
2509583fb47bce9cff1e211cb1ed7adebaf3fdd0
|
3b5b5f9cdcfe53d1e6e702609587068c4bd3310d
|
refs/heads/master
| 2023-04-09T07:30:31.512069
| 2020-05-10T23:21:42
| 2020-05-10T23:21:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
from app.interfaces.infrastructure.filters import MySQLFilter
from app.sports.infrastructure.tables import MySQLSportTable as Sports
from app.climates.infrastructure.tables import MySQLSportClimateTable as SportClimates
class MySQLSportFilter(MySQLFilter):
joined_climate_name_col = f'C.{SportClimates.climate_name_col}'
def get_col_names(self):
return [self.joined_climate_name_col, Sports.name_col]
def get_values(self, form=None):
return [] if form is None else [form.climate.data, form.name.data]
|
[
"fabienroy28@gmail.com"
] |
fabienroy28@gmail.com
|
97d10861d9c57da92fc26b537518c04b36805480
|
f8815db141c9ea8ab8a101afd1a68d535f1ffa43
|
/pruevas.py
|
2a2d0964f900df9490d91b68e8f08ac835ae15cb
|
[] |
no_license
|
yochairo/modelado
|
5f5be83981404a0ee006fb4b88a645c8225a3b2e
|
c97ddc3131635dc19100977f399c08b7e426b097
|
refs/heads/master
| 2023-03-18T06:55:04.165370
| 2021-02-17T15:19:32
| 2021-02-17T15:19:32
| 339,763,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import pandas as pd
import math
import numpy as nps
def promediarLista(lista):
sum=0.0
for i in range(0,len(lista)):
sum=sum+lista[i][0]
return sum/len(lista)
def varianza(lista):
sum=0.0
for i in range(0,len(lista)):
sum=sum+math.pow((lista[i][0]-promediarLista(lista)),2)
return sum/(len(lista))
xls=pd.ExcelFile('practica simulado real.xlsx')
print(xls.sheet_names)
df=xls.parse("Hoja1")
df1=xls.parse("Hoja2")
re=[]
re=df.values.tolist()
si=[]
si=df1.values.tolist()
print("reales ")
mere=promediarLista(re)
vame=varianza(re)
print(mere,vame)
print ("simulados")
mesi=promediarLista(si)
vasi=varianza(si)
print(mesi,vasi)
tdetabla=1.671
T=(mesi-mere)/(math.sqrt(vame/len(re)))
print(T,len(re))
print("hipotesis nula : media de datos reales = media de datos simulados")
print("hipotesis alterna : media de datos reales > media de datos simulados")
print("cola derecaha")
if(T>tdetabla):
print("se rechaza la hipotesis nula y se acepta la hipotesis alterna ")
else:
print("se acepta la hipotesis nula y se rechaza la hipotesis alterna ")
print("cola izquierda")
if(T<-tdetabla):
print("se rechaza la hipotesis nula y se acepta la hipotesis alterna ")
else:
print("se acepta la hipotesis nula y se rechaza la hipotesis alterna ")
print("dos cola")
if(abs(T)>2.000):
print("se rechaza la hipotesis nula y se acepta la hipotesis alterna ")
else:
print("se acepta la hipotesis nula y se rechaza la hipotesis alterna ")
|
[
"joromabe@gmail.com"
] |
joromabe@gmail.com
|
8ffd6ec6c4ae62555678ce24a1a4f2d7dc9411b4
|
daebe1587552153c62e695c74e154d44bb58fcc1
|
/tester.py
|
4272b7aab0de5df57f8d0689b9bfd356b423cfa0
|
[
"MIT"
] |
permissive
|
alex-stoica/Pytorch-Lite
|
91e70625c853c6cfe8495fa72bbee3a08669f3cb
|
6b01db8ad84551c27aa3a6da4d21f248ccbc9177
|
refs/heads/master
| 2020-08-28T22:58:03.900293
| 2019-10-27T13:13:08
| 2019-10-27T13:13:08
| 217,845,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
import torch
from constants import DEVICE
def test_net(net, loader):
correct_top1 = 0
correct_top5 = 0
total = 0
count = 0
net.to(DEVICE)
with torch.no_grad():
for data in loader:
count += 1
if count % 200 == 0:
print(count)
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
outputs = net(images)
_, predicted_top1 = torch.max(outputs.data, 1)
_, predicted_top5 = outputs.topk(k=5, dim=1)
total += labels.size(0)
correct_top1 += (predicted_top1 == labels).sum().item()
for target, label in zip(predicted_top5.tolist(), labels.tolist()):
if label in target:
correct_top5 += 1
return {'TOP1': 100 * correct_top1 / total,
'TOP5': 100 * correct_top5 / total}
# print('Accuracy of the network: TOP 1: {}, TOP 5: {} %'.format(
# 100 * correct_top1 / total, 100 * correct_top5 / total))
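# Example call (hypothetical names): metrics = test_net(model, val_loader); print(metrics['TOP1'])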
|
[
"alexstoica@protonmail.com"
] |
alexstoica@protonmail.com
|
3520c59125e6523da53d8478bd964f8bb9ee9f2c
|
fa879c8c017d92b547e19271cb69e6bcffc59bbf
|
/meditate/meditate/settings.py
|
cabfb9bd29b490c6f12c6946b2d6fc5f0c112303
|
[] |
no_license
|
Aikay-Egwu/meditations
|
17ebad9b6e46e83cf5e47c22c3e952f4a6830078
|
e7862319bf73111d406d51cf60ca9f97492d44f8
|
refs/heads/main
| 2023-05-11T19:48:38.602086
| 2021-05-27T15:38:35
| 2021-05-27T15:38:35
| 371,404,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,351
|
py
|
"""
Django settings for meditate project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-o87mcju-fct!^8w-9exf!d92wgy!qndemun6e51i+lyv-1ik=#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rhema',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meditate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meditate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static"
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"justaikay@gmail.com"
] |
justaikay@gmail.com
|
773065f0d792a996af9e859c9a0e4d69c37bef2f
|
3d231adc44731f778f4798bb7760152c6ef7a7e1
|
/questions of day3/Q5.py
|
07c67a92c2111af2ec297baf69209c2094e68dd0
|
[] |
no_license
|
uniqstha/PYTHON
|
1b6a426f95daaf6c41114f8604f9d79d6c62a420
|
2925bd407e6cb880fdb57c9ed0f7dbe496c99ece
|
refs/heads/master
| 2023-06-11T16:16:28.902772
| 2021-07-13T02:25:47
| 2021-07-13T02:25:47
| 375,231,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
# Write a program to convert seconds into hours, minutes & seconds.
sec = int(input('Enter seconds: '))
hr = sec // 3600
sec %= 3600
min = sec // 60
sec %= 60  # at this point sec < 60, so no further wrapping is needed
print(f'{hr} hours and {min} minutes and {sec} seconds')
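# Worked example: 3661 -> "1 hours and 1 minutes and 1 seconds"
# (3661 // 3600 = 1 h, remainder 61 s; 61 // 60 = 1 min, remainder 1 s)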
|
[
"unique2059@gmail.com"
] |
unique2059@gmail.com
|
75a3db85d2dcef4ce1abfedee8643d4b8582acc2
|
3bfe2c44c4803ca3a0ed867c133a40a7e4cecec6
|
/test_items.py
|
040ac9310317a8e411a364d73956dca53184773d
|
[] |
no_license
|
senyprim/StepicTestProject
|
78ea868ceedb07c9e9ec95702b98d6a52f31d003
|
39234b7c24101d6414a3d54001da24a3c059a839
|
refs/heads/master
| 2023-04-04T22:11:28.541348
| 2019-07-22T13:45:19
| 2019-07-22T13:45:19
| 198,141,279
| 0
| 0
| null | 2021-04-20T18:22:14
| 2019-07-22T03:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
import pytest
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
def test_exist_btn_add_to_basket(browser):
lang=browser.execute_script("return navigator.language || navigator.userLanguage")
browser.get("http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/")
try:
WebDriverWait(browser,7).until(EC.presence_of_element_located((By.CSS_SELECTOR,".btn-add-to-baskett")))
assert True
except:
        assert False, f'The "Add to basket" button is missing from the page. Browser={browser.capabilities["browserName"]} Language={lang}'
|
[
"noreply@github.com"
] |
senyprim.noreply@github.com
|
c1e0f96342e5e39589ee52a167ae417bfa98b811
|
92eec9d251e24f48fa42160e5061c5fb3120fdbb
|
/ipython1/trunk/ipython1/external/twisted/web2/dav/method/__init__.py
|
e560fda343c3c39cfaf4266c3a9febd5d54f354d
|
[] |
no_license
|
minrk/ipython-svn-archive
|
55dde54f52284c5c3b32ac2a12bb4734e5f8e7f8
|
9b32089282c94c706d819333a3a2388179e99e86
|
refs/heads/master
| 2022-12-24T19:50:01.359278
| 2020-09-25T09:34:04
| 2020-09-25T09:34:04
| 298,268,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV methods.
Modules in this package provide the implementation of
ipython1.external.twisted.web2.dav.static.DAVFile's dispatched methods.
"""
__all__ = [
"copymove",
"delete",
"lock",
"mkcol",
"propfind",
"proppatch",
"put",
"report",
"report_expand",
]
|
[
"ellisonbg@gmail.com"
] |
ellisonbg@gmail.com
|
e8702e4273d04c4646dc09c87821bcdc74eeea3b
|
8aa0d1d407bb1c66d01261f7e2c4e9832e856a2d
|
/experiments/experiments_toy/sparsity/nmf_gibbs.py
|
191ea3d17786029565da075ad28ceb22a9412f92
|
[] |
no_license
|
garedaba/BNMTF_ARD
|
59e3ec1dbfd2a9ab9f4ec61368ec06e3783c3ee4
|
0a89e4b4971ff66c25010bd53ee2622aeaf69ae9
|
refs/heads/master
| 2022-01-16T06:57:12.581285
| 2018-06-10T10:22:12
| 2018-06-10T10:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
'''
Test the performance of Gibbs sampling for recovering a toy dataset, where we
vary the fraction of entries that are missing.
We repeat this 10 times per fraction and average that.
'''
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BNMTF_ARD.code.models.bnmf_gibbs import bnmf_gibbs
from BNMTF_ARD.code.cross_validation.mask import calc_inverse_M
from BNMTF_ARD.code.cross_validation.mask import try_generate_M
import numpy, matplotlib.pyplot as plt
''' Experiment settings. '''
repeats = 10
fractions_unknown = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] #[ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ]
input_folder = project_location+"BNMTF_ARD/data/toy/bnmf/"
output_folder = project_location+"BNMTF_ARD/experiments/experiments_toy/sparsity/results/"
output_file = output_folder+'nmf_gibbs.txt'
metrics = ['MSE', 'R^2', 'Rp']
''' Model settings. '''
iterations = 500
burn_in = 400
thinning = 2
init_UV = 'random'
I,J,K = 100, 80, 10
ARD = False
lambdaU, lambdaV = 0.1, 0.1
alphatau, betatau = 1., 1.
alpha0, beta0 = 1., 1.
hyperparams = { 'alphatau':alphatau, 'betatau':betatau, 'alpha0':alpha0, 'beta0':beta0, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
''' Load in data. '''
R = numpy.loadtxt(input_folder+"R.txt")
''' Generate matrices M - one list of M's for each fraction. '''
M_attempts = 100
all_Ms = [
[try_generate_M(I,J,fraction,M_attempts)[0] for r in range(0,repeats)]
for fraction in fractions_unknown
]
all_Ms_test = [ [calc_inverse_M(M) for M in Ms] for Ms in all_Ms ]
''' Make sure each M has no empty rows or columns. '''
def check_empty_rows_columns(M,fraction):
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction)
for Ms,fraction in zip(all_Ms,fractions_unknown):
for M in Ms:
check_empty_rows_columns(M,fraction)
''' We now run the Gibbs sampler on each of the M's for each fraction. '''
all_performances = {metric:[] for metric in metrics}
average_performances = {metric:[] for metric in metrics} # averaged over repeats
for (fraction,Ms,Ms_test) in zip(fractions_unknown,all_Ms,all_Ms_test):
print "Trying fraction %s." % fraction
# Run the algorithm <repeats> times and store all the performances
for metric in metrics:
all_performances[metric].append([])
for (repeat,M,M_test) in zip(range(0,repeats),Ms,Ms_test):
print "Repeat %s of fraction %s." % (repeat+1, fraction)
BNMF = bnmf_gibbs(R,M,K,ARD,hyperparams)
BNMF.initialise(init_UV)
BNMF.run(iterations)
# Measure the performances
performances = BNMF.predict(M_test,burn_in,thinning)
for metric in metrics:
# Add this metric's performance to the list of <repeat> performances for this fraction
all_performances[metric][-1].append(performances[metric])
# Compute the average across attempts
for metric in metrics:
average_performances[metric].append(sum(all_performances[metric][-1])/repeats)
''' Print and store the performances. '''
print "repeats=%s \nfractions_unknown = %s \nall_performances = %s \naverage_performances = %s" % \
(repeats,fractions_unknown,all_performances,average_performances)
open(output_file,'w').write("%s" % all_performances)
''' Plot the average performances. '''
for metric in ['MSE']:
plt.figure()
x = fractions_unknown
y = average_performances[metric]
plt.plot(x,y)
plt.xlabel("Fraction missing")
plt.ylabel(metric)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
a376732a001593189577b5db902da9b29d46dc53
|
d9565c31dbb091e4379c9f6ce5feaec3ea711516
|
/main.py
|
d3b1958817656a13980b7bd35ce4aed5c6146930
|
[] |
no_license
|
jfish10/quote-scraper
|
3bbfd0e2a2c9242c54124f66554f684fa903d622
|
63da47575f875bf9694769384373a0054ced1f1f
|
refs/heads/master
| 2023-02-21T21:18:51.130858
| 2021-01-04T23:57:28
| 2021-01-04T23:57:28
| 326,830,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
from bs4 import BeautifulSoup
import requests
import csv
import additional_quotes
result = requests.get('http://quotes.toscrape.com/')
page = result.text
soup = BeautifulSoup(page, 'html.parser')
quotes = soup.find_all('div', class_='quote')
scraped = []
for quote in quotes:
text = quote.find('span', class_='text').text
author = quote.find('small', class_='author').text
scraped.append([text, author])
additional_quotes = additional_quotes.handle_additional_site('https://www.keepinspiring.me/famous-quotes/')
with open('famous-quotes.csv', 'w') as quotations:
writer = csv.writer(quotations, delimiter=',')
for quote in scraped:
writer.writerow(quote)
for quote in additional_quotes:
writer.writerow(quote)
print("Quotes added successfully!")
|
[
"everestmf8@gmail.com"
] |
everestmf8@gmail.com
|
c5004198e13ad75f7282c8c2a9a8fbd200d6eef4
|
df1abc2cf262e830994fc985172f748e8226e562
|
/processor.py
|
4c44dcf7136ff159f7ebe3e75bfdd5f20323e043
|
[] |
no_license
|
etu7912a48/paperwork
|
092fbb1ca6955516b00ea87905d47746410da2b7
|
de0cdfb4043d085e6ad2d2a634688f6f8a259d6e
|
refs/heads/master
| 2023-09-04T16:59:24.949501
| 2021-11-22T04:39:08
| 2021-11-22T04:39:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,829
|
py
|
import fitz
import cv2
import numpy as np
import easyocr
import re
import logging
#mod logger
m_logger = logging.getLogger('paperwork.processor')
#class of the main processor
class Processor():
def __init__(self, data_type, *args, **kwargs) -> None:
self.logger = logging.getLogger('paperwork.processor.Processor')
self.input_dir = kwargs['input_dir']
self.output_dir = kwargs['output_dir']
self.error_dir = kwargs['error_dir']
self.data_type = data_type
self.logger.info('input_dir is {}'.format(self.input_dir))
self.logger.info('output_dir is {}'.format(self.output_dir))
self.logger.info('error_dir is {}'.format(self.error_dir))
self.logger.info('data_type is {}'.format(self.data_type))
def set_dataType(self, d):
self.data_type = d
#pdf core
def set_pdfCore(self, p):
self.pdf_core = p
if self.data_type != None:
self.pdf_core.data_type = self.data_type
def convert_pdf(self, pdf_list):
try:
self.logger.info('start to convert pdf')
p = self.pdf_core.convert_pdf(pdf_list, self.input_dir)
return p
except:
self.logger.warning('some error happened when converting pdf')
#img core
def set_imgCore(self, i):
self.img_core = i
if self.data_type != None:
self.pdf_core.data_type = self.data_type
def refine_img(self, img_list):
try:
return self.img_core.refine_img(img_list)
except:
self.logger.warning('some error happened when refining img')
#ocr core
def set_ocrCore(self, o):
self.ocr_core = o
if self.output_dir != None:
self.ocr_core.output_dir = self.output_dir
def classify_img(self, img_list):
try:
self.ocr_core.classify_img(img_list, self.output_dir, self.error_dir)
self.logger.info('All img has been classified')
except:
self.logger.warning('some error happened when doing classification of img', exc_info=True)
# sub cores of the main processor including pdf, img and ocr
class PDF_core():
def __init__(self, *args, **kwargs) -> None:
self.logger = logging.getLogger('paperwork.processor.PDF_core')
self.data_type = "DR"
def convert_pdf(self, pdf_list, input_dir):
img_list = []
pdfs = list(map(lambda f: fitz.open('{}/{}'.format(input_dir, f)), pdf_list))
for pdf in pdfs:
for page in pdf:
pix = page.get_pixmap(matrix=fitz.Matrix(3, 3))
img = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.h, pix.w, pix.n)
img_list.append(img)
return img_list
class IMG_core():
def __init__(self, *args, **kwargs) -> None:
self.logger = logging.getLogger('paperwork.processor.IMG_core')
self.data_type = "DR"
def refine_img(self, img_list):
self.logger.info('start to refine img')
th_list = []
for img in img_list:
ori = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGBA2BGR)
gray = cv2.cvtColor(ori, cv2.COLOR_BGR2GRAY)
k = np.ones((2, 2), np.uint8)
close_img = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, k)
th = cv2.adaptiveThreshold(close_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
th_list.append(th)
return th_list
class OCR_core():
def __init__(self) -> None:
self.logger = logging.getLogger('paperwork.processor.OCR_core')
self.reader = easyocr.Reader(['en'])
def classify_img(self, img_list, output_dir, error_dir):
problem_count = 0
for i in img_list:
temp_i = i[160:290, 320:720]
text_list = self.reader.readtext(temp_i, detail=0)
fn = list(filter(lambda o :re.compile(r'\d{12,}').findall(o), text_list))
#pytesseract.pytesseract.tesseract_cmd=r'Tesseract-OCR/tesseract.exe'
#s = pytesseract.image_to_string(temp_i, lang='eng')
#fn = re.compile(r'\d{12}').findall(s)
if fn == []:
problem_count = problem_count + 1
cv2.imwrite(r'{}/error-{}.png'.format(error_dir, problem_count), i)
self.logger.warning(r'failed to recognize the delivery receipt')
self.logger.warning(r'OCR detected those things: {}'.format(text_list))
else:
cv2.imwrite(r'{}/{}.png'.format(output_dir, fn[0]), i)
if problem_count > 0:
            self.logger.warning('Detected {} files with errors. Please check the error dir to rename those files manually'.format(problem_count))
if __name__ == '__main__':
m_logger.warning('the module processor has been executed')
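# Usage sketch (editorial illustration, not part of the original module): one way the
# three cores might be wired into a Processor. The directory names and the pdf file
# name below are placeholder assumptions, not values taken from this project.
def _example_usage():  # hypothetical helper; the module never calls it
    processor = Processor('DR', input_dir='input', output_dir='output', error_dir='error')
    processor.set_pdfCore(PDF_core())
    processor.set_imgCore(IMG_core())
    processor.set_ocrCore(OCR_core())
    imgs = processor.convert_pdf(['receipt.pdf'])  # PDF pages -> numpy images
    refined = processor.refine_img(imgs)           # grayscale, close, adaptive threshold
    processor.classify_img(refined)                # OCR the receipt number and rename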
|
[
"noreply@github.com"
] |
etu7912a48.noreply@github.com
|
237ff58e8e70b4779aeaf9f549eed85c60a17b8c
|
922127ab8352f4270ee08303f5b396a5509cf36e
|
/external/AR/pytracking/features/util.py
|
6f546b6f79abfbae49af01232b780a8856118f9e
|
[
"MIT"
] |
permissive
|
Omri-L/Stark
|
6eff8711ce6a53cb9c5f44160bc52aed35084eaf
|
f1f29c087a5d9c3b311f915349e4a880638b0aa2
|
refs/heads/main
| 2023-04-21T00:11:48.696501
| 2021-05-04T13:14:13
| 2021-05-04T13:14:13
| 373,739,719
| 1
| 0
|
MIT
| 2021-06-04T06:14:38
| 2021-06-04T06:14:37
| null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
import torch
from pytracking.features.featurebase import FeatureBase
class Concatenate(FeatureBase):
"""A feature that concatenates other features.
args:
features: List of features to concatenate.
"""
def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):
super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray)
self.features = features
self.input_stride = self.features[0].stride()
for feat in self.features:
if self.input_stride != feat.stride():
                raise ValueError('Strides for the features must be the same for a multiresolution feature.')
def dim(self):
return sum([f.dim() for f in self.features])
def stride(self):
return self.pool_stride * self.input_stride
def extract(self, im: torch.Tensor):
return torch.cat([f.get_feature(im) for f in self.features], 1)
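# Usage sketch (editorial illustration, not part of the original file). Any concrete
# FeatureBase subclasses with matching strides can be stacked; "SomeDeepFeature" is a
# placeholder name, not an actual pytracking class.
#
#   combined = Concatenate([SomeDeepFeature(...), SomeDeepFeature(...)], pool_stride=1)
#   combined.dim()        # sum of the individual feature dims
#   combined.extract(im)  # torch.cat of the individual feature maps along dim 1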
|
[
"yan_bin@mail.dlut.edu.cn"
] |
yan_bin@mail.dlut.edu.cn
|
b89a3fc12ce46f175c9da53e5533e5edb63bd3a4
|
442637ed688f4add41bd97660e795615ae86b69d
|
/setup.py
|
c6965f5d0f8cd81f4b10ced511595ac72e12da96
|
[
"MIT"
] |
permissive
|
ElsevierSoftwareX/SOFTX_2020_220
|
2a740cfc8adbc07bcc7db57b27eb1c3eb1729164
|
ec26946f494406f22a220fd4975e045c13ee41a3
|
refs/heads/master
| 2023-02-06T04:14:56.274690
| 2020-12-30T14:30:15
| 2020-12-30T14:30:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="vcd",
version="4.3.0",
author="Marcos Nieto",
author_email="mnieto@vicomtech.org",
description="Video Content Description (VCD) library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Vicomtech/video-content-description-VCD",
project_urls={
"VCD project": "https://vcd.vicomtech.org"
},
packages=setuptools.find_packages(),
install_requires=[
'jsonschema>=3.2',
'protobuf',
'numpy>=1.19.0,<1.19.4'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
python_requires='>=3.6',
)
|
[
"marcos.nieto.doncel@gmail.com"
] |
marcos.nieto.doncel@gmail.com
|
58fe279d70367b1aa69c6d3f868d1e30d72871df
|
24a9ca4a9c7b2f36f20cc2dac1a4017cb5b49ceb
|
/Real-ChemTS/ChemTS/mcts_node.py
|
21b50438c3d64d478c3120b0efd96712d789d541
|
[
"MIT"
] |
permissive
|
jinzhezhang/erbb1_project
|
0cc6cef23515151a5c9949364102b359600ad9b9
|
047969d47719821526bf6b81230c43341bf476cf
|
refs/heads/main
| 2023-07-09T15:42:10.560628
| 2021-08-18T02:13:14
| 2021-08-18T02:13:14
| 396,380,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,357
|
py
|
import numpy as np
from keras.preprocessing import sequence
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import MolFromSmiles
import sascorer
import networkx as nx
from rdkit.Chem import rdmolops
import sys
sys.path.append('../..')
from gnn.main.predict import predict_vec
import gnn.main.config as config
import gnn.main.preprocess as pp
def expand_node(model, state, val):
all_nodes = []
position = []
position.extend(state)
get_index = []
for j in range(len(position)):
#print ("val.index(position[j])1:", val.index(position[j]))
get_index.append(val.index(position[j]))
x = np.reshape(get_index, (1, len(get_index)))
x_pad = sequence.pad_sequences(x, maxlen=81, dtype='int32',
padding='post', truncating='pre', value=0.)
for i in range(30):
predictions = model.predict(x_pad)
preds = np.asarray(predictions[0][len(get_index) - 1]).astype('float64')
preds = np.log(preds) / 1.0
preds = np.exp(preds) / np.sum(np.exp(preds))
next_probas = np.random.multinomial(1, preds, 1)
next_int = np.argmax(next_probas)
all_nodes.append(next_int)
all_nodes = list(set(all_nodes))
return all_nodes
def add_node(all_nodes, val):
added_nodes = []
for i in range(len(all_nodes)):
added_nodes.append(val[all_nodes[i]])
return added_nodes
def simulate_node(model, state, val, added_nodes):
all_posible = []
maxlen = 81
end = "$"
for i in range(len(added_nodes)):
position = []
position.extend(state)
position.append(added_nodes[i])
total_generated = []
get_index = []
for j in range(len(position)):
get_index.append(val.index(position[j]))
x = np.reshape(get_index, (1, len(get_index)))
x_pad = sequence.pad_sequences(x, maxlen=maxlen, dtype='int32',
padding='post', truncating='pre', value=0.)
while not get_index[-1] == val.index(end):
predictions = model.predict(x_pad)
preds = np.asarray(predictions[0][len(get_index) - 1]).astype('float64')
preds = np.log(preds) / 1.0
preds = np.exp(preds) / np.sum(np.exp(preds))
next_probas = np.random.multinomial(1, preds, 1)
next_int = np.argmax(next_probas)
get_index.append(next_int)
x = np.reshape(get_index, (1, len(get_index)))
x_pad = sequence.pad_sequences(x, maxlen=maxlen, dtype='int32',
padding='post', truncating='pre', value=0.)
if len(get_index) > maxlen:
break
total_generated.append(get_index)
all_posible.extend(total_generated)
return all_posible
def predict_smile(all_posible, val):
new_compound = []
for i in range(len(all_posible)):
total_generated = all_posible[i]
generate_smile = []
for j in range(len(total_generated) - 1):
generate_smile.append(val[total_generated[j]])
generate_smile.remove("&")
new_compound.append(generate_smile)
return new_compound
def make_smile(generate_smile):
new_compound = []
for i in range(len(generate_smile)):
middle = []
for j in range(len(generate_smile[i])):
middle.append(generate_smile[i][j])
com = ''.join(middle)
new_compound.append(com)
return new_compound
def evaluate_erbb1_log50(gnn_model, dicts, new_compound):
node_index = []
valid_compound = []
scores = []
for i in range(len(new_compound)):
try:
m = Chem.AddHs(Chem.MolFromSmiles(str(new_compound[i])))
except:
print('None')
m = None
with open('ChemTS_erbb1_2000_optimized_score.txt','a') as f:
f.write(str(new_compound[i]) + '| None' + '\n')
if m and len(new_compound[i]) <= 81:
node_index.append(i)
valid_compound.append(new_compound[i])
try:
res = predict_vec(gnn_model,'regression',
'../../gnn/dataset/regression/erbb1_clean_log_ic50/',
1, 'erbb1_clean_log_ic50', config.device, [new_compound[i]],
dicts)
except:
res = [99]
with open('ChemTS_erbb1_2000_optimized_score.txt','a') as f:
f.write(str(new_compound[i]) + '|' + str(-res[0]) + '\n')
scores.append(max(5-res[0], 0))
return node_index, scores, valid_compound
def evaluate_node_logp(new_compound):
node_index = []
valid_compound = []
score = []
logP_values = np.loadtxt('logP_values.txt')
logP_mean = np.mean(logP_values)
logP_std = np.std(logP_values)
cycle_scores = np.loadtxt('cycle_scores.txt')
cycle_mean = np.mean(cycle_scores)
cycle_std = np.std(cycle_scores)
SA_scores = np.loadtxt('SA_scores.txt')
SA_mean = np.mean(SA_scores)
SA_std = np.std(SA_scores)
for i in range(len(new_compound)):
try:
m = Chem.AddHs(Chem.MolFromSmiles(str(new_compound[i])))
        except:
            print('None')
            m = None
if m != None and len(new_compound[i]) <= 81:
try:
logp = Descriptors.MolLogP(m)
except:
logp = -1
node_index.append(i)
valid_compound.append(new_compound[i])
SA_score = -sascorer.calculateScore(MolFromSmiles(new_compound[i]))
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(MolFromSmiles(new_compound[i]))))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score = -cycle_length
SA_score_norm = (SA_score - SA_mean) / SA_std
logp_norm = (logp - logP_mean) / logP_std
cycle_score_norm = (cycle_score - cycle_mean) / cycle_std
score_one = SA_score_norm + logp_norm + cycle_score_norm
score.append(score_one)
return node_index, score, valid_compound
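# Editorial note (interpretation of the code above, not original text): score_one is
# the common "penalized logP" objective -- the z-scored logP plus the z-scored,
# negated synthetic-accessibility score plus the z-scored, negated penalty for rings
# larger than six atoms, using the normalization statistics loaded from the three
# *.txt files at the top of the function.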
|
[
"jinzhe.zhang01@gmail.com"
] |
jinzhe.zhang01@gmail.com
|
cc94a07b64fa9f7baf4cd0f66827740d8bb7a35d
|
1ac2511b35cfcc3275f788a708d25623ac0f1759
|
/sobol_sequence_generator.py
|
3f324b3825626f2b71699200e29f52e4f3c4e8f5
|
[] |
no_license
|
QuantumElephant/manifold
|
44c6050b641bd0f1328be53bc7d94a875ade3481
|
a1cfd90dd49e9d30dded33aef084de63bc48e3fb
|
refs/heads/master
| 2021-07-01T14:59:38.447500
| 2017-09-21T03:24:30
| 2017-09-21T03:24:30
| 103,166,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 08:37:52 2017
@author: SKY
"""
import numpy as np
import sobol_sequence as sobol_seq
from scipy.spatial.distance import cdist
def generator(atom_number, molecule_number):
total=sobol_seq.i4_sobol_generate_std_normal(3,atom_number*molecule_number)
out=np.empty((0,3))
for i in range(molecule_number):
array=total[i*atom_number:(i+1)*atom_number]
F = cdist(array, array, 'euclidean')
F.sort()
F=F[:,1:]
R_smallest=F.min()
coordinate=array/R_smallest
out=np.append(out,coordinate,axis=0)
return out
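# Editorial note (interpretation of the loop above, not original text): for each
# molecule, the coordinates are divided by the smallest pairwise distance within
# that molecule, so the closest pair of generated atoms ends up exactly one unit apart.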
out=generator(atom_number=3, molecule_number=10**3)
np.savetxt('coordi_3.txt',out)
out=generator(atom_number=4, molecule_number=10**3)
np.savetxt('coordi_4.txt',out)
out=generator(atom_number=5, molecule_number=10**3)
np.savetxt('coordi_5.txt',out)
out=generator(atom_number=6, molecule_number=10**3)
np.savetxt('coordi_6.txt',out)
|
[
"shikunyu8@163.com"
] |
shikunyu8@163.com
|
67952539816dfe0ff6680c594523e28d0b430f60
|
73c587d98cfb62a8c074844e9fea92018b8bf81a
|
/main.py
|
2d45324cd5d3675b2d536edd94768ad2d9fc78e6
|
[] |
no_license
|
JDavid17/fuzzyproject
|
d76dd12ef27794d20872c3a765561770cd5f577e
|
4715212a9d7f2f93eaf356ef55d760f549d8826e
|
refs/heads/master
| 2020-05-24T05:00:22.721997
| 2019-05-18T02:59:47
| 2019-05-18T02:59:47
| 187,102,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,054
|
py
|
from src.database import *
from src.defuzzification import Defuzzification
from src.aggregation import AggregationMethods
from src.aggregation import *
from src.utilis import category_selector as cs
from src.utilis import price_selector as pc
import sys
import matplotlib.pyplot as pypol
rules = [
"micro_regular & videoCard_regular & price_regular",
"micro_regular & videoCard_regular & price_veryGood",
"micro_regular & videoCard_good & price_regular",
"micro_regular & videoCard_veryGood & price_good",
"micro_regular & videoCard_veryGood & price_veryGood",
"micro_good & videoCard_regular & price_regular",
"micro_good & videoCard_regular & price_good",
"micro_good & videoCard_good & price_regular",
"micro_good & videoCard_good & price_veryGood",
"micro_good & videoCard_veryGood & price_regular",
"micro_veryGood & videoCard_regular & price_regular",
"micro_veryGood & videoCard_good & price_regular",
"micro_veryGood & videoCard_veryGood & price_regular",
]
impl = [
"Maybe",
"No",
"Yes",
"Yes",
"Maybe",
"Maybe",
"No",
"Yes",
"No",
"Yes",
"Maybe",
"Yes",
"Yes"
]
class Personal_buyer:
def __init__(self, micro, videoCard, price):
self.micro = cs(micro)
self.videoCard = cs(videoCard)
self.price = pc(price)
self.buy = "No"
def __str__(self):
return "micro.value: {} - micro.category: {}\n" \
"videoCard.value: {} - videoCard.category: {}\n" \
"price.value: {} - price.category: {}\n".format(self.micro["value"], self.micro["category"],
self.videoCard["value"], self.videoCard["category"],
self.price["value"], self.price["category"])
if __name__ == "__main__":
if len(sys.argv) > 3:
print(sys.argv)
micro_value = int(sys.argv[1])
videoCard_value = int(sys.argv[2])
price_value = int(sys.argv[3])
Personal_buyer(micro_value, videoCard_value, price_value)
values = {"micro_regular": micro_value,
"micro_good": micro_value,
"micro_veryGood": micro_value,
"videoCard_regular": videoCard_value,
"videoCard_good": videoCard_value,
"videoCard_veryGood": videoCard_value,
"price_regular": price_value,
"price_good": price_value,
"price_veryGood": price_value
}
memb = {
"micro_regular": regular,
"micro_good": good,
"micro_veryGood": veryGood,
"videoCard_regular": regular,
"videoCard_good": good,
"videoCard_veryGood": veryGood,
"price_regular": regular,
"price_good": good,
"price_veryGood": veryGood,
"Yes": Yes,
"No": No,
"Maybe": Maybe
}
rules_results = []
index_rules = [Rule(item.split(' ')) for item in rules]
for item in index_rules:
# temp = {k: memb[k](v) for k, v in values.items()}
val = item.evaluate({k: memb[k](v) for k, v in values.items()})
rules_results.append(val)
# print(rules_results)
m_x, m_y = AggregationMethods.Mamdani(impl, rules_results, memb, (0, 2))
l_x, l_y = AggregationMethods.Larsen(impl, rules_results, memb, (0, 2))
# print(m_x)
# print(m_y)
print("Defuzzification Using Centroid")
print("Defuzzification for Mamdani using Centroide: " + str(Defuzzification.centroide(m_x, m_y)))
print("Defuzzification for Larsen using Centroide: " + str(Defuzzification.centroide(l_x, l_y)))
print("Defuzzification Using Bisection")
print("Defuzzification for Mamdani using Bisection: " + str(Defuzzification.bisection(m_x, m_y)))
print("Defuzzification for Larsen using Bisection: " + str(Defuzzification.bisection(l_x, l_y)))
pypol.plot(m_x, m_y)
pypol.plot(l_x, l_y)
pypol.show()
else:
        print('Enter the parameters correctly, following this format: \n'
              'Micro quality: Int[1-30]\n'
              'Video card quality: Int[1-30]\n'
              'Price quality: Int[1-30]\n'
              'Example: python main.py 25 9 10')
# micro_value = 25
# videoCard_value = 9
# price_value = 10
# assistant = Personal_buyer(micro_value, videoCard_value, price_value, 'bisection')
# categorys = {"micro": assistant.micro["category"],
# "videoCard": assistant.videoCard["category"],
# "price": assistant.price["category"]}
# print(assistant.micro["value"])
# print(assistant.micro["category"])
# print(assistant.videoCard["value"])
# print(assistant.videoCard["category"])
# print(assistant.price["value"])
# print(assistant.price["category"])
|
[
"jdavidhc1710@gmail.com"
] |
jdavidhc1710@gmail.com
|
0aec6aa36174cd2ae7b17de6075d450a280d9e65
|
16bbc9a76dbf6960456e170110ac5e34fd7eb084
|
/cms/settings.py
|
3bbe3de3144f4a9c7b036ef9f223f56f6a458070
|
[] |
no_license
|
zidianqs/changeju-cms
|
0faa1d83b3060594bccd1c94c08bc028469d1fa0
|
024d70c4205cd53d7290e8fd908ae14c75db16c8
|
refs/heads/master
| 2020-04-15T10:57:07.011944
| 2014-08-13T04:18:36
| 2014-08-13T04:18:36
| 20,514,709
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
"""
Django settings for cms project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o83!zfm=l^lgpnm8rmgce-w6aahf*h$tn@y8)wvy7q##uh5mim'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG = False
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'icms.changeju.com', '10.0.100.1']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'property',
'manual',
'demo',
'ds',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cms.urls'
WSGI_APPLICATION = 'cms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# },
'default': {
'ENGINE': 'django.db.backends.mysql',
'PORT': '3306',
# 'HOST': '10.0.100.1',
# 'USER': 'remote',
# 'PASSWORD': 'remote',
'HOST': '127.0.0.1',
'USER': 'root',
'PASSWORD': 'root',
'NAME': 'cj_cms',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = 'static/'
STATIC_ROOT = '/var/www/static/'
TEMPLATE_DIRS = 'templates/'
|
[
"yanhaoliang2007@gmail.com"
] |
yanhaoliang2007@gmail.com
|
144de343f3f9ab62a51a16f1c5c0a0f3d77f4b99
|
c6c5c6554e31b0c5967ea66d4c01644bc5703a2b
|
/Approach2/splitcrimes.py
|
970ee04035b3d81bb8a6b9077b041ef668c235cc
|
[] |
no_license
|
viswajithiii/analyze-this
|
7dc0eb2e4564e26a752a7878290402a381c965de
|
cf54e55a4ae90e430d751840d8499752c1be1cc9
|
refs/heads/master
| 2021-01-18T05:04:34.747546
| 2014-08-21T10:30:08
| 2014-08-21T10:30:08
| 23,017,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
import numpy as np
traindatafile = open('Training_Dataset_clued.csv','r')
c1file = open('TrD1.csv','w')
c2file = open('TrD2.csv','w')
crimeone = 0
crimetwo = 0
firstlineseen = False
for line in traindatafile:
if not firstlineseen:
firstlineseen = True
continue
splitline = line.split(',')
crime = int(splitline[1][-1])
if crime == 1:
crimeone += 1
c1file.write(','.join(splitline[2:]))
else:
crimetwo += 1
c2file.write(','.join(splitline[2:]))
|
[
"viswajithiii@gmail.com"
] |
viswajithiii@gmail.com
|
81fe82e780e762a54cf533855f00114ad3ef1997
|
5c5b34f6f598a43ddfbd473228737a27c26d1d8e
|
/剑指offer/面试题63. 股票的最大利润.py
|
c935a25747fbda4516f2627db965bc1ae5f9d1ce
|
[] |
no_license
|
lovehhf/LeetCode
|
34a1bc140b10dc83a32ef9a70f9c73176948a9c4
|
5d3574ccd282d0146c83c286ae28d8baaabd4910
|
refs/heads/master
| 2021-11-04T04:52:34.518621
| 2021-10-26T15:34:47
| 2021-10-26T15:34:47
| 173,673,492
| 0
| 0
| null | 2020-03-03T14:54:09
| 2019-03-04T04:26:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
# -*- coding:utf-8 -*-
"""
Suppose the prices of a stock are stored in an array in chronological order. What is the maximum profit obtainable from buying and then selling the stock once?
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6); maximum profit = 6-1 = 5.
Note that the profit cannot be 7-1 = 6, because the selling price needs to be greater than the buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case no transaction is completed, so the maximum profit is 0.
Constraints:
0 <= array length <= 20000
Note: this problem is the same as problem 121 on the main LeetCode site: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof
The copyright belongs to LeetCode-CN (领扣网络). For commercial reprints, contact the official site for authorization; for non-commercial reprints, please credit the source.
"""
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
n = len(prices)
f = [[0] * 2 for _ in range(n)]
f[0][1] = -prices[0]
for i in range(1, n):
f[i][0] = max(f[i - 1][0], f[i - 1][1] + prices[i])
f[i][1] = max(f[i - 1][1], -prices[i])
return f[n - 1][0]
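    # Editorial sketch (not part of the original solution): the same answer can be
    # computed in O(1) extra space by tracking the cheapest price seen so far and
    # the best profit at each step.
    def maxProfit_min_tracking(self, prices: List[int]) -> int:
        best, lowest = 0, float('inf')
        for p in prices:
            lowest = min(lowest, p)       # cheapest buying price so far
            best = max(best, p - lowest)  # best profit if we sell today
        return best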
|
[
"853885165@qq.com"
] |
853885165@qq.com
|
f17fb73b0910108296139cb944d9b687cf6d8558
|
c0f4104194a7989e44d7f0161b2425c5a5bc3a98
|
/barbican/model/repositories.py
|
ed44d73acf0784126efcef49bc68b8b13c9a73f5
|
[] |
no_license
|
bopopescu/Openstack-2
|
f65470bdd0ee4736c45b6f869f0453cb8eb446c8
|
6f06133562e3dfd490695a92c9ddf1a322675104
|
refs/heads/master
| 2022-11-28T09:19:21.633850
| 2016-06-23T07:55:32
| 2016-06-23T07:55:32
| 282,095,817
| 0
| 0
| null | 2020-07-24T01:44:49
| 2020-07-24T01:44:48
| null |
UTF-8
|
Python
| false
| false
| 81,929
|
py
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines interface for DB access that Resource controllers may reference
TODO: The top part of this file was 'borrowed' from Glance, but seems
quite intense for sqlalchemy, and maybe could be simplified.
"""
import logging
import sys
import time
import uuid
from oslo_utils import timeutils
import sqlalchemy
from sqlalchemy import func as sa_func
from sqlalchemy import or_
import sqlalchemy.orm as sa_orm
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.model.migration import commands
from barbican.model import models
LOG = utils.getLogger(__name__)
_ENGINE = None
_SESSION_FACTORY = None
BASE = models.BASE
sa_logger = None
# Singleton repository references, instantiated via get_xxxx_repository()
# functions below. Please keep this list in alphabetical order.
_CA_REPOSITORY = None
_CONTAINER_ACL_REPOSITORY = None
_CONTAINER_CONSUMER_REPOSITORY = None
_CONTAINER_REPOSITORY = None
_CONTAINER_SECRET_REPOSITORY = None
_ENCRYPTED_DATUM_REPOSITORY = None
_KEK_DATUM_REPOSITORY = None
_ORDER_PLUGIN_META_REPOSITORY = None
_ORDER_BARBICAN_META_REPOSITORY = None
_ORDER_REPOSITORY = None
_ORDER_RETRY_TASK_REPOSITORY = None
_PREFERRED_CA_REPOSITORY = None
_PROJECT_REPOSITORY = None
_PROJECT_CA_REPOSITORY = None
_PROJECT_QUOTAS_REPOSITORY = None
_SECRET_ACL_REPOSITORY = None
_SECRET_META_REPOSITORY = None
_SECRET_USER_META_REPOSITORY = None
_SECRET_REPOSITORY = None
_TRANSPORT_KEY_REPOSITORY = None
CONF = config.CONF
def hard_reset():
"""Performs a hard reset of database resources, used for unit testing."""
# TODO(jvrbanac): Remove this as soon as we improve our unit testing
# to not require this.
global _ENGINE, _SESSION_FACTORY
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
_SESSION_FACTORY = None
# Make sure we reinitialize the engine and session factory
setup_database_engine_and_factory()
def setup_database_engine_and_factory():
global sa_logger, _SESSION_FACTORY, _ENGINE
LOG.info('Setting up database engine and session factory')
LOG.debug('Sql connection = %s', CONF.sql_connection)
if CONF.debug:
sa_logger = logging.getLogger('sqlalchemy.engine')
sa_logger.setLevel(logging.DEBUG)
if CONF.sql_pool_logging:
pool_logger = logging.getLogger('sqlalchemy.pool')
pool_logger.setLevel(logging.DEBUG)
_ENGINE = _get_engine(_ENGINE)
# Utilize SQLAlchemy's scoped_session to ensure that we only have one
# session instance per thread.
session_maker = sa_orm.sessionmaker(bind=_ENGINE)
_SESSION_FACTORY = sqlalchemy.orm.scoped_session(session_maker)
def start():
"""Start for read-write requests placeholder
Typically performed at the start of a request cycle, say for POST or PUT
requests.
"""
pass
def start_read_only():
"""Start for read-only requests placeholder
Typically performed at the start of a request cycle, say for GET or HEAD
requests.
"""
pass
def commit():
"""Commit session state so far to the database.
Typically performed at the end of a request cycle.
"""
get_session().commit()
def rollback():
"""Rollback session state so far.
Typically performed when the request cycle raises an Exception.
"""
get_session().rollback()
def clear():
"""Dispose of this session, releases db resources.
Typically performed at the end of a request cycle, after a
commit() or rollback().
"""
if _SESSION_FACTORY: # not initialized in some unit test
_SESSION_FACTORY.remove()
def get_session():
"""Helper method to grab session."""
return _SESSION_FACTORY()
def _get_engine(engine):
if not engine:
connection = CONF.sql_connection
if not connection:
raise exception.BarbicanException(
u._('No SQL connection configured'))
# TODO(jfwood):
# connection_dict = sqlalchemy.engine.url.make_url(_CONNECTION)
engine_args = {
'pool_recycle': CONF.sql_idle_timeout,
'echo': False,
'convert_unicode': True}
if CONF.sql_pool_class:
engine_args['poolclass'] = utils.get_class_for(
'sqlalchemy.pool', CONF.sql_pool_class)
if CONF.sql_pool_size:
engine_args['pool_size'] = CONF.sql_pool_size
if CONF.sql_pool_max_overflow:
engine_args['max_overflow'] = CONF.sql_pool_max_overflow
db_connection = None
try:
engine = _create_engine(connection, **engine_args)
db_connection = engine.connect()
except Exception as err:
msg = u._("Error configuring registry database with supplied "
"sql_connection. Got error: {error}").format(error=err)
LOG.exception(msg)
raise exception.BarbicanException(msg)
finally:
if db_connection:
db_connection.close()
if CONF.db_auto_create:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
_auto_generate_tables(engine, tables)
else:
LOG.info(u._LI('Not auto-creating barbican registry DB'))
return engine
def is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def _create_engine(connection, **engine_args):
LOG.debug("Sql connection: %s; Args: %s", connection, engine_args)
engine = sqlalchemy.create_engine(connection, **engine_args)
# TODO(jfwood): if 'mysql' in connection_dict.drivername:
# TODO(jfwood): sqlalchemy.event.listen(_ENGINE, 'checkout',
# TODO(jfwood): ping_listener)
# Wrap the engine's connect method with a retry decorator.
engine.connect = wrap_db_error(engine.connect)
return engine
def _auto_generate_tables(engine, tables):
if tables and 'alembic_version' in tables:
# Upgrade the database to the latest version.
LOG.info(u._LI('Updating schema to latest version'))
commands.upgrade()
else:
# Create database tables from our models.
LOG.info(u._LI('Auto-creating barbican registry DB'))
models.BASE.metadata.create_all(engine)
# Sync the alembic version 'head' with current models.
commands.stamp()
def wrap_db_error(f):
"""Retry DB connection. Copied from nova and modified."""
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except sqlalchemy.exc.OperationalError as e:
if not is_db_connection_error(e.args[0]):
raise
remaining_attempts = CONF.sql_max_retries
while True:
LOG.warning(u._LW('SQL connection failed. %d attempts left.'),
remaining_attempts)
remaining_attempts -= 1
time.sleep(CONF.sql_retry_interval)
try:
return f(*args, **kwargs)
except sqlalchemy.exc.OperationalError as e:
if (remaining_attempts <= 0 or not
is_db_connection_error(e.args[0])):
raise
except sqlalchemy.exc.DBAPIError:
raise
except sqlalchemy.exc.DBAPIError:
raise
_wrap.__name__ = f.__name__
return _wrap
def clean_paging_values(offset_arg=0, limit_arg=CONF.default_limit_paging):
"""Cleans and safely limits raw paging offset/limit values."""
offset_arg = offset_arg or 0
limit_arg = limit_arg or CONF.default_limit_paging
try:
offset = int(offset_arg)
if offset < 0:
offset = 0
if offset > sys.maxsize:
offset = 0
except ValueError:
offset = 0
try:
limit = int(limit_arg)
if limit < 1:
limit = 1
if limit > CONF.max_limit_paging:
limit = CONF.max_limit_paging
except ValueError:
limit = CONF.default_limit_paging
LOG.debug("Clean paging values limit=%s, offset=%s",
limit, offset
)
return offset, limit
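# Illustrative behavior of the clamping above (editorial note, not original code);
# the exact numbers depend on CONF.default_limit_paging and CONF.max_limit_paging:
#   clean_paging_values('7', '50')  -> (7, 50) when 50 <= CONF.max_limit_paging
#   clean_paging_values(-3, 'abc')  -> (0, CONF.default_limit_paging)
#   clean_paging_values(0, 10 ** 9) -> (0, CONF.max_limit_paging)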
def delete_all_project_resources(project_id):
"""Logic to cleanup all project resources.
This cleanup uses same alchemy session to perform all db operations as a
transaction and will commit only when all db operations are performed
without error.
"""
session = get_session()
container_repo = get_container_repository()
container_repo.delete_project_entities(
project_id, suppress_exception=False, session=session)
# secret children SecretStoreMetadatum, EncryptedDatum
# and container_secrets are deleted as part of secret delete
secret_repo = get_secret_repository()
secret_repo.delete_project_entities(
project_id, suppress_exception=False, session=session)
kek_repo = get_kek_datum_repository()
kek_repo.delete_project_entities(
project_id, suppress_exception=False, session=session)
project_repo = get_project_repository()
project_repo.delete_project_entities(
project_id, suppress_exception=False, session=session)
class BaseRepo(object):
"""Base repository for the barbican entities.
This class provides template methods that allow sub-classes to hook
specific functionality as needed. Clients access instances of this class
via singletons, therefore implementations should be stateless aside from
configuration.
"""
def get_session(self, session=None):
LOG.debug("Getting session...")
return session or get_session()
def get(self, entity_id, external_project_id=None,
force_show_deleted=False,
suppress_exception=False, session=None):
"""Get an entity or raise if it does not exist."""
session = self.get_session(session)
try:
query = self._do_build_get_query(entity_id,
external_project_id,
session)
# filter out deleted entities if requested
if not force_show_deleted:
query = query.filter_by(deleted=False)
entity = query.one()
except sa_orm.exc.NoResultFound:
LOG.exception(u._LE("Not found for %s"), entity_id)
entity = None
if not suppress_exception:
_raise_entity_not_found(self._do_entity_name(), entity_id)
return entity
def create_from(self, entity, session=None):
"""Sub-class hook: create from entity."""
if not entity:
msg = u._(
"Must supply non-None {entity_name}."
).format(entity_name=self._do_entity_name())
raise exception.Invalid(msg)
if entity.id:
msg = u._(
"Must supply {entity_name} with id=None (i.e. new entity)."
).format(entity_name=self._do_entity_name())
raise exception.Invalid(msg)
LOG.debug("Begin create from...")
start = time.time() # DEBUG
# Validate the attributes before we go any further. From my
# (unknown Glance developer) investigation, the @validates
# decorator does not validate
# on new records, only on existing records, which is, well,
# idiotic.
self._do_validate(entity.to_dict())
try:
LOG.debug("Saving entity...")
entity.save(session=session)
except sqlalchemy.exc.IntegrityError:
LOG.exception(u._LE('Problem saving entity for create'))
_raise_entity_already_exists(self._do_entity_name())
LOG.debug('Elapsed repo '
'create secret:%s', (time.time() - start)) # DEBUG
return entity
def save(self, entity):
"""Saves the state of the entity."""
entity.updated_at = timeutils.utcnow()
# Validate the attributes before we go any further. From my
# (unknown Glance developer) investigation, the @validates
# decorator does not validate
# on new records, only on existing records, which is, well,
# idiotic.
self._do_validate(entity.to_dict())
entity.save()
def delete_entity_by_id(self, entity_id, external_project_id,
session=None):
"""Remove the entity by its ID."""
session = self.get_session(session)
entity = self.get(entity_id=entity_id,
external_project_id=external_project_id,
session=session)
entity.delete(session=session)
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "Entity"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return None
def _do_convert_values(self, values):
"""Sub-class hook: convert text-based values to target types
This is specifically for database values.
"""
pass
def _do_validate(self, values):
"""Sub-class hook: validate values.
Validates the incoming data and raises an Invalid exception
if anything is out of order.
:param values: Mapping of entity metadata to check
"""
status = values.get('status', None)
if not status:
# TODO(jfwood): I18n this!
msg = u._("{entity_name} status is required.").format(
entity_name=self._do_entity_name())
raise exception.Invalid(msg)
if not models.States.is_valid(status):
msg = u._("Invalid status '{status}' for {entity_name}.").format(
status=status, entity_name=self._do_entity_name())
raise exception.Invalid(msg)
return values
def _update_values(self, entity_ref, values):
for k in values:
if getattr(entity_ref, k) != values[k]:
setattr(entity_ref, k, values[k])
def _build_get_project_entities_query(self, project_id, session):
"""Sub-class hook: build a query to retrieve entities for a project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
:returns: A query object for getting all project related entities
This will filter deleted entities if there.
"""
msg = u._(
"{entity_name} is missing query build method for get "
"project entities.").format(
entity_name=self._do_entity_name())
raise NotImplementedError(msg)
def get_project_entities(self, project_id, session=None):
"""Gets entities associated with a given project.
:param project_id: id of barbican project entity
:param session: existing db session reference. If None, gets session.
:returns: list of matching entities found otherwise returns empty list
if no entity exists for a given project.
Sub-class should implement `_build_get_project_entities_query` function
        to fetch related entities, otherwise it would raise NotImplementedError
on its usage.
"""
session = self.get_session(session)
query = self._build_get_project_entities_query(project_id, session)
if query:
return query.all()
else:
return []
def get_count(self, project_id, session=None):
"""Gets count of entities associated with a given project
:param project_id: id of barbican project entity
:param session: existing db session reference. If None, gets session.
:return: an number 0 or greater
Sub-class should implement `_build_get_project_entities_query` function
        to count related entities, otherwise it would raise NotImplementedError
on its usage.
"""
session = self.get_session(session)
query = self._build_get_project_entities_query(project_id, session)
if query:
return query.count()
else:
return 0
def delete_project_entities(self, project_id,
suppress_exception=False,
session=None):
"""Deletes entities for a given project.
:param project_id: id of barbican project entity
:param suppress_exception: Pass True if want to suppress exception
:param session: existing db session reference. If None, gets session.
Sub-class should implement `_build_get_project_entities_query` function
to delete related entities otherwise it would raise NotImplementedError
on its usage.
"""
session = self.get_session(session)
query = self._build_get_project_entities_query(project_id,
session=session)
try:
# query cannot be None as related repo class is expected to
# implement it otherwise error is raised in build query call
for entity in query:
# Its a soft delete so its more like entity update
entity.delete(session=session)
except sqlalchemy.exc.SQLAlchemyError:
LOG.exception(u._LE('Problem finding project related entity to '
'delete'))
if not suppress_exception:
raise exception.BarbicanException(u._('Error deleting project '
'entities for '
'project_id=%s'),
project_id)
class ProjectRepo(BaseRepo):
"""Repository for the Project entity."""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "Project"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.Project).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def find_by_external_project_id(self, external_project_id,
suppress_exception=False, session=None):
session = self.get_session(session)
try:
query = session.query(models.Project)
query = query.filter_by(external_id=external_project_id)
entity = query.one()
except sa_orm.exc.NoResultFound:
entity = None
if not suppress_exception:
LOG.exception(u._LE("Problem getting Project %s"),
external_project_id)
raise exception.NotFound(u._(
"No {entity_name} found with keystone-ID {id}").format(
entity_name=self._do_entity_name(),
id=external_project_id))
return entity
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving project for given id."""
query = session.query(models.Project)
return query.filter_by(id=project_id).filter_by(deleted=False)
class SecretRepo(BaseRepo):
"""Repository for the Secret entity."""
def get_by_create_date(self, external_project_id, offset_arg=None,
limit_arg=None, name=None, alg=None, mode=None,
bits=0, secret_type=None, suppress_exception=False,
session=None, acl_only=None, user_id=None):
"""Returns a list of secrets
The returned secrets are ordered by the date they were created at
and paged based on the offset and limit fields. The external_project_id
        is the external-to-Barbican value assigned to the project by Keystone.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
utcnow = timeutils.utcnow()
query = session.query(models.Secret)
query = query.filter_by(deleted=False)
query = query.filter(or_(models.Secret.expiration.is_(None),
models.Secret.expiration > utcnow))
if name:
query = query.filter(models.Secret.name.like(name))
if alg:
query = query.filter(models.Secret.algorithm.like(alg))
if mode:
query = query.filter(models.Secret.mode.like(mode))
if bits > 0:
query = query.filter(models.Secret.bit_length == bits)
if secret_type:
query = query.filter(models.Secret.secret_type == secret_type)
if acl_only and acl_only.lower() == 'true' and user_id:
query = query.join(models.SecretACL)
query = query.join(models.SecretACLUser)
query = query.filter(models.SecretACLUser.user_id == user_id)
else:
query = query.join(models.Project)
query = query.filter(
models.Project.external_id == external_project_id)
total = query.count()
end_offset = offset + limit
LOG.debug('Retrieving from %s to %s', offset, end_offset)
query = query.limit(limit).offset(offset)
entities = query.all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "Secret"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
utcnow = timeutils.utcnow()
expiration_filter = or_(models.Secret.expiration.is_(None),
models.Secret.expiration > utcnow)
query = session.query(models.Secret)
query = query.filter_by(id=entity_id, deleted=False)
query = query.filter(expiration_filter)
query = query.join(models.Project)
query = query.filter(models.Project.external_id == external_project_id)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving Secrets associated with a given project
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
query = session.query(models.Secret).filter_by(deleted=False)
query = query.filter(models.Secret.project_id == project_id)
return query
def get_secret_by_id(self, entity_id, suppress_exception=False,
session=None):
"""Gets secret by its entity id without project id check."""
session = self.get_session(session)
try:
utcnow = timeutils.utcnow()
expiration_filter = or_(models.Secret.expiration.is_(None),
models.Secret.expiration > utcnow)
query = session.query(models.Secret)
query = query.filter_by(id=entity_id, deleted=False)
query = query.filter(expiration_filter)
entity = query.one()
except sa_orm.exc.NoResultFound:
entity = None
if not suppress_exception:
LOG.exception(u._LE("Problem getting secret %s"),
entity_id)
raise exception.NotFound(u._(
"No secret found with secret-ID {id}").format(
entity_name=self._do_entity_name(),
id=entity_id))
return entity
class EncryptedDatumRepo(BaseRepo):
"""Repository for the EncryptedDatum entity
Stores encrypted information on behalf of a Secret.
"""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "EncryptedDatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.EncryptedDatum).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class SecretStoreMetadatumRepo(BaseRepo):
"""Repository for the SecretStoreMetadatum entity
Stores key/value information on behalf of a Secret.
"""
def save(self, metadata, secret_model):
"""Saves the the specified metadata for the secret.
:raises NotFound if entity does not exist.
"""
now = timeutils.utcnow()
for k, v in metadata.items():
meta_model = models.SecretStoreMetadatum(k, v)
meta_model.updated_at = now
meta_model.secret = secret_model
meta_model.save()
def get_metadata_for_secret(self, secret_id):
"""Returns a dict of SecretStoreMetadatum instances."""
session = get_session()
query = session.query(models.SecretStoreMetadatum)
query = query.filter_by(deleted=False)
query = query.filter(
models.SecretStoreMetadatum.secret_id == secret_id)
metadata = query.all()
return {m.key: m.value for m in metadata}
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "SecretStoreMetadatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.SecretStoreMetadatum)
return query.filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class SecretUserMetadatumRepo(BaseRepo):
"""Repository for the SecretUserMetadatum entity
Stores key/value information on behalf of a Secret.
"""
def create_replace_user_metadata(self, secret_id, metadata):
"""Creates or replaces the the specified metadata for the secret."""
now = timeutils.utcnow()
session = get_session()
query = session.query(models.SecretUserMetadatum)
query = query.filter_by(secret_id=secret_id)
query.delete()
for k, v in metadata.items():
meta_model = models.SecretUserMetadatum(k, v)
meta_model.secret_id = secret_id
meta_model.updated_at = now
meta_model.save(session=session)
def get_metadata_for_secret(self, secret_id):
"""Returns a dict of SecretUserMetadatum instances."""
session = get_session()
query = session.query(models.SecretUserMetadatum)
query = query.filter_by(deleted=False)
query = query.filter(
models.SecretUserMetadatum.secret_id == secret_id)
metadata = query.all()
return {m.key: m.value for m in metadata}
def create_replace_user_metadatum(self, secret_id, key, value):
now = timeutils.utcnow()
session = get_session()
query = session.query(models.SecretUserMetadatum)
query = query.filter_by(secret_id=secret_id)
query = query.filter_by(key=key)
query.delete()
meta_model = models.SecretUserMetadatum(key, value)
meta_model.secret_id = secret_id
meta_model.updated_at = now
meta_model.save(session=session)
def delete_metadatum(self, secret_id, key):
"""Removes a key from a SecretUserMetadatum instances."""
session = get_session()
query = session.query(models.SecretUserMetadatum)
query = query.filter_by(secret_id=secret_id)
query = query.filter_by(key=key)
query.delete()
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "SecretUserMetadatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.SecretUserMetadatum)
return query.filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class KEKDatumRepo(BaseRepo):
"""Repository for the KEKDatum entity
Stores key encryption key (KEK) metadata used by crypto plugins to
encrypt/decrypt secrets.
"""
def find_or_create_kek_datum(self, project,
plugin_name,
suppress_exception=False,
session=None):
"""Find or create a KEK datum instance."""
if not plugin_name:
raise exception.BarbicanException(
u._('Tried to register crypto plugin with null or empty '
'name.'))
kek_datum = None
session = self.get_session(session)
# TODO(jfwood): Reverse this...attempt insert first, then get on fail.
try:
query = session.query(models.KEKDatum)
query = query.filter_by(project_id=project.id,
plugin_name=plugin_name,
active=True,
deleted=False)
kek_datum = query.one()
except sa_orm.exc.NoResultFound:
kek_datum = models.KEKDatum()
kek_datum.kek_label = "project-{0}-key-{1}".format(
project.external_id, uuid.uuid4())
kek_datum.project_id = project.id
kek_datum.plugin_name = plugin_name
kek_datum.status = models.States.ACTIVE
self.save(kek_datum)
return kek_datum
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "KEKDatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.KEKDatum).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving KEK Datum instance(s).
The returned KEK Datum instance(s) are related to a given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.KEKDatum).filter_by(
project_id=project_id).filter_by(deleted=False)
class OrderRepo(BaseRepo):
"""Repository for the Order entity."""
def get_by_create_date(self, external_project_id, offset_arg=None,
limit_arg=None, meta_arg=None,
suppress_exception=False, session=None):
"""Returns a list of orders
The list is ordered by the date they were created at and paged
based on the offset and limit fields.
:param external_project_id: The keystone id for the project.
:param offset_arg: The entity number where the query result should
start.
:param limit_arg: The maximum amount of entities in the result set.
:param meta_arg: Optional meta field used to filter results.
:param suppress_exception: Whether NoResultFound exceptions should be
suppressed.
:param session: SQLAlchemy session object.
:returns: Tuple consisting of (list_of_entities, offset, limit, total).
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.Order)
query = query.order_by(models.Order.created_at)
query = query.filter_by(deleted=False)
if meta_arg:
query = query.filter(models.Order.meta.contains(meta_arg))
query = query.join(models.Project, models.Order.project)
query = query.filter(models.Project.external_id == external_project_id)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "Order"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.Order)
query = query.filter_by(id=entity_id, deleted=False)
query = query.join(models.Project, models.Order.project)
query = query.filter(models.Project.external_id == external_project_id)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving orders related to given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.Order).filter_by(
project_id=project_id).filter_by(deleted=False)
class OrderPluginMetadatumRepo(BaseRepo):
"""Repository for the OrderPluginMetadatum entity
Stores key/value plugin information on behalf of an Order.
"""
def save(self, metadata, order_model):
"""Saves the the specified metadata for the order.
:raises NotFound if entity does not exist.
"""
now = timeutils.utcnow()
session = get_session()
for k, v in metadata.items():
meta_model = models.OrderPluginMetadatum(k, v)
meta_model.updated_at = now
meta_model.order = order_model
meta_model.save(session=session)
def get_metadata_for_order(self, order_id):
"""Returns a dict of OrderPluginMetadatum instances."""
session = get_session()
try:
query = session.query(models.OrderPluginMetadatum)
query = query.filter_by(deleted=False)
query = query.filter(
models.OrderPluginMetadatum.order_id == order_id)
metadata = query.all()
except sa_orm.exc.NoResultFound:
metadata = {}
return {m.key: m.value for m in metadata}
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "OrderPluginMetadatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.OrderPluginMetadatum)
return query.filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class OrderBarbicanMetadatumRepo(BaseRepo):
"""Repository for the OrderBarbicanMetadatum entity
    Stores key/value plugin information on behalf of an Order.
"""
def save(self, metadata, order_model):
"""Saves the the specified metadata for the order.
:raises NotFound if entity does not exist.
"""
now = timeutils.utcnow()
session = get_session()
for k, v in metadata.items():
meta_model = models.OrderBarbicanMetadatum(k, v)
meta_model.updated_at = now
meta_model.order = order_model
meta_model.save(session=session)
def get_metadata_for_order(self, order_id):
"""Returns a dict of OrderBarbicanMetadatum instances."""
session = get_session()
try:
query = session.query(models.OrderBarbicanMetadatum)
query = query.filter_by(deleted=False)
query = query.filter(
models.OrderBarbicanMetadatum.order_id == order_id)
metadata = query.all()
except sa_orm.exc.NoResultFound:
metadata = {}
return {m.key: m.value for m in metadata}
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "OrderBarbicanMetadatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.OrderBarbicanMetadatum)
return query.filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class OrderRetryTaskRepo(BaseRepo):
"""Repository for the OrderRetryTask entity."""
def get_by_create_date(
self, only_at_or_before_this_date=None,
offset_arg=None, limit_arg=None,
suppress_exception=False,
session=None):
"""Returns a list of order retry task entities
The list is ordered by the date they were created at and paged
based on the offset and limit fields.
:param only_at_or_before_this_date: If specified, only entities at or
before this date are returned.
:param offset_arg: The entity number where the query result should
start.
:param limit_arg: The maximum amount of entities in the result set.
:param suppress_exception: Whether NoResultFound exceptions should be
suppressed.
:param session: SQLAlchemy session object.
:returns: Tuple consisting of (list_of_entities, offset, limit, total).
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.OrderRetryTask)
query = query.order_by(models.OrderRetryTask.created_at)
query = query.filter_by(deleted=False)
if only_at_or_before_this_date:
query = query.filter(
models.OrderRetryTask.retry_at <= only_at_or_before_this_date)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "OrderRetryTask"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.OrderRetryTask)
query = query.filter_by(id=entity_id, deleted=False)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class ContainerRepo(BaseRepo):
"""Repository for the Container entity."""
def get_by_create_date(self, external_project_id, offset_arg=None,
limit_arg=None, name_arg=None,
suppress_exception=False, session=None):
"""Returns a list of containers
The list is ordered by the date they were created at and paged
based on the offset and limit fields. The external_project_id is
        the external-to-Barbican value assigned to the project by Keystone.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.Container)
query = query.order_by(models.Container.created_at)
query = query.filter_by(deleted=False)
if name_arg:
query = query.filter(models.Container.name.like(name_arg))
query = query.join(models.Project, models.Container.project)
query = query.filter(models.Project.external_id == external_project_id)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "Container"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.Container)
query = query.filter_by(id=entity_id, deleted=False)
query = query.join(models.Project, models.Container.project)
query = query.filter(models.Project.external_id == external_project_id)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving container related to given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.Container).filter_by(
deleted=False).filter_by(project_id=project_id)
def get_container_by_id(self, entity_id, suppress_exception=False,
session=None):
"""Gets container by its entity id without project id check."""
session = self.get_session(session)
try:
query = session.query(models.Container)
query = query.filter_by(id=entity_id, deleted=False)
entity = query.one()
except sa_orm.exc.NoResultFound:
entity = None
if not suppress_exception:
LOG.exception(u._LE("Problem getting container %s"),
entity_id)
raise exception.NotFound(u._(
"No container found with container-ID {id}").format(
entity_name=self._do_entity_name(),
id=entity_id))
return entity
class ContainerSecretRepo(BaseRepo):
"""Repository for the ContainerSecret entity."""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "ContainerSecret"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.ContainerSecret
).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class ContainerConsumerRepo(BaseRepo):
"""Repository for the Service entity."""
def get_by_container_id(self, container_id,
offset_arg=None, limit_arg=None,
suppress_exception=False, session=None):
"""Returns a list of Consumers
The list is ordered by the date they were created at and paged
based on the offset and limit fields.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.ContainerConsumerMetadatum)
query = query.order_by(models.ContainerConsumerMetadatum.name)
query = query.filter_by(deleted=False)
query = query.filter(
models.ContainerConsumerMetadatum.container_id == container_id
)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def get_by_values(self, container_id, name, URL, suppress_exception=False,
show_deleted=False, session=None):
session = self.get_session(session)
try:
query = session.query(models.ContainerConsumerMetadatum)
query = query.filter_by(
container_id=container_id,
name=name,
URL=URL)
if not show_deleted:
                query = query.filter_by(deleted=False)
consumer = query.one()
except sa_orm.exc.NoResultFound:
consumer = None
if not suppress_exception:
raise exception.NotFound(
u._("Could not find {entity_name}").format(
entity_name=self._do_entity_name()))
return consumer
def create_or_update_from(self, new_consumer, container, session=None):
session = self.get_session(session)
try:
container.updated_at = timeutils.utcnow()
container.consumers.append(new_consumer)
container.save(session=session)
except sqlalchemy.exc.IntegrityError:
session.rollback() # We know consumer already exists.
# This operation is idempotent, so log this and move on
LOG.debug("Consumer %s already exists for container %s,"
" continuing...", (new_consumer.name, new_consumer.URL),
new_consumer.container_id)
# Get the existing entry and reuse it by clearing the deleted flags
existing_consumer = self.get_by_values(
new_consumer.container_id, new_consumer.name, new_consumer.URL,
show_deleted=True)
existing_consumer.deleted = False
existing_consumer.deleted_at = None
# We are not concerned about timing here -- set only, no reads
existing_consumer.save()
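        # Hypothetical usage sketch (not part of the original module): because
        # the IntegrityError branch above clears the deleted flag on an already
        # registered consumer, calling this method twice with the same consumer
        # is safe.
        #
        #   repo = get_container_consumer_repository()
        #   repo.create_or_update_from(new_consumer, container)  # inserts
        #   repo.create_or_update_from(new_consumer, container)  # no-op update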
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "ContainerConsumer"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.ContainerConsumerMetadatum)
return query.filter_by(id=entity_id, deleted=False)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving consumers associated with given project
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
query = session.query(
models.ContainerConsumerMetadatum).filter_by(deleted=False)
query = query.filter(
models.ContainerConsumerMetadatum.project_id == project_id)
return query
class TransportKeyRepo(BaseRepo):
"""Repository for the TransportKey entity
Stores transport keys for wrapping the secret data to/from a
barbican client.
"""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "TransportKey"
def get_by_create_date(self, plugin_name=None,
offset_arg=None, limit_arg=None,
suppress_exception=False, session=None):
"""Returns a list of transport keys
        The list is ordered by creation date. The search accepts
        plugin_name as an optional filter.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.TransportKey)
query = query.order_by(models.TransportKey.created_at)
if plugin_name is not None:
query = session.query(models.TransportKey)
query = query.filter_by(deleted=False, plugin_name=plugin_name)
else:
query = query.filter_by(deleted=False)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number of entities retrieved: %s out of %s',
len(entities), total)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def get_latest_transport_key(self, plugin_name, suppress_exception=False,
session=None):
"""Returns the latest transport key for a given plugin."""
entity, offset, limit, total = self.get_by_create_date(
plugin_name, offset_arg=0, limit_arg=1,
suppress_exception=suppress_exception, session=session)
return entity
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.TransportKey).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class CertificateAuthorityRepo(BaseRepo):
"""Repository for the CertificateAuthority entity.
    CertificateAuthority entries are not soft deleted, so there is no
    need for a deleted=False filter in queries.
"""
def get_by_create_date(self, offset_arg=None, limit_arg=None,
plugin_name=None, plugin_ca_id=None,
suppress_exception=False, session=None,
show_expired=False, project_id=None,
restrict_to_project_cas=False):
"""Returns a list of certificate authorities
The returned certificate authorities are ordered by the date they
were created and paged based on the offset and limit fields.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
if restrict_to_project_cas:
# get both subCAs which have been defined for your project
# (cas for which the ca.project_id == project_id) AND
# project_cas which are defined for your project
# (pca.project_id = project_id)
query1 = session.query(models.CertificateAuthority)
query1 = query1.filter(
models.CertificateAuthority.project_id == project_id)
query2 = session.query(models.CertificateAuthority)
query2 = query2.join(models.ProjectCertificateAuthority)
query2 = query2.filter(
models.ProjectCertificateAuthority.project_id == project_id)
query = query1.union(query2)
else:
# get both subcas that have been defined for your project
# (cas for which ca.project_id == project_id) AND
# all top-level CAs (ca.project_id == None)
query = session.query(models.CertificateAuthority)
query = query.filter(or_(
models.CertificateAuthority.project_id == project_id,
models.CertificateAuthority.project_id.is_(None)
))
query = query.order_by(models.CertificateAuthority.created_at)
query = query.filter_by(deleted=False)
if not show_expired:
utcnow = timeutils.utcnow()
query = query.filter(or_(
models.CertificateAuthority.expiration.is_(None),
models.CertificateAuthority.expiration > utcnow))
if plugin_name:
query = query.filter(
models.CertificateAuthority.plugin_name.like(plugin_name))
if plugin_ca_id:
query = query.filter(
models.CertificateAuthority.plugin_ca_id.like(plugin_ca_id))
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def update_entity(self, old_ca, parsed_ca_in, session=None):
"""Updates CA entry and its sub-entries."""
parsed_ca = dict(parsed_ca_in)
# these fields cannot be modified
parsed_ca.pop('plugin_name', None)
parsed_ca.pop('plugin_ca_id', None)
expiration = parsed_ca.pop('expiration', None)
expiration_iso = timeutils.parse_isotime(expiration.strip())
new_expiration = timeutils.normalize_time(expiration_iso)
session = self.get_session(session)
query = session.query(models.CertificateAuthority).filter_by(
id=old_ca.id, deleted=False)
entity = query.one()
entity.expiration = new_expiration
for k, v in entity.ca_meta.items():
if k not in parsed_ca.keys():
v.delete(session)
for key in parsed_ca:
if key not in entity.ca_meta.keys():
meta = models.CertificateAuthorityMetadatum(
key, parsed_ca[key])
entity.ca_meta[key] = meta
else:
entity.ca_meta[key].value = parsed_ca[key]
entity.save()
return entity
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "CertificateAuthority"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
utcnow = timeutils.utcnow()
# TODO(jfwood): Performance? Is the many-to-many join needed?
expiration_filter = or_(
models.CertificateAuthority.expiration.is_(None),
models.CertificateAuthority.expiration > utcnow)
query = session.query(models.CertificateAuthority)
query = query.filter_by(id=entity_id, deleted=False)
query = query.filter(expiration_filter)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving CA related to given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.CertificateAuthority).filter_by(
project_id=project_id).filter_by(deleted=False)
class CertificateAuthorityMetadatumRepo(BaseRepo):
"""Repository for the CertificateAuthorityMetadatum entity
Stores key/value information on behalf of a CA.
"""
def save(self, metadata, ca_model):
"""Saves the the specified metadata for the CA.
:raises NotFound if entity does not exist.
"""
now = timeutils.utcnow()
session = get_session()
for k, v in metadata.items():
meta_model = models.CertificateAuthorityMetadatum(k, v)
meta_model.updated_at = now
meta_model.ca = ca_model
meta_model.save(session=session)
def get_metadata_for_certificate_authority(self, ca_id):
"""Returns a dict of CertificateAuthorityMetadatum instances."""
session = get_session()
try:
query = session.query(models.CertificateAuthorityMetadatum)
query = query.filter_by(deleted=False)
query = query.filter(
models.CertificateAuthorityMetadatum.ca_id == ca_id)
metadata = query.all()
except sa_orm.exc.NoResultFound:
metadata = dict()
return dict((m.key, m.value) for m in metadata)
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "CertificateAuthorityMetadatum"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.CertificateAuthorityMetadatum)
return query.filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
class ProjectCertificateAuthorityRepo(BaseRepo):
"""Repository for the ProjectCertificateAuthority entity.
    ProjectCertificateAuthority entries are not soft deleted, so there is no
    need for a deleted=False filter in queries.
"""
def get_by_create_date(self, offset_arg=None, limit_arg=None,
project_id=None, ca_id=None,
suppress_exception=False, session=None):
"""Returns a list of project CAs
        The returned project CAs are ordered by the date they
were created and paged based on the offset and limit fields.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.ProjectCertificateAuthority)
query = query.order_by(models.ProjectCertificateAuthority.created_at)
query = query.filter_by(deleted=False)
if project_id:
query = query.filter(
models.ProjectCertificateAuthority.project_id.like(project_id))
if ca_id:
query = query.filter(
models.ProjectCertificateAuthority.ca_id.like(ca_id))
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "ProjectCertificateAuthority"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.ProjectCertificateAuthority).filter_by(
id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving CA related to given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.ProjectCertificateAuthority).filter_by(
project_id=project_id)
class PreferredCertificateAuthorityRepo(BaseRepo):
"""Repository for the PreferredCertificateAuthority entity.
    PreferredCertificateAuthority entries are not soft deleted, so there is no
    need for a deleted=False filter in queries.
"""
def get_by_create_date(self, offset_arg=None, limit_arg=None,
project_id=None, ca_id=None,
suppress_exception=False, session=None):
"""Returns a list of preferred CAs
The returned CAs are ordered by the date they
were created and paged based on the offset and limit fields.
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.PreferredCertificateAuthority)
query = query.order_by(models.PreferredCertificateAuthority.created_at)
if project_id:
query = query.filter(
models.PreferredCertificateAuthority.project_id.like(
project_id))
if ca_id:
query = query.filter(
models.PreferredCertificateAuthority.ca_id.like(ca_id))
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total
)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def create_or_update_by_project_id(self, project_id, ca_id, session=None):
"""Create or update preferred CA for a project by project_id.
:param project_id: ID of project whose preferred CA will be saved
:param ca_id: ID of preferred CA
:param session: SQLAlchemy session object.
:return: None
"""
session = self.get_session(session)
query = session.query(models.PreferredCertificateAuthority)
query = query.filter_by(project_id=project_id)
try:
entity = query.one()
except sa_orm.exc.NoResultFound:
self.create_from(
models.PreferredCertificateAuthority(project_id, ca_id),
session=session)
else:
entity.ca_id = ca_id
entity.save(session)
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "PreferredCertificateAuthority"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.PreferredCertificateAuthority).filter_by(
id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def _build_get_project_entities_query(self, project_id, session):
"""Builds query for retrieving preferred CA related to given project.
:param project_id: id of barbican project entity
:param session: existing db session reference.
"""
return session.query(models.PreferredCertificateAuthority).filter_by(
project_id=project_id)
class SecretACLRepo(BaseRepo):
"""Repository for the SecretACL entity.
    There is no need for a SecretACLUserRepo as no logic accesses
    SecretACLUser (ACL user data) directly; it is always derived from the
    SecretACL relationship.
    SecretACL and SecretACLUser data is not soft deleted, so there is no need
    for a deleted=False filter in queries.
"""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "SecretACL"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.SecretACL)
query = query.filter_by(id=entity_id)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def get_by_secret_id(self, secret_id, session=None):
"""Return list of secret ACLs by secret id."""
session = self.get_session(session)
query = session.query(models.SecretACL)
query = query.filter_by(secret_id=secret_id)
return query.all()
def create_or_replace_from(self, secret, secret_acl, user_ids=None,
session=None):
session = self.get_session(session)
secret.updated_at = timeutils.utcnow()
secret_acl.updated_at = timeutils.utcnow()
secret.secret_acls.append(secret_acl)
secret.save(session=session)
self._create_or_replace_acl_users(secret_acl, user_ids,
session=session)
def _create_or_replace_acl_users(self, secret_acl, user_ids, session=None):
"""Creates or updates secret acl user based on input user_ids list.
        user_ids is expected to be a list of ids (enforced by schema
        validation) and should contain the complete set of ACL users; partial
        updates of user ids are not supported.
        If user_ids is None, no change is made to the ACL user data.
        If user_ids is not None, the following changes are made: for existing
        ACL users, the timestamp is updated if the user_id is present in the
        input list, otherwise the existing ACL user entry is removed. The
        remaining input user ids are then added as new ACL user db entries.
"""
if user_ids is None:
return
user_ids = set(user_ids)
now = timeutils.utcnow()
session = self.get_session(session)
secret_acl.updated_at = now
for acl_user in secret_acl.acl_users:
if acl_user.user_id in user_ids: # input user_id already exists
acl_user.updated_at = now
user_ids.remove(acl_user.user_id)
else:
acl_user.delete(session)
for user_id in user_ids:
acl_user = models.SecretACLUser(secret_acl.id, user_id)
secret_acl.acl_users.append(acl_user)
secret_acl.save(session=session)
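        # Worked example (hypothetical, not part of the original module) of the
        # replacement logic above, assuming the ACL currently holds users
        # {'u1', 'u2'} and the caller passes user_ids=['u2', 'u3']:
        #   - 'u2' is kept and only its updated_at timestamp is refreshed,
        #   - 'u1' is deleted because it is absent from the input list,
        #   - 'u3' is appended as a new SecretACLUser entry.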
def get_count(self, secret_id, session=None):
"""Gets count of existing secret ACL(s) for a given secret."""
session = self.get_session(session)
query = session.query(sa_func.count(models.SecretACL.id))
query = query.filter(models.SecretACL.secret_id == secret_id)
return query.scalar()
def delete_acls_for_secret(self, secret, session=None):
session = self.get_session(session)
for entity in secret.secret_acls:
entity.delete(session=session)
class ContainerACLRepo(BaseRepo):
"""Repository for the ContainerACL entity.
    There is no need for a ContainerACLUserRepo as no logic accesses
    ContainerACLUser (ACL user data) directly; it is always derived from the
    ContainerACL relationship.
    ContainerACL and ContainerACLUser data is not soft deleted, so there is no
    need for a deleted=False filter in queries.
"""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "ContainerACL"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
query = session.query(models.ContainerACL)
query = query.filter_by(id=entity_id)
return query
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def get_by_container_id(self, container_id, session=None):
"""Return list of container ACLs by container id."""
session = self.get_session(session)
query = session.query(models.ContainerACL)
query = query.filter_by(container_id=container_id)
return query.all()
def create_or_replace_from(self, container, container_acl,
user_ids=None, session=None):
session = self.get_session(session)
container.updated_at = timeutils.utcnow()
container_acl.updated_at = timeutils.utcnow()
container.container_acls.append(container_acl)
container.save(session=session)
self._create_or_replace_acl_users(container_acl, user_ids, session)
def _create_or_replace_acl_users(self, container_acl, user_ids,
session=None):
"""Creates or updates container acl user based on input user_ids list.
        user_ids is expected to be a list of ids (enforced by schema
        validation) and should contain the complete set of ACL users; partial
        updates of user ids are not supported.
        If user_ids is None, no change is made to the ACL user data.
        If user_ids is not None, the following changes are made: for existing
        ACL users, the timestamp is updated if the user_id is present in the
        input list, otherwise the existing ACL user entry is removed. The
        remaining input user ids are then added as new ACL user db entries.
"""
if user_ids is None:
return
user_ids = set(user_ids)
now = timeutils.utcnow()
session = self.get_session(session)
container_acl.updated_at = now
for acl_user in container_acl.acl_users:
if acl_user.user_id in user_ids: # input user_id already exists
acl_user.updated_at = now
user_ids.remove(acl_user.user_id)
else:
acl_user.delete(session)
for user_id in user_ids:
acl_user = models.ContainerACLUser(container_acl.id, user_id)
container_acl.acl_users.append(acl_user)
container_acl.save(session=session)
def get_count(self, container_id, session=None):
"""Gets count of existing container ACL(s) for a given container."""
session = self.get_session(session)
query = session.query(sa_func.count(models.ContainerACL.id))
query = query.filter(models.ContainerACL.container_id == container_id)
return query.scalar()
def delete_acls_for_container(self, container, session=None):
session = self.get_session(session)
for entity in container.container_acls:
entity.delete(session=session)
class ProjectQuotasRepo(BaseRepo):
"""Repository for the ProjectQuotas entity."""
def _do_entity_name(self):
"""Sub-class hook: return entity name, such as for debugging."""
return "ProjectQuotas"
def _do_build_get_query(self, entity_id, external_project_id, session):
"""Sub-class hook: build a retrieve query."""
return session.query(models.ProjectQuotas).filter_by(id=entity_id)
def _do_validate(self, values):
"""Sub-class hook: validate values."""
pass
def get_by_create_date(self, offset_arg=None, limit_arg=None,
suppress_exception=False, session=None):
"""Returns a list of ProjectQuotas
The list is ordered by the date they were created at and paged
based on the offset and limit fields.
:param offset_arg: The entity number where the query result should
start.
:param limit_arg: The maximum amount of entities in the result set.
:param suppress_exception: Whether NoResultFound exceptions should be
suppressed.
:param session: SQLAlchemy session object.
:raises NotFound: if no quota config is found for the project
:returns: Tuple consisting of (list_of_entities, offset, limit, total).
"""
offset, limit = clean_paging_values(offset_arg, limit_arg)
session = self.get_session(session)
query = session.query(models.ProjectQuotas)
query = query.order_by(models.ProjectQuotas.created_at)
query = query.join(models.Project, models.ProjectQuotas.project)
start = offset
end = offset + limit
LOG.debug('Retrieving from %s to %s', start, end)
total = query.count()
entities = query.offset(start).limit(limit).all()
LOG.debug('Number entities retrieved: %s out of %s',
len(entities), total)
if total <= 0 and not suppress_exception:
_raise_no_entities_found(self._do_entity_name())
return entities, offset, limit, total
def create_or_update_by_project_id(self, project_id,
parsed_project_quotas,
session=None):
"""Create or update Project Quotas config for a project by project_id.
:param project_id: ID of project whose quota config will be saved
:param parsed_project_quotas: Python dict with quota definition
:param session: SQLAlchemy session object.
:return: None
"""
session = self.get_session(session)
query = session.query(models.ProjectQuotas)
query = query.filter_by(project_id=project_id)
try:
entity = query.one()
except sa_orm.exc.NoResultFound:
self.create_from(
models.ProjectQuotas(project_id,
parsed_project_quotas),
session=session)
else:
self._update_values(entity, parsed_project_quotas)
entity.save(session)
def get_by_external_project_id(self, external_project_id,
suppress_exception=False, session=None):
"""Return configured Project Quotas for a project by project_id.
:param external_project_id: external ID of project to get quotas for
:param suppress_exception: when True, NotFound is not raised
:param session: SQLAlchemy session object.
:raises NotFound: if no quota config is found for the project
:return: None or Python dict of project quotas for project
"""
session = self.get_session(session)
query = session.query(models.ProjectQuotas)
query = query.join(models.Project, models.ProjectQuotas.project)
query = query.filter(models.Project.external_id == external_project_id)
try:
entity = query.one()
except sa_orm.exc.NoResultFound:
if suppress_exception:
return None
else:
_raise_no_entities_found(self._do_entity_name())
return entity
def delete_by_external_project_id(self, external_project_id,
suppress_exception=False, session=None):
"""Remove configured Project Quotas for a project by project_id.
:param external_project_id: external ID of project to delete quotas
:param suppress_exception: when True, NotFound is not raised
:param session: SQLAlchemy session object.
:raises NotFound: if no quota config is found for the project
:return: None
"""
session = self.get_session(session)
query = session.query(models.ProjectQuotas)
query = query.join(models.Project, models.ProjectQuotas.project)
query = query.filter(models.Project.external_id == external_project_id)
try:
entity = query.one()
except sa_orm.exc.NoResultFound:
if suppress_exception:
return
else:
_raise_no_entities_found(self._do_entity_name())
entity.delete(session=session)
def get_ca_repository():
"""Returns a singleton Secret repository instance."""
global _CA_REPOSITORY
return _get_repository(_CA_REPOSITORY, CertificateAuthorityRepo)
def get_container_acl_repository():
"""Returns a singleton Container ACL repository instance."""
global _CONTAINER_ACL_REPOSITORY
return _get_repository(_CONTAINER_ACL_REPOSITORY, ContainerACLRepo)
def get_container_consumer_repository():
"""Returns a singleton Container Consumer repository instance."""
global _CONTAINER_CONSUMER_REPOSITORY
return _get_repository(_CONTAINER_CONSUMER_REPOSITORY,
ContainerConsumerRepo)
def get_container_repository():
"""Returns a singleton Container repository instance."""
global _CONTAINER_REPOSITORY
return _get_repository(_CONTAINER_REPOSITORY, ContainerRepo)
def get_container_secret_repository():
"""Returns a singleton Container-Secret repository instance."""
global _CONTAINER_SECRET_REPOSITORY
return _get_repository(_CONTAINER_SECRET_REPOSITORY, ContainerSecretRepo)
def get_encrypted_datum_repository():
"""Returns a singleton Encrypted Datum repository instance."""
global _ENCRYPTED_DATUM_REPOSITORY
return _get_repository(_ENCRYPTED_DATUM_REPOSITORY, EncryptedDatumRepo)
def get_kek_datum_repository():
"""Returns a singleton KEK Datum repository instance."""
global _KEK_DATUM_REPOSITORY
return _get_repository(_KEK_DATUM_REPOSITORY, KEKDatumRepo)
def get_order_plugin_meta_repository():
"""Returns a singleton Order-Plugin meta repository instance."""
global _ORDER_PLUGIN_META_REPOSITORY
return _get_repository(_ORDER_PLUGIN_META_REPOSITORY,
OrderPluginMetadatumRepo)
def get_order_barbican_meta_repository():
"""Returns a singleton Order-Barbican meta repository instance."""
global _ORDER_BARBICAN_META_REPOSITORY
return _get_repository(_ORDER_BARBICAN_META_REPOSITORY,
OrderBarbicanMetadatumRepo)
def get_order_repository():
"""Returns a singleton Order repository instance."""
global _ORDER_REPOSITORY
return _get_repository(_ORDER_REPOSITORY, OrderRepo)
def get_order_retry_tasks_repository():
"""Returns a singleton OrderRetryTask repository instance."""
global _ORDER_RETRY_TASK_REPOSITORY
return _get_repository(_ORDER_RETRY_TASK_REPOSITORY, OrderRetryTaskRepo)
def get_preferred_ca_repository():
"""Returns a singleton Secret repository instance."""
global _PREFERRED_CA_REPOSITORY
return _get_repository(_PREFERRED_CA_REPOSITORY,
PreferredCertificateAuthorityRepo)
def get_project_repository():
"""Returns a singleton Project repository instance."""
global _PROJECT_REPOSITORY
return _get_repository(_PROJECT_REPOSITORY, ProjectRepo)
def get_project_ca_repository():
"""Returns a singleton Secret repository instance."""
global _PROJECT_CA_REPOSITORY
return _get_repository(_PROJECT_CA_REPOSITORY,
ProjectCertificateAuthorityRepo)
def get_project_quotas_repository():
"""Returns a singleton Project Quotas repository instance."""
global _PROJECT_QUOTAS_REPOSITORY
return _get_repository(_PROJECT_QUOTAS_REPOSITORY,
ProjectQuotasRepo)
def get_secret_acl_repository():
"""Returns a singleton Secret ACL repository instance."""
global _SECRET_ACL_REPOSITORY
return _get_repository(_SECRET_ACL_REPOSITORY, SecretACLRepo)
def get_secret_meta_repository():
"""Returns a singleton Secret meta repository instance."""
global _SECRET_META_REPOSITORY
return _get_repository(_SECRET_META_REPOSITORY, SecretStoreMetadatumRepo)
def get_secret_user_meta_repository():
"""Returns a singleton Secret user meta repository instance."""
global _SECRET_USER_META_REPOSITORY
return _get_repository(_SECRET_USER_META_REPOSITORY,
SecretUserMetadatumRepo)
def get_secret_repository():
"""Returns a singleton Secret repository instance."""
global _SECRET_REPOSITORY
return _get_repository(_SECRET_REPOSITORY, SecretRepo)
def get_transport_key_repository():
"""Returns a singleton Transport Key repository instance."""
global _TRANSPORT_KEY_REPOSITORY
return _get_repository(_TRANSPORT_KEY_REPOSITORY, TransportKeyRepo)
def _get_repository(global_ref, repo_class):
if not global_ref:
global_ref = repo_class()
return global_ref
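# Hypothetical usage sketch (not part of the original module) of the accessor
# plus paging pattern above, shown for ContainerRepo; the project id value is
# made up:
#
#   repo = get_container_repository()
#   containers, offset, limit, total = repo.get_by_create_date(
#       external_project_id='keystone-project-id',
#       offset_arg=0, limit_arg=10, suppress_exception=True)
#
# The call returns at most `limit` containers ordered by creation date, along
# with the paging values actually used and the total number of matching rows.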
def _raise_entity_not_found(entity_name, entity_id):
raise exception.NotFound(u._("No {entity} found with ID {id}").format(
entity=entity_name,
id=entity_id))
def _raise_entity_id_not_found(entity_id):
raise exception.NotFound(u._("Entity ID {entity_id} not "
"found").format(entity_id=entity_id))
def _raise_no_entities_found(entity_name):
raise exception.NotFound(
u._("No entities of type {entity_name} found").format(
entity_name=entity_name))
def _raise_entity_already_exists(entity_name):
raise exception.Duplicate(
u._("Entity '{entity_name}' "
"already exists").format(entity_name=entity_name))
|
[
"egonmin@CN00119199"
] |
egonmin@CN00119199
|
46a316c5f2c6b406cdc0d01b4f096d3adeda6ad9
|
7bd4a14d77aca4a8872a9dbfc67b1e95be0ec2f0
|
/gallery/settings.py
|
4c216d7323d620b302f784a2454ecb96829f41b5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nancy88199488/photo-gallery
|
f147bc17ed723206cc53f1cc007df01266ab415a
|
acb75015060e18311e46e41d4b9ae41a3ded18d2
|
refs/heads/master
| 2023-06-12T18:13:02.813950
| 2021-07-06T10:16:49
| 2021-07-06T10:16:49
| 382,945,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,532
|
py
|
"""
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-kd7f8i9mi!095gygrzgrqs1x5x$16g=j0%p_^v48l8)3l67qiy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pictures',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'gallery',
'USER':'postgres',
'PASSWORD': 'mary',
'HOST': 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
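# Hypothetical sketch (not part of the original settings): with MEDIA_URL and
# MEDIA_ROOT configured above, uploaded files are typically served during
# development by extending gallery/urls.py along these lines:
#
#   from django.conf import settings
#   from django.conf.urls.static import static
#
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)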
|
[
"nancy.moranga@student.moringaschool.com"
] |
nancy.moranga@student.moringaschool.com
|
28df0510b9052ed52efa691425708812be268066
|
9f8ca3212ff75897a89d7629cb3ccd34fc6fd6bf
|
/django_app/manage.py
|
4dc22bad3b81b71ca0ceca8530c439d49299a6cc
|
[] |
no_license
|
frbgd/AskFrbgd
|
1f6b519d9131227422b1669683c9cbec31f0c44b
|
491ed26dbac5c894bca3d30fe7f76bcb3e216669
|
refs/heads/master
| 2023-06-03T07:09:36.328249
| 2021-06-22T15:59:17
| 2021-06-22T15:59:17
| 359,468,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Ask_Frbgd.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"kap17597@gmail.com"
] |
kap17597@gmail.com
|
40e22450f0ea4f3fc6109626969542b02a4fdf17
|
bdd43e7a563a281692cc869f07fc6686205e3b6b
|
/Simple Map viewer/bin/Debug/ArcGISRuntime10.2.7/LocalServer64/ArcToolbox/Scripts/CreateFeaturesFromTextFile.py
|
e754e6813809b6127f6ac75fec0e86385c0e3493
|
[] |
no_license
|
sangl003/Simple-Map-viewer
|
5d315190ba5d80c27e5dcf9f4de54cca6076789e
|
5af7a78d5563f749179a98799d01564ae1eaf48c
|
refs/heads/master
| 2021-01-13T15:10:58.394042
| 2016-12-13T17:46:40
| 2016-12-13T17:46:40
| 76,205,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,107
|
py
|
'''----------------------------------------------------------------------------------
Tool Name: CreateFeaturesFromTextFile
Source Name: CreateFeaturesFromTextFile.py
Version: ArcGIS 9.1
Author: Environmental Systems Research Institute Inc.
Required Arguments: An Input Text File containing feature coordinates
An Input Character designating the decimal separator used in the text file.
An output feature class
Optional Arguments: A spatial reference can be specified. This will be the
spatial reference of the output fc.
Description: Reads a text file with feature coordinates and creates a feature class
from the coordinates.
----------------------------------------------------------------------------------'''
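# Hypothetical example (not part of the original script) of a "point" input
# text file in the layout the parsing code below expects: the first line names
# the geometry type, each following line is "<id> <x> <y> [z] [m]" separated
# by spaces, and the file ends with an END line.
#
#   point
#   0 1000.0 2000.0
#   1 1500.0 2500.0
#   END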
import string, os, sys, locale, arcgisscripting
gp = arcgisscripting.create()
gp.overwriteoutput = 1
msgErrorTooFewParams = "Not enough parameters provided."
msgUnknownDataType = " is not a valid datatype. Datatype must be point, multipoint, polyline or polygon."
msgErrorCreatingPoint = "Error creating point %s on feature %s"
# sets all the point properties
def createPoint(point, geometry):
try:
point.id = geometry[0]
point.x = geometry[1]
point.y = geometry[2]
# When empty values are written out from pyWriteGeomToTextFile, they come as 1.#QNAN
# Additionally, the user need not supply these values, so if they aren't in the list don't add them
if len(geometry) > 3:
if geometry[3].lower().find("nan") == -1: point.z = geometry[3]
if len(geometry) > 4:
if geometry[4].lower().find("nan") == -1: point.m = geometry[4]
return point
except:
raise Exception, msgErrorCreatingPoint
try:
# get the provided parameters
inputTxtFile = open(gp.getparameterastext(0))
fileSepChar = gp.getparameterastext(1)
outputFC = gp.getparameterastext(2)
# spatial reference is optional
outputSR = gp.getparameterastext(3)
# make sure the text type specified in the text file is valid.
inDataType = inputTxtFile.readline().strip().lower()
dataTypes = ["point", "multipoint", "polyline", "polygon"]
if inDataType.lower() not in dataTypes:
msgUnknownDataType = "%s%s" % (inDataType, msgUnknownDataType)
raise Exception, msgUnknownDataType
# create the new featureclass
gp.toolbox = "management"
gp.CreateFeatureclass(os.path.split(outputFC)[0], os.path.split(outputFC)[1], inDataType, "#", "ENABLED", "ENABLED", outputSR)
# create a new field to assure the id of each feature is preserved.
idfield = "File_ID"
gp.addfield(outputFC, idfield, "LONG")
# get some information about the new featureclass for later use.
outDesc = gp.describe(outputFC)
shapefield = outDesc.ShapeFieldName
# create the cursor and objects necessary for the geometry creation
rows = gp.insertcursor(outputFC)
pnt = gp.createobject("point")
pntarray = gp.createobject("Array")
partarray = gp.createobject("Array")
locale.setlocale(locale.LC_ALL, '')
sepchar = locale.localeconv()['decimal_point']
# loop through the text file.
featid = 0
lineno = 1
for line in inputTxtFile.readlines():
lineno += 1
# create an array from each line in the input text file
values = line.replace("\n", "").replace("\r", "").replace(fileSepChar, sepchar).split(" ")
# for a point feature class simply populate a point object and insert it.
if inDataType == "point" and values[0].lower() != "end":
row = rows.newrow()
pnt = createPoint(pnt, values)
row.SetValue(shapefield, pnt)
row.SetValue(idfield, int(values[0]))
rows.insertrow(row)
# for a multipoint the text file is organized a bit differently. Groups of points must be inserted at the same time.
elif inDataType == "multipoint":
if len(values) > 2:
pnt = createPoint(pnt, values)
pntarray.add(pnt)
elif (len(values) == 2 and lineno != 2) or values[0].lower() == "end":
row = rows.newrow()
row.SetValue(shapefield, pntarray)
# store the feature id just in case there is an error. helps track down the offending line in the input text file.
if values[0].lower() != "end":
row.SetValue(idfield, featid)
featid = int(values[0])
else:
row.SetValue(idfield, featid)
rows.insertrow(row)
pntarray.removeall()
elif (len(values) == 2 and lineno == 2):
featid = int(values[0])
# for polygons and lines. polygons have a bit of logic for interior rings (donuts).
# lines use the same logic as polygons (except for the interior rings)
elif inDataType == "polygon" or inDataType == "polyline":
            # finishes the previous feature: adds the point array to the part
            # array and then the part array to the feature
if (len(values) == 2 and float(values[1]) == 0 and lineno != 2) or values[0].lower() == "end":
partarray.add(pntarray)
row = rows.newrow()
row.SetValue(shapefield, partarray)
# store the feature id just in case there is an error. helps track down the offending line in the input text file.
if values[0].lower() != "end":
row.SetValue(idfield, featid)
featid = int(values[0])
else:
row.SetValue(idfield, featid)
rows.insertrow(row)
partarray.removeall()
pntarray.removeall()
#adds parts and/or interior rings to the part array
elif (len(values) == 2 and float(values[1]) > 0) or values[0].lower() == "interiorring":
partarray.add(pntarray)
pntarray.removeall()
#add points to the point array
elif len(values) > 2:
pnt = createPoint(pnt, values)
pntarray.add(pnt)
elif (len(values) == 2 and lineno == 2):
featid = int(values[0])
inputTxtFile.close()
del rows
del row
except Exception, ErrorDesc:
# handle the errors here. if the point creation fails, want to keep track of which point failed (easier to fix the
# text file if we do)
if ErrorDesc[0] == msgErrorCreatingPoint:
if inDataType.lower() == "point":
msgErrorCreatingPoint = msgErrorCreatingPoint % (values[0], values[0])
else:
msgErrorCreatingPoint = msgErrorCreatingPoint % (values[0], featid)
gp.AddError(msgErrorCreatingPoint)
elif ErrorDesc[0] != "":
gp.AddError(str(ErrorDesc))
gp.AddError(gp.getmessages(2))
# make sure to close up the fileinput no matter what.
if inputTxtFile: inputTxtFile.close()
|
[
"sangl003@umn.edu"
] |
sangl003@umn.edu
|
59d05ec2e9f2d8d72c372a356a622ed059d63ef6
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/src/garage/tf/embeddings/encoder.py
|
4a533d386313e0c5ff2a22ef5ffd4f34132b7b4e
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,401
|
py
|
"""Encoders in TensorFlow."""
# pylint: disable=abstract-method
from garage.np.embeddings import Encoder as BaseEncoder
from garage.np.embeddings import StochasticEncoder as BaseStochasticEncoder
from garage.tf.models import Module, StochasticModule
class Encoder(BaseEncoder, Module):
"""Base class for encoders in TensorFlow."""
def get_latent(self, input_value):
"""Get a sample of embedding for the given input.
Args:
input_value (numpy.ndarray): Tensor to encode.
Returns:
numpy.ndarray: An embedding sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
def get_latents(self, input_values):
"""Get samples of embedding for the given inputs.
Args:
input_values (numpy.ndarray): Tensors to encode.
Returns:
numpy.ndarray: Embeddings sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (list[numpy.ndarray]): Means of the distribution.
- log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
def clone(self, name):
"""Return a clone of the encoder.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created encoder. It has to be
different from source encoder if cloned under the same
computational graph.
Returns:
garage.tf.embeddings.encoder.Encoder: Newly cloned encoder.
"""
class StochasticEncoder(BaseStochasticEncoder, StochasticModule):
"""Base class for stochastic encoders in TensorFlow."""
def build(self, embedding_input, name=None):
"""Build encoder.
        After build, self.distribution is a Gaussian distribution conditioned
on embedding_input.
Args:
embedding_input (tf.Tensor) : Embedding input.
name (str): Name of the model, which is also the name scope.
"""
|
[
"noreply@github.com"
] |
rlworkgroup.noreply@github.com
|
86839270f23d98319f47902673cdb538855f4d9c
|
c46fb8015fb59c1c51f2f439643d8f0e6fb1d3e7
|
/libangp2p/settings_local.py
|
3b9c634fd65a59253dcce8f4cabbaf9057ee8aa5
|
[] |
no_license
|
daxiaoluo/LiBangP2P
|
51b364cf96cb0d58d07ce3f292647ae642f9d6c4
|
e474a9d8aba5b3c4ff950be7c1cf139d75f0fea1
|
refs/heads/master
| 2016-09-05T14:54:40.981722
| 2015-01-05T09:12:01
| 2015-01-05T09:12:01
| 28,805,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,630
|
py
|
# Django settings for libangp2p project.
# coding=utf8
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
('yirenjun', 'yirenjun@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'localDatabase/libangp2p.db',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(SITE_ROOT, r'../media/dev/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(SITE_ROOT, r'../static/')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '=3qj8b)y@gz6w$l&vq$c&kdq6efy6z7ly4$k71lrc7_dt7r8f!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'utils.disableCSRF.DisableCSRF',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'libangp2p.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'libangp2p.wsgi_dev.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, r'../templates').replace('\\', '/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
# Uncomment the next line to enable the admin:
# 'django_admin_bootstrapped',
# 'suit',
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'xadmin',
'crispy_forms',
'reversion',
'rest_framework',
'rest_framework.authtoken',
'core',
'DjangoUeditor',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS': 'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'PAGINATE_BY': 20,
'MAX_PAGINATE_BY': 100,
'PAGINATE_BY_PARAM': 'page_size',
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(levelname)s] [%(asctime)s] [%(module)s] [%(funcName)s] [%(lineno)s] [%(message)s]'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'log/dev/LiBangP2P_dev.log',
'formatter': 'verbose',
'when': 'D',
'backupCount': 30,
'interval': 1,
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins',],
'level': 'ERROR',
'propagate': True,
},
'core.views': {
'handlers': ['file',],
'level': 'DEBUG',
'propagate': True,
},
'core.crontab': {
'handlers': ['file',],
'level': 'DEBUG',
'propagate': True,
},
}
}
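# Hypothetical usage sketch (not part of the original settings): the
# 'core.views' logger configured above would typically be obtained in
# core/views.py as
#
#   import logging
#   logger = logging.getLogger('core.views')
#   logger.debug('goes to log/dev/LiBangP2P_dev.log via the "file" handler')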
UEDITOR_SETTINGS = {
'toolbars':{"testa":[['fullscreen', 'source', '|', 'undo', 'redo', '|','bold', 'italic', 'underline']],
"testb":[[ 'source', '|','bold', 'italic', 'underline']],
"mytoolbars":[['fullscreen', 'source', '|', 'undo', 'redo', '|',
'bold', 'italic', 'underline', 'fontborder', 'strikethrough', 'superscript', 'subscript', 'removeformat', 'formatmatch', 'autotypeset', 'blockquote', 'pasteplain', '|', 'forecolor', 'backcolor', 'insertorderedlist', 'insertunorderedlist', 'selectall', 'cleardoc', '|',
'rowspacingtop', 'rowspacingbottom', 'lineheight', '|',
'customstyle', 'paragraph', 'fontfamily', 'fontsize', '|',
'directionalityltr', 'directionalityrtl', 'indent', '|',
'justifyleft', 'justifycenter', 'justifyright', 'justifyjustify', '|', 'touppercase', 'tolowercase', '|',
'link', 'unlink', 'anchor', '|', 'imagenone', 'imageleft', 'imageright', 'imagecenter', '|',
'insertimage', 'emotion', 'scrawl', 'insertvideo', 'music', 'attachment', 'map', 'gmap', 'insertframe','insertcode', 'webapp', 'pagebreak', 'template', 'background', '|',
'horizontal', 'date', 'time', 'spechars', 'snapscreen', 'wordimage', '|',
'inserttable', 'deletetable', 'insertparagraphbeforetable', 'insertrow', 'deleterow', 'insertcol', 'deletecol', 'mergecells', 'mergeright', 'mergedown', 'splittocells', 'splittorows', 'splittocols', 'charts', '|',
'print', 'preview', 'searchreplace', 'help', 'drafts']],
},
"images_upload":{
"allow_type":"jpg,png,gif", #定义允许的上传的图片类型
"path": os.path.join(MEDIA_ROOT, r'upload/img'), #定义默认的上传路径
"max_size":"22222kb" #定义允许上传的图片大小,0代表不限制
},
"files_upload":{
"allow_type":"zip,rar,txt,gif,pdf", #定义允许的上传的文件类型
"path": os.path.join(MEDIA_ROOT, r'upload/file'), #定义默认的上传路径
"max_size":"22222kb" #定义允许上传的文件大小,0代表不限制
},
"image_manager":{
"path": os.path.join(MEDIA_ROOT, r'upload/imglib'), #图片管理器的位置,如果没有指定,默认跟图片路径上传一样
},
"scrawl_upload":{
"path": os.path.join(MEDIA_ROOT, r'upload/thumbnail'), #涂鸦图片默认的上传路径
}
}
|
[
"taoluo@yahoo-inc.com"
] |
taoluo@yahoo-inc.com
|
cef64f6409c5dc09a99e25ec6e38910be4bcc2a8
|
3276853a9cf821925e8cf09d8c9242852cd53e82
|
/revolv/project/migrations/0043_auto_20150504_0137.py
|
be889e1db6fc0bc3e58e7871b389afe4eb36e014
|
[] |
no_license
|
RE-volv/revolv
|
6b1d2672c428e9ee88dd6da5a5b23c908f0a0870
|
88172f5fa829a187ceb94d1d160cc1405ce0ab2e
|
refs/heads/master
| 2022-12-07T02:42:12.614770
| 2021-04-29T01:30:01
| 2021-04-29T01:30:01
| 52,948,154
| 4
| 15
| null | 2022-11-22T00:43:30
| 2016-03-02T08:52:23
|
CSS
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('project', '0042_auto_20150503_2140'),
]
operations = [
migrations.AlterField(
model_name='donationlevel',
name='description',
field=models.TextField(),
),
]
|
[
"sameera.vemulapalli@berkeley.edu"
] |
sameera.vemulapalli@berkeley.edu
|
fbb662e15e48e52bc6bfc3b63d44116d112939a0
|
f7816d48a57b4497b4631bf3d0932eee8c4af656
|
/defa/wsgi.py
|
e03a0e3ccee384ca07c95b1a421924b578f7f192
|
[] |
no_license
|
ThiagoDiasV/defa-stock-management
|
a38af15d8b9f99e00d9b3d3dfde3f29adb5408d7
|
81167f80c2d57f9668612381a0fa43f4acd16bc0
|
refs/heads/master
| 2021-09-27T17:27:39.785567
| 2020-04-08T19:59:34
| 2020-04-08T19:59:34
| 244,090,455
| 0
| 0
| null | 2021-06-10T18:43:24
| 2020-03-01T05:06:13
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for defa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "defa.settings")
application = get_wsgi_application()
|
[
"thiago76ers@gmail.com"
] |
thiago76ers@gmail.com
|
6c52fcc117baf730bc43b87d478345168749bb0f
|
88dd2d9233aede5f186a1f3608187bf6756b2349
|
/omc3/plotting/plot_bbq.py
|
41d9b369edea66da9ef427c33b651da242558621
|
[
"MIT"
] |
permissive
|
pylhc/omc3
|
d6462e3a7c4d8cffca20a9f9d2de7e37c0f4dcf9
|
f8180791c20c27dbe3f717c0681f379cd13d03cc
|
refs/heads/master
| 2023-09-01T08:07:29.940932
| 2023-08-29T09:19:41
| 2023-08-29T09:19:41
| 145,120,244
| 13
| 5
|
MIT
| 2023-09-13T20:23:25
| 2018-08-17T12:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 8,996
|
py
|
"""
Plot BBQ
--------
Provides the plotting function for the extracted and cleaned BBQ data from timber.
**Arguments:**
*--Required--*
- **input**:
BBQ data as data frame or tfs file.
*--Optional--*
- **interval** *(float)*:
x_axis interval that was used in calculations.
- **kick**:
Kick file as data frame or tfs file.
- **manual_style** *(DictAsString)*:
Additional style rcParameters which update the set of predefined ones.
default: ``{}``
- **output** *(str)*:
Save figure to this location.
- **plot_styles** *(UnionPathStr)*:
Which plotting styles to use, either from plotting.styles.*.mplstyles
or default mpl.
default: ``['standard', 'bbq']``
- **show**:
Show plot.
action: ``store_true``
- **two_plots**:
Plot two axis into the figure.
action: ``store_true``
- **x_lim** *(float)*:
X-Axis limits. (yyyy-mm-dd HH:mm:ss.mmm)
- **y_lim** *(float)*:
Y-Axis limits.
"""
from collections import OrderedDict
from contextlib import suppress
from pathlib import Path
import matplotlib.dates as mdates
import numpy as np
from generic_parser import entrypoint, EntryPointParameters
from generic_parser.entry_datatypes import DictAsString
from generic_parser.entrypoint_parser import save_options_to_config
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from pandas.plotting import register_matplotlib_converters
from omc3 import amplitude_detuning_analysis as ad_ana
from omc3.definitions import formats
from omc3.definitions.constants import PLANES
from omc3.plotting.utils import colors as pcolors, style as pstyle
from omc3.tune_analysis import kick_file_modifiers as kick_mod
from omc3.tune_analysis.constants import (get_mav_window_header, get_used_in_mav_col,
get_bbq_col, get_mav_col)
from omc3.utils import logging_tools
from omc3.utils.iotools import UnionPathStr, PathOrStr, PathOrStrOrDataFrame
LOG = logging_tools.get_logger(__name__)
# Registering converters for datetime plotting as pandas won't do it for us automatically anymore
register_matplotlib_converters()
def get_params():
params = EntryPointParameters()
params.add_parameter(
name="input",
help="BBQ data as data frame or tfs file.",
required=True,
type=PathOrStrOrDataFrame
)
params.add_parameter(
name="kick",
help="Kick file as data frame or tfs file.",
type=PathOrStrOrDataFrame
)
params.add_parameter(
name="output",
help="Save figure to this location.",
type=PathOrStr,
)
params.add_parameter(
name="show",
help="Show plot.",
action="store_true"
)
params.add_parameter(
name="x_lim",
help="X-Axis limits. (yyyy-mm-dd HH:mm:ss.mmm)",
type=float,
nargs=2,
)
params.add_parameter(
name="y_lim",
help="Y-Axis limits.",
type=float,
nargs=2,
)
params.add_parameter(
name="interval",
help="x_axis interval that was used in calculations.",
type=float,
nargs=2,
)
params.add_parameter(
name="two_plots",
help="Plot two axis into the figure.",
action="store_true",
)
params.add_parameter(
name="plot_styles",
type=UnionPathStr,
nargs="+",
default=['standard', 'bbq'],
help='Which plotting styles to use, either from plotting.styles.*.mplstyles or default mpl.'
)
params.add_parameter(
name="manual_style",
type=DictAsString,
default={},
help='Additional style rcParameters which update the set of predefined ones.'
)
return params
@entrypoint(get_params(), strict=True)
def main(opt):
"""Plot BBQ wrapper."""
LOG.info("Plotting BBQ.")
_save_options(opt)
pstyle.set_style(opt.pop("plot_styles"), opt.pop("manual_style"))
bbq_df = kick_mod.read_timed_dataframe(opt.input) if isinstance(opt.input, (Path, str)) else opt.input
opt.pop("input")
if opt.kick is not None:
if opt.interval is not None:
raise ValueError("interval and kick-file given. Both are used for the same purpose. Please only use one.")
window = 0 # not too important, bars will then indicate first and last kick directly
with suppress(KeyError):
window = bbq_df.headers[get_mav_window_header()]
kick_df = kick_mod.read_timed_dataframe(opt.kick) if isinstance(opt.kick, (Path, str)) else opt.kick
opt.interval = ad_ana.get_approx_bbq_interval(bbq_df, kick_df.index, window)
bbq_df = bbq_df.loc[opt.interval[0]:opt.interval[1]]
opt.pop("kick")
show = opt.pop("show")
out = opt.pop("output")
fig = _plot_bbq_data(bbq_df, **opt)
if show:
plt.show()
if out:
fig.savefig(out)
return fig
def _plot_bbq_data(bbq_df, interval=None, x_lim=None, y_lim=None, two_plots=False):
"""
Plot BBQ data.
Args:
bbq_df: BBQ Dataframe with moving average columns.
interval: start and end time of used interval, will be marked with red bars.
x_lim: x limits (time).
y_lim: y limits (tune).
output: Path to the output file.
show: Shows plot if ``True``.
        two_plots: Plots each tune in its own axes if ``True``.
Returns:
Plotted figure.
"""
LOG.debug("Plotting BBQ data.")
fig, axs = plt.subplots(1+two_plots, 1)
if not two_plots:
axs = [axs, axs]
handles = [None] * (3 * len(PLANES))
for idx, plane in enumerate(PLANES):
color = pcolors.get_mpl_color(idx)
mask = np.array(bbq_df[get_used_in_mav_col(plane)], dtype=bool)
# plot and save handles for nicer legend
handles[idx] = axs[idx].plot([i.datetime for i in bbq_df.index],
bbq_df[get_bbq_col(plane)],
color=pcolors.change_color_brightness(color, .4),
marker="o", markerfacecolor="None",
label="$Q_{:s}$".format(plane.lower(),)
)[0]
filtered_data = bbq_df.loc[mask, get_bbq_col(plane)].dropna()
handles[len(PLANES)+idx] = axs[idx].plot(filtered_data.index, filtered_data.to_numpy(),
color=pcolors.change_color_brightness(color, .7),
marker=".",
label="filtered".format(plane.lower())
)[0]
handles[2*len(PLANES)+idx] = axs[idx].plot(bbq_df.index, bbq_df[get_mav_col(plane)],
color=color,
linestyle="-",
label="moving av.".format(plane.lower())
)[0]
if (y_lim is None or y_lim[0] is None) and two_plots:
axs[idx].set_ylim(bottom=min(bbq_df.loc[mask, get_bbq_col(plane)]))
if (y_lim is None or y_lim[1] is None) and two_plots:
axs[idx].set_ylim(top=max(bbq_df.loc[mask, get_bbq_col(plane)]))
# things to add/do only once if there is only one plot
for idx in range(1+two_plots):
if interval:
axs[idx].axvline(x=interval[0], color="red")
axs[idx].axvline(x=interval[1], color="red")
if two_plots:
axs[idx].set_ylabel("$Q_{:s}$".format(PLANES[idx]))
else:
axs[idx].set_ylabel('Tune')
if y_lim is not None:
axs[idx].set_ylim(y_lim)
axs[idx].yaxis.set_major_formatter(FormatStrFormatter('%.5f'))
if x_lim is not None:
axs[idx].set_xlim(x_lim)
axs[idx].set_xlabel('Time')
axs[idx].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
if idx:
# don't show labels on upper plot (if two plots)
# use the visibility to allow cursor x-position to be shown
axs[idx].tick_params(labelbottom=False)
axs[idx].xaxis.get_label().set_visible(False)
if not two_plots or idx:
# reorder legend
axs[idx].legend(handles, [h.get_label() for h in handles],
loc='lower right', bbox_to_anchor=(1.0, 1.01), ncol=3,)
return fig
def _save_options(opt):
if opt.output:
out_path = Path(opt.output).parent
out_path.mkdir(exist_ok=True, parents=True)
save_options_to_config(str(out_path / formats.get_config_filename(__file__)),
OrderedDict(sorted(opt.items()))
)
# Script Mode ------------------------------------------------------------------
if __name__ == '__main__':
main()
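# A hedged invocation sketch (the file names are hypothetical). Since main() is wrapped by
# generic_parser's @entrypoint, it should also be callable from Python with keyword
# arguments instead of the command line:
#   from omc3.plotting import plot_bbq
#   fig = plot_bbq.main(input="bbq_ampdet.tfs", kick="kick_ampdet_xy.tfs",
#                       output="bbq_plot.pdf", two_plots=True)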
|
[
"noreply@github.com"
] |
pylhc.noreply@github.com
|
bc658a36302464007ad33ec47d4af1d28b852f3e
|
120d82e5becce975ab95c3ca26b9cd2cf816faca
|
/9/9_political.py
|
19189180966f2500802503bed444b89bcfade82b
|
[] |
no_license
|
BruceMJ128/Python_Code-Python_for_data_analysis
|
c0ed37936951eebd7c0f5e17371fe8ab8e6d721b
|
82d7390e5b37089188cf0333625886aaa1c883e5
|
refs/heads/master
| 2021-01-19T04:46:06.318007
| 2017-04-06T06:34:42
| 2017-04-06T06:34:42
| 87,394,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,016
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
fec = pd.read_csv('data/ch09/P00000001-ALL.csv')
unique_cands = fec.cand_nm.unique()
parties = {'Bachmann, Michelle': 'Republican',
'Cain, Herman': 'Republican',
'Gingrich, Newt': 'Republican',
'Huntsman, Jon': 'Republican',
'Johnson, Gary Earl': 'Republican',
'McCotter, Thaddeus G': 'Republican',
'Obama, Barack': 'Democrat',
'Paul, Ron': 'Republican',
'Pawlenty, Timothy': 'Republican',
'Perry, Rick': 'Republican',
"Roemer, Charles E. 'Buddy' III": 'Republican',
'Romney, Mitt': 'Republican',
'Santorum, Rick': 'Republican'}
fec['party'] = fec.cand_nm.map(parties)
fec = fec[fec.contb_receipt_amt > 0]
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])]
fec.contbr_occupation.value_counts()[:10]
occ_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',
'C.E.O.': 'CEO'
}
# If no mapping provided, return x
f = lambda x: occ_mapping.get(x, x)
fec.contbr_occupation = fec.contbr_occupation.map(f)
emp_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'SELF' : 'SELF-EMPLOYED',
'SELF EMPLOYED' : 'SELF-EMPLOYED',
}
# If no mapping provided, return x
f = lambda x: emp_mapping.get(x, x)
fec.contbr_employer = fec.contbr_employer.map(f)
by_occupation = fec.pivot_table('contb_receipt_amt',
index='contbr_occupation',
columns='party', aggfunc='sum')
over_2mm = by_occupation[by_occupation.sum(1) > 2000000]
over_2mm
over_2mm.plot(kind='barh')
def get_top_amounts(group, key, n=5):
totals = group.groupby(key)['contb_receipt_amt'].sum()
return totals.order(ascending=False)[:n] # Order totals by key in descending order
grouped = fec_mrbo.groupby('cand_nm')
grouped.apply(get_top_amounts, 'contbr_occupation', n=7)
grouped.apply(get_top_amounts, 'contbr_employer', n=10)
bins = np.array([0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000])
labels = pd.cut(fec_mrbo.contb_receipt_amt, bins) #type is pandas.core.series.Series
labels
grouped = fec_mrbo.groupby(['cand_nm', labels])
grouped.size().unstack(0)
bucket_sums = grouped.contb_receipt_amt.sum().unstack(0) # unstack level 0: expand candidates horizontally into columns
bucket_sums
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)
normed_sums
normed_sums[:-2].plot(kind='barh', stacked=True)
grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])
totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)
totals = totals[totals.sum(1) > 100000]
totals[:10]
percent = totals.div(totals.sum(1), axis=0)
percent[:10]
from mpl_toolkits.basemap import Basemap, cm
from matplotlib import rcParams
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import shapefile
obama = percent['Obama, Barack']
fig=plt.figure(figsize=(12,12))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
lllat = 21; urlat=53; lllon=-118; urlon=-62
m = Basemap(ax=ax, projection='stere',lon_0=(urlon + lllon)/2, lat_0=(urlat+lllat)/2, llcrnrlat=lllat, urcrnrlat=urlat, llcrnrlon=lllon, urcrnrlon=urlon, resolution = 'l')
m.drawcoastlines()
m.drawcountries()
#m.drawstates() # draw the boundary of each state
sf = shapefile.Reader('data/ch09/statesp020_nt00032/statesp020')
shp = shapefile.Reader('data/ch09/statesp020_nt00032/statesp020.shp')
dbf = shapefile.Reader('data/ch09/statesp020_nt00032/statesp020.dbf')
shapes=sf.shapes()
points=shapes[3].points
shpsegs=[]
for point in points:
shpsegs.append(zip(point))
'''
lines = LineCollection(shpsegs, linewidths=(0.5, 1, 1.5, 2),linestyles='solid')
lines.set_facecolor('k')
lines.set_edgecolors('k')
lines.set_linewidth(0.3)
'''
'''
for npoly in range(shp.info()[0]):
    # draw colored polygons on the map
shpsegs = []
shp_object = shp.read_object(npoly)
verts = shp_object.vertives()
rings = len(verts)
for ring in range(rings):
lons, lats = zip(*verts[ring])
x, y = m(lons, lats)
shpsegs.append(zip(x,y))
if ring ==0:
shapedict = dbf.read_record(npoly)
name = shapedict['STATE']
lines = LineCollection(shpsegs, antialiaseds=(1,))
        # state_to_code dictionary, e.g. 'ALASKA' -> 'AK', omitted
try:
per =obama[state_to_code[name.upper()]]
except KeyError:
continue
lines.set_facecolor('k')
        lines.set_alpha(0.75*per) # scale the "percentage" down a little
lines.set_edgecolors('k')
lines.set_linewidth(0.3)
'''
plt.show()
|
[
"bruce.mj128@hotmail.com"
] |
bruce.mj128@hotmail.com
|
8562078511a2b98b35c0153092baecf64619a2b1
|
1442b1785a71794801b2e09febc5ce220a4562f1
|
/HT_6/task_7_8.py
|
f9bf7d0752072428d5ad0780f7a542fc94b38dab
|
[] |
no_license
|
yarik335/GeekPy
|
64acfc91502601d02525be9fb03ba527a15e3450
|
e8806d56f2a458c4548113170191bc3b017754d3
|
refs/heads/master
| 2021-05-15T19:27:07.822718
| 2018-01-24T13:55:01
| 2018-01-24T13:55:01
| 107,698,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
"""7Напишіть програму, де клас «геометричні фігури» (figure) містить властивість color з початковим значенням white і метод для
зміни кольору фігури, а його підкласи «овал» (oval) і «квадрат» (square) містять методи __init__ для завдання початкових
розмірів об'єктів при їх створенні.
8Видозмініть програму так, щоб метод __init__ мався в класі «геометричні фігури» та приймав кольор фігури при створенні
екземпляру, а методи __init__ підкласів доповнювали його та додавали початкові розміри.
"""
class Figure:
""" Base class of all figures"""
color = "white"
def __init__(self, color):
self.color = color
def change_color(self, color):
self.color = color
class Oval(Figure):
def __init__(self, color, width, height):
Figure.__init__(self, color)
self.width = width
self.height = height
class Square(Figure):
def __init__(self, color, line_size):
Figure.__init__(self, color)
self.line_size = line_size
sq = Square("black", 3)
ov = Oval("red", 34, 56)
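# Quick check of the colour-changing method inherited from Figure:
#   sq.change_color("white")
#   print(sq.color, ov.color)  # -> white red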
|
[
"yarik3351@gmail.com"
] |
yarik3351@gmail.com
|
b15c4909e4ebcee994aad2f27a4f718e33b226bd
|
c2c212ba42ebfa35f3b6122344978bc94ec8fa67
|
/recipe_scrapers/primaledgehealth.py
|
5a1487eff3700876a66e6921811d7de3f7c7ca93
|
[
"MIT"
] |
permissive
|
hhursev/recipe-scrapers
|
0cd6b7db4ef23ca825f2354f5d1ba76076a14813
|
8ced0227b3b16c532fc5ebf3060c99ee0452adab
|
refs/heads/main
| 2023-09-03T07:33:29.684121
| 2023-09-01T21:15:50
| 2023-09-01T21:15:50
| 42,446,168
| 1,276
| 443
|
MIT
| 2023-09-14T16:34:09
| 2015-09-14T12:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 633
|
py
|
# mypy: disallow_untyped_defs=False
from ._abstract import AbstractScraper
class PrimalEdgeHealth(AbstractScraper):
@classmethod
def host(cls):
return "primaledgehealth.com"
def title(self):
return self.schema.title()
def total_time(self):
return self.schema.total_time()
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return self.schema.ratings()
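# A hedged usage sketch (assumes the package's top-level scrape_me helper, network access,
# and a hypothetical recipe URL on the supported host):
#   from recipe_scrapers import scrape_me
#   scraper = scrape_me("https://primaledgehealth.com/example-recipe/")
#   print(scraper.title(), scraper.total_time(), scraper.ingredients())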
|
[
"noreply@github.com"
] |
hhursev.noreply@github.com
|
1658da8e0382e70539d831eed17bf286d77ea1d0
|
7b41e7883e210724ccc0e08ac0c213643e5cf0bd
|
/Week_02/589-N叉树的前序遍历.py
|
69e93f12890964d45c4e53cc718b875d35535fe5
|
[] |
no_license
|
lieagle/AlgorithmQIUZHAO
|
101fe17c5cc26aafc2a576844f903e8d36a649d3
|
ef83d285b10676d203b5d21b39d2dffbf5ef72c9
|
refs/heads/master
| 2022-12-04T06:23:27.253569
| 2020-08-28T12:14:45
| 2020-08-28T12:14:45
| 280,793,853
| 0
| 0
| null | 2020-07-19T04:50:05
| 2020-07-19T04:50:04
| null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
#Leetcode 589
from typing import List  # List is needed for the List[int] type hints below
#Solution 1: recursion, append the root's value first, then recurse into the children
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
def preorder(self, root: 'Node') -> List[int]:
if not root: return None
output = []
def helper(root):
output.append(root.val)
for i in root.children:
helper(i)
helper(root)
return output
#Solution 2: use a stack. Append the root's value when popping it, then push its children reversed so they pop in the original left-to-right order.
class Solution:
def preorder(self, root: 'Node') -> List[int]:
if not root: return None
output = []
stack = [root]
while len(stack):
temp = stack.pop()
output.append(temp.val)
stack.extend(reversed(temp.children))
return output
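# A small self-check sketch (assumes the Node class from the commented-out definition
# above). For the tree 1 -> [3, 2, 4] both solutions yield the preorder [1, 3, 2, 4]:
#   root = Node(1, [Node(3, []), Node(2, []), Node(4, [])])
#   print(Solution().preorder(root))  # -> [1, 3, 2, 4]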
|
[
"3188279500@qq.com"
] |
3188279500@qq.com
|
d35c922b47e0acae3cbeebc7a6215fc96412eaaf
|
94ec56a9c81085e04f15382a6d3ba0e8944fe589
|
/chapter1/ex30.py
|
4d4f55bac6031013cc17ff27eefd14eac24d56d1
|
[] |
no_license
|
indraputra147/pythonworkbook
|
39ccd4bb6b8ee24debed13938c88f119819603b0
|
af2ed0879234085ef3e2b6fc747f84dee703bc97
|
refs/heads/master
| 2022-08-28T18:31:46.151942
| 2020-05-25T12:04:06
| 2020-05-25T12:04:06
| 262,615,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
#Exercise 30: Celsius to Fahrenheit and Kelvin
"""
The program reads a temperature from the user in degrees Celsius,
then displays the equivalent temperature in degrees Fahrenheit and Kelvin.
"""
c = float(input("Input the temperature in degrees Celsius: "))
f = (9 * c /5) + 32
k = c + 273.15
print("Output:")
print("%.2f" % f + " degree Fahrenheit ")
print("%.2f" % k + " degree Kelvin")
|
[
"indraputra785@gmail.com"
] |
indraputra785@gmail.com
|
bb14e9fd5cda6909de23c7649af9f9b51cdf3c25
|
025bd10bcc4e03edfc14c8b336c0bf370ffeba64
|
/napalm_logs/transport/zeromq.py
|
eacb29054ecfb9da0087548620927b60cfe436a7
|
[
"Apache-2.0"
] |
permissive
|
lspgn/napalm-logs
|
3c514df1302aa28519fdde03207dd00a4418133c
|
c626f809b228cde3053ed972a58ac671871d8856
|
refs/heads/master
| 2021-01-20T07:37:43.022881
| 2017-04-26T10:37:58
| 2017-04-26T10:37:58
| 90,020,543
| 0
| 0
| null | 2017-05-02T10:25:45
| 2017-05-02T10:25:45
| null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
# -*- coding: utf-8 -*-
'''
ZeroMQ transport for napalm-logs.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import stdlib
import json
import logging
# Import third party libs
import zmq
# Import napalm-logs pkgs
from napalm_logs.exceptions import BindException
from napalm_logs.transport.base import TransportBase
log = logging.getLogger(__name__)
class ZMQTransport(TransportBase):
'''
ZMQ transport class.
'''
def __init__(self, addr, port):
self.addr = addr
self.port = port
def start(self):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
try:
self.socket.bind('tcp://{addr}:{port}'.format(
addr=self.addr,
port=self.port)
)
except zmq.error.ZMQError as err:
log.error(err, exc_info=True)
raise BindException(err)
def publish(self, obj):
self.socket.send(obj)
def stop(self):
if hasattr(self, 'socket'):
self.socket.close()
if hasattr(self, 'context'):
self.context.term()
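# A minimal usage sketch (the address and port are hypothetical; requires pyzmq and a
# subscriber on the other end to actually receive anything):
#   transport = ZMQTransport('127.0.0.1', 49017)
#   transport.start()
#   transport.publish(json.dumps({'message': 'test'}).encode())
#   transport.stop()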
|
[
"mirucha@cloudflare.com"
] |
mirucha@cloudflare.com
|
86db8f9199e6b954fe085d7e65ecfdefad8f17d5
|
39e03684081b27311385a0ab31afcc2e09883e5c
|
/mmdet/core/mask/__init__.py
|
d9132f8a58bd469e13b7a8957152d8d7fd2cc2ca
|
[
"MIT",
"Python-2.0"
] |
permissive
|
witnessai/MMSceneGraph
|
8d0b2011a946ddcced95fbe15445b7f4da818509
|
bc5e0f3385205404c712ae9f702a61a3191da0a1
|
refs/heads/master
| 2023-08-12T06:54:00.551237
| 2021-10-12T03:04:21
| 2021-10-12T03:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from .mask_target import mask_target
from .utils import split_combined_polys
from .dense_reppoints_target import dense_reppoints_target
__all__ = ['split_combined_polys', 'mask_target', 'dense_reppoints_target']
|
[
"23736866+Kenneth-Wong@users.noreply.github.com"
] |
23736866+Kenneth-Wong@users.noreply.github.com
|
14bb902d96d1f49a37653760a597039fff4959e9
|
2c32193626ade72c798a60442ffea299724b82ee
|
/network/migrations/0002_auto_20200807_1856.py
|
f9d3b61a442cb9fb154ec7b31e908e3b778e0c2a
|
[] |
no_license
|
Swetha-14/opinion-mining
|
fa249a56d953c136b778711864aceb1507843f0d
|
98bbfa4fefb343454668f2775a4e2756e94477a4
|
refs/heads/master
| 2023-07-15T00:45:03.443896
| 2021-08-17T12:22:02
| 2021-08-17T12:22:02
| 388,768,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
# Generated by Django 3.0.8 on 2020-08-07 13:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('network', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(blank=True, related_name='following', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('likes', models.ManyToManyField(blank=True, related_name='liked_posts', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"swethasukumar14@gmail.com"
] |
swethasukumar14@gmail.com
|
dc1e1d4c9c170ce0ef968217095a822c1d65bf67
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/atbash-cipher/0fa3dc50f1d6432491b42aa5d63e891b.py
|
731850a15eca31ce3c3a1acf949ebc37d5a2cdbe
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
#!/usr/bin/env python
from re import sub
def encode(text):
return _chunk([_cipher(i) for i in _clean(text)])
def decode(text):
return "".join([_cipher(i) for i in _clean(text)])
# non-public methods
def _clean(text):
"""Takes some text and returns a lowercase/alphnumeric spaceless string.
"""
return sub(r'[\W_]+', '', text.lower().replace(" ",""))
def _cipher(character):
"""Returns single string character based on cipher rules. This could
probably be written much more efficiently or via some built-in, I just
haven't discovered it yet.
"""
alpha = "abcdefghijklmnopqrstuvwxyz0123456789"
cipher = "zyxwvutsrqponmlkjihgfedcba0123456789"
return "".join([c for p, c in zip(alpha, cipher) if character == p])
def _chunk(ciphered_list):
"""Takes a list and returns a chunked string. I'm sure there is a more
clever method to handle this.
"""
if len(ciphered_list) < 5:
return "".join(ciphered_list)
else:
index = 0
chunked_string = ""
while index < len(ciphered_list):
if (index + 1) % 5 == 0 and (index + 1) != len(ciphered_list):
chunked_string = chunked_string + ciphered_list[index] + " "
index += 1
else:
chunked_string = chunked_string + ciphered_list[index]
index += 1
return chunked_string
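# Worked examples using the standard Atbash mapping (a<->z, b<->y, ...):
#   encode("test")              -> "gvhg"
#   encode("Truth is fiction.") -> "gifgs rhurx grlm"
#   decode("gvhg")              -> "test"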
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
aa79fc4667a859ff73b7efe67caaf9c269dc8d56
|
e5d5e331b1d124f57da2d93db94f3da6cc7640bc
|
/hw1/hw_1.py
|
8059f864b017aeedd28bbe679da391430406bbea
|
[] |
no_license
|
Krishna-Sankar/Natural-Language-Processing-585-UMASS-Fall-2017
|
806f91055878161373ae8cd49192d9940b290e47
|
19ad1e708c7d6b0522a87b4340bd8c681f12f4d7
|
refs/heads/master
| 2020-03-27T09:53:18.456040
| 2017-12-26T22:22:07
| 2017-12-26T22:22:07
| 146,380,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,284
|
py
|
from __future__ import division
import matplotlib.pyplot as plt
import math
import os
import time
from collections import defaultdict
# Global class labels.
POS_LABEL = 'pos'
NEG_LABEL = 'neg'
###### DO NOT MODIFY THIS FUNCTION #####
def tokenize_doc(doc):
"""
Tokenize a document and return its bag-of-words representation.
doc - a string representing a document.
returns a dictionary mapping each word to the number of times it appears in doc.
"""
bow = defaultdict(float)
tokens = doc.split()
lowered_tokens = map(lambda t: t.lower(), tokens)
for token in lowered_tokens:
bow[token] += 1.0
return dict(bow)
###### END FUNCTION #####
def n_word_types(word_counts):
return len(word_counts)
pass
def n_word_tokens(word_counts):
add=0
for word in word_counts:
add+=word_counts[word]
return int(add)
pass
class NaiveBayes:
"""A Naive Bayes model for text classification."""
def __init__(self, path_to_data, tokenizer):
# Vocabulary is a set that stores every word seen in the training data
self.vocab = set()
self.path_to_data = path_to_data
self.tokenize_doc = tokenizer
self.train_dir = os.path.join(path_to_data, "train")
self.test_dir = os.path.join(path_to_data, "test")
# class_total_doc_counts is a dictionary that maps a class (i.e., pos/neg) to
# the number of documents in the trainning set of that class
self.class_total_doc_counts = { POS_LABEL: 0.0,
NEG_LABEL: 0.0 }
# class_total_word_counts is a dictionary that maps a class (i.e., pos/neg) to
# the number of words in the training set in documents of that class
self.class_total_word_counts = { POS_LABEL: 0.0,
NEG_LABEL: 0.0 }
# class_word_counts is a dictionary of dictionaries. It maps a class (i.e.,
# pos/neg) to a dictionary of word counts. For example:
# self.class_word_counts[POS_LABEL]['awesome']
# stores the number of times the word 'awesome' appears in documents
# of the positive class in the training documents.
self.class_word_counts = { POS_LABEL: defaultdict(float),
NEG_LABEL: defaultdict(float) }
def train_model(self):
"""
This function processes the entire training set using the global PATH
variable above. It makes use of the tokenize_doc and update_model
functions you will implement.
"""
pos_path = os.path.join(self.train_dir, POS_LABEL)
neg_path = os.path.join(self.train_dir, NEG_LABEL)
for (p, label) in [ (pos_path, POS_LABEL), (neg_path, NEG_LABEL) ]:
for f in os.listdir(p):
with open(os.path.join(p,f),'r') as doc:
content = doc.read()
self.tokenize_and_update_model(content, label)
self.report_statistics_after_training()
def report_statistics_after_training(self):
"""
Report a number of statistics after training.
"""
print "REPORTING CORPUS STATISTICS"
print "NUMBER OF DOCUMENTS IN POSITIVE CLASS:", self.class_total_doc_counts[POS_LABEL]
print "NUMBER OF DOCUMENTS IN NEGATIVE CLASS:", self.class_total_doc_counts[NEG_LABEL]
print "NUMBER OF TOKENS IN POSITIVE CLASS:", self.class_total_word_counts[POS_LABEL]
print "NUMBER OF TOKENS IN NEGATIVE CLASS:", self.class_total_word_counts[NEG_LABEL]
print "VOCABULARY SIZE: NUMBER OF UNIQUE WORDTYPES IN TRAINING CORPUS:", len(self.vocab)
def update_model(self, bow, label):
"""
IMPLEMENT ME!
Update internal statistics given a document represented as a bag-of-words
bow - a map from words to their counts
label - the class of the document whose bag-of-words representation was input
This function doesn't return anything but should update a number of internal
statistics. Specifically, it updates:
        - the internal map that counts, per class, how many times each word was
seen (self.class_word_counts)
- the number of words seen for each label (self.class_total_word_counts)
- the vocabulary seen so far (self.vocab)
- the number of documents seen of each label (self.class_total_doc_counts)
"""
for words in bow:
self.class_word_counts[label][words]+=bow[words]
self.class_total_word_counts[label]+=bow[words]
self.vocab.add(words)
self.class_total_doc_counts[label]+=1.0
pass
def tokenize_and_update_model(self, doc, label):
"""
Tokenizes a document doc and updates internal count statistics.
doc - a string representing a document.
        label - the sentiment of the document (either positive or negative)
stop_word - a boolean flag indicating whether to stop word or not
Make sure when tokenizing to lower case all of the tokens!
"""
bow = self.tokenize_doc(doc)
self.update_model(bow, label)
def top_n(self, label, n):
"""
Returns the most frequent n tokens for documents with class 'label'.
"""
return sorted(self.class_word_counts[label].items(), key=lambda (w,c): -c)[:n]
def p_word_given_label(self, word, label):
"""
Implement me!
Returns the probability of word given label
according to this NB model.
"""
return self.class_word_counts[label][word]/self.class_total_word_counts[label]
pass
    def p_word_given_label_and_pseudocount(self, word, label, alpha):
        """
        Implement me!
        Returns the probability of word given label, smoothed with the pseudocount alpha.
        alpha - pseudocount parameter
        """
        num = self.class_word_counts[label][word] + alpha
        den = self.class_total_word_counts[label] + alpha * len(self.vocab)
        return num / den
pass
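    # Worked smoothing example (counts invented purely to illustrate the formula):
    # if 'awesome' appears 3 times in the positive class, that class holds 1000
    # tokens in total, the vocabulary has 500 word types and alpha = 1, then
    #   P('awesome' | pos) = (3 + 1) / (1000 + 1 * 500) = 4 / 1500, roughly 0.00267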
def log_likelihood(self, bow, label, alpha):
"""
Implement me!
        Computes the log likelihood of a set of words given a label and pseudocount.
        bow - a bag of words (i.e., a tokenized document)
        label - either the positive or negative label
        alpha - float; pseudocount parameter
        """
        # the log likelihood is a sum of log probabilities, so the accumulator starts at 0
        log_like = 0.0
        for word in bow:
            log_like += math.log(self.p_word_given_label_and_pseudocount(word, label, alpha))
        return log_like
pass
def log_prior(self, label):
"""
Implement me!
Returns the log prior of a document having the class 'label'.
"""
import math
tot=self.class_total_doc_counts[POS_LABEL]+self.class_total_doc_counts[NEG_LABEL]
return math.log(self.class_total_doc_counts[label]/tot)
pass
def unnormalized_log_posterior(self, bow, label, alpha):
"""
Implement me!
Computes the unnormalized log posterior (of doc being of class 'label').
bow - a bag of words (i.e., a tokenized document)
"""
return self.log_prior(label)+self.log_likelihood(bow, label, alpha)
pass
def classify(self, bow, alpha):
"""
Implement me!
Compares the unnormalized log posterior for doc for both the positive
        and negative classes and returns either POS_LABEL or NEG_LABEL
(depending on which resulted in the higher unnormalized log posterior)
bow - a bag of words (i.e., a tokenized document)
"""
pos=self.unnormalized_log_posterior(bow,POS_LABEL,alpha)
neg=self.unnormalized_log_posterior(bow,NEG_LABEL,alpha)
if pos>neg:
return POS_LABEL
else:
return NEG_LABEL
pass
def likelihood_ratio(self, word, alpha):
"""
Implement me!
Returns the ratio of P(word|pos) to P(word|neg).
"""
p=self.p_word_given_label_and_pseudocount(word,POS_LABEL,alpha)
n=self.p_word_given_label_and_pseudocount(word,NEG_LABEL,alpha)
return p/n
pass
def evaluate_classifier_accuracy(self, alpha):
"""
DO NOT MODIFY THIS FUNCTION
alpha - pseudocount parameter.
This function should go through the test data, classify each instance and
compute the accuracy of the classifier (the fraction of classifications
        the classifier gets right).
"""
correct = 0.0
total = 0.0
pos_path = os.path.join(self.test_dir, POS_LABEL)
neg_path = os.path.join(self.test_dir, NEG_LABEL)
for (p, label) in [ (pos_path, POS_LABEL), (neg_path, NEG_LABEL) ]:
for f in os.listdir(p):
with open(os.path.join(p,f),'r') as doc:
content = doc.read()
bow = tokenize_doc(content)
if self.classify(bow, alpha) == label:
correct += 1.0
total += 1.0
return 100 * correct / total
def find_mistake(self, alpha):
correct = 0.0
total = 0.0
n_label=1 #loop iterator
margin=30 # margin of deviation error
pos_path = os.path.join(self.test_dir, POS_LABEL)
neg_path = os.path.join(self.test_dir, NEG_LABEL)
for (p, label) in [ (pos_path, POS_LABEL), (neg_path, NEG_LABEL) ]:
for f in os.listdir(p):
with open(os.path.join(p,f),'r') as doc:
text = doc.read()
bow = tokenize_doc(text)
if self.classify(bow, alpha) == label:
correct += 1.0
else:
n_label+=1
if (n_label==margin):
print text
break
total += 1.0
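# A hedged usage sketch (the data directory is hypothetical; it must contain train/ and
# test/ folders, each with pos/ and neg/ subfolders, as the class expects):
#   nb = NaiveBayes("large_movie_review_dataset", tokenizer=tokenize_doc)
#   nb.train_model()
#   print nb.evaluate_classifier_accuracy(alpha=1.0)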
|
[
"32944520+Krishna-Sankar@users.noreply.github.com"
] |
32944520+Krishna-Sankar@users.noreply.github.com
|
85f8a6ae7a8d540284851807e39aadb1241e8e37
|
f5ae8efb5b2e1704916c5ccefe2fe52417e78eac
|
/ziponline/migrations/0005_auto_20201004_1655.py
|
d7bf023b8454cc15c510e9ab10432afaeb2eeb52
|
[] |
no_license
|
ilyuhich/ZipPodryad
|
d7ea04504f60e6090b4e05197a1788fb28ea14c3
|
082a3658b40fc328c8d03afa6d6e53ef3d95e1c6
|
refs/heads/master
| 2023-01-12T01:59:39.001375
| 2020-11-18T16:08:03
| 2020-11-18T16:08:03
| 302,758,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
# Generated by Django 3.1.2 on 2020-10-04 13:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('ziponline', '0004_auto_20201004_1646'),
]
operations = [
migrations.AlterModelOptions(
name='good',
options={'ordering': ['-goods_name'], 'verbose_name': 'Товар', 'verbose_name_plural': 'Товары'},
),
migrations.AlterModelOptions(
name='moving',
options={'ordering': ['-move_date'], 'verbose_name': 'Движение', 'verbose_name_plural': 'Движения'},
),
migrations.AlterModelOptions(
name='storage',
options={'ordering': ['-storage_name'], 'verbose_name': 'Склад', 'verbose_name_plural': 'Склады'},
),
migrations.AddField(
model_name='moving',
name='move_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Дата добавления'),
preserve_default=False,
),
]
|
[
"ilyuhich@gmail.com"
] |
ilyuhich@gmail.com
|
d883ae48dd3209059106218666af8d4d05326789
|
31a7c0fa71fa9f7b75406fc6868c698acd714804
|
/aichuangzuo/urls.py
|
b076d4ec13104a1220c3a8972612f74ca81687f4
|
[] |
no_license
|
cc8848/AiChuangZuoBackground
|
adb65fc6af937257e4867e95068bf66320a62611
|
72c77f9569f8739a00a82dfe298db8797f04f228
|
refs/heads/master
| 2020-06-05T02:07:49.050905
| 2018-06-28T10:28:25
| 2018-06-28T10:28:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
"""aichuangzuo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('work_tark.urls', namespace='main')),
url(r'^index/', include('main.urls')),
url(r'^sucai/', include('sucai.urls')),
url(r'^user/', include('usermanage.urls')),
url(r'^baowen/', include('baowen.urls')),
url(r'^talk/', include('talkings.urls')),
url(r'^movie/', include('movigs.urls')),
url(r'^tark/', include('work_tark.urls')),
]
|
[
"shuo.du@edaibu.net"
] |
shuo.du@edaibu.net
|
53c86a1cb0665271a0b392b6547f0d182e3207ad
|
ad8c2983dce0aa842d271c31f9ddcc26f8a9c9ef
|
/dispatch/tests/test_state.py
|
ec1702fdea2ca90bd3c369766f374a4b12f2bbdc
|
[
"Apache-2.0"
] |
permissive
|
mesosphere-backup/dispatch
|
29587c875e775572d98d96db211dda9ca3d86b5d
|
d10fa3e7bf5a711415c3fb9dafea331ac5273bf5
|
refs/heads/master
| 2021-05-28T13:30:17.950676
| 2015-02-12T21:31:02
| 2015-02-12T21:31:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,140
|
py
|
from mock import patch, mock_open
import dispatch.state as state
class TestState(object):
@patch('dispatch.state.open', create=True)
@patch('os.path.exists', new=lambda x: True)
@patch('dispatch.state.ARGS', create=True)
def test_persisted_queue_two(self, mock_args, mock_open_queue):
file_data = '[{"id":"one", "location": "bar", "port": 1234, '\
'"resource": "baz", "running": true, ' \
'"data": "somescript", "uris": ["http://foo/"]}, ' \
'{"id":"two", "location": "bar", "port": 456, '\
'"resource": "baz", "running": true, ' \
'"data": "somescript", "uris": ["http://bar/"]}]'
mock_open(mock=mock_open_queue, read_data=file_data)
mock_args.queue_dir = 'foo'
state.CURRENT = state.State()
assert len(state.CURRENT.queue.queue) == 2
@patch('dispatch.state.open', create=True)
@patch('os.path.exists', new=lambda x: True)
@patch('dispatch.state.ARGS', create=True)
def test_persisted_queue_one(self, mock_args, mock_open_queue):
file_data = '[{"id":"foo", "location": "bar", "port": 1234, '\
'"resource": "baz", "running": true, ' \
'"data": "somescript", "uris": ["http://foo/"]}]'
mock_open(mock=mock_open_queue, read_data=file_data)
mock_args.queue_dir = 'foo'
state.CURRENT = state.State()
assert len(state.CURRENT.queue.queue) == 1
@patch('dispatch.state.open', create=True)
@patch('os.path.exists', new=lambda x: True)
@patch('dispatch.state.ARGS', create=True)
def test_persisted_queue_zero(self, mock_args, mock_open_queue):
file_data = '[]'
mock_open(mock=mock_open_queue, read_data=file_data)
mock_args.queue_dir = 'foo'
state.CURRENT = state.State()
assert len(state.CURRENT.queue.queue) == 0
@patch('os.path.exists', new=lambda x: False)
@patch('dispatch.state.ARGS', create=True)
def test_persisted_queue_no_file(self, mock_args):
mock_args.queue_dir = 'foo'
state.CURRENT = state.State()
assert len(state.CURRENT.queue.queue) == 0
|
[
"daniel@netkine.com"
] |
daniel@netkine.com
|
2e6f995fee6960e139a86b9d6ab651d51ea0ba6e
|
ef17666167c1f655b957f0f523358f7cd242985d
|
/src/coreclr/scripts/superpmi_collect_setup.py
|
84240c90418f7347c057160188c6318e61e5b5e8
|
[
"MIT"
] |
permissive
|
xtqqczze/dotnet-runtime
|
83301c91bfb611046f489a38cfece063f29ba979
|
3bd33dd53f0490e1e188d13f4c8a553aae7b384a
|
refs/heads/main
| 2023-08-17T03:52:14.678867
| 2023-06-02T23:53:11
| 2023-06-02T23:53:11
| 234,144,307
| 0
| 0
|
MIT
| 2023-05-30T00:40:18
| 2020-01-15T18:20:48
|
C#
|
UTF-8
|
Python
| false
| false
| 28,003
|
py
|
#!/usr/bin/env python3
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
# Title : superpmi_collect_setup.py
#
# Notes:
#
# Script to setup directory structure required to perform SuperPMI collection in CI.
# It does the following steps:
# 1. Create `correlation_payload_directory` that contains files from CORE_ROOT, src\coreclr\scripts.
# This directory is the one that is sent to all the helix machines that perform SPMI collections.
# 2. For PMI collections, clone dotnet/jitutils, build it and then copy the `pmi.dll` to
# `correlation_payload_directory` folder.
# 3. For PMI/crossgen2 collections, the `input_directory` directory contains the set of assemblies
# to collect over. This script will partition these folders into equal buckets of approximately
# `max_size` bytes and stores them under the workitem payload directory. Each sub-folder inside
# this directory is sent to an individual helix machine to do SPMI collection on. E.g. for
# `input_directory` to be run on libraries, the parameter is the path to `CORE_ROOT` folder and
# this script will copy `max_size` bytes of those files under
# `payload/collectAssembliesDirectory/libraries/0/binaries`,
# `payload/collectAssembliesDirectory/libraries/1/binaries` and so forth.
# 4. For benchmarks collections, a specialized script is called to set up the benchmarks collection.
# 5. Lastly, it sets the pipeline variables.
#
# Below are the helix queues and images it sets depending on the OS/architecture (accepted format by Helix is either "QueueName" or "(DisplayName)QueueName@Image")
# | Arch | windows | Linux | macOS |
# |-------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------|
# | x86 | Windows.10.Amd64.X86.Rt | - | - |
# | x64 | Windows.10.Amd64.X86.Rt | Ubuntu.1804.Amd64 | OSX.1014.Amd64 |
# | arm | - | (Ubuntu.1804.Arm32)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7 | - |
# | arm64 | Windows.11.Arm64 | (Ubuntu.1804.Arm64)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8 | OSX.1100.ARM64 |
#
################################################################################
################################################################################
import argparse
import os
import shutil
import stat
from coreclr_arguments import *
from jitutil import run_command, copy_directory, copy_files, set_pipeline_variable, ChangeDir, TempDir
# Start of parser object creation.
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-collection_type", required=True, help="Type of the SPMI collection to be done (crossgen2, pmi, run, run_tiered, run_pgo)")
parser.add_argument("-collection_name", required=True, help="Name of the SPMI collection to be done (e.g., libraries, libraries_tests, coreclr_tests, benchmarks)")
parser.add_argument("-payload_directory", required=True, help="Path to payload directory to create: subdirectories are created for the correlation payload as well as the per-partition work items")
parser.add_argument("-source_directory", required=True, help="Path to source directory")
parser.add_argument("-core_root_directory", required=True, help="Path to Core_Root directory")
parser.add_argument("-arch", required=True, help="Architecture")
parser.add_argument("-platform", required=True, help="OS platform")
parser.add_argument("-mch_file_tag", help="Tag to be used to mch files")
parser.add_argument("-input_directory", help="Directory containing assemblies which SuperPMI will use for collection (for pmi/crossgen2 collections)")
parser.add_argument("-max_size", help="Max size of each partition in MB (for pmi/crossgen2 collections)")
is_windows = platform.system() == "Windows"
legal_collection_types = [ "crossgen2", "pmi", "run", "run_tiered", "run_pgo" ]
directories_to_ignore = [
"runtimes", # This appears to be the result of a nuget package that includes a bunch of native code
]
native_binaries_to_ignore = [
"api-ms-win-core-console-l1-1-0.dll",
"api-ms-win-core-datetime-l1-1-0.dll",
"api-ms-win-core-debug-l1-1-0.dll",
"api-ms-win-core-errorhandling-l1-1-0.dll",
"api-ms-win-core-file-l1-1-0.dll",
"api-ms-win-core-file-l1-2-0.dll",
"api-ms-win-core-file-l2-1-0.dll",
"api-ms-win-core-handle-l1-1-0.dll",
"api-ms-win-core-heap-l1-1-0.dll",
"api-ms-win-core-interlocked-l1-1-0.dll",
"api-ms-win-core-libraryloader-l1-1-0.dll",
"api-ms-win-core-localization-l1-2-0.dll",
"api-ms-win-core-memory-l1-1-0.dll",
"api-ms-win-core-namedpipe-l1-1-0.dll",
"api-ms-win-core-processenvironment-l1-1-0.dll",
"api-ms-win-core-processthreads-l1-1-0.dll",
"api-ms-win-core-processthreads-l1-1-1.dll",
"api-ms-win-core-profile-l1-1-0.dll",
"api-ms-win-core-rtlsupport-l1-1-0.dll",
"api-ms-win-core-string-l1-1-0.dll",
"api-ms-win-core-synch-l1-1-0.dll",
"api-ms-win-core-synch-l1-2-0.dll",
"api-ms-win-core-sysinfo-l1-1-0.dll",
"api-ms-win-core-timezone-l1-1-0.dll",
"api-ms-win-core-util-l1-1-0.dll",
"api-ms-win-crt-conio-l1-1-0.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-locale-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-multibyte-l1-1-0.dll",
"api-ms-win-crt-private-l1-1-0.dll",
"api-ms-win-crt-process-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-time-l1-1-0.dll",
"api-ms-win-crt-utility-l1-1-0.dll",
"clretwrc.dll",
"clrgc.dll",
"clrjit.dll",
"clrjit_universal_arm_arm.dll",
"clrjit_universal_arm_arm64.dll",
"clrjit_universal_arm_x64.dll",
"clrjit_universal_arm_x86.dll",
"clrjit_universal_arm64_arm64.dll",
"clrjit_universal_arm64_x64.dll",
"clrjit_unix_arm_arm.dll",
"clrjit_unix_arm_arm64.dll",
"clrjit_unix_arm_x64.dll",
"clrjit_unix_arm_x86.dll",
"clrjit_unix_arm64_arm64.dll",
"clrjit_unix_arm64_x64.dll",
"clrjit_unix_armel_arm.dll",
"clrjit_unix_armel_arm64.dll",
"clrjit_unix_armel_x64.dll",
"clrjit_unix_armel_x86.dll",
"clrjit_unix_osx_arm64_arm64.dll",
"clrjit_unix_osx_arm64_x64.dll",
"clrjit_unix_x64_arm64.dll",
"clrjit_unix_x64_x64.dll",
"clrjit_win_arm_arm.dll",
"clrjit_win_arm_arm64.dll",
"clrjit_win_arm_x64.dll",
"clrjit_win_arm_x86.dll",
"clrjit_win_arm64_arm64.dll",
"clrjit_win_arm64_x64.dll",
"clrjit_win_x64_arm64.dll",
"clrjit_win_x64_x64.dll",
"clrjit_win_x86_arm.dll",
"clrjit_win_x86_arm64.dll",
"clrjit_win_x86_x64.dll",
"clrjit_win_x86_x86.dll",
"coreclr.dll",
"CoreConsole.exe",
"coredistools.dll",
"CoreRun.exe",
"CoreShim.dll",
"createdump.exe",
"crossgen.exe",
"crossgen2.exe",
"dbgshim.dll",
"e_sqlite3.dll",
"FileCheck.exe",
"ilasm.exe",
"ildasm.exe",
"jitinterface_arm.dll",
"jitinterface_arm64.dll",
"jitinterface_x64.dll",
"jitinterface_x86.dll",
"KernelTraceControl.dll",
"KernelTraceControl.Win61.dll",
"llvm-mca.exe",
"mcs.exe",
"Microsoft.DiaSymReader.Native.amd64.dll",
"Microsoft.DiaSymReader.Native.x86.dll",
"mscordaccore.dll",
"mscordbi.dll",
"mscorrc.dll",
"msdia140.dll",
"msquic.dll",
"msvcp140.dll",
"NativeLibrary.dll",
"R2RDump.exe",
"R2RTest.exe",
"sni.dll",
"SuperFileCheck.exe",
"superpmi-shim-collector.dll",
"superpmi-shim-counter.dll",
"superpmi-shim-simple.dll",
"superpmi.exe",
"System.CommandLine.resources.dll", # Managed, but uninteresting
"System.IO.Compression.Native.dll",
"ucrtbase.dll",
"UnloadableAssembly.dll",
"vcruntime140.dll",
"vcruntime140_1.dll",
"xunit.console.exe",
]
MAX_FILES_COUNT = 1500
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"payload_directory",
lambda unused: True,
"Unable to set payload_directory",
modify_arg=lambda payload_directory: os.path.abspath(payload_directory))
coreclr_args.verify(args,
"source_directory",
lambda source_directory: os.path.isdir(source_directory),
"source_directory doesn't exist",
modify_arg=lambda source_directory: os.path.abspath(source_directory))
check_dir = os.path.join(coreclr_args.source_directory, 'src', 'coreclr', 'scripts')
if not os.path.isdir(check_dir):
print("Specified directory {0} doesn't looks like a source directory".format(coreclr_args.source_directory))
sys.exit(1)
coreclr_args.verify(args,
"core_root_directory",
lambda core_root_directory: os.path.isdir(core_root_directory),
"core_root_directory doesn't exist",
modify_arg=lambda core_root_directory: os.path.abspath(core_root_directory))
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"platform",
lambda unused: True,
"Unable to set platform")
coreclr_args.verify(args,
"mch_file_tag",
lambda unused: True,
"Unable to set mch_file_tag")
coreclr_args.verify(args,
"collection_name",
lambda unused: True,
"Unable to set collection_name")
coreclr_args.verify(args,
"collection_type",
lambda collection_type: collection_type in legal_collection_types,
"Please specify one of the allowed collection types: " + ' '.join(legal_collection_types))
coreclr_args.verify(args,
"input_directory",
lambda input_directory: coreclr_args.collection_type not in [ "pmi", "crossgen2" ] or os.path.isdir(input_directory),
"input_directory doesn't exist",
modify_arg=lambda input_directory: None if input_directory is None else os.path.abspath(input_directory))
coreclr_args.verify(args,
"max_size",
lambda max_size: coreclr_args.collection_type not in [ "pmi", "crossgen2" ] or max_size > 0,
"Please enter valid positive numeric max_size",
modify_arg=lambda max_size: int(
max_size) * 1000 * 1000 if max_size is not None and max_size.isnumeric() else 0
# Convert to MB
)
return coreclr_args
def get_files_sorted_by_size(src_directory, exclude_directories, exclude_files):
""" For a given src_directory, returns all the .dll files sorted by size.
Args:
src_directory (string): Path of directory to enumerate.
exclude_directories ([string]): Directory names to exclude.
exclude_files ([string]): File names to exclude.
"""
def sorter_by_size(pair):
""" Sorts the pair (file_name, file_size) tuple in descending order of file_size
Args:
pair ([(string, int)]): List of tuple of file_name, file_size
"""
pair.sort(key=lambda x: x[1], reverse=True)
return pair
filename_with_size = []
exclude_files_lower = [filename.lower() for filename in exclude_files]
for file_path, dirs, files in os.walk(src_directory, topdown=True):
# Credit: https://stackoverflow.com/a/19859907
dirs[:] = [d for d in dirs if d not in exclude_directories]
for name in files:
# Make the exclude check case-insensitive
if name.lower() in exclude_files_lower:
continue
curr_file_path = os.path.join(file_path, name)
if not os.path.isfile(curr_file_path):
continue
if not name.endswith(".dll") and not name.endswith(".exe"):
continue
size = os.path.getsize(curr_file_path)
filename_with_size.append((curr_file_path, size))
return sorter_by_size(filename_with_size)
def first_fit(sorted_by_size, max_size):
""" Given a list of file names along with size in descending order, divides the files
in number of buckets such that each bucket doesn't exceed max_size (unless a single file exceeds
max_size, in which case it gets its own bucket). Since this is a first-fit
    approach, it is not guaranteed to find the bucket with the tightest remaining space.
Args:
sorted_by_size ((string, int)): (file_name, file_size) tuple
max_size (int): Maximum size (in bytes) of each bucket.
Returns:
[{int, [string]}]: Returns a dictionary of partition-index to list of file names following in that bucket.
"""
partitions = {}
for curr_file in sorted_by_size:
_, file_size = curr_file
# Find the right bucket
found_bucket = False
if file_size < max_size:
for p_index in partitions:
total_in_curr_par = sum(n for _, n in partitions[p_index])
if ((total_in_curr_par + file_size) < max_size) and (len(partitions[p_index]) < MAX_FILES_COUNT):
partitions[p_index].append(curr_file)
found_bucket = True
break
if not found_bucket:
partitions[len(partitions)] = [curr_file]
total_size = 0
for p_index in partitions:
partition_size = sum(n for _, n in partitions[p_index])
print("Partition {0}: {1} files with {2} bytes.".format(p_index, len(partitions[p_index]), partition_size))
total_size += partition_size
print("Total {0} partitions with {1} bytes.".format(str(len(partitions)), total_size))
return partitions
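# A tiny illustration of the first-fit packing above (file names and sizes are invented):
# with max_size = 100 and files sorted by size [("a.dll", 60), ("b.dll", 50), ("c.dll", 30)],
# "a.dll" opens partition 0; "b.dll" does not fit next to it (60 + 50 is not < 100), so it
# opens partition 1; "c.dll" fits back into partition 0 (60 + 30 < 100), giving
# partitions {0: [a.dll, c.dll], 1: [b.dll]}.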
def partition_files(src_directory, dst_directory, max_size, exclude_directories=[],
exclude_files=native_binaries_to_ignore):
""" Copy bucketized files based on size to destination folder.
Args:
src_directory (string): Source folder containing files to be copied.
dst_directory (string): Destination folder where files should be copied.
max_size (int): Maximum partition size in bytes
exclude_directories ([string]): List of folder names to be excluded.
exclude_files ([string]): List of files names to be excluded.
"""
print('Partitioning files from {0} to {1}'.format(src_directory, dst_directory))
sorted_by_size = get_files_sorted_by_size(src_directory, exclude_directories, exclude_files)
partitions = first_fit(sorted_by_size, max_size)
index = 0
for p_index in partitions:
file_names = [curr_file[0] for curr_file in partitions[p_index]]
curr_dst_path = os.path.join(dst_directory, str(index), "binaries")
copy_files(src_directory, curr_dst_path, file_names)
index += 1
def setup_microbenchmark(workitem_directory, arch):
""" Perform setup of microbenchmarks
Args:
workitem_directory (string): Path to work
arch (string): Architecture for which dotnet will be installed
"""
performance_directory = os.path.join(workitem_directory, "performance")
run_command(
["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory])
try:
shutil.rmtree(os.path.join(performance_directory, ".git"))
except Exception as ex:
print("Warning: failed to remove directory \"%s\": %s", os.path.join(performance_directory, ".git"), ex)
with ChangeDir(performance_directory):
dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch)
dotnet_install_script = os.path.join(performance_directory, "scripts", "dotnet.py")
if not os.path.isfile(dotnet_install_script):
print("Missing " + dotnet_install_script)
return
# Sometimes the dotnet version installed by the script is latest and expect certain versions of SDK that
# have not published yet. As a result, we hit errors of "dotnet restore". As a workaround, hard code the
# working version until we move to ".NET 8" in the script.
run_command(
get_python_name() + [dotnet_install_script, "install", "--channels", "8.0-preview", "--architecture", arch, "--install-dir",
dotnet_directory, "--verbose"])
def get_python_name():
"""Gets the python name
Returns:
[string]: Returns the appropriate python name depending on the OS.
"""
if is_windows:
return ["py", "-3"]
else:
return ["python3"]
def main(main_args):
""" Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
coreclr_args = setup_args(main_args)
source_directory = coreclr_args.source_directory
# If the payload directory doesn't already exist (it probably shouldn't) then create it.
if not os.path.isdir(coreclr_args.payload_directory):
os.makedirs(coreclr_args.payload_directory)
correlation_payload_directory = os.path.join(coreclr_args.payload_directory, 'correlation')
workitem_payload_directory = os.path.join(coreclr_args.payload_directory, 'workitem')
superpmi_src_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
# Correlation payload directories (sent to every Helix machine).
# Currently, all the Core_Root files, superpmi script files, and pmi.dll go in the same place.
superpmi_dst_directory = os.path.join(correlation_payload_directory, "superpmi")
core_root_dst_directory = superpmi_dst_directory
# Workitem directories
# input_artifacts is only used for pmi/crossgen2 collections.
input_artifacts = ""
arch = coreclr_args.arch
platform_name = coreclr_args.platform.lower()
helix_source_prefix = "official"
creator = ""
ci = True
# Determine the Helix queue name to use when running jobs.
if platform_name == "windows":
helix_queue = "Windows.11.Arm64" if arch == "arm64" else "Windows.10.Amd64.X86.Rt"
elif platform_name == "linux":
if arch == "arm":
helix_queue = "(Ubuntu.1804.Arm32)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7"
elif arch == "arm64":
helix_queue = "(Ubuntu.1804.Arm64)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8"
else:
helix_queue = "Ubuntu.1804.Amd64"
elif platform_name == "osx":
helix_queue = "OSX.1100.ARM64" if arch == "arm64" else "OSX.1014.Amd64"
# Copy the superpmi scripts
print('Copying {} -> {}'.format(superpmi_src_directory, superpmi_dst_directory))
copy_directory(superpmi_src_directory, superpmi_dst_directory, verbose_output=True, match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))
# Copy Core_Root
if platform_name == "windows":
acceptable_copy = lambda path: any(path.endswith(extension) for extension in [".py", ".dll", ".exe", ".json"])
else:
acceptable_extensions = [".py", ".dll", ".json"]
acceptable_extensions.append(".so" if platform_name == "linux" else ".dylib")
# Need to accept files without any extension, which is how executable file's names look.
acceptable_copy = lambda path: (os.path.basename(path).find(".") == -1) or any(path.endswith(extension) for extension in acceptable_extensions)
print('Copying {} -> {}'.format(coreclr_args.core_root_directory, core_root_dst_directory))
copy_directory(coreclr_args.core_root_directory, core_root_dst_directory, verbose_output=True, match_func=acceptable_copy)
if coreclr_args.collection_name == "benchmarks":
# Setup microbenchmarks
setup_microbenchmark(workitem_payload_directory, arch)
else:
# Setup for pmi/crossgen2 runs
# For libraries tests, copy all the test files to the single
# The reason is there are lot of dependencies with *.Tests.dll and to ensure we do not get
# Reflection errors, just copy everything to CORE_ROOT so for all individual partitions, the
# references will be present in CORE_ROOT.
if coreclr_args.collection_name == "libraries_tests":
def make_readable(folder_name):
"""Make file executable by changing the permission
Args:
folder_name (string): folder to mark with 744
"""
if is_windows:
return
print("Inside make_readable")
run_command(["ls", "-l", folder_name])
for file_path, dirs, files in os.walk(folder_name, topdown=True):
for d in dirs:
os.chmod(os.path.join(file_path, d),
# read+write+execute for owner
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
# read for group
(stat.S_IRGRP) |
# read for other
(stat.S_IROTH))
for f in files:
os.chmod(os.path.join(file_path, f),
# read+write+execute for owner
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
# read for group
(stat.S_IRGRP) |
# read for other
(stat.S_IROTH))
run_command(["ls", "-l", folder_name])
make_readable(coreclr_args.input_directory)
print('Copying {} -> {}'.format(coreclr_args.input_directory, core_root_dst_directory))
copy_directory(coreclr_args.input_directory, core_root_dst_directory, verbose_output=True, match_func=acceptable_copy)
# We need the PMI tool if we're doing a PMI collection. We could download a cached copy from Azure DevOps JIT blob
# storage, but instead we clone and build jitutils to build pmi.dll.
if coreclr_args.collection_type == "pmi":
try:
with TempDir() as jitutils_directory:
run_command(
["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory])
# Make sure ".dotnet" directory exists, by running the script at least once
dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
dotnet_script_path = os.path.join(source_directory, dotnet_script_name)
run_command([dotnet_script_path, "--info"], jitutils_directory)
# Set dotnet path to run build
os.environ["PATH"] = os.path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]
build_file = "build.cmd" if is_windows else "build.sh"
run_command([os.path.join(jitutils_directory, build_file), "-p"], jitutils_directory)
copy_files(os.path.join(jitutils_directory, "bin"), core_root_dst_directory, [os.path.join(jitutils_directory, "bin", "pmi.dll")])
except PermissionError as pe_error:
# Details: https://bugs.python.org/issue26660
print('Ignoring PermissionError: {0}'.format(pe_error))
# NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a
# Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2.
# The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios.
# # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll
# if coreclr_args.collection_type == "crossgen2":
# dotnet_src_directory = os.path.join(source_directory, ".dotnet")
# dotnet_dst_directory = os.path.join(correlation_payload_directory, ".dotnet")
# print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory))
# copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False)
input_artifacts = os.path.join(workitem_payload_directory, "collectAssembliesDirectory", coreclr_args.collection_name)
exclude_directories = list(directories_to_ignore)
if coreclr_args.collection_name == "coreclr_tests":
exclude_directories += ['Core_Root']
exclude_files = list(native_binaries_to_ignore)
if coreclr_args.collection_type == "crossgen2":
print('Adding exclusions for crossgen2')
# Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it.
exclude_files += ["Microsoft.Build.dll"]
if coreclr_args.collection_name == "libraries_tests":
# libraries_tests artifacts contains files from core_root folder. Exclude them.
core_root_dir = coreclr_args.core_root_directory
exclude_files += [item for item in os.listdir(core_root_dir)
if os.path.isfile(os.path.join(core_root_dir, item)) and (item.endswith(".dll") or item.endswith(".exe"))]
partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directories,
exclude_files)
# Set variables
print('Setting pipeline variables:')
set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
set_pipeline_variable("WorkItemDirectory", workitem_payload_directory)
set_pipeline_variable("InputArtifacts", input_artifacts)
set_pipeline_variable("Python", ' '.join(get_python_name()))
set_pipeline_variable("Architecture", arch)
set_pipeline_variable("Creator", creator)
set_pipeline_variable("Queue", helix_queue)
set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
|
[
"noreply@github.com"
] |
xtqqczze.noreply@github.com
|
c956bb5027f0ebb151dac38418f947b1d1a7f1b4
|
72e09a49aef4cf271850405ad00dc474bdbe8fb5
|
/conftest.py
|
f051d407dbe9791345fff4fe4eac4d7fd917feb8
|
[] |
no_license
|
roma123test/test_task_ui
|
ceb67ae6c99884ad634fa06418e73f8ebaffa0c1
|
641fff586a243b1c2ff6fcba39896a4017dc07fc
|
refs/heads/master
| 2020-04-27T18:31:03.464586
| 2019-03-11T15:07:42
| 2019-03-11T15:07:42
| 174,574,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
import random
import string
import pytest
from webdriver_manager.driver import ChromeDriver
from src.constants.users import BY_DEFAULT
from src.pages.Login_page import LoginPage
from src.pages.Main_page import MainPage
# generate random names for devices
@pytest.fixture(scope="session")
def generate_new_value():
list = [id_generator(), id_generator()]
return list
# pre- & post- conditions to each test
@pytest.yield_fixture()
def browser():
browser.driver = ChromeDriver(os_type="windows", version="latest")
# login to keeptit
LoginPage().open_url().login_as(user=BY_DEFAULT)
# teardown method - delete all created test devices before test starts
MainPage().delete_all_test_items_in_table()
b = {}
yield b
# teardown method - delete all created test devices after test is completed
MainPage().delete_all_test_items_in_table()
MainPage().log_out()
def id_generator(size=3, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
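# Editor's note: an illustrative (commented-out) example of how a test would typically
# consume the fixtures above; the MainPage methods used here are hypothetical and exist
# only for this sketch, they are not necessarily part of this project's page objects.
# def test_add_device(browser, generate_new_value):
#     device_name = generate_new_value[0]                   # one of the pre-generated random names
#     MainPage().add_device(device_name)                    # hypothetical page-object action
#     assert device_name in MainPage().get_device_names()   # hypothetical assertion helper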
|
[
"romkhimka@gmail.com"
] |
romkhimka@gmail.com
|
2b3d49431a0f55f51e3a3665adc483d6d0ed0209
|
cbbef22e33fc21f0768b89dc688701e9387cabf2
|
/examples/mayavi_webgl_demo.py
|
43ed96dc05baf62beedd1c1eef0d1cc1348d3fa1
|
[] |
no_license
|
r0k3/jigna
|
ec3b8f76d95267d3d69a943b77646467e4251b4d
|
3f8ad0adc31e9f3fb36aba3684e723ea0bec9199
|
refs/heads/master
| 2021-01-24T03:21:37.513679
| 2014-02-22T11:02:40
| 2014-02-22T11:02:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,809
|
py
|
import json
import numpy as np
from mayavi import mlab
from mayavi.core.api import PipelineBase
from traits.api import HasTraits, Instance, Int, Str
from tvtk.api import tvtk
mlab.options.offscreen = True
mlab.options.backend = 'test'
def dataset_to_string(dataset, **kwargs):
"""Given a TVTK `dataset` this writes the `dataset` to an old style VTK
file.
Any additional keyword arguments are passed to the writer used.
"""
err_msg = "Can only write tvtk.DataSet instances "\
"'got %s instead"%(dataset.__class__.__name__)
assert isinstance(dataset, tvtk.DataSet), err_msg
# Mapping to determine appropriate extension and writer.
d2r = {'vtkImageData': ('.vti', tvtk.StructuredPointsWriter),
'vtkRectilinearGrid': ('.vtr', tvtk.RectilinearGridWriter),
'vtkStructuredGrid': ('.vts', tvtk.StructuredGridWriter),
'vtkPolyData': ('.vtp', tvtk.PolyDataWriter),
'vtkUnstructuredGrid': ('.vtu', tvtk.UnstructuredGridWriter)
}
for type in d2r:
if dataset.is_a(type):
datatype = d2r[type]
break
writer = datatype[1](write_to_output_string=True, input=dataset, **kwargs)
writer.write()
return writer.output_string
def get_point_idx_from_poly(dataset):
"""Given the dataset, this gets the polygon connectivity array
and generates the indices of the points in the polygon.
"""
conn = dataset.polys.to_array()
npoly = conn.size/4
choice = np.zeros(npoly*3, dtype=int)
for start in (1, 2, 3):
choice[start-1::3] = np.arange(start, npoly*4, step=4)
return conn[choice]
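# Editor's note (worked example, not part of the original module): VTK stores polygon
# connectivity as [n, i0, ..., i_{n-1}, n, ...]; get_point_idx_from_poly assumes every
# cell is a triangle (hence conn.size/4), so for two triangles
#   conn = [3, 0, 1, 2,  3, 2, 3, 0]
# the function returns the point indices [0, 1, 2, 2, 3, 0] (the leading 3s dropped).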
def get_colors(dataset, module_manager):
scm = module_manager.scalar_lut_manager
scalars = dataset.point_data.scalars
return scm.lut.map_scalars(scalars, 0, -1).to_array()/255.
class MeshData(HasTraits):
# Optional data from a file that can be used.
filedata = Str('')
# The points for the mesh, this is a json string of a list.
points = Str
# The normals for the mesh, this is a json string of a list.
normals = Str
# The colors to use for the points, again a json string.
colors = Str
# The type of the mesh, for now only polygons are supported.
type = Str("POLYGONS")
@classmethod
def from_file(cls, dataset, module_manager):
filedata = dataset_to_string(dataset)
point_idx = get_point_idx_from_poly(dataset)
colors = get_colors(dataset, module_manager)
colors_xtk = colors[point_idx]
result = MeshData(filedata=filedata,
colors=json.dumps(colors_xtk.tolist()))
return result
@classmethod
def from_data(cls, dataset, module_manager):
points = dataset.points.to_array()
normals = dataset.point_data.normals.to_array()
colors = get_colors(dataset, module_manager)
point_idx = get_point_idx_from_poly(dataset)
result = MeshData()
data = {'points': points, 'normals': normals, 'colors': colors}
for attr, array in data.iteritems():
arr_xtk = array[point_idx]
setattr(result, attr, json.dumps(arr_xtk.tolist()))
return result
class Model3D(HasTraits):
expression = Str("x*x*0.5 + y*y + z*z*2.0")
plot_output = Instance(MeshData, ())
n_contour = Int(4)
plot = Instance(PipelineBase)
def __init__(self, **traits):
super(Model3D, self).__init__(**traits)
self._expression_changed(self.expression)
def _expression_changed(self, expr):
if self.plot is None:
x, y, z = np.mgrid[-5:5:32j, -5:5:32j, -5:5:32j]
else:
x, y, z = self.x, self.y, self.z
g = np.__dict__
try:
s = eval(expr, g, dict(x=x, y=y, z=z))
except:
pass
else:
if self.plot is None:
self.x, self.y, self.z = x, y, z
self.plot = mlab.contour3d(x, y, z, s, contours=self.n_contour)
else:
self.plot.mlab_source.set(scalars=s)
self._setup_plot_output()
def _n_contour_changed(self, value):
if 0 < value < 20:
self.plot.contour.number_of_contours = value
self._setup_plot_output()
def _setup_plot_output(self):
self.plot_output.copy_traits(
MeshData.from_file(
self.plot.contour.outputs[0],
self.plot.module_manager
)
)
body_html = """
<script type="text/javascript" src="http://get.goXTK.com/xtk.js"></script>
<script>
window.on_data_changed = function(new_data) {
var mesh = window.mesh;
mesh.file = null;
if (new_data.filedata.length > 0) {
var arr = window.strtobuf(new_data.filedata);
mesh.filedata = arr;
var p = new X.parserVTK();
p.parse(mesh, mesh, arr, null);
}
else {
mesh.points = window.array_to_triplets(new_data.points);
if (new_data.normals.length > 0) {
mesh.normals = window.array_to_triplets(new_data.normals);
}
mesh.type = new_data.type;
}
if (new_data.colors.length > 0) {
mesh.colors = window.array_to_triplets(new_data.colors);
}
mesh.modified();
};
window.strtobuf = function (str) {
var buf = new Uint8Array(str.length);
for (var i=0; i<str.length; i++) {
buf[i] = str.charCodeAt(i);
}
return buf;
};
window.array_to_triplets = function(json_data) {
var data = JSON.parse(json_data);
var triplets = new X.triplets(4*3*data.length);
for (var i=0; i<data.length; i++) {
triplets.add(data[i][0], data[i][1], data[i][2]);
}
return triplets;
};
window.onload = function() {
var r = new X.renderer3D();
window.renderer = r;
r.container = "3d-scene";
r.init();
var mesh = new X.mesh();
window.mesh = mesh;
r.add(mesh);
r.render();
setTimeout(function() {
window.on_data_changed(jigna.models.model.plot_output);
}, 1000
);
$(document.body).scope().$watchCollection(
"[model.plot_output.filedata, model.plot_output.colors]",
function (new_data) {
console.log("Updating plot.");
window.on_data_changed(jigna.models.model.plot_output);
});
};
</script>
<div>
Expression: <input ng-model="model.expression">
<br>
Number of contours: <input type="number" ng-model="model.n_contour" min="1" max="10">
<div id="3d-scene" style="background-color: #000; height:80%;">
</div>
</div>
"""
def main():
import webbrowser
from jigna.api import View
model = Model3D()
view = View(body_html=body_html)
webbrowser.open_new('http://localhost:8888')
view.serve(model=model)
if __name__ == '__main__':
main()
|
[
"prabhu@enthought.com"
] |
prabhu@enthought.com
|
f972915025a9fa882981db4531f591c88cecf896
|
13a74d3b771feb3c4b288af253dea4da825e70ca
|
/grouproj/env/Scripts/django-admin.py
|
b79deda42a47aee6e99ccbd29c9094ff7fdb03ad
|
[] |
no_license
|
rafailiadimaki/Group_Project
|
e4e02276e08cc1e551d1e5fabfdc75ce8a5a1394
|
586a53e030e1892ddda9c3d37b3d0ad07c0caaa7
|
refs/heads/main
| 2023-07-14T23:59:52.813436
| 2021-09-06T18:11:17
| 2021-09-06T18:11:17
| 402,742,651
| 0
| 0
| null | 2021-09-03T11:12:04
| 2021-09-03T11:12:03
| null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
#!c:\pythonstuff\grouproj\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"egiork@yahoo.com"
] |
egiork@yahoo.com
|
0207fc00bbb68968b30eb508747167c0cb7b24eb
|
a942f764828f7f0a215fc06ffd0d5ace67cb29f6
|
/setup.py
|
4d1ac260b14784d682e386c8d767bca01712575e
|
[
"Apache-2.0"
] |
permissive
|
monikernemo/aesthetics
|
c2db96c734b7d58ac9d9e0d9b309a9dda8eab8f6
|
f854c285a02f02e5d136266b605162a9421cfc0b
|
refs/heads/master
| 2020-06-06T04:32:45.601865
| 2019-06-19T08:40:14
| 2019-06-19T08:40:14
| 192,638,432
| 0
| 0
|
Apache-2.0
| 2019-06-19T01:44:41
| 2019-06-19T01:44:40
| null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
from distutils.core import setup
from setuptools import find_packages
def get_version():
return '0.1.1'
def get_requirements():
with open('requirements.txt', 'rU') as fhan:
requires = [line.strip() for line in fhan.readlines()]
return requires
def get_long_description():
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.txt') as fhan:
long_description = fhan.read()
return long_description
add_keywords = dict(
entry_points={
'console_scripts': ['aesthetics = aesthetics.cli:main'],
}, )
setup(
name='Aesthetics',
description='Image Aesthetics Toolkit',
version=get_version(),
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
license='GPLv3+',
author='Shubham Chaudhary',
author_email='me@shubhamchaudhary.in',
url='https://github.com/shubhamchaudhary/aesthetics',
long_description=get_long_description(),
install_requires=get_requirements(),
**add_keywords)
|
[
"me@shubhamchaudhary.in"
] |
me@shubhamchaudhary.in
|
73e9c1e3a9f77d0cd400a1fc661ab8139800f386
|
c576ce575fcc02409cae55cbd21e089e0d8bc58c
|
/banner updater.py
|
1b09c305c080f1097cd0641553cd70374c74776f
|
[
"MIT"
] |
permissive
|
googed/banner-bot
|
e680c2d574f659199aea4772b2c339e4af5570dd
|
540d462959094e945a373583b3c4d69ee062c229
|
refs/heads/master
| 2021-01-10T10:47:55.020422
| 2016-04-14T16:41:39
| 2016-04-14T16:41:39
| 55,941,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,874
|
py
|
# The main subreddit's sidebar must include strings to denote the beginning and ending location of the list, the bot will not update the sidebar if these strings are not present
# With the default delimiters the sidebar should include a chunk of text like:
# [](#banner_start)
# banner text here
# [](#banner_end)
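# Editor's note (illustrative only, not taken from the original wiki): each section of
# the schedule wiki page is a YAML document using the keys validated further below --
# `title`, `first` and `url` are required, `repeat` (e.g. "2 weeks") or `rrule` are
# optional. A section might look like:
#
#   title: April banners
#   first: April 1, 2016 00:00 UTC
#   repeat: 1 month
#   url: http://imgur.com/a/XXXXX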
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
import HTMLParser
import logging, logging.config, re, sys, os
from time import time
from dateutil import parser, rrule, tz
import praw
from requests.exceptions import HTTPError
from sqlalchemy import create_engine
from sqlalchemy import Boolean, Column, DateTime, String, Text, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
import yaml
import random
import requests
from imgurpython import ImgurClient
# global reddit session
r = None
cfg_file = SafeConfigParser()
path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_cfg = os.path.join(path_to_cfg, 'schedulebot.cfg')
cfg_file.read(path_to_cfg)
if cfg_file.get('database', 'system').lower() == 'sqlite':
engine = create_engine(
cfg_file.get('database', 'system')+':///'+\
cfg_file.get('database', 'database'))
else:
engine = create_engine(
cfg_file.get('database', 'system')+'://'+\
cfg_file.get('database', 'username')+':'+\
cfg_file.get('database', 'password')+'@'+\
cfg_file.get('database', 'host')+'/'+\
cfg_file.get('database', 'database'))
print "engine running..."
Base = declarative_base()
Session = sessionmaker(bind=engine, expire_on_commit=False)
session = Session()
class Subreddit(Base):
"""Table containing the subreddits for the bot to monitor.
name - The subreddit's name. "gaming", not "/r/gaming".
enabled - Subreddit schedule will not be executed if False
schedule_yaml - YAML definition of the subreddit's schedule
updated - Time that the subreddit was last updated (UTC)
"""
__tablename__ = 'schedule'
name = Column(Text, nullable=False, primary_key=True)
enabled = Column(Integer, nullable=False, default=1)
schedule_yaml = Column(Text)
updated = Column(Integer, nullable=False)
banner_limit = Column(Integer, nullable = False, default=1)
banner_name = Column(Text, nullable = False, default='banner')
class ScheduledEvent(object):
_defaults = {'repeat': None,
'rrule': None,
'url': None,
'title': None}
repeat_regex = re.compile(r'^(\d+)\s+(minute|hour|day|week|month|year)s?$')
url_regex = re.compile(r'^https?:\/\/imgur\.com\/(a|gallery)\/\w+\/?$')
freq_dict = {'minute': rrule.MINUTELY,
'hour': rrule.HOURLY,
'day': rrule.DAILY,
'week': rrule.WEEKLY,
'month': rrule.MONTHLY,
'year': rrule.YEARLY,
}
def __init__(self, values, default=None):
values = lowercase_keys_recursively(values)
# anything not defined in the "values" dict will be defaulted
init = self._defaults.copy()
init.update(values)
# convert the dict to attributes
self.__dict__.update(init)
try:
self.first = parser.parse(self.first)#, default=default)
if not self.first.tzinfo:
self.first = self.first.replace(tzinfo=tz.tzutc())
except Exception as e:
raise ValueError('Error parsing date from `first`.')
try:
if self.repeat:
match = self.repeat_regex.match(self.repeat)
interval = int(match.group(1))
if interval == 0:
raise ValueError('Invalid repeat interval.')
self.rrule = rrule.rrule(self.freq_dict[match.group(2)],
interval=interval,
dtstart=self.first)
elif self.rrule:
self.rrule = rrule.rrulestr(self.rrule, dtstart=self.first)
except Exception as e:
raise ValueError('Error parsing repeat interval.')
try:
if self.title:
self.title = self.replace_placeholders(self.title)
except Exception as e:
raise ValueError('Error in title')
def is_due(self, start_time, end_time):
if self.rrule and self.rrule.before(start_time, inc=True):
print "Due now? %s: %s" %(bool(self.rrule.between(start_time, end_time, inc=True)), self.title)
print 'next recurrence', self.rrule.after(start_time, inc=True)
return bool(self.rrule.between(start_time, end_time, inc=True)), start_time - self.rrule.before(start_time, inc=True), self.title
else:
print "%s: %s - %s" %("Not started or ended", self.title, self.first)
return start_time <= self.first <= end_time, start_time - end_time, self.title
## def is_album(self, user, COUNT, LIMIT):
## valid_images = 0
## client = ImgurClient(cfg_file.get('imgur', 'client_id'), cfg_file.get('imgur', 'client_secret'))
## album_id = get_album_id(self.url)
## album = client.get_album(album_id)
## if COUNT < LIMIT:
## print('Not enough images!')
## return False
## for image in album.images:
## if image['size'] > 512000:
## valid_images -= 1
## if (COUNT+valid_images) < LIMIT:
## return False
## return True
##
def execute(self, subreddit, BANNER, LIMIT):
global r
client = ImgurClient(cfg_file.get('imgur', 'client_id'), cfg_file.get('imgur', 'client_secret'))
album_id = get_album_id(self.url)
album = client.get_album(album_id)
album_title = self.title
album = album.images
COUNT = len(album)
if COUNT < LIMIT:
print('Not enough images!')
send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'Not enough '
' images in album ["{0}"]({1})'.format(album_title, self.url))
return
# Pick x random ones if greater than limit
if COUNT > LIMIT:
album = random.sample(album, COUNT)
banner_number = 0
sidebar_format = '* [{title}]({link} "{desc}")'
sidebar_lines = []
bigpic = []
for image in album:
if image['size'] > 512000:
print ('too big: %s' %(image['link']))
title = '{0} - ({1} kB) - {2}px x {3}px'.format(image['link'], float(image['size'])/1000, image['width'], image['height'])
bigpic.append(sidebar_format.format(title=title, link=image['link'], desc=image['description']))
continue
banner_number += 1
url = image['link']
local_name = localize_name(album_id, url)
download_image(url, local_name)
title = image['title'] if image['title'] else 'Untitled'
description = image['description'] if image['description'] else ' '
line = sidebar_format.format(title=title, link='#s', desc=description)
css_name = BANNER + '%d' % banner_number
print('%s: adding %s to stylesheet...' % (subreddit, css_name))
try:
r.upload_image(subreddit, local_name, css_name)
except Exception as e:
print (e)
return
sidebar_lines.append(line)
if banner_number >= LIMIT:
break
if banner_number < LIMIT:
print ('Not enough valid images')
send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'Not enough valid'
' images in album ["{0}"]({1}); check that the following image sizes are less than 500kB. '
'Images ideally should be greater than 300px wide and 1:1 or greater aspect ratio: \n\n{2}'.format(album_title, self.url, '\n'.join(bigpic)))
return
bar = '\n'.join(sidebar_lines)
bar = '##### ' + album_title + '\n' + bar + '\n\n'
r.config.decode_html_entities = True
current_sidebar = subreddit.get_settings()['description']
current_sidebar = HTMLParser.HTMLParser().unescape(current_sidebar)
replace_pattern = re.compile('%s.*?%s' % (re.escape(cfg_file.get('reddit', 'start_delimiter')), re.escape(cfg_file.get('reddit', 'end_delimiter'))), re.IGNORECASE|re.DOTALL|re.UNICODE)
new_sidebar = re.sub(replace_pattern,
'%s\\n\\n%s\\n%s' % (cfg_file.get('reddit', 'start_delimiter'), bar, cfg_file.get('reddit', 'end_delimiter')),
current_sidebar)
r.update_settings(subreddit, description=new_sidebar)
print ('%s sidebar updated!' %subreddit)
subreddit.set_stylesheet(subreddit.get_stylesheet()['stylesheet'])
print ('%s stylesheet set!' %subreddit)
if bigpic:
send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'The following '
' images in album ["{0}"]({1}) were not valid and were skipped; check that the following image sizes are less than 500kB. '
'Images ideally should be greater than 300px wide and 1:1 or greater aspect ratio: \n\n{2}'.format(album_title, self.url, '\n'.join(bigpic)))
    def error_album(error):
pass
def replace_placeholders(self, string):
date_regex = re.compile(r'\{\{date([+-]\d+)?\s+([^}]+?)\}\}')
now = datetime.now(self.first.tzinfo)
match = date_regex.search(string)
while match:
date = now
if match.group(1):
offset = int(match.group(1))
date += timedelta(days=offset)
format_str = match.group(2)
string = date_regex.sub(date.strftime(format_str), string, count=1)
match = date_regex.search(string)
return string
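# Editor's note (illustrative, not part of the original bot): event titles may embed
# strftime placeholders that replace_placeholders() above expands, with an optional
# day offset, e.g. a title of
#   "Banners for {{date %B %Y}}"    -> "Banners for April 2016" (for whatever "now" is)
#   "Week of {{date+7 %Y-%m-%d}}"   -> the date seven days from now, e.g. "2016-04-21"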
def download_image(url, local_name):
if os.path.exists(local_name):
return
location = os.path.split(local_name)[0]
if not os.path.exists(location):
os.makedirs(location)
page = requests.get(url)
image = page.content
with open(local_name, 'wb') as f:
f.write(image)
def localize_name(album_id, image_link):
image_name = image_link.split('/')[-1]
return os.path.join('images', album_id, image_name)
def get_album_id(album_url):
album_url = album_url.replace('/gallery/', '/a/')
album_id = album_url.split('/a/')[-1].split('/')[0]
return album_id
def update_from_wiki(subreddit, requester):
print "Updating events from the %s wiki." %subreddit
global r
username = cfg_file.get('reddit', 'username')
try:
page = subreddit.get_wiki_page(cfg_file.get('reddit', 'wiki_page_name'))
except Exception:
send_error_message(requester, subreddit.display_name,
'The wiki page could not be accessed. Please ensure the page '
'http://www.reddit.com/r/{0}/wiki/{1} exists and that {2} '
'has the "wiki" mod permission to be able to access it.'
.format(subreddit.display_name,
cfg_file.get('reddit', 'wiki_page_name'),
username))
return False
html_parser = HTMLParser.HTMLParser()
page_content = html_parser.unescape(page.content_md)
# check that all the events are valid yaml
event_defs = yaml.safe_load_all(page_content)
event_num = 1
try:
for event_def in event_defs:
event_num += 1
except Exception as e:
indented = ''
for line in str(e).split('\n'):
indented += ' {0}\n'.format(line)
send_error_message(requester, subreddit.display_name,
'Error when reading schedule from wiki - '
'Syntax invalid in section #{0}:\n\n{1}'
.format(event_num, indented))
return False
# reload and actually process the events
event_defs = yaml.safe_load_all(page_content)
event_num = 1
kept_sections = []
for event_def in event_defs:
# ignore any non-dict sections (can be used as comments, etc.)
if not isinstance(event_def, dict):
continue
event_def = lowercase_keys_recursively(event_def)
try:
check_event_valid(event_def)
event = ScheduledEvent(event_def)
except ValueError as e:
send_error_message(requester, subreddit.display_name,
'Invalid event in section #{0} - {1}'
.format(event_num, e))
return False
event_num += 1
kept_sections.append(event_def)
# Update the subreddit, or add it if necessary
try:
db_subreddit = (session.query(Subreddit)
.filter(Subreddit.name == subreddit.display_name.lower())
.one())
except NoResultFound:
db_subreddit = Subreddit()
db_subreddit.name = subreddit.display_name.lower()
session.add(db_subreddit)
db_subreddit.updated = datetime.utcnow()
db_subreddit.schedule_yaml = page_content
session.commit()
logging.info("Update from wiki complete")
## r.send_message(requester,
## '{0} schedule updated'.format(username),
## "{0}'s schedule was successfully updated for /r/{1}"
## .format(username, subreddit.display_name))
return True
def lowercase_keys_recursively(subject):
"""Recursively lowercases all keys in a dict."""
lowercased = dict()
for key, val in subject.iteritems():
if isinstance(val, dict):
val = lowercase_keys_recursively(val)
lowercased[key.lower()] = val
return lowercased
def check_event_valid(event):
"""Checks if an event defined on a wiki page is valid."""
print "Validating wiki events..."
validate_keys(event)
validate_values_not_empty(event)
validate_type(event, 'first', basestring)
validate_type(event, 'repeat', basestring)
validate_type(event, 'rrule', basestring)
validate_type(event, 'title', basestring)
validate_regex(event, 'url', ScheduledEvent.url_regex)
validate_regex(event, 'repeat', ScheduledEvent.repeat_regex)
def validate_values_not_empty(check):
for key, val in check.iteritems():
if isinstance(val, dict):
validate_values_not_empty(val)
elif (val is None or
(isinstance(val, (basestring, list)) and len(val) == 0)):
raise ValueError('`{0}` set to an empty value'.format(key))
def validate_keys(check):
valid_keys = set(['first', 'rrule', 'title', 'url'])
valid_keys |= set(ScheduledEvent._defaults.keys())
for key in check:
if key not in valid_keys:
raise ValueError('Invalid variable: `{0}`'.format(key))
# make sure that all of the required keys are being set
if ('title' not in check or 'first' not in check or
'url' not in check):
raise ValueError('All the required variables were not set.')
def validate_type(check, key, req_type):
if key not in check:
return
if req_type == int:
try:
int(str(check[key]))
except ValueError:
raise ValueError('{0} must be an integer'.format(key))
else:
if not isinstance(check[key], req_type):
raise ValueError('{0} must be {1}'.format(key, req_type))
def validate_regex(check, key, pattern):
if key not in check:
return
if not re.match(pattern, check[key]):
raise ValueError('Invalid {0}: {1}'.format(key, check[key]))
def send_error_message(user, sr_name, error):
"""Sends an error message to the user if a wiki update failed."""
global r
r.send_message(user,
'Error processing wiki in /r/{0}'.format(sr_name),
'**Error updating from [wiki configuration in /r/{0}]'
'(http://www.reddit.com/r/{0}/wiki/{1})**:\n\n---\n\n{2}'
.format(sr_name,
cfg_file.get('reddit', 'wiki_page_name'),
error))
def process_messages():
global r
stop_time = int(cfg_file.get('reddit', 'last_message'))
owner_username = cfg_file.get('reddit', 'owner_username')
new_last_message = None
update_srs = set()
invite_srs = set()
logging.debug('Reading messages and commands...')
try:
for message in r.get_inbox():
if int(message.created_utc) <= stop_time:
break
if message.was_comment:
continue
if not new_last_message:
new_last_message = int(message.created_utc)
if message.body.strip().lower() == 'schedule':
# handle if they put in something like '/r/' in the subject
if '/' in message.subject:
sr_name = message.subject[message.subject.rindex('/')+1:]
else:
sr_name = message.subject
if (sr_name.lower(), message.author.name) in update_srs:
continue
try:
subreddit = r.get_subreddit(sr_name)
if (message.author.name == owner_username or
message.author in subreddit.get_moderators()):
update_srs.add((sr_name.lower(), message.author.name))
else:
send_error_message(message.author, sr_name,
'You do not moderate /r/{0}'.format(sr_name))
except HTTPError as e:
send_error_message(message.author, sr_name,
'Unable to access /r/{0}'.format(sr_name))
# do requested updates from wiki pages
updated_srs = []
for subreddit, sender in update_srs:
if update_from_wiki(r.get_subreddit(subreddit),
r.get_redditor(sender)):
updated_srs.append(subreddit)
logging.info('Updated from wiki in /r/{0}'.format(subreddit))
else:
logging.info('Error updating from wiki in /r/{0}'
.format(subreddit))
except Exception as e:
logging.error('ERROR: {0}'.format(e))
raise
finally:
# update cfg with new last_message value
if new_last_message:
cfg_file.set('reddit', 'last_message', str(new_last_message))
cfg_file.write(open(path_to_cfg, 'w'))
def main():
global r
global client
logging.config.fileConfig(path_to_cfg)
start_timestamp = int(time())
start_time = datetime.utcfromtimestamp(start_timestamp)
start_time = start_time.replace(tzinfo=tz.tzutc())
print "Start time %s" %start_time
last_run = int(cfg_file.get('reddit', 'last_run'))
last_run = datetime.utcfromtimestamp(last_run)
last_run = last_run.replace(tzinfo=tz.tzutc())
## cfg_file.set('reddit', 'last_run', str(start_timestamp))
## cfg_file.write(open(path_to_cfg, 'w'))
while True:
try:
r = praw.Reddit(user_agent=cfg_file.get('reddit', 'user_agent'))
logging.debug('Logging in as {0}'
.format(cfg_file.get('reddit', 'username')))
r.login(cfg_file.get('reddit', 'username'),
cfg_file.get('reddit', 'password'), disable_warning=True)
break
except Exception as e:
logging.error('ERROR: {0}'.format(e))
# check for update messages
logging.info("checking for update messages")
try:
process_messages()
except KeyboardInterrupt:
raise
except Exception as e:
logging.error('ERROR: {0}'.format(e))
session.rollback()
subreddits = (session.query(Subreddit)
.filter(Subreddit.enabled == 1)
.all())
for sr in subreddits:
LIMIT = sr.banner_limit
BANNER = sr.banner_name
schedule = [ScheduledEvent(d, sr.updated)
for d in yaml.safe_load_all(sr.schedule_yaml)
if isinstance(d, dict)]
title = ""
event_due = ""
past_due = timedelta(days=999999999)
for event in schedule:
mc = event.is_due(last_run, start_time)
if mc[0] and mc[1]:
if mc[1] < past_due:
past_due = mc[1]
event_due = event
title = mc[2]
if event_due:
try:
print ('executing', title, event_due)
event_due.execute(r.get_subreddit(sr.name), BANNER, LIMIT)
except KeyboardInterrupt:
raise
except Exception as e:
logging.error('ERROR in /r/{0}: {1}. Rolling back'.format(sr.name, e))
session.rollback()
cfg_file.set('reddit', 'last_run', str(start_timestamp))
cfg_file.write(open(path_to_cfg, 'w'))
if __name__ == '__main__':
main()
|
[
"godcast@gmail.com"
] |
godcast@gmail.com
|
4095191d2d5af67373038ef3eb73ba3597657739
|
57b29be35678ba3d7b532e574a9fa8acf00557c4
|
/pelican/plugins/webassets_babeljsx/__init__.py
|
e0aeea29cda7608d703ba4658fb415589a1abe29
|
[
"MIT"
] |
permissive
|
rhooper/pelican-webassets-babeljsx
|
c0503546c2bf194a8aa9185057e5ef37f046a47b
|
39fd2fb286f7bfa12c04785dddac84e935a8a5da
|
refs/heads/main
| 2023-03-26T12:48:58.637635
| 2021-03-27T22:51:26
| 2021-03-27T22:51:26
| 350,156,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
"""
Simple plugin that registers the BabelJSX filter with webassets
"""
from dukpy.webassets import BabelJSX
from webassets.filter import register_filter
def register():
register_filter(BabelJSX)
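# Editor's note: an illustrative (commented-out) sketch of how the registered filter
# might be used from a webassets Bundle; the filter name 'babeljsx' and the file names
# are assumptions for the example, not taken from this plugin.
# from webassets import Bundle
# jsx_bundle = Bundle('js/app.jsx', filters='babeljsx', output='generated/app.js')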
|
[
"rhooper@toybox.ca"
] |
rhooper@toybox.ca
|
9b71f1699ce7f419ca1a0de7731d2a04a54f68ab
|
3ef524fbe4f299fa5f2702bf3b421b9e5e548b90
|
/Problem 22.py
|
621ca91cd4de131ed2498a491b77f348fed7f9de
|
[] |
no_license
|
PSpeiser/ProjectEuler
|
87e95cac98f7811a15ca2bb1c925dd595d8a7c43
|
b846c172bd12b4400e200d28886a6af7bec2dcf0
|
refs/heads/master
| 2021-01-01T18:55:01.121820
| 2014-04-03T14:37:35
| 2014-04-03T14:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
def value(name):
value = 0
for letter in name:
value += ord(letter) - 64
return value
import csv
names = []
with open("names.txt","r") as csvfile:
csvreader = csv.reader(csvfile)
names = list(csvreader)[0]
names.sort()
total = 0
for i in range(len(names)):
name = names[i]
multiplier = i + 1
score = multiplier * value(name)
total += score
print total
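# Editor's note: a worked check from the Project Euler problem 22 statement (not part
# of the original script): in the alphabetically sorted list, COLIN is the 938th name
# and has an alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, so it contributes
# 938 * 53 = 49714 to the total.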
|
[
"mail@patrickspeiser.at"
] |
mail@patrickspeiser.at
|
a2080ddcb4fbd6406b8f4cfc7f1dcae4a4155bfd
|
d6cb6fdd4b19b18add5c45e03c9962f2f05be410
|
/consumers/models/.ipynb_checkpoints/weather-checkpoint.py
|
90a6c7c1f4675b8596f832eb2b89b9ad0c1d4a68
|
[] |
no_license
|
sarakuma/udacity-ds-kafka-project
|
2a4d5bb75aaaa598238566169861af904a31800a
|
2d4817046cc1fe444e311cb43bb0ad49ccae0e81
|
refs/heads/master
| 2023-08-17T22:47:06.148883
| 2020-03-10T17:55:57
| 2020-03-10T17:55:57
| 246,367,981
| 0
| 0
| null | 2023-08-14T22:08:46
| 2020-03-10T17:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 644
|
py
|
"""Contains functionality related to Weather"""
import logging
logger = logging.getLogger(__name__)
class Weather:
"""Defines the Weather model"""
def __init__(self):
"""Creates the weather model"""
self.temperature = 70.0
self.status = "sunny"
def process_message(self, message):
"""Handles incoming weather data"""
try:
value = message.value()
self.temperature = value.temperature
self.status = value.status
except Exception as e:
logger.error(f"weather process_message is incomplete - skipping. reason = {str(e)}")
|
[
"kumar.saraboji@gmail.com"
] |
kumar.saraboji@gmail.com
|
53d35c3afe3426d84013479bfd93c2bc09b77990
|
645b3461ac4081ba41519a72ae36ac807e00e31e
|
/level2/topnumber.py
|
e7b3d5147c46af803bcde71480b223000a357643
|
[] |
no_license
|
edw1n94/algorithm
|
48411abe47b3d62e7baea6829a51cae3ce87ed99
|
310b7619e0330a1f2d168ad05bb7f3b4803f8278
|
refs/heads/master
| 2021-05-20T23:12:26.437262
| 2020-04-02T12:33:06
| 2020-04-02T12:33:06
| 252,446,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
def solution(numbers):
answer = ""
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def str_sort(A, B):
strAB = str(A) + str(B)
strBA = str(B) + str(A)
if strAB < strBA:
return 0
else:
return -1
numbers = sorted(numbers, key=cmp_to_key(str_sort), reverse=False)
for number in numbers:
answer += str(number)
if int(answer) == 0:
return "0"
return answer
numbers = [0, 0, 000, 0]
print(solution(numbers))
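# Editor's note: extra checks, not part of the original solution -- expected behaviour
# on the usual test cases for this "largest number" problem:
print(solution([6, 10, 2]))         # -> "6210"
print(solution([3, 30, 34, 5, 9]))  # -> "9534330"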
|
[
"ngw42@naver.com"
] |
ngw42@naver.com
|
7f285b3636bb9855071087c8449ec16b13e9dd44
|
10c27cff9ed412954cddcbb0f3bba8c69cca3fb3
|
/config_tune/deep_tcr_vdj_tune.py
|
8c1196da5a6b16249c728cd3989afbb46f840ede
|
[
"MIT"
] |
permissive
|
lizwood/mvTCR
|
1d121019cc263d668f4993a022bb1199e14e9ec8
|
dba217393a3a9b1a8700d2927dbac81d360aec4a
|
refs/heads/master
| 2023-06-19T14:48:56.193657
| 2021-07-16T10:45:51
| 2021-07-16T10:45:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,500
|
py
|
from ray import tune
params = {
'seq_model_hyperparams': {
'num_features_1': tune.qrandint(20, 50, 10),
'num_features_2': tune.qrandint(50, 100, 25),
'num_features_3': tune.qrandint(50, 200, 50),
'dropout': tune.quniform(0.0, 0.2, 0.05),
'batch_norm': tune.choice([True, False]),
'embedding_dim': tune.qrandint(20, 80, 20),
'encoder': {
'kernel_1': tune.qrandint(3, 7),
'kernel_23': tune.qrandint(3, 5),
'stride_1': tune.qrandint(1, 3),
'stride_23': tune.qrandint(1, 3),
# 'kernel': [5, 3, 3],
# 'stride': [1, 3, 3],
# 'num_features': [32, 64, 128],
'num_layers': None,
'activation': 'leakyrelu',
},
'decoder': {
'kernel_1': tune.qrandint(3, 5),
'kernel_2': tune.qrandint(3, 5),
'stride_1': tune.qrandint(2, 3),
'stride_2': tune.qrandint(2, 3),
# 'kernel': [3, 3], # omit last, as last kernel size is calculated
# 'stride': [2, 2], # omit last, as last stride is calculated
# 'num_features': [64, 128, 64], # first is input shape, omit last as it is number of classes
'initial_feature': tune.qrandint(50, 200, 50),
'initial_len': tune.qrandint(3, 10, 1),
'num_layers': None,
'activation': 'relu'
}
},
'use_vdj': True,
'vdj_embedding_dim': tune.qrandint(20, 60, 20),
'vdj_dec_layer_1': tune.qrandint(80, 200, 40),
'vdj_dec_layer_2': tune.qrandint(20, 80, 20),
'vdj_dec_layers': [128, 64],
'use_embedding_matrix': tune.choice([True, False]),
'dec_hdim_1': tune.qrandint(80, 200, 40),
'dec_hdim_2': tune.qrandint(150, 300, 50),
'dec_hdim': [128, 256],
'enc_hdim_1': tune.qrandint(150, 300, 50),
'enc_hdim_2': tune.qrandint(150, 300, 50),
'enc_hdim': [256, 256],
'zdim': tune.qrandint(20, 500, 20),
# Loss and optimizer
'lr': tune.qloguniform(1e-5, 1e0, 1e-5),
'batch_size': tune.choice([2048, 4096, 8192]),
'loss_weights_1': tune.qloguniform(1e-1, 1e1, 1e-1),
'loss_weights_2': 1.0,
'loss_weights_3': tune.qloguniform(1e-5, 1e0, 1e-5),
'loss_weights': [1.0, 1.0, 1.0e-3]
}
init_params = [{
'seq_model_hyperparams': {
'num_features_1': 32,
'num_features_2': 64,
'num_features_3': 128,
'num_features': [32, 64, 128],
'dropout': 0.0,
'batch_norm': False,
'embedding_dim': 64,
'encoder':
{'kernel_1': 5,
'kernel_23': 3,
'stride_1': 1,
'stride_23': 3,
'kernel': [5, 3, 3],
'stride': [1, 3, 3],
'num_layers': 3,
'activation': 'leakyrelu'
},
'decoder':
{'kernel_1': 3,
'kernel_2': 3,
'stride_1': 2,
'stride_2': 2,
'kernel': [3, 3], # omit last, as last kernel size is calculated
'stride': [2, 2], # omit last, as last stride is calculated
'initial_feature': 64,
'initial_len': 4,
'num_layers': 3,
'activation': 'relu'
}
},
'use_vdj': True,
'vdj_embedding_dim': 48,
'vdj_dec_layer_1': 128,
'vdj_dec_layer_2': 64,
'vdj_dec_layers': [128, 64],
'use_embedding_matrix': True,
'dec_hdim_1': 128,
'dec_hdim_2': 256,
'dec_hdim': [128, 256],
'enc_hdim_1': 256,
'enc_hdim_2': 256,
'enc_hdim': [256, 256],
'zdim': 256,
# Loss and optimizer
'lr': 1.0e-3,
'batch_size': 8192,
'loss_weights_1': 1.0,
'loss_weights_2': 1.0,
'loss_weights_3': 1.0e-3,
'loss_weights': [1.0, 1.0, 1.0e-3]
},
{
'seq_model_hyperparams': {
'num_features_1': 32,
'num_features_2': 64,
'num_features_3': 128,
'num_features': [32, 64, 128],
'dropout': 0.0,
'batch_norm': False,
'embedding_dim': 64,
'encoder':
{'kernel_1': 5,
'kernel_23': 3,
'stride_1': 1,
'stride_23': 3,
'kernel': [5, 3, 3],
'stride': [1, 3, 3],
'num_layers': 3,
'activation': 'leakyrelu'
},
'decoder':
{'kernel_1': 3,
'kernel_2': 3,
'stride_1': 2,
'stride_2': 2,
'kernel': [3, 3], # omit last, as last kernel size is calculated
'stride': [2, 2], # omit last, as last stride is calculated
'initial_feature': 64,
'initial_len': 4,
'num_layers': 3,
'activation': 'relu'
}
},
'use_vdj': True,
'vdj_embedding_dim': 48,
'vdj_dec_layer_1': 128,
'vdj_dec_layer_2': 64,
'vdj_dec_layers': [128, 64],
'use_embedding_matrix': True,
'dec_hdim_1': 128,
'dec_hdim_2': 256,
'dec_hdim': [128, 256],
'enc_hdim_1': 256,
'enc_hdim_2': 256,
'enc_hdim': [256, 256],
'zdim': 256,
# Loss and optimizer
'lr': 1.0e-4,
'batch_size': 8192,
'loss_weights_1': 1.0,
'loss_weights_2': 1.0,
'loss_weights_3': 1.0e-3,
'loss_weights': [1.0, 1.0, 1.0e-3]
}
]
|
[
"yang.an@outlook.com"
] |
yang.an@outlook.com
|
3e483b0f7144ccc6f0004d7b0bb6806d474db98e
|
2e07f6b94fc0f7a5cf55002040151b8745fd843d
|
/privious_learning_code/OS_Handling/os.ttyname() Method.py
|
4ab70bdcb4c400b2e6ddb190eb230fa7fb5757f2
|
[] |
no_license
|
LalithK90/LearningPython
|
a7e6404e900b7d66c663acc72cde3e3655d54ac7
|
ece38fdac88da66c8b76fe710b3df7d8635a3590
|
refs/heads/master
| 2023-06-09T22:32:16.674821
| 2021-06-27T18:55:00
| 2021-06-27T18:55:00
| 169,513,150
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# Description
#
# The method ttyname() returns a string, which specifies the terminal device associated with fd. If fd is not associated with a terminal device, an exception is raised.
# Syntax
#
# Following is the syntax for ttyname() method −
#
# os.ttyname(fd)
#
# Parameters
#
# fd − This is the file descriptor.
#
# Return Value
#
# This method returns a string which specifies the terminal device.
# Example
import os, sys
# Showing current directory
print("Current working dir :%s" %os.getcwd())
# Opening /dev/tty (read-only) to get a terminal file descriptor
fd = os.open("/dev/tty",os.O_RDONLY)
p = os.ttyname(fd)
print("the terminal device associated is: ")
print(p)
print("done!!")
os.close(fd)
print("Closed the file successfully!!")
|
[
"asakahatapitiya@gmail.com"
] |
asakahatapitiya@gmail.com
|
c741daee4636dd19fa8ffd7651f5f972e06fdc2c
|
7e5017837ce2468c6bc1c41d73baf982eed2adbd
|
/video.py
|
e4ddd76be78be5ef8b6624f55c2ae72c7e450d02
|
[] |
no_license
|
weizy2018/learnopencv
|
5fecead7af0aa13fb3c6aaba67895d5ff14adb41
|
4b2594689bef51f3f964d386d87309bf54485db3
|
refs/heads/master
| 2022-07-09T02:56:00.395577
| 2020-05-19T13:46:35
| 2020-05-19T13:46:35
| 255,314,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
import cv2 as cv
cap = cv.VideoCapture("/home/weizy/Videos/video.mp4")
if not cap.isOpened():
exit()
ret, frame = cap.read()
cv.namedWindow('video', cv.WINDOW_NORMAL)
while ret:
cv.imshow("video", frame)
if cv.waitKey(1) == ord('q'):
break
ret, frame = cap.read()
cap.release()
cv.destroyAllWindows()
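# Editor's note (not part of the original script): cv.waitKey(1) plays frames back as
# fast as they can be decoded; to approximate the source frame rate one could read it
# from the capture before the loop, e.g.
#   fps = cap.get(cv.CAP_PROP_FPS)
#   delay = max(1, int(1000 / fps)) if fps > 0 else 1
# and pass `delay` to cv.waitKey() instead of 1.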
|
[
"2233467661@qq.com"
] |
2233467661@qq.com
|
5fd255ea854d7583a563fe0d7b15d7f6e2e1e56a
|
ad8ded1e86419d13a692b974a4fe83786a4dbdb1
|
/eleven_sept/py_set_methods/symmetric_difference_update.py
|
e39d6d196f9b1bec29801981a76caefc936488a2
|
[] |
no_license
|
avengerryan/daily_practice_codes
|
9934d7d6c1be2c8e89567e327ccd4af5a8eb89c2
|
34b1d8b772694de9414a15269fdc59284d74c11b
|
refs/heads/master
| 2022-12-23T13:59:54.968089
| 2020-09-14T09:28:27
| 2020-09-14T09:28:27
| 295,375,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# Python set symmetric_difference_update(): updates the set with the symmetric
# difference.
# The symmetric_difference_update() method finds the symmetric difference of two
# sets and updates, in place, the set calling it; it returns None.
# e.g. working of symmetric_difference_update()
A = {'a', 'c', 'd'}
B = {'c', 'd', 'e'}
result = A.symmetric_difference_update(B)
print('A = ', A)
print('B =', B)
print('result =', result)
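# Editor's note -- expected output (set element order may vary):
#   A = {'a', 'e'}        (A was updated in place to the symmetric difference)
#   B = {'c', 'd', 'e'}   (B is unchanged)
#   result = None         (the method returns None)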
|
[
"avengerrmk@gmail.com"
] |
avengerrmk@gmail.com
|
7af2b382b7d33e936abb8c58493342b13410c0dc
|
604dba018a0937d0bdeb0bfd5400ccba1e14df10
|
/src/train_predict_xgb.py
|
2d12660f00492d6b86a37ff612044b35022e6211
|
[] |
no_license
|
ericdoi/kaggle-renthop-2017
|
08a132aa9b5feedb2b9cc91a6433c3b88c2dd6ca
|
8adfeddc2c2893efb4ed02d9e69e2b2ce9aebe6b
|
refs/heads/master
| 2021-01-23T11:21:08.941721
| 2019-10-31T04:52:12
| 2019-10-31T04:52:12
| 93,132,716
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,014
|
py
|
"""
Thanks to:
https://github.com/jeongyoonlee/kaggler-template
https://www.kaggle.com/sudalairajkumar/xgb-starter-in-python
"""
import argparse
import logging
import os
import time
import numpy as np
import pandas as pd
import xgboost as xgb
from kaggler.data_io import load_data
from sklearn import model_selection
from sklearn.metrics import log_loss
DEFAULT_DEPTH = 6
DEFAULT_LRATE = 0.3
DEFAULT_NROUNDS = 200
CV_SEED = 2017
XG_SEED = 0
DEFAULT_FOLDS = 5
EARLY_STOPPING = 100
def initialize_logger(filename):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
filename=filename)
logger = logging.getLogger()
handler = logging.StreamHandler()
logger.addHandler(handler)
def run(train_feature_file, test_feature_file, feature_map_file,
predict_valid_file, predict_test_file, feature_importance_file,
depth, lrate, n_rounds, n_folds=DEFAULT_FOLDS):
model_name = os.path.splitext(
os.path.splitext(os.path.basename(predict_test_file))[0]
)[0]
log_file = '{}.log'.format(model_name)
initialize_logger(log_file)
param_dict = {
'objective': 'multi:softprob',
'eta': lrate,
'max_depth': depth,
'silent': 1,
'num_class': 3,
'eval_metric': "mlogloss",
'min_child_weight': 1,
'subsample': 0.7,
'colsample_bytree': 0.7,
'seed': XG_SEED
}
params = list(param_dict.items())
[train_X, train_y] = load_data(train_feature_file) #datasets.load_svmlight_file(train_feature_file)
[test_X, _] = load_data(test_feature_file) #datasets.load_svmlight_file(test_feature_file)
# Run cv and produce out-of-fold predictions
oof_preds = np.zeros((train_X.shape[0], 3))
# test_preds = np.zeros(test_X.shape[0]) # Can accumulate test preds/nfolds to get avg of fold models
fold_gen = model_selection.KFold(n_splits=n_folds, shuffle=True, random_state=CV_SEED)
for k, (dev_ix, val_ix) in enumerate(fold_gen.split(train_X)):
dev_X, val_X = train_X[dev_ix, :], train_X[val_ix, :]
dev_y, val_y = train_y[dev_ix], train_y[val_ix]
if k == 0: # First fold
# Keep the number of rounds from first fold
val_preds, model = run_xgb(params, n_rounds, dev_X, dev_y, val_X, val_y, EARLY_STOPPING)
n_best = model.best_iteration
logging.info('best iteration={}'.format(n_best))
# Get feature importances
importance = model.get_fscore(feature_map_file)
imp_df = pd.DataFrame.from_dict(importance, 'index')
imp_df.index.name = 'feature'
imp_df.columns = ['fscore']
imp_df.ix[:, 'fscore'] = imp_df.fscore / imp_df.fscore.sum()
imp_df.sort_values('fscore', axis=0, ascending=False, inplace=True)
imp_df.to_csv(feature_importance_file, index=True)
logging.info('feature importance is saved in {}'.format(feature_importance_file))
else:
val_preds, model = run_xgb(params, n_best, dev_X, dev_y, val_X, val_y)
oof_preds[val_ix] = val_preds # save oof predictions per shuffled indices
logging.info('CV #{}: {:.4f}'.format(k, log_loss(val_y, val_preds)))
logging.info('Saving validation predictions...')
oof_preds_df = pd.DataFrame(oof_preds)
oof_preds_df.columns = ['low', 'medium', 'high']
oof_preds_df.to_csv(predict_valid_file, index=False)
# Run on 100% training
logging.info('Retraining with 100% training data')
test_preds, model = run_xgb(params, n_best, train_X, train_y, test_X)
test_preds_df = pd.DataFrame(test_preds)
test_preds_df.columns = ['low', 'medium', 'high']
logging.info('Saving test predictions...')
test_preds_df.to_csv(predict_test_file, index=False)
logging.info('{}: CV {:.4f}, n_best {}'.format(model_name, log_loss(train_y, oof_preds), n_best))
logging.info('Log file: %s' % log_file)
def run_xgb(params, n_rounds, train_X, train_y, test_X, test_y=None, early_stopping=None):
xgtrain = xgb.DMatrix(train_X, label=train_y)
watchlist = [(xgtrain, 'train')]
if test_y is not None:
xgtest = xgb.DMatrix(test_X, label=test_y)
watchlist.append((xgtest, 'val test'))
model = xgb.train(params, xgtrain, n_rounds, watchlist, early_stopping_rounds=early_stopping)
else:
xgtest = xgb.DMatrix(test_X)
model = xgb.train(params, xgtrain, n_rounds, watchlist)
# TODO: Limit num trees to n_best rounds?
pred_test_y = model.predict(xgtest)
return pred_test_y, model
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train-feature-file", required=True, help="input path for train features")
parser.add_argument("--test-feature-file", required=True, help="input path for test features")
parser.add_argument('--feature-map-file', required=True, help="input path for feature indices")
parser.add_argument("--predict-valid-file", required=True, help="output path for oof validation preds")
parser.add_argument("--predict-test-file", required=True, help="output path for test predictions")
parser.add_argument('--feature-importance-file', required=True, help="output path for importances")
parser.add_argument("--depth", default=DEFAULT_DEPTH, type=int, help="max tree depth")
parser.add_argument("--lrate", default=DEFAULT_LRATE, type=float, help="learning rate eta")
parser.add_argument("--n-rounds", default=DEFAULT_NROUNDS, type=int, help="max num training rounds")
args = parser.parse_args()
start = time.time()
run(args.train_feature_file, args.test_feature_file, args.feature_map_file,
args.predict_valid_file, args.predict_test_file, args.feature_importance_file,
args.depth, args.lrate, args.n_rounds)
logging.info('finished ({:.2f} min elasped)'.format((time.time() - start) / 60))
if __name__ == "__main__":
main()
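# Editor's note: an illustrative (commented-out) invocation using the argument names
# defined above; the file paths are placeholders, not paths from the original project.
# python train_predict_xgb.py \
#     --train-feature-file data/train.sps \
#     --test-feature-file data/test.sps \
#     --feature-map-file data/feature.map \
#     --predict-valid-file preds/xgb.val.csv \
#     --predict-test-file preds/xgb.tst.csv \
#     --feature-importance-file preds/xgb.imp.csv \
#     --depth 6 --lrate 0.1 --n-rounds 500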
|
[
"ricekido@gmail.com"
] |
ricekido@gmail.com
|
684b338dccf3f6896525b851df87aeb51636c232
|
e12385c85e41d98bc3104f3e4dde22025a0b6365
|
/mcp23Sxx/examples/test_input_pins.py
|
15bcba619e8fe45db7d43f128d95d08b71b75315
|
[] |
no_license
|
mchobby/esp8266-upy
|
6ee046856ec03c900ebde594967dd50c5f0a8e21
|
75184da49e8578315a26bc42d9c3816ae5d5afe8
|
refs/heads/master
| 2023-08-04T15:11:03.031121
| 2023-07-27T15:43:08
| 2023-07-27T15:43:08
| 72,998,023
| 47
| 30
| null | 2021-06-20T16:12:59
| 2016-11-06T15:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
"""
The following demo is based on test_piface.py for the PiFace-Digital interface.
It uses a PYBStick + HAT-FACE (SPI1 + CE0 on S24).
It reads multiple inputs at once.
"""
from mcp23Sxx import *
from machine import SPI, Pin
import time
# PYBStick / PYBStick-HAT-FACE
spi = SPI( 1, phase=0, polarity=0, baudrate=400000 ) # SCLK=S23, MISO=S21, MOSI=S19
# PYBStick / PYBStick-HAT-FACE
cs = Pin( 'S24', Pin.OUT, value=True ) # SPI_CE0=S24
# MCP23S17 - SPI GPIO extender
mcp = MCP23S17( spi, cs ) # default: device_id=0x00
# GPB0..GPB7 as input
for x in range(8, 16):
mcp.setup(x, Pin.IN)
for iter in range( 10 ):
print( '---[ %i ]--- read GPIO 8,9,10,11,12,13,14,15 ----' % iter )
print( mcp.input_pins([8,9,10,11,12,13,14,15]) )
time.sleep(1)
print("That s all Folks")
|
[
"info@mchobby.be"
] |
info@mchobby.be
|
a2da8d67779abc0d6279f9b7b095f068a3ce9d8c
|
206c86fa919a25866db8c6cf8a30d2aac22413ca
|
/docs/source/conf.py
|
644eb1826378df6796d9174b6d53a216da8b4dd8
|
[
"Apache-2.0"
] |
permissive
|
jeevb/flytekit
|
65b00d24cd14aad7cc17b390b2993e6bc146f3a8
|
b773a2015e83a1991a3f06260620d79ff670f247
|
refs/heads/master
| 2023-05-10T11:27:30.700384
| 2021-06-03T01:30:00
| 2021-06-03T01:30:00
| 281,543,221
| 0
| 0
|
Apache-2.0
| 2020-07-22T01:27:46
| 2020-07-22T01:27:45
| null |
UTF-8
|
Python
| false
| false
| 6,360
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../flytekit/"))
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "Flyte Cookbook [Python]"
copyright = "2021, Flyte"
author = "Flyte"
# The full version, including alpha/beta/rc tags
release = "0.16.0b9"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.graphviz",
"sphinx-prompt",
"sphinx_copybutton",
"sphinx_search.extension",
"sphinx_fontawesome",
]
# build the templated autosummary files
autosummary_generate = True
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = "Flyte Docs"
html_theme_options = {
"light_css_variables": {
"color-brand-primary": "#4300c9",
"color-brand-content": "#4300c9",
},
"dark_css_variables": {
"color-brand-primary": "#9D68E4",
"color-brand-content": "#9D68E4",
},
# custom flyteorg furo theme options
"github_repo": "flytekit",
"github_username": "flyteorg",
"github_commit": "master",
"docs_path": "docs/source", # path to documentation source
}
templates_path = ["_templates"]
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
# html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
html_logo = "flyte_circle_gradient_1_4x4.png"
pygments_style = "tango"
pygments_dark_style = "native"
html_context = {
"home_page": "https://docs.flyte.org",
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "flytekitdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "flytekit.tex", "Flytekit Documentation", "Flyte", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "flytekit", "Flytekit Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"flytekit",
"Flytekit Documentation",
author,
"flytekit",
"Python SDK for Flyte (https://flyte.org).",
"Miscellaneous",
),
]
# -- Extension configuration -------------------------------------------------
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"flytectl": ("https://flytectl.readthedocs.io/en/latest/", None),
# "flytectl": ("/Users/yourusername/go/src/github.com/flyteorg/flytectl/docs/build/html", None),
"cookbook": ("https://flytecookbook.readthedocs.io/en/latest/", None),
"flyte": ("https://flyte.readthedocs.io/en/latest/", None),
}
|
[
"noreply@github.com"
] |
jeevb.noreply@github.com
|
42f9d7c603a88fbdf3e6e830f94a7dd917677c9c
|
b305b327dd585f89043e0d50c395508ec113d288
|
/venv/bin/gunicorn
|
d49dc2aae822ff5cc6d77f670b89b6a1f7d12b70
|
[] |
no_license
|
allanlburns/portfolio-project
|
98a428b2b7738ba0f76007f8a630f4371508c7b1
|
7c18fd959c67a31205e6a7ba64245dc3d879ef97
|
refs/heads/master
| 2020-04-27T19:11:26.297039
| 2019-03-18T23:17:33
| 2019-03-18T23:17:33
| 174,606,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
#!/home/allan/udemy_django_2.1/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"allanlburns@yahoo.com"
] |
allanlburns@yahoo.com
|
|
ff23d78ef8ca00500d2a1223a47401b4e6b4cd08
|
4f8ddd9808535ee8aa900393c3a429f480324574
|
/Estrutura_Controle/switch_2.py
|
f4a18106047d6f801f9cd9893ecf932303c927b1
|
[] |
no_license
|
kamibarreto/Cod3r
|
1de8bb5288c16f90e1060089e7fda8216b6cb7cf
|
cea86f3984e3d43c0726b9ea809505d00679a314
|
refs/heads/master
| 2022-12-22T23:13:59.176534
| 2020-09-05T11:01:10
| 2020-09-05T11:01:10
| 284,170,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
def get_dia_semana(dia):
dias = {
1: 'Domingo',
2: 'Segunda',
3: 'Terça',
4: 'Quarta',
5: 'Quinta',
        6: 'Sexta',
        7: 'Sábado',
}
return dias.get(dia, "**Fora Do Sistema**")
if __name__ == '__main__':
for dia in range(1, 8):
if 2 <= dia <= 6:
print(f'{dia}: {get_dia_semana(dia)} é meio de semana')
elif dia == 7:
print(f'{dia}: {get_dia_semana(dia)} é fim de semana')
elif dia == 1:
print(f'{dia}: {get_dia_semana(dia)} é fim de semana')
else:
print('invalido')
|
[
"fabinhobarreto9928@gmail.com"
] |
fabinhobarreto9928@gmail.com
|
29bae3a1c2c5625c8b165963c4600d9da472b054
|
44b4e7dfaaaeffeaee0573a529131fc9261cf118
|
/Eevee_speech_assistant/asis.py
|
c44c532424a56057e303da6e780e1dedc512e179
|
[] |
no_license
|
maxiicano/Prototypes
|
1d7905d6c722d38b9e63c489623e6b8b3a0b6156
|
8e7e93a621ca1799d903135d67b0e0af5dab5774
|
refs/heads/master
| 2022-10-13T11:32:48.732054
| 2020-06-09T11:11:28
| 2020-06-09T11:11:28
| 265,267,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,638
|
py
|
import speech_recognition as sr # recognise speech
import playsound # to play an audio file
from gtts import gTTS # google text to speech
import random
from time import ctime # get time details
import webbrowser # open browser
import ssl
import certifi
import time
import os # to remove created audio files
from PIL import Image
import subprocess
import pyautogui #screenshot
import pyttsx3
import bs4 as bs
import urllib.request
class person:
name = ''
def setName(self, name):
self.name = name
class asis:
name = ''
def setName(self, name):
self.name = name
def there_exists(terms):
for term in terms:
if term in voice_data:
return True
# NOTE: this pyttsx3-based engine_speak is shadowed by the gTTS-based engine_speak
# defined further below, which is the definition actually used at runtime
def engine_speak(text):
    text = str(text)
    engine.say(text)
    engine.runAndWait()
r = sr.Recognizer() # initialise a recogniser
# listen for audio and convert it to text:
def record_audio(ask=""):
with sr.Microphone() as source: # microphone as source
if ask:
engine_speak(ask)
audio = r.listen(source, 5, 5) # listen for the audio via source
print("Done Listening")
voice_data = ''
try:
voice_data = r.recognize_google(audio) # convert audio to text
except sr.UnknownValueError: # error: recognizer does not understand
engine_speak('I did not get that')
except sr.RequestError:
engine_speak('Sorry, the service is down') # error: recognizer is not connected
print(">>", voice_data.lower()) # print what user said
return voice_data.lower()
# get string and make a audio file to be played
def engine_speak(audio_string):
audio_string = str(audio_string)
tts = gTTS(text=audio_string, lang='en') # text to speech(voice)
r = random.randint(1,20000000)
audio_file = 'audio' + str(r) + '.mp3'
tts.save(audio_file) # save as mp3
playsound.playsound(audio_file) # play the audio file
print(asis_obj.name + ":", audio_string) # print what app said
os.remove(audio_file) # remove audio file
def respond(voice_data):
# 1: greeting
if there_exists(['hey','hi','hello']):
greetings = ["hey, how can I help you" + person_obj.name, "hey, what's up?" + person_obj.name, "I'm listening" + person_obj.name, "how can I help you?" + person_obj.name, "hello" + person_obj.name]
greet = greetings[random.randint(0,len(greetings)-1)]
engine_speak(greet)
# 2: name
if there_exists(["what is your name","what's your name","tell me your name"]):
if person_obj.name:
engine_speak("whats with my name ")
else:
engine_speak("i dont know my name . what's your name?")
if there_exists(["my name is"]):
person_name = voice_data.split("is")[-1].strip()
engine_speak("okay, i will remember that " + person_name)
person_obj.setName(person_name) # remember name in person object
if there_exists(["your name should be"]):
asis_name = voice_data.split("be")[-1].strip()
engine_speak("okay, i will remember that my name is " + asis_name)
asis_obj.setName(asis_name) # remember name in asis object
# 3: greeting
if there_exists(["how are you","how are you doing"]):
engine_speak("I'm very well, thanks for asking " + person_obj.name)
    # 4: time
    if there_exists(["what's the time","tell me the time","what time is it"]):
        # use a local name that does not shadow the imported time module, otherwise
        # the time.sleep(1) call at the end of respond() would fail after this branch
        now = ctime().split(" ")[3].split(":")[0:2]
        if now[0] == "00":
            hours = '12'
        else:
            hours = now[0]
        minutes = now[1]
        time_string = hours + " hours and " + minutes + " minutes"
        engine_speak(time_string)
# 5: search google
if there_exists(["search for"]) and 'youtube' not in voice_data:
search_term = voice_data.split("for")[-1]
url = "https://google.com/search?q=" + search_term
webbrowser.get().open(url)
engine_speak("Here is what I found for" + search_term + "on google")
# 6: search youtube
if there_exists(["youtube"]):
search_term = voice_data.split("for")[-1]
url = "https://www.youtube.com/results?search_query=" + search_term
webbrowser.get().open(url)
engine_speak("Here is what I found for " + search_term + "on youtube")
#7: get stock price
if there_exists(["price of"]):
search_term = voice_data.split("for")[-1]
url = "https://google.com/search?q=" + search_term
webbrowser.get().open(url)
engine_speak("Here is what I found for " + search_term + " on google")
#8 time table
if there_exists(["show my time table"]):
im = Image.open(r"D:\WhatsApp Image 2019-12-26 at 10.51.10 AM.jpeg")
im.show()
#9 weather
if there_exists(["weather"]):
search_term = voice_data.split("for")[-1]
url = "https://www.google.com/search?sxsrf=ACYBGNSQwMLDByBwdVFIUCbQqya-ET7AAA%3A1578847393212&ei=oUwbXtbXDN-C4-EP-5u82AE&q=weather&oq=weather&gs_l=psy-ab.3..35i39i285i70i256j0i67l4j0i131i67j0i131j0i67l2j0.1630.4591..5475...1.2..2.322.1659.9j5j0j1......0....1..gws-wiz.....10..0i71j35i39j35i362i39._5eSPD47bv8&ved=0ahUKEwiWrJvwwP7mAhVfwTgGHfsNDxsQ4dUDCAs&uact=5"
webbrowser.get().open(url)
engine_speak("Here is what I found for on google")
    #10 stone paper scissors
if there_exists(["game"]):
voice_data = record_audio("choose among rock paper or scissor")
moves=["rock", "paper", "scissor"]
cmove=random.choice(moves)
pmove=voice_data
engine_speak("The computer chose " + cmove)
engine_speak("You chose " + pmove)
#engine_speak("hi")
if pmove==cmove:
engine_speak("the match is draw")
elif pmove== "rock" and cmove== "scissor":
engine_speak("Player wins")
elif pmove== "rock" and cmove== "paper":
engine_speak("Computer wins")
elif pmove== "paper" and cmove== "rock":
engine_speak("Player wins")
elif pmove== "paper" and cmove== "scissor":
engine_speak("Computer wins")
elif pmove== "scissor" and cmove== "paper":
engine_speak("Player wins")
elif pmove== "scissor" and cmove== "rock":
engine_speak("Computer wins")
#11 toss a coin
if there_exists(["toss","flip","coin"]):
moves=["head", "tails"]
cmove=random.choice(moves)
engine_speak("The computer chose " + cmove)
#12 calc
if there_exists(["plus","minus","multiply","divide","power","+","-","*","/"]):
        opr = voice_data.split()[1]
        # accept both the spoken word and the symbol form, since the recognizer
        # may return either one
        if opr in ('+', 'plus'):
            engine_speak(int(voice_data.split()[0]) + int(voice_data.split()[2]))
        elif opr in ('-', 'minus'):
            engine_speak(int(voice_data.split()[0]) - int(voice_data.split()[2]))
        elif opr in ('*', 'multiply'):
            engine_speak(int(voice_data.split()[0]) * int(voice_data.split()[2]))
        elif opr in ('/', 'divide'):
            engine_speak(int(voice_data.split()[0]) / int(voice_data.split()[2]))
        elif opr == 'power':
            engine_speak(int(voice_data.split()[0]) ** int(voice_data.split()[2]))
        else:
            engine_speak("Wrong Operator")
#13 screenshot
if there_exists(["capture","my screen","screenshot"]):
myScreenshot = pyautogui.screenshot()
myScreenshot.save('Documents/screen.png')
#14 to search wikipedia for definition
if there_exists(["definition of"]):
definition=record_audio("what do you need the definition of")
url=urllib.request.urlopen('https://en.wikipedia.org/wiki/'+definition)
soup=bs.BeautifulSoup(url,'lxml')
definitions=[]
for paragraph in soup.find_all('p'):
definitions.append(str(paragraph.text))
if definitions:
if definitions[0]:
engine_speak('im sorry i could not find that definition, please try a web search')
elif definitions[1]:
engine_speak('here is what i found '+definitions[1])
else:
engine_speak ('Here is what i found '+definitions[2])
else:
engine_speak("im sorry i could not find the definition for "+definition)
if there_exists(["exit", "quit", "goodbye"]):
engine_speak("bye")
exit()
time.sleep(1)
person_obj = person()
asis_obj = asis()
asis_obj.name = 'Eevee'
engine = pyttsx3.init()
while(1):
voice_data = record_audio("Recording") # get the voice input
print("Done")
print("Q:", voice_data)
respond(voice_data) # respond
|
[
"maxiicanowow@gmail.com"
] |
maxiicanowow@gmail.com
|
0720062a7d6fdc8eaa26ee2a812a2cb7dba81c8b
|
b22c848d8d3257b58e25248721fa419cb2abaeb9
|
/home/urls.py
|
a23bb96b54da6157605d2c14f38a4cac24b2efb4
|
[] |
no_license
|
AlexNexton/soleki
|
69983de38a1ca709b4c1bc093f75debf105c106e
|
9c37cc5b6924a332ef1f0d534ed8087ec07d1034
|
refs/heads/master
| 2023-04-21T07:47:57.966541
| 2021-05-14T04:34:21
| 2021-05-14T04:34:21
| 358,224,825
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='home'),
]
|
[
"cianoc13@gmail.com"
] |
cianoc13@gmail.com
|
ead8d7810383dbd0c853e722095e2c66e7b896e6
|
1b0a9fa04e27f67ac639c11459bfd81fa6ad4e38
|
/root/models.py
|
1618da9fff528a7bca7488cd1fff052b3e25965c
|
[] |
no_license
|
JohnatanPalacios/BookStoreUTP
|
636146a0e140ccf3f9d898afa72b88898dad18d3
|
c00a6d611d0f5315a5e0674635c3d6cf48d5735a
|
refs/heads/master
| 2023-05-05T09:33:34.583501
| 2021-05-26T03:29:12
| 2021-05-26T03:29:12
| 370,855,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
from django.db import models
class Newuser(models.Model):
Email = models.CharField(max_length=150)
Username = models.CharField(max_length=150)
UserApellido = models.CharField(max_length=150)
MartialStatus = models.CharField(max_length=150)
Age = models.IntegerField()
Pwd = models.CharField(max_length=150)
Pwd1 = models.CharField(max_length=150)
Gender = models.CharField(max_length=1)
class auth_user(models.Model):
Age=models.IntegerField()
|
[
"39398211+JohnatanPalacios@users.noreply.github.com"
] |
39398211+JohnatanPalacios@users.noreply.github.com
|
4756b4996bfc26be5bcb6bbd162c1c75e8e8919e
|
3fc39ad0145702d7ea94098c8de0419504de7adb
|
/project/src/location/views.py
|
780ffbac7e4d08f883238d550a185510dfa4dc00
|
[
"MIT"
] |
permissive
|
fahmihidayah/DjangoBaseProject
|
22e1d1415f0f219921219fe172819117d82cec84
|
9044afe92b42ac2f677f75b796515e740402f8f4
|
refs/heads/master
| 2020-05-02T19:32:41.713071
| 2019-04-01T16:23:03
| 2019-04-01T16:23:03
| 178,161,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
from django.views.generic import DetailView, ListView, UpdateView, CreateView
from .models import City, Country, Address, Province
from .forms import CityForm, CountryForm, AddressForm, ProvinceForm
class CityListView(ListView):
model = City
class CityCreateView(CreateView):
model = City
form_class = CityForm
class CityDetailView(DetailView):
model = City
class CityUpdateView(UpdateView):
model = City
form_class = CityForm
class CountryListView(ListView):
model = Country
class CountryCreateView(CreateView):
model = Country
form_class = CountryForm
class CountryDetailView(DetailView):
model = Country
class CountryUpdateView(UpdateView):
model = Country
form_class = CountryForm
class AddressListView(ListView):
model = Address
class AddressCreateView(CreateView):
model = Address
form_class = AddressForm
class AddressDetailView(DetailView):
model = Address
class AddressUpdateView(UpdateView):
model = Address
form_class = AddressForm
class ProvinceListView(ListView):
model = Province
class ProvinceCreateView(CreateView):
model = Province
form_class = ProvinceForm
class ProvinceDetailView(DetailView):
model = Province
class ProvinceUpdateView(UpdateView):
model = Province
form_class = ProvinceForm
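# --- Hypothetical usage sketch (not part of the original file) ---
# These class-based views only become reachable once they are wired into a URLconf.
# A minimal urls.py for the City views might look like the following; the route
# strings and names are illustrative assumptions, not the project's actual routing.
#
# from django.urls import path
# from .views import CityListView, CityCreateView, CityDetailView, CityUpdateView
#
# urlpatterns = [
#     path('cities/', CityListView.as_view(), name='city-list'),
#     path('cities/add/', CityCreateView.as_view(), name='city-create'),
#     path('cities/<int:pk>/', CityDetailView.as_view(), name='city-detail'),
#     path('cities/<int:pk>/edit/', CityUpdateView.as_view(), name='city-update'),
# ]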
|
[
"fahmi.hidayah.cs@gmail.com"
] |
fahmi.hidayah.cs@gmail.com
|
a847dcde6a32c222103b099e71f770157530882e
|
1bd3076902117867ec048210905195ba2aaaaa6b
|
/exercise/leetcode/python_src/by2017_Sep/Leet234_2.py
|
2fc3f4fd912e27b52c33613521009eade9ed46bf
|
[] |
no_license
|
SS4G/AlgorithmTraining
|
d75987929f1f86cd5735bc146e86b76c7747a1ab
|
7a1c3aba65f338f6e11afd2864dabd2b26142b6c
|
refs/heads/master
| 2021-01-17T20:54:31.120884
| 2020-06-03T15:04:10
| 2020-06-03T15:04:10
| 84,150,587
| 2
| 0
| null | 2017-10-19T11:50:38
| 2017-03-07T03:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 343
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
z=[1,2,3]
z2=[1,2,3]
print(id(z),id(z2))
print(z==z2)
print(z is z2)
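# --- Hypothetical sketch (not in the original file) ---
# What an actual palindrome check for a singly-linked list could look like:
# collect the values in order and compare the list with its reverse.
class SolutionSketch(object):
    def isPalindrome(self, head):
        vals = []
        while head:
            vals.append(head.val)
            head = head.next
        return vals == vals[::-1]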
|
[
"songziheng@xiaomi.com"
] |
songziheng@xiaomi.com
|
192a20089f9803f06a457ae730dde748802b21e2
|
283f9fc69e0b00051a79232fc2cbaabdab589c18
|
/flashsale/xiaolupay/apis/v1/exceptions.py
|
53aaee79459e17921f5f76d68a1d227c87e06b86
|
[] |
no_license
|
nidepuzi/ndpuzsys
|
572b67a84fcd6c4fa4d49d3bdb0eb826e7791e62
|
be58dc8f1f0630d3a04e551911f66d9091bedc45
|
refs/heads/master
| 2023-01-06T22:52:49.861479
| 2019-07-09T11:00:06
| 2019-07-09T11:00:06
| 188,955,119
| 1
| 0
| null | 2022-12-26T20:15:24
| 2019-05-28T04:41:35
|
Python
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
# coding: utf8
from __future__ import absolute_import, unicode_literals
class XiaoluPayException(BaseException):
pass
class ChannelNotCompleteException(XiaoluPayException):
pass
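# --- Hypothetical usage sketch (not part of the original module) ---
# How calling code might use this exception hierarchy; the channel-checking helper
# below is an illustrative assumption, not part of the payment SDK.
#
# def ensure_channel_complete(channel_config):
#     if not channel_config.get('app_id') or not channel_config.get('private_key'):
#         raise ChannelNotCompleteException('payment channel is not fully configured')
#
# try:
#     ensure_channel_complete({'app_id': 'demo'})
# except XiaoluPayException as exc:
#     print('payment setup error: %s' % exc)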
|
[
"xiuqing.mei@xiaolu.so"
] |
xiuqing.mei@xiaolu.so
|
0efcca57da4995c62dec310d862bf32a0f6d1f29
|
d9791df28169067788a37e29b4b4b6a138dba296
|
/custom_powerline_shell/theme_clean.py
|
6ac3051c5d696c49db7ccd20c9222d3288668551
|
[
"MIT"
] |
permissive
|
lexrupy/simple_dotfiles
|
b9e823ace8e350dd381cb6a8c7e837310a713237
|
e7039a2392b32645b4b8c4852988a2ce28b79f77
|
refs/heads/master
| 2023-09-01T09:17:05.969207
| 2023-08-20T03:13:36
| 2023-08-20T03:13:36
| 900,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from powerline_shell.themes.default import DefaultColor
class Color(DefaultColor):
"""Basic theme which only uses colors in 0-15 range"""
USERNAME_FG = 8
USERNAME_BG = 15
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 8
HOSTNAME_BG = 7
HOME_SPECIAL_DISPLAY = False
# PATH_BG = 8 # dark grey
PATH_BG = 0
PATH_FG = 10
CWD_FG = 10
SEPARATOR_FG = 0
READONLY_BG = 1
READONLY_FG = 15
# REPO_CLEAN_BG = 11
# REPO_CLEAN_FG = 0
# REPO_DIRTY_BG = 9
# REPO_DIRTY_FG = 255
REPO_CLEAN_BG = 0
REPO_CLEAN_FG = 11
REPO_DIRTY_BG = 0
    REPO_DIRTY_FG = 9  # bright red (ANSI color 9)
GIT_STASH_BG = 0
GIT_STASH_FG = 15
GIT_AHEAD_BG = 0
GIT_BEHIND_BG = 0
GIT_STAGED_BG = 0
GIT_NOTSTAGED_BG = 0
GIT_UNTRACKED_BG = 0
GIT_CONFLICTED_BG = 0
JOBS_FG = 14
JOBS_BG = 8
CMD_PASSED_BG = 0
CMD_PASSED_FG = 15
CMD_FAILED_BG = 0
CMD_FAILED_FG = 11
SVN_CHANGES_BG = REPO_DIRTY_BG
SVN_CHANGES_FG = REPO_DIRTY_FG
VIRTUAL_ENV_FG = 200
VIRTUAL_ENV_BG = 0
# VIRTUAL_ENV_FG = 200
# VIRTUAL_ENV_BG = 240
AWS_PROFILE_FG = 14
AWS_PROFILE_BG = 8
# TIME_FG = 255
# TIME_BG = 39
TIME_FG = 39
TIME_BG = 0
|
[
"lexrupy@gmail.com"
] |
lexrupy@gmail.com
|
081f59a2b8a2ff31e2cfbbf5e38e067819a6b053
|
cb9a03113111ac9472bbd0fd74611c396315bb92
|
/algorithm/leetcode/two_sums.py
|
352eb7ff0b7b9f3b0e4615a610dca5b58531dd8f
|
[] |
no_license
|
maoqiansheng/python_interview
|
8566ef3ccfd81550f6a8ea15aa4d5473d84210b5
|
206592bb8464de40e8eaa992e040f05b69aab2c6
|
refs/heads/main
| 2023-03-03T13:35:50.397939
| 2021-02-08T15:12:42
| 2021-02-08T15:12:42
| 320,850,454
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
"""
给定 nums = [2, 7, 11, 15], target = 9
因为 nums[0] + nums[1] = 2 + 7 = 9
所以返回 [0, 1]
原题链接:https://leetcode.com/problems/two-sum
"""
# 思路:如果我们换一种方式: target - 当前数字, 需要的另外一个变量就变成已知!
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
lookup = {}
for i, num in enumerate(nums):
if target - num in lookup:
                # Why the order lookup[target - num], i?
                # i is the current index, and the complement must come from lookup,
                # which only contains indices stored before i.
                # Hence the order is lookup[target - num], i
return [lookup[target - num], i]
lookup[num] = i
return []
if __name__ == "__main__":
nums = [2, 7, 11, 15]
target = 9
so = Solution()
n = so.twoSum(nums, target)
print("结果: ", n)
|
[
"980054594@qq.com"
] |
980054594@qq.com
|
2b78829fbe94a27d1110030427cfb58b4f715a53
|
410038764b336c28dfe8420d3fbc6346553ce32e
|
/sc13-modeling-sid/regression_old1.py
|
6c234c4fd0d7d2ea39a11ebcc5de7c646bc7667e
|
[] |
no_license
|
avisheks/python-projects
|
2b6eae6834d562af78b10395c0f21654cb47bd75
|
da384f553d91dd442aa53f6588187af8074256cd
|
refs/heads/master
| 2021-01-22T04:41:59.863806
| 2013-11-14T21:04:05
| 2013-11-14T21:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,104
|
py
|
import os
import sys
import numpy as np
import math as m
import random as rnd
import pylab as pl
from sklearn import linear_model as lm
from sklearn import preprocessing as pp
## params
dataFile = sys.argv[1]
finp = open(dataFile, 'r')
## sanity checks
if not os.path.isfile(dataFile):
print 'ERROR:',dataFile,'does not exist.'
sys.exit(0)
## read data file directly from .csv to numpy array
tmp_data = np.genfromtxt(finp, delimiter='', skip_header=1)
#data = np.genfromtxt(finp, dtype="S17,f8,S17,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8", delimiter='', skip_header=1)
idx = 0
finp.seek(0, 0)
finp.next()
dataRows, dataCols = np.shape(tmp_data)
data = np.zeros(shape=(dataRows,dataCols+2))
for line in finp:
words = line.split()
gv_vals = words[0].split('_')
data[idx,0] = float(gv_vals[0])
data[idx,1] = float(gv_vals[1])
data[idx,2] = float(gv_vals[1])
lv_vals = words[2].split('_')
data[idx,3] = float(lv_vals[0])
data[idx,4] = float(lv_vals[1])
data[idx,5] = float(lv_vals[1])
for i in range(4,len(words)):
data[idx,2+i] = float(words[i])
idx = idx+1
#print data
## split datasets
X_raw = data[:,[0,1,2,3,4,5,6,8,9,10,11]]
y = data[:, [14]]
"""
X_raw = data[:,[0,1,2,3,4,6,8,9,10,11,14]]
y = data[:, [5]]
"""
#print X_raw
min_max_scaler = pp.MinMaxScaler()
X = min_max_scaler.fit_transform(X_raw)
#print X
## randomly split datasets and average the models obtained
MAXITER = 10
linregcoef = np.zeros(shape=(MAXITER,11))
ridregcoef = np.zeros(shape=(MAXITER,11))
lasregcoef = np.zeros(shape=(MAXITER,11))
larregcoef = np.zeros(shape=(MAXITER,11))
netregcoef = np.zeros(shape=(MAXITER,11))
for iters in range(0,MAXITER):
idx_all = range(0,dataRows)
rnd.shuffle(idx_all)
idx_train = idx_all[0:int(m.ceil(0.95*dataRows))]
idx_test = idx_all[int(m.ceil(0.95*dataRows)):-1]
data_X_train = X[idx_train]
data_X_test = X[idx_test]
data_y_train = y[idx_train]
data_y_test = y[idx_test]
## Create different regression models and train the models using the training sets
#---- linear regression
linreg = lm.LinearRegression()
linreg.fit(data_X_train, data_y_train)
linregcoef[iters] = linreg.coef_
#---- ridge regression
#ridreg = lm.RidgeCV(alphas=[0.01,0.1,1.0,10.0])
ridreg = lm.Ridge(alpha=0.9)
ridreg.fit(data_X_train, data_y_train)
ridregcoef[iters] = ridreg.coef_
#---- lasso
lasreg = lm.Lasso(alpha=0.1)
#lasreg = lm.LassoCV()
lasreg.fit(data_X_train, data_y_train)
lasregcoef[iters] = lasreg.coef_
#---- lasso LARS
larreg = lm.Lars()
#larreg = lm.LassoLars(alpha=0.1)
#larreg = lm.LarsCV()
larreg.fit(data_X_train, data_y_train)
larregcoef[iters] = larreg.coef_
#---- elastic net
netreg = lm.ElasticNet(alpha=0.1,rho=0.5)
#netreg = lm.ElasticNet()
#netreg = lm.ElasticNetCV()
netreg.fit(data_X_train, data_y_train)
netregcoef[iters] = netreg.coef_
avglinreg = np.mean(linregcoef, axis=0)
avgridreg = np.mean(ridregcoef, axis=0)
avglasreg = np.mean(lasregcoef, axis=0)
avglarreg = np.mean(larregcoef, axis=0)
avgnetreg = np.mean(netregcoef, axis=0)
## print some results
# Print the mean square error and the explained variance score: 1 is perfect prediction
print('Linear: \t Residual sum of squares: %.2f \t Var score: %.2f' % (np.mean((linreg.predict(data_X_test) - data_y_test) ** 2), linreg.score(data_X_test, data_y_test)))
print('Ridge : \t Residual sum of squares: %.2f \t Var score: %.2f' % (np.mean((ridreg.predict(data_X_test) - data_y_test) ** 2), ridreg.score(data_X_test, data_y_test)))
print('Lasso : \t Residual sum of squares: %.2f \t Var score: %.2f' % (np.mean((lasreg.predict(data_X_test) - data_y_test) ** 2), lasreg.score(data_X_test, data_y_test)))
print('Lars : \t Residual sum of squares: %.2f \t Var score: %.2f' % (np.mean((larreg.predict(data_X_test) - data_y_test) ** 2), larreg.score(data_X_test, data_y_test)))
print('ENet : \t Residual sum of squares: %.2f \t Var score: %.2f' % (np.mean((netreg.predict(data_X_test) - data_y_test) ** 2), netreg.score(data_X_test, data_y_test)))
"""
print linreg.predict(data_X_test)
print ridreg.predict(data_X_test)
print lasreg.predict(data_X_test)
print larreg.predict(data_X_test)
print netreg.predict(data_X_test)
"""
## plot the results
"""
pl.plot(data_y_test, color='k', linestyle='-',marker='o',linewidth=3, label='org')
pl.plot(linreg.predict(data_X_test), color='r', linestyle='--', marker='o',linewidth=2, label='linear')
pl.plot(ridreg.predict(data_X_test), color='b', linestyle='--', marker='o',linewidth=2, label='ridge')
pl.plot(lasreg.predict(data_X_test), color='g', linestyle='--', marker='o',linewidth=2, label='lasso')
pl.plot(larreg.predict(data_X_test), color='c', linestyle='--', marker='o',linewidth=2, label='lars')
pl.plot(netreg.predict(data_X_test), color='m', linestyle='--', marker='o',linewidth=2, label='enet')
pl.legend(loc='upper left')
resFile = dataFile.replace('../results/network/','')
pl.savefig(resFile+'_plots.png', bbox_inches=0, rotation=90)
"""
###########################################################
## TESTING PHASE
## params
if len(sys.argv) > 2:
print 'NOW TESTING................'
testdataFile = sys.argv[2]
finp = open(testdataFile, 'r')
## sanity checks
if not os.path.isfile(testdataFile):
print 'ERROR:',testdataFile,'does not exist.'
sys.exit(0)
## read data file directly from .csv to numpy array
tmp_testdata = np.genfromtxt(finp, delimiter='', skip_header=1)
#data = np.genfromtxt(finp, dtype="S17,f8,S17,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8", delimiter='', skip_header=1)
idx = 0
finp.seek(0, 0)
finp.next()
dataRows, dataCols = np.shape(tmp_testdata)
testdata = np.zeros(shape=(dataRows,dataCols+2))
for line in finp:
words = line.split()
gv_vals = words[0].split('_')
testdata[idx,0] = float(gv_vals[0])
testdata[idx,1] = float(gv_vals[1])
testdata[idx,2] = float(gv_vals[1])
lv_vals = words[2].split('_')
testdata[idx,3] = float(lv_vals[0])
testdata[idx,4] = float(lv_vals[1])
testdata[idx,5] = float(lv_vals[1])
for i in range(4,len(words)):
testdata[idx,2+i] = float(words[i])
idx = idx+1
testdata_X_raw = testdata[:,[0,1,2,3,4,5,6,8,9,10,11]]
testdata_y = testdata[:, [14]]
"""
testdata_X_raw = testdata[:,[0,1,2,3,4,6,8,9,10,11,14]]
testdata_y = testdata[:, [5]]
"""
testdata_y_std = testdata[:, [15]]
#print testdata_X_raw
min_max_scaler = pp.MinMaxScaler()
testdata_X = min_max_scaler.fit_transform(testdata_X_raw)
#print testdata_X
#print np.shape(data_X_test)
#print np.shape(testdata_X)
print(linreg.predict(testdata_X))
print(ridreg.predict(testdata_X))
print(lasreg.predict(testdata_X))
print(larreg.predict(testdata_X))
print(netreg.predict(testdata_X))
#pl.errorbar(range(1,dataRows+1),testdata_y.flatten().tolist(), testdata_y_std.flatten().tolist(), color='k', linestyle='-',marker='o',linewidth=3, label='org')
pl.plot(range(1,dataRows+1),testdata_y.flatten().tolist(), color='k', linestyle='-',marker='o',linewidth=3, label='org')
pl.plot(range(1,dataRows+1),linreg.predict(testdata_X), color='r', linestyle='--', marker='o',linewidth=2, label='linear')
pl.plot(range(1,dataRows+1),ridreg.predict(testdata_X), color='b', linestyle='--', marker='o',linewidth=2, label='ridge')
pl.plot(range(1,dataRows+1),lasreg.predict(testdata_X), color='g', linestyle='--', marker='o',linewidth=2, label='lasso')
pl.plot(range(1,dataRows+1),larreg.predict(testdata_X), color='c', linestyle='--', marker='o',linewidth=2, label='lars')
pl.plot(range(1,dataRows+1),netreg.predict(testdata_X), color='m', linestyle='--', marker='o',linewidth=2, label='enet')
pl.legend(loc='upper left')
#pl.ylim([0,300000])
#pl.ylim([0,75000])
resFile = dataFile.replace('../results/network/','')
#resFile = dataFile.replace('../results/','')
pl.savefig(resFile+'_plots.png', bbox_inches=0, rotation=90)
print 'Hi Hi Hi'
print np.shape(range(1,dataRows+1))
print np.shape(linreg.coef_)
print np.shape(testdata_X)
#pl.errorbar(range(1,dataRows+1),testdata_y.flatten().tolist(), testdata_y_std.flatten().tolist(), color='k', linestyle='-',marker='o',linewidth=3, label='org')
pl.errorbar(range(1,dataRows+1),testdata_y.flatten().tolist(), color='k', linestyle='-',marker='o',linewidth=3, label='org')
pl.plot(range(1,dataRows+1),np.dot(testdata_X,np.transpose(avglinreg)), color='r', linestyle='dotted', marker='o',linewidth=4, label='linear')
pl.plot(range(1,dataRows+1),np.dot(testdata_X,np.transpose(avgridreg)), color='b', linestyle='dotted', marker='o',linewidth=4, label='ridge')
pl.plot(range(1,dataRows+1),np.dot(testdata_X,np.transpose(avglasreg)), color='g', linestyle='dotted', marker='o',linewidth=4, label='lasso')
pl.plot(range(1,dataRows+1),np.dot(testdata_X,np.transpose(avglarreg)), color='c', linestyle='dotted', marker='o',linewidth=4, label='lars')
pl.plot(range(1,dataRows+1),np.dot(testdata_X,np.transpose(avgnetreg)), color='m', linestyle='dotted', marker='o',linewidth=4, label='enet')
pl.legend(loc='upper left')
#pl.ylim([0,75000])
#pl.ylim([0,300000])
resFile = dataFile.replace('../results/network/','')
#resFile = dataFile.replace('../results/','')
pl.savefig(resFile+'_plots2.png', bbox_inches=0, rotation=90)
## THINGS TO DO:
## -- DONE: try out other regression models (gaussian processes)
## -- let's ignore bayesian and gpm regression. too difficult to explain.
## -- let's go with simpler models
## -- DONE: try to break GV and LV features and see
## -- add cross validation to fine tune the parameters
## -- repeat the learning over multiple subsets and then provide a final answer
## (maybe will be subsumed within the CV phase)
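## --- Hypothetical sketch (not in the original script) of the cross-validation TODO ---
## one way to "add cross validation to fine tune the parameters" is to swap the
## fixed-alpha estimators above for their CV variants; the alpha grid below is an
## illustrative guess, not a value taken from this experiment
#
# ridreg_cv = lm.RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0])
# lasreg_cv = lm.LassoCV()
# netreg_cv = lm.ElasticNetCV()
# ridreg_cv.fit(data_X_train, data_y_train)
# lasreg_cv.fit(data_X_train, data_y_train.ravel())
# netreg_cv.fit(data_X_train, data_y_train.ravel())
# print 'best ridge alpha:', ridreg_cv.alpha_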
|
[
"avishek.saha@gmail.com"
] |
avishek.saha@gmail.com
|
698743c20a454d92c763b878c6631efac254424d
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/Refuse.py
|
2b7e68efd408a30e6ade250e531d8989e57d4547
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Refuse(object):
def __init__(self):
self._code = None
self._msg = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def msg(self):
return self._msg
@msg.setter
def msg(self, value):
self._msg = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.msg:
if hasattr(self.msg, 'to_alipay_dict'):
params['msg'] = self.msg.to_alipay_dict()
else:
params['msg'] = self.msg
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Refuse()
if 'code' in d:
o.code = d['code']
if 'msg' in d:
o.msg = d['msg']
return o
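# --- Hypothetical usage sketch (not part of the original SDK file) ---
# Round-tripping a Refuse object through its dict form; the field values below are
# made up purely for illustration.
if __name__ == '__main__':
    demo = Refuse()
    demo.code = 'REFUSED'
    demo.msg = 'demo refusal reason'
    payload = demo.to_alipay_dict()
    restored = Refuse.from_alipay_dict(payload)
    print(json.dumps(payload))  # {"code": "REFUSED", "msg": "demo refusal reason"}
    print(restored.code)        # REFUSED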
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
1e8c4ead72192ee7612a1b36364cc67c251b236f
|
156796fca5dda2cf194ef0125f72b9f16736deaa
|
/receiver.py
|
0d28b015e6d49ae5bec009bc12e5c70053a8db42
|
[] |
no_license
|
auto-contest-kookmin/RPi-Laser-Receiver
|
29f42fe0b28119c325600944b35816a37ed8cdab
|
779813007f65910a9a65b180f3435adb7e393dfa
|
refs/heads/master
| 2020-09-22T08:32:50.000945
| 2019-12-01T11:52:43
| 2019-12-01T11:52:43
| 225,122,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,660
|
py
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
from datetime import datetime
from PCF8591 import PCF8591
import requests
import json
from ast import literal_eval
API_SERVER_URL = 'http://192.168.0.71:5000'
# Photoresistor (laser receiver) module
class LaserMeasure(object):
def __init__(self, addr=0x48, device_num=0):
# PCF8591_MODULE SETUP
self.PCF8591_MODULE = PCF8591.PCF8591()
# SET GPIO WARNINGS AS FALSE
GPIO.setwarnings(False)
# SET DEVICE ID
self.device_id = device_num
def get_object_detected(self):
value = self.PCF8591_MODULE.read(2)
        # the threshold value may need adjusting for the environment
return value > 30
def destroy(self):
GPIO.cleanup()
def main():
try:
# Init
receiver = LaserMeasure(0x48, 0)
before_status = False
current_job = 0
temp = requests.get(API_SERVER_URL + '/getid').text
car_id = int(temp.rstrip())
last_update = time.time()
last_log = time.time()
while True:
if time.time() - last_update > 2:
temp = int(requests.get(API_SERVER_URL + '/getid').text.rstrip())
if car_id != temp:
if temp != -1 and current_job == 0:
print(str(temp) + "번 차량 출발")
elif car_id != -1 and temp == -1 and current_job != 0:
print(str(car_id) + "번 차량 실격")
current_job = 0
car_id = temp
last_update = time.time()
sensor_value = receiver.get_object_detected()
log_timestamp = time.time()
if log_timestamp - last_log > 1:
print("현재 센서 측정 결과:", "감지됨" if sensor_value else "감지 안됨")
last_log = log_timestamp
if sensor_value and car_id != -1:
                # only act when the sensor was not already triggered in the previous loop
if not before_status:
                    # mark the beam as detected
before_status = True
                    # start-line check -> tell the server that lap timing is in progress
if current_job == 0:
res = requests.post(API_SERVER_URL + "/status", {"status": "1"})
print("Start LapTime Check Result:", literal_eval(res.text)["result"])
                    # current time
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
print("통과 시각:", current_time)
                    # push the timestamp to the DB
res = requests.post(API_SERVER_URL + '/settime', {"car_id": str(car_id), "cur_job": str(current_job), "timestamp": current_time})
print("Update Time Result:", literal_eval(res.text)["result"])
                    # if all three laps have been completed
if current_job == 3:
                        # tell the server that the measurement is finished
res = requests.post(API_SERVER_URL + "/status", {"status": "0"})
print("Finish LapTime Check Result:", literal_eval(res.text)["result"])
                        # run complete
current_job = 0
print(str(car_id) + "번 차량 주행 완료")
res = requests.post(API_SERVER_URL + '/setfinished')
print("Finish Check Result:", literal_eval(res.text)["result"])
print("현재 센서 측정 결과:", "감지됨" if receiver.get_object_detected() else "감지 안됨")
final_lap_last_log = time.time()
log_cnt = 0
while log_cnt < 3:
final_lap_log_timestamp = time.time()
if final_lap_log_timestamp - final_lap_last_log > 1:
print("현재 센서 측정 결과:", "감지됨" if receiver.get_object_detected() else "감지 안됨")
log_cnt += 1
final_lap_last_log = final_lap_log_timestamp
else:
current_job += 1
                    # check the lap time again after 3 seconds
time.sleep(3)
else:
before_status = False
time.sleep(0.005)
    # shutdown
except KeyboardInterrupt:
receiver.destroy()
if __name__ == '__main__':
main()
|
[
"xcm1321@gmail.com"
] |
xcm1321@gmail.com
|