blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7ce859d1aeafe754e3298ab2f867f74b8b2f75b0 | 3aa343d79d0d6286a511c8745b698471792c47e6 | /tutorial/urls.py | 4c9c09a196ddffe46b555cd6e1baa20eb7e8c286 | [] | no_license | akcezzz/Tutorial | a80e24f3e1ab7b28821599841008c76f511ad09a | bcae2c5d6661a1f4ae79db8316f08a7219a798ee | refs/heads/master | 2020-05-23T09:00:46.534817 | 2019-05-14T21:49:41 | 2019-05-14T21:49:41 | 186,698,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, path
from rest_framework import routers
from tutorial.quickstart import views
# DRF router auto-generates the list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Wire up the admin, the router-generated API routes at the site root, and
# DRF's session login/logout views for the browsable API.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| [
"axil-pacha@hotmail.com"
] | axil-pacha@hotmail.com |
835d78cc36290fc2fe0ecf0ca6a00d9de1712ccf | e4a8a8066f656761f85905e24486903b4094d3d0 | /DB_site/apps.py | 50263391bbfcc608bc29a88a5727e906b3fd2e14 | [] | no_license | wikibady/DB_lol | 3acfebd7330853557ea5ecf9b9c7f33a584bb1d9 | 43654e0fbd345788086ec0e8ad575460c1a9b355 | refs/heads/master | 2021-01-18T10:47:32.158147 | 2016-05-17T13:42:26 | 2016-05-17T13:42:26 | 59,025,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class DbSiteConfig(AppConfig):
    """Django application configuration for the DB_site app."""
    name = 'DB_site'
| [
"hysuperliu@wikibady.com"
] | hysuperliu@wikibady.com |
9d3d87b1db818f478f4aa85b0c257eee39b0700b | c609730a43596a2d3303f072fc97d9cf681fac7b | /cagey/usedcar/main_haoche99.py | 386a9e6ee701ee754cd28189f895ff6701bf3b18 | [] | no_license | sinnettluo/ChenProject | 5403311c0c7b78c484145e16d692abff00d2a110 | 0e33ecf1683afb22f1deb4bd54294c41aed8a46b | refs/heads/master | 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from scrapy.cmdline import execute
import sys
import os
website = "haoche99"  # spider name handed to `scrapy crawl`
# Ensure this script's directory is importable before launching the crawl.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", website])
| [
"1316446041@qq.com"
] | 1316446041@qq.com |
1ada52b90f298f06b7a57e15ded114cdbd381a91 | 2b9e57bd48c4b55d118a0e9f395faad507a782f0 | /modules/geometry.py | 86249494df29080d34a1fd2c8a055b8d4110296b | [] | no_license | Wei-Tso/Python-Tutorial_From-YouTube | 97f6bc60a7b48176eeb431f84ac1feae06d4ebf2 | 51381111e9a38b0b0003197bcee978bdb6d570ac | refs/heads/main | 2023-01-02T15:22:28.498845 | 2020-11-04T17:53:30 | 2020-11-04T17:53:30 | 310,073,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | # 在 geometry 模組中定義幾何運算功能
# Compute the distance between two points
def distance(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2 + dy ** 2) ** 0.5
# Compute the slope of the segment between two points
def slope(x1, y1, x2, y2):
    """Return the slope of the line segment from (x1, y1) to (x2, y2)."""
    rise = y2 - y1
    run = x2 - x1
    return rise / run
# Compute the average
def average(*numbers):
    """Return the arithmetic mean of the given numbers.

    Raises ZeroDivisionError when called with no arguments.
    """
    return sum(numbers) / len(numbers)
# Addition
def add(*numbers):
    """Return the sum of the given numbers (0 when none are given)."""
    return sum(numbers)
# Subtraction
def subtract(*numbers):
    """Subtract every remaining number from the first one.

    Note: only the first argument is truncated with int() before the
    subtraction; the rest are used as-is.
    """
    first = int(numbers[0])
    return first - sum(numbers[1:])
# Multiplication
def multiply(*numbers):
    """Return the product of the given numbers (1 when none are given)."""
    product = 1
    for factor in numbers:
        product *= factor
    return product
# Division
def divide(*numbers):
numbersList = list(numbers)
withoutN1List = numbersList[1:]
n1 = int(numbersList[0])
basic = 1
if n1 == 0:
return 0
else:
for n in withoutN1List:
if (0 in withoutN1List)==True:
return "分母不能為 0"
else:
basic = basic * n
return (n1/basic) | [
"kobe87020@gmail.com"
] | kobe87020@gmail.com |
1568aa1de7c81345ca53eb818fa78232de792b4b | 748b55e0c02949ddb62c6f52b10be2ed7d9c871c | /Recursion/11729.Hanoi-tower/윤선영.py | 2297282929024207735dd9669b58f8a29e2a1758 | [] | no_license | seojisoosoo/Algorithm | 5305dd2ee5c819e639204fbe250a55dc979af006 | d4a9b3d6c7a87e3e2913aced0cdaa94f20832a45 | refs/heads/main | 2023-08-28T18:28:08.181742 | 2021-10-17T15:38:54 | 2021-10-17T15:38:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | def hanoi(N,start,dest,via):
if N == 1:
move.append([start,dest])
else :
hanoi(N-1,start,via,dest)
move.append([start,dest])
hanoi(N-1,via,dest,start)
N = int(input())
move = []
hanoi(N,1,3,2)
print(len(move))
for i in range(len(move)):
print(move[i][0],move[i][1]) | [
"64634970+yunseonyeong@users.noreply.github.com"
] | 64634970+yunseonyeong@users.noreply.github.com |
bcc938c96292fc6885f94488a3965fd6128af6ed | 5e14b8e24a1a3255b67006bafde2f809cf3e7f5c | /profiles/schema.py | 6129c5dbd075ce407932858596e1eabdecad3c7e | [] | no_license | marcoacierno/Pasteit-Backend | c2f5c9c2792deb01ea85732a98b963a8c2c16717 | 0088dfd69e631a8b6656cf4deeb568c1324cabbe | refs/heads/master | 2021-01-15T15:04:33.886036 | 2016-06-06T08:17:15 | 2016-06-06T08:17:15 | 58,814,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | import graphene
from django.contrib.auth import get_user_model
from graphene import relay, with_context
from graphene.contrib.django import DjangoNode
class UserNode(DjangoNode):
    # Relay node wrapping the Django user model; `pastes` is exposed as a
    # relay connection backed by the user's related manager of the same name.
    pastes = relay.ConnectionField('PasteNode')
    node = relay.NodeField()

    def resolve_pastes(self, args, info):
        # Return every paste owned by this user.
        return self.pastes.all()

    class Meta:
        model = get_user_model()
        # Keep sensitive/auth-internal fields out of the public schema.
        exclude_fields = ('is_staff', 'is_superuser', 'password', 'is_active', 'user')
class Query(graphene.ObjectType):
    # Abstract root-query mixin: exposes relay `node` lookups plus `me`,
    # the currently authenticated user (null for anonymous requests).
    node = relay.NodeField()
    me = graphene.Field(UserNode)

    @with_context
    def resolve_me(self, args, context, info):
        me = context.user
        if me.is_anonymous() is True:
            # Anonymous requests resolve to null instead of raising.
            return None
        # raise ValueError('You cannot query yourself if you are not logged')
        return me

    class Meta:
        abstract = True
| [
"marcoacierno96@gmail.com"
] | marcoacierno96@gmail.com |
374054f4b1cc28d43773f1c286075531a60030a4 | ca2ed68bcd084913cf592f4e856484be01e2bce0 | /week028--get-in-line/charles.mcmillan/solution.py | 19d3c4759ec5878ea634557b18907660590b8334 | [] | no_license | viewthespace/code-challenge | ddcab182abbda3d1fd383bb742972dcdcaeb2758 | 3ceec2ceb1c8b688a5f1a232992cb8155325fc1a | refs/heads/master | 2021-03-30T15:54:48.321310 | 2019-03-20T02:53:38 | 2019-03-20T02:53:38 | 42,602,268 | 0 | 1 | null | 2019-03-05T01:38:45 | 2015-09-16T17:20:44 | OCaml | UTF-8 | Python | false | false | 2,509 | py | ##
## Time Complexity: O(n^2)
## Space Complexity: O(n^2)
##
import toml
import ast
import math
toml_dict = toml.load('../testcases.toml')
class Line:
    """A line described by `slope` and `intercept`.

    Hash and string form are derived from the two coefficients, so equal
    lines can be used interchangeably as dictionary keys.
    """

    def __init__(self, slope, intercept):
        self.slope = slope
        self.intercept = intercept

    def __str__(self):
        return f"{self.slope}x + {self.intercept}"

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return (self.slope, self.intercept) == (other.slope, other.intercept)
class Point:
    """A 2-D point with `x` and `y` coordinates, hashable by its text form."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return f"(x: {self.x}, y: {self.y})"

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)
class Solution:
    """Find the maximum number of the given points lying on one straight line.

    Every point pair is mapped to its Line (slope + intercept); points seen
    on the same Line accumulate in a set, and the largest set wins.
    Time/space are O(n^2) in the number of points.
    """

    def __init__(self, points_array):
        # Wrap raw [x, y] pairs in Point objects.
        self.points_array = [Point(point[0], point[1]) for point in points_array]
        # Maps each Line to the set of points observed on it.
        self.line_hash = {}

    def max_linear_points(self):
        """Return the size of the largest collinear subset of the points."""
        for point1 in self.points_array:
            for point2 in self.points_array:
                if point1 == point2:
                    # NOTE(review): value (not identity) equality also skips
                    # pairs of duplicate input points -- confirm inputs are
                    # expected to be distinct.
                    continue
                line = self.line_from_points(point1, point2)
                if line in self.line_hash:
                    self.line_hash[line].add(point1)
                    self.line_hash[line].add(point2)
                else:
                    self.line_hash[line] = {point1, point2}
        return self.tally_count(self.line_hash, self.points_array)

    def tally_count(self, line_hash, points_array):
        """Return the largest point-set size in line_hash (0/1 for tiny inputs)."""
        if len(points_array) == 0:
            return 0
        elif len(points_array) == 1:
            return 1
        else:
            maximum = 0
            for line, point_set in line_hash.items():
                number_of_points = len(point_set)
                if number_of_points > maximum:
                    maximum = number_of_points
            return maximum

    def line_from_points(self, point1, point2):
        """Return the Line through two points (vertical lines use slope=inf)."""
        if point1.x == point2.x:
            # Vertical line: encode as infinite slope with x as the "intercept".
            return Line(math.inf, point1.x)
        slope = (point2.y - point1.y) / (point2.x - point1.x)
        # BUG FIX: intercept is y - slope*x. The original computed
        # point2.y - point2.y * slope, which merged distinct parallel lines
        # into the same key and could over-count collinear points.
        intercept = point2.y - point2.x * slope
        return Line(slope, intercept)
# Run every case from testcases.toml against Solution and report PASS/FAIL
# with ANSI-coloured output.
for test_case_name, input_output in toml_dict['test'].items():
    print(f'Running test: {test_case_name}')
    # Inputs/outputs are stored as Python-literal strings inside the TOML file.
    input_values = ast.literal_eval(input_output['input'])
    expected_output = ast.literal_eval(input_output['output'])
    actual_output = Solution(input_values).max_linear_points()
    print(f"  Input: {input_values}")
    if actual_output == expected_output:
        print("\033[92mPASSED\033[0m")
    else:
        print("\033[91mFAILED\033[0m")
        print(f"  Expected Output: {expected_output}")
        print(f"  Actual Output: {actual_output}\n\n")
| [
"charlesmcm@viewthespace.com"
] | charlesmcm@viewthespace.com |
f34ee70b9a52f64e76c7679f05e1caf15fab05e3 | 3750311368d32d3431a6c7e14b8566fb7ad59470 | /backend/src/api/create_store.py | 56e4817140a2b52bf761dd0ef0f6771945eb46db | [] | no_license | queststudio/momma-dog | 5a9a584c9e96d344a70881fa2014c42f5316efb8 | e75951042b50833485667b2b257503ad31a6c3ad | refs/heads/master | 2022-12-11T19:27:15.964172 | 2019-07-07T15:42:16 | 2019-07-07T15:47:49 | 121,881,640 | 1 | 0 | null | 2022-12-07T19:40:40 | 2018-02-17T18:37:08 | Python | UTF-8 | Python | false | false | 459 | py | from src.relays.set_state import set_state
from src.relays.render import render_state
from src.game.store import Store, init_state
from src.game.middlewares import restart_middleware_creator
from src.relays.restart import restart_creator
restart = restart_creator(set_state)
restart_middleware = restart_middleware_creator(restart)
def create_store():
    # Build the game Store with its initial state and the restart middleware,
    # and render every subsequent state change through render_state.
    store = Store(init_state, [restart_middleware])
    store.subscribe(render_state)
    return store
| [
"d.f.goryunov@gmail.com"
] | d.f.goryunov@gmail.com |
d3550d7689399933bc52ca671f322510fc34bf23 | d94c5849e6308901f9af8a4edf8c8369d46576d1 | /BOJ/14499_주사위 굴리기.py | 22ad001dcfef81e9fc7a3e7aee0a5e29963d830e | [] | no_license | AhnDogeon/algorithm_study | b4c961b934b5e27afccdf2713a2ccb0174d9a698 | b8de39fff92cc98281ba7e94df82bcc9b1503243 | refs/heads/master | 2022-06-05T11:33:27.392131 | 2022-05-23T06:37:50 | 2022-05-23T06:37:50 | 188,783,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | import sys
from copy import deepcopy
sys.stdin = open('14499_주사위 굴리기.txt', 'r')
N, M, x, y, K = map(int, input().split())
board = []
for _ in range(N):
board_list = list(map(int, input().split()))
board.append(board_list)
move = list(map(int, input().split()))
# print(move)
#
# print('===========디버깅=====================')
# for i in range(N):
# for j in range(M):
# print(board[i][j], end=' ')
# print()
# print('=====================================')
up = 0
middle = 0
left = 0
right = 0
down = 0
bottom = 0
def RIGHT(a, b):
    """Roll the die one cell east onto board cell (a, b) and print `middle`.

    Updates the face-tracking globals: on an east roll `up`/`down` are
    unchanged while middle/left/right/bottom rotate. If the destination
    cell is 0 it receives the die's bottom face; otherwise the cell value
    is copied onto the bottom face and the cell is reset to 0.
    """
    global board, up, middle, left, right, down, bottom
    # The original deep-copied each face value (a plain int) before
    # reassigning; a simultaneous tuple assignment performs the identical
    # rotation without redundant copies.
    middle, left, right, bottom = left, bottom, middle, right
    if board[a][b] == 0:
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def LEFT(a, b):
    """Roll the die one cell west onto board cell (a, b) and print `middle`.

    `up`/`down` are unchanged on a west roll; middle/left/right/bottom
    rotate in the direction opposite to RIGHT(). Cell/bottom exchange
    follows the same 0-cell rule as RIGHT().
    """
    global board, up, middle, left, right, down, bottom
    # Tuple assignment replaces the original's redundant deepcopy of ints.
    middle, left, right, bottom = right, middle, bottom, left
    if board[a][b] == 0:
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def UP(a, b):
    """Roll the die one cell north onto board cell (a, b) and print `middle`.

    `left`/`right` are unchanged on a north roll; up/middle/down/bottom
    rotate. Cell/bottom exchange follows the same 0-cell rule as RIGHT().
    """
    global board, up, middle, left, right, down, bottom
    # Tuple assignment replaces the original's redundant deepcopy of ints.
    up, middle, down, bottom = middle, down, bottom, up
    if board[a][b] == 0:
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def DOWN(a, b):
    """Roll the die one cell south onto board cell (a, b) and print `middle`.

    `left`/`right` are unchanged on a south roll; up/middle/down/bottom
    rotate in the direction opposite to UP(). Cell/bottom exchange follows
    the same 0-cell rule as RIGHT().
    """
    global board, up, middle, left, right, down, bottom
    # Tuple assignment replaces the original's redundant deepcopy of ints.
    up, middle, down, bottom = bottom, up, middle, down
    if board[a][b] == 0:
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
# Execute each movement command: 1=east, 2=west, 3=north, 4=south.
# A move that would leave the N x M board is ignored (no roll, no output).
for i in move:
    if i == 1:
        dx, dy = x, y + 1
        if 0 <= dx < N and 0 <= dy < M:
            RIGHT(dx, dy)
            x, y = dx, dy
    elif i == 2:
        dx, dy = x, y - 1
        if 0 <= dx < N and 0 <= dy < M:
            LEFT(dx, dy)
            x, y = dx, dy
    elif i == 3:
        dx, dy = x - 1, y
        if 0 <= dx < N and 0 <= dy < M:
            UP(dx, dy)
            x, y = dx, dy
    elif i == 4:
        dx, dy = x + 1, y
        if 0 <= dx < N and 0 <= dy < M:
            DOWN(dx, dy)
            x, y = dx, dy
#
# print('===========디버깅=====================')
# for i in range(N):
# for j in range(M):
# print(board[i][j], end=' ')
# print()
# print('=====================================')
#
| [
"qltiqlti@gmail.com"
] | qltiqlti@gmail.com |
d89692a51b4763f9f8a3060e3dbea201c0530805 | 3344516cfaade0f0d2223c84127aefb91a8a8071 | /python_intermedio/assert_statement.py | a945ee9cdb30fc096fc54582fd69b79bf3b0cdc3 | [] | no_license | maurogome/platzi | 91ff14519dcfe48a26bfb34d2031e4e6146e131e | 56e51cef4b2ec82e8c52d3c384202a42c480817c | refs/heads/master | 2023-04-09T23:48:02.730877 | 2021-04-25T16:36:28 | 2021-04-25T16:36:28 | 268,672,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | def divisors(num):
divisors = []
for i in range(1, num + 1):
if num % i == 0:
divisors.append(i)
return divisors
def run():
    # Read a number from stdin, validate it, and print its divisors.
    num = input("Ingrese un número:")
    # NOTE(review): assert statements are stripped under `python -O`;
    # raising ValueError would be more robust for real input validation.
    assert num.strip("-").isnumeric(), "Debes ingresar un numero"
    assert int(num) > 0, "El numero debe ser positivo"
    print(divisors(int(num)))
    print("Final del programa")
if __name__ == "__main__":
run() | [
"mauriciogom@gmail.com"
] | mauriciogom@gmail.com |
0d5757a1a9ed5bcbb7dbb9f0d6480b75d12b5efe | 4d1f1e188a4db8e909430b55bddf0d8113a28fcf | /reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py | 5b5895e3aafe8d93a6fc7131ffb272cf3044f4a9 | [] | no_license | paurbano/holbertonschool-machine_learning | b0184a71733a1f51633ba7c7f4d3a82b8d50e94f | ff1af62484620b599cc3813068770db03b37036d | refs/heads/master | 2023-07-02T16:20:13.668083 | 2023-06-18T06:25:26 | 2023-06-18T06:25:26 | 279,967,511 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | #!/usr/bin/env python3
'''Epsilon Greedy
https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/
master/Q%20learning/FrozenLake/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
def epsilon_greedy(Q, state, epsilon):
    '''Pick the next action for `state` with an epsilon-greedy policy.

    Based on:
    https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/
    master/Q%20learning/FrozenLake/Q%20Learning%20with%20FrozenLake.ipynb

    Args:
        Q: numpy.ndarray containing the q-table (states x actions)
        state: the current state index
        epsilon: exploration probability used for the draw
    Returns: the next action index
    '''
    roll = np.random.uniform(0, 1)
    if roll > epsilon:
        # Exploit: take the action with the largest Q value for this state.
        return np.argmax(Q[state, :])
    # Explore: pick a uniformly random action.
    return np.random.randint(0, int(Q.shape[1]))
"paurbano@gmail.com"
] | paurbano@gmail.com |
3723fa7aa30aba310bf4f9d5e1019ce129b50a69 | 94ea78dc4ae7794c97652368a656e8e45e6a3fbb | /send_mail.py | f5969a4a1f8dcbec04083d856a4e82e34edd9c5d | [] | no_license | nestoralvaro/Utils | 22dc5874c4fbbfb600fa1899b9abbde1f60e493d | acb98c8aade5044cf7089b52622aaea6e2cc7997 | refs/heads/master | 2021-01-20T07:32:05.396671 | 2017-03-04T12:28:26 | 2017-03-04T12:28:26 | 83,889,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import smtplib
"""
description:
Utility for sending e-mails on python
You can choose between sending these e-mails using the local SMTP server, or
you can send the messages using GOOGLE mail SMTP server.
Caution: These e-mails may appear on the "SPAM" folder.
author:
nestoralvaro
"""
def getMailNames(mail_addrs):
    """
    Extract the local part (the text before the "@" sign) of an address.
    Args:
        mail_addrs: (string OR list of strings) e-mail address(es); a list
        yields the names joined with ", "
    """
    if isinstance(mail_addrs, list):
        return ", ".join(addr[:addr.find("@")] for addr in mail_addrs)
    return mail_addrs[:mail_addrs.find("@")]
def buildMessage(sender, receivers, subject, msg_body):
    """
    Assemble the complete e-mail text: RFC-style headers plus HTML body.
    Args:
        sender: (string) e-mail address of the sender
        receivers: (list) e-mail addresses of all recipients
        subject: (string) subject of the e-mail
        msg_body: (string) body of the e-mail
    """
    # Header template; the adjacent literals concatenate into one string.
    template = (
        "From: {} <{}>\n"
        "To: {} <{}>\n"
        "MIME-Version: 1.0\n"
        "Content-type: text/html\n"
        "Subject: {}\n\n{}"
    )
    return template.format(getMailNames(sender), sender,
                           getMailNames(receivers), ",".join(receivers),
                           subject, msg_body)
def sendEmail(sender, receivers, message):
    """
    Sends the e-mail message using the local SMTP server
    Args:
        sender: (string) e-mail address for the sender of the e-mail
        receivers: (list) e-mail addresses for all the recipents of the e-mail
        msg: (string) message body for the e-mail to be sent
    """
    try:
        # NOTE: Python 2 print statements below -- this module predates Python 3.
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail(sender, receivers, message)
        print "Successfully sent email"
    except Exception as e:
        print "Error: unable to send email", e
def sendGoogleEmail(sender, receivers, message, google_mail_password):
    """
    Uses the information to send the message using GOOGLE mail
    Args:
        sender: (string) e-mail address for the sender of the e-mail.
        This has to be a GOOGLE MAIL account
        receivers: (list) e-mail addresses for all the recipents of the e-mail
        msg: (string) message body for the e-mail to be sent
    """
    try:
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.ehlo()
        # Upgrade the connection to TLS before authenticating.
        server.starttls()
        # If the login fails, allow it here: https://www.google.com/settings/u/1/security/lesssecureapps
        server.login(sender, google_mail_password)
        server.sendmail(sender, receivers, message)
        server.quit()
        print "Successfully sent email"
    except Exception as e:
        print "Error: unable to send email", e
if __name__ == '__main__':
    # Demo driver: fill in real addresses and credentials before running.
    # Person who sends the e-mail
    sender = u"MY_OWN_EMAIL_ADDRESS@gmail.com"
    # Recipients of this e-mail (one or more)
    receivers = ["MY_FRIEND_1@gmail.com", "MY_FRIEND_2@hotmail.com"]
    # Subject text of the e-mail
    subject = "Hola :-)"
    # Main body of the e-mail (can be HTML code)
    msg_body = """Hi There! <h1>Hola</h1>\n\nCan you, <b>read this</b>\n<h2>E-mail?</h2>"""
    # Full message
    message = buildMessage(sender, receivers, subject, msg_body)
    # Send the e-mail using your local SMTP server
    #sendEmail(sender, receivers, message)
    # TODO: Put here your google mail (gmail) password
    google_mail_password = 'YOUR_GOOGLE_MAIL_ACCOUNT_PASSWORD'
    # Send the same e-mail using GOOGLE SMTP server.
    # TODO: make sure the "sender" is your google mail (gmail) account.
    sendGoogleEmail(sender, receivers, message, google_mail_password)
| [
"noreply@github.com"
] | noreply@github.com |
bbea077e0cd49da8d560139dd7b739c639bbe3e1 | 0cca3fd224e21ebebdef7cfa68964351782c7758 | /recon.py | 080e13b20ca52c7cb80eeff15c25f2f3aeee60f1 | [
"MIT"
] | permissive | luckyster895/GoldenRecon | 36c049cc367bf0ebfcadfcb8aa49e63f4b125290 | 71255a8ecd86a1e6ea30524191100a424e72e676 | refs/heads/main | 2023-04-11T01:28:19.946709 | 2021-04-16T13:24:15 | 2021-04-16T13:24:15 | 344,731,331 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | import os
import socket
import time
from colorama import Fore, Back, Style
def argument(arg):
    """Map a scan-menu choice (1-6) to the matching nmap argument string.

    Returns "Invalid argument" for any unknown choice.
    """
    nmap_flags = {
        1: "-A -sC -sV -vvv -oN Output/nmap", #Os scanning,Version detection,scripts,traceroute
        2: "-O -V -oN Output/nmap", #OS Detection ,Version scanning
        3: "-F --open -Pn -oN Output/nmap", #Fast Mode scan for open ports
        4: "nm.command_line()", #Custom Payload or argument
        5: "-p1-65535 --open -Pn -oN Output/nmap", #Scan for Open Tcp Ports
        6: "-p1-65535 --open -sU -Pn -oN Output/nmap" #Scan for Open Udp Ports
    }
    return nmap_flags.get(arg, "Invalid argument")
#Website information
#Making a folder if not Exists
# NOTE(review): `rmdir` only removes an *empty*, existing directory; if
# Output/ is missing or non-empty the chained `mkdir Output` may not run.
os.system("clear")
os.system("rmdir Output && mkdir Output")
website=input("Enter The Website to scan: ")
ip_of_website = socket.gethostbyname(website)
print("Ip of "+website+" is:"+ip_of_website)
time.sleep(0.7)
#Choice to start nmap or not
print("\n Want to start nmap scanning \n1.Yes \n2.No ")
nmap_on_off=int(input("Choice:"))
if(nmap_on_off == 1):
    #Starting nmap
    print("\nFiring up Nmap\n")
    print("1.Os scanning,Version detection,scripts,traceroute \n2.OS Detection ,Version scanning \n3.Fast Mode scan for open ports \n4.Custom Payload or argument \n5.Scan for Open Tcp Ports \n6.Scan for Open Udp Ports ")
    choice=int(input("\n Enter The Choice:"))
    if(choice == 4):
        # Free-form nmap invocation supplied by the user.
        nmap_command=input("Enter Nmap Command u want \n")
        os.system(nmap_command+" -oN Output/nmap "+ip_of_website)
    else:
        # Map the menu choice to a canned flag set (see argument()).
        arg=argument(choice)
        print("Command="" nmap "+arg+" <ip of website> \n")
        os.system("nmap "+arg+" "+ip_of_website)
        #print(arg)
else:
    print("Skipping Nmap Scan")
#Finding Certificate of a website
#Finding subdomains and finding alive host
# Collect every subdomain with assetfinder, then probe which ones respond.
print("\n**************Finding all sudomain*****************")
os.system("assetfinder --subs-only "+website+" >> Output/all_host.txt")
print("\nDone and save output to all_host.txt")
print("\n************Finding alive sudomain*****************")
# BUG FIX: the subdomain list was written to Output/all_host.txt above, so it
# must be read from there (the bare all_host.txt path would normally not
# exist, leaving alive_host.txt empty).
os.system("cat Output/all_host.txt | httprobe >> Output/alive_host.txt")
print("\nDone and save output to alive_host.txt")
#Finding hidden Directory
# Optional ffuf directory/file brute-force against the target website.
print("\nWant to start checking hidden dir,files \n1.Yes\n2.No")
dir_start=int(input("Enter choice:"))
if (dir_start == 1):
    print("Finding hidden Directory")
    os.system("ffuf -w small.txt -u http://"+website+"/FUZZ -mc all -fs 42 -c -fc 404 -o Output/hidden_directories")
    # BUG FIX: ffuf wrote its report to Output/hidden_directories, so
    # pretty-print that file (the bare path would fail and leave
    # Output/Hidden_Directory empty).
    os.system("cat Output/hidden_directories|jq >> Output/Hidden_Directory")
    os.system("rm Output/hidden_directories")
else:
    print("Skipping Directory search")
#Checking for wordpress site
# Run wpscan twice: interactive stealthy scan, then a JSON report to Output/.
print("\n***********Scanning website is Wordpress site or not***********")
os.system("wpscan --stealthy --url "+website)
# BUG FIX: the original passed "--url+ ", a typo that corrupts the option
# name and makes the JSON scan fail.
os.system("wpscan -f json --url "+website+" >> Output/wpscan")
#Firing up the Sqlmap
print("\nStarting Sqlmap \n")
print(Fore.RED + 'WARNING:Only use sqlmap for attacking with mutual consent with the site owner or company')
print(Style.RESET_ALL)
sql_start_stop=int(input("Want to continue with SqlMap:"))
print("\n 1.Start \n2.Stop")
# BUG FIX: sql_start_stop is an int, but the original compared it against the
# strings '1'/'2', so both branches were unreachable and sqlmap was always
# skipped. Compare against integers instead.
if sql_start_stop == 1:
    sql_level=int(input("Level of tests to perform (1-5) \nEnter the level of Sqlmap:"))
    sql_site=input("Enter the endpoint where u want to test sql injection:")
    os.system("sqlmap -u "+sql_site+" --dbs --level="+str(sql_level))
elif sql_start_stop == 2:
    print("Stopping Nmap And Continue script")
else:
    print("Skipping Sqlmap")
exit() | [
"luckyster895@gmail.com"
] | luckyster895@gmail.com |
0384c48739c535d5420ddbfa9c2b1913e77b9422 | 0642c068d96ae43bbb800283d1aba3bd663740b1 | /download/urls.py | e915a1ba491a6a6cc1cd09a6bb7c6f6dc1a45e80 | [] | no_license | weien8899/CyanScikit | 6724f2f7d3a810a1265edd9ac3a8201c1695d264 | 59a083d9a81c557e5ec7d10d50192f43ff936231 | refs/heads/master | 2020-03-17T17:00:57.825089 | 2017-07-16T15:26:04 | 2017-07-16T15:26:04 | 133,771,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.conf.urls import patterns,include, url
from django.contrib import admin
# Legacy Django patterns() style: string view names are resolved relative to
# the 'download.views' prefix.
urlpatterns = patterns(
    'download.views',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^download/$', 'download'),
    url(r'^onecateDown/(\w+)/$', 'onecateDown'),
    url(r'^more/$', 'more'),
)
| [
"Thinkgamer@163.com"
] | Thinkgamer@163.com |
d0b8a76d43390187dc5aa8db4012da5d8af32d3d | 2ada8b8020a7a7169f087b8aa2e49ff52831a561 | /steps/common.py | 432230fac5a997dec7f9bbd28c21af0b6ef6cdaa | [] | no_license | marlonrochaeng/webAutomationBDD | a767a8d26f865afcada69c0df4cad1f7b79b9ce4 | 53b20134aaf9fc90ac25bb69083712d27a101e93 | refs/heads/master | 2020-08-21T17:55:24.326911 | 2020-04-15T01:47:30 | 2020-04-15T01:47:30 | 216,212,818 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from behave import given, when, then
from framework.webapp import WebBrowser
@given(u'I open the "{url}" url')
def step_impl_load_website(context, url):
    # Behave step: create the browser wrapper and navigate to `url`.
    wb = WebBrowser(context)
    wb.go_to_page(url)
"marlon.alencar.rocha@everis.com"
] | marlon.alencar.rocha@everis.com |
e585a401b1aefd44adfc94300e388978d4eab3a1 | 4fcd8f40868d8a3831487a9ea74cceda462c5bfe | /transferit/callbacks.py | fcfbadb8410a47b22dc38304b890129a43a82590 | [
"MIT"
] | permissive | sorenlind/transferit | 822de4a73eecbd66a58f60c27cb6909e0abc0d13 | c77ecb23a2fb6f571cba66155b0b99e4f359fa68 | refs/heads/master | 2023-04-13T23:50:02.634960 | 2021-04-28T20:09:06 | 2021-04-28T20:09:06 | 361,758,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | """Custom callbacks used during training."""
from tensorflow.keras.callbacks import Callback
import matplotlib.pyplot as plt
class PlotLosses(Callback):
    """Keras callback that saves a train/validation loss plot after every
    epoch to <output_folder>/<model_name>_loss.png."""

    def __init__(self, output_folder, model_name):
        # output_folder is used with the `/` operator below, so it is
        # expected to be a pathlib.Path-like object.
        self.output_folder = output_folder
        self.model_name = model_name

    def on_train_begin(self, logs=None):
        # BUG FIX: the original used the mutable default `logs={}`, which is
        # shared across all calls/instances; use None instead.
        self.i = 0          # epoch counter / next x-axis position
        self.x = []
        self.losses = []
        self.val_losses = []
        self.logs = []

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get("loss"))
        self.val_losses.append(logs.get("val_loss"))
        self.i += 1

        # Re-render the full loss curves and overwrite the output image.
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(self.x, self.losses, label="loss")
        ax.plot(self.x, self.val_losses, label="val_loss")
        ax.legend()
        fig.savefig(str(self.output_folder / f"{self.model_name}_loss.png"))
        plt.close(fig)
| [
"soren@gutsandglory.dk"
] | soren@gutsandglory.dk |
abf248406904ecb544c5ae9b1d87e4652132b972 | b747c83829b650eb45b0d5522970bf71bc8ebe8a | /circuitpy/main.py | 71e57e9b28d1a8f2405d9591935202558cacc381 | [] | no_license | mcscope/liteup | 53048632f85577df953d9dcdb1a517e04dcf95d7 | ff6875319ecec0df02ec2c362e39fbaa77d77e9a | refs/heads/master | 2022-05-05T05:53:35.339022 | 2022-03-16T08:26:28 | 2022-03-16T08:26:28 | 93,298,507 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,290 | py | """
Liteup 'port' of 2 schemes (perlin and sorts) to Adafruit Gemma
Written in a hackathon at pycon 2018 as a badge decoration
"""
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn, AnalogOut
import neopixel
from touchio import TouchIn
import adafruit_dotstar as dotstar
import microcontroller
import board
import time
import random
import math
from perlin import gen_perlin
# One pixel connected internally!
dot = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.3)
# Built in red LED
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
# Capacitive touch on A2
touch2 = TouchIn(board.A2)
# Used if we do HID output, see below
kbd = Keyboard()
REG_BRIGHTNESS = 0.05
FLASH_BRIGHTNESS = 0.2
######################### HELPERS ##############################
def hue_to_rgb(hue):
    """
    Convert float hue value to a rgb color value. Probably doesn't match
    actual hsv conversion, but good for rainbows.

    `hue` is expected in [0.0, 1.0] and is scaled onto wheel()'s 0-255 range.
    """
    return wheel(hue * 255)
# Helper to give us a nice color swirl
def wheel(pos):
    """Map a value 0-255 onto a colour-wheel [r, g, b] list.

    The colours transition r - g - b - back to r; out-of-range inputs
    yield black ([0, 0, 0]).
    """
    if pos < 0 or pos > 255:
        return [0, 0, 0]
    if pos < 85:
        return [int(pos * 3), int(255 - pos * 3), 0]
    if pos < 170:
        offset = pos - 85
        return [int(255 - offset * 3), 0, int(offset * 3)]
    offset = pos - 170
    return [0, int(offset * 3), int(255 - offset * 3)]
def is_touched():
    # Return True when capacitive pad A2 reads as touched; otherwise falls
    # through and returns None (falsy), so callers can use `if is_touched():`.
    # set analog output to 0-3.3V (0-65535 in increments)
    # use A2 as capacitive touch to turn on internal LED
    if touch2.value:
        print("A2 touched!")
        # optional! uncomment below & save to have it sent a keypress
        # kbd.press(Keycode.A)
        # kbd.release_all()
        return True
    # NOTE(review): this line only runs when the pad is NOT touched, so it
    # always writes False to the LED -- confirm the intended mirroring.
    led.value = touch2.value
def mergesort(array, start=None, stop=None):
    # Generator-based, in-place merge sort over array[start:stop].
    # Yields True once per visualisation step so the caller can redraw the
    # LED strip between mutations; the yield sequence IS the animation.
    if start is None and stop is None:
        start, stop = 0, len(array)
    if stop - start < 2:
        # Already sorted
        return True
    midpoint = start + math.ceil((stop - start) / 2)
    yield from mergesort(array, start, midpoint)
    yield from mergesort(array, midpoint, stop)
    # merge!
    lhead, lstop = start, midpoint
    rhead = midpoint
    yield True
    # There is an actual efficient in-place merge algorithm; here we instead
    # visually simulate the merge by popping right-half elements and
    # inserting them before the left head.
    while lhead < lstop and rhead < stop:
        if array[lhead] < array[rhead]:
            # easy, it's already in the right spot
            lhead += 1
            yield True
        else:
            tmp = array.pop(rhead)
            array.insert(lhead, tmp)
            # The insertion shifts the left window one slot to the right.
            lhead += 1
            lstop += 1
            rhead += 1
            yield True
    yield True
def swap(array, x, y):
    """Exchange array[x] and array[y] in place, yielding once afterwards."""
    array[x], array[y] = array[y], array[x]
    yield
def bubblesort(array):
    # Bubblesort it... it's the only way to be sure
    # (Strictly a cocktail/bidirectional bubble sort: each pass sweeps
    # left-to-right then right-to-left, so ceil(n/2) passes suffice.)
    # Yields once per swap (via swap()) so the caller can animate each step.
    for _ in range(math.ceil((len(array) / 2))):
        for x in range(len(array) - 1):
            if array[x] > array[x + 1]:
                yield from swap(array, x, x + 1)
        for x in range(len(array) - 2, -1, -1):
            if array[x] > array[x + 1]:
                yield from swap(array, x, x + 1)
######################### MAIN LOOP ##############################
def flash(strip):
    # One bright "flash": raise the whole strip's brightness briefly, then
    # drop back to the normal level. Yields the seconds the caller should
    # sleep after each step.
    strip.brightness = FLASH_BRIGHTNESS
    yield 0.1
    strip.brightness = REG_BRIGHTNESS
    yield 0.3
def draw(strip, array, time=0.1):
    # Render one frame: map each hue in `array` onto the matching pixel.
    # Yields the frame delay; the caller is responsible for strip.show().
    for i in range(len(strip)):
        strip[i] = hue_to_rgb(array[i])
    yield time
def sort_scheme(strip):
    # Endless scheme: fill the strip with random hues, then animate
    # bubblesort and mergesort sorting them, flashing twice when each
    # algorithm finishes.
    while True:
        for alg in [bubblesort, mergesort]:
            array = [random.random() for _ in strip]
            for _ in alg(array):
                yield from draw(strip, array)
            for _ in range(2):
                yield from flash(strip)
def fade(array, idx, start, end):
    """Step array[idx] from `start` toward `end` in 8 linear increments,
    yielding after each write (the last value is start + 7/8 of the gap)."""
    step_size = (end - start) / 8.0
    for step in range(8):
        array[idx] = start + step * step_size
        yield
def perlin_scheme(strip):
    """Endlessly fade every pixel toward successive Perlin-noise values.

    Keeps a list of in-flight fades; every frame queues a new fade per
    pixel, advances each pending fade one of its 8 steps, and drops
    finished ones before redrawing the strip.
    """
    array = [0] * len(strip)
    pervals = gen_perlin()
    fadelist = []
    while True:
        for idx in range(len(strip)):
            # Queue a fade from the pixel's current value to the next noise value.
            fadelist.append((idx, array[idx], next(pervals), 0))
        newlist = []
        for idx, start, end, step in fadelist:
            diff_step = (end - start) / 8.0
            array[idx] = start + step * diff_step
            if step < 8:
                # Not finished yet: keep it with its step counter advanced.
                newlist.append((idx, start, end, step + 1))
        fadelist = newlist
        yield from draw(strip, array)
def main():
    """Drive the NeoPixel strip forever, cycling schemes on a touch event."""
    num_leds = 16
    strip = neopixel.NeoPixel(board.D1, num_leds, auto_write=False)
    strip.brightness = REG_BRIGHTNESS
    gen_idx = 0
    gens = [sort_scheme(strip), perlin_scheme(strip)]
    # gens = [perlin_scheme(strip)]
    while True:
        # Each scheme yields how long its current frame should be shown.
        sleeptime = next(gens[gen_idx % len(gens)])
        strip.show()
        time.sleep(sleeptime)
        if is_touched():
            # Advance to the next scheme and display its first frame.
            gen_idx += 1
            next(gens[gen_idx % len(gens)])
            strip.show()
            # time to move your finger away
            time.sleep(0.7)
main()  # entry point: runs forever once the board boots this script
| [
"mcscope@gmail.com"
] | mcscope@gmail.com |
449197d603b056a7cfaf92ea38f6cbdabaf57f67 | 300fe198f4145cd10dfcd31884dc4b1aa165d097 | /experiments/test_grid_yspace.py | 327db982f4e4d5c60dbe499456267c3459f35495 | [
"BSD-3-Clause"
] | permissive | WrongWhp/mantis | 126170a9033191b970cc4e4697d4353527d25c2f | 2cf149b5bfa4f7c6dbf5aa47f1010785e886bd2c | refs/heads/master | 2020-06-14T22:00:01.472150 | 2018-10-22T17:55:45 | 2018-10-22T17:55:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,412 | py | import matplotlib.colors as mpl_colors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn.apionly as sns
from mantis import sdp_km_burer_monteiro, copositive_burer_monteiro
from experiments.utils import plot_matrix, plot_data_embedded
# Use LaTeX (with amsmath) for all figure text; requires a TeX install.
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{amsmath}')
# Create ../results/grid_bm/ (two levels, made one at a time) for output figures.
dir_name = '../results/'
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
dir_name += 'grid_bm/'
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
def plot_bumps_on_data(X, bumps, palette='Set1'):
    """Overlay each bump (a per-point weight vector) on the embedded data.

    Draws a white base scatter, then re-draws the points once per bump in a
    shuffled palette color with alpha proportional to the bump's
    non-negative values (normalized by its maximum).
    """
    plot_data_embedded(X, palette='w')
    colors = sns.color_palette(palette, n_colors=len(bumps))
    colors = [mpl_colors.to_hex(c) for c in colors]
    np.random.shuffle(colors)
    for i, (b, c) in enumerate(zip(bumps, colors)):
        alpha = np.maximum(b, 0) / b.max()
        plot_data_embedded(X, palette=c, alpha=alpha)
def align_bumps(Y, ref_idx):
    """Circularly align every column of ``Y`` to the reference column.

    For each column, finds the cyclic shift (along axis 0) that maximizes
    its correlation with column ``ref_idx`` and returns a copy of ``Y``
    with each column rolled by its best shift.

    Parameters
    ----------
    Y : ndarray, shape (n, k)
        Matrix whose columns are the bumps to align.
    ref_idx : int
        Index of the reference column.

    Returns
    -------
    ndarray, shape (n, k)
        ``Y`` with each column circularly shifted into alignment.
    """
    Y_ref = Y[:, ref_idx]
    # np.int was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin int is the correct dtype spelling.
    idx_best = np.zeros((Y.shape[1],), dtype=int)
    corr_best = np.zeros((Y.shape[1],))
    for shift in range(Y.shape[0]):
        Y_cshift = np.roll(Y, shift, axis=0)
        # Correlation of the reference bump with every column at this shift.
        corr = Y_ref.dot(Y_cshift)
        mask = corr > corr_best
        idx_best[mask] = shift
        corr_best[mask] = corr[mask]
    Y_aligned = np.zeros_like(Y)
    for j in range(Y.shape[1]):
        Y_aligned[:, j] = np.roll(Y[:, j], idx_best[j], axis=0)
    return Y_aligned
def test_grid(n_clusters=16, use_copositive=False):
    """Cluster a 16x16 grid with an SDP relaxation and plot diagnostics.

    Solves either the copositive or the SDP-KM Burer-Monteiro relaxation,
    then saves to ``dir_name``: the dataset/Q/Y overview figure, one figure
    per bump drawn over the data, a sample of bumps together, and aligned
    receptive-field plots with a mean/median summary.
    """
    # The dataset: the 256 vertices of a 16x16 integer grid, one row each.
    X = np.mgrid[0:16, 0:16]
    X = X.reshape((len(X), -1)).T
    labels = np.arange(len(X))
    # X_norm = X - np.mean(X, axis=0)
    # cov = X_norm.T.dot(X_norm)
    # X_norm /= np.trace(cov.dot(cov)) ** 0.25
    #
    # alpha = 0.001
    # plt.matshow(np.maximum(X_norm.dot(X_norm.T) - alpha, 0), cmap='gray_r')
    #
    # from scipy.spatial.distance import pdist, squareform
    # plt.matshow(squareform(pdist(X)), cmap='gray_r')
    #
    # return
    rank = len(X)
    print(rank)
    if use_copositive:
        beta = n_clusters / len(X)
        Y = copositive_burer_monteiro(X, alpha=0.003, beta=beta, rank=rank,
                                      tol=1e-5, constraint_tol=1e-5,
                                      verbose=True)
        name = 'grid_copositive_bm'
    else:
        Y = sdp_km_burer_monteiro(X, n_clusters, rank=rank, tol=1e-6,
                                  verbose=True)
        name = 'grid_sdpkm_bm'
    Q = Y.dot(Y.T)
    # Order the columns of Y by the position of their peak so the bumps
    # appear in spatial order.
    idx = np.argsort(np.argmax(Y, axis=0))
    Y = Y[:, idx]
    sns.set_style('white')
    plt.figure(figsize=(12, 4.7), tight_layout=True)
    gs = gridspec.GridSpec(1, 3)
    ax = plt.subplot(gs[0])
    plot_data_embedded(X, palette='hls', ax=ax)
    plt_title = ax.set_title('Input dataset', fontsize='xx-large')
    # plt_title.set_position((0.5, 1.07))
    ax = plt.subplot(gs[1])
    plot_matrix(Q, ax=ax, labels=labels, which_labels='both',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Q}$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))
    ax = plt.subplot(gs[2])
    plot_matrix(Y, ax=ax, labels=labels, which_labels='vertical',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))
    plt.savefig('{}{}.pdf'.format(dir_name, name))
    # One figure per bump, drawn over the data points.
    pdf_file_name = '{}{}_plot_{}_on_data_{}{}'
    for i in range(Y.shape[1]):
        plt.figure()
        plot_bumps_on_data(X, [Y[:, i]])
        plt.savefig(pdf_file_name.format(dir_name, name, 'Y', i, '.png'),
                    dpi=300, bbox_inches='tight')
        plt.close()
    pdf_file_name = '{}{}_plot_{}_on_data_{}'
    plt.figure()
    # BUG FIX: np.random.random_integers was removed from NumPy, and it
    # sampled in [1, Y.shape[1]] inclusive, so Y[:, i] could index one past
    # the last column. randint samples valid indices in [0, Y.shape[1]).
    bumps_locs = np.random.randint(0, Y.shape[1], size=6)
    plot_bumps_on_data(X, [Y[:, i] for i in bumps_locs], palette='Set1')
    plt.savefig(pdf_file_name.format(dir_name, name, 'Y', 'multiple.png'),
                dpi=300, bbox_inches='tight')
    # Circularly align all bumps to a central reference bump.
    Y_aligned = align_bumps(Y, Y.shape[1] // 2)
    _, ax = plt.subplots(1, 1)
    plot_matrix(Y_aligned, ax=ax)
    plt_title = ax.set_title(r'Aligned $\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))
    plt.savefig('{}{}_Y_aligned_2d.pdf'.format(dir_name, name))
    _, ax = plt.subplots(1, 1)
    ax.plot(Y_aligned)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(r'Receptive fields', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d.pdf'.format(dir_name, name))
    # Summary statistics of the aligned bumps (mean +/- 3 std, median).
    pos = np.arange(len(Y))
    median = np.median(Y_aligned, axis=1)
    mu = np.mean(Y_aligned, axis=1)
    sigma = np.std(Y_aligned, axis=1)
    _, ax = plt.subplots(1, 1)
    plt_mean = ax.plot(pos, mu, color='#377eb8')
    ax.fill_between(pos, np.maximum(mu - 3 * sigma, 0), mu + 3 * sigma,
                    alpha=0.3, color='#377eb8')
    plt_median = ax.plot(pos, median, '-.', color='#e41a1c')
    ax.set_xticks([])
    ax.set_yticks([])
    # Invisible patch used only to build the shaded-band legend entry.
    # (np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.)
    plt_aux = ax.fill(np.nan, np.nan, '#377eb8', alpha=0.3, linewidth=0)
    ax.legend([(plt_mean[0], plt_aux[0]), plt_median[0]],
              [r'Mean $\pm$ 3 STD', 'Median'],
              loc='upper left', fontsize='xx-large')
    ax.set_title(r'Receptive fields summary', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d_summary.pdf'.format(dir_name, name))
if __name__ == '__main__':
    # Runs the SDP-KM variant by default; flip the flag (or uncomment the
    # line below) for the copositive solver.
    # test_grid(use_copositive=True)
    test_grid(use_copositive=False)
    plt.show()
| [
"Lulita75"
] | Lulita75 |
0eed1e43e88e22d5e74f9010387e7ad031989714 | 472baa2414822520f7cb8d491d4bf5608f765ad8 | /zqxt4396/tools/views.py | 3d5f7f76bda31af965d9c812557cadea6c386f1e | [] | no_license | Umi101108/django-projects | cdcf0c9bb8bd272e04a4b7a702f09adb16c28404 | 50edfdc3511e1de5b4a5a3e92fe9ddad932b5396 | refs/heads/master | 2021-01-12T08:20:48.113696 | 2017-06-11T14:45:20 | 2017-06-11T14:45:20 | 76,545,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the landing page template."""
    return render(request, 'index.html')
def add(request):
    """Return the sum of the integer query parameters ``a`` and ``b`` as text."""
    a = int(request.GET['a'])
    b = int(request.GET['b'])
    return HttpResponse(str(a + b))
| [
"408465808@qq.com"
] | 408465808@qq.com |
6b3ee56fb7f4552bfebdfa4efb793cedd84f4731 | 8512ec0b778cf4efaa960ef88aad4da9e4013a9d | /pip_benchmark_python/utilities/Formatter.py | 561df3c88f38d2db10db99ddfbf665ad67bd96ab | [
"MIT"
] | permissive | pip-benchmark/pip-benchmark-python | b375bd16f0102e698a0a45edbc92fc02735220ab | d75672e940af12e2f9818607e2188490e989d8c5 | refs/heads/master | 2020-05-27T03:22:07.706723 | 2020-05-08T19:31:18 | 2020-05-08T19:31:18 | 82,516,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | # -*- coding: utf-8 -*-
import datetime
from pip_services3_commons.convert import StringConverter
class Formatter:
    """Static helpers for padding strings and formatting numbers and durations."""

    @staticmethod
    def pad_left(value, lenght, pad_symbol):
        """Wrap ``value`` in ``pad_symbol`` and left-pad until ``lenght`` + 2 chars."""
        output = pad_symbol + value + pad_symbol
        while len(output) < lenght + 2:
            output = pad_symbol + output
        return output

    @staticmethod
    def pad_right(value, lenght, pad_symbol):
        """Wrap ``value`` in ``pad_symbol`` and right-pad until ``lenght`` + 2 chars."""
        output = pad_symbol + value + pad_symbol
        # BUG FIX: this method was a byte-for-byte copy of pad_left and
        # prepended the pad symbol; right padding must append it.
        while len(output) < lenght + 2:
            output = output + pad_symbol
        return output

    @staticmethod
    def format_number(value, decimals=2):
        """Format ``value`` (None counts as 0) rounded to ``decimals`` places."""
        # BUG FIX: the old ``decimals or 2`` silently turned an explicit
        # decimals=0 into 2; only None falls back to the default now.
        if decimals is None:
            decimals = 2
        return str(round(value or 0, decimals))

    @staticmethod
    def format_date(date):
        """Return the date portion (text before 'T') of ``date`` (default: now)."""
        date = date or datetime.datetime.now()
        value = StringConverter.to_string(date)
        pos = value.index('T')
        return value[0:pos]

    @staticmethod
    def format_time(date):
        """Return the time-of-day portion of ``date`` without fractional seconds."""
        date = date or datetime.datetime.now()
        value = StringConverter.to_string(date)
        value = value[value.index('T') + 1:]
        # BUG FIX: str.index raised ValueError when no fractional part was
        # present; find returns -1 and the value is then used unchanged.
        pos = value.find('.')
        return value[0:pos] if pos > 0 else value

    @staticmethod
    def format_time_span(ticks):
        """Format a duration of ``ticks`` seconds as ``h:m:s:ms``.

        BUG FIX: the old implementation rounded each component separately,
        which could display 60 seconds and double-count time near component
        boundaries; the components are now exact floor divisions.
        """
        total_millis = int(ticks * 1000)
        hours, rem = divmod(total_millis, 3600 * 1000)
        minutes, rem = divmod(rem, 60 * 1000)
        seconds, millis = divmod(rem, 1000)
        return '{}:{}:{}:{}'.format(hours, minutes, seconds, millis)
| [
"anastasf/2gmail.com"
] | anastasf/2gmail.com |
a5680836916c2ce43cd2b4b36b019cde8f18cee4 | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic/mi/pcnn.py | 825d23c100525d15bf520d848194da8230315155 | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | #!/usr/bin/python
import sys
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.contrib.experiments.callback import CustomCallback
from arekit.contrib.networks.multi.configurations.max_pooling import MaxPoolingOverSentencesConfig
from arekit.contrib.networks.multi.architectures.max_pooling import MaxPoolingOverSentences
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.experiments.multi.model import MultiInstanceTensorflowModel
from arekit.contrib.experiments.nn_io.rusentrel import RuSentRelBasedNeuralNetworkIO
from arekit.contrib.experiments.engine import run_testing
from rusentrel.mi_names import MaxPoolingModelNames
from rusentrel.classic.ctx.pcnn import ctx_pcnn_custom_config
from rusentrel.classic.common import \
classic_common_callback_modification_func, \
classic_mi_common_config_settings
def mi_pcnn_custom_config(config):
    """Apply the PCNN context settings to a multi-instance configuration.

    Tunes the nested context config, then propagates those parameters up
    to the multi-instance level.
    """
    ctx_pcnn_custom_config(config.ContextConfig)
    config.fix_context_parameters()
def run_testing_pcnn(name_prefix=u'',
                     cv_count=1,
                     model_names_classtype=MaxPoolingModelNames,
                     network_classtype=MaxPoolingOverSentences,
                     config_classtype=MaxPoolingOverSentencesConfig,
                     custom_config_func=mi_pcnn_custom_config,
                     custom_callback_func=classic_common_callback_modification_func):
    """Run the PCNN multi-instance experiment on the RuSentRel data.

    Wires a piecewise-CNN context network into the given multi-instance
    architecture/config classes and launches run_testing with RuSentRel
    IO, a two-class evaluator, and the supplied config/callback overrides.
    """
    run_testing(full_model_name=name_prefix + model_names_classtype().PCNN,
                create_network=lambda: network_classtype(context_network=PiecewiseCNN()),
                create_config=lambda: config_classtype(context_config=CNNConfig()),
                create_nn_io=RuSentRelBasedNeuralNetworkIO,
                cv_count=cv_count,
                create_model=MultiInstanceTensorflowModel,
                evaluator_class=TwoClassEvaluator,
                create_callback=CustomCallback,
                experiments_io=RuSentRelBasedExperimentsIOUtils(),
                common_callback_modification_func=custom_callback_func,
                custom_config_modification_func=custom_config_func,
                common_config_modification_func=classic_mi_common_config_settings)
if __name__ == "__main__":
run_testing_pcnn()
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
0d8ed6f82e39bf255cdbdaf41569b7e5d76e34ca | 888f98aa6cd5c706582296b2edea5f331836a3aa | /accounts/urls.py | 8e48b97fb7b90371bf373a53068844e3bffefc83 | [] | no_license | Hibatouallah/djangoSite | 662220634da04fbd4cee0623d393df5adeb2a24a | c6f10b1a4c1334b58ee6cdc8665d844be07ffbdc | refs/heads/master | 2022-11-08T12:06:43.232710 | 2020-06-28T20:50:31 | 2020-06-28T20:50:31 | 275,660,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py |
from django.urls import path
from . import views
from django.contrib.auth.views import LoginView , LogoutView
# Route table for the accounts app: home page, login/logout, registration.
urlpatterns = [
    path('',views.indexView,name="home"),
    path('login/',LoginView.as_view(),name="login_url"),
    path('register/',views.registerView,name="register_url"),
    # Redirect back to the home page after logging out.
    path('logout/',LogoutView.as_view(next_page='home'),name="logout"),
]
"boulsane.1996@gmail.com"
] | boulsane.1996@gmail.com |
330d07b94732fbde6570ab71a73dd30c8eebd34c | 2b9a7ada172dd7adbed7921e6787bf4d43891b36 | /sb05/package.py | 73d97e13365e4b004b6ae6c1c712f4743f3b6c8f | [] | no_license | Vinhnguyen19922/glvis | ba566d3f6117ee005ad1ce89f884631bccf40644 | 1db0c6e57c2b2a4677ab3765525f4f22b7345771 | refs/heads/master | 2020-09-05T04:45:22.064164 | 2016-10-08T07:53:16 | 2016-10-08T07:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | from PyQt5 import QtCore
from PyQt5.QtWidgets import (QGridLayout, QWidget)
from sn.qt import *
from sn.gl import *
class SB05(GLWidget):
    """Minimal GL widget with a fixed size hint and per-frame redraws."""
    def __init__(self, parent, width=200, height=200):
        super().__init__(parent)
        # Remember the requested size for minimumSizeHint().
        self._width = width; self._height = height
    def initializeGL(self, path):
        super().initializeGL()
        # Build the shader program only once; reuse it on re-initialization.
        self.program = self.program or Program(path)
        self.va = VertexArray()
    def minimumSizeHint(self): return QtCore.QSize(self._width, self._height)
    def onTick(self):
        # Called by the application timer (~60 fps); request a repaint.
        self.updateGL()
    # Borrow the shared Window key handler.
    keyPressEvent = Window.keyPressEvent
def start(Widget):
    """Create the application, show one instance of ``Widget``, run at ~60 fps."""
    app = Application()
    widget = Widget(None)
    widget.show()
    app.startTimer(timeout = 1000/60, onTick = widget.onTick)
    app.run()
# NOTE: the `and False` below deliberately disables this composite demo
# (a grid of the seven sb05* widgets); remove it to re-enable.
if __name__ == '__main__' and False:
    import sb05a, sb05b, sb05c, sb05d, sb05e, sb05f, sb05g
    app = Application()
    app.startTimer(1000/60)
    w = SB05(None)
    grid = QGridLayout(w)
    # (row, column, widget class) for the demo layout.
    for r, c, W in [
        (1, 0, sb05a.W), (1, 1, sb05b.W), (1, 2, sb05c.W),
        (2, 0, sb05d.W), (2, 1, sb05e.W), (2, 2, sb05f.W),
        (3, 1, sb05g.W) ]:
        wx = W(w, width=400, height=300)
        Application.addOnTick(wx.onTick)
        grid.addWidget(wx, r, c)
    w.setLayout(grid)
    w.show()
    import sys
    sys.exit(app.exec_())
"wakita@is.titech.ac.jp"
] | wakita@is.titech.ac.jp |
000dfc6172faa1e4cc544650996beca345692cf0 | e28b5950500edac1ec78841ba3232a352c01a5c8 | /models/map.py | dedd8757421d51de371611fa4adc288a363f098f | [] | no_license | jmarcelos/mathspace | 48c82d592dcff240fc9befc2fa6de2c58275cd43 | 37720ec287c77f0fe06aa989292ed73d5bfe26a4 | refs/heads/master | 2021-07-22T08:10:34.523126 | 2017-10-31T12:10:40 | 2017-10-31T12:10:40 | 108,726,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from math import sqrt
class Map:
MAX_COST = float('inf')
def __init__(self, str_map):
self._validation(str_map)
paths = str_map.split()
size_matrix = sqrt(len(paths))
self.grid = []
line = []
for index, path in enumerate(paths):
if index != 0 and index % size_matrix == 0:
self.grid.append(line)
line = []
line.append(int(path, 16))
self.grid.append(line)
self.start_x, self.start_y = 0, 0
self.end_x, self.end_y = len(self.grid) - 1, len(self.grid) - 1
def get_position_value(self, position_x, position_y):
if not self.is_inside_grid(position_x, position_y):
return self.MAX_COST
return self.grid[position_x][position_y]
def get_neighbours(self, position_x, position_y):
neighbours = []
if self.is_inside_grid(position_x+1, position_y):
neighbours.append(((position_x+1, position_y), 'D'))
if self.is_inside_grid(position_x-1, position_y):
neighbours.append(((position_x-1, position_y), 'U'))
if self.is_inside_grid(position_x, position_y+1):
neighbours.append(((position_x, position_y+1), 'R'))
if self.is_inside_grid(position_x, position_y-1):
neighbours.append(((position_x, position_y-1), 'L'))
return neighbours
def is_inside_grid(self, position_x, position_y):
if position_x < self.start_x or position_x > self.end_x:
return False
if position_y < self.start_y or position_y > self.end_y:
return False
return True
def _validation(self, str_map):
if not str_map or not isinstance(str_map, str):
raise ValueError("A valid str map should be provided")
paths = str_map.split()
size_matrix = sqrt(len(paths))
#assuming a square grid
if round(size_matrix) != size_matrix:
raise ValueError("Invalid map generation")
def __repr__(self):
string = "Map\n"
if self.grid:
for line in self.grid:
string += " ".join([str(x) for x in line])
string += "\n"
return string
| [
"jmarcelos@gmail.com"
] | jmarcelos@gmail.com |
09a5dcf778c742d075bd8decf005f393a6b3b6e6 | e6d1bbac91b97ee7a9d028c3aafa5d85a0ee593c | /Python04Month/chapter/chapter3/demo/code/3-1_abnormal_check.py | bd08daf230d7e50525b8458610580eb8e1138662 | [] | no_license | LiuJingGitLJ/PythonSuanFa_2 | 82159043523d6fe69beef7f86421cd4be2242919 | 0afba93c4c29231bc6c2aaf6e4663beee2b5cbbb | refs/heads/master | 2021-09-20T13:49:08.521080 | 2018-08-10T06:13:22 | 2018-08-10T06:13:22 | 124,337,675 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | #-*- coding: utf-8 -*-
import pandas as pd
catering_sale = '../data/catering_sale.xls' #餐饮数据
data = pd.read_excel(catering_sale, index_col = u'日期') #读取数据,指定“日期”列为索引列
print(data)
import matplotlib.pyplot as plt #导入图像库
plt.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False #用来正常显示负号
plt.figure() #建立图像
p = data.boxplot(return_type='dict') #画箱线图,直接使用DataFrame的方法
x = p['fliers'][0].get_xdata() # 'flies'即为异常值的标签
y = p['fliers'][0].get_ydata()
y.sort() #从小到大排序,该方法直接改变原对象
#用annotate添加注释
#其中有些相近的点,注解会出现重叠,难以看清,需要一些技巧来控制。
#以下参数都是经过调试的,需要具体问题具体调试。
for i in range(len(x)):
if i>0:
plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.05 -0.8/(y[i]-y[i-1]),y[i]))
else:
plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.08,y[i]))
plt.show() #展示箱线图
| [
"15201078137@163.com"
] | 15201078137@163.com |
124439bf89b3356762e1f31a4567ac04ce455496 | 90e77dfba83cb6f60b0712dce16d131a7b0fd002 | /projects/Project3/analyse/tools.py | 5e8edf4f1dcba818e5782bf49018266d93349e1f | [] | no_license | halvarsu/FYS3150 | 98649d65773d9694cc0728fe69d99beb66ecf486 | dd52b83aa696b43341418ebf6ad116b8dd347299 | refs/heads/master | 2022-03-08T04:51:33.386081 | 2019-11-22T15:39:40 | 2019-11-22T15:39:40 | 104,348,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import os, sys
def blockPrint():
    """Silence print() by pointing sys.stdout at os.devnull.

    NOTE(review): each call opens a new devnull handle and the previously
    installed stdout object is simply dropped; restore with enablePrint().
    """
    sys.stdout = open(os.devnull, 'w')
def enablePrint():
    """Restore print() output to the interpreter's original stdout."""
    sys.stdout = sys.__stdout__
def printDisable(*args):
    """Print ``args`` (as a tuple) even while output is blocked, then re-block."""
    enablePrint()
    print(args)
    blockPrint()
| [
"halvard.sutterud@gmail.com"
] | halvard.sutterud@gmail.com |
ae9f47dcd6973ca4c8e603f1503be4d5ca8b26ce | a9063fd669162d4ce0e1d6cd2e35974274851547 | /test/test_role_members_add.py | ed565058c42a11f8a5eb9894159405db3ff757a7 | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.role_members_add import RoleMembersAdd # noqa: E501
from swagger_client.rest import ApiException
class TestRoleMembersAdd(unittest.TestCase):
    """RoleMembersAdd unit test stubs (swagger-codegen scaffold)."""
    # setUp/tearDown are intentionally empty placeholders.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testRoleMembersAdd(self):
        """Test RoleMembersAdd"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.role_members_add.RoleMembersAdd()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"github@rootalley.com"
] | github@rootalley.com |
5643935e9ef0b3663b510e3177bffe98981c5630 | 650bd88bf5da6b4105d84d0ef97434a4f4512790 | /nn_meter/prediction/predictors/kernel_predictor.py | c6ed0b812841e50f50bc65ba2960b1b525302987 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | JiahangXu/nn-Meter | 3a0303c08f59ca91673047fe6dcd5cb052ebc4d3 | c11b8223ecf8b5ba881528071a8ae18df80584ba | refs/heads/main | 2023-08-25T14:57:05.299811 | 2021-10-12T10:15:36 | 2021-10-12T10:15:36 | 393,234,662 | 0 | 0 | MIT | 2021-08-06T03:20:11 | 2021-08-06T03:20:10 | null | UTF-8 | Python | false | false | 13,250 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from sklearn.ensemble import RandomForestRegressor
# Hyper-parameters shared by every kernel latency predictor.
_COMMON_PARAMS = dict(
    min_samples_leaf=1,
    min_samples_split=2,
    oob_score=True,
    random_state=10,
)

# (kernel, hardware) -> per-combination RandomForestRegressor settings.
# Combinations absent from this table have no predictor defined and
# get_model returns None for them, matching the original if-chain.
_MODEL_PARAMS = {
    ("convbnrelu", "cpu"): dict(max_depth=70, n_estimators=320, max_features=6),
    ("convbnrelu", "gpu"): dict(max_depth=80, n_estimators=550, max_features=5, n_jobs=32),
    ("convbnrelu", "vpu"): dict(max_depth=100, n_estimators=500, max_features=5, n_jobs=32),
    ("dwconvbnrelu", "cpu"): dict(max_depth=50, n_estimators=240, max_features=6),
    ("dwconvbnrelu", "gpu"): dict(max_depth=40, n_estimators=240, max_features=7),
    ("dwconvbnrelu", "vpu"): dict(max_depth=100, n_estimators=650, max_features=5, n_jobs=32),
    ("fc", "cpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("fc", "gpu"): dict(max_depth=70, n_estimators=330, max_features=4),
    ("fc", "vpu"): dict(max_depth=70, n_estimators=330, max_features=4, n_jobs=32),
    ("channelshuffle", "cpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("channelshuffle", "vpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("se", "cpu"): dict(max_depth=20, n_estimators=290, max_features=2),
    ("se", "gpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("se", "vpu"): dict(max_depth=50, n_estimators=110, max_features=2),
    ("maxpool", "cpu"): dict(max_depth=50, n_estimators=210, max_features=5),
    ("maxpool", "gpu"): dict(max_depth=50, n_estimators=370, max_features=5),
    ("maxpool", "vpu"): dict(max_depth=50, n_estimators=370, max_features=5),
    ("globalavgpool", "cpu"): dict(max_depth=70, n_estimators=370, max_features=2),
    ("hswish", "cpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("hswish", "gpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("hswish", "vpu"): dict(max_depth=50, n_estimators=110, max_features=2),
    ("avgpool", "cpu"): dict(max_depth=50, n_estimators=370, max_features=5),
    ("avgpool", "gpu"): dict(max_depth=50, n_estimators=370, max_features=5),
    ("avgpool", "vpu"): dict(max_depth=50, n_estimators=390, max_features=5),
    ("bnrelu", "cpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("bnrelu", "gpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("bnrelu", "vpu"): dict(max_depth=50, n_estimators=570, max_features=2),
    ("relu", "cpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("relu", "gpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("relu", "vpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("bn", "cpu"): dict(max_depth=50, n_estimators=370, max_features=2),
    ("bn", "gpu"): dict(max_depth=50, n_estimators=190, max_features=2),
    ("bn", "vpu"): dict(max_depth=50, n_estimators=390, max_features=2),
    ("concat", "cpu"): dict(max_depth=100, n_estimators=690, max_features=5),
    ("concat", "gpu"): dict(max_depth=100, n_estimators=690, max_features=5),
    ("addrelu", "cpu"): dict(max_depth=50, n_estimators=570, max_features=3),
    # BUG FIX: the original chain tested `hardware == "addrelu"` here, a
    # branch that could never match, so gpu/addrelu silently returned
    # None; these parameters were clearly intended for the gpu predictor.
    ("addrelu", "gpu"): dict(max_depth=50, n_estimators=570, max_features=3),
    ("addrelu", "vpu"): dict(max_depth=50, n_estimators=570, max_features=3),
    ("split", "cpu"): dict(max_depth=50, n_estimators=190, max_features=2),
}


def get_model(hardware, kernel):
    """Build the random-forest latency predictor for a kernel/hardware pair.

    Parameters
    ----------
    hardware : str
        Target platform: "cpu", "gpu" or "vpu".
    kernel : str
        Kernel type, e.g. "convbnrelu", "dwconvbnrelu", "fc", "se", ...

    Returns
    -------
    RandomForestRegressor or None
        An unfitted regressor configured for the pair, or None when no
        predictor is defined for that combination.
    """
    params = _MODEL_PARAMS.get((kernel, hardware))
    if params is None:
        return None
    return RandomForestRegressor(**_COMMON_PARAMS, **params)
| [
"lzhani@microsoft.com"
] | lzhani@microsoft.com |
3e1303281faa8390bc75072c73e0d996ebfdb03e | 8d9b85f92a934c57306f13d6bdddfe2c0c04c101 | /Lessons/rand_tmp.py | ecb43bf0a21835931e8c1505361c3d54ec0d0ce0 | [] | no_license | estherica/wonderland | 640dcbce9343753ecde9f87b03fdebdc7950c49a | 458e77f7e20b8852bc18fd97add4f62558d175c7 | refs/heads/master | 2022-12-07T16:19:54.727623 | 2020-08-25T14:13:19 | 2020-08-25T14:13:19 | 284,969,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from random import randint
print("Random numbers game\n")
num1=(randint(1,37))
num2=(randint(1,37))
print("1st number: " + str(num1) + "\n2nd number: " + str(num2) + "\n")
if (num1==num2):
print("You won 100$! \n")
else:
print("Maybe next time...")
print("\nBye-bye!") | [
"belleshamharoth@gmail.com"
] | belleshamharoth@gmail.com |
6434ee69271aa8ef76600a1a8e6d60014f9b18f6 | ba1a1e90406230eeb0a86ef22a3a94a7b227b7b8 | /taskmanager/tcp_protocol/message_templates.py | b8b520e92459b14aa099bebaebb9efa8afc3f62b | [
"MIT"
] | permissive | spanickroon/Task-Management-Tools | 6e47ac05a1ff9ddf21a988cf6fc63670bf921d63 | ab8ddba79830fe46bf8e0280832f94ece97a3edb | refs/heads/master | 2022-06-10T18:51:15.235038 | 2020-05-05T23:47:18 | 2020-05-05T23:47:18 | 259,631,581 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | START_APP = '!START!'
STOP_APP = '!STOP!'
SEND_MSG = '!SENDMSG!'
UPD_RPOCESS = '!UPD!'
CONNECT = '!CONNECT!'
| [
"nikitakoznev@gmail.com"
] | nikitakoznev@gmail.com |
7733b0f0ac3a81dfe676c533d7b32dbf6a711e97 | 8e5ebf2c0296294cc2d5850e4dbd6282601aaa26 | /backenddj/urls.py | d775e1ba446dfba1374458515861c35b6683d984 | [] | no_license | udaravimukthi/Django-learn | 0e628c46978a8d2cba0105e577d36906a8dcd508 | 6801b3621b5ed0dd3ca3750f9ef14f027e05a308 | refs/heads/main | 2023-01-30T08:44:47.509775 | 2020-12-08T17:17:19 | 2020-12-08T17:17:19 | 306,814,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | """backenddj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: the site root goes to the calc app.
urlpatterns = [
    # path('', include('webtemp.urls')),  # alternative root for the webtemp project
    path('', include('calc.urls')),
    path('admin/', admin.site.urls),
]
| [
"uvlakshan@gmail.com"
] | uvlakshan@gmail.com |
b5ce7163b13168c36c12bcfe8d7bc802301482c5 | 935e9d6d806f507eb541a88de731b2b16b0cc6c9 | /Pages/TeamPage.py | fc8b3995c2d004271bce5de45eab28c4b6dd717d | [] | no_license | Anandqualwebs/AgileSportz | 27ff864115907c7b80466ad0ece955add6054642 | e59dbb26970f3d486507b8c968068fb3b1ae7069 | refs/heads/master | 2020-07-30T20:13:50.390846 | 2019-09-23T11:56:48 | 2019-09-23T11:56:48 | 210,344,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,967 | py | import sys
import os
sys.path.append(os.path.dirname(sys.path[0]+"\Locators"))
from Locators.Locators import locators
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
class TeamPage():
    """Page object for the Teams admin screen: the add-team form, the optional
    new team-admin / league-admin sub-forms, and the teams listing table.

    All locators come from the shared ``locators`` module; a Selenium WebDriver
    instance is injected at construction time.
    """

    def __init__(self, driver):
        self.driver = driver
        # Add-team form locators.
        self.team_tab_xpath = locators.team_tab_xpath
        self.team_name_input_xpath = locators.team_name_input_xpath
        self.team_location_input_xpath = locators.team_location_input_xpath
        self.select_league_xpath = locators.select_league_xpath
        self.select_founded_year_xpath = locators.select_founded_year_xpath
        self.number_of_sprints_xpath = locators.number_of_sprints_xpath
        self.sub_domain_input_xpath = locators.sub_domain_input_xpath
        # "Add new team admin" sub-form locators.
        self.add_new_team_admin_checkbox = locators.add_new_team_admin_checkbox
        self.team_admin_first_name_css = locators.team_admin_first_name_css
        self.team_admin_last_name_css = locators.team_admin_last_name_css
        self.team_admin_email_css = locators.team_admin_email_css
        self.team_admin_user_name_css = locators.team_admin_user_name_css
        # "Add new league admin" sub-form locators.
        self.add_new_league_admin_checkbox = locators.add_new_league_admin_checkbox
        self.league_admin_first_name_css = locators.league_admin_first_name_css
        self.league_admin_last_name_css = locators.league_admin_last_name_css
        self.league_admin_email_css = locators.league_admin_email_css
        self.league_admin_user_name_css = locators.league_admin_user_name_css
        # Teams table column locators.
        self.table_team_names_xpath = locators.table_team_names_xpath
        self.table_team_location_xpath = locators.table_team_location_xpath
        self.table_team_founded_year_xpath = locators.table_team_founded_year_xpath
        self.table_team_admin_name_xpath = locators.table_team_admin_name_xpath
        self.table_team_league_name_xpath = locators.table_team_league_name_xpath
        self.table_team_league_number_of_sprints_xpath = locators.table_team_league_number_of_sprints_xpath
        self.table_team_league_number_of_games_xpath = locators.table_team_league_number_of_games_xpath

    # ------------------------------------------------------------------
    # Form actions
    # ------------------------------------------------------------------

    def click_team_tab(self):
        self.driver.find_element_by_xpath(self.team_tab_xpath).click()

    def enter_team_name(self, text):
        self.driver.find_element_by_xpath(self.team_name_input_xpath).send_keys(text)

    def enter_team_location(self, text):
        self.driver.find_element_by_xpath(self.team_location_input_xpath).send_keys(text)

    def select_league(self, league):
        # *league* is the 0-based option index, not the visible option text.
        Select(self.driver.find_element_by_xpath(self.select_league_xpath)).select_by_index(league)

    def select_founded_year(self, year):
        # *year* is the 0-based option index, not the year value itself.
        Select(self.driver.find_element_by_xpath(self.select_founded_year_xpath)).select_by_index(year)

    def enter_sub_domain(self, text):
        self.driver.find_element_by_xpath(self.sub_domain_input_xpath).send_keys(text)

    def click_add_new_team_admin_checkbox(self):
        self.driver.find_element_by_xpath(self.add_new_team_admin_checkbox).click()

    def enter_new_team_admin_first_name(self, text):
        self.driver.find_element_by_css_selector(self.team_admin_first_name_css).send_keys(text)

    def enter_new_team_admin_last_name(self, text):
        self.driver.find_element_by_css_selector(self.team_admin_last_name_css).send_keys(text)

    def enter_new_team_admin_email_xpath(self, text):
        self.driver.find_element_by_css_selector(self.team_admin_email_css).send_keys(text)

    def enter_new_team_admin_user_name(self, text):
        self.driver.find_element_by_css_selector(self.team_admin_user_name_css).send_keys(text)

    def click_add_new_league_admin_checkbox(self):
        self.driver.find_element_by_xpath(self.add_new_league_admin_checkbox).click()

    def enter_new_league_admin_first_name(self, text):
        self.driver.find_element_by_css_selector(self.league_admin_first_name_css).send_keys(text)

    def enter_new_league_admin_last_name(self, text):
        self.driver.find_element_by_css_selector(self.league_admin_last_name_css).send_keys(text)

    def enter_new_league_admin_email_xpath(self, text):
        self.driver.find_element_by_css_selector(self.league_admin_email_css).send_keys(text)

    def enter_new_league_admin_user_name(self, text):
        self.driver.find_element_by_css_selector(self.league_admin_user_name_css).send_keys(text)

    # ------------------------------------------------------------------
    # Table printing (previously seven identical copy-pasted loops)
    # ------------------------------------------------------------------

    def _print_column(self, xpath):
        """Print every non-empty cell matched by *xpath* as ``<n>.<text>``.

        ``<n>`` is the element's 1-based position; empty cells keep their
        slot in the numbering but print nothing (same behavior as the
        original per-column loops).
        """
        cells = self.driver.find_elements_by_xpath(xpath)
        for position, cell in enumerate(cells, start=1):
            if cell.text:
                print("{}.{}".format(position, cell.text))

    def print_team_names(self):
        self._print_column(self.table_team_names_xpath)

    def print_team_locations(self):
        self._print_column(self.table_team_location_xpath)

    def print_team_founded_year(self):
        self._print_column(self.table_team_founded_year_xpath)

    def print_team_admin_names(self):
        self._print_column(self.table_team_admin_name_xpath)

    def print_team_league_names(self):
        self._print_column(self.table_team_league_name_xpath)

    def print_team_sprints(self):
        self._print_column(self.table_team_league_number_of_sprints_xpath)

    def print_team_number_of_games(self):
        self._print_column(self.table_team_league_number_of_games_xpath)
| [
"envio login s.shukla@enviosystems.com"
] | envio login s.shukla@enviosystems.com |
f1d3ed05dd0a8188d896cde41bfb30bf2177629c | 14a19a5dfbe5519529c097cf9606cd325549d1b3 | /metadataapp/views.py | 06c429f825f87f1b29aa747ab12d4b28781764f3 | [] | no_license | factvsankit/bbtApp | ea9d7b2c72f22b67c6e0f5dd4f0a7321d6c2d834 | 0fc5aa209bc454629f0eaf4a635b0624313bd5de | refs/heads/master | 2021-07-24T10:40:24.861572 | 2017-11-04T04:51:43 | 2017-11-04T04:51:43 | 109,467,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | import json, os
import math
from django.http import HttpResponse, JsonResponse
from content_page.models import ContentPage, ContentIndexPage
from metadataapp.models import Plant, Event, Fruit, CropPricing
from .constants import NEPALI_MONTH_INDEX, NO_DAYS_IN_MONTH, EVENT_URL
YEAR = 2073  # Bikram Sambat year the event timeline arithmetic is anchored to
BASE_TECHNIQUES_URL = "http://barsabharitarkari.org/en/techniques/"
from django.conf import settings
BASE_DIR = settings.BASE_DIR
# Serialization failures in get_json_from_plant are appended here.
LOG_FILE = os.path.join(BASE_DIR, "my_log.txt")
def add_zero_if_less_than_ten(number):
    """Zero-pad *number* to two digits for MM/DD date formatting.

    A falsy value (0) is bumped to 1 before padding, so 0 -> "01".
    """
    if number >= 10:  # BUG FIX: was `> 10`, which rendered 10 as "010"
        return str(number)
    if not number:
        number += 1
    return "0" + str(number)
def get_date_from_days(total_days):
    """Convert a day count into an ``MM/DD/YYYY`` string.

    Uses simplified calendar arithmetic: 365-day years and 30-day months.
    """
    year = math.floor(total_days / 365)
    leftover = total_days - year * 365
    month = math.floor(leftover / 30)
    day = round(leftover - month * 30)
    # Zero-pad month/day so the string parses as a Java-style date.
    return "{}/{}/{}".format(
        add_zero_if_less_than_ten(month),
        add_zero_if_less_than_ten(day),
        year,
    )
def obtain_english_calendar_from_event(event_object):
    """Map a Bikram Sambat event (month/week fields) onto a Gregorian date string.

    The constant 20698 is the day offset between the Bikram Sambat day count
    and the epoch expected by get_date_from_days.
    """
    month_offset = NEPALI_MONTH_INDEX[event_object.month]
    day_offset = NO_DAYS_IN_MONTH[event_object.week]
    bikram_sambat_days = round(YEAR * 365 + month_offset * 30.5 + day_offset)
    return get_date_from_days(bikram_sambat_days - 20698)
def get_event_url(event_object):
    """Build the absolute techniques URL for *event_object* via its slug mapping."""
    slug = EVENT_URL[event_object.event_name.strip()]
    return "{}{}/".format(BASE_TECHNIQUES_URL, slug)
def convert_event_into_dict(event_object):
    """Serialize an Event into a dict.

    Keys: detailURL, eventDate (Gregorian MM/DD/YYYY), name, nepaliName.
    """
    return {
        'detailURL': get_event_url(event_object),
        'eventDate': obtain_english_calendar_from_event(event_object),
        'name': event_object.event_name,
        'nepaliName': event_object.nepali_event_name,
    }
def get_timeline_for_plant(plant_object):
    """Return the plant's events serialized as dicts; [] when it has none."""
    plant_model = Plant.objects.get(pk=plant_object.pk)
    events = Event.objects.filter(plant_events=plant_model)
    if not events:
        return []
    return [convert_event_into_dict(event) for event in events]
def generate_unique_name(name):
    """Replace every hyphen in *name* with an underscore."""
    return "_".join(name.split("-"))
def remove_paranthesis(name):
    """Return the text before the first '(' with surrounding whitespace stripped."""
    head, _sep, _tail = name.partition("(")
    return head.strip()
def get_nepali_name(name):
    """Extract the parenthesised Nepali name; fall back to *name* when absent."""
    try:
        inside = name.split("(")[1]
    except IndexError:
        # No opening parenthesis at all: the plain name is returned unchanged.
        return name
    return inside.split(")")[0].strip()
def get_json_from_plant(plant):
    """Serialize a Plant into the dict the mobile app consumes.

    Returns False (and appends the exception to LOG_FILE) when any attribute
    access or timeline lookup raises, so one bad row does not break the
    whole listing.
    """
    import datetime  # local import, only needed for the failure log timestamp
    try:
        return {
            'name': plant.name,
            'plantNepaliName': get_nepali_name(plant.name),
            'season': plant.season,
            'detailURL': plant.detailURL,
            'image': plant.image,
            'unique_name': generate_unique_name(plant.unique_name),
            'timeline': get_timeline_for_plant(plant)
        }
        # BUG FIX: removed a second, discarded get_timeline_for_plant(plant)
        # call that only doubled the database work.
    except Exception as e:
        with open(LOG_FILE, "a") as f:
            f.write("Exception on {}".format(str(datetime.datetime.today())))
            f.write(str(e))
            f.write("\n")
        return False
def check_to_add(name):
    """Return True when *name* contains any of the whitelisted crop keywords."""
    keywords = ("Beans", "Cress", "mustard", "Fava", "Colocasia",
                "Coriander", "Cauliflower", "Bottle", "Sweet")
    return any(keyword in name for keyword in keywords)
def get_json_of_all_plants():
    """Serialize every Plant; rows whose serialization failed (False) are skipped."""
    all_plants = []
    for plant in Plant.objects.all():
        plant_json = get_json_from_plant(plant)
        if plant_json:
            # BUG FIX: reuse the computed dict — the original called
            # get_json_from_plant twice per plant, doubling the DB queries.
            all_plants.append(plant_json)
    return all_plants
def convert_fruit_into_dict(fruit_object):
    """Project a Fruit onto the four fields the API exposes."""
    fields = ('name', 'image', 'unique_name', 'detailURL')
    return {field: getattr(fruit_object, field) for field in fields}
def get_dict_from_technique(technique_object):
    """Build name/URL metadata for one technique page (English and Nepali)."""
    title = technique_object.title
    detail_url = "http://barsabharitarkari.org/en/techniques/" + technique_object.slug
    try:
        nepali_name = title.split("(")[1].split(")")[0].strip()
    except IndexError:
        # No parenthesised Nepali title present.
        nepali_name = ""
    return {
        'name': title.strip(),
        'detailURL': detail_url,
        'detailNepaliURL': detail_url.replace("/en/", "/ne/"),
        'nepaliName': nepali_name,
    }
def get_json_of_all_techniques():
    """List every child of the 'techniques' index page flagged as improved."""
    technique_pages = ContentIndexPage.objects.get(slug="techniques").get_children()
    improved = []
    for page in technique_pages:
        # Look up the concrete ContentPage to read its improved_technique flag.
        if ContentPage.objects.get(slug=page.slug).improved_technique:
            improved.append(get_dict_from_technique(page))
    return improved
def get_json_of_all_fruits():
    """Serialize every Fruit row."""
    return [convert_fruit_into_dict(fruit) for fruit in Fruit.objects.all()]
def get_price_json_one_item(price_object):
    """Map one crop's normalized name (before '(', lowercased, underscored) to its price string."""
    crop_key = price_object.name.split("(")[0].strip().lower().replace(" ", "_")
    return {crop_key: str(price_object.price)}
def get_json_of_all_prices():
    """Map every crop's normalized name to its price as a string."""
    def normalize(crop_name):
        return crop_name.strip().split("(")[0].strip().lower().replace(" ", "_")
    return {normalize(crop.name): str(crop.price) for crop in CropPricing.objects.all()}
def get_plants_data(request):
    """Single JSON endpoint bundling plants, fruits, techniques and prices."""
    return JsonResponse({
        "plants": get_json_of_all_plants(),
        "fruits": get_json_of_all_fruits(),
        "techniques": get_json_of_all_techniques(),
        "prices": get_json_of_all_prices(),
    })
| [
"mta.ankit@gmail.com"
] | mta.ankit@gmail.com |
cad08c6af20f321507af6bc050e428731b67a33f | 7dc240e587213e4b420676c60aa1b24905b1b2e4 | /src/app/tests/mailchimp/conftest.py | d5af1f4a3624389007aae35e1b133692b303f6ce | [
"MIT"
] | permissive | denokenya/education-backend | 834d22280717f15f93407108846e2eea767421c8 | 3b43ba0cc54c6a2fc2f1716170393f943323a29b | refs/heads/master | 2023-08-27T09:07:48.257108 | 2021-11-03T00:19:04 | 2021-11-03T00:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import pytest
import requests_mock
from app.integrations.mailchimp import AppMailchimp, MailchimpMember
# Every test in this module gets database access.
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _set_mailchimp_credentials(settings):
    """Point the app at dummy Mailchimp credentials for every test."""
    settings.MAILCHIMP_API_KEY = 'key-us05'
    settings.MAILCHIMP_CONTACT_LIST_ID = '123cba'
@pytest.fixture
def mailchimp():
    """AppMailchimp client with HTTP mocked via requests_mock for the test's duration."""
    client = AppMailchimp()
    with requests_mock.Mocker() as http_mock:
        # Expose the mocker so tests can register expected responses.
        client.http_mock = http_mock
        yield client
@pytest.fixture
def mailchimp_member(user):
    """MailchimpMember built from the `user` fixture."""
    return MailchimpMember.from_django_user(user)
@pytest.fixture
def post(mocker):
    """Mock of the low-level Mailchimp HTTP POST; lets tests assert sent payloads."""
    return mocker.patch('app.integrations.mailchimp.http.MailchimpHTTP.post')
@pytest.fixture
def user(mixer):
    """A persisted user with fixed e-mail and name."""
    return mixer.blend('users.User', email='test@e.mail', first_name='Rulon', last_name='Oboev')
| [
"noreply@github.com"
] | noreply@github.com |
5a430ef971af1b67af314b64ae6eac1b2d348931 | 28f726ae55c94ad559aba289f5e3f8f51c966a4d | /导出数据.py | c0fbbae723b55cecabffaaba977e678da8dae968 | [] | no_license | bruce994/python_training | 546ed4f27ef8da7a9f94f7b8f4db9100ffeae137 | 12ba20f8ef662ef2e8cc3018ed1254c33e75375b | refs/heads/master | 2021-01-10T12:54:28.306911 | 2019-09-18T03:15:16 | 2019-09-18T03:15:16 | 43,752,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | #!/usr/bin/env python
#-*- coding:utf-8-*-
import socket
import thread
import time,os,shutil,platform,datetime,sys,re
import MySQLdb
import sqlite3
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='test',charset='utf8', init_command='SET NAMES UTF8')
cursor = conn.cursor()
cursor.execute("select title,body,id from jzwj_archives as a join jzwj_addonarticle as b on a.id=b.aid where title <> '' and id<8542 order by id desc ")
for row in cursor.fetchall():
title = row[0]
content = row[1].encode('utf-8')
id = row[2]
title = title.replace("?","")
title = title.replace(",","")
title = title.replace("!","")
title = title.replace(".","")
title = title.replace("\"","")
title = title.replace("|","")
title = title.replace("/","")
tmp2=''
for x in title:
tmp2 += x + ' '
tmp2 = tmp2[:-1] + '.txt'
try:
file_write = open(tmp2, 'wb')
except Exception, e:
continue
else:
reps = [r"<[^>]+>",r" "]
for rep in reps:
regex = re.compile(rep)
content = regex.sub("", content)
file_write.write(content)
file_write.close
print str(id) + ":" +tmp2
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
231d481e63ca5e223979d6b4a158a15ed9294642 | 0e74ba41a89742cc81c6ffaab685ee7f991fd0dc | /gettingFileList/getFileListTtB.py | 36d448060c3422231d6f199b8056b3dc85179822 | [] | no_license | bigalex95/androidMalwareDetectionSystem | 661059acbb40ad56fb6ca99943c9a02a87e1362c | 542373ca7dc700fa4a569deb34d3d87ca80d4ecd | refs/heads/master | 2021-07-16T21:54:03.263225 | 2020-07-25T22:08:12 | 2020-07-25T22:08:12 | 192,614,718 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import glob
# Directory holding the benign test-set sources.
BENIGN_PATH_TEST = '../Benign/Test/'
# Collect every .txt source file name under the benign test directory.
class_benign_test = glob.glob(BENIGN_PATH_TEST + '*.txt')
# Write all source file names, one per line, to the list file.
with open('file_list_test_Benign.txt', 'w') as f:
    for item in class_benign_test:
        f.write("%s\n" % item)
    # BUG FIX: removed redundant f.close(); the with-statement closes the file.
| [
"amanbayeva95@gmail.com"
] | amanbayeva95@gmail.com |
bf8e5d90cbc8364cd686f902be7a8ff4071e570c | 3ada098871f017f316209f0ff7751c5ac784121a | /queue/multicast/main.py | 654a82c5a59cd6f29bafbd62a650d9cd9c62072b | [
"Apache-2.0"
] | permissive | kubemq-io/python-sdk-cookbook | 1824d2da06fdf6f7d778c8ed465866a2d125266e | 0c843ec77e8923a79a0853c83915f7ee240c5ddb | refs/heads/master | 2023-07-13T06:27:36.167221 | 2021-08-21T11:29:17 | 2021-08-21T11:29:17 | 352,559,888 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | from kubemq.queue.message_queue import MessageQueue
from kubemq.queue.message import Message
def create_queue_message(meta_data, body, policy=None):
    """Build a KubeMQ queue Message with fixed demo tags; attributes stay unset."""
    message = Message()
    message.metadata = meta_data
    message.body = body
    message.tags = [('key', 'value'), ('key2', 'value2')]
    message.attributes = None
    message.policy = policy
    return message
def _receive_and_print(queue_name, client_id, label):
    """Drain up to 2 pending messages (1s wait) from *queue_name* and print them.

    Consolidates the three identical receive blocks the script previously
    repeated for queue.a / queue.b / queue.c.
    """
    queue = MessageQueue(queue_name, client_id, "localhost:50000", 2, 1)
    try:
        res = queue.receive_queue_messages()
        if res.error:
            print(
                "'Error Received:'%s'" % (
                    res.error
                )
            )
        else:
            for message in res.messages:
                print(
                    "'Queue %s Received :%s ,Body: sending:'%s'" % (
                        label,
                        message.MessageID,
                        message.Body
                    )
                )
    except Exception as err:
        print(
            "'error sending:'%s'" % (
                err
            )
        )


if __name__ == "__main__":
    # A single send to a semicolon-separated channel multicasts to all queues.
    channel = "queue.a;queue.b;queue.c"
    queue = MessageQueue(channel, "python-sdk-cookbook-queues-multicast-client", "localhost:50000")
    message = create_queue_message("queueName {}".format(channel),
                                   "some-simple-queue-multicast-message-1".encode('UTF-8'))
    try:
        sent = queue.send_queue_message(message)
        if sent.error:
            print('message enqueue error, error:' + sent.error)
        else:
            print('Send to Queue at: %d' % (
                sent.sent_at
            ))
    except Exception as err:
        print('message enqueue error, error:%s' % (
            err
        ))
    # Read the multicast message back from each destination queue.
    _receive_and_print('queue.a', "python-sdk-cookbook-queues-multicast-client-receiver-A", 'A')
    _receive_and_print('queue.b', "python-sdk-cookbook-queues-multicast-client-receiver-B", 'B')
    _receive_and_print('queue.c', "python-sdk-cookbook-queues-multicast-client-receiver-C", 'C')
| [
"eitam.ring@kubemq.io"
] | eitam.ring@kubemq.io |
8a2e8a556542b2c6270c5ebb0463bb7eda92fe92 | e02366473ccd3ddbddd96e1fecd2f460bf07db95 | /lambdas.py | 703fd3139332731f05659d137a00f4ca418268cd | [
"MIT"
] | permissive | davidlares/python-overview | 03413e4b27107d71cc1ced122ba104a83c99e48c | 523a4fd59ecc356b95f091adbe609448f85e8aa5 | refs/heads/master | 2021-09-14T17:34:15.468977 | 2018-05-16T19:09:57 | 2018-05-16T19:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # asignar una funcion a una variable
def grados(grados):
    # Celsius -> Fahrenheit conversion.
    return grados * 1.8 + 32
# Assign a function to a variable (functions are first-class objects).
function_variable = grados
resultado = function_variable(32)
print(resultado)
# Lambdas, i.e. anonymous functions.
mifunc = lambda grados=0 : grados * 1.8 + 32 # every lambda returns a value
resultado = mifunc(32)
print(resultado)
| [
"david.e.lares@gmail.com"
] | david.e.lares@gmail.com |
a29b5c5fd84534f37e17dd2410016807deff86f6 | 9c404f18c27297e5c6fe6dde50097765478e09bf | /src/blog/migrations/0007_contact.py | 48d4ef5c22e50203a8a429c09560e19633698873 | [] | no_license | rishav4101/Click_Galaxy | 6001619e25d41504cd7f27cc40a1dfd064bfd52c | 404482ce760f8422837438fbddc046575d41b351 | refs/heads/master | 2021-03-09T23:00:21.274638 | 2020-03-17T16:49:20 | 2020-03-17T16:49:20 | 246,389,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # Generated by Django 3.0.4 on 2020-03-10 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the blog Contact model (name, email, body)."""
    dependencies = [
        ('blog', '0006_delete_feedback'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
            ],
        ),
    ]
| [
"rajkumarrishav4101@gmail.com"
] | rajkumarrishav4101@gmail.com |
2809b47d249d56790cb08fb8a0c7d5f1fbdd146e | d53baf0a3aaa10521cfc28a7be8f2c498bc9e741 | /examples/CaffeModels/load-vgg16.py | 96780e85eac94a3b1709a479d22cf2e3faa232fd | [
"Apache-2.0"
] | permissive | qianlinjun/tensorpack | 8f6e99ba17095334de1163d6412e740642343752 | 7f505225cd41aaeee3a0b0688fe67afc0af8fb30 | refs/heads/master | 2020-03-29T22:38:22.269889 | 2018-09-25T07:20:48 | 2018-09-25T07:20:48 | 150,432,021 | 1 | 0 | Apache-2.0 | 2018-09-26T13:35:19 | 2018-09-26T13:35:18 | null | UTF-8 | Python | false | false | 3,493 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-vgg16.py
from __future__ import print_function
import cv2
import tensorflow as tf
import numpy as np
import os
import six
import argparse
from tensorpack import *
from tensorpack.dataflow.dataset import ILSVRCMeta
enable_argscope_for_module(tf.layers)
def tower_func(image):
    """Build the VGG16 inference graph on *image* (NHWC float32, 224x224x3).

    Layer names (conv1_1 ... fc8) must line up with the keys of the converted
    caffe .npz checkpoint that run_test maps into kernel/bias variables.
    Dropout is active only when the tower context says we are training.
    """
    is_training = get_current_tower_context().is_training
    with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
        x = image
        # Five conv stages, each followed by 2x2 max-pooling (64->512 channels).
        x = tf.layers.conv2d(x, 64, name='conv1_1')
        x = tf.layers.conv2d(x, 64, name='conv1_2')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
        x = tf.layers.conv2d(x, 128, name='conv2_1')
        x = tf.layers.conv2d(x, 128, name='conv2_2')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool2')
        x = tf.layers.conv2d(x, 256, name='conv3_1')
        x = tf.layers.conv2d(x, 256, name='conv3_2')
        x = tf.layers.conv2d(x, 256, name='conv3_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool3')
        x = tf.layers.conv2d(x, 512, name='conv4_1')
        x = tf.layers.conv2d(x, 512, name='conv4_2')
        x = tf.layers.conv2d(x, 512, name='conv4_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool4')
        x = tf.layers.conv2d(x, 512, name='conv5_1')
        x = tf.layers.conv2d(x, 512, name='conv5_2')
        x = tf.layers.conv2d(x, 512, name='conv5_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool5')
        # Classifier head: two 4096-wide FC layers with dropout, then 1000-way logits.
        x = tf.layers.flatten(x, name='flatten')
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
        x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
        x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
        logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
    tf.nn.softmax(logits, name='prob')
def run_test(path, input):
    """Load converted VGG16 weights from *path* (.npz) and classify the *input* image.

    Note: parameter name `input` shadows the builtin of the same name.
    """
    param_dict = dict(np.load(path))
    # Caffe-converted weights use /W and /b suffixes; tf.layers variables
    # are named /kernel and /bias, so remap the keys before restoring.
    param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
    predict_func = OfflinePredictor(PredictConfig(
        inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
        tower_func=tower_func,
        session_init=DictRestore(param_dict),
        input_names=['input'],
        output_names=['prob']  # prob:0 is the probability distribution
    ))
    im = cv2.imread(input)
    assert im is not None, input
    # OpenCV decodes BGR; convert to RGB before feeding the network.
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
    # VGG16 requires channelwise mean substraction
    VGG_MEAN = [103.939, 116.779, 123.68]
    # Means are listed in BGR order; reversed here to match the RGB image above.
    im -= VGG_MEAN[::-1]
    outputs = predict_func(im)[0]
    prob = outputs[0]
    # Indices of the 10 highest-probability classes, best first.
    ret = prob.argsort()[-10:][::-1]
    print("Top10 predictions:", ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # GPU selection is optional; weights file and input image are required.
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', required=True,
                        help='.npz model file generated by tensorpack.utils.loadcaffe')
    parser.add_argument('--input', help='an input image', required=True)
    args = parser.parse_args()
    if args.gpu:
        # Restrict TensorFlow to the requested GPUs.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    run_test(args.load, args.input)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
5b3165a574457eeb1f369cd70b0259bd520aec67 | 8e2404c7bcfd28329bed789839192b2c4e85ea1b | /LeetCode/Linked_List_Cycle_II.py | ca97be57324afaacc01727943d36debb9971ccae | [] | no_license | Pabitra-26/Problem-Solved | 408bd51bbffc69f8c5e1def92797c2e6f027f91d | c27de1dd6c4ad14444fa5ee911a16186c200a7f9 | refs/heads/master | 2023-07-30T16:51:28.062349 | 2021-09-27T06:06:54 | 2021-09-27T06:06:54 | 269,935,039 | 2 | 0 | null | 2021-09-27T06:06:55 | 2020-06-06T09:39:33 | Python | UTF-8 | Python | false | false | 886 | py | # Problem name: Linked List Cycle II
# Description: Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
# To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to.
# If pos is -1, then there is no cycle in the linked list.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def detectCycle(self, head: 'ListNode') -> 'ListNode':
        """Return the node where the cycle begins, or None if the list is acyclic.

        Hash-set walk: the first node seen twice is the cycle entry.
        O(n) time, O(n) space.

        Fixes over the original: removed unused counter `i` and the redundant
        `m` flag, replaced the dict-as-set with a real set, and quoted the
        ListNode annotations (the class is provided by the judge, so unquoted
        names raise NameError outside that environment).
        """
        seen = set()
        node = head
        while node is not None:
            if node in seen:
                return node
            seen.add(node)
            node = node.next
        return None
"noreply@github.com"
] | noreply@github.com |
b6a4a9e47571cdc8e1f355c4ff97f2f25ce41edb | ee7e42417d9d1e76b0e84e44dc6eb037adc3ebad | /.history/pet/api_20190703151654.py | 3b027aed09213348242bbcfd996055000b31003a | [] | no_license | web3-qa/pets-api | 4632127ee84a299f207d95754f409fc1e4c0013d | ee4a04e7291740ac8eb6147c305b41d27d5be29c | refs/heads/master | 2023-05-12T09:09:47.509063 | 2019-07-18T15:07:13 | 2019-07-18T15:07:13 | 197,611,701 | 0 | 0 | null | 2023-05-01T19:42:17 | 2019-07-18T15:19:59 | Python | UTF-8 | Python | false | false | 7 | py | from fl | [
"dcolmer@statestreet.com"
] | dcolmer@statestreet.com |
9cbccdf6741a644e2d43e78594b58ded66dc35c4 | af992da82e277bf3982b003a0fb8b6f65d4311b4 | /dataAnalysis/fit_task.py | cc9226d71dd6fbcd41bb6154583bc0eb3d2e0c11 | [
"MIT"
] | permissive | emailhy/lab5 | 5529c7c388111f16215262f7e45a3ba7201b767c | c0f499bf396d228290ce6d06fc90567e81cb638c | refs/heads/master | 2021-05-10T07:37:35.532945 | 2017-10-09T04:17:56 | 2017-10-09T04:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from dataAnalysis.prmodel import FormulationDataModel
from math import ceil, floor
import numpy as np
import json
from app import celery
from redis import Redis
from datetime import datetime
r = Redis(host='127.0.0.1')


@celery.task
def fit_model_task(f_id, training_uuid, logging_uuid, epochs=100):
    """Train the formulation model, save it, and publish the result to Redis.

    The result payload is stored under *training_uuid*; *logging_uuid* is
    updated to the 'trained' state once everything is persisted.
    """
    fdm = FormulationDataModel(f_id)
    model, fit_history = fdm.fit_model(logging_uuid, epochs=epochs)
    data_traces, grid_traces = fdm.get_formulation_predict_data()
    # Model file name embeds timestamp and final training loss,
    # e.g. 2017-07-12_20-38-39_loss-0.0118556629749.hdf5
    final_loss = fit_history.history['loss'][-1]
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    model_name = '%s_loss-%s.hdf5' % (timestamp, str(final_loss))
    fdm.save_model(model=model, model_name=model_name)
    payload = json.dumps({'status': 'success',
                          'formulation_id': f_id,
                          'data_traces': data_traces,
                          'grid_traces': grid_traces,
                          'model_name': model_name})
    r.set(training_uuid, payload)
    r.set(logging_uuid, json.dumps({'model_state': 'trained'}))
| [
"qinzishi@gmail.com"
] | qinzishi@gmail.com |
890e60f80c689b4b20df4c533f1250dfabacfa0e | f466c7d8dc1034df6dfd150b0468fe2fe45f8565 | /xls2pdf/xls2pdf.spec | 2edd20d4b1b8385715dfd8eeb6765e0d661b8c71 | [] | no_license | MarlboroLeo/utils | ebce4ca300ce4c5fe85c4a03930f12363e08627c | 7bc717cf46d8f39807dd9c294535bb16ece17f0d | refs/heads/master | 2021-06-21T01:39:11.147692 | 2021-03-25T15:51:35 | 2021-03-25T15:51:35 | 194,812,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | spec | # -*- mode: python -*-
# PyInstaller build spec for the xls2pdf tool.
block_cipher = None  # bytecode is not encrypted
# Analyze the entry script and collect its dependencies.
a = Analysis(['xls2pdf.py'],
             pathex=['D:\\Leau\\code\\xls2doc'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Bundle the pure-Python modules into a compressed archive.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Single-file console executable (runtime_tmpdir=None -> onefile mode).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='xls2pdf',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
| [
"mu_zhennan@126.com"
] | mu_zhennan@126.com |
fc75c39c188f5c86376de80a21d363b87e28047e | a3e2201173a475c78c2b89456b916c919657ed25 | /core_contracts/rebalancing/utils/checks.py | af0638719f4a9748f3285a7ce9ac84ffdbf6a9d4 | [
"MIT"
] | permissive | subba72/balanced-contracts | a302994044dab909f9d5ef84bbee593e6e9695c7 | cd185fa831de18b4d9c634689a3c6e7b559bbabe | refs/heads/main | 2023-08-15T12:15:47.967642 | 2021-08-06T04:09:19 | 2021-08-06T04:09:19 | 396,687,527 | 0 | 0 | MIT | 2021-08-16T08:04:25 | 2021-08-16T08:04:24 | null | UTF-8 | Python | false | false | 1,361 | py | from iconservice import *
# ================================================
# Exceptions
# ================================================
class SenderNotScoreOwnerError(Exception):
    """Raised when the caller is not the SCORE owner."""
    pass
class SenderNotAuthorized(Exception):
    """Raised when the caller is not the configured admin."""
    pass
class SenderNotGovernance(Exception):
    """Raised when the caller is not the governance contract."""
    pass
class SenderNotRebalance(Exception):
    """Intended for rebalance-only access checks (not raised by the decorators in this file)."""
    pass
class NotAFunctionError(Exception):
    """Raised when an access-control decorator is applied to a non-function."""
    pass
def only_governance(func):
    """Restrict a SCORE method to calls from the governance contract."""
    if not isfunction(func):
        raise NotAFunctionError

    @wraps(func)
    def guarded(self: object, *args, **kwargs):
        if self.msg.sender == self._governance.get():
            return func(self, *args, **kwargs)
        raise SenderNotGovernance(self.msg.sender)
    return guarded
def only_owner(func):
    """Restrict a SCORE method to calls from the contract owner."""
    if not isfunction(func):
        raise NotAFunctionError

    @wraps(func)
    def guarded(self: object, *args, **kwargs):
        if self.msg.sender == self.owner:
            return func(self, *args, **kwargs)
        raise SenderNotScoreOwnerError(self.owner)
    return guarded
def only_admin(func):
    """Restrict a SCORE method to calls from the configured admin address."""
    if not isfunction(func):
        raise NotAFunctionError

    @wraps(func)
    def guarded(self: object, *args, **kwargs):
        if self.msg.sender == self._admin.get():
            return func(self, *args, **kwargs)
        raise SenderNotAuthorized(self.msg.sender)
    return guarded
| [
"adhikarisuyog49@gmail.com"
] | adhikarisuyog49@gmail.com |
ac8fff68a489144a8de93d5312a8f51903d2b38c | dc5fd106270d1e81f9eefcc542695c4bb1f8c691 | /customers/migrations/0010_auto_20160411_0245.py | 643d4c1d61597f5db2eafdb5acd5ed8c4a6139eb | [] | no_license | iblogc/backend | 3bd134701cc2a6dbcf4438026693814524f07cc2 | e7a5f85eaa57765cf91a62f022ea64b61ce56ffd | refs/heads/master | 2021-01-21T08:57:36.300994 | 2016-09-21T05:34:35 | 2016-09-21T05:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-11 02:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add ApproveLog.action_user and CustomerAccount.gender;
    make AccountKey.account a reverse-named one-to-one and allow a null
    register_date."""
    dependencies = [
        ('customers', '0009_accountkey'),
    ]
    operations = [
        migrations.AddField(
            model_name='approvelog',
            name='action_user',
            field=models.CharField(blank=True, default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='customeraccount',
            name='gender',
            # Choices are UTF-8 byte strings for 男 (male) / 女 (female).
            field=models.IntegerField(choices=[(0, b'\xe7\x94\xb7'), (1, b'\xe5\xa5\xb3')], default=0),
        ),
        migrations.AlterField(
            model_name='accountkey',
            name='account',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='key', to='customers.CustomerAccount'),
        ),
        migrations.AlterField(
            model_name='customeraccount',
            name='register_date',
            field=models.DateTimeField(blank=True, default=None, null=True),
        ),
    ]
| [
"14841787@qq.com"
] | 14841787@qq.com |
f7d01c0eda3577f57ae5c0e2137ea657057871cc | 7c64785c00de294f1456a3d167727e0885af0f59 | /setup.py | 12d4c9d70662766fff071ed4bd294fb7978eeaaa | [
"Apache-2.0"
] | permissive | yh-luo/prosdk-addons-python | b5bc311d98b1c095bcf86c19d1f3c4228f27bd22 | 9335cf9a17da7673892c2b849f0884b89e8cdabf | refs/heads/master | 2020-03-19T14:49:10.745441 | 2018-06-08T16:16:18 | 2018-06-08T16:16:18 | 136,640,731 | 0 | 0 | null | 2018-06-08T16:09:54 | 2018-06-08T16:09:54 | null | UTF-8 | Python | false | false | 1,634 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='tobii_research_addons',
    version='0.1.0',
    description='Addons for the Tobii Pro SDK.',
    long_description=long_description,
    # README.md is Markdown; without this PyPI renders it as plain text.
    long_description_content_type='text/markdown',
    url='https://github.com/tobiipro/prosdk-addons-python',
    author='Tobii AB',
    author_email='tobiiprosdk@tobii.com',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        # Fixed: 'Development Status :: 3 - Beta' is not a valid trove
        # classifier (3 is Alpha, 4 is Beta); PyPI rejects unknown classifiers.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Video :: Capture',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries',
        # NOTE(review): repository metadata suggests Apache-2.0 — confirm
        # that the MIT classifier below is the intended license.
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    keywords='tobii research eyetracking sdk tobiipro',
    py_modules=["tobii_research_addons"],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['tobii_research'],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    package_data={
        'sample': [],
    },
    project_urls={
        'Bug Reports': 'https://github.com/tobiipro/prosdk-addons-python/issues',
        'Source': 'https://github.com/tobiipro/prosdk-addons-python',
    },
)
| [
"pontus.nyman@tobii.com"
] | pontus.nyman@tobii.com |
2f9a0e5eb894f82c3246cbe316a3aaec12605ea4 | 49c715a71da32472e078be32c46ffe2c7315d253 | /TestREST_Framework/env/bin/pip2 | b6279f9dd630da82762490a0d1b35818b84ddd83 | [] | no_license | janicheen/KontrollrommetBETA | aaddc92a3b58ecb2bbed5e0e79f7d3b461fe79e4 | 00a78e5ca8079beb327ceacd7fb4a02a9011ca06 | refs/heads/master | 2020-06-24T13:57:57.848532 | 2017-07-07T23:47:47 | 2017-07-07T23:47:47 | 96,937,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/Users/Janic/Kontrollrommet/TestREST_Framework/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from pip import main

if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix from the program name so
    # pip reports itself cleanly, then hand control to pip's CLI entry point.
    program_name = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(main())
| [
"janic@online.no"
] | janic@online.no | |
a11c6f9099d8b70366f5afa7c539fef9e9c2c750 | 32e97c6f83142d6fc365749a42356e689ea2fa70 | /mk/com/dragan/nupic/result_generators/AbstractResultGenerator.py | 94e470c3b50269e2745870f4e50a4d438e3c7bcd | [] | no_license | inside-dragan/magisterska | 7168f9f8864c2d214cb764abc79d42983c4c0d9b | 334408bc4a89371f8c63880a9865ea2f37054d50 | refs/heads/master | 2016-09-06T06:54:30.052230 | 2013-05-21T21:41:37 | 2013-05-21T21:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,780 | py | '''
Created on Jun 7, 2012
@author: dzaharie
'''
from mk.dragan.config.Params import Params
from mk.dragan.config.ResultContainer import ResultContainer
from mk.dragan.utils.CsvUtils import writeData
from mk.dragan.utils.DatabaseUtils import dbgetlist, dbsetlist
import logging
import os
import shutil
import traceback
log = logging.getLogger('Abstract Result Generator')
class AbstractResultGenerator(object):
    """Template-method driver for a NuPIC parameter sweep (Python 2 code).

    Subclasses supply the stopping criteria, sigma grid and level-1 output
    size; this base class sweeps (max-distance, sigma) pairs, runs the
    supervised executor on eval/test/validation splits and collects the
    scores through the global ResultContainer singleton.
    """

    # NOTE(review): never read within this class — confirm it is still used.
    __resultFolder = None
    _executor = None

    def __init__(self, executor):
        # Executor that runs one supervised train/eval/test/validation pass.
        self._executor = executor

    # -- abstract hooks supplied by concrete generators --------------------

    def _areValidNodes(self):
        raise NotImplementedError()

    def _shouldStopIncreasingMaxDistance(self):
        raise NotImplementedError()

    def _shouldStopDecreasingMaxDistance(self):
        raise NotImplementedError()

    def _getLevel1BottomUpOut(self):
        raise NotImplementedError()

    def _getSigmas(self):
        raise NotImplementedError()

    def _createData(self, dataCreator):
        """Write the dataCreator's category and input CSVs into ./input,
        replacing any previous contents."""
        outPath = os.getcwd() + '/input'
        if os.path.exists(outPath):
            shutil.rmtree(outPath)
        os.mkdir(outPath)
        writeData(dataCreator.getCat(), outPath + '/cat.csv')
        inputs = dataCreator.getInputs()
        for i in range(0, len(inputs)):
            writeData(inputs[i], outPath + '/input' + str(i+1) + '.csv')

    def _strResult(self, dictionaryList):
        """Format a list of result dicts (one line per dict) for reporting."""
        result = ''
        for dictionary in dictionaryList:
            result += "level 1) %0.6f" % dictionary['max-distance'] + ' '
            result += "%0.6f" % dictionary['sigma'] + ' '
            result += "level 2) %0.6f" % dictionary['max-distance2'] + ' '
            result += "%0.6f" % dictionary['sigma2'] + ' '
            result += "result) %0.2f" % dictionary['correct'] + ' '
            result += "%0.2f" % dictionary['unknown'] + ' '
            result += '\n'
        return result

    def _isValidNode(self, nodeName):
        """A node is usable when it learned enough coincidences and at least
        one non-trivial group."""
        coincidences = ResultContainer().coincidences[nodeName]['num']
        largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
        return coincidences >= 10 and largestGroupSize > 1

    def _isValidResult(self):
        """Accept a run with > 55% correct and < 40% unknown classifications."""
        perc = ResultContainer().getLatestResult()['correct']
        unknown = ResultContainer().getLatestResult()['unknown']
        return perc > 55 and unknown < 40

    def _shouldStopIncreasingMaxDistanceForNode(self, nodeName):
        """Stop growing max-distance once too few coincidences remain or one
        group dominates (> 1/3 of all coincidences)."""
        if ResultContainer().isEmpty():
            return False
        coincidences = ResultContainer().coincidences[nodeName]['num']
        largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
        log.info("stop increasing? largestGroupSize=" + str(largestGroupSize) + " coincidences=" + str(coincidences))
        return coincidences < 10 or largestGroupSize * 3 > coincidences;

    def _shouldStopDecreasingMaxDistanceForNode(self, nodeName):
        """Stop shrinking max-distance once groups degenerate or coincidence
        count exceeds the level-1 bottom-up output size."""
        if ResultContainer().isEmpty():
            return False
        largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
        coincidences = ResultContainer().coincidences[nodeName]['num']
        log.info("stop decreasing? largestGroupSize=" + str(largestGroupSize) + " coincidences=" + str(coincidences))
        return largestGroupSize < 2 or coincidences > self._getLevel1BottomUpOut()

    def _calculateMaxDistances(self):
        """Bracket a usable max-distance range [minimal, maximal] by doubling
        up from 0.01 and halving down from 50, then return 11 evenly spaced
        candidates within it (empty list when the bracket is degenerate)."""
        result = []
        maximal = 0.01
        while not self._shouldStopIncreasingMaxDistance():
            Params().MAX_DISTANCE = maximal
            try:
                log.info('calculating max distance. trying: ' + str(maximal))
                self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
            except RuntimeError:
                log.error('error thrown for maxDistance=' + str(maximal))
            maximal *= 2
        minimal = 50.0
        while not self._shouldStopDecreasingMaxDistance():
            Params().MAX_DISTANCE = minimal
            try:
                log.info('calculating max distance. trying: ' + str(minimal))
                self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
            except RuntimeError:
                log.error('error thrown for maxDistance=' + str(minimal))
                log.error(traceback.format_exc())
                break #error is thrown because so small maxDistance is not allowed any more
            minimal /= 2
        log.info("max distance calculated in the range of: " + str(minimal) + " - " + str(maximal))
        if maximal > minimal:
            step = (maximal - minimal) / 10
            for i in range(0, 11):
                result.append(minimal + step*i)
        return result

    def _initGenerateResult(self, dataCreator):
        """Prepare input data (when given), size the network and reset the
        shared result container."""
        if dataCreator != None:
            self._createData(dataCreator)
        Params().BOTTOM_UP_OUT = self._getLevel1BottomUpOut()
        ResultContainer().clear()

    def generateResult(self, dataCreator):
        """Run the full sweep: grid-search (max-distance, sigma) on the eval
        split, re-test surviving pairs, validate the best one and append a
        textual report via self.addToFile.

        NOTE(review): ``addToFile`` is not defined in this class — it must be
        provided by a subclass or mixin; confirm.
        """
        self._initGenerateResult(dataCreator)
        result = ''
        pairs = []
        # Cached per-dataset candidate list to avoid re-bracketing each run.
        maxDistances = dbgetlist('maxDistances', dataCreator.getDescription())
        if not maxDistances:
            maxDistances = self._calculateMaxDistances()
            dbsetlist('maxDistances', dataCreator.getDescription(), maxDistances)
            ResultContainer().clear()
        # Phase 1: evaluation sweep; keep pairs that pass the validity checks.
        for s in self._getSigmas():
            for d in maxDistances:
                log.info("trying: distance=%s sigma=%s" % (d, s))
                Params().MAX_DISTANCE = d
                Params().SIGMA = s
                try:
                    self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
                    if (self._isValidResult() and self._areValidNodes()):
                        pairs.append((d, s))
                except RuntimeError:
                    log.error('error thrown for maxDistance=' + str(d) + ' and sigma=' + str(s))
                    log.error(traceback.format_exc())
        r = ResultContainer().result
        result += 'training results: \n' + self._strResult(r)
        ResultContainer().result = []
        # Phase 2: re-run the surviving pairs on the test split.
        for pair in pairs:
            d = pair[0]
            s = pair[1]
            log.info("testing combination: distance=%s sigma=%s" % (d, s))
            Params().MAX_DISTANCE = d
            Params().SIGMA = s
            self._executor.executeSupervised(execEval=False, execTest=True, execVal=False)
        r = ResultContainer().result
        result += 'testing results: \n' + self._strResult(r)
        # Phase 3: among test results with < 40% unknown, validate the one
        # with the highest 'correct' score.
        found = []
        for line in r:
            if line['unknown'] < 40: #zemi gi samo tie so unknown pomalku od 40%
                found.append(line)
        if len(found) > 0:
            maxx = found[0]
            for x in found:
                if x['correct'] > maxx['correct']:
                    maxx = x
            result += 'best testing result: ' + self._strResult([maxx])
            ResultContainer().result = []
            Params().MAX_DISTANCE = maxx['max-distance']
            Params().SIGMA = maxx['sigma']
            self._executor.executeSupervised(execEval=False, execTest=False, execVal=True)
            validationResult = ResultContainer().getLatestResult()
            result += 'validation result: ' + self._strResult([validationResult])
        info = dataCreator.getDescription()
        print 'tesing combination: ' + str(info)
        self.addToFile(str(info) + '\n')
        self.addToFile(result + '\n\n\n')
        print result
        return result
| [
"dragan.zahariev@inside-solutions.ch"
] | dragan.zahariev@inside-solutions.ch |
2392a6dd5a8bc0cc84ab0904642c9fb7c3252d87 | a38b90349f7e2bae1400d2db4cfc9200d369d7ba | /blog/migrations/0001_initial.py | 1f3616c21f11d71129d5f6ea46c20112985d6f94 | [] | no_license | merymeru/my-first-blog | 01d79f45a71344edfb2caba2f5c777ca9f0f6ff4 | 95ddfbd07513de9f8b1ea90aca83a58b6a937e67 | refs/heads/master | 2020-03-27T06:26:20.929721 | 2018-08-26T20:37:20 | 2018-08-26T20:37:20 | 146,105,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.0.8 on 2018-08-25 14:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated migration for the blog app: creates the Post
    model (title, text, timestamps, author FK to the user model)."""

    initial = True

    dependencies = [
        # Post.author references the (possibly swapped) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Set at creation time; published_date stays null for drafts.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"merymeru@gmail.com"
] | merymeru@gmail.com |
754231c8d6e0ac524b966bfe25b565c8c101d363 | 79859d498c9fbb568f2eae19399a23558c3c6fd1 | /information/views/__init__.py | af1c64d210d93f1b5620c675925cedab6deefd12 | [] | no_license | Kiharaten/VirtualClassRoom | 7d04a57308587735e189d1d1c338b99fca6a3cbe | 170ecb3dbdd54a67496d0d95a8730804570c1a8b | refs/heads/master | 2023-03-13T19:57:24.925622 | 2021-02-19T03:41:50 | 2021-02-19T03:41:50 | 299,920,280 | 0 | 0 | null | 2021-02-19T03:41:51 | 2020-09-30T13:00:54 | Python | UTF-8 | Python | false | false | 367 | py | from django.shortcuts import get_object_or_404, render
# Template context shared by every view in this module (fixed site strings;
# the values are user-facing Japanese text and must not be altered).
context = {
    'fixed': {
        'sitename': '遠隔授業システム',
        'title': '- サイト情報 -',
    },
}


# Create your views here.
def index(request):
    """Render the site-information top page."""
    return render(request, 'information/top.html', context)


def help(request):
    """Render the help page.

    NOTE(review): the name shadows the builtin ``help``; kept because the
    URLconf presumably references it by this name — confirm before renaming.
    """
    return render(request, 'information/help.html', context)
"kiharaten1129@gmail.com"
] | kiharaten1129@gmail.com |
3e7df88f0417ba13618a0e02619b628a14db66c2 | f6a9634d65a24731f6c1ef39aaa8e59d974d79b1 | /python/exercises/flowcontrols_conditions.py | 40c8aeda2a920d4caecfe7e5accc6d8f3ee40c40 | [] | no_license | emersonmellado/devopsbc | 653a98526396c88da9cf0e9584d4a4048a9f173c | a99d84fd6569480e6ebf1d95da3844ae2dfafc26 | refs/heads/master | 2022-11-30T09:12:56.915799 | 2020-08-13T04:30:26 | 2020-08-13T04:30:26 | 266,922,507 | 1 | 0 | null | 2022-07-22T07:35:45 | 2020-05-26T02:09:37 | Python | UTF-8 | Python | false | false | 559 | py | """
Syntax:
if CONDITION:
# Condition is True
else:
# Condition is False
Usage: Condition is replaced with an expression
True path: you write logic for the true case
False path: you write logic for the false case
"""
# Bug fixed: input() always returns str, so the original
# ``isinstance(value, int)`` check could never succeed (and, had it
# succeeded, the numeric comparisons below would have raised TypeError
# on a str).  Parse the text explicitly instead.
value = input("Give me a number: ")
try:
    value = float(value)
except ValueError:
    print("That is not a number")
else:
    if value.is_integer():
        print("All good, keep going")
    if value == 100:
        print("Value is equal to 100")
    elif value > 100:
        print("Value is greater than 100")
    elif value < 100:
        print("Value is less than 100")
    else:
        # Only NaN falls through every comparison above.
        print("Value it NOT equal to 100")
"emersonmellado@gmail.com"
] | emersonmellado@gmail.com |
16d79b54d69df57c653a5cc4fbe3d3bba8ccedce | 5f2d270bd8acddc6262a3be4e569e96e83bbf70f | /examples/h2o-classifier/train.py | 5ac44b25a4ab7a3db38a530dd61a4fc30b11f431 | [] | no_license | bchalamayya/promote-python | f42ee55f884b18da298749e01790de20aa5a4b84 | 906bf4b3ee80d5280129be048b2cd1ab83f9f8d2 | refs/heads/master | 2020-04-14T17:57:48.365691 | 2018-11-20T20:59:04 | 2018-11-20T20:59:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import h2o
from h2o.estimators import H2ORandomForestEstimator
import os

# Start (or connect to) a local H2O cluster.
h2o.init()

data = h2o.import_file('iris.csv')
# C1..C4 are the four iris feature columns; C5 is the species label.
training_columns = ['C1', 'C2', 'C3', 'C4']
response_column = 'C5'

# 80/20 split.  NOTE(review): the test frame is never evaluated — confirm
# whether an accuracy check on ``test`` was intended.
train, test = data.split_frame(ratios=[0.8])

# Random forest with 10-fold cross-validation on the training frame.
model = H2ORandomForestEstimator(ntrees=50, max_depth=20, nfolds=10)
model.train(x=training_columns, y=response_column, training_frame=train)

# Persist the trained model under ./objects/ next to the working directory.
save_path = os.path.realpath('.') + '/objects/'
h2o.save_model(model=model, path=save_path)
"colin.ristig@gmail.com"
] | colin.ristig@gmail.com |
adb77a3ed74b681351ac695e90d5e55ea1b00919 | 1798ab9a1116022e8517f77f840f93b7f3668198 | /euler056.py | d8d3f8b78c4ce727c0e93114fa55915f563cbba5 | [] | no_license | iynaix/eulerproject | c79e9b37b77fe5e14e1ed23e1fc24121cb757512 | 4d1ba226e2a6d4261ce2cf0d64ebd4b0e538e0b0 | refs/heads/master | 2021-01-19T14:30:17.960934 | 2014-09-29T01:28:39 | 2014-09-29T01:28:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from utils import digits
def euler56():
    """Project Euler 56: the maximal digital sum of a**b for a, b < 100."""
    return max(
        sum(digits(base ** exponent))
        for base in range(1, 100)
        for exponent in range(1, 100)
    )
| [
"iynaix@gmail.com"
] | iynaix@gmail.com |
bc8c5f17ce1d0ec1610763eeda69ec813f6f4a2f | 4dbc1e4a7115b834bbf239fd5254adf293b61516 | /vfmflathub/__init__.py | 973ac4103d9eea129e43384235b067edb27a0f4f | [
"MIT",
"CC-BY-SA-4.0"
] | permissive | sharkwouter/vaporos-flatpak-manager | 32d398ec0b66494471d0c63eea088504efa65dd1 | da1dce2a806fdb51aa9366408ace50c28fbc3ff6 | refs/heads/master | 2020-07-06T05:34:16.634849 | 2019-10-11T18:23:06 | 2019-10-11T18:23:06 | 202,908,134 | 2 | 0 | MIT | 2019-09-10T14:58:50 | 2019-08-17T16:51:10 | Python | UTF-8 | Python | false | false | 194 | py | from vfmflathub.api import get_applications
from vfmflathub.application import Application
from vfmflathub.flatpak import add_flathub, get_installed_applications, install, uninstall, update_all
| [
"wwijsman@live.nl"
] | wwijsman@live.nl |
50ddae41737c1856fdea70885af523908cdebab0 | d83fa072a084642ebaa40317dda61f7a2f660284 | /cleancoderscom/gateways/codecast_gateway.py | 89edbc42cf18f413af36449ce9f5bf8e0749df70 | [] | no_license | xstrengthofonex/CleanCodeCaseStudy | 479ca1f0c028f3f481635b23bf44363fd50dec18 | 312aeef9f2127033f2b9e0b4a2c41baf4e6cc01e | refs/heads/master | 2021-01-02T22:55:50.471384 | 2017-08-06T14:36:17 | 2017-08-06T14:36:17 | 99,425,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from abc import ABCMeta, abstractmethod
from typing import List, Optional
from cleancoderscom.entities.codecast import Codecast
class CodecastGateway(metaclass=ABCMeta):
    """Persistence-layer interface for Codecast entities; concrete gateways
    (in-memory, database, ...) implement these two queries."""

    @abstractmethod
    def find_all_codecasts_ordered_by_date(self) -> List[Codecast]:
        """Return every stored codecast, ordered by date."""
        pass

    @abstractmethod
    def find_codecast_by_title(self, title) -> Optional[Codecast]:
        """Return the codecast with the given title, or None when absent."""
        pass
| [
"xstrengthofonex@gmail.com"
] | xstrengthofonex@gmail.com |
fca8833ff2ffcf10a7e5395e8b705cd0a33fad29 | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/building/DistributedAnimDoor.py | 37bb7065eba4aa04a774aaff39c4ee732815e3bb | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,294 | py | from pandac.PandaModules import NodePath, VBase3
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Parallel, Sequence, Wait, HprInterval, LerpHprInterval, SoundInterval
from toontown.building import DistributedDoor
from toontown.building import DoorTypes
if __debug__:
import pdb
class DistributedAnimDoor(DistributedDoor.DistributedDoor):
    """Client-side door belonging to an animated building.

    Unlike the base door, the two door leaves are joints of the building's
    Actor, so opening/closing is done by lerping joint HPR values rather
    than moving separate door geometry.

    NOTE(review): this module appears decompiled (stray ``continue``
    statements, long float literals); code is left byte-identical.
    """

    def __init__(self, cr):
        DistributedDoor.DistributedDoor.__init__(self, cr)
        # Debug convenience handle; ``base`` is Panda3D's global ShowBase.
        base.animDoor = self

    def getBuilding(self):
        """Find and cache the animated-building NodePath for this door's block."""
        if 'building' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                searchStr = '**/??' + \
                    str(self.block) + ':animated_building_*_DNARoot;+s'
                self.notify.debug('searchStr=%s' % searchStr)
                self.building = self.cr.playGame.hood.loader.geom.find(
                    searchStr)
            else:
                self.notify.error(
                    'DistributedAnimDoor.getBuiding with doorType=%s' %
                    self.doorType)
        return self.building

    def getDoorNodePath(self):
        """Return (creating and caching on first use) a 'doorOrigin' NodePath
        positioned like the building's door_origin node."""
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            if hasattr(self, 'tempDoorNodePath'):
                return self.tempDoorNodePath
            else:
                building = self.getBuilding()
                doorNP = building.find('**/door_origin')
                self.notify.debug('creating doorOrigin at %s %s' % (str(
                    doorNP.getPos()), str(doorNP.getHpr())))
                otherNP = NodePath('doorOrigin')
                otherNP.setPos(doorNP.getPos())
                otherNP.setHpr(doorNP.getHpr())
                otherNP.reparentTo(doorNP.getParent())
                self.tempDoorNodePath = otherNP
        else:
            self.notify.error(
                'DistributedAnimDoor.getDoorNodePath with doorType=%s' %
                self.doorType)
        # NOTE(review): ``otherNP`` is unbound if notify.error() returns;
        # in practice notify.error raises (decompilation artifact).
        return otherNP

    def setTriggerName(self):
        """Rename the building's door trigger node to this door's trigger name
        so collision events route to this distributed object."""
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            building = self.getBuilding()
            if not building.isEmpty():
                doorTrigger = building.find('**/door_0_door_trigger')
                if not doorTrigger.isEmpty():
                    doorTrigger.node().setName(self.getTriggerName())
            else:
                self.notify.warning('setTriggerName failed no building')
        else:
            self.notify.error('setTriggerName doorTYpe=%s' % self.doorType)

    def getAnimBuilding(self):
        """Find and cache the animated-prop wrapper whose Actor parents this
        door's building geometry."""
        if 'animBuilding' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                bldg = self.getBuilding()
                key = bldg.getParent().getParent()
                animPropList = self.cr.playGame.hood.loader.animPropDict.get(
                    key)
                if animPropList:
                    for prop in animPropList:
                        if bldg == prop.getActor().getParent():
                            self.animBuilding = prop
                            break
                        continue
                else:
                    self.notify.error('could not find' + str(key))
            else:
                self.notify.error('No such door type as ' + str(self.doorType))
        return self.animBuilding

    def getBuildingActor(self):
        """Shorthand for the Actor of the cached animated building."""
        result = self.getAnimBuilding().getActor()
        return result

    def enterOpening(self, ts):
        """Play the right door leaf's opening animation + sound from time ts."""
        bldgActor = self.getBuildingActor()
        # Drive the door leaf by controlling its joint directly.
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterOpening(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorOpen-%d' % self.doId
        # Swing direction depends on the door's handedness.
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Parallel(
            SoundInterval(self.openSfx, node=rightDoor),
            Sequence(
                HprInterval(rightDoor, VBase3(0, 0, 0)),
                Wait(0.40000000000000002),
                LerpHprInterval(
                    nodePath=rightDoor,
                    duration=0.59999999999999998,
                    hpr=VBase3(h, 0, 0),
                    startHpr=VBase3(0, 0, 0),
                    blendType='easeInOut')),
            name=trackName)
        self.doorTrack.start(ts)

    def enterClosing(self, ts):
        """Play the right door leaf's closing animation + sound, then notify
        listeners when the avatar transition is done."""
        bldgActor = self.getBuildingActor()
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterClosing(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorClose-%d' % self.doId
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Sequence(
            LerpHprInterval(
                nodePath=rightDoor,
                duration=1.0,
                hpr=VBase3(0, 0, 0),
                startHpr=VBase3(h, 0, 0),
                blendType='easeInOut'),
            SoundInterval(self.closeSfx, node=rightDoor),
            name=trackName)
        self.doorTrack.start(ts)
        if hasattr(self, 'done'):
            request = self.getRequestStatus()
            messenger.send('doorDoneEvent', [request])

    def exitDoorEnterOpening(self, ts):
        """Play the left (exit) door leaf's opening animation + sound."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorDoorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Parallel(
                SoundInterval(self.openSfx, node=leftDoor),
                Sequence(
                    LerpHprInterval(
                        nodePath=leftDoor,
                        duration=0.59999999999999998,
                        hpr=VBase3(h, 0, 0),
                        startHpr=VBase3(0, 0, 0),
                        blendType='easeInOut')),
                name=trackName)
            self.doorExitTrack.start(ts)
        else:
            self.notify.warning(
                'exitDoorEnterOpening(): did not find leftDoor')

    def exitDoorEnterClosing(self, ts):
        """Play the left (exit) door leaf's closing animation + sound."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Sequence(
                LerpHprInterval(
                    nodePath=leftDoor,
                    duration=1.0,
                    hpr=VBase3(0, 0, 0),
                    startHpr=VBase3(h, 0, 0),
                    blendType='easeInOut'),
                SoundInterval(self.closeSfx, node=leftDoor),
                name=trackName)
            self.doorExitTrack.start(ts)
| [
"47166977+peppythegod@users.noreply.github.com"
] | 47166977+peppythegod@users.noreply.github.com |
48035def9dc27ef8655ec0557839d1a7558ed009 | 08bfc8a1f8e44adc624d1f1c6250a3d9635f99de | /SDKs/Qt/5.12.3_python_37/msvc2017_64/PySide/PySide2/scripts/uic.py | 1471f24152ba72980656c2caa300f5e965452b38 | [] | no_license | Personwithhat/CE_SDKs | cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02 | 7afbd2f7767c9c5e95912a1af42b37c24d57f0d4 | refs/heads/master | 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7342dc46431b086d9ffeed1ae7e528d3b0e53a3dc1ccd79003825db7ec8dad8e
size 2880
| [
"personwithhats2@Gmail.com"
] | personwithhats2@Gmail.com |
13728d7d3e4fd069f326f6493d706e6f0df8f729 | dbef97b46cbef9d2a2f9f89f5a4fec7f49875857 | /extract_short.py | 299e8198d35ab78fda0ab4665aebda2303e75829 | [] | no_license | ayu1992/MachineLearning | e1a3626bb60bed98866ea228e27f9310bb2d3102 | f885064160f9f01e1c48edb742f770d264fc645f | refs/heads/master | 2020-06-05T13:58:41.392529 | 2015-03-30T06:09:58 | 2015-03-30T06:09:58 | 33,105,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | with open("/Volumes/YUCHI/ML_anan/movies.txt.txt.txt", "r") as f:
    with open("summary_rating.csv", "w") as of:
        # arr accumulates [productId, userId, score, summary] for the record
        # currently being parsed.  NOTE(review): the fields are never reset
        # between records, so a record missing a field silently reuses the
        # previous record's value — confirm this is acceptable for the data.
        arr = ["", "", "", ""]
        for i, line in enumerate(f):
            # Progress indicator every 100k input lines (Python 2 print).
            if i % 100000 == 0:
                print i
            if line.startswith("product/productId"):
                arr[0] = line.split()[1]
            elif line.startswith("review/userId"):
                arr[1] = line.split()[1]
            elif line.startswith("review/score"):
                arr[2] = line.split()[1]
            elif line.startswith("review/summary"):
                # Summary is multi-word; rejoin everything after the tag.
                arr[3] = ' '.join(line.split()[1:])
                # "review/summary" is treated as the last field of a record,
                # so the completed row is flushed here.
                of.write(", ".join(arr) + "\n")
| [
"yuxx0535@umn.edu"
] | yuxx0535@umn.edu |
100f6258f50963233c3177c141a8c294e712e957 | 4a5931556117ceb4cb1a770928a68454658c7bd0 | /Katakana/tests.py | 72105cac7bb31df68b5a4135660bf6a9a336ad1b | [] | no_license | zooyl/JapaneseMemo | 8ce21683eae305667e11b61acd7aeeba8867044a | 9e808bc39be3e9d3c1986eb4c8d8cd7819668a8c | refs/heads/master | 2022-12-15T11:39:40.448575 | 2021-08-11T11:23:38 | 2021-08-11T11:23:38 | 175,037,873 | 0 | 0 | null | 2022-12-08T04:59:08 | 2019-03-11T16:12:41 | Python | UTF-8 | Python | false | false | 5,587 | py | from django.test import TestCase
import django
from django.test import Client
from django.urls import reverse
from django.contrib.auth.models import User, Permission
# app imports
from Hiragana.models import Stats
# Create your tests here.
class PresetsTests(django.test.TestCase):
    """Access-control tests for the katakana difficulty-preset views.

    Each preset view must reject users lacking the matching katakana
    permission and serve a question page once the permission is granted.

    Bug fixed: the originals called ``self.assertTemplateUsed('error.html')``
    / ``...('question.html')`` without the response argument.  Called that
    way (and outside a ``with`` block) the method merely creates an unused
    context manager and asserts nothing; the response is now passed.
    """

    fixtures = ['Katakana.json', 'Katakana_Levels.json']

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user(username='test_preset', password='12345')
        self.stats = Stats.objects.create(user=self.user)

    # -- helpers -----------------------------------------------------------

    def _get(self, url_name, permission_codename=None):
        """Log the test user in (optionally granting a permission first)
        and GET the named view."""
        if permission_codename is not None:
            perm = Permission.objects.get(codename=permission_codename)
            self.user.user_permissions.add(perm)
        self.client.force_login(self.user)
        return self.client.get(reverse(url_name))

    def _assert_denied(self, response):
        """The permission-denied error page must be rendered."""
        self.assertTemplateUsed(response, 'error.html')
        self.assertContains(response, "<p>Not so fast</p>")
        self.assertContains(response, "You don't have permission to visit this page")

    def _assert_question(self, response):
        """A question page with score and pronunciation prompt must render."""
        self.assertTemplateUsed(response, 'question.html')
        self.assertContains(response, "Points:")
        self.assertContains(response, "Pronunciation:")

    # -- per-preset tests --------------------------------------------------

    def test_preset_easy_without_permission(self):
        self._assert_denied(self._get('kata_easy'))

    def test_preset_easy_with_permission(self):
        self._assert_question(self._get('kata_easy', 'easy_katakana'))

    def test_preset_medium_without_permission(self):
        self._assert_denied(self._get('kata_medium'))

    def test_preset_medium_with_permission(self):
        self._assert_question(self._get('kata_medium', 'medium_katakana'))

    def test_preset_hard_without_permission(self):
        self._assert_denied(self._get('kata_hard'))

    def test_preset_hard_with_permission(self):
        self._assert_question(self._get('kata_hard', 'hard_katakana'))

    def test_preset_diacritics_without_permission(self):
        self._assert_denied(self._get('kata_diacritics'))

    def test_preset_diacritics_with_permission(self):
        self._assert_question(self._get('kata_diacritics', 'diacritics_katakana'))

    def test_preset_mixed_without_permission(self):
        self._assert_denied(self._get('kata_mixed'))

    def test_preset_mixed_with_permission(self):
        self._assert_question(self._get('kata_mixed', 'mixed_katakana'))
class KatakanaPageTest(django.test.TestCase):
    """Tests for the katakana landing page (login + permission gating)."""

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user(username='test_katakana', password='12345')
        self.stats = Stats.objects.create(user=self.user)

    def test_not_authenticated_user(self):
        """Anonymous visitors are redirected to the login page."""
        response = self.client.get(reverse('katakana'))
        self.assertRedirects(response, '/login/?next=/home/katakana', status_code=302, target_status_code=200)

    def test_authenticated_user_without_permission(self):
        """Logged-in users without any katakana permission see the error page."""
        self.client.force_login(self.user)
        response = self.client.get(reverse('katakana'))
        # Bug fixed: assertTemplateUsed was called without the response and
        # therefore asserted nothing (it only built a context manager).
        self.assertTemplateUsed(response, 'error.html')
        self.assertContains(response, "<p>Not so fast</p>")
        self.assertContains(response, "You don't have permission to visit this page")

    def test_authenticated_user_with_permission(self):
        """Any unlocked level permission grants access to the katakana page."""
        perm = Permission.objects.get(codename='easy_katakana')
        self.user.user_permissions.add(perm)
        self.client.force_login(self.user)
        response = self.client.get(reverse('katakana'))
        self.assertTemplateUsed(response, 'katakana.html')
        self.assertContains(response, 'List of unlocked levels')
"natoniewski.m@gmail.com"
] | natoniewski.m@gmail.com |
fa196682cdbaa35f05d090a579a50930f5be698b | 1af44bdb5f59f5a58ead1094daea44f8d49e015c | /recursion.py | 3d9e66a67a862ee1b0c3a0fe2094d334c86496ce | [] | no_license | KrishnaRekapalli/out-think | 865bf2dba27ac220db084de9c0e5fbe7fc9db2e6 | 94fd32c04e8b4a5755c88dc180a3dc293392c62f | refs/heads/master | 2021-01-10T16:52:30.175292 | 2017-05-29T17:41:20 | 2017-05-29T17:41:20 | 53,183,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | def get_fib(position):
# Write your code here.
if position==0:
return 0
elif position==1:
return 1
else:
fib = [0 for i in range(position+1)]
fib[0] = 0
fib[1] = 1
for j in range(2,position+1):
fib[j] = fib[j-1]+fib[j-2]
return fib[position]
#n = int(raw_input())
print(get_fib(23))
| [
"noreply@github.com"
] | noreply@github.com |
86e58c0836f1d5180acbfb2d7d40c1b45183e6e5 | e3f92d9157c5af78aa2ea0a4ea05027a04014b4c | /sampler.py | 18b33f76121e6094887932758f31531682ed5ca8 | [] | no_license | gdesjardins/smlpt | 10c7900ef62f02ca5fcb23313a7ace3f1bf9656c | facf90d522d056f150dfc8874ebf16e0a299fc5c | refs/heads/master | 2021-01-17T11:58:50.311901 | 2014-01-22T22:37:08 | 2014-01-22T22:37:08 | 16,154,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | import theano
import theano.tensor as T
import numpy
class BlockGibbsSampler(object):
def __init__(self, block_updates, n_steps=1):
"""
:param block_updates: dictionary whose keys are conditionally independent (theano
shared) variables, and whose values are the update expression to use for block gibbs
sampling
:param n_steps: number of block Gibbs steps to perform
"""
self.block_updates = block_updates
self.n_steps = n_steps
self.sample_block = {}
for i, (k,v) in enumerate(block_updates.iteritems()):
self.sample_block[k] = theano.function([],[],
updates={k:v},allow_input_downcast = False)
def simulate(self, n_steps=None):
n_steps = n_steps if n_steps else self.n_steps
for n in xrange(n_steps):
for fn in self.sample_block.itervalues():
fn()
def get_state(self):
state = {}
for v in self.block_updates.iterkeys():
state[v] = v.value
return state
def draw(self, n_steps=None):
self.simulate(n_steps=n_steps)
return self.get_state()
| [
"guillaume.desjardins@gmail.com"
] | guillaume.desjardins@gmail.com |
7b3e108a66ca87302ccf56e8cdf18d7fb50ce119 | 472f15abd5b889e96e554272e371208c63d044d2 | /blog/urls.py | 2556a4365076550a279151aab7999c127012f975 | [] | no_license | BjoernBerlin/my-first-blog | 499cafdc3c06d998fbbb5c3ad6d72033a1941cd6 | 5cae465941e1764041ed7c0125bccea2037b3725 | refs/heads/master | 2016-09-01T06:06:38.448887 | 2015-10-10T11:22:09 | 2015-10-10T11:22:09 | 43,352,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from django.conf.urls import url
from .import views  # tokenizes as "from . import views"; missing space is harmless

# Blog URLconf: list/detail, authoring, and the draft/publish workflow.
urlpatterns = [
    # Listing and detail.
    url(r'^$', views.post_list, name='post_list'),
    url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
    # Authoring.
    url(r'^post/new/$', views.post_new, name='post_new'),
    url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
    # Draft workflow.
    url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),
    url(r'^post/(?P<pk>[0-9]+)/publish/$', views.post_publish, name='post_publish'),
    url(r'^post/(?P<pk>[0-9]+)/remove/$', views.post_remove, name='post_remove'),
]
"bjoern@lengers.de"
] | bjoern@lengers.de |
aa0d2e6554684c54501f6f150d32cf14d1cc827e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/136/21959/submittedfiles/funcoes.py | efca9f8ab430ae8fca7e83512158b118f168e4d3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | #ARQUIVO COM SUAS FUNCOES
from __future__ import division
def calcula_valor_absoluto(x):
if x < 0:
x = x*(-1)
return x
def calcula_pi(m):
expr = 0
i = 1
x = 2
while i<=m:
if 1<=m<=2000: #para m maior ou igual a 1 e menor ou igual a 2000
if i%2==0: #se i for par
expr = expr - (4/(x*(x+1)*(x+2)))
else: #caso contrário
expr = expr + (4/(x*(x+1)*(x+2)))
x = x +2
i = i +1
calcula_pi = 3 + expr #pi será igual a 3 + a expressão final
return calcula_pi #a função retorna o valor de pi
def fatorial(n):
fatorial = 1
for i in range (0, n, 1):
fatorial = fatorial * i
return fatorial
def calcula_co_seno(z, epsilon):
soma = 0
i = 1
expoente = 2
fracao = (z**expoente)/fatorial(expoente) # observa-se, aqui, que é chamada a função fatorial com o exponte dentro da mesma
while fracao>epsilon:
fracao = (z**expoente)/fatorial(expoente)
if i%2==1:
soma = soma - fracao
else:
soma = soma + fracao
expoente = expoente + 2
i = i + 1
calcula_co_seno = soma + 1
return calcula_co_seno
def calcula_razao_aurea(m, epsilon):
fi = 2 * calcula_co_seno(calcula_pi(m)/5, epsilon)
return fi
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
eb8ac1065478d710684981c40c7244e68a8d4b27 | 8469dff9fcfae4ea79af33bf5663c0870e4cea77 | /socket/socket_local/sock_client.py | ab90946c8d20c1a30009df68c49091b2016f1228 | [] | no_license | mrliuminlong/note | 130de6f038fe6c7a7d6991beab4bf965bee8424f | f9b34e79b4d1a467e362a65350422c7fc870d205 | refs/heads/master | 2020-04-19T16:18:01.736696 | 2019-02-13T10:39:39 | 2019-02-13T10:39:39 | 168,300,257 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | #本地套接字
from socket import *
sockfd = socket(AF_UNIX, SOCK_STREAM)
#两端需要使用相同的套接字文件
sockfd.connect("./sock")
while True:
msg = input(">>")
if not msg:
break
sockfd.send(msg.encode())
sockfd.close()
# os.remove(file)删除一个文件
# os os.path.exists()判断一个文件是否存在 | [
"liuminlong2010@sina.cn"
] | liuminlong2010@sina.cn |
e2b7d9b4825d95f7b92c5d81dd50fc3bdbd93371 | 13feec69e423695e650d018a1ceca1f6fa83d275 | /training/config.py | dae9c6a0f262c66987ac2e7df5872e677b787141 | [
"Apache-2.0"
] | permissive | OpenImageDenoise/oidn | 4da631f5d9ce32ee632538aa5819bba650a08995 | 5579cd99edfa0839f87ec6960d16dcafcfe0eb31 | refs/heads/master | 2023-09-04T19:03:14.242623 | 2023-06-24T12:06:59 | 2023-06-24T12:06:59 | 168,025,831 | 1,491 | 157 | Apache-2.0 | 2023-07-14T09:19:50 | 2019-01-28T19:48:52 | C++ | UTF-8 | Python | false | false | 10,880 | py | ## Copyright 2018 Intel Corporation
## SPDX-License-Identifier: Apache-2.0
import os
import sys
import argparse
import time
import torch
from util import *
# Returns the main feature from a list of features
def get_main_feature(features):
if len(features) > 1:
features = list(set(features) & {'hdr', 'ldr', 'sh1'})
if len(features) > 1:
error('multiple main features specified')
if not features:
error('no main feature specified')
return features[0]
# Returns the auxiliary features from a list of features
def get_aux_features(features):
main_feature = get_main_feature(features)
return list(set(features).difference([main_feature]))
# Returns the config filename in a directory
def get_config_filename(dir):
return os.path.join(dir, 'config.json')
# Loads the config from a directory
def load_config(dir):
filename = get_config_filename(dir)
cfg = load_json(filename)
return argparse.Namespace(**cfg)
# Saves the config to a directory
def save_config(dir, cfg):
filename = get_config_filename(dir)
save_json(filename, vars(cfg))
# Parses the config from the command line arguments
def parse_args(cmd=None, description=None):
def get_default_device():
return 'cuda' if torch.cuda.is_available() else 'cpu'
if cmd is None:
cmd, _ = os.path.splitext(os.path.basename(sys.argv[0]))
parser = argparse.ArgumentParser(description=description)
parser.usage = '\rIntel(R) Open Image Denoise - Training\n' + parser.format_usage()
advanced = parser.add_argument_group('optional advanced arguments')
parser.add_argument('--config', '-c', type=str, help='load configuration from JSON file (overrides command-line arguments)')
if cmd in {'preprocess', 'train', 'find_lr'}:
parser.add_argument('features', type=str, nargs='*',
choices=['hdr', 'ldr', 'sh1', 'albedo', 'alb', 'normal', 'nrm', []],
help='set of input features')
parser.add_argument('--clean_aux', action='store_true',
help='train with noise-free (reference) auxiliary features')
parser.add_argument('--filter', '-f', type=str,
choices=['RT', 'RTLightmap'],
help='filter to train (determines some default arguments)')
parser.add_argument('--preproc_dir', '-P', type=str, default='preproc',
help='directory of preprocessed datasets')
parser.add_argument('--train_data', '-t', type=str,
help='name of the training dataset')
advanced.add_argument('--transfer', '-x', type=str,
choices=['linear', 'srgb', 'pu', 'log'],
help='transfer function')
if cmd in {'preprocess', 'train'}:
parser.add_argument('--valid_data', '-v', type=str,
help='name of the validation dataset')
if cmd in {'preprocess', 'infer'}:
parser.add_argument('--data_dir', '-D', type=str, default='data',
help='directory of datasets (e.g. training, validation, test)')
if cmd in {'train', 'find_lr', 'infer', 'export', 'visualize'}:
parser.add_argument('--results_dir', '-R', type=str, default='results',
help='directory of training results')
parser.add_argument('--result', '-r', type=str, required=(not cmd in {'train', 'find_lr'}),
help='name of the training result')
if cmd in {'infer'}:
parser.add_argument('--aux_results', '-a', type=str, nargs='*', default=[],
help='prefilter auxiliary features using the specified training results')
if cmd in {'train', 'infer', 'export'}:
parser.add_argument('--num_epochs', '--epochs', '-e', type=int,
default=(2000 if cmd == 'train' else None),
help='number of training epochs')
if cmd in {'train'}:
parser.add_argument('--num_valid_epochs', '--valid_epochs', type=int, default=10,
help='perform validation every this many epochs')
parser.add_argument('--num_save_epochs', '--save_epochs', type=int, default=10,
help='save checkpoints every this many epochs')
parser.add_argument('--lr', '--learning_rate', type=float,
help='initial learning rate')
parser.add_argument('--max_lr', '--max_learning_rate', type=float,
help='maximum learning rate')
parser.add_argument('--lr_warmup', '--learning_rate_warmup', type=float, default=0.15,
help='the percentage of the cycle spent increasing the learning rate (warm-up)')
if cmd in {'find_lr'}:
parser.add_argument('--lr', '--learning_rate', type=float, default=1e-8,
help='minimum learning rate')
parser.add_argument('--max_lr', '--max_learning_rate', type=float, default=0.1,
help='maximum learning rate')
if cmd in {'train', 'find_lr'}:
parser.add_argument('--batch_size', '--bs', '-b', type=int, default=16,
help='mini-batch size (total batch size of all devices)')
parser.add_argument('--num_loaders', '--loaders', '-j', type=int, default=4,
help='number of data loader threads per device')
parser.add_argument('--precision', '-p', type=str, choices=['fp32', 'mixed'],
help='training precision')
advanced.add_argument('--model', '-m', type=str, choices=['unet'], default='unet',
help='network model')
advanced.add_argument('--loss', '-l', type=str,
choices=['l1', 'mape', 'smape', 'l2', 'ssim', 'msssim', 'l1_msssim', 'l1_grad'],
default='l1_msssim',
help='loss function')
advanced.add_argument('--msssim_weights', type=float, nargs='*',
help='MS-SSIM scale weights')
advanced.add_argument('--tile_size', '--ts', type=int, default=256,
help='size of the cropped image tiles')
advanced.add_argument('--seed', '-s', type=int,
help='seed for random number generation')
if cmd in {'infer', 'compare_image'}:
parser.add_argument('--metric', '-M', type=str, nargs='*',
choices=['psnr', 'mse', 'ssim', 'msssim'], default=['psnr', 'ssim'],
help='metrics to compute')
if cmd in {'infer'}:
parser.add_argument('--input_data', '-i', type=str, default='test',
help='name of the input dataset')
parser.add_argument('--output_dir', '-O', type=str, default='infer',
help='directory of output images')
parser.add_argument('--output_suffix', '-o', type=str,
help='suffix of the output image names')
parser.add_argument('--format', '-F', type=str, nargs='*', default=['exr'],
help='output image formats')
parser.add_argument('--save_all', action='store_true',
help='save input and target images too')
if cmd in {'export'}:
parser.add_argument('target', type=str, nargs='?',
choices=['weights', 'package'], default='weights',
help='what to export')
parser.add_argument('--output', '-o', type=str,
help='output file')
if cmd in {'convert_image', 'split_exr'}:
parser.add_argument('input', type=str,
help='input image')
if cmd in {'compare_image'}:
parser.add_argument('input', type=str, nargs=2,
help='input images')
if cmd in {'convert_image'}:
parser.add_argument('output', type=str,
help='output image')
if cmd in {'convert_image', 'compare_image'}:
parser.add_argument('--exposure', '-E', type=float, default=1.,
help='linear exposure scale for HDR image')
if cmd in {'split_exr'}:
parser.add_argument('--layer', type=str,
help='name of the image layer')
if cmd in {'preprocess', 'train', 'find_lr', 'infer', 'export'}:
parser.add_argument('--device', '-d', type=str,
choices=['cpu', 'cuda'], default=get_default_device(),
help='type of device(s) to use')
parser.add_argument('--device_id', '-k', type=int, default=0,
help='ID of the first device to use')
parser.add_argument('--num_devices', '-n', type=int, default=1,
help='number of devices to use (with IDs device_id .. device_id+num_devices-1)')
advanced.add_argument('--deterministic', '--det', action='store_true',
default=(cmd in {'preprocess', 'infer', 'export'}),
help='makes computations deterministic (slower performance)')
cfg = parser.parse_args()
# Load and apply configuration from file if specified
if cfg.config is not None:
cfg_dict = vars(cfg)
cfg_dict.update(load_json(cfg.config))
cfg = argparse.Namespace(**cfg_dict)
if cmd in {'preprocess', 'train', 'find_lr'}:
# Check the filter
if cfg.filter is None:
warning('filter not specified, using generic default arguments')
# Replace feature names with IDs
FEATURE_IDS = {'albedo' : 'alb', 'normal' : 'nrm'}
cfg.features = [FEATURE_IDS.get(f, f) for f in cfg.features]
# Remove duplicate features
cfg.features = list(dict.fromkeys(cfg.features).keys())
# Set the default transfer function
if cfg.transfer is None:
main_feature = get_main_feature(cfg.features)
if main_feature == 'hdr':
cfg.transfer = 'log' if cfg.filter == 'RTLightmap' else 'pu'
elif main_feature in {'ldr', 'alb'}:
cfg.transfer = 'srgb'
else:
cfg.transfer = 'linear'
# Set the default datasets
if cfg.train_data is None and (cmd == 'find_lr' or cfg.valid_data is None):
cfg.train_data = 'train'
if cmd != 'find_lr':
cfg.valid_data = 'valid'
if cmd in {'train', 'find_lr'}:
# Check the batch size
if cfg.batch_size % cfg.num_devices != 0:
parser.error('batch_size is not divisible by num_devices')
# Set the default result name (generated)
if cfg.result is None:
cfg.result = WORKER_UID
# Set the default MS-SSIM weights
if cfg.msssim_weights is None:
if cfg.filter == 'RT':
cfg.msssim_weights = [0.2, 0.2, 0.2, 0.2, 0.2]
if cmd in {'train'}:
# Set the default training precision
if cfg.precision is None:
cfg.precision = 'mixed' if cfg.device == 'cuda' else 'fp32'
# Set the default maximum learning rate
if cfg.max_lr is None:
cfg.max_lr = 3.125e-6 * cfg.batch_size
# Print PyTorch version
print('PyTorch:', torch.__version__)
return cfg | [
"attila.t.afra@intel.com"
] | attila.t.afra@intel.com |
1def8bfa91528ad23d33f5f84710747a8dc3cf57 | c0f86b926fc82baa633862896096c149dd9913cf | /Python/Numpy/Mean-Var-and-Std/Python2/solution.py | 74b8d96a55af697e4421abd696b485c3a4ebf3f7 | [] | no_license | qxzsilver1/HackerRank | 8df74dd0cd4a9dedd778cdecea395f4234eda767 | bcb1b74711a625d8ad329a3f9fdd9f49b1bebc54 | refs/heads/master | 2021-09-09T15:45:35.681284 | 2021-09-07T00:11:16 | 2021-09-07T00:11:16 | 75,671,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import numpy
n, m = map(int, raw_input().split())
a = numpy.array([raw_input().split() for _ in xrange(n)], int)
print numpy.mean(a, axis=1)
print numpy.var(a, axis=0)
print numpy.std(a, None)
| [
"noreply@github.com"
] | noreply@github.com |
0e3aebd5a6b8e7490e4f7f478497e0a2e46b2f3d | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/plone.app.upgrade-1.3.4-py2.7.egg/plone/app/upgrade/v40/tests.py | 5d20ec6119c77470822fbbc82a2aec777d5bd649 | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,186 | py | import time
from zope.component import getSiteManager, queryUtility
from zope.ramcache.interfaces.ram import IRAMCache
from Products.CMFCore.ActionInformation import Action
from Products.CMFCore.Expression import Expression
from Products.CMFCore.utils import getToolByName
from Products.MailHost.interfaces import IMailHost
from plone.app.upgrade.utils import loadMigrationProfile
from plone.app.upgrade.v40.alphas import _KNOWN_ACTION_ICONS
from plone.app.upgrade.v40.alphas import migrateActionIcons
from plone.app.upgrade.v40.alphas import migrateTypeIcons
from plone.app.upgrade.v40.alphas import addOrReplaceRamCache
from plone.app.upgrade.v40.alphas import changeWorkflowActorVariableExpression
from plone.app.upgrade.v40.alphas import changeAuthenticatedResourcesCondition
from plone.app.upgrade.v40.alphas import setupReferencebrowser
from plone.app.upgrade.v40.alphas import migrateMailHost
from plone.app.upgrade.v40.alphas import migrateFolders
from plone.app.upgrade.v40.alphas import renameJoinFormFields
from plone.app.upgrade.v40.alphas import updateLargeFolderType
from plone.app.upgrade.v40.alphas import addRecursiveGroupsPlugin
from plone.app.upgrade.v40.alphas import cleanUpClassicThemeResources
from plone.app.upgrade.v40.betas import repositionRecursiveGroupsPlugin
from plone.app.upgrade.v40.betas import updateIconMetadata
from plone.app.upgrade.v40.betas import removeLargePloneFolder
from plone.app.upgrade.tests.base import MigrationTest
class FakeSecureMailHost(object):
    """Minimal stand-in for a SecureMailHost object.

    Carries only the class attributes and the one method that the
    ``migrateMailHost`` upgrade step touches when replacing a
    Secure Mail Host with a plain Mail Host.
    """
    # Identification attributes read by the migration step.
    meta_type = 'Secure Mail Host'
    id = 'MailHost'
    title = 'Fake MailHost'
    # SMTP settings that must be carried over to the new MailHost.
    smtp_host = 'smtp.example.com'
    smtp_port = 587
    smtp_userid = 'me'
    smtp_pass = 'secret'
    smtp_notls = False
    def manage_fixupOwnershipAfterAdd(self):
        # Deliberate no-op: ownership bookkeeping is irrelevant for the fake.
        pass
class TestMigrations_v4_0alpha1(MigrationTest):
    """Tests for the individual upgrade steps of the 3 -> 4.0alpha1 profile."""
    profile = "profile-plone.app.upgrade.v40:3-4alpha1"
    def afterSetUp(self):
        # Shortcuts to the portal tools exercised by the step tests below.
        self.atool = getToolByName(self.portal, 'portal_actions')
        self.aitool = getToolByName(self.portal, 'portal_actionicons')
        self.cptool = getToolByName(self.portal, 'portal_controlpanel')
        self.wftool = getToolByName(self.portal, 'portal_workflow')
        self.csstool = getToolByName(self.portal, 'portal_css')
        self.jstool = getToolByName(self.portal, 'portal_javascripts')
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        self.setRoles(['Manager'])
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
    def testMigrateActionIcons(self):
        """migrateActionIcons should move icon expressions from
        portal_actionicons onto the actions' own ``icon_expr`` and
        remove the migrated entries from the action icons tool.
        """
        _KNOWN_ACTION_ICONS['object_buttons'].extend(['test_id', 'test2_id'])
        # One icon given as a plain path, one as a TALES python: expression.
        self.aitool.addActionIcon(
            category='object_buttons',
            action_id='test_id',
            icon_expr='test.gif',
            title='Test my icon',
        )
        self.aitool.addActionIcon(
            category='object_buttons',
            action_id='test2_id',
            icon_expr='python:context.getIcon()',
            title='Test my second icon',
        )
        test_action = Action('test_id',
                             title='Test me',
                             description='',
                             url_expr='',
                             icon_expr='',
                             available_expr='',
                             permissions=('View', ),
                             visible = True)
        test2_action = Action('test2_id',
                              title='Test me too',
                              description='',
                              url_expr='',
                              icon_expr='',
                              available_expr='',
                              permissions=('View', ),
                              visible = True)
        object_buttons = self.atool.object_buttons
        if getattr(object_buttons, 'test_id', None) is None:
            object_buttons._setObject('test_id', test_action)
        if getattr(object_buttons, 'test2_id', None) is None:
            object_buttons._setObject('test2_id', test2_action)
        # Pre-migration state: actions carry no icon expression yet.
        self.assertEqual(object_buttons.test_id.icon_expr, '')
        self.assertEqual(object_buttons.test2_id.icon_expr, '')
        self.assertEqual(
            self.aitool.getActionIcon('object_buttons', 'test_id'),
            'test.gif')
        # Test it twice to prove the step is re-runnable (idempotent).
        for i in range(2):
            migrateActionIcons(self.portal)
        icons = [ic.getActionId() for ic in self.aitool.listActionIcons()]
        self.failIf('test_id' in icons)
        self.failIf('test2_id' in icons)
        # Plain paths get prefixed with the portal URL; TALES expressions
        # are kept verbatim.
        self.assertEqual(object_buttons.test_id.icon_expr,
                         'string:$portal_url/test.gif')
        self.assertEqual(object_buttons.test2_id.icon_expr,
                         'python:context.getIcon()')
    def testMigrateControlPanelActionIcons(self):
        """Same migration as above, but for control panel configlets."""
        _KNOWN_ACTION_ICONS['controlpanel'].extend(['test_id'])
        self.aitool.addActionIcon(
            category='controlpanel',
            action_id='test_id',
            icon_expr='test.gif',
            title='Test my icon',
        )
        self.cptool.registerConfiglet(
            id='test_id',
            name='Test Configlet',
            action='string:${portal_url}/test',
            permission='Manage portal',
            category='Plone',
            visible=True,
            appId='',
            icon_expr='',
        )
        action = self.cptool.getActionObject('Plone/test_id')
        self.assertEqual(action.getIconExpression(), '')
        self.assertEqual(self.aitool.getActionIcon('controlpanel', 'test_id'),
                         'test.gif')
        # Test it twice to prove the step is re-runnable (idempotent).
        for i in range(2):
            migrateActionIcons(self.portal)
        icons = [ic.getActionId() for ic in self.aitool.listActionIcons()]
        self.failIf('test_id' in icons)
        self.assertEqual(action.getIconExpression(),
                         'string:$portal_url/test.gif')
    def testContentTypeIconExpressions(self):
        """
        FTIs should now be using icon_expr instead of content_icon.
        (The former caches the expression object.)
        """
        tt = getToolByName(self.portal, "portal_types")
        tt.Document.icon_expr = None
        loadMigrationProfile(self.portal, self.profile, ('typeinfo', ))
        self.assertEqual(tt.Document.icon_expr,
                         "string:${portal_url}/document_icon.png")
    def testMigrateTypeIcons(self):
        """
        FTIs having content_icon should be upgraded to icon_expr.
        """
        tt = getToolByName(self.portal, "portal_types")
        del tt.Document.icon_expr
        tt.Document.content_icon = 'document_icon.gif'
        migrateTypeIcons(self.portal)
        self.assertEqual(tt.Document.icon_expr,
                         "string:${portal_url}/document_icon.gif")
        self.assertTrue(hasattr(tt.Document, 'icon_expr_object'))
        # Don't upgrade if there is already an icon_expr.
        tt.Document.icon_expr = "string:${portal_url}/document_icon.png"
        tt.Document.content_icon = 'document_icon.gif'
        migrateTypeIcons(self.portal)
        self.assertEqual(tt.Document.icon_expr,
                         "string:${portal_url}/document_icon.png")
    def testPngContentIcons(self):
        """The typeinfo step rewrites legacy .gif icon expressions to .png."""
        tt = getToolByName(self.portal, "portal_types")
        tt.Document.icon_expr = "string:${portal_url}/document_icon.gif"
        loadMigrationProfile(self.portal, self.profile, ('typeinfo', ))
        self.assertEqual(tt.Document.icon_expr,
                         "string:${portal_url}/document_icon.png")
    def testAddRAMCache(self):
        """addOrReplaceRamCache registers a RAM cache with a 1h max age."""
        # Test it twice to prove the step is re-runnable (idempotent).
        for i in range(2):
            sm = getSiteManager()
            sm.unregisterUtility(provided=IRAMCache)
            # With no local utility the global default (24h) is found.
            util = queryUtility(IRAMCache)
            self.assertEqual(util.maxAge, 86400)
            addOrReplaceRamCache(self.portal)
            util = queryUtility(IRAMCache)
            self.assertEqual(util.maxAge, 3600)
    def testReplaceOldRamCache(self):
        """A deprecated zope.app.cache RAMCache utility gets replaced."""
        sm = getSiteManager()
        # Test it twice to prove the step is re-runnable (idempotent).
        for i in range(2):
            sm.unregisterUtility(provided=IRAMCache)
            from zope.app.cache.interfaces.ram import IRAMCache as OldIRAMCache
            from zope.app.cache.ram import RAMCache as OldRAMCache
            sm.registerUtility(factory=OldRAMCache, provided=OldIRAMCache)
            addOrReplaceRamCache(self.portal)
            util = queryUtility(IRAMCache)
            self.assertEqual(util.maxAge, 3600)
    def testChangeWorkflowActorVariableExpression(self):
        """The 'actor' workflow variable should use the 'user/getId'
        default expression after the upgrade step has run."""
        self.wftool.intranet_folder_workflow.variables.actor.setProperties('')
        # Run twice to prove the step is re-runnable (idempotent).
        for i in range(2):
            changeWorkflowActorVariableExpression(self.portal)
            wf = self.wftool.intranet_folder_workflow
            self.assertEqual(wf.variables.actor.getDefaultExprText(),
                             'user/getId')
            wf = self.wftool.one_state_workflow
            self.assertEqual(wf.variables.actor.getDefaultExprText(),
                             'user/getId')
            wf = self.wftool.simple_publication_workflow
            self.assertEqual(wf.variables.actor.getDefaultExprText(),
                             'user/getId')
        # make sure it doesn't break if the workflow is missing
        wf = self.wftool.intranet_folder_workflow
        self.wftool._delOb('intranet_folder_workflow')
        changeWorkflowActorVariableExpression(self.portal)
        self.wftool._setOb('intranet_folder_workflow', wf)
    def testChangeAuthenticatedResourcesCondition(self):
        """Resources guarded by an isAnonymousUser expression should be
        switched to the dedicated 'authenticated' flag instead."""
        # make sure CSS resource is updated
        res = self.csstool.getResource('member.css')
        res.setAuthenticated(False)
        res.setExpression('not: portal/portal_membership/isAnonymousUser')
        # test it twice to prove the step is re-runnable (idempotent)
        for i in range(2):
            changeAuthenticatedResourcesCondition(self.portal)
            self.assertEqual(res.getExpression(), '')
            self.failUnless(res.getAuthenticated())
        # make sure it doesn't update it if the expression has been
        # customized
        res.setExpression('python:False')
        changeAuthenticatedResourcesCondition(self.portal)
        self.assertEqual(res.getExpression(), 'python:False')
    def testAddedUseEmailProperty(self):
        """The propertiestool step adds use_email_as_login (default False)."""
        tool = getToolByName(self.portal, 'portal_properties')
        sheet = getattr(tool, 'site_properties')
        #self.assertEqual(sheet.getProperty('use_email_as_login'), False)
        self.removeSiteProperty('use_email_as_login')
        loadMigrationProfile(self.portal, self.profile, ('propertiestool', ))
        self.assertEqual(sheet.getProperty('use_email_as_login'), False)
    def testReplaceReferencebrowser(self):
        """setupReferencebrowser re-adds the 'referencebrowser' skin layer
        to every skin selection (replacing ATReferenceBrowserWidget)."""
        self.setRoles(['Manager'])
        skins_tool = getToolByName(self.portal, 'portal_skins')
        sels = skins_tool._getSelections()
        # Simulate the pre-upgrade state: drop the layer everywhere.
        for skinname, layer in sels.items():
            layers = layer.split(',')
            self.failIf('ATReferenceBrowserWidget' in layers)
            layers.remove('referencebrowser')
            new_layers = ','.join(layers)
            sels[skinname] = new_layers
        loadMigrationProfile(self.portal, self.profile)
        setupReferencebrowser(self.portal)
        sels = skins_tool._getSelections()
        for skinname, layer in sels.items():
            layers = layer.split(',')
            self.failUnless('referencebrowser' in layers)
    def testInstallNewDependencies(self):
        """Loading the profile (re)installs the new TinyMCE dependency."""
        self.setRoles(['Manager'])
        # test for running the TinyMCE profile by checking for the skin layer
        # it installs (the profile is marked as noninstallable, so we can't
        # ask the quick installer)
        skins_tool = getToolByName(self.portal, 'portal_skins')
        del skins_tool['tinymce']
        for i in range(2):
            loadMigrationProfile(self.portal, self.profile)
            self.failUnless('tinymce' in skins_tool)
            # sleep to avoid a GS log filename collision :-o
            time.sleep(1)
    def testNewJSIsInstalled(self):
        """The new JS resources must be present in portal_javascripts."""
        installedScriptIds = self.jstool.getResourceIds()
        expected = [
            # js resources that are part of plone.app.jquerytools
            '++resource++plone.app.jquerytools.js',
            '++resource++plone.app.jquerytools.overlayhelpers.js',
            # js resource that is new in CMFPlone
            'popupforms.js']
        for e in expected:
            self.failUnless(e in installedScriptIds, e)
    def testReplaceSecureMailHost(self):
        """migrateMailHost replaces a Secure Mail Host with a plain
        Mail Host, carrying over the SMTP settings."""
        portal = self.portal
        sm = getSiteManager(context=portal)
        # try it with an unmodified site to ensure it doesn't give any errors
        migrateMailHost(portal.portal_setup)
        portal._delObject('MailHost')
        # Run it with our MailHost replaced
        portal._setObject('MailHost', FakeSecureMailHost())
        self.assertEqual(portal.MailHost.meta_type, 'Secure Mail Host')
        sm.unregisterUtility(provided=IMailHost)
        sm.registerUtility(portal.MailHost, provided=IMailHost)
        migrateMailHost(portal)
        new_mh = portal.MailHost
        self.failUnlessEqual(new_mh.meta_type, 'Mail Host')
        self.failUnlessEqual(new_mh.title, 'Fake MailHost')
        self.failUnlessEqual(new_mh.smtp_host, 'smtp.example.com')
        self.failUnlessEqual(new_mh.smtp_port, 587)
        self.failUnlessEqual(new_mh.smtp_uid, 'me')
        self.failUnlessEqual(new_mh.smtp_pwd, 'secret')
        # Force TLS is always False, because SecureMailHost has no
        # equivalent option
        self.failUnlessEqual(new_mh.force_tls, False)
    def testFolderMigration(self):
        """migrateFolders converts old folders to sane BTree folders,
        preserving id and title."""
        from plone.app.folder.tests.content import create
        from plone.app.folder.tests.test_migration import reverseMigrate
        from plone.app.folder.tests.test_migration import isSaneBTreeFolder
        # create a folder in an unmigrated state & check it's broken...
        folder = create('Folder', self.portal, 'foo', title='Foo')
        reverseMigrate(self.portal)
        self.failIf(isSaneBTreeFolder(self.portal.foo))
        # now run the migration step...
        migrateFolders(self.portal)
        folder = self.portal.foo
        self.failUnless(isSaneBTreeFolder(folder))
        self.assertEqual(folder.getId(), 'foo')
        self.assertEqual(folder.Title(), 'Foo')
class TestMigrations_v4_0alpha2(MigrationTest):
    """Upgrade-step tests for 4.0alpha1 -> 4.0alpha2."""
    def testMigrateJoinFormFields(self):
        """renameJoinFormFields renames the 'join_form_fields' site
        property to 'user_registration_fields', dropping 'groups'."""
        props_tool = getToolByName(self.portal, 'portal_properties')
        site_props = props_tool.site_properties
        # Recreate the pre-upgrade state: old property present, new absent.
        self.removeSiteProperty('user_registration_fields')
        self.addSiteProperty('join_form_fields')
        site_props.join_form_fields = (
            'username', 'password', 'email', 'mail_me', 'groups')
        # NOTE(review): the step is handed the test case itself rather than
        # the portal, unlike the other step tests — confirm this is intended.
        renameJoinFormFields(self)
        self.assertEqual(site_props.hasProperty('join_form_fields'), False)
        self.assertEqual(
            site_props.hasProperty('user_registration_fields'), True)
        self.assertEqual(
            site_props.getProperty('user_registration_fields'),
            ('username', 'password', 'email', 'mail_me'))
class TestMigrations_v4_0alpha3(MigrationTest):
    """Upgrade-step tests for 4.0alpha2 -> 4.0alpha3."""
    profile = "profile-plone.app.upgrade.v40:4alpha2-4alpha3"
    def testProfile(self):
        # Loading the entire upgrade profile must not raise.
        loadMigrationProfile(self.portal, self.profile)
        self.assertTrue(True)
    def testJoinActionURL(self):
        """Re-importing the 'actions' step restores the canonical
        registration URL on the user/join action."""
        join_action = self.portal.portal_actions.user.join
        join_action.url_expr = 'foo'
        loadMigrationProfile(self.portal, self.profile, ('actions', ))
        expected = 'string:${globals_view/navigationRootUrl}/@@register'
        self.assertEqual(join_action.url_expr, expected)
class TestMigrations_v4_0alpha5(MigrationTest):
    """Tests for the individual upgrade steps of the 4alpha4 -> 4alpha5
    profile."""
    profile = "profile-plone.app.upgrade.v40:4alpha4-4alpha5"
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
    def testMigrateLargeFolderType(self):
        """updateLargeFolderType converts 'Large Plone Folder' content
        to the plain 'Folder' type, in objects and catalog alike."""
        portal = self.portal
        catalog = getToolByName(portal, 'portal_catalog')
        # set things up in the old way...
        ids = 'news', 'events', 'Members'
        for id in ids:
            obj = portal[id]
            obj._setPortalTypeName('Large Plone Folder')
            obj.reindexObject()
            self.assertEquals(obj.portal_type, 'Large Plone Folder')
            # Type falls back to meta_type since there's no
            # Large Plone Folder FTI
            self.assertEquals(obj.Type(), 'ATFolder')
            brain, = catalog(getId=id)
            self.assertEquals(brain.portal_type, 'Large Plone Folder')
            self.assertEquals(brain.Type, 'ATFolder')
        # migrate & check again...
        updateLargeFolderType(self.portal)
        for id in ids:
            obj = portal[id]
            self.assertEquals(obj.portal_type, 'Folder')
            self.assertEquals(obj.Type(), 'Folder')
            brain, = catalog(getId=id)
            self.assertEquals(brain.portal_type, 'Folder')
            self.assertEquals(brain.Type, 'Folder')
    def testAddRecursiveGroupsPlugin(self):
        """addRecursiveGroupsPlugin installs the PAS plugin, and is safe
        to run again once the plugin exists."""
        acl = getToolByName(self.portal, 'acl_users')
        addRecursiveGroupsPlugin(self.portal)
        self.failUnless('recursive_groups' in acl)
        # Now that we have an existing one, let's make sure it's handled
        # properly if this migration is run again.
        addRecursiveGroupsPlugin(self.portal)
        self.failUnless('recursive_groups' in acl)
    def testClassicThemeResourcesCleanUp(self):
        """Test that the plonetheme.classic product doesn't have any
        registered CSS resource in its metadata after migration.
        """
        portal = self.portal
        qi = getToolByName(portal, 'portal_quickinstaller')
        qi.installProduct('plonetheme.classic')
        classictheme = qi['plonetheme.classic']
        classictheme.resources_css = ['something'] # add a random resource
        cleanUpClassicThemeResources(portal)
        self.failUnlessEqual(classictheme.resources_css, [])
    def testGetObjPositionInParentIndex(self):
        """Loading the profile replaces the plain FieldIndex for
        getObjPositionInParent with plone.app.folder's GopipIndex."""
        from plone.app.folder.nogopip import GopipIndex
        catalog = self.portal.portal_catalog
        # Recreate the pre-upgrade state: a plain FieldIndex.
        catalog.delIndex('getObjPositionInParent')
        catalog.addIndex('getObjPositionInParent', 'FieldIndex')
        self.failIf(isinstance(catalog.Indexes['getObjPositionInParent'],
                               GopipIndex))
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless('getObjPositionInParent' in catalog.indexes())
        self.failUnless(isinstance(catalog.Indexes['getObjPositionInParent'],
                                   GopipIndex))
    def testGetEventTypeIndex(self):
        """Loading the profile removes the obsolete getEventType index."""
        catalog = self.portal.portal_catalog
        catalog.addIndex('getEventType', 'KeywordIndex')
        self.failUnless('getEventType' in catalog.indexes())
        loadMigrationProfile(self.portal, self.profile)
        self.failIf('getEventType' in catalog.indexes())
class TestMigrations_v4_0beta1(MigrationTest):
    """Tests for the individual upgrade steps of the 4alpha5 -> 4beta1
    profile."""
    profile = "profile-plone.app.upgrade.v40:4alpha5-4beta1"
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
    def testRepositionRecursiveGroupsPlugin(self):
        # Ensure that the recursive groups plugin is moved to the bottom
        # of the IGroups plugins list, if active.
        addRecursiveGroupsPlugin(self.portal)
        # Plugin is installed, but not active, run against this state.
        from Products.PluggableAuthService.interfaces.plugins import \
            IGroupsPlugin
        acl = getToolByName(self.portal, 'acl_users')
        plugins = acl.plugins
        # The plugin was originally moved to the top of the list of
        # IGroupsPlugin plugins by p.a.controlpanel. Recreate that state.
        while (plugins.getAllPlugins('IGroupsPlugin')['active'].index(
               'recursive_groups') > 0):
            plugins.movePluginsUp(IGroupsPlugin, ['recursive_groups'])
        active_groups = plugins.getAllPlugins('IGroupsPlugin')['active']
        self.assertEqual(active_groups[0], 'recursive_groups')
        # Rerun the migration, making sure that it's now the last item in the
        # list of IGroupsPlugin plugins.
        repositionRecursiveGroupsPlugin(self.portal)
        active_groups = plugins.getAllPlugins('IGroupsPlugin')['active']
        self.assertEqual(active_groups[-1], 'recursive_groups')
class TestMigrations_v4_0beta2(MigrationTest):
    """Tests for the individual upgrade steps of the 4beta1 -> 4beta2
    profile."""
    profile = "profile-plone.app.upgrade.v40:4beta1-4beta2"
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
    def testCoreContentIconExprCleared(self):
        """updateIconMetadata clears the getIcon catalog column for core
        content types without touching the modification date."""
        types = getToolByName(self.portal, 'portal_types')
        catalog = getToolByName(self.portal, 'portal_catalog')
        # Reinstate the now-empty icon expression for the Document type
        doc_icon_expr = Expression('string:${portal_url}/document_icon.png')
        types['Document'].icon_expr_object = doc_icon_expr
        front = self.portal['front-page']
        catalog.reindexObject(front)
        old_modified = front.modified()
        # Make sure the getIcon metadata column shows the "original" value
        brains = catalog(id='front-page')
        self.assertEqual(brains[0].getIcon, 'document_icon.png')
        # Run the migration
        loadMigrationProfile(self.portal, self.profile)
        updateIconMetadata(self.portal)
        # The getIcon column should now be empty
        self.assertEqual(catalog(id='front-page')[0].getIcon, '')
        self.assertEquals(front.modified(), old_modified)
class TestMigrations_v4_0beta4(MigrationTest):
    """Tests for the individual upgrade steps of the 4beta3 -> 4beta4
    profile."""
    profile = 'profile-plone.app.upgrade.v40:4beta3-4beta4'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
    def testRemoveLargePloneFolder(self):
        """removeLargePloneFolder purges every remaining trace of the
        'Large Plone Folder' type: its FTI, allowed-content entries,
        factory registration, and navtree/site property references."""
        # re-create pre-migration settings
        ptool = self.portal.portal_properties
        nav_props = ptool.navtree_properties
        l = list(nav_props.parentMetaTypesNotToQuery)
        nav_props.parentMetaTypesNotToQuery = l + ['Large Plone Folder']
        site_props = ptool.site_properties
        l = list(site_props.typesLinkToFolderContentsInFC)
        site_props.typesLinkToFolderContentsInFC = l + ['Large Plone Folder']
        temp_folder_fti = self.portal.portal_types['TempFolder']
        l = list(temp_folder_fti.allowed_content_types)
        temp_folder_fti.allowed_content_types = l + ['Large Plone Folder']
        l = set(self.portal.portal_factory.getFactoryTypes())
        l.add('Large Plone Folder')
        ftool = self.portal.portal_factory
        ftool.manage_setPortalFactoryTypes(listOfTypeIds=list(l))
        # Run twice to prove the step is re-runnable (idempotent).
        for i in xrange(2):
            loadMigrationProfile(self.portal, self.profile)
            removeLargePloneFolder(self.portal)
            self.failIf('Large Plone Folder' in self.portal.portal_types)
            self.failIf('Large Plone Folder' in
                        temp_folder_fti.allowed_content_types)
            self.failUnless('Folder' in temp_folder_fti.allowed_content_types)
            self.failIf('Large Plone Folder' in ftool.getFactoryTypes())
            self.failUnless('Folder' in ftool.getFactoryTypes())
            self.failIf('Large Plone Folder' in
                        nav_props.parentMetaTypesNotToQuery)
            self.failUnless('TempFolder' in
                            nav_props.parentMetaTypesNotToQuery)
            self.failIf('Large Plone Folder' in
                        site_props.typesLinkToFolderContentsInFC)
            self.failUnless('Folder' in
                            site_props.typesLinkToFolderContentsInFC)
            # sleep to avoid a GS log filename collision :-o
            time.sleep(1)
class TestMigrations_v4_0beta5(MigrationTest):
    """Upgrade-step tests for the Plone 4.0beta4 -> 4.0beta5 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4beta4-4beta5'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0rc1(MigrationTest):
    """Upgrade-step tests for the Plone 4.0beta5 -> 4.0rc1 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4beta5-4rc1'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0(MigrationTest):
    """Upgrade-step tests for the Plone 4.0rc1 -> 4.0 final migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4rc1-4final'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_1(MigrationTest):
    """Upgrade-step tests for the Plone 4.0 -> 4.0.1 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4.0-4.0.1'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_2(MigrationTest):
    """Upgrade-step tests for the Plone 4.0.1 -> 4.0.2 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4.0.1-4.0.2'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_3(MigrationTest):
    """Upgrade-step tests for the Plone 4.0.2 -> 4.0.3 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4.0.2-4.0.3'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_4(MigrationTest):
    """Upgrade-step tests for the Plone 4.0.3 -> 4.0.4 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4.0.3-4.0.4'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_5(MigrationTest):
    """Upgrade-step tests for the Plone 4.0.4 -> 4.0.5 migration."""
    # GenericSetup upgrade profile exercised by this test.
    profile = 'profile-plone.app.upgrade.v40:4.0.4-4.0.5'
    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
def test_suite():
    """Return a suite containing every test case defined in this module."""
    import unittest
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
| [
"gso@abv.bg"
] | gso@abv.bg |
f4c6ce59efd1e1f03e2d9705d803ef33a713b166 | e1b0308dc4ba9e412d12e945c31f7f46f524daa4 | /project/image64/models.py | 947999279a351494ff5d2ca946b6ea023600b019 | [] | no_license | loressl/djangorestframework_image_base64 | 8d05d11af8c5802afe287be433714012dfa174e9 | a9078664cc5a3fe5044b65c00497f05fec811ab7 | refs/heads/master | 2022-11-17T16:57:33.727584 | 2020-07-13T22:51:00 | 2020-07-13T22:51:00 | 279,215,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.db import models
# Create your models here.
class Image_Base64(models.Model):
    """Django model storing a single image as its base64-encoded string."""
    # Base64 payloads can be arbitrarily long, hence an unbounded
    # TextField rather than a length-limited CharField.
    image= models.TextField()
    def __str__(self):
        # NOTE(review): returns the full base64 blob, so admin listings
        # and logs may become unwieldy -- consider truncating.
        return self.image
| [
"loryssl@hotmail.com"
] | loryssl@hotmail.com |
54905961f5da67d188acd3d289b59b48346852ab | ebac75f37d7afb53d63d82e173a1f9708e477961 | /rango/utilities.py | 78b927f273b5da82c6702e60af841a48a253534b | [] | no_license | mscienski/rango | adcef6f232aded43be3de0ea505666533ec92d53 | cdc8167f972ea0eb57169921f0159292c904ac19 | refs/heads/master | 2020-06-02T04:06:55.064207 | 2020-03-03T18:55:13 | 2020-03-03T18:55:13 | 21,962,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | __author__ = 'idfl'
def urlencoding(param):
    """Toggle *param* between underscore- and space-separated forms.

    Used to translate between URL-safe names (``tech_news``) and display
    names (``tech news``).  If the string contains underscores they are
    replaced by spaces; otherwise, if it contains spaces they are replaced
    by underscores; otherwise the string is returned unchanged.  The
    underscore branch wins when both characters are present.

    :param param: any object; it is converted with ``str()`` first.
    :return: the converted string.
    """
    # str() is the idiomatic spelling of the old param.__str__() call.
    text = str(param)
    if '_' in text:
        return text.replace('_', ' ')
    if ' ' in text:
        return text.replace(' ', '_')
    return text
"michal.scienski@idfl.com"
] | michal.scienski@idfl.com |
e859ec2e54f53f7b9c6871255c5541097f1f8cc2 | 3f23eec5418587e6608af6b1b57a33e88046e750 | /7-gce/config.py | 6228544da2063b36407607663af6c53105b65d50 | [] | no_license | OlexiyVovnyuk/bookshelf | 5830327acb456cbf1947863936520b834aa611db | e53916ae45c9ea7e871a79812fcf5466d25dce9d | refs/heads/main | 2023-04-03T16:55:08.283035 | 2021-04-05T07:37:35 | 2021-04-05T07:37:35 | 354,753,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains all of the configuration values for the application.
Update this file with the values for your specific Google Cloud project.
You can create and manage projects at https://console.developers.google.com
"""
import os
# The secret key is used by Flask to encrypt session cookies.
# NOTE(review): this key is hard-coded and committed to source control;
# it should be loaded from an environment variable or secret manager.
SECRET_KEY = '\xfd{H\xe5<\x95\xf9\xe3\x96.5\xd1\x01O<!\xd5\xa2\xa0\x9fR"\xa1\xa8'
# There are three different ways to store the data in the application.
# You can choose 'datastore', 'cloudsql', or 'mongodb'. Be sure to
# configure the respective settings for the one you choose below.
# You do not have to configure the other data backends. If unsure, choose
# 'datastore' as it does not require any additional configuration.
DATA_BACKEND = 'cloudsql'
# Google Cloud Project ID. This can be found on the 'Overview' page at
# https://console.developers.google.com
PROJECT_ID = 'bookshelf-309511'
# CloudSQL & SQLAlchemy configuration
# Replace the following values the respective values of your Cloud SQL
# instance.
# NOTE(review): database credentials are stored in plain text here; move
# them to environment variables or a secret manager before deployment.
CLOUDSQL_USER = 'root'
CLOUDSQL_PASSWORD = 'Jbo6x0b5k898pkyd'
CLOUDSQL_DATABASE = 'bookshelf'
# Set this value to the Cloud SQL connection name, e.g.
# "project:region:cloudsql-instance".
# You must also update the value in app.yaml.
CLOUDSQL_CONNECTION_NAME = 'bookshelf-309511:europe-central2:bookshelf-sql'
# The CloudSQL proxy is used locally to connect to the cloudsql instance.
# To start the proxy, use:
#
#     $ cloud_sql_proxy -instances=your-connection-name=tcp:3306
#
# Port 3306 is the standard MySQL port. If you need to use a different port,
# change the 3306 to a different port number.
# Alternatively, you could use a local MySQL instance for testing.
LOCAL_SQLALCHEMY_DATABASE_URI = (
    'mysql+pymysql://{user}:{password}@127.0.0.1:3306/{database}').format(
        user=CLOUDSQL_USER, password=CLOUDSQL_PASSWORD,
        database=CLOUDSQL_DATABASE)
# When running on App Engine a unix socket is used to connect to the cloudsql
# instance.
LIVE_SQLALCHEMY_DATABASE_URI = (
    'mysql+pymysql://{user}:{password}@localhost/{database}'
    '?unix_socket=/cloudsql/{connection_name}').format(
        user=CLOUDSQL_USER, password=CLOUDSQL_PASSWORD,
        database=CLOUDSQL_DATABASE, connection_name=CLOUDSQL_CONNECTION_NAME)
# GAE_INSTANCE is set by the App Engine runtime, so its presence selects
# the unix-socket URI in production and the TCP proxy URI locally.
if os.environ.get('GAE_INSTANCE'):
    SQLALCHEMY_DATABASE_URI = LIVE_SQLALCHEMY_DATABASE_URI
else:
    SQLALCHEMY_DATABASE_URI = LOCAL_SQLALCHEMY_DATABASE_URI
# Mongo configuration
# If using mongolab, the connection URI is available from the mongolab control
# panel. If self-hosting on compute engine, replace the values below.
# MONGO_URI = 'mongodb://user:password@host:27017/database'
# Google Cloud Storage and upload settings.
# Typically, you'll name your bucket the same as your project. To create a
# bucket:
#
#     $ gsutil mb gs://<your-bucket-name>
#
# You also need to make sure that the default ACL is set to public-read,
# otherwise users will not be able to see their upload images:
#
#     $ gsutil defacl set public-read gs://<your-bucket-name>
#
# You can adjust the max content length and allow extensions settings to allow
# larger or more varied file types if desired.
CLOUD_STORAGE_BUCKET = 'bookshelf-309511'
# 8 MiB upload ceiling enforced by Flask via MAX_CONTENT_LENGTH.
MAX_CONTENT_LENGTH = 8 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# OAuth2 configuration.
# This can be generated from the Google Developers Console at
# https://console.developers.google.com/project/_/apiui/credential.
# Note that you will need to add all URLs that your application uses as
# authorized redirect URIs. For example, typically you would add the following:
#
# * http://localhost:8080/oauth2callback
# * https://<your-app-id>.appspot.com/oauth2callback.
#
# If you receive a invalid redirect URI error review you settings to ensure
# that the current URI is allowed.
# NOTE(review): keep the real client secret out of source control.
GOOGLE_OAUTH2_CLIENT_ID = \
    'your-client-id'
GOOGLE_OAUTH2_CLIENT_SECRET = 'your-client-secret'
| [
"noreply@github.com"
] | noreply@github.com |
ac25d0db95dee1117ae6fe4b899083d273595bed | df41dbe7691f1b954057b8aa787c988ffcc6692a | /test.py | 94eeb3df1c071bfaf6ec15838838fe0a649e571c | [] | no_license | mago960806/RemoteCheck | 02e55622c1cd2ce1defb70fa64f40c54cd3eff0b | a46d279fa8bca30c29c12d28a445ca5814f76338 | refs/heads/master | 2020-04-01T22:31:00.400213 | 2019-05-09T02:02:10 | 2019-05-09T02:02:10 | 153,712,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from remotecheck import map_with_multi_thread_output_json
# Each entry describes one host to check: [address, port, user, password].
# NOTE(review): credentials are stored here in plain text -- consider
# loading them from a protected config file or environment variables.
host_list = [
    [
        '192.168.1.1',
        '22',
        'root',
        'admin123'
    ],
    [
        '192.168.1.2',
        '10022',
        'weblogic',
        'admin123'
    ]
]
# Run the remote checks over all hosts concurrently and print the JSON result.
result = map_with_multi_thread_output_json(host_list)
print(result)
| [
"mago960806@hotmail.com"
] | mago960806@hotmail.com |
98447ab158842379f6445b580543c5b19f094a29 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/engine/training_arrays.py | 5de18f2e9cb1d7dd00b968bd7ddef3a828ccaf01 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/engine/training_arrays.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
ff42b0afb739c60d4ad201d92376e6272401eeb7 | 03a878e126a4645e2ae0d814f7005a9d7eebf6e4 | /backend/schedules/migrations/0033_auto_20200919_1356.py | 7ff43226fbbe650af5d723a604b2aed508eadf21 | [] | no_license | nickfff-dev/GeneSys | d8a471734fe6afba8a968004a204a20bc1d6fcdc | e4972f735234bbf69e77b3cbfd9279e32558ede7 | refs/heads/master | 2023-04-06T05:53:32.842913 | 2021-04-02T14:08:12 | 2021-04-02T14:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # Generated by Django 3.0.5 on 2020-09-19 05:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.0.5): removes the time_start /
    time_end fields from ClinicSchedulePatient and the start_time /
    end_time fields from Event.  Edit only comments by hand."""
    # Must be applied after the appointment_type migration it builds on.
    dependencies = [
        ('schedules', '0032_clinicschedulepatient_appointment_type'),
    ]
    # Field removals are applied in order; reversing this migration
    # re-adds the columns.
    operations = [
        migrations.RemoveField(
            model_name='clinicschedulepatient',
            name='time_end',
        ),
        migrations.RemoveField(
            model_name='clinicschedulepatient',
            name='time_start',
        ),
        migrations.RemoveField(
            model_name='event',
            name='end_time',
        ),
        migrations.RemoveField(
            model_name='event',
            name='start_time',
        ),
    ]
| [
"abdulmaula.nacan@gmail.com"
] | abdulmaula.nacan@gmail.com |
aa718ed8354abdea50f56b54e171775a136dd57a | dd116fe1e94191749ab7a9b00be25bfd88641d82 | /cairis/cairis/SearchDialog.py | c128364ca182e31bbb94073ecd249cd1315fc760 | [
"Apache-2.0"
] | permissive | RobinQuetin/CAIRIS-web | fbad99327707ea3b995bdfb4841a83695989e011 | 4a6822db654fecb05a09689c8ba59a4b1255c0fc | HEAD | 2018-12-28T10:53:00.595152 | 2015-06-20T16:53:39 | 2015-06-20T16:53:39 | 33,935,403 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import ARM
from SearchPanel import SearchPanel
from Borg import Borg
class SearchDialog(wx.Dialog):
  """Modal wxPython dialog that searches the CAIRIS model for a text
  string and lists matching objects in a three-column result table.
  Note: Python 2 code (old-style except syntax)."""
  def __init__(self,parent):
    """Build the dialog: embed a SearchPanel, grab the shared database
    proxy from the Borg singleton, and wire the Find button."""
    wx.Dialog.__init__(self,parent,armid.SEARCHMODEL_ID,'Search model',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(700,500))
    b = Borg()
    self.dbProxy = b.dbProxy
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = SearchPanel(self)
    mainSizer.Add(self.panel,1,wx.EXPAND)
    self.SetSizer(mainSizer)
    # Route the Find button's click event to onFind below.
    wx.EVT_BUTTON(self,armid.SEARCHMODEL_BUTTONFIND_ID,self.onFind)
  def onFind(self,evt):
    """Run the search with the current text and option flags, replacing
    any previous results in the list control; pops up a message box on
    empty input or database error."""
    ssCtrl = self.FindWindowById(armid.SEARCHMODEL_TEXTSEARCHSTRING_ID)
    ssValue = ssCtrl.GetValue()
    # Reject empty input (a single space is treated as empty too).
    if (len(ssValue) == 0) or (ssValue == ' '):
      dlg = wx.MessageDialog(self,'Search string empty','Search model',wx.OK)
      dlg.ShowModal()
      dlg.Destroy()
      return
    # Clear results from any previous search before repopulating.
    listCtrl = self.FindWindowById(armid.SEARCHMODEL_LISTRESULTS_ID)
    listCtrl.DeleteAllItems()
    searchOptionsCtrl = self.FindWindowById(armid.SEARCHOPTIONSPANEL_ID)
    searchOptions = searchOptionsCtrl.optionFlags()
    try:
      # Each result row is a 3-tuple rendered as one list-control row.
      searchResults = self.dbProxy.searchModel(ssValue,searchOptions)
      for idx,result in enumerate(searchResults):
        listCtrl.InsertStringItem(idx,result[0])
        listCtrl.SetStringItem(idx,1,result[1])
        listCtrl.SetStringItem(idx,2,result[2])
    except ARM.ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Search model',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return
| [
"shamal.faily@googlemail.com"
] | shamal.faily@googlemail.com |
333ed64669224b879c5b0bc36d873a73ef3b7b12 | ecb8f796de591ed38a7a176f54182b074a59768d | /recusion_hanoi_four_pillar_tower.py | ed50cfd0cf376662273157fb7073e0c7ab5e1cfc | [] | no_license | maxianren/algorithm_python | 89d0ebc7e475875b97c5b25da2dc7c2118245af8 | 7218348831af059db69aa02637b85b9d9a799b6b | refs/heads/master | 2023-05-25T09:01:36.798743 | 2021-06-03T12:17:45 | 2021-06-03T12:17:45 | 313,368,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | '''
The issue of the Hanoi Tower originated from an ancient Indian legend.
For the original Hanoi Tower game, there are only three pillars available for players to operate.
As a result, the original legend requires more than 1.8*10^19 steps to solve the problem.
The number of required steps can be greatly reduced by adding columns.
Find the minimum number of steps to complete the migration under the restrictions below:
- the number of disks is given,
- the number of pillars is 4 (that is, the limit is 4 pillars),
- the other rules of the original legend are unchanged.
Input format:
A non-negative integer M, M represents the number of disks, M<=1000.
Output format:
A non-negative integer that represents the minimum number of steps to complete the migration.
Input sample:
3
Sample output:
5
'''
def best_hanoi_4_tower(m, cache_4, cache_3):
    """Return the minimum number of moves for the 4-pillar Tower of Hanoi.

    Uses the Frame-Stewart recurrence
        f(m) = min over 1 <= n < m of  T3(n) + 2 * f(m - n)
    where T3(n) = 2**n - 1 is the classic 3-pillar cost: move the top
    m - n disks aside using all 4 pillars, shift the bottom n disks with
    only 3 pillars available, then move the m - n disks back on top.

    Computed bottom-up: the original top-down recursion had depth m and
    hit Python's default recursion limit near the stated bound m == 1000.

    :param m: number of disks (non-negative).
    :param cache_4: list of length m + 1; filled with the 4-pillar optima.
    :param cache_3: list of length m + 1; memoises the 3-pillar costs
                    (entries already present are reused).
    :return: cache_4[m], the minimum move count.
    """
    # Base cases: zero disks need zero moves, one disk needs one move.
    cache_4[0] = 0
    if m >= 1:
        cache_4[1] = 1
    for i in range(2, m + 1):
        best = None
        for n in range(1, i):
            # 3-pillar cost for the bottom n disks, via the closed
            # form 2**n - 1 (equivalent to the recursive helper).
            if cache_3[n] is None:
                cache_3[n] = (1 << n) - 1
            candidate = cache_3[n] + 2 * cache_4[i - n]
            if best is None or candidate < best:
                best = candidate
        cache_4[i] = best
    return cache_4[m]
def hanoi_3_tower(n):
    """Return the minimum moves for n disks on the classic 3-pillar puzzle.

    Uses the closed form 2**n - 1.  The original recursive version
    recursed once per disk (risking the recursion limit for large n)
    and never terminated for n <= 0; this version also returns the
    sensible 0 for n == 0.

    :param n: number of disks (non-negative).
    :return: minimum number of moves, 2**n - 1.
    """
    return (1 << n) - 1 if n > 0 else 0
if __name__ == "__main__":
m = 3#int(input())
print(best_hanoi_4_tower(m,(m+1)*[None],(m+1)*[None])) | [
"maxianren@gmail.com"
] | maxianren@gmail.com |
6eb5cb0c208022350e4de33e4e9a311131f2b321 | 6f8a0685ecba9540ee5aefb44b3b09fb0e68ba14 | /src/repeating_key_XOR.py | ec0f0b481a377301806a7e1ffcef6b2e8017c31f | [] | no_license | Auguste0904/CAESAR | def873cc6c965f1b0f92e7b1a560b4cd82935c6b | 0f58741f40582b59b5923532fa199fc8876b2bbd | refs/heads/master | 2023-03-17T18:58:50.165218 | 2021-03-11T08:54:19 | 2021-03-11T08:54:19 | 346,637,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | #!/usr/bin/env python3
##
## EPITECH PROJECT, 2020
## B-SEC-500-PAR-5-1-caesar-lucas.moritel
## File description:
## repeating_key_XOR.py
##
import os
import sys
import codecs
def error_gestion_arg(argv):
if len(argv) != 2:
print("Error: Invalid number of arguments")
exit(84)
if os.path.isfile(argv[1]) == False:
print("Error: The argument is not a file")
exit(84)
def repeating_key_xor(key, text):
output = b''
i = 0
for chara in text:
output += bytes([chara ^ key[i]])
if (i + 1) == len(key):
i = 0
else:
i += 1
return output
def main():
error_gestion_arg(sys.argv)
file = open(sys.argv[1], "r")
encoded_key = file.readline().strip('\n')
encoded_text = file.readline().strip('\n')
if len(encoded_key) == 0:
print("Error: There is no key in your file")
exit(84)
if len(encoded_text) == 0:
print("Error: There is no text to decrypt in your file")
exit(84)
size_key = len(encoded_key) % 2
if size_key != 0:
print("Error: Length of the encoded key content is not even but odd")
exit(84)
if encoded_text == '' or encoded_key == '':
print("Error: The encoded key or the encoded tesxt is missing")
exit(84)
decoded_text = ''.join(encoded_text).encode()
decoded_key = ''.join(encoded_key).encode()
decoded_text = codecs.decode(decoded_text, 'hex')
decoded_key = codecs.decode(decoded_key, 'hex')
ciphertext = repeating_key_xor(decoded_key, decoded_text)
print(ciphertext.hex().upper())
if __name__ == "__main__":
main()
| [
"auguste.alexandre@epitech.eu"
] | auguste.alexandre@epitech.eu |
fad45a86132e84bd2b36271cb1a2dfe8fc908e37 | 416bbc7b84b728950b1811ab310afa30ed652ec1 | /cBOW_skipGram.py | d30695371bff6023498d6913ace880a713de21b1 | [] | no_license | datacampmumbai/First-Project | b9b2acb86c2baeff9a7e11a01cf670b7a0254336 | 41772fa8017372b4dd696145eec3137603f2471e | refs/heads/master | 2020-04-05T17:36:05.126030 | 2018-11-14T10:20:18 | 2018-11-14T10:20:18 | 157,068,394 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 28 09:10:42 2018
@author: Sanmoy
"""
import os
import pandas as pd
import gensim
from string import punctuation
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
custom=set(stopwords.words('english')+list(punctuation)+['»'])
path="C:/F/NMIMS/DataScience/Sem-3/TA/data"
os.chdir(path)
data = open("11-0.txt", encoding="latin-1").read()
doc = data.replace("\n", " ")
print(doc)
data = []
for sent in sent_tokenize(doc):
temp = []
for j in word_tokenize(sent):
if j not in custom:
temp.append(j.lower())
data.append(temp)
len(data)
data
##Create CBOW Model
model1 = gensim.models.Word2Vec(data, min_count=1, size=100, window=5)
print("Cosine similarity between 'alice' "+"and'wonderland'-CBOW: ", model1.similarity('alice', 'wonderland'))
print("Cosine similarity between 'alice' "+"and'machines'-CBOW: ", model1.similarity('alice', 'machines'))
from textblob import TextBlob as tb
blob = tb(doc)
blob_wor = list(blob.words)
blob_wor
data = [word.lower() for word in blob_wor if word not in custom]
model1 = gensim.models.Word2Vec([data], min_count=1, size=100, window=5)
print(model1.similarity('after', 'like'))
#data = [word for word in data if word not in custom]
| [
"noreply@github.com"
] | noreply@github.com |
3d320782a9808236efa872d44247c0f6d4dd8806 | 246ee82e5e53770c71374e0bc781ccf7b7341634 | /aula6.py | 520c0bd4489a2827d36e79c69a01f7440b9cf398 | [] | no_license | Felipe-builder/Dio_Introducao_PYTHON | f3e281e391977cc34c15033b3dfc8465971408fd | 530ce4f11b5ce23dc78f6994dc5abc5e104e7644 | refs/heads/master | 2023-07-01T16:46:52.373154 | 2021-08-10T21:23:22 | 2021-08-10T21:23:22 | 394,672,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | conjunto = {1, 2, 3, 4, 5}
conjunto2 = {5, 6, 7, 8}
conjunto3 = {1, 2, 3, 4}
conjunto4 = {1, 2, 3, 5}
conjunto_uniao = conjunto.union(conjunto2)
print('União: {}'.format(conjunto_uniao))
conjunto_interseccao = conjunto.intersection(conjunto2)
print('Intersecção: {}'.format(conjunto_interseccao))
conjunto_diferenca1 = conjunto.difference(conjunto2)
conjunto_diferenca2 = conjunto2.difference(conjunto)
print('Diferença entre 1 e 2: {}'.format(conjunto_diferenca1))
print('Diferença entre 2 e 1: {}'.format(conjunto_diferenca2))
conjunto_diff_simetrica1 = conjunto.symmetric_difference(conjunto2)
conjunto_diff_simetrica2 = conjunto3.symmetric_difference(conjunto4)
print('Diferença simétrica 1: {}'.format(conjunto_diff_simetrica1))
print('Diferença simétrica 2: {}'.format(conjunto_diff_simetrica2))
conjunto_a = {1, 2, 3}
conjunto_b = {1, 2, 3, 4, 5}
conjunto_subset1 = conjunto_a.issubset(conjunto_b)
conjunto_subset2 = conjunto_b.issubset(conjunto_a)
print('A é subconjunto de B: {}'.format(conjunto_subset1))
print('B é subconjunto de A: {}'.format(conjunto_subset2))
conjunto_superset1 = conjunto_a.issuperset(conjunto_b)
conjunto_superset2 = conjunto_b.issuperset(conjunto_a)
print('A é superconjunto de B: {}'.format(conjunto_superset1))
print('B é superconjunto de A: {}'.format(conjunto_superset2))
lista = ['cachorro', 'cachorro', 'gato', 'gato', 'elefante']
print(lista)
conjunto_animais = set(lista)
print(conjunto_animais)
lista_animais = list(conjunto_animais)
print(lista_animais)
# conjunto = {1, 2, 3, 4, 4, 2}
# conjunto.add(5)
# print(type(conjunto))
# print(conjunto)
# conjunto.discard(2)
# print(conjunto)
| [
"felipesvascon@gmail.com"
] | felipesvascon@gmail.com |
acd9a985926faad6a4fcbdf4d441313cd62cd668 | b0741867b842fe177205c2fd714cabd34652ced4 | /crawling/mmtaobao/sexpic.py | dd4edbee55c824bc1e1e6a92158773afc91f5084 | [] | no_license | zdYng/python | 6737ea43b041f57e0d23598cfa2e5e23d5bd11ff | fd074f5700ec9733958e8640eb63af83aac3001f | refs/heads/master | 2021-07-22T13:50:24.745405 | 2020-04-02T02:15:29 | 2020-04-02T02:15:29 | 93,690,795 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # -*- coding: utf-8 -*-
import requests
import urllib2,re
import os
from mmtaobao.cons import headers
from lxml import etree
from parsel import Selector
import datetime
html =requests.get("http://cl.j4q.pw/htm_data/2/1709/2664044.html")
html.encoding = 'utf-8'
# req = urllib2.Request('http://cl.j4q.pw/htm_data/2/1709/2664044.html')
# req.add_header('user-agent', headers())
# html = urllib2.urlopen(req).read()
print html.content
# select = Selector(html.text)
# content =select.xpath('//div//img/@src')
regt = r'<img src="(.*?)" onclick="(?#...)" style="cursor:pointer>"'
hh = re.findall(regt, html)
print hh
# for imgurl in content:
#
# x=datetime.datetime.now()
#
# name = imgurl[-7:-1]
# os.chdir(r"D://pic")
# req = urllib2.Request(imgurl)
# req.add_header('User-agent', headers())
# #html = urllib2.urlopen(req).read().decode('gbk').encode('utf-8')
# response =urllib2.urlopen(req)
# f = open(name,'wb')
# f.write(response.read())
# f.close()
# y=datetime.datetime.now()
#
# print imgurl,(y-x).seconds
| [
"qianzhongdao@163.com"
] | qianzhongdao@163.com |
4fa3f1d14d7a3874a09c7c5cc9abb92aad5c255c | 2b5f57510315d96de0ab5c374560adaac76a5abf | /Grade-Calculator.py | 72ebdc9b91c7de88322355ebcc3dc32be95bd502 | [] | no_license | zeem5/Grade-calculator | 92ad1e9fb8bd69370b250c60cc388ec7220b1f5a | ab0c955d603d1903e472d5b5b43ec36dacd13c92 | refs/heads/master | 2020-04-08T03:35:53.580214 | 2018-11-24T23:46:14 | 2018-11-24T23:46:14 | 158,982,476 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | x = int (input ("Enter your score"))
if x > 0 and x < 39:
print ("F")
elif x > 39 and x < 44:
print ("E")
elif x > 44 and x < 50:
print ("D")
elif x > 50 and x < 60:
print ("C")
elif x > 60 and x < 70:
print ("B")
elif x > 70 and x < 100:
print ("A")
else:
print ("Please enter marks between 0-100! Thanks")
| [
"noreply@github.com"
] | noreply@github.com |
313596f03b52d8f926d39f82a8d8f88c0c0a19bf | 829d1c828e4e90b151718c4902413700e60db512 | /Day_4.py | cfa06f63b2f8ba1598788baaed64f8084c15565c | [] | no_license | SoadB/100DaysOfCode-Python | 2f8bcd337dc317bc33e8814ea1aeb24f78e4974f | 9482e26825bf1bd5e7520371736896208086b185 | refs/heads/master | 2020-07-11T08:45:11.838852 | 2019-11-26T22:56:03 | 2019-11-26T22:56:03 | 204,493,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py |
import random
# Ex1
x = 5
y = 4.5
z = 2j
print(type(x)), print(type(y)), print(type(z))
print("--------------------------")
# Ex2. int number
long = -72836309174392816438
print(type(long))
print("--------------------------")
# Ex. float number
num = -5.721
print(type(num))
print("--------------------------")
# Ex. power of 10
a = 35e3
b = 12E4
c = -87.7e100
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Complex number
val_1 = 3+5j
val_2 = 5J
val_3 = -72j
print(type(val_1)), print(type(val_2)), print(type(val_3))
print("--------------------------")
# Ex. Convert between the numbers
x = 43
y = 61j
z = -1.29
a = float(x)
b = int(z)
c = complex(x)
print(a), print(b), print(c)
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Random number, import statement at a top file
print(random.randrange(1, 10))
import random
# Ex1
x = 5
y = 4.5
z = 2j
print(type(x)), print(type(y)), print(type(z))
print("--------------------------")
# Ex2. int number
long = -72836309174392816438
print(type(long))
print("--------------------------")
# Ex. float number
num = -5.721
print(type(num))
print("--------------------------")
# Ex. power of 10
a = 35e3
b = 12E4
c = -87.7e100
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Complex number
val_1 = 3+5j
val_2 = 5J
val_3 = -72j
print(type(val_1)), print(type(val_2)), print(type(val_3))
print("--------------------------")
# Ex. Convert between the numbers
x = 43
y = 61j
z = -1.29
a = float(x)
b = int(z)
c = complex(x)
print(a), print(b), print(c)
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Random number, import statement at a top file
print(random.randrange(1, 10))
| [
"soadb321@gmail.com"
] | soadb321@gmail.com |
3bb4a436ba047184d62d283d7b2b9e40cae5dd1a | c7d87b146913128fcc12dd4241f69a6b5b346235 | /week6/6_6_BMI.py | 71a33ddf3a4233c33c6832af7a7716835956133c | [] | no_license | PutkisDude/Developing-Python-Applications | cbe9fc169937087721440a378a912383ba7c2930 | 928c22bcb1b05408dc008c605c1c3a4b509a5536 | refs/heads/main | 2023-04-09T03:53:08.312225 | 2021-04-13T09:15:15 | 2021-04-13T09:15:15 | 336,190,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | #Author Lauri Putkonen
#6. Returns the BMI.
def bmi(weight, height):
height = height / 100
bmi = weight / (height * height)
return bmi
weight = float(input("Type your weight(kg) : "))
height = float(input("Type height (cm): "))
print("Your BMI is %.2f" % bmi(weight, height))
#OUTPUT:
# Type your weight(kg) : 90
# Type height (cm): 180
# Your BMI is 27.78
| [
"putkis@gmail.com"
] | putkis@gmail.com |
cfbdf7c3da7f8b2699eaf24f527932d1c674b6d1 | 4e44c4bbe274b0a8ccca274f29c4140dfad16d5e | /Push2_MIDI_Scripts/decompiled 10.1.2b5 scripts/pushbase/touch_encoder_element.py | f9f76e3eeae43809b8f5db8daf6b10d1825bf8fa | [] | no_license | intergalacticfm/Push2_MIDI_Scripts | b48841e46b7a322f2673259d1b4131d2216f7db6 | a074e2337b2e5d2e5d2128777dd1424f35580ae1 | refs/heads/master | 2021-06-24T15:54:28.660376 | 2020-10-27T11:53:57 | 2020-10-27T11:53:57 | 137,673,221 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,161 | py | # uncompyle6 version 3.0.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.13 (default, Jan 19 2017, 14:48:08)
# [GCC 6.3.0 20170118]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\touch_encoder_element.py
# Compiled at: 2018-11-27 11:59:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface.elements import TouchEncoderElement as TouchEncoderElementBase
class TouchEncoderObserver(object):
u""" Interface for observing the state of one or more TouchEncoderElements """
def on_encoder_touch(self, encoder):
pass
def on_encoder_parameter(self, encoder):
pass
class TouchEncoderElement(TouchEncoderElementBase):
u""" Class representing an encoder that is touch sensitive """
def __init__(self, undo_step_handler=None, delete_handler=None, *a, **k):
super(TouchEncoderElement, self).__init__(*a, **k)
self._trigger_undo_step = False
self._undo_step_open = False
self._undo_step_handler = undo_step_handler
self._delete_handler = delete_handler
self.set_observer(None)
return
def set_observer(self, observer):
if observer is None:
observer = TouchEncoderObserver()
self._observer = observer
return
def on_nested_control_element_value(self, value, control):
self._trigger_undo_step = value
if value:
param = self.mapped_parameter()
if self._delete_handler and self._delete_handler.is_deleting and param:
self._delete_handler.delete_clip_envelope(param)
else:
self.begin_gesture()
self._begin_undo_step()
self._observer.on_encoder_touch(self)
self.notify_touch_value(value)
else:
self._end_undo_step()
self._observer.on_encoder_touch(self)
self.notify_touch_value(value)
self.end_gesture()
def connect_to(self, parameter):
if parameter != self.mapped_parameter():
self.last_mapped_parameter = parameter
super(TouchEncoderElement, self).connect_to(parameter)
self._observer.on_encoder_parameter(self)
def release_parameter(self):
if self.mapped_parameter() != None:
super(TouchEncoderElement, self).release_parameter()
self._observer.on_encoder_parameter(self)
return
def receive_value(self, value):
self._begin_undo_step()
super(TouchEncoderElement, self).receive_value(value)
def disconnect(self):
super(TouchEncoderElement, self).disconnect()
self._undo_step_handler = None
return
def _begin_undo_step(self):
if self._undo_step_handler and self._trigger_undo_step:
self._undo_step_handler.begin_undo_step()
self._trigger_undo_step = False
self._undo_step_open = True
def _end_undo_step(self):
if self._undo_step_handler and self._undo_step_open:
self._undo_step_handler.end_undo_step() | [
"ratsnake.cbs@gmail.com"
] | ratsnake.cbs@gmail.com |
15585c539acb0e4546ebbccb70364de39847516c | e57d7785276053332c633b57f6925c90ad660580 | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_sql_server_extended_blob_auditing_policies_operations.py | 516ad87a451dfc9680c799edc043f4ea896578f1 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 17,241 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
# Typing-only imports and aliases: evaluated by static type checkers, skipped
# entirely at runtime (TYPE_CHECKING is False when the module is imported).
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Shape of the optional `cls` response-customization callback accepted by
    # every operation in this module.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: generated by the AutoRest code generator (see file header) — prefer
# regenerating over hand-editing.
class WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations(object):
    """WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.synapse.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, serializer pair and client configuration are injected
        # by the service client that owns this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        blob_auditing_policy_name,  # type: Union[str, "_models.BlobAuditingPolicyName"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExtendedServerBlobAuditingPolicy"
        """Get server's extended blob auditing policy.

        Get a workspace SQL server's extended blob auditing policy.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param blob_auditing_policy_name: The name of the blob auditing policy.
        :type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExtendedServerBlobAuditingPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExtendedServerBlobAuditingPolicy"]
        # Map well-known HTTP error codes to azure-core exceptions; callers may
        # extend/override via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 is the only success status handled for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        blob_auditing_policy_name,  # type: Union[str, "_models.BlobAuditingPolicyName"]
        parameters,  # type: "_models.ExtendedServerBlobAuditingPolicy"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ExtendedServerBlobAuditingPolicy"]
        # Initial (non-polling) PUT for the long-running operation; invoked by
        # begin_create_or_update, which then polls until completion.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExtendedServerBlobAuditingPolicy"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ExtendedServerBlobAuditingPolicy')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = completed synchronously; 202 = accepted, operation continues
        # asynchronously (no body, so `deserialized` stays None in that case).
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        blob_auditing_policy_name,  # type: Union[str, "_models.BlobAuditingPolicyName"]
        parameters,  # type: "_models.ExtendedServerBlobAuditingPolicy"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExtendedServerBlobAuditingPolicy"]
        """Create or Update server's extended blob auditing policy.

        Create or Update a workspace managed sql server's extended blob auditing policy.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param blob_auditing_policy_name: The name of the blob auditing policy.
        :type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
        :param parameters: Properties of extended blob auditing policy.
        :type parameters: ~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExtendedServerBlobAuditingPolicy or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExtendedServerBlobAuditingPolicy"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT. `cls=lambda x,y,z: x`
            # keeps the raw pipeline response for the poller to inspect.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                blob_auditing_policy_name=blob_auditing_policy_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These were consumed by the initial call; remove so polling requests
        # do not resend them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    def list_by_workspace(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ExtendedServerBlobAuditingPolicyListResult"]
        """List server's extended blob auditing policies.

        List workspace managed sql server's extended blob auditing policies.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExtendedServerBlobAuditingPolicyListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExtendedServerBlobAuditingPolicyListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for the first page (full URL construction)
            # or a continuation page (server-provided next_link used verbatim).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_workspace.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Turn one page of results into (continuation token, items iterator).
            deserialized = self._deserialize('ExtendedServerBlobAuditingPolicyListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged lazily fetches pages via get_next and flattens them via extract_data.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings'}  # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
e3dd831fccc3a95a952dbdc11ecb63ba2363ac4a | 06cd596e0f49d1e5de09a3de56be504453881413 | /graphm/matrix.py | 251c4584e075f168eb420e2b6fea01820f7b76c4 | [] | no_license | aguytech/graphm | 630b0e8b252d286c91a3c2429f344952a3513b7b | c4f58dabced17be83bb89da2c8bf5eb554a69ea2 | refs/heads/master | 2023-04-11T23:17:09.014633 | 2021-05-11T11:00:31 | 2021-05-11T11:00:31 | 365,999,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | '''
Created on Apr 26, 2021
@author: salem Aguemoun
'''
import functools
import graphm.matrixboolean
class Matrix(graphm.matrixboolean.MatrixBoolean):
    """ Manage an arithmetic matrix

    .. CAUTION:: Instance variables

    :var list matrix: matrix with real numbers
    :var int dimM: number of rows
    :var int dimN: number of columns
    """

    def __init__(self, **d) -> 'Matrix':
        """Set the matrix properties with type given by one option in:

        :matrix: get a matrix
        :empty: get 2 dimensions of an empty matrix
        :random: get 2 dimensions of randomized matrix
        :unity: get the dimension of unity matrix

        :param dict \*\*d: options to specify the type of matrix

            :matrix: (list) matrix in [[int,...], ...] or ((int,...), ...)
            :empty: dimensions for matrix (dimM: int, dimN: (tuple) int)
            :random: dimensions for matrix (dimM: int, dimN: (tuple) int)
            :unity: (int) dimensions for square matrix

        :return: the matrix
        :rtype: Matrix
        """
        super().__init__(**d)

    def _require_same_dimensions(self, other: 'Matrix') -> None:
        """Raise ValueError unless *other* has exactly the dimensions of this matrix.

        Shared guard for the element-wise operations __add__ and __sub__.
        """
        if other.dimM != self.dimM or other.dimN != self.dimN:
            raise ValueError("Matrix have wrong dimensions")

    def __add__(self, matrix: 'Matrix') -> 'Matrix':
        """ Return the result of the sum of this instance and that given in argument

        :param Matrix matrix: matrix to be added to the instance

        :return: the result of the sum of this instance and that given in argument
        :rtype: Matrix

        >>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7]])
        >>> m2 = Matrix(matrix=[[4,5,8,2], [10,5,7,4]])
        >>> print(m + m2)
        dim 2,4
        4,15,12,4
        11,8,12,11
        """
        self._require_same_dimensions(matrix)
        r = Matrix(empty=(self.dimM, self.dimN))
        for m in range(self.dimM):
            for n in range(self.dimN):
                r.matrix[m][n] = self.get_value(m, n) + matrix.get_value(m, n)
        return r

    def __mul__(self, matrix: 'Matrix') -> 'Matrix':
        """ Return the matrix multiplication with a logical '&'
        between instance and that passed in argument

        :param Matrix matrix: matrix to be added to the instance

        :return: the result of the multiplication of this instance and that given in argument
        :rtype: Matrix

        >>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7], [2,-1,5,3]])
        >>> m2 = Matrix(matrix=[[4,2], [1,2], [2,3], [1,1]])
        >>> print(m * m2)
        dim 3,2
        20,34
        24,30
        20,20
        """
        # wrong dimensions: the inner dimensions must agree (self.dimN == matrix.dimM)
        if matrix.dimM != self.dimN:
            raise ValueError("Matrix have wrong dimensions")
        r = Matrix(empty=(self.dimM, matrix.dimN))
        for m in range(self.dimM):
            for n in range(matrix.dimN):
                # dot product of row m of self with column n of matrix.
                # Built-in sum() replaces the former functools.reduce(lambda x, y: x + y, ...):
                # same result, idiomatic, and well-defined (0) for a degenerate 0-width matrix,
                # where reduce() on an empty iterable would raise TypeError.
                r.matrix[m][n] = sum(self.get_value(m, i) * matrix.get_value(i, n) for i in range(self.dimN))
        return r

    def __repr__(self) -> str:
        """ Return a linear representation of matrix

        :return: a linear representation of the matrix separated by comma

        >>> m = Matrix(matrix=['00001', '00100', '00010'])
        >>> repr(m)
        '0,0,0,0,1 0,0,1,0,0 0,0,0,1,0'
        """
        return " ".join(",".join(str(n) for n in m) for m in self.matrix)

    def __str__(self) -> str:
        """ Return dimensions of matrix and matrix in 2 dimensions

        :return: a 2 dimensions representation of the matrix

        >>> m = Matrix(matrix=['00001', '00100', '00010'])
        >>> print(m)
        dim 3,5
        0,0,0,0,1
        0,0,1,0,0
        0,0,0,1,0
        """
        return f"dim {self.dimM},{self.dimN}" + "\n" \
            + "\n".join(",".join(str(n) for n in m) for m in self.matrix)

    def __sub__(self, matrix: 'Matrix') -> 'Matrix':
        """ Return the result of the substraction of this instance and that given in argument

        :param Matrix matrix: matrix to be added to the instance

        :return: the result of the sum of this instance and that given in argument
        :rtype: Matrix

        >>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7]])
        >>> m2 = Matrix(matrix=[[4,5,8,2], [10,5,7,4]])
        >>> print(m - m2)
        dim 2,4
        -4,5,-4,0
        -9,-2,-2,3
        """
        self._require_same_dimensions(matrix)
        r = Matrix(empty=(self.dimM, self.dimN))
        for m in range(self.dimM):
            for n in range(self.dimN):
                r.matrix[m][n] = self.get_value(m, n) - matrix.get_value(m, n)
        return r
| [
"aguytech@free.fr"
] | aguytech@free.fr |
2c2c8cb78bcf9652ef11f20e9933579b3cc83471 | 40425604bbd709a80e273e02d62b5925d493fdc0 | /servidor/tests/testsServidor.py | bc06fe590e7e5cb264caeb93cb316140b4f45554 | [] | no_license | mafvidal/UdriveTPTaller2 | b93daa9a44b37048f953f640805b7c67e98d59c6 | 1a1bc28fb7bc2e2e58d36263d99d41fc1ea73f27 | HEAD | 2016-09-06T16:43:35.822346 | 2015-11-24T02:41:00 | 2015-11-24T02:41:00 | 42,020,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,573 | py | #! /usr/bin/python
import requests
import unittest
class TestServidor(unittest.TestCase):
def test_01registrarUsuarioCorrectamente(self):
#No le envio los metadatos del usuario, para simplificar el tests
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro el usuario
salida = requests.post('http://localhost:8000/usuarios/usu1', json=registrarUsuarioJson)
salidaJson = salida.json()
self.assertEqual("Se registro correctamente el usuario", salidaJson["Mensaje"])
self.assertEqual("OK", salidaJson["Estado"])
def test_02iniciarSesionUsuarioCorrectamente(self):
iniciarSesionJson = {'Clave': 'MiClave'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu2', json=registrarUsuarioJson)
#Inicio sesion con el usuario
salida = requests.post('http://localhost:8000/iniciarsesion/usu2', json=iniciarSesionJson)
salidaJson = salida.json()
self.assertEqual("Inicio existoso", salidaJson["Mensaje"])
self.assertEqual("OK", salidaJson["Estado"])
def test_03iniciarSesionUsuarioConClaveIncorrectaReciboError(self):
iniciarSesionJson = {'Clave': 'otraClave'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu3', json=registrarUsuarioJson)
#Inicio sesion con el usuario
salida = requests.post('http://localhost:8000/iniciarsesion/usu3', json=iniciarSesionJson)
salidaJson = salida.json()
self.assertEqual("Usuario o clave incorrecta", salidaJson["Mensaje"])
self.assertEqual("ERROR", salidaJson["Estado"])
def test_04registrarUsuarioExistenteReciboQueElUsuarioYaExiste(self):
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu1', json=registrarUsuarioJson)
salidaJson = salida.json()
self.assertEqual("Error usuario existente", salidaJson["Mensaje"])
self.assertEqual("ERROR", salidaJson["Estado"])
def test_05ObtenerDatosUsuarioRegistrado(self):
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu5', json=registrarUsuarioJson)
#Obtengo los datos del usuario
salida = requests.get('http://localhost:8000/usuarios/usu5')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("pepe@mail.com", salidaJson["Email"])
def test_06ActualizarDatosUsuario(self):
#Metadatos originales
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
#Metadatos para actualizar
MetaDatosActualizados = {'Email': 'pepito@mail.com','Foto': 'otraFoto','Nombre': 'carlos','UltimaUbicacion': 'China'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
actualizarUsuarioJson = {'Clave': 'otraClave','Cuota': 100500,'MetaDatos': MetaDatosActualizados }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu6', json=registrarUsuarioJson)
#Se actualizan los datos del usuario
salida = requests.put('http://localhost:8000/usuarios/usu6', json=actualizarUsuarioJson)
#Se obtienen los datos del usuario
salida = requests.get('http://localhost:8000/usuarios/usu6')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("pepito@mail.com", salidaJson["Email"])
self.assertEqual("otraFoto", salidaJson["Foto"])
self.assertEqual("China", salidaJson["UltimaUbicacion"])
def test_07AlCrearArchivoElUsuarioDebeTenerlo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu7','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu7', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu7/archivos', json=archivoJson)
#Se obtiene el archivo del usuario
salida = requests.get('http://localhost:8000/usuarios/usu7/archivos')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu7", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_08AlEliminarArchivoEsteDebeEstarEnLaPapelera(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu8','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos necesarios para eliminar archivo
eliminarArchivoJson = {'Propietario': 'usu8','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin' }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu8', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu8/archivos', json=archivoJson)
#Se elimina el archivo
salida = requests.delete('http://localhost:8000/usuarios/usu8/archivos', json= eliminarArchivoJson)
#Se obtiene el archivo de la papelera
salida = requests.get('http://localhost:8000/usuarios/usu8/papelera')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu8", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_09AlCompartirUnArchivoConOtroUsuarioEsteDebeTenerlo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu9','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos necesarios para compartir archivo
archivoCompartirJson = {'Propietario': 'usu9','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin', 'Usuarios': ['usu9_2'] }
#Registro de dos usuarios
salida = requests.post('http://localhost:8000/usuarios/usu9', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu9_2', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu9/archivos', json=archivoJson)
#Se comparte el archivo al segundo usuario
salida = requests.put('http://localhost:8000/usuarios/usu9/archivos/compartir', json= archivoCompartirJson)
#Se obtiene el archivo compartido del segundo usuario
salida = requests.get('http://localhost:8000/usuarios/usu9_2/archivos/compartidos')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu9", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_10BuscarArchivoPorEtiquetas(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu10','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu10', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu10/archivos', json=archivoJson)
#Se busca el archivo por etiqueta
salida = requests.get('http://localhost:8000/usuarios/usu10/archivos/etiquetas/saludo')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu10", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_11BuscarArchivoPorNombre(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu11','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu11', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu11/archivos', json=archivoJson)
#Se busca el archivo por nombre
salida = requests.get('http://localhost:8000/usuarios/usu11/archivos/nombre/hola')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu11", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_12BuscarArchivoPorPropietario(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu12','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu12', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu12/archivos', json=archivoJson)
#Se busca el archivo por propietario
salida = requests.get('http://localhost:8000/usuarios/usu12/archivos/propietario/usu12')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu12", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_13BuscarArchivoPorExtension(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu13','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu13', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu13/archivos', json=archivoJson)
#Se busca el archivo por extension
salida = requests.get('http://localhost:8000/usuarios/usu13/archivos/extension/txt')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu13", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_14ActualizarArchivo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu14','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos del Archivo a actualizar
actualizacionArchivoJson = {'Propietario': 'usu14', 'DirectorioOriginal' : 'documentos/bin', 'NombreOriginal': 'hola', 'ExtensionOriginal': 'txt','DirectorioNuevo': 'doc/','NombreNuevo': 'saludo', 'ExtensionNueva': 'bat','Etiquetas': ['hola','saludo'] }
#Se registra el usuario
salida = requests.post('http://localhost:8000/usuarios/usu14', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu14/archivos', json=archivoJson)
#Se actualiza el archivo
salida = requests.put('http://localhost:8000/usuarios/usu14/archivos/actualizar',json= actualizacionArchivoJson)
#Obtengo el archivo actualizado
archivoActualizado = requests.get('http://localhost:8000/usuarios/usu14/archivos')
archivoActualizadoJson = archivoActualizado.json()
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
#Verifico que se actualizo el nombre
self.assertEqual("saludo", archivoActualizadoJson["Archivos"][0]["Nombre"])
def test_15RestaurarArchivo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu15','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos del Archivo a actualizar
actualizacionArchivoJson = {'Propietario': 'usu15', 'DirectorioOriginal' : 'documentos/bin', 'NombreOriginal': 'hola', 'ExtensionOriginal': 'txt','DirectorioNuevo': 'doc/','NombreNuevo': 'saludo', 'ExtensionNueva': 'bat','FechaDeModificacion' : '2015/08/03','UsuarioQueModifico' : 'pepe300','Etiquetas': ['hola','saludo'] }
#Datos del archivo a restaurar
archivoRestaurarJson = {'Propietario': 'usu15','Nombre': 'saludo','Extension': 'bat','Directorio': 'doc/','FechaDeModificacion' : '2015/08/03','UsuarioQueModifico' : 'usu15' }
#Se registra el usuario
salida = requests.post('http://localhost:8000/usuarios/usu15', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu15/archivos', json=archivoJson)
#Se actualiza el archivo
salida = requests.put('http://localhost:8000/usuarios/usu15/archivos/actualizar',json= actualizacionArchivoJson)
#Se restaura el archivo
salida = requests.put('http://localhost:8000/usuarios/usu15/archivos/restaurar',json= archivoRestaurarJson)
#Obtengo el archivo restaurado
archivoRestaurado = requests.get('http://localhost:8000/usuarios/usu15/archivos')
archivoRestauradoJson = archivoRestaurado.json()
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
#Verifico que se actualizo el nombre
self.assertEqual("hola", archivoRestauradoJson["Archivos"][0]["Nombre"])
def test_16CrearArchivoFisico(self):
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu16','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registramos al usuario y agregamos el archivo logico
salida = requests.post('http://localhost:8000/usuarios/usu16', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu16/archivos', json=archivoJson)
salidaJson = salida.json()
#Obtengo el ID del archivo
idArchivo = salidaJson["Mensaje"]
#Abro el archivo
files = {'file': open('mainTest.cpp')}
#Envio el archivo fisico
salida = requests.post('http://localhost:8000/usuarios/usu16/archivofisico/'+idArchivo, files=files)
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("Archivo creado correctamente", salidaJson["Mensaje"])
if __name__ == '__main__':
    # Run this integration-test module directly with the default test runner.
    unittest.main()
| [
"mafvidal@gmail.com"
] | mafvidal@gmail.com |
ecefc0e5c68e1e71c4d81babea0adbd873cc0749 | b0680a0905161b5b8c8539f5021eb8dfcd33576a | /String Examples/ex14.py | c6ba2641e8aca25a72a144ed773bfe8b4f737999 | [] | no_license | FerruccioSisti/LearnPython3 | 32a78f3b63a985b7a42dcf69ae3ac432ec3dea76 | a4c0d238041836d22e99cf9f2cde80381daa91b9 | refs/heads/master | 2020-05-02T14:23:35.182958 | 2020-01-21T16:14:29 | 2020-01-21T16:14:29 | 178,009,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #This example is using both argv and input to get information from the user
from sys import argv
#Get the name of the script and the user
# Unpacks exactly two command-line values (script path + one argument);
# invoking without a user name raises ValueError.
script, user_name = argv
prompt = "> "  # shown before every input() call

print (f"Hi {user_name}, I'm the {script} script")
print ("I'd like to ask you a few questions.")
print (f"Do you like me {user_name}?")
likes = input(prompt)
#Entering creepville right now
print (f"Where do you live {user_name}?")
lives = input(prompt)
print ("What kind of computer do you have?")
pc = input(prompt)
# Echo back everything the user entered.
print (f"Alright, so you said {likes} about liking me.\nI've also noted you live in {lives}")
print (f"Finally, you said that you own a {pc}")
| [
"ferrucciosisti@gmail.com"
] | ferrucciosisti@gmail.com |
f4627a9f0b0e5a3bc6856616a26598590fe7c8db | f9a96f02fb59ebb320d48ae7d266a1ba1bb2f7cc | /ex31.py | 9267bf4cd0e0abe51b98857e138015a5aaec168e | [] | no_license | virtualet/LPTHW | eb54eca5471c179652b1466e604419601a3a082c | e31b703e835640fc9f04ad99b027bcf6d6c1a746 | refs/heads/master | 2021-01-13T01:53:50.027232 | 2014-10-06T22:03:27 | 2014-10-06T22:03:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | __author__ = 'echoecho'
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
print "There's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake"
print "2. Scream at the bear"
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away" % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina"
print "1. Blueberries"
print "2. Yello jacket clothespins"
print "3. Understanding revolvers yelling melodies"
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good luck!"
else:
print "The insanity rots your eyes into a pool of muck. Good luck!"
else:
print "You stumble around and fall on a knife and die. Good job!"
| [
"echoecho@gmail.com"
] | echoecho@gmail.com |
59019e4e0de44502a63c95bc121cf7f067510cda | 841a4906780c75fe72f0bea68e641bcab1fa19f5 | /2019/07/two.py | 75e4be895e0f2c8fba69cf1f2ce7e6abe32faa95 | [] | no_license | RobertMusser/Avent-of-Code | 87584a37e7d81b252affb2c04bda8abbc9ef9fd3 | 6bcdd866efaa1088b02f2ad50a125a453d41d7f5 | refs/heads/master | 2023-02-06T13:47:50.596782 | 2023-01-26T01:31:34 | 2023-01-26T01:31:34 | 224,293,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,787 | py | import math
import itertools
# Reads file into string, code adapted from ( https://github.com/imhoffman/advent/blob/master/2015/01/one.py )
def file_to_string(file_name):
    """Read *file_name* and return its entire contents as one string.

    Bug fix: the original read inside a ``while True`` loop and returned a
    variable that was never assigned when the file was empty, raising
    NameError. Reading once is equivalent for non-empty files and returns
    '' for an empty one.
    """
    with open(file_name) as fp:
        return fp.read()
# finds commas in string
# could be done with .split(), but oh well
def comma_finder(string_opcode):
start_search = 0
all_commas = []
while True:
comma_loc = string_opcode.find(',', start_search)
if comma_loc == -1: # breaks out of loop once all commas found
break
all_commas.append(comma_loc)
start_search = comma_loc + 1
return all_commas
# parses string into an array
# could be done with .split()
def string_to_array(opcode_string, comma_index):
opcode = []
buffer = 0
for i in range(len(comma_index)+1):
start = buffer
if i == len(comma_index):
end = len(opcode_string)+1
opcode.append(int(opcode_string[start:end]))
break
if i < len(comma_index):
end = comma_index[i]
opcode.append(int(opcode_string[start:end]))
buffer = comma_index[i]+1
return opcode
# makes number str and back-fills with 0s
def yarnifier(number):
yarn = str(number)
yarn = ("0" * int(5-len(yarn))) + yarn
return yarn
# returns true if number was a valid opcode, false if not
def opcode_checker(number):
answer = False # default falseyness
yarn = str(number) # string of number
if len(yarn) > 5: # greater than 5 digits c/t be an opcode
return answer
if number < 1: # 0 or -#s c/t be opcodes
return answer
yarn = ("0" * int(5-len(yarn))) + yarn # fill yarn with 0s, just like yarnifier
ones = int(yarn[4]) # purely symbolic
tens = int(yarn[3])
mode_three = int(yarn[0])
mode_two = int(yarn[1])
mode_one = int(yarn[2])
# https://stackoverflow.com/questions/148042/using-or-comparisons-with-if-statements
if ones in (1, 2, 3, 4, 5, 6, 7, 8):
if tens == 0:
if mode_three in (0, 1) and mode_two in (0, 1) and mode_one in (0, 1):
answer = True
if int(yarn[3:5]) == 99:
if mode_three in (0, 1) and mode_two in (0, 1) and mode_one in (0, 1):
answer = True
return answer
# given a pointer and a program, executes instructions and returns modified program + pointer
def opcode_processor(pointer, program, setting):
input_received = -1 # default falsyness
output = -1
opcode = program[pointer] # purely symbolic
if opcode_checker(opcode): # this is only helpful for debugging
yarn = yarnifier(opcode)
first = int(yarn[2])
second = int(yarn[1])
if int(yarn[4]) == 1:
x = program[pointer + 1] # default set to value not address
y = program[pointer + 2]
if first == 0: # x and y updated if modes not 1
x = program[x]
if second == 0:
y = program[y]
program[program[pointer + 3]] = x + y # + rule
pointer += 4
elif int(yarn[4]) == 2:
x = program[pointer + 1]
y = program[pointer + 2]
if first == 0:
x = program[x]
if second == 0:
y = program[y]
program[program[pointer + 3]] = x * y # * rule
pointer += 4
elif int(yarn[4]) == 3: # get input rule
x = setting # always address mode
program[program[pointer + 1]] = x
input_received = 1
pointer += 2
elif int(yarn[4]) == 4: # print rule
if first == 0:
output = program[program[pointer + 1]]
elif first == 1:
output = program[pointer + 1]
pointer += 2
elif int(yarn[4]) == 5: # jump-if-true
x = program[pointer+1]
y = program[pointer+2]
if first == 0:
x = program[x]
if second == 0:
y = program[y]
if x != 0:
pointer = y
else: # this might need to be something else
pointer += 3
elif int(yarn[4]) == 6: # jump-if-false
x = program[pointer + 1]
y = program[pointer + 2]
if first == 0:
x = program[x]
if second == 0:
y = program[y]
if x == 0:
pointer = y
else: # this might need to be something else
pointer += 3
elif int(yarn[4]) == 7:
x = program[pointer + 1]
y = program[pointer + 2]
if first == 0:
x = program[x]
if second == 0:
y = program[y]
if x < y:
program[program[pointer+3]] = 1
else:
program[program[pointer + 3]] = 0
pointer += 4
elif int(yarn[4]) == 8:
x = program[pointer + 1]
y = program[pointer + 2]
if first == 0:
x = program[x]
if second == 0:
y = program[y]
if x == y:
program[program[pointer + 3]] = 1
else:
program[program[pointer + 3]] = 0
pointer += 4
elif int(yarn[4]) == 9:
return 'DONE', program, 0, output
else:
print("--- ERORR ---")
print("@ adress: ", pointer, "which is int: ", opcode)
return 'DONE', 'ERROR', 0, 0
return pointer, program, input_received, output
# runs one amp at specified phase setting and input signal
def single_amp(program, input_one, input_two):
pointer = 0
setting = input_one
while True:
pointer, program, input_received, maybe_output = opcode_processor(pointer, program, setting)
if input_received != -1:
setting = input_two
if maybe_output != -1:
output = maybe_output
if pointer == 'DONE':
break
return program, output
# runs all five amps, with specified phase settings
def test_amp_config(program, amp_setting):
_, output = single_amp(program, int(amp_setting[0]), 0)
_, output = single_amp(program, int(amp_setting[1]), output)
_, output = single_amp(program, int(amp_setting[2]), output)
_, output = single_amp(program, int(amp_setting[3]), output)
_, signal = single_amp(program, int(amp_setting[4]), output)
return signal
# generates all possible amp settings, and finds highest signal return
def phase_setting_checker(program):
# generates all possible settings
# https://stackoverflow.com/questions/104420/how-to-generate-all-permutations-of-a-list
x = list(itertools.permutations([0, 1, 2, 3, 4]))
all_settings = []
for c in x:
c = str(c)
all_settings.append(c[1] + c[4] + c[7] + c[10] + c[13])
highest_signal = 0
for setting in all_settings:
signal = test_amp_config(program, setting)
if signal > highest_signal:
highest_signal = signal
return highest_signal
# main program:
# Load the puzzle input and parse it into the Intcode program (list of ints).
program = file_to_string('input.txt') # change file name here!
all_commas = comma_finder(program)
program = string_to_array(program, all_commas)
# done with file io / formatting
# Search every phase-setting permutation for the strongest thruster signal.
answer = phase_setting_checker(program)
print(answer)
| [
"robert.musser@questu.ca"
] | robert.musser@questu.ca |
76d07d0af3b66039cf6a45daa29221885fca4724 | a00a9591df0c32f12595ac7c1c07ffbfd1185642 | /punctatools/lib/preprocess.py | 57260029e06ed82f4183b50b1169c5a496ff91b0 | [
"Apache-2.0"
] | permissive | stjude/punctatools | 4bcc100620e45c6e1839035cc2f6227d93f3fe7f | 0630b67fdf2d81772b11b95b140468dca20a35de | refs/heads/main | 2023-04-18T04:33:24.882656 | 2022-12-13T17:05:57 | 2022-12-13T17:05:57 | 377,252,061 | 7 | 8 | NOASSERTION | 2022-12-13T17:05:58 | 2021-06-15T18:00:57 | Jupyter Notebook | UTF-8 | Python | false | false | 3,708 | py | import os
from typing import Union
import intake_io
import numpy as np
import pandas as pd
from am_utils.parallel import run_parallel
from am_utils.utils import walk_dir
from tqdm import tqdm
def compute_histogram(dataset):
    """
    Compute an intensity histogram for each channel of an image dataset.

    Parameters
    ----------
    dataset : xr.Dataset
        Input image with a channel dimension 'c' (as loaded by intake_io).
        Assumes non-negative integer intensities (one bin per gray level)
        -- TODO confirm behavior for float images.

    Returns
    -------
    pd.DataFrame:
        Histogram with columns 'values', 'counts', 'channel';
        zero-count bins are dropped.
    """
    imghist = pd.DataFrame()
    for i in range(dataset.dims['c']):
        img = dataset.loc[dict(c=dataset.coords['c'][i])]['image'].data
        # One bin per integer gray level, covering [0, max].
        hist, bins = np.histogram(img, bins=np.max(img) + 1, range=(0, np.max(img) + 1))
        chist = pd.DataFrame({
            'values': bins[:-1],
            'counts': hist
        })
        chist = chist[chist['counts'] > 0]
        chist['channel'] = dataset.coords['c'][i].data
        imghist = pd.concat([imghist, chist], ignore_index=True)
    return imghist
def compute_histogram_batch(input_dir: str, output_dir: str):
    """
    Compute intensity histograms for all images in a folder and save as csv.

    Writes one csv per image (mirroring the input tree under *output_dir*)
    plus one combined csv named ``<output_dir>.csv``.

    Parameters
    ----------
    input_dir : str
        Input directory
    output_dir : str
        Output directory
    """
    samples = walk_dir(input_dir)
    all_hist = pd.DataFrame()
    for sample in tqdm(samples):
        dataset = intake_io.imload(sample)
        imghist = compute_histogram(dataset)
        imghist['Image name'] = sample
        # Mirror the input path under output_dir, swapping the extension for .csv.
        fn_out = sample.replace(input_dir, output_dir).replace(os.path.splitext(sample)[-1], '.csv')
        os.makedirs(os.path.dirname(fn_out), exist_ok=True)
        imghist.to_csv(fn_out, index=False)
        all_hist = pd.concat([all_hist, imghist], ignore_index=True)
    all_hist.to_csv(output_dir.rstrip('/') + '.csv', index=False)
def subtract_background(dataset, bg_value):
    """
    Subtract constant background value(s) from an image dataset in place.

    Parameters
    ----------
    dataset : xr.Dataset
        Image dataset with a channel dimension 'c' (as loaded by intake_io).
    bg_value : scalar or sequence
        Background value(s). Given at least one value per channel, each
        channel gets its own subtraction; otherwise the first value is
        subtracted from all channels at once.

    Returns
    -------
    xr.Dataset
        The same dataset, modified in place.
    """
    # Normalize scalars / sequences to a flat 1-D array.
    bg_value = np.array([bg_value]).ravel()
    channels = dataset.coords['c'].data
    if len(bg_value) >= len(channels):
        for i in range(len(channels)):
            img = dataset.loc[dict(c=channels[i])]['image'].data
            # Clip first so the subtraction cannot go below zero.
            img = np.clip(img, bg_value[i], None)
            dataset['image'].loc[dict(c=channels[i])] = img - bg_value[i]
    else:
        img = dataset['image'].data
        img = np.clip(img, bg_value[0], None)
        dataset['image'].data = img - bg_value[0]
    return dataset
def __subtract_bg_helper(item, **kwargs):
    # Worker for run_parallel: load one image, subtract the background
    # (kwargs are forwarded to subtract_background), and save the result.
    fn_in, fn_out = item
    dataset = intake_io.imload(fn_in)
    dataset = subtract_background(dataset, **kwargs)
    os.makedirs(os.path.dirname(fn_out), exist_ok=True)
    intake_io.imsave(dataset, fn_out)
def subtract_background_batch(input_dir: str, output_dir: str,
                              bg_value: Union[int, float, list, tuple], n_jobs: int = 8):
    """
    Subtract constant background values from every image in a folder.

    Parameters
    ----------
    input_dir : str
        Input directory
    output_dir : str
        Output directory
    bg_value : scalar or list
        Background values for each channel.
        If one value provided, it will be subtracted from all channels.
    n_jobs : int, optional
        Number of jobs to run in parallel if `parallel` is True
        Default: 8
    """
    # Pair each input file with its mirrored output path and process in parallel.
    run_parallel(items=[(sample,
                         sample.replace(input_dir, output_dir))
                        for sample in walk_dir(input_dir)],
                 process=__subtract_bg_helper,
                 max_threads=n_jobs,
                 bg_value=bg_value)
def rescale_intensity(x, quantiles=(0.0025, 0.9975)):
    """Clip *x* to the given intensity quantiles and rescale to [0, 1].

    Returns an all-zero float32 array when the quantile range is too
    narrow (upper <= lower + 5) for a meaningful rescale.
    """
    lo = np.percentile(x, quantiles[0] * 100)
    hi = np.percentile(x, quantiles[1] * 100)
    if hi <= lo + 5:
        return np.zeros(x.shape, dtype=np.float32)
    return np.clip((x.astype(np.float32) - lo) / (hi - lo), 0, 1)
| [
"37274810+amedyukhina@users.noreply.github.com"
] | 37274810+amedyukhina@users.noreply.github.com |
b7f64b488967b17bca55e572ba1fa82ce4bb0851 | 5d0a85c793266da7ea77aed0998398cc2eef0e49 | /Sem 4/AI/Assignment2/task2/cube.py | 6188ab2af0536685b9b891182a5df8c38cc9f969 | [] | no_license | GeorgeSfarz35/UBB-Projects | 8757efeeb4f8517602e2f0659050a6e54c47109f | b025958be030fd80c6b571cdd6e26c6a1bbd61b9 | refs/heads/main | 2023-09-03T15:30:08.652869 | 2021-10-15T16:02:08 | 2021-10-15T16:02:08 | 417,543,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py | import pygame, sys
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
######
import random
# Cube geometry lookup tables. The name "verticies" [sic] is kept because
# Cube() below references it.
# Eight corner coordinates of a unit cube centered at the origin.
verticies = (
    (1, -1, -1),
    (1, 1, -1),
    (-1, 1, -1),
    (-1, -1, -1),
    (1, -1, 1),
    (1, 1, 1),
    (-1, -1, 1),
    (-1, 1, 1)
    )

# Twelve edges as index pairs into verticies (for the wireframe pass).
edges = (
    (0,1),
    (0,3),
    (0,4),
    (2,1),
    (2,3),
    (2,7),
    (6,3),
    (6,4),
    (6,7),
    (5,1),
    (5,4),
    (5,7)
    )

# Six faces as quads of vertex indices (for the filled pass).
surfaces = (
    (0,1,2,3),
    (3,2,7,6),
    (6,7,5,4),
    (4,5,1,0),
    (1,5,7,2),
    (4,0,3,6)
    )

# RGB color triples; Cube() indexes only entries 1-4 per face
# (see the per-vertex counter there) -- TODO confirm that is intended.
colors = (
    (1,0,0),
    (0,1,0),
    (0,0,1),
    (0,1,0),
    (1,1,1),
    (0,1,1),
    (1,0,0),
    (0,1,0),
    (0,0,1),
    (1,0,0),
    (1,1,1),
    (0,1,1),
    )
def Cube():
    """Draw the cube: filled colored faces first, then the wireframe edges."""
    glBegin(GL_QUADS)
    for surface in surfaces:
        x = 0
        for vertex in surface:
            x+=1
            # NOTE(review): x runs 1..4 within each face, so only
            # colors[1]..colors[4] are ever used -- confirm this is intended.
            glColor3fv(colors[x])
            glVertex3fv(verticies[vertex])
    glEnd()
    # Wireframe pass: draw each of the 12 edges as a line segment.
    glBegin(GL_LINES)
    for edge in edges:
        for vertex in edge:
            glVertex3fv(verticies[vertex])
    glEnd()
def main():
    """Play one round: a cube drifts toward the camera while the player
    dodges it with the arrow keys. The round ends when the cube passes
    the camera plane (camera z offset <= 0) or the window is closed.

    Bug fix: the original ended with sys.exit(), so the
    ``for x in range(100): main()`` driver below ran only a single round.
    The exit call is removed; pygame.quit() still tears the window down so
    the next round can re-init cleanly.
    """
    pygame.init()
    display = (800, 600)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    gluPerspective(45, (display[0] / display[1]), 0.1, 50.0)
    # Start further back, at a random horizontal offset.
    glTranslatef(random.randrange(-5, 5), 0, -30)
    object_passed = False
    ind = False  # set when the user closes the window
    while not object_passed:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                ind = True
                pygame.quit()
                break
            if event.type == pygame.KEYDOWN:
                # Arrow keys shift the whole scene relative to the camera.
                if event.key == pygame.K_LEFT:
                    glTranslatef(-0.5, 0, 0)
                if event.key == pygame.K_RIGHT:
                    glTranslatef(0.5, 0, 0)
                if event.key == pygame.K_UP:
                    glTranslatef(0, 1, 0)
                if event.key == pygame.K_DOWN:
                    glTranslatef(0, -1, 0)
        if ind:
            break
        # Read the modelview matrix to recover the scene's z offset.
        x = glGetDoublev(GL_MODELVIEW_MATRIX)
        camera_z = x[3][2]
        # Slowly move the cube toward the viewer.
        glTranslatef(0, 0, 0.5)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        Cube()
        pygame.display.flip()
        if camera_z <= 0:
            object_passed = True
        pygame.time.delay(250)
    pygame.quit()  # sys.exit() removed so repeated rounds can run
if __name__=="__main__":
    # Play 100 consecutive rounds of the falling-cube game.
    for x in range(100):
        main()
| [
"george.sfarz@stud.ubbcluj.ro"
] | george.sfarz@stud.ubbcluj.ro |
cc0c0b02bbebc632dd806ce1cb000e302ef11030 | 1c5444654ab9756378b19b633f89c34703b789f7 | /workspace/dcmfinder.py | e75d9fd564c0d479b40b9e839829e74dcb5f54fc | [
"MIT"
] | permissive | ythackerCS/DCMFinder-Container | 4f0834288becf5500d9072c75e33943e667539a1 | cd37fe8ffb01f067c9b3876f35293c123ccaf644 | refs/heads/main | 2023-08-15T19:59:31.669406 | 2021-10-08T23:08:05 | 2021-10-08T23:08:05 | 415,128,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,228 | py | import sys, os
import re
import subprocess
import csv
from numpy.core.numeric import count_nonzero
import pydicom
from tqdm import tqdm
keepIfTagNotFound = True
def findDicoms(FilterForArray,FilterAgainstArray):
    """
    Recursively scan /input/ for .dcm files, keep those matching every
    (tag, value) pair in FilterForArray, drop those matching any pair in
    FilterAgainstArray, and write one csv row per experiment folder to
    /output/dcmwithoutClassification.csv.

    Each filter is a (dicom_attribute_name, value) pair compared against the
    pydicom dataset via getattr.
    NOTE(review): the module-level flag keepIfTagNotFound and the local
    counter tagnotFoundTime are never used/updated here.
    """
    print("RUNNING DCM FINDER")
    #This is the input directory (main directory) that is searched for all possible dicom files and a csv of file paths called 'dcmwithoutClassification' is generated
    dataDir = "/input/"
    experimentNumbers = os.listdir(dataDir)
    originalCount = 0
    tagnotFoundTime = 0  # never incremented below
    filteredCount = 0
    #all csvs made are put in the output folder, this script generates a dcmwithoutClassification csv that filters the dicom for filters provided
    with open ('/output/dcmwithoutClassification.csv', 'w') as dcm_csv:
        csv_writer = csv.writer(dcm_csv, delimiter=',')
        csv_writer.writerow(["experimentnumber", "dcmsArray"])
        # Echo the active filters once before scanning.
        for keepFilter in FilterForArray:
            print("Filtering for: ", keepFilter[0] , "==", keepFilter[1])
        for removeFilter in FilterAgainstArray:
            print("Filtering for: ", removeFilter[0] , "!=", removeFilter[1])
        for experimentNumber in tqdm(experimentNumbers):
            dataFolder = os.path.join(dataDir,experimentNumber)
            #NOTE: this is a recursive search so it will search every directory and subdirectory for any file that is of type '.dcm'
            dcmFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(dataFolder) for f in filenames if os.path.splitext(f)[1] == '.dcm']
            originalCount += len(dcmFiles)
            #LINE ADDED TO RESOLVE SYMLINKS
            # NOTE(review): one non-symlink entry makes os.readlink raise and
            # the whole list falls back to the unresolved paths.
            try:
                resolvedDCMFiles = [os.readlink(link) for link in dcmFiles]
            except OSError:
                resolvedDCMFiles = dcmFiles
            #filter dicomes for "filters for"
            # 'file' shadows the (py2) builtin; pydicom.read_file is a
            # deprecated alias of pydicom.dcmread -- TODO confirm/migrate.
            filteredForDCM = []
            if len(FilterForArray) > 0:
                for keepFilter in FilterForArray:
                    for file in resolvedDCMFiles:
                        image = pydicom.read_file(file)
                        if getattr(image, keepFilter[0]) == keepFilter[1]:
                            if file not in filteredForDCM:
                                filteredForDCM.append(file)
            else:
                filteredForDCM = resolvedDCMFiles
            #filter dicomes for "filters against"
            # NOTE: filteredAgainstDCM aliases filteredForDCM, so .remove()
            # mutates both lists (harmless: filteredForDCM is not reused after).
            filteredAgainstDCM = filteredForDCM
            if len(FilterAgainstArray) > 0:
                for removeFilter in FilterAgainstArray:
                    for file in filteredForDCM:
                        image = pydicom.read_file(file)
                        if getattr(image, removeFilter[0]) == removeFilter[1]:
                            if file in filteredAgainstDCM:
                                filteredAgainstDCM.remove(file)
            else:
                filteredAgainstDCM = filteredForDCM
            filteredCount += len(filteredAgainstDCM)
            csv_writer.writerow([experimentNumber, filteredAgainstDCM])
    print("Stats \n", "original lenth", originalCount, "\n tag(s) not found for time filters", tagnotFoundTime, "\n filteredLenth", filteredCount)
| [
"46691555+ythackerCS@users.noreply.github.com"
] | 46691555+ythackerCS@users.noreply.github.com |
97dff6c9bea41dde6e6d416fe20ab4804a53ed50 | 9757f47db825c61fd7180462be97e59909d183fc | /env/bin/python-config | 0eedffec046f906eddb2944d98f3b1f0b97d70c6 | [] | no_license | dignacio0815/translatorrepo | dcf449eadc17bc3d89a111dec3120dfbb1a31cc7 | f1beaf0fe378363e597db153976ccf1d46c79910 | refs/heads/master | 2020-03-25T15:44:18.681461 | 2018-08-10T19:38:56 | 2018-08-10T19:38:56 | 143,899,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | #!/home/ubuntu/workspace/translator_project/env/bin/python
import sys
import getopt
import sysconfig
# Command-line options this python-config script accepts; newer Python
# versions add extra introspection flags.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']

if sys.version_info >= (3, 2):
    # insert(-1, ...) slots the option in just before 'help'.
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write the usage line (listing every supported flag) to stderr and
    terminate with *code*."""
    option_list = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], option_list))
    sys.exit(code)
# Parse the command line; any unknown flag or a bare invocation prints usage.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

# Print the requested build/configuration values, one flag at a time.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # Older configs expose the suffix under the legacy 'SO' key.
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
"denizeignacio@gmail.com"
] | denizeignacio@gmail.com | |
e3d05428e8745778fea2949c845fbc7da34a2630 | 6fd76f3bec4aa0784be93cfbd0f6fa72a00bbf5c | /accounts/views.py | efaf95511e52581717a16d54a73d3ba008f78bba | [] | no_license | ihor-nahuliak/Python-Django-Website | 70e43ceadac36c745c6a50fc8635e01872c433e2 | af4338325a9b741a7f047738049218d8384d6183 | refs/heads/master | 2020-12-08T03:35:25.046570 | 2019-11-01T12:07:46 | 2019-11-01T12:07:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
def register(request):
    """Handle user sign-up.

    GET renders the registration form; POST validates the submitted fields
    and either flashes an error and bounces back to the form, or creates
    the user and redirects to the login page.

    Refactor: the original's three-level nested ifs are flattened into
    guard clauses (same checks, same order, same messages); the user-facing
    typo "beign" is fixed to "being".
    """
    if request.method != 'POST':
        return render(request, 'accounts/register.html')

    # Get form values
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    username = request.POST['username']
    email = request.POST['email']
    password = request.POST['password']
    password2 = request.POST['password2']

    # Validation guard clauses: report the first failure and return to the form.
    if password != password2:
        messages.error(request, 'Passwords do not match')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.error(request, 'That username is taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.error(request, 'That email is being used')
        return redirect('register')

    # Looks good: create the account and send the user to the login page.
    user = User.objects.create_user(
        username=username, password=password, email=email,
        first_name=first_name, last_name=last_name)
    user.save()
    messages.success(request, 'You are now registered and can log in')
    return redirect('login')
def login(request):
    """Authenticate a user.

    GET renders the login form; POST checks the credentials, flashing an
    error (back to login) or a success message (on to the dashboard).
    """
    if request.method != 'POST':
        return render(request, 'accounts/login.html')

    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)

    if user is None:
        # authenticate() returns None on bad credentials.
        messages.error(request, 'Invalid Credentials')
        return redirect('login')

    auth.login(request, user)
    messages.success(request, 'You are now logged in')
    return redirect('dashboard')
def logout(request):
    """Log the current user out (POST only) and redirect to the home page.

    NOTE(review): non-POST requests fall through and return None, which
    Django treats as an invalid response -- behavior preserved as-is.
    """
    if request.method != 'POST':
        return None
    auth.logout(request)
    messages.success(request, 'You are logged out')
    return redirect('index')
def dashboard(request):
    """Render the logged-in user's contact inquiries, newest first."""
    contact_list = Contact.objects.order_by('-contact_date').filter(user_id=request.user.id)
    return render(request, 'accounts/dashboard.html', {'contacts': contact_list})
| [
"salmanmoazam08@gmail.com"
] | salmanmoazam08@gmail.com |
e4c15e1ee609db1dfee0dcf2cb6a825074785e3c | 3e9259daf292e924b0f114b3fa2d4249f103de1a | /AI/DEAD_pyvona_test.py | 411d52ac7fb166aaa6486fd4c8ba8199b7210379 | [] | no_license | rajonali/HorribleAI | bd692479dc11114f525b7232e8b442e14ee27cf0 | 6644c931652802244b231df47c68cf4b2d6b673b | refs/heads/master | 2021-01-25T09:04:08.814758 | 2019-02-01T04:53:08 | 2019-02-01T04:53:08 | 83,951,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import pyvona
filename = "/home/rajonali/AI/filename.ogg"
v = pyvona.create_voice("something", "something")
v.speak("Hello World")
| [
"noreply@github.com"
] | noreply@github.com |
0588e6013bc4ccd0a97c815853df716c9fa6e040 | c0ea89d58fd6f780a23f10a0b5535b3feada5a1a | /anchore_engine/services/policy_engine/api/models/image_selection_rule.py | e0f9abbea332fcca8e57209b3916beb1d02c3c34 | [
"Apache-2.0"
] | permissive | longfeide2008/anchore-engine | b62acbab8c7ebbf7fa67a2503768c677942220e4 | 622786ec653531f4fb216cb33e11ffe31fe33a29 | refs/heads/master | 2022-11-08T10:02:51.988961 | 2020-06-15T18:00:37 | 2020-06-15T18:00:37 | 274,068,878 | 1 | 0 | Apache-2.0 | 2020-06-22T07:27:39 | 2020-06-22T07:27:38 | null | UTF-8 | Python | false | false | 4,764 | py | # coding: utf-8
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api.models.image_ref import ImageRef # noqa: F401,E501
from anchore_engine.services.policy_engine.api import util
class ImageSelectionRule(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, registry=None, repository=None, image=None): # noqa: E501
"""ImageSelectionRule - a model defined in Swagger
:param id: The id of this ImageSelectionRule. # noqa: E501
:type id: str
:param name: The name of this ImageSelectionRule. # noqa: E501
:type name: str
:param registry: The registry of this ImageSelectionRule. # noqa: E501
:type registry: str
:param repository: The repository of this ImageSelectionRule. # noqa: E501
:type repository: str
:param image: The image of this ImageSelectionRule. # noqa: E501
:type image: ImageRef
"""
self.swagger_types = {
'id': str,
'name': str,
'registry': str,
'repository': str,
'image': ImageRef
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'registry': 'registry',
'repository': 'repository',
'image': 'image'
}
self._id = id
self._name = name
self._registry = registry
self._repository = repository
self._image = image
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ImageSelectionRule of this ImageSelectionRule. # noqa: E501
:rtype: ImageSelectionRule
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this ImageSelectionRule.
:return: The id of this ImageSelectionRule.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ImageSelectionRule.
:param id: The id of this ImageSelectionRule.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ImageSelectionRule.
:return: The name of this ImageSelectionRule.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ImageSelectionRule.
:param name: The name of this ImageSelectionRule.
:type name: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def registry(self):
"""Gets the registry of this ImageSelectionRule.
:return: The registry of this ImageSelectionRule.
:rtype: str
"""
return self._registry
@registry.setter
def registry(self, registry):
"""Sets the registry of this ImageSelectionRule.
:param registry: The registry of this ImageSelectionRule.
:type registry: str
"""
if registry is None:
raise ValueError("Invalid value for `registry`, must not be `None`") # noqa: E501
self._registry = registry
@property
def repository(self):
"""Gets the repository of this ImageSelectionRule.
:return: The repository of this ImageSelectionRule.
:rtype: str
"""
return self._repository
@repository.setter
def repository(self, repository):
"""Sets the repository of this ImageSelectionRule.
:param repository: The repository of this ImageSelectionRule.
:type repository: str
"""
if repository is None:
raise ValueError("Invalid value for `repository`, must not be `None`") # noqa: E501
self._repository = repository
@property
def image(self):
"""Gets the image of this ImageSelectionRule.
:return: The image of this ImageSelectionRule.
:rtype: ImageRef
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this ImageSelectionRule.
:param image: The image of this ImageSelectionRule.
:type image: ImageRef
"""
if image is None:
raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
self._image = image
| [
"zach@anchore.com"
] | zach@anchore.com |
8ea4f2d22296c8763d073634a3bd564ee230dd68 | 7167c7acbebabc66f3e04959161c720e8091560b | /tour/tests/tour_tests.py | daeba4dec330067ede160de46ba55da76c130ea6 | [
"MIT"
] | permissive | ambitioninc/django-tour | 2a260ae3f6d59218b0cee9a7a9bc8b0a11c3d80f | f0181d71ebd6c66e11dd921ad5e602192fc621cc | refs/heads/develop | 2016-09-05T21:48:39.161270 | 2015-10-05T19:44:58 | 2015-10-05T19:44:58 | 18,072,292 | 26 | 12 | null | 2015-10-05T19:45:15 | 2014-03-24T17:53:55 | Python | UTF-8 | Python | false | false | 10,375 | py | from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import G
from mock import patch
from tour.models import Tour, Step, TourStatus
class BaseTourTest(TestCase):
    """
    Provides basic setup for tour tests like creating users
    """
    def setUp(self):
        super(BaseTourTest, self).setUp()
        # Two plain users for exercising per-user tour state.
        self.test_user = User.objects.create_user('test', 'test@gmail.com', 'test')
        self.test_user2 = User.objects.create_user('test2', 'test2@gmail.com', 'test2')
        # Two tours backed by mock tour classes (G = django_dynamic_fixture).
        self.tour1 = G(
            Tour, display_name='Mock Tour', name='tour1', complete_url='mock_complete1',
            tour_class='tour.tests.mocks.MockTour')
        self.tour2 = G(
            Tour, display_name='Mock Tour 2', name='tour2', complete_url='mock_complete2',
            tour_class='tour.tests.mocks.MockTour2')
        # Five top-level steps in ascending sort order; step5 has no url and
        # uses the base step class.
        self.step1 = G(
            Step, step_class='tour.tests.mocks.MockStep1', display_name='Mock Step 1', name='mock1',
            url='mock1', parent_step=None, sort_order=0)
        self.step2 = G(
            Step, step_class='tour.tests.mocks.MockStep2', display_name='Mock Step 2', name='mock2',
            url='mock2', parent_step=None, sort_order=1)
        self.step3 = G(
            Step, step_class='tour.tests.mocks.MockStep3', display_name='Mock Step 3', name='mock3',
            url='mock3', parent_step=None, sort_order=2)
        self.step4 = G(
            Step, step_class='tour.tests.mocks.MockStep4', display_name='Mock Step 4', name='mock4',
            url='mock4', parent_step=None, sort_order=3)
        self.step5 = G(
            Step, step_class='tour.tours.BaseStep', display_name='Mock Step 5', name='mock5',
            url=None, parent_step=None, sort_order=4)

    def login_user1(self):
        # Log the default test user into the Django test client.
        self.client.login(username='test', password='test')
class TourTest(BaseTourTest):
    """
    Tests the functionality of the BaseTour class
    """
    def test_init(self):
        """
        Verifies that the tour object is properly set when loaded
        """
        self.assertEqual(self.tour1.load_tour_class().tour, self.tour1)

    def test_get_steps_flat(self):
        """
        Verifies that the steps are loaded in the correct order
        """
        # Invert the default sort orders so ordering (not insertion order)
        # is what the assertion exercises.
        self.step1.sort_order = 1
        self.step1.save()
        self.step2.sort_order = 0
        self.step2.save()
        self.tour1.steps.add(self.step1, self.step2)
        expected_steps = [self.step2, self.step1]
        self.assertEqual(expected_steps, self.tour1.load_tour_class().get_steps())

    def test_get_steps_nested(self):
        """
        Verifies that the nested steps are loaded correctly
        """
        # step1 gets child steps; children are expected to appear directly
        # after their parent, ordered by their own sort_order.
        self.tour1.steps.add(self.step1, self.step2)
        self.step1.steps.add(self.step3, self.step4)
        self.step3.sort_order = 1
        self.step3.save()
        self.step4.sort_order = 0
        self.step4.save()
        expected_steps = [self.step1, self.step4, self.step3, self.step2]
        self.assertEqual(expected_steps, self.tour1.load_tour_class().get_steps())

    def test_get_url_list(self):
        """
        Verifies that the tour returns the correct step url list
        """
        # step5 has url=None, so it must not appear in the url list.
        self.tour1.steps.add(self.step1, self.step5, self.step2)
        expected_url_list = ['mock1', 'mock2']
        self.assertEqual(expected_url_list, self.tour1.load_tour_class().get_url_list())

    def test_add_user(self):
        """
        Verifies that a user is linked to a tour properly and that the correct tour is returned
        """
        # add user to tour
        tour_status = self.tour1.load_tour_class().add_user(self.test_user)
        # try to add again and make sure it returns the same status
        self.assertEqual(tour_status, self.tour1.load_tour_class().add_user(self.test_user))
        # make sure only one status
        self.assertEqual(1, TourStatus.objects.count())
        # mark status as complete
        tour_status.complete = True
        tour_status.save()
        # make sure another tour is created
        self.tour1.load_tour_class().add_user(self.test_user)
        self.assertEqual(2, TourStatus.objects.count())
        self.assertEqual(1, TourStatus.objects.filter(complete=False).count())

    def test_mark_complete(self):
        """
        Verifies that a tour status record will be marked as complete for a user
        """
        # add multiple users to multiple tours
        tour1_class = self.tour1.load_tour_class()
        tour2_class = self.tour2.load_tour_class()
        tour1_class.add_user(self.test_user)
        tour1_class.add_user(self.test_user2)
        tour2_class.add_user(self.test_user)
        tour2_class.add_user(self.test_user2)
        # make sure there are 4 records
        self.assertEqual(4, TourStatus.objects.count())
        # complete the tour for user1
        self.assertTrue(tour1_class.mark_complete(self.test_user))
        # make sure it is complete
        self.assertEqual(1, TourStatus.objects.filter(complete=True).count())
        # try to complete the same tour; no open status remains so this fails
        self.assertFalse(tour1_class.mark_complete(self.test_user))
        # add the user to the tour again
        tour1_class.add_user(self.test_user)
        # make sure there are 5 records
        self.assertEqual(5, TourStatus.objects.count())

    # NOTE: stacked @patch decorators apply bottom-up, so the first mock
    # argument corresponds to the last (lowest) decorator: MockStep1.
    @patch('tour.tests.mocks.MockStep4.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep3.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep2.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_get_current_step(
            self, mock_step1_is_complete, mock_step2_is_complete, mock_step3_is_complete, mock_step4_is_complete):
        """
        Verifies that the tour class returns the first incomplete step
        :type mock_step1_is_complete: Mock
        :type mock_step2_is_complete: Mock
        :type mock_step3_is_complete: Mock
        :type mock_step4_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        mock_step2_is_complete.return_value = False
        mock_step3_is_complete.return_value = False
        mock_step4_is_complete.return_value = False
        # Nest steps 3 and 4 under step 1.
        self.tour1.steps.add(self.step1, self.step2)
        self.step1.steps.add(self.step3, self.step4)
        tour1_class = self.tour1.load_tour_class()
        self.assertEqual(self.step1, tour1_class.get_current_step(self.test_user))
        mock_step1_is_complete.return_value = True
        mock_step3_is_complete.return_value = True
        self.assertEqual(self.step4, tour1_class.get_current_step(self.test_user))
        mock_step4_is_complete.return_value = True
        mock_step2_is_complete.return_value = True
        # All steps complete -> no current step.
        self.assertIsNone(tour1_class.get_current_step(self.test_user))

    @patch('tour.tests.mocks.MockStep4.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep3.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep2.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_get_next_url(
            self, mock_step1_is_complete, mock_step2_is_complete, mock_step3_is_complete, mock_step4_is_complete):
        """
        Verifies that the url is returned for the current step
        :type mock_step1_is_complete: Mock
        :type mock_step2_is_complete: Mock
        :type mock_step3_is_complete: Mock
        :type mock_step4_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        mock_step2_is_complete.return_value = False
        mock_step3_is_complete.return_value = False
        mock_step4_is_complete.return_value = False
        # Order the tour as: step1, step5 (parent of 3 and 4), step2.
        self.step5.sort_order = 1
        self.step5.save()
        self.step2.sort_order = 3
        self.step2.save()
        self.tour1.steps.add(self.step1, self.step2, self.step5)
        self.step5.steps.add(self.step3, self.step4)
        tour1_class = self.tour1.load_tour_class()
        self.assertEqual('mock1', tour1_class.get_next_url(self.test_user))
        mock_step1_is_complete.return_value = True
        # step5 has no url, so its first incomplete child's url is returned.
        self.assertEqual('mock3', tour1_class.get_next_url(self.test_user))
        mock_step3_is_complete.return_value = True
        self.assertEqual('mock4', tour1_class.get_next_url(self.test_user))
        mock_step4_is_complete.return_value = True
        self.assertEqual('mock2', tour1_class.get_next_url(self.test_user))
        mock_step2_is_complete.return_value = True
        # Everything complete -> the tour's complete_url is returned.
        self.assertEqual('mock_complete1', tour1_class.get_next_url(self.test_user))

    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_is_complete(self, mock_step1_is_complete):
        """
        Verifies that a tour returns true when complete and false when incomplete
        :type mock_step1_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        self.tour1.steps.add(self.step1)
        tour1_class = self.tour1.load_tour_class()
        self.assertFalse(tour1_class.is_complete(self.test_user))
        mock_step1_is_complete.return_value = True
        self.assertTrue(tour1_class.is_complete(self.test_user))
class StepTest(BaseTourTest):
    """
    Tests the functionality of the BaseStep class
    """
    def test_init(self):
        """
        Verifies that the step object is properly set when loaded
        """
        loaded = self.step1.load_step_class()
        self.assertEqual(loaded.step, self.step1)

    def test_is_complete(self):
        """
        Verifies that a step returns true by default
        """
        loaded = self.step1.load_step_class()
        self.assertTrue(loaded.is_complete(self.test_user))

    def test_get_steps_flat(self):
        """
        Verifies that the steps are loaded in the correct order
        """
        self.step1.steps.add(self.step2, self.step3)
        self.assertEqual(
            [self.step2, self.step3],
            self.step1.load_step_class().get_steps())

    def test_get_steps_nested(self):
        """
        Verifies that the nested steps are loaded correctly
        """
        # step3 and step4 are grandchildren via step2 and should be
        # flattened into the result after their parent.
        self.step1.steps.add(self.step2)
        self.step2.steps.add(self.step3, self.step4)
        self.assertEqual(
            [self.step2, self.step3, self.step4],
            self.step1.load_step_class().get_steps())
| [
"wes.okes@gmail.com"
] | wes.okes@gmail.com |
03de2449eab530e9938f1967544c20b6d90aa7d6 | 7ac83871db3ac0d2f3403ca574d6c16729b14e51 | /p14.py | a035e1d6971ebe5411b33deef53ec2b3baa6c913 | [] | no_license | hexiaolang/TensorFlow | aff18372c8edbc0102c83e535315e42c9ce0b022 | 82f5c00045e0a0bc49cffbcb4572fbdc4d8ec2b7 | refs/heads/master | 2021-04-26T23:56:12.375649 | 2018-03-06T09:23:38 | 2018-03-06T09:23:38 | 123,882,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | # -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    """Build one fully connected layer and attach TensorBoard summaries.

    :param inputs: input tensor; last dimension must be ``in_size``
        (assumed shape (batch, in_size) — consistent with the matmul below)
    :param in_size: number of input features
    :param out_size: number of output units
    :param n_layer: integer used to name the layer scope ('layer<n>')
    :param activation_function: optional activation applied to the
        pre-activation output; identity when None
    :return: the layer's output tensor
    """
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Randomly initialized weight matrix, logged as a histogram.
            weight_var = tf.Variable(tf.random_normal([in_size, out_size]))
            tf.summary.histogram(layer_name + '/weights', weight_var)
        with tf.name_scope('biases'):
            # Small positive bias init (0.1) to avoid dead ReLU units.
            bias_var = tf.Variable(tf.zeros([1, out_size]) + 0.1)
            tf.summary.histogram(layer_name + '/biases', bias_var)
        with tf.name_scope('Wx_plus_b'):
            pre_activation = tf.matmul(inputs, weight_var) + bias_var
        # Identity output when no activation function is supplied.
        if activation_function is None:
            outputs = pre_activation
        else:
            outputs = activation_function(pre_activation)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# Synthetic regression data: y = x^2 - 0.5 plus Gaussian noise.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# define placeholder for inputs to network
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

# add hidden layer
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)

# the error between prediction and real data (mean squared error)
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# FIX: the original code never closed the session or the summary writer,
# leaking resources and risking unflushed TensorBoard events. Use a context
# manager for the session and close the writer explicitly.
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("logs/", sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            # Record merged summaries every 50 steps for TensorBoard.
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
            writer.add_summary(result, i)
    writer.close()
"helin0905@163.com"
] | helin0905@163.com |
6f319e57426860fd21a49fcc0ff9ad5f63b64e02 | ed9d718007b5bc776f3405ad6bac3a64abdebf0b | /google/cloud/logging_v2/services/metrics_service_v2/transports/grpc.py | 6a7a2c6a61a74aea88c49021160e9d906ee686b5 | [
"Apache-2.0"
] | permissive | renovate-bot/python-logging | 06b020e1aaae238b2693264bbad489567902481b | 28d141d0e8ed4560d2e33f8de0d43b0825a7f33f | refs/heads/master | 2023-08-31T09:01:49.829649 | 2021-08-30T20:37:20 | 2021-08-30T20:37:20 | 238,564,076 | 0 | 0 | Apache-2.0 | 2020-02-05T22:49:14 | 2020-02-05T22:49:13 | null | UTF-8 | Python | false | false | 15,816 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.logging_v2.types import logging_metrics
from google.protobuf import empty_pb2 # type: ignore
from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO
class MetricsServiceV2GrpcTransport(MetricsServiceV2Transport):
    """gRPC backend transport for MetricsServiceV2.

    Service for configuring logs-based metrics.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Lazily populated cache of RPC stub callables, keyed by method name.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "logging.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                # Non-deprecated mTLS path: only used when no explicit SSL
                # channel credentials were supplied.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        # Only create a channel here if one was not supplied by the caller.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(
        cls,
        host: str = "logging.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def list_log_metrics(
        self,
    ) -> Callable[
        [logging_metrics.ListLogMetricsRequest], logging_metrics.ListLogMetricsResponse
    ]:
        r"""Return a callable for the list log metrics method over gRPC.

        Lists logs-based metrics.

        Returns:
            Callable[[~.ListLogMetricsRequest],
                    ~.ListLogMetricsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_log_metrics" not in self._stubs:
            self._stubs["list_log_metrics"] = self.grpc_channel.unary_unary(
                "/google.logging.v2.MetricsServiceV2/ListLogMetrics",
                request_serializer=logging_metrics.ListLogMetricsRequest.serialize,
                response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize,
            )
        return self._stubs["list_log_metrics"]

    @property
    def get_log_metric(
        self,
    ) -> Callable[[logging_metrics.GetLogMetricRequest], logging_metrics.LogMetric]:
        r"""Return a callable for the get log metric method over gRPC.

        Gets a logs-based metric.

        Returns:
            Callable[[~.GetLogMetricRequest],
                    ~.LogMetric]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_log_metric" not in self._stubs:
            self._stubs["get_log_metric"] = self.grpc_channel.unary_unary(
                "/google.logging.v2.MetricsServiceV2/GetLogMetric",
                request_serializer=logging_metrics.GetLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs["get_log_metric"]

    @property
    def create_log_metric(
        self,
    ) -> Callable[[logging_metrics.CreateLogMetricRequest], logging_metrics.LogMetric]:
        r"""Return a callable for the create log metric method over gRPC.

        Creates a logs-based metric.

        Returns:
            Callable[[~.CreateLogMetricRequest],
                    ~.LogMetric]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_log_metric" not in self._stubs:
            self._stubs["create_log_metric"] = self.grpc_channel.unary_unary(
                "/google.logging.v2.MetricsServiceV2/CreateLogMetric",
                request_serializer=logging_metrics.CreateLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs["create_log_metric"]

    @property
    def update_log_metric(
        self,
    ) -> Callable[[logging_metrics.UpdateLogMetricRequest], logging_metrics.LogMetric]:
        r"""Return a callable for the update log metric method over gRPC.

        Creates or updates a logs-based metric.

        Returns:
            Callable[[~.UpdateLogMetricRequest],
                    ~.LogMetric]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_log_metric" not in self._stubs:
            self._stubs["update_log_metric"] = self.grpc_channel.unary_unary(
                "/google.logging.v2.MetricsServiceV2/UpdateLogMetric",
                request_serializer=logging_metrics.UpdateLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs["update_log_metric"]

    @property
    def delete_log_metric(
        self,
    ) -> Callable[[logging_metrics.DeleteLogMetricRequest], empty_pb2.Empty]:
        r"""Return a callable for the delete log metric method over gRPC.

        Deletes a logs-based metric.

        Returns:
            Callable[[~.DeleteLogMetricRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_log_metric" not in self._stubs:
            self._stubs["delete_log_metric"] = self.grpc_channel.unary_unary(
                "/google.logging.v2.MetricsServiceV2/DeleteLogMetric",
                request_serializer=logging_metrics.DeleteLogMetricRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_log_metric"]
__all__ = ("MetricsServiceV2GrpcTransport",)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.