blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2cf4326eedaf1275ff01e755c7a246a728335ff | 832bbbb47e337b5872ea2012bb922c198fdf76c7 | /python2/a-parser-python2/__init__.py | 973869c869f3cc886f5abec57849330fc50c26b1 | [
"MIT"
] | permissive | a-parser/api-python | 0d4ac01c37e1ea67a66dfc814cba2ec72a317ac3 | b4f247de692ac9bf008f19573157b5fad4741a64 | refs/heads/master | 2022-11-04T23:39:43.985356 | 2020-06-25T08:37:04 | 2020-06-25T08:37:04 | 274,657,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,449 | py | # AParser Python v2 API
import urllib2, json, time, sys
class AParser:
    """Thin JSON-over-HTTP client for the A-Parser control API (Python 2).

    Every public method maps one-to-one onto an API action and returns the
    decoded JSON response as a dict.
    """
    def __init__(self, uri, password):
        # uri: base endpoint of the A-Parser instance; password: API password.
        self.password = password
        self.uri = uri
    def doRequest(self, action, data=None, options=None):
        """POST `action` (with optional `data` merged with `options`) and
        return the decoded JSON reply.

        `options=None` replaces the original mutable default `{}`, a classic
        shared-default pitfall; behavior for all existing callers is unchanged.
        """
        params = {'password': self.password, 'action': action}
        if data:
            data.update(options or {})
            params['data'] = data
        body = bytes(json.dumps(params))
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        req = urllib2.Request(self.uri, data=body, headers=headers)
        response = urllib2.urlopen(req).read().decode('utf-8')
        responseData = json.loads(response)
        return responseData
    def ping(self):
        """Liveness check."""
        return self.doRequest('ping')
    def info(self):
        """General instance information."""
        return self.doRequest('info')
    def getProxies(self):
        """Currently loaded proxy list."""
        return self.doRequest('getProxies')
    def getParserPreset(self, parser, preset):
        """Fetch the settings of `preset` for `parser`."""
        data = {'parser': parser, 'preset': preset}
        return self.doRequest('getParserPreset', data)
    def oneRequest(self, parser, preset, query, **kwargs):
        """Run a single `query` through `parser`; extra kwargs become options."""
        data = {'parser': parser, 'preset': preset, 'query': query}
        return self.doRequest('oneRequest', data, kwargs)
    def bulkRequest(self, parser, preset, configPreset, threads, queries, **kwargs):
        """Run many `queries` in parallel with `threads` workers."""
        data = {'parser': parser, 'preset': preset, 'configPreset': configPreset, 'threads': threads, 'queries': queries}
        return self.doRequest('bulkRequest', data, kwargs)
    def addTask(self, parsers, configPreset, queriesFrom, queries, **kwargs):
        """Queue a task; `queriesFrom` selects inline 'text' queries or a file."""
        data = {
            'parsers': parsers, 'configPreset': configPreset, 'queriesFrom': queriesFrom,
            'queries' if queriesFrom == 'text' else 'queriesFile': queries
        }
        return self.doRequest('addTask', data, kwargs)
    def getTaskState(self, task_id):
        """Current status of task `task_id`."""
        data = {'taskUid': task_id}
        return self.doRequest('getTaskState', data)
    def getTaskConf(self, task_id):
        """Configuration of task `task_id`."""
        data = {'taskUid': task_id}
        return self.doRequest('getTaskConf', data)
    def changeTaskStatus(self, task_id, to_status):
        """Transition a task; to_status: starting|pausing|stopping|deleting."""
        data = {'taskUid': task_id, 'toStatus': to_status}
        return self.doRequest('changeTaskStatus', data)
    def waitForTask(self, task_id, interval=5):
        """Poll getTaskState every `interval` seconds until completion.

        Returns the final state dict, or the raw response when it carries
        no 'data' field (treated as an error reply).
        """
        while True:
            response = self.getTaskState(task_id)
            if 'data' not in response:
                return response
            state = response['data']
            if state['status'] == 'completed':
                return state
            time.sleep(interval)
    def moveTask(self, task_id, direction):
        """Reorder a task in the queue; direction: start|end|up|down."""
        data = {'taskUid': task_id, 'direction': direction}
        return self.doRequest('moveTask', data)
    def getTaskResultsFile(self, task_id):
        """URL/handle of the task's results file."""
        data = {'taskUid': task_id}
        return self.doRequest('getTaskResultsFile', data)
    def deleteTaskResultsFile(self, task_id):
        """Delete the task's results file on the server."""
        data = {'taskUid': task_id}
        return self.doRequest('deleteTaskResultsFile', data)
    def getTasksList(self):
        """List queued/running tasks."""
        return self.doRequest('getTasksList')
    def getParserInfo(self, parser):
        """Metadata about a single parser."""
        data = {'parser': parser}
        return self.doRequest('getParserInfo', data)
    def getAccountsCount(self):
        """Number of loaded accounts."""
        return self.doRequest('getAccountsCount')
    def update(self):
        """Trigger a self-update of the A-Parser instance."""
        return self.doRequest('update')
| [
"bykovvvladlen@gmail.com"
] | bykovvvladlen@gmail.com |
9d3d87b1db818f478f4aa85b0c257eee39b0700b | c609730a43596a2d3303f072fc97d9cf681fac7b | /cagey/usedcar/main_haoche99.py | 386a9e6ee701ee754cd28189f895ff6701bf3b18 | [] | no_license | sinnettluo/ChenProject | 5403311c0c7b78c484145e16d692abff00d2a110 | 0e33ecf1683afb22f1deb4bd54294c41aed8a46b | refs/heads/master | 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from scrapy.cmdline import execute
import sys
import os
# Name of the spider registered in this Scrapy project.
website = "haoche99"
# Make the project package next to this script importable.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Equivalent to running `scrapy crawl haoche99` from the shell.
execute(["scrapy", "crawl", website])
| [
"1316446041@qq.com"
] | 1316446041@qq.com |
d3550d7689399933bc52ca671f322510fc34bf23 | d94c5849e6308901f9af8a4edf8c8369d46576d1 | /BOJ/14499_주사위 굴리기.py | 22ad001dcfef81e9fc7a3e7aee0a5e29963d830e | [] | no_license | AhnDogeon/algorithm_study | b4c961b934b5e27afccdf2713a2ccb0174d9a698 | b8de39fff92cc98281ba7e94df82bcc9b1503243 | refs/heads/master | 2022-06-05T11:33:27.392131 | 2022-05-23T06:37:50 | 2022-05-23T06:37:50 | 188,783,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | import sys
from copy import deepcopy
# Redirect stdin so input() reads the bundled sample-input file.
sys.stdin = open('14499_주사위 굴리기.txt', 'r')
# N, M: board size; (x, y): die start cell; K: number of move commands.
N, M, x, y, K = map(int, input().split())
board = []
for _ in range(N):
    board_list = list(map(int, input().split()))
    board.append(board_list)
# Sequence of move commands (1=right, 2=left, 3=up, 4=down, per the loop below).
move = list(map(int, input().split()))
# print(move)
#
# print('=========== debugging =====================')
# for i in range(N):
#     for j in range(M):
#         print(board[i][j], end=' ')
#     print()
# print('=====================================')
# Die face state shared by RIGHT/LEFT/UP/DOWN; all faces start at 0.
# `middle` is the face printed after each successful roll.
up = 0
middle = 0
left = 0
right = 0
down = 0
bottom = 0
def RIGHT(a, b):
    """Roll the die one cell to the right onto board[a][b] and print `middle`."""
    global board, up, middle, left, right, down, bottom
    # Face rotation for a rightward roll; `rolled_bottom` is the face that
    # lands on the floor.  A tuple assignment replaces the original
    # deepcopy() calls: ints are immutable, so copying them was pointless.
    middle, left, right, rolled_bottom = left, bottom, middle, right
    if board[a][b] == 0:
        # Empty cell: the die's bottom face value is written onto the board.
        bottom = rolled_bottom
        board[a][b] = bottom
    else:
        # Non-zero cell: its value is copied to the bottom face, cell cleared.
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def LEFT(a, b):
    """Roll the die one cell to the left onto board[a][b] and print `middle`."""
    global board, up, middle, left, right, down, bottom
    # Face rotation for a leftward roll (see RIGHT for the board rule);
    # tuple assignment replaces the original no-op deepcopy() of ints.
    middle, left, right, rolled_bottom = right, middle, bottom, left
    if board[a][b] == 0:
        bottom = rolled_bottom
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def UP(a, b):
    """Roll the die one cell upward onto board[a][b] and print `middle`."""
    global board, up, middle, left, right, down, bottom
    # Face rotation for an upward roll; left/right faces are unchanged.
    # Tuple assignment replaces the original no-op deepcopy() of ints.
    up, middle, down, rolled_bottom = middle, down, bottom, up
    if board[a][b] == 0:
        bottom = rolled_bottom
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
def DOWN(a, b):
    """Roll the die one cell downward onto board[a][b] and print `middle`."""
    global board, up, middle, left, right, down, bottom
    # Face rotation for a downward roll; left/right faces are unchanged.
    # Tuple assignment replaces the original no-op deepcopy() of ints.
    up, middle, down, rolled_bottom = bottom, up, middle, down
    if board[a][b] == 0:
        bottom = rolled_bottom
        board[a][b] = bottom
    else:
        bottom = board[a][b]
        board[a][b] = 0
    print(middle)
# Apply each command: 1=right, 2=left, 3=up, 4=down.  Moves that would
# leave the board are ignored (the die stays put and nothing is printed).
for i in move:
    if i == 1:
        dx, dy = x, y + 1
        if 0 <= dx < N and 0 <= dy < M:
            RIGHT(dx, dy)
            x, y = dx, dy
    elif i == 2:
        dx, dy = x, y - 1
        if 0 <= dx < N and 0 <= dy < M:
            LEFT(dx, dy)
            x, y = dx, dy
    elif i == 3:
        dx, dy = x - 1, y
        if 0 <= dx < N and 0 <= dy < M:
            UP(dx, dy)
            x, y = dx, dy
    elif i == 4:
        dx, dy = x + 1, y
        if 0 <= dx < N and 0 <= dy < M:
            DOWN(dx, dy)
            x, y = dx, dy
#
# print('=========== debugging =====================')
# for i in range(N):
#     for j in range(M):
#         print(board[i][j], end=' ')
#     print()
# print('=====================================')
#
| [
"qltiqlti@gmail.com"
] | qltiqlti@gmail.com |
1ada52b90f298f06b7a57e15ded114cdbd381a91 | 2b9e57bd48c4b55d118a0e9f395faad507a782f0 | /modules/geometry.py | 86249494df29080d34a1fd2c8a055b8d4110296b | [] | no_license | Wei-Tso/Python-Tutorial_From-YouTube | 97f6bc60a7b48176eeb431f84ac1feae06d4ebf2 | 51381111e9a38b0b0003197bcee978bdb6d570ac | refs/heads/main | 2023-01-02T15:22:28.498845 | 2020-11-04T17:53:30 | 2020-11-04T17:53:30 | 310,073,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | # 在 geometry 模組中定義幾何運算功能
# Distance between two points.
def distance(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return (dx**2 + dy**2)**0.5
# Slope of the segment through two points.
def slope(x1, y1, x2, y2):
    """Rise over run; raises ZeroDivisionError for a vertical segment."""
    rise = y2 - y1
    run = x2 - x1
    return rise / run
# Average (arithmetic mean) of the given numbers.
def average(*numbers):
    """Return the mean of the arguments.

    Uses the builtin sum() instead of the original hand-rolled loop; like
    the original, calling with no arguments raises ZeroDivisionError.
    """
    return sum(numbers) / len(numbers)
# Addition of all arguments.
def add(*numbers):
    """Return the sum of the arguments (0 when called with none).

    Replaces the original manual accumulation loop with the builtin sum().
    """
    return sum(numbers)
# Subtraction: first argument minus the sum of the rest.
def subtract(*numbers):
    """Return numbers[0] - numbers[1] - ... - numbers[-1].

    BUG FIX: the original coerced the first argument with int(), silently
    truncating float inputs -- inconsistent with add/multiply, which accept
    floats unchanged.  Integer inputs behave exactly as before.
    """
    first, *rest = numbers
    return first - sum(rest)
# Multiplication of all arguments.
def multiply(*numbers):
    """Return the product of the arguments (1 when called with none)."""
    product = 1
    for factor in numbers:
        product *= factor
    return product
# Division: first argument divided by the product of the rest.
def divide(*numbers):
    """Return numbers[0] / (numbers[1] * numbers[2] * ...).

    Returns 0 when the numerator is 0 and the original error string when
    any denominator is 0.  BUG FIX: the original coerced the numerator
    with int(), so e.g. divide(0.5, 2) silently returned 0.
    """
    values = list(numbers)
    denominators = values[1:]
    numerator = values[0]
    if numerator == 0:
        return 0
    else:
        # Hoisted out of the loop: this membership test does not depend on
        # the loop variable, so the original re-checked it needlessly on
        # every iteration.
        if 0 in denominators:
            return "分母不能為 0"
        product = 1
        for denominator in denominators:
            product = product * denominator
        return numerator / product
return (n1/basic) | [
"kobe87020@gmail.com"
] | kobe87020@gmail.com |
f34ee70b9a52f64e76c7679f05e1caf15fab05e3 | 3750311368d32d3431a6c7e14b8566fb7ad59470 | /backend/src/api/create_store.py | 56e4817140a2b52bf761dd0ef0f6771945eb46db | [] | no_license | queststudio/momma-dog | 5a9a584c9e96d344a70881fa2014c42f5316efb8 | e75951042b50833485667b2b257503ad31a6c3ad | refs/heads/master | 2022-12-11T19:27:15.964172 | 2019-07-07T15:42:16 | 2019-07-07T15:47:49 | 121,881,640 | 1 | 0 | null | 2022-12-07T19:40:40 | 2018-02-17T18:37:08 | Python | UTF-8 | Python | false | false | 459 | py | from src.relays.set_state import set_state
from src.relays.render import render_state
from src.game.store import Store, init_state
from src.game.middlewares import restart_middleware_creator
from src.relays.restart import restart_creator
# Wire the restart relay and its middleware once at import time.
restart = restart_creator(set_state)
restart_middleware = restart_middleware_creator(restart)
def create_store():
    """Build the game Store with restart middleware and a render subscriber."""
    store = Store(init_state, [restart_middleware])
    # re-render whenever the state changes
    store.subscribe(render_state)
    return store
| [
"d.f.goryunov@gmail.com"
] | d.f.goryunov@gmail.com |
0384c48739c535d5420ddbfa9c2b1913e77b9422 | 0642c068d96ae43bbb800283d1aba3bd663740b1 | /download/urls.py | e915a1ba491a6a6cc1cd09a6bb7c6f6dc1a45e80 | [] | no_license | weien8899/CyanScikit | 6724f2f7d3a810a1265edd9ac3a8201c1695d264 | 59a083d9a81c557e5ec7d10d50192f43ff936231 | refs/heads/master | 2020-03-17T17:00:57.825089 | 2017-07-16T15:26:04 | 2017-07-16T15:26:04 | 133,771,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.conf.urls import patterns,include, url
from django.contrib import admin
urlpatterns = patterns(
'download.views',
url(r'^admin/', include(admin.site.urls)),
url(r'^download/$', 'download'),
url(r'^onecateDown/(\w+)/$', 'onecateDown'),
url(r'^more/$', 'more'),
)
| [
"Thinkgamer@163.com"
] | Thinkgamer@163.com |
d89692a51b4763f9f8a3060e3dbea201c0530805 | 3344516cfaade0f0d2223c84127aefb91a8a8071 | /python_intermedio/assert_statement.py | a945ee9cdb30fc096fc54582fd69b79bf3b0cdc3 | [] | no_license | maurogome/platzi | 91ff14519dcfe48a26bfb34d2031e4e6146e131e | 56e51cef4b2ec82e8c52d3c384202a42c480817c | refs/heads/master | 2023-04-09T23:48:02.730877 | 2021-04-25T16:36:28 | 2021-04-25T16:36:28 | 268,672,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | def divisors(num):
    # Collect every integer in [1, num] that divides num evenly.
    divisors = []
    for i in range(1, num + 1):
        if num % i == 0:
            divisors.append(i)
    return divisors
def run():
    """Prompt for a positive integer and print its divisors.

    Raises ValueError (with the original Spanish messages) on bad input.
    """
    num = input("Ingrese un número:")
    # Explicit exceptions instead of assert: assertions are stripped when
    # Python runs with -O, so they must never guard user input.
    if not num.strip("-").isnumeric():
        raise ValueError("Debes ingresar un numero")
    if int(num) <= 0:
        raise ValueError("El numero debe ser positivo")
    print(divisors(int(num)))
print("Final del programa")
if __name__ == "__main__":
run() | [
"mauriciogom@gmail.com"
] | mauriciogom@gmail.com |
0d5757a1a9ed5bcbb7dbb9f0d6480b75d12b5efe | 4d1f1e188a4db8e909430b55bddf0d8113a28fcf | /reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py | 5b5895e3aafe8d93a6fc7131ffb272cf3044f4a9 | [] | no_license | paurbano/holbertonschool-machine_learning | b0184a71733a1f51633ba7c7f4d3a82b8d50e94f | ff1af62484620b599cc3813068770db03b37036d | refs/heads/master | 2023-07-02T16:20:13.668083 | 2023-06-18T06:25:26 | 2023-06-18T06:25:26 | 279,967,511 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | #!/usr/bin/env python3
'''Epsilon Greedy
https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/
master/Q%20learning/FrozenLake/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
def epsilon_greedy(Q, state, epsilon):
    '''Pick the next action index with epsilon-greedy exploration.

    With probability `epsilon` a uniformly random action is drawn
    (exploration); otherwise the action with the largest Q-value for
    `state` is returned (exploitation).
    '''
    # A single uniform draw decides between exploring and exploiting.
    explore = np.random.uniform(0, 1) <= epsilon
    if explore:
        # random action over all columns of the Q-table
        return np.random.randint(0, int(Q.shape[1]))
    # greedy action for the current state
    return np.argmax(Q[state, :])
| [
"paurbano@gmail.com"
] | paurbano@gmail.com |
835d78cc36290fc2fe0ecf0ca6a00d9de1712ccf | e4a8a8066f656761f85905e24486903b4094d3d0 | /DB_site/apps.py | 50263391bbfcc608bc29a88a5727e906b3fd2e14 | [] | no_license | wikibady/DB_lol | 3acfebd7330853557ea5ecf9b9c7f33a584bb1d9 | 43654e0fbd345788086ec0e8ad575460c1a9b355 | refs/heads/master | 2021-01-18T10:47:32.158147 | 2016-05-17T13:42:26 | 2016-05-17T13:42:26 | 59,025,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class DbSiteConfig(AppConfig):
    """Django AppConfig registering the DB_site application."""
    name = 'DB_site'
| [
"hysuperliu@wikibady.com"
] | hysuperliu@wikibady.com |
bcc938c96292fc6885f94488a3965fd6128af6ed | 5e14b8e24a1a3255b67006bafde2f809cf3e7f5c | /profiles/schema.py | 6129c5dbd075ce407932858596e1eabdecad3c7e | [] | no_license | marcoacierno/Pasteit-Backend | c2f5c9c2792deb01ea85732a98b963a8c2c16717 | 0088dfd69e631a8b6656cf4deeb568c1324cabbe | refs/heads/master | 2021-01-15T15:04:33.886036 | 2016-06-06T08:17:15 | 2016-06-06T08:17:15 | 58,814,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | import graphene
from django.contrib.auth import get_user_model
from graphene import relay, with_context
from graphene.contrib.django import DjangoNode
class UserNode(DjangoNode):
    """Relay node for the Django user model with sensitive fields excluded."""
    # Connection to the user's pastes; 'PasteNode' is resolved lazily by name.
    pastes = relay.ConnectionField('PasteNode')
    node = relay.NodeField()
    def resolve_pastes(self, args, info):
        """Return every paste related to this user."""
        return self.pastes.all()
    class Meta:
        model = get_user_model()
        exclude_fields = ('is_staff', 'is_superuser', 'password', 'is_active', 'user')
class Query(graphene.ObjectType):
    """Abstract root query exposing relay node lookup and the current user."""
    node = relay.NodeField()
    # The authenticated user, or null when the request is anonymous.
    me = graphene.Field(UserNode)
    @with_context
    def resolve_me(self, args, context, info):
        """Return the request's user, or None for anonymous visitors."""
        me = context.user
        # NOTE(review): the `is True` is redundant; also on newer Django
        # versions is_anonymous is a property, not a method -- verify target
        # Django version before changing.
        if me.is_anonymous() is True:
            return None
            # raise ValueError('You cannot query yourself if you are not logged')
        return me
    class Meta:
        abstract = True
| [
"marcoacierno96@gmail.com"
] | marcoacierno96@gmail.com |
374054f4b1cc28d43773f1c286075531a60030a4 | ca2ed68bcd084913cf592f4e856484be01e2bce0 | /week028--get-in-line/charles.mcmillan/solution.py | 19d3c4759ec5878ea634557b18907660590b8334 | [] | no_license | viewthespace/code-challenge | ddcab182abbda3d1fd383bb742972dcdcaeb2758 | 3ceec2ceb1c8b688a5f1a232992cb8155325fc1a | refs/heads/master | 2021-03-30T15:54:48.321310 | 2019-03-20T02:53:38 | 2019-03-20T02:53:38 | 42,602,268 | 0 | 1 | null | 2019-03-05T01:38:45 | 2015-09-16T17:20:44 | OCaml | UTF-8 | Python | false | false | 2,509 | py | ##
## Time Complexity: O(n^2)
## Space Complexity: O(n^2)
##
import toml
import ast
import math
# Load all test cases once at import time (path relative to the working dir).
toml_dict = toml.load('../testcases.toml')
class Line:
    """A 2-D line identified by (slope, intercept); hashable for dict keys."""

    def __init__(self, slope, intercept):
        self.slope = slope
        self.intercept = intercept

    def __str__(self):
        return "{}x + {}".format(self.slope, self.intercept)

    def __hash__(self):
        # Hash via the string form so equal lines hash equally.
        return hash(str(self))

    def __eq__(self, other):
        return (self.slope, self.intercept) == (other.slope, other.intercept)
class Point:
    """A 2-D point; hashable so it can live in the per-line sets."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "(x: {}, y: {})".format(self.x, self.y)

    def __hash__(self):
        # Hash via the string form so equal points hash equally.
        return hash(str(self))

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)
class Solution:
    """Find the maximum number of input points lying on one straight line."""

    def __init__(self, points_array):
        # points_array: iterable of (x, y) pairs
        self.points_array = [Point(point[0], point[1]) for point in points_array]
        # Line -> set of Points lying on that line
        self.line_hash = {}

    def max_linear_points(self):
        """Group every point pair by the line through it; return the largest group."""
        for point1 in self.points_array:
            for point2 in self.points_array:
                if point1 == point2:
                    continue
                line = self.line_from_points(point1, point2)
                self.line_hash.setdefault(line, set()).update((point1, point2))
        return self.tally_count(self.line_hash, self.points_array)

    def tally_count(self, line_hash, points_array):
        """Size of the largest collinear point set (0 or 1 for tiny inputs)."""
        if not points_array:
            return 0
        if len(points_array) == 1:
            return 1
        # default=0 preserves the original result when every pair was skipped
        # (all points identical leaves line_hash empty).
        return max((len(point_set) for point_set in line_hash.values()), default=0)

    def line_from_points(self, point1, point2):
        """Line through two points; vertical lines use slope=inf, intercept=x."""
        if point1.x == point2.x:
            return Line(math.inf, point1.x)
        slope = (point2.y - point1.y) / (point2.x - point1.x)
        # BUG FIX: intercept is y - slope * x.  The original computed
        # `point2.y - point2.y * slope`, which conflated distinct parallel
        # lines (e.g. y = x and y = x + 1 both got intercept 0).
        intercept = point2.y - point2.x * slope
        return Line(slope, intercept)
# Run every test case from the TOML file against Solution and report
# PASSED/FAILED with ANSI terminal colors.
for test_case_name, input_output in toml_dict['test'].items():
    print(f'Running test: {test_case_name}')
    # inputs/outputs are stored as Python literals in the TOML file
    input_values = ast.literal_eval(input_output['input'])
    expected_output = ast.literal_eval(input_output['output'])
    actual_output = Solution(input_values).max_linear_points()
    print(f"  Input: {input_values}")
    if actual_output == expected_output:
        print("\033[92mPASSED\033[0m")
    else:
        print("\033[91mFAILED\033[0m")
        print(f"  Expected Output: {expected_output}")
        print(f"  Actual Output: {actual_output}\n\n")
| [
"charlesmcm@viewthespace.com"
] | charlesmcm@viewthespace.com |
7ce859d1aeafe754e3298ab2f867f74b8b2f75b0 | 3aa343d79d0d6286a511c8745b698471792c47e6 | /tutorial/urls.py | 4c9c09a196ddffe46b555cd6e1baa20eb7e8c286 | [] | no_license | akcezzz/Tutorial | a80e24f3e1ab7b28821599841008c76f511ad09a | bcae2c5d6661a1f4ae79db8316f08a7219a798ee | refs/heads/master | 2020-05-23T09:00:46.534817 | 2019-05-14T21:49:41 | 2019-05-14T21:49:41 | 186,698,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, path
from rest_framework import routers
from tutorial.quickstart import views
# DRF router generates the CRUD routes for the registered viewsets.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Admin, the API router at the site root, and DRF's session-login views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| [
"axil-pacha@hotmail.com"
] | axil-pacha@hotmail.com |
0eed1e43e88e22d5e74f9010387e7ad031989714 | 472baa2414822520f7cb8d491d4bf5608f765ad8 | /zqxt4396/tools/views.py | 3d5f7f76bda31af965d9c812557cadea6c386f1e | [] | no_license | Umi101108/django-projects | cdcf0c9bb8bd272e04a4b7a702f09adb16c28404 | 50edfdc3511e1de5b4a5a3e92fe9ddad932b5396 | refs/heads/master | 2021-01-12T08:20:48.113696 | 2017-06-11T14:45:20 | 2017-06-11T14:45:20 | 76,545,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the landing page template."""
    return render(request, 'index.html')
def add(request):
    """Return the sum of integer GET parameters ``a`` and ``b`` as plain text."""
    total = int(request.GET['a']) + int(request.GET['b'])
    return HttpResponse(str(total))
| [
"408465808@qq.com"
] | 408465808@qq.com |
0d8ed6f82e39bf255cdbdaf41569b7e5d76e34ca | 888f98aa6cd5c706582296b2edea5f331836a3aa | /accounts/urls.py | 8e48b97fb7b90371bf373a53068844e3bffefc83 | [] | no_license | Hibatouallah/djangoSite | 662220634da04fbd4cee0623d393df5adeb2a24a | c6f10b1a4c1334b58ee6cdc8665d844be07ffbdc | refs/heads/master | 2022-11-08T12:06:43.232710 | 2020-06-28T20:50:31 | 2020-06-28T20:50:31 | 275,660,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py |
from django.urls import path
from . import views
from django.contrib.auth.views import LoginView , LogoutView
# Account routes: home page, login/logout via Django's auth views, and a
# local registration view; logout redirects back to the home page.
urlpatterns = [
    path('',views.indexView,name="home"),
    path('login/',LoginView.as_view(),name="login_url"),
    path('register/',views.registerView,name="register_url"),
    path('logout/',LogoutView.as_view(next_page='home'),name="logout"),
] | [
"boulsane.1996@gmail.com"
] | boulsane.1996@gmail.com |
e585a401b1aefd44adfc94300e388978d4eab3a1 | 4fcd8f40868d8a3831487a9ea74cceda462c5bfe | /transferit/callbacks.py | fcfbadb8410a47b22dc38304b890129a43a82590 | [
"MIT"
] | permissive | sorenlind/transferit | 822de4a73eecbd66a58f60c27cb6909e0abc0d13 | c77ecb23a2fb6f571cba66155b0b99e4f359fa68 | refs/heads/master | 2023-04-13T23:50:02.634960 | 2021-04-28T20:09:06 | 2021-04-28T20:09:06 | 361,758,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | """Custom callbacks used during training."""
from tensorflow.keras.callbacks import Callback
import matplotlib.pyplot as plt
class PlotLosses(Callback):
    """Keras callback that saves a train/validation loss curve each epoch.

    The figure is written to ``<output_folder>/<model_name>_loss.png`` and
    overwritten after every epoch.
    """
    def __init__(self, output_folder, model_name):
        # output_folder is combined with `/` below, so it is presumably a
        # pathlib.Path -- TODO confirm at the call site.
        self.output_folder = output_folder
        self.model_name = model_name

    def on_train_begin(self, logs=None):
        """Reset the accumulated history at the start of training."""
        self.i = 0
        self.x = []
        self.losses = []
        self.val_losses = []
        self.logs = []

    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's losses and refresh the plot on disk."""
        # `logs=None` replaces the original mutable default `logs={}`, a
        # dict shared between calls -- the classic default-argument pitfall.
        logs = logs if logs is not None else {}
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get("loss"))
        self.val_losses.append(logs.get("val_loss"))
        self.i += 1

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(self.x, self.losses, label="loss")
        ax.plot(self.x, self.val_losses, label="val_loss")
        ax.legend()
        fig.savefig(str(self.output_folder / f"{self.model_name}_loss.png"))
        # close the figure to avoid accumulating matplotlib state across epochs
        plt.close(fig)
| [
"soren@gutsandglory.dk"
] | soren@gutsandglory.dk |
d0b8a76d43390187dc5aa8db4012da5d8af32d3d | 2ada8b8020a7a7169f087b8aa2e49ff52831a561 | /steps/common.py | 432230fac5a997dec7f9bbd28c21af0b6ef6cdaa | [] | no_license | marlonrochaeng/webAutomationBDD | a767a8d26f865afcada69c0df4cad1f7b79b9ce4 | 53b20134aaf9fc90ac25bb69083712d27a101e93 | refs/heads/master | 2020-08-21T17:55:24.326911 | 2020-04-15T01:47:30 | 2020-04-15T01:47:30 | 216,212,818 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from behave import given, when, then
from framework.webapp import WebBrowser
# Behave step: open the given URL in the framework's browser wrapper.
@given(u'I open the "{url}" url')
def step_impl_load_website(context, url):
    wb = WebBrowser(context)
wb.go_to_page(url) | [
"marlon.alencar.rocha@everis.com"
] | marlon.alencar.rocha@everis.com |
000dfc6172faa1e4cc544650996beca345692cf0 | e28b5950500edac1ec78841ba3232a352c01a5c8 | /models/map.py | dedd8757421d51de371611fa4adc288a363f098f | [] | no_license | jmarcelos/mathspace | 48c82d592dcff240fc9befc2fa6de2c58275cd43 | 37720ec287c77f0fe06aa989292ed73d5bfe26a4 | refs/heads/master | 2021-07-22T08:10:34.523126 | 2017-10-31T12:10:40 | 2017-10-31T12:10:40 | 108,726,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from math import sqrt
class Map:
    """Square cost grid parsed from a string of whitespace-separated hex digits."""

    # Sentinel cost returned for coordinates outside the grid.
    MAX_COST = float('inf')

    def __init__(self, str_map):
        self._validation(str_map)
        tokens = str_map.split()
        # _validation guarantees a perfect square, so round() is exact here.
        side = round(sqrt(len(tokens)))
        values = [int(token, 16) for token in tokens]
        rows = [values[row * side:(row + 1) * side] for row in range(side)]
        # A whitespace-only map slips through validation; keep the original
        # degenerate shape of a single empty row in that case.
        self.grid = rows if rows else [[]]
        self.start_x, self.start_y = 0, 0
        self.end_x, self.end_y = len(self.grid) - 1, len(self.grid) - 1

    def get_position_value(self, position_x, position_y):
        """Cost stored at (x, y), or MAX_COST when the cell is off the grid."""
        if self.is_inside_grid(position_x, position_y):
            return self.grid[position_x][position_y]
        return self.MAX_COST

    def get_neighbours(self, position_x, position_y):
        """In-grid 4-neighbours as ((x, y), move) pairs, ordered D, U, R, L."""
        candidates = [
            ((position_x + 1, position_y), 'D'),
            ((position_x - 1, position_y), 'U'),
            ((position_x, position_y + 1), 'R'),
            ((position_x, position_y - 1), 'L'),
        ]
        return [(cell, move) for cell, move in candidates
                if self.is_inside_grid(cell[0], cell[1])]

    def is_inside_grid(self, position_x, position_y):
        """True when (x, y) lies within the grid bounds."""
        return (self.start_x <= position_x <= self.end_x
                and self.start_y <= position_y <= self.end_y)

    def _validation(self, str_map):
        """Reject non-string, empty, or non-square map descriptions."""
        if not str_map or not isinstance(str_map, str):
            raise ValueError("A valid str map should be provided")
        size_matrix = sqrt(len(str_map.split()))
        # assuming a square grid
        if round(size_matrix) != size_matrix:
            raise ValueError("Invalid map generation")

    def __repr__(self):
        rows = ["Map"]
        for line in self.grid:
            rows.append(" ".join(str(value) for value in line))
        return "\n".join(rows) + "\n"
| [
"jmarcelos@gmail.com"
] | jmarcelos@gmail.com |
6434ee69271aa8ef76600a1a8e6d60014f9b18f6 | ba1a1e90406230eeb0a86ef22a3a94a7b227b7b8 | /taskmanager/tcp_protocol/message_templates.py | b8b520e92459b14aa099bebaebb9efa8afc3f62b | [
"MIT"
] | permissive | spanickroon/Task-Management-Tools | 6e47ac05a1ff9ddf21a988cf6fc63670bf921d63 | ab8ddba79830fe46bf8e0280832f94ece97a3edb | refs/heads/master | 2022-06-10T18:51:15.235038 | 2020-05-05T23:47:18 | 2020-05-05T23:47:18 | 259,631,581 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | START_APP = '!START!'
# Control tokens of the custom TCP protocol (START_APP is defined just above).
STOP_APP = '!STOP!'  # presumably: stop the application
SEND_MSG = '!SENDMSG!'  # presumably: a message payload follows
UPD_RPOCESS = '!UPD!'  # process update; NOTE(review): "RPOCESS" typo kept -- renaming would break importers
CONNECT = '!CONNECT!'  # presumably: connection handshake
| [
"nikitakoznev@gmail.com"
] | nikitakoznev@gmail.com |
449197d603b056a7cfaf92ea38f6cbdabaf57f67 | 300fe198f4145cd10dfcd31884dc4b1aa165d097 | /experiments/test_grid_yspace.py | 327db982f4e4d5c60dbe499456267c3459f35495 | [
"BSD-3-Clause"
] | permissive | WrongWhp/mantis | 126170a9033191b970cc4e4697d4353527d25c2f | 2cf149b5bfa4f7c6dbf5aa47f1010785e886bd2c | refs/heads/master | 2020-06-14T22:00:01.472150 | 2018-10-22T17:55:45 | 2018-10-22T17:55:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,412 | py | import matplotlib.colors as mpl_colors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn.apionly as sns
from mantis import sdp_km_burer_monteiro, copositive_burer_monteiro
from experiments.utils import plot_matrix, plot_data_embedded
# Use LaTeX for all figure text (needed for the \mathbf titles below).
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{amsmath}')
# Create ../results/grid_bm/ where all figures are saved.
dir_name = '../results/'
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
dir_name += 'grid_bm/'
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
def plot_bumps_on_data(X, bumps, palette='Set1'):
    """Overlay each bump as a colored, alpha-blended layer on the embedded data.

    Each bump is a per-point activation vector; its positive part, scaled by
    its maximum, drives the per-point alpha.
    """
    plot_data_embedded(X, palette='w')
    colors = sns.color_palette(palette, n_colors=len(bumps))
    colors = [mpl_colors.to_hex(c) for c in colors]
    # Shuffle so adjacent bumps get distinct colors (makes output nondeterministic).
    np.random.shuffle(colors)
    for i, (b, c) in enumerate(zip(bumps, colors)):
        alpha = np.maximum(b, 0) / b.max()
        plot_data_embedded(X, palette=c, alpha=alpha)
def align_bumps(Y, ref_idx):
    """Circularly shift every column of Y to best correlate with column ref_idx.

    Each column is rolled by the row shift that maximizes its dot product
    with the reference column; columns whose best correlation never exceeds
    zero are left unshifted (matching the original strict `>` update rule).
    """
    reference = Y[:, ref_idx]
    n_rows = Y.shape[0]
    if n_rows == 0:
        return np.zeros_like(Y)
    # correlations[s, j] = dot(reference, column j rolled down by s rows)
    correlations = np.stack([reference.dot(np.roll(Y, shift, axis=0))
                             for shift in range(n_rows)])
    # argmax keeps the first maximum, matching the original tie-breaking
    best_shifts = np.argmax(correlations, axis=0)
    # the original only updated when corr exceeded its zero-initialized best
    best_shifts[np.max(correlations, axis=0) <= 0] = 0
    Y_aligned = np.empty_like(Y)
    for col, shift in enumerate(best_shifts):
        Y_aligned[:, col] = np.roll(Y[:, col], shift, axis=0)
    return Y_aligned
def test_grid(n_clusters=16, use_copositive=False):
    """Cluster a 16x16 grid with Burer-Monteiro (SDP-KM or copositive) and
    save the Q/Y matrices and receptive-field figures under `dir_name`."""
    # 2-D lattice of 256 points, one row per point.
    X = np.mgrid[0:16, 0:16]
    X = X.reshape((len(X), -1)).T
    labels = np.arange(len(X))

    rank = len(X)
    print(rank)
    if use_copositive:
        beta = n_clusters / len(X)
        Y = copositive_burer_monteiro(X, alpha=0.003, beta=beta, rank=rank,
                                      tol=1e-5, constraint_tol=1e-5,
                                      verbose=True)
        name = 'grid_copositive_bm'
    else:
        Y = sdp_km_burer_monteiro(X, n_clusters, rank=rank, tol=1e-6,
                                  verbose=True)
        name = 'grid_sdpkm_bm'
    Q = Y.dot(Y.T)

    # Sort columns of Y by the position of their peak for nicer plots.
    idx = np.argsort(np.argmax(Y, axis=0))
    Y = Y[:, idx]

    sns.set_style('white')
    plt.figure(figsize=(12, 4.7), tight_layout=True)
    gs = gridspec.GridSpec(1, 3)

    ax = plt.subplot(gs[0])
    plot_data_embedded(X, palette='hls', ax=ax)
    plt_title = ax.set_title('Input dataset', fontsize='xx-large')
    # plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[1])
    plot_matrix(Q, ax=ax, labels=labels, which_labels='both',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Q}$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[2])
    plot_matrix(Y, ax=ax, labels=labels, which_labels='vertical',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))

    plt.savefig('{}{}.pdf'.format(dir_name, name))

    pdf_file_name = '{}{}_plot_{}_on_data_{}{}'
    for i in range(Y.shape[1]):
        plt.figure()
        plot_bumps_on_data(X, [Y[:, i]])
        plt.savefig(pdf_file_name.format(dir_name, name, 'Y', i, '.png'),
                    dpi=300, bbox_inches='tight')
        plt.close()

    pdf_file_name = '{}{}_plot_{}_on_data_{}'
    plt.figure()
    # BUG FIX: np.random.random_integers is deprecated (removed in modern
    # NumPy) and samples [1, high] INCLUSIVE, so Y[:, Y.shape[1]] could be
    # indexed out of bounds.  randint samples valid indices in [0, shape[1]).
    bumps_locs = np.random.randint(0, Y.shape[1], size=6)
    plot_bumps_on_data(X, [Y[:, i] for i in bumps_locs], palette='Set1')
    plt.savefig(pdf_file_name.format(dir_name, name, 'Y', 'multiple.png'),
                dpi=300, bbox_inches='tight')

    Y_aligned = align_bumps(Y, Y.shape[1] // 2)

    _, ax = plt.subplots(1, 1)
    plot_matrix(Y_aligned, ax=ax)
    plt_title = ax.set_title(r'Aligned $\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))
    plt.savefig('{}{}_Y_aligned_2d.pdf'.format(dir_name, name))

    _, ax = plt.subplots(1, 1)
    ax.plot(Y_aligned)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(r'Receptive fields', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d.pdf'.format(dir_name, name))

    pos = np.arange(len(Y))
    median = np.median(Y_aligned, axis=1)
    mu = np.mean(Y_aligned, axis=1)
    sigma = np.std(Y_aligned, axis=1)

    _, ax = plt.subplots(1, 1)
    plt_mean = ax.plot(pos, mu, color='#377eb8')
    ax.fill_between(pos, np.maximum(mu - 3 * sigma, 0), mu + 3 * sigma,
                    alpha=0.3, color='#377eb8')
    plt_median = ax.plot(pos, median, '-.', color='#e41a1c')
    ax.set_xticks([])
    ax.set_yticks([])
    # np.nan instead of np.NaN: the capitalized alias was removed in NumPy 2.0.
    # (Invisible proxy artist used only to build the legend swatch.)
    plt_aux = ax.fill(np.nan, np.nan, '#377eb8', alpha=0.3, linewidth=0)
    ax.legend([(plt_mean[0], plt_aux[0]), plt_median[0]],
              [r'Mean $\pm$ 3 STD', 'Median'],
              loc='upper left', fontsize='xx-large')
    ax.set_title(r'Receptive fields summary', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d_summary.pdf'.format(dir_name, name))
if __name__ == '__main__':
    # Run the SDP-KM variant by default; flip the flag for the copositive one.
    # test_grid(use_copositive=True)
    test_grid(use_copositive=False)
    plt.show()
| [
"Lulita75"
] | Lulita75 |
7733b0f0ac3a81dfe676c533d7b32dbf6a711e97 | 8e5ebf2c0296294cc2d5850e4dbd6282601aaa26 | /backenddj/urls.py | d775e1ba446dfba1374458515861c35b6683d984 | [] | no_license | udaravimukthi/Django-learn | 0e628c46978a8d2cba0105e577d36906a8dcd508 | 6801b3621b5ed0dd3ca3750f9ef14f027e05a308 | refs/heads/main | 2023-01-30T08:44:47.509775 | 2020-12-08T17:17:19 | 2020-12-08T17:17:19 | 306,814,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | """backenddj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
#path('', include('webtemp.urls')), // for webtemp projects
path('', include('calc.urls')),
path('admin/', admin.site.urls),
]
| [
"uvlakshan@gmail.com"
] | uvlakshan@gmail.com |
124439bf89b3356762e1f31a4567ac04ce455496 | 90e77dfba83cb6f60b0712dce16d131a7b0fd002 | /projects/Project3/analyse/tools.py | 5e8edf4f1dcba818e5782bf49018266d93349e1f | [] | no_license | halvarsu/FYS3150 | 98649d65773d9694cc0728fe69d99beb66ecf486 | dd52b83aa696b43341418ebf6ad116b8dd347299 | refs/heads/master | 2022-03-08T04:51:33.386081 | 2019-11-22T15:39:40 | 2019-11-22T15:39:40 | 104,348,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import os, sys
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def enablePrint():
sys.stdout = sys.__stdout__
def printDisable(*args):
enablePrint()
print(args)
blockPrint()
| [
"halvard.sutterud@gmail.com"
] | halvard.sutterud@gmail.com |
6b3ee56fb7f4552bfebdfa4efb793cedd84f4731 | 8512ec0b778cf4efaa960ef88aad4da9e4013a9d | /pip_benchmark_python/utilities/Formatter.py | 561df3c88f38d2db10db99ddfbf665ad67bd96ab | [
"MIT"
] | permissive | pip-benchmark/pip-benchmark-python | b375bd16f0102e698a0a45edbc92fc02735220ab | d75672e940af12e2f9818607e2188490e989d8c5 | refs/heads/master | 2020-05-27T03:22:07.706723 | 2020-05-08T19:31:18 | 2020-05-08T19:31:18 | 82,516,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | # -*- coding: utf-8 -*-
import datetime
from pip_services3_commons.convert import StringConverter
class Formatter:
@staticmethod
def pad_left(value, lenght, pad_symbol):
output = ''
output += pad_symbol
output += value
output += pad_symbol
while len(output) < lenght + 2:
output = pad_symbol + output
return output
@staticmethod
def pad_right(value, lenght, pad_symbol):
output = ''
output += pad_symbol
output += value
output += pad_symbol
while len(output) < lenght + 2:
output = pad_symbol + output
return output
@staticmethod
def format_number(value, decimals=2):
value = value or 0
return str(round(value, decimals or 2))
@staticmethod
def format_date(date):
date = date or datetime.datetime.now()
value = StringConverter.to_string(date)
pos = value.index('T')
return value[0:pos]
@staticmethod
def format_time(date):
date = date or datetime.datetime.now()
value = StringConverter.to_string(date)
pos = value.index('T')
value = value[pos + 1:]
pos = value.index('.')
return value[0:pos] if pos > 0 else value
@staticmethod
def format_time_span(ticks):
ticks = ticks * 1000
millis = str(int(round((ticks % 1000), 0)))
seconds = str(int(round((ticks / 1000) % 60, 0)))
minutes = str(int(round(((ticks / 1000) / 60) % 60, 0)))
hours = str(int(round((ticks / 1000 / 60 / 60), 0)))
return '{}:{}:{}:{}'.format(hours, minutes, seconds, millis)
| [
"anastasf/2gmail.com"
] | anastasf/2gmail.com |
a5680836916c2ce43cd2b4b36b019cde8f18cee4 | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic/mi/pcnn.py | 825d23c100525d15bf520d848194da8230315155 | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | #!/usr/bin/python
import sys
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.contrib.experiments.callback import CustomCallback
from arekit.contrib.networks.multi.configurations.max_pooling import MaxPoolingOverSentencesConfig
from arekit.contrib.networks.multi.architectures.max_pooling import MaxPoolingOverSentences
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.experiments.multi.model import MultiInstanceTensorflowModel
from arekit.contrib.experiments.nn_io.rusentrel import RuSentRelBasedNeuralNetworkIO
from arekit.contrib.experiments.engine import run_testing
from rusentrel.mi_names import MaxPoolingModelNames
from rusentrel.classic.ctx.pcnn import ctx_pcnn_custom_config
from rusentrel.classic.common import \
classic_common_callback_modification_func, \
classic_mi_common_config_settings
def mi_pcnn_custom_config(config):
ctx_pcnn_custom_config(config.ContextConfig)
config.fix_context_parameters()
def run_testing_pcnn(name_prefix=u'',
cv_count=1,
model_names_classtype=MaxPoolingModelNames,
network_classtype=MaxPoolingOverSentences,
config_classtype=MaxPoolingOverSentencesConfig,
custom_config_func=mi_pcnn_custom_config,
custom_callback_func=classic_common_callback_modification_func):
run_testing(full_model_name=name_prefix + model_names_classtype().PCNN,
create_network=lambda: network_classtype(context_network=PiecewiseCNN()),
create_config=lambda: config_classtype(context_config=CNNConfig()),
create_nn_io=RuSentRelBasedNeuralNetworkIO,
cv_count=cv_count,
create_model=MultiInstanceTensorflowModel,
evaluator_class=TwoClassEvaluator,
create_callback=CustomCallback,
experiments_io=RuSentRelBasedExperimentsIOUtils(),
common_callback_modification_func=custom_callback_func,
custom_config_modification_func=custom_config_func,
common_config_modification_func=classic_mi_common_config_settings)
if __name__ == "__main__":
run_testing_pcnn()
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
ae9f47dcd6973ca4c8e603f1503be4d5ca8b26ce | a9063fd669162d4ce0e1d6cd2e35974274851547 | /test/test_role_members_add.py | ed565058c42a11f8a5eb9894159405db3ff757a7 | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.role_members_add import RoleMembersAdd # noqa: E501
from swagger_client.rest import ApiException
class TestRoleMembersAdd(unittest.TestCase):
"""RoleMembersAdd unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRoleMembersAdd(self):
"""Test RoleMembersAdd"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.role_members_add.RoleMembersAdd() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"github@rootalley.com"
] | github@rootalley.com |
3e1303281faa8390bc75072c73e0d996ebfdb03e | 8d9b85f92a934c57306f13d6bdddfe2c0c04c101 | /Lessons/rand_tmp.py | ecb43bf0a21835931e8c1505361c3d54ec0d0ce0 | [] | no_license | estherica/wonderland | 640dcbce9343753ecde9f87b03fdebdc7950c49a | 458e77f7e20b8852bc18fd97add4f62558d175c7 | refs/heads/master | 2022-12-07T16:19:54.727623 | 2020-08-25T14:13:19 | 2020-08-25T14:13:19 | 284,969,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from random import randint
print("Random numbers game\n")
num1=(randint(1,37))
num2=(randint(1,37))
print("1st number: " + str(num1) + "\n2nd number: " + str(num2) + "\n")
if (num1==num2):
print("You won 100$! \n")
else:
print("Maybe next time...")
print("\nBye-bye!") | [
"belleshamharoth@gmail.com"
] | belleshamharoth@gmail.com |
5a430ef971af1b67af314b64ae6eac1b2d348931 | 28f726ae55c94ad559aba289f5e3f8f51c966a4d | /导出数据.py | c0fbbae723b55cecabffaaba977e678da8dae968 | [] | no_license | bruce994/python_training | 546ed4f27ef8da7a9f94f7b8f4db9100ffeae137 | 12ba20f8ef662ef2e8cc3018ed1254c33e75375b | refs/heads/master | 2021-01-10T12:54:28.306911 | 2019-09-18T03:15:16 | 2019-09-18T03:15:16 | 43,752,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | #!/usr/bin/env python
#-*- coding:utf-8-*-
import socket
import thread
import time,os,shutil,platform,datetime,sys,re
import MySQLdb
import sqlite3
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='test',charset='utf8', init_command='SET NAMES UTF8')
cursor = conn.cursor()
cursor.execute("select title,body,id from jzwj_archives as a join jzwj_addonarticle as b on a.id=b.aid where title <> '' and id<8542 order by id desc ")
for row in cursor.fetchall():
title = row[0]
content = row[1].encode('utf-8')
id = row[2]
title = title.replace("?","")
title = title.replace(",","")
title = title.replace("!","")
title = title.replace(".","")
title = title.replace("\"","")
title = title.replace("|","")
title = title.replace("/","")
tmp2=''
for x in title:
tmp2 += x + ' '
tmp2 = tmp2[:-1] + '.txt'
try:
file_write = open(tmp2, 'wb')
except Exception, e:
continue
else:
reps = [r"<[^>]+>",r" "]
for rep in reps:
regex = re.compile(rep)
content = regex.sub("", content)
file_write.write(content)
file_write.close
print str(id) + ":" +tmp2
conn.close()
| [
"noreply@github.com"
] | bruce994.noreply@github.com |
5643935e9ef0b3663b510e3177bffe98981c5630 | 650bd88bf5da6b4105d84d0ef97434a4f4512790 | /nn_meter/prediction/predictors/kernel_predictor.py | c6ed0b812841e50f50bc65ba2960b1b525302987 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | JiahangXu/nn-Meter | 3a0303c08f59ca91673047fe6dcd5cb052ebc4d3 | c11b8223ecf8b5ba881528071a8ae18df80584ba | refs/heads/main | 2023-08-25T14:57:05.299811 | 2021-10-12T10:15:36 | 2021-10-12T10:15:36 | 393,234,662 | 0 | 0 | MIT | 2021-08-06T03:20:11 | 2021-08-06T03:20:10 | null | UTF-8 | Python | false | false | 13,250 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from sklearn.ensemble import RandomForestRegressor
def get_model(hardware, kernel):
model = None
if kernel == "convbnrelu":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=70,
n_estimators=320,
min_samples_leaf=1,
min_samples_split=2,
max_features=6,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=80,
n_estimators=550,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
n_jobs=32,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=100,
n_estimators=500,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
n_jobs=32,
random_state=10,
)
if kernel == "dwconvbnrelu":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=240,
min_samples_leaf=1,
min_samples_split=2,
max_features=6,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=40,
n_estimators=240,
min_samples_leaf=1,
min_samples_split=2,
max_features=7,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=100,
n_estimators=650,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
n_jobs=32,
random_state=10,
)
if kernel == "fc":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=70,
n_estimators=330,
min_samples_leaf=1,
min_samples_split=2,
max_features=4,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=70,
n_estimators=330,
min_samples_leaf=1,
min_samples_split=2,
max_features=4,
oob_score=True,
n_jobs=32,
random_state=10,
)
if kernel == "channelshuffle":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "se":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=20,
n_estimators=290,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=110,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "maxpool":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=210,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if kernel == "globalavgpool":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=70,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "hswish":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=110,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "avgpool":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=390,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if kernel == "bnrelu":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=570,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "relu":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "bn":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=370,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=390,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
if kernel == "concat":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=100,
n_estimators=690,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if hardware == "gpu":
model = RandomForestRegressor(
max_depth=100,
n_estimators=690,
min_samples_leaf=1,
min_samples_split=2,
max_features=5,
oob_score=True,
random_state=10,
)
if kernel == "addrelu":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=570,
min_samples_leaf=1,
min_samples_split=2,
max_features=3,
oob_score=True,
random_state=10,
)
if hardware == "addrelu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=570,
min_samples_leaf=1,
min_samples_split=2,
max_features=3,
oob_score=True,
random_state=10,
)
if hardware == "vpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=570,
min_samples_leaf=1,
min_samples_split=2,
max_features=3,
oob_score=True,
random_state=10,
)
if kernel == "split":
if hardware == "cpu":
model = RandomForestRegressor(
max_depth=50,
n_estimators=190,
min_samples_leaf=1,
min_samples_split=2,
max_features=2,
oob_score=True,
random_state=10,
)
return model
| [
"lzhani@microsoft.com"
] | lzhani@microsoft.com |
9cbccdf6741a644e2d43e78594b58ded66dc35c4 | af992da82e277bf3982b003a0fb8b6f65d4311b4 | /dataAnalysis/fit_task.py | cc9226d71dd6fbcd41bb6154583bc0eb3d2e0c11 | [
"MIT"
] | permissive | emailhy/lab5 | 5529c7c388111f16215262f7e45a3ba7201b767c | c0f499bf396d228290ce6d06fc90567e81cb638c | refs/heads/master | 2021-05-10T07:37:35.532945 | 2017-10-09T04:17:56 | 2017-10-09T04:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from dataAnalysis.prmodel import FormulationDataModel
from math import ceil, floor
import numpy as np
import json
from app import celery
from redis import Redis
from datetime import datetime
r = Redis(host='127.0.0.1')
@celery.task
def fit_model_task(f_id, training_uuid, logging_uuid, epochs=100):
fdm = FormulationDataModel(f_id)
model, fit_history = fdm.fit_model(logging_uuid, epochs=epochs)
data_traces, grid_traces = fdm.get_formulation_predict_data()
# save model with a format name like 2017-07-12_20-38-39_loss-0.0118556629749.hdf5
model_name = '%s_loss-%s.hdf5' % (datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), str(fit_history.history['loss'][-1]))
fdm.save_model(model=model, model_name=model_name)
result = json.dumps({'status': 'success',
'formulation_id': f_id,
'data_traces': data_traces,
'grid_traces': grid_traces,
'model_name': model_name})
r.set(training_uuid, result)
r.set(logging_uuid, json.dumps({'model_state': 'trained'}))
| [
"qinzishi@gmail.com"
] | qinzishi@gmail.com |
cad08c6af20f321507af6bc050e428731b67a33f | 7dc240e587213e4b420676c60aa1b24905b1b2e4 | /src/app/tests/mailchimp/conftest.py | d5af1f4a3624389007aae35e1b133692b303f6ce | [
"MIT"
] | permissive | denokenya/education-backend | 834d22280717f15f93407108846e2eea767421c8 | 3b43ba0cc54c6a2fc2f1716170393f943323a29b | refs/heads/master | 2023-08-27T09:07:48.257108 | 2021-11-03T00:19:04 | 2021-11-03T00:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import pytest
import requests_mock
from app.integrations.mailchimp import AppMailchimp, MailchimpMember
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _set_mailchimp_credentials(settings):
settings.MAILCHIMP_API_KEY = 'key-us05'
settings.MAILCHIMP_CONTACT_LIST_ID = '123cba'
@pytest.fixture
def mailchimp():
client = AppMailchimp()
with requests_mock.Mocker() as http_mock:
client.http_mock = http_mock
yield client
@pytest.fixture
def mailchimp_member(user):
return MailchimpMember.from_django_user(user)
@pytest.fixture
def post(mocker):
return mocker.patch('app.integrations.mailchimp.http.MailchimpHTTP.post')
@pytest.fixture
def user(mixer):
return mixer.blend('users.User', email='test@e.mail', first_name='Rulon', last_name='Oboev')
| [
"noreply@github.com"
] | denokenya.noreply@github.com |
09a5dcf778c742d075bd8decf005f393a6b3b6e6 | e6d1bbac91b97ee7a9d028c3aafa5d85a0ee593c | /Python04Month/chapter/chapter3/demo/code/3-1_abnormal_check.py | bd08daf230d7e50525b8458610580eb8e1138662 | [] | no_license | LiuJingGitLJ/PythonSuanFa_2 | 82159043523d6fe69beef7f86421cd4be2242919 | 0afba93c4c29231bc6c2aaf6e4663beee2b5cbbb | refs/heads/master | 2021-09-20T13:49:08.521080 | 2018-08-10T06:13:22 | 2018-08-10T06:13:22 | 124,337,675 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | #-*- coding: utf-8 -*-
import pandas as pd
catering_sale = '../data/catering_sale.xls' #餐饮数据
data = pd.read_excel(catering_sale, index_col = u'日期') #读取数据,指定“日期”列为索引列
print(data)
import matplotlib.pyplot as plt #导入图像库
plt.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False #用来正常显示负号
plt.figure() #建立图像
p = data.boxplot(return_type='dict') #画箱线图,直接使用DataFrame的方法
x = p['fliers'][0].get_xdata() # 'flies'即为异常值的标签
y = p['fliers'][0].get_ydata()
y.sort() #从小到大排序,该方法直接改变原对象
#用annotate添加注释
#其中有些相近的点,注解会出现重叠,难以看清,需要一些技巧来控制。
#以下参数都是经过调试的,需要具体问题具体调试。
for i in range(len(x)):
if i>0:
plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.05 -0.8/(y[i]-y[i-1]),y[i]))
else:
plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.08,y[i]))
plt.show() #展示箱线图
| [
"15201078137@163.com"
] | 15201078137@163.com |
b5ce7163b13168c36c12bcfe8d7bc802301482c5 | 935e9d6d806f507eb541a88de731b2b16b0cc6c9 | /Pages/TeamPage.py | fc8b3995c2d004271bce5de45eab28c4b6dd717d | [] | no_license | Anandqualwebs/AgileSportz | 27ff864115907c7b80466ad0ece955add6054642 | e59dbb26970f3d486507b8c968068fb3b1ae7069 | refs/heads/master | 2020-07-30T20:13:50.390846 | 2019-09-23T11:56:48 | 2019-09-23T11:56:48 | 210,344,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,967 | py | import sys
import os
sys.path.append(os.path.dirname(sys.path[0]+"\Locators"))
from Locators.Locators import locators
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
class TeamPage():
def __init__(self, driver):
self.driver = driver
self.team_tab_xpath = locators.team_tab_xpath
self.team_name_input_xpath = locators.team_name_input_xpath
self.team_location_input_xpath = locators.team_location_input_xpath
self.select_league_xpath = locators.select_league_xpath
self.select_founded_year_xpath = locators.select_founded_year_xpath
self.number_of_sprints_xpath = locators.number_of_sprints_xpath
self.sub_domain_input_xpath = locators.sub_domain_input_xpath
self.add_new_team_admin_checkbox = locators.add_new_team_admin_checkbox
self.team_admin_first_name_css = locators.team_admin_first_name_css
self.team_admin_last_name_css = locators.team_admin_last_name_css
self.team_admin_email_css = locators.team_admin_email_css
self.team_admin_user_name_css = locators.team_admin_user_name_css
self.add_new_league_admin_checkbox = locators.add_new_league_admin_checkbox
self.league_admin_first_name_css = locators.league_admin_first_name_css
self.league_admin_last_name_css = locators.league_admin_last_name_css
self.league_admin_email_css = locators.league_admin_email_css
self.league_admin_user_name_css = locators.league_admin_user_name_css
self.table_team_names_xpath = locators.table_team_names_xpath
self.table_team_location_xpath = locators.table_team_location_xpath
self.table_team_founded_year_xpath = locators.table_team_founded_year_xpath
self.table_team_admin_name_xpath = locators.table_team_admin_name_xpath
self.table_team_league_name_xpath = locators.table_team_league_name_xpath
self.table_team_league_number_of_sprints_xpath = locators.table_team_league_number_of_sprints_xpath
self.table_team_league_number_of_games_xpath = locators.table_team_league_number_of_games_xpath
def click_team_tab(self):
self.driver.find_element_by_xpath(self.team_tab_xpath).click()
def enter_team_name(self, text):
self.driver.find_element_by_xpath(self.team_name_input_xpath).send_keys(text)
def enter_team_location(self, text):
self.driver.find_element_by_xpath(self.team_location_input_xpath).send_keys(text)
def select_league(self, league):
Select(self.driver.find_element_by_xpath(self.select_league_xpath)).select_by_index(league)
def select_founded_year(self, year):
Select(self.driver.find_element_by_xpath(self.select_founded_year_xpath)).select_by_index(year)
def enter_sub_domain(self, text):
self.driver.find_element_by_xpath(self.sub_domain_input_xpath).send_keys(text)
def click_add_new_team_admin_checkbox(self):
self.driver.find_element_by_xpath(self.add_new_team_admin_checkbox).click()
def enter_new_team_admin_first_name(self, text):
self.driver.find_element_by_css_selector(self.team_admin_first_name_css).send_keys(text)
def enter_new_team_admin_last_name(self, text):
self.driver.find_element_by_css_selector(self.team_admin_last_name_css).send_keys(text)
def enter_new_team_admin_email_xpath(self, text):
self.driver.find_element_by_css_selector(self.team_admin_email_css).send_keys(text)
def enter_new_team_admin_user_name(self, text):
self.driver.find_element_by_css_selector(self.team_admin_user_name_css).send_keys(text)
def click_add_new_league_admin_checkbox(self):
self.driver.find_element_by_xpath(self.add_new_league_admin_checkbox).click()
def enter_new_league_admin_first_name(self, text):
self.driver.find_element_by_css_selector(self.league_admin_first_name_css).send_keys(text)
def enter_new_league_admin_last_name(self, text):
self.driver.find_element_by_css_selector(self.league_admin_last_name_css).send_keys(text)
def enter_new_league_admin_email_xpath(self, text):
self.driver.find_element_by_css_selector(self.league_admin_email_css).send_keys(text)
def enter_new_league_admin_user_name(self, text):
self.driver.find_element_by_css_selector(self.league_admin_user_name_css).send_keys(text)
def print_team_names(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_names_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_locations(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_location_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_founded_year(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_founded_year_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_admin_names(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_admin_name_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_league_names(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_league_name_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_sprints(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_league_number_of_sprints_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
def print_team_number_of_games(self):
consumptions = self.driver.find_elements_by_xpath(self.table_team_league_number_of_games_xpath)
amount = len(consumptions)
for r in range(0, amount):
if consumptions[r].text:
print("{}.{}".format(r + 1, consumptions[r].text))
else:
pass
| [
"envio login s.shukla@enviosystems.com"
] | envio login s.shukla@enviosystems.com |
330d07b94732fbde6570ab71a73dd30c8eebd34c | 2b9a7ada172dd7adbed7921e6787bf4d43891b36 | /sb05/package.py | 73d97e13365e4b004b6ae6c1c712f4743f3b6c8f | [] | no_license | Vinhnguyen19922/glvis | ba566d3f6117ee005ad1ce89f884631bccf40644 | 1db0c6e57c2b2a4677ab3765525f4f22b7345771 | refs/heads/master | 2020-09-05T04:45:22.064164 | 2016-10-08T07:53:16 | 2016-10-08T07:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | from PyQt5 import QtCore
from PyQt5.QtWidgets import (QGridLayout, QWidget)
from sn.qt import *
from sn.gl import *
class SB05(GLWidget):
def __init__(self, parent, width=200, height=200):
super().__init__(parent)
self._width = width; self._height = height
def initializeGL(self, path):
super().initializeGL()
self.program = self.program or Program(path)
self.va = VertexArray()
def minimumSizeHint(self): return QtCore.QSize(self._width, self._height)
def onTick(self):
self.updateGL()
keyPressEvent = Window.keyPressEvent
def start(Widget):
app = Application()
widget = Widget(None)
widget.show()
app.startTimer(timeout = 1000/60, onTick = widget.onTick)
app.run()
if __name__ == '__main__' and False:
import sb05a, sb05b, sb05c, sb05d, sb05e, sb05f, sb05g
app = Application()
app.startTimer(1000/60)
w = SB05(None)
grid = QGridLayout(w)
for r, c, W in [
(1, 0, sb05a.W), (1, 1, sb05b.W), (1, 2, sb05c.W),
(2, 0, sb05d.W), (2, 1, sb05e.W), (2, 2, sb05f.W),
(3, 1, sb05g.W) ]:
wx = W(w, width=400, height=300)
Application.addOnTick(wx.onTick)
grid.addWidget(wx, r, c)
w.setLayout(grid)
w.show()
import sys
sys.exit(app.exec_())
| [
"wakita@is.titech.ac.jp"
] | wakita@is.titech.ac.jp |
f1d3ed05dd0a8188d896cde41bfb30bf2177629c | 14a19a5dfbe5519529c097cf9606cd325549d1b3 | /metadataapp/views.py | 06c429f825f87f1b29aa747ab12d4b28781764f3 | [] | no_license | factvsankit/bbtApp | ea9d7b2c72f22b67c6e0f5dd4f0a7321d6c2d834 | 0fc5aa209bc454629f0eaf4a635b0624313bd5de | refs/heads/master | 2021-07-24T10:40:24.861572 | 2017-11-04T04:51:43 | 2017-11-04T04:51:43 | 109,467,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | import json, os
import math
from django.http import HttpResponse, JsonResponse
from content_page.models import ContentPage, ContentIndexPage
from metadataapp.models import Plant, Event, Fruit, CropPricing
from .constants import NEPALI_MONTH_INDEX, NO_DAYS_IN_MONTH, EVENT_URL
YEAR = 2073
BASE_TECHNIQUES_URL = "http://barsabharitarkari.org/en/techniques/"
from django.conf import settings
BASE_DIR = settings.BASE_DIR
LOG_FILE = os.path.join(BASE_DIR, "my_log.txt")
def add_zero_if_less_than_ten(number):
    """Zero-pad *number* to two digits for MM/DD date formatting (5 -> "05").

    A value of 0 is bumped to 1 first, because the month/day arithmetic in
    get_date_from_days can yield 0 while the date string is 1-based.

    Bug fix: the original tested ``number > 10``, so 10 itself fell through
    to the padding branch and produced the three-character string "010",
    corrupting the MM/DD/YYYY layout.
    """
    if number >= 10:
        return str(number)
    if not number:
        # 0 means "start of the month/year"; treat it as 1.
        number += 1
    return "0" + str(number)
def get_date_from_days(total_days):
    """Convert a running day count into an ``MM/DD/YYYY`` string.

    Uses flat 365-day years and 30-day months (no leap handling) —
    presumably acceptable for the approximate BS->AD conversion done
    by the callers in this module; TODO confirm.
    """
    full_years = math.floor(total_days / 365)
    leftover_days = total_days - full_years * 365
    full_months = math.floor(leftover_days / 30)
    day_of_month = round(leftover_days - full_months * 30)
    # Zero-pad month/day so the string is Java-Date compatible.
    return "{}/{}/{}".format(
        add_zero_if_less_than_ten(full_months),
        add_zero_if_less_than_ten(day_of_month),
        full_years,
    )
def obtain_english_calendar_from_event(event_object):
    """Translate an Event's Nepali (Bikram Sambat) month/week into an
    approximate English-calendar date string (MM/DD/YYYY).

    The fixed offset 20698 shifts the BS day count onto the AD epoch used
    by get_date_from_days; the week label is mapped to a day-of-month via
    NO_DAYS_IN_MONTH (e.g. Baisakh 15 contributes 15 days even though
    Baisakh itself is month 1).
    """
    bs_month_index = NEPALI_MONTH_INDEX[event_object.month]
    bs_day_in_month = NO_DAYS_IN_MONTH[event_object.week]
    bs_total_days = round(YEAR * 365 + bs_month_index * 30.5 + bs_day_in_month)
    return get_date_from_days(bs_total_days - 20698)
def get_event_url(event_object):
    """Build the techniques detail URL for an event from its slug mapping."""
    slug = EVENT_URL[event_object.event_name.strip()]
    return "{}{}/".format(BASE_TECHNIQUES_URL, slug)
def convert_event_into_dict(event_object):
    """Serialize an Event into a dict with detailURL, eventDate, name and nepaliName."""
    return {
        'detailURL': get_event_url(event_object),
        'eventDate': obtain_english_calendar_from_event(event_object),
        'name': event_object.event_name,
        'nepaliName': event_object.nepali_event_name,
    }
def get_timeline_for_plant(plant_object):
    """Return the plant's events serialized via convert_event_into_dict().

    Returns an empty list when the plant has no events. The original
    re-fetched the plant by primary key before filtering events; the
    instance we already hold filters identically and saves a DB round trip.
    """
    events = Event.objects.filter(plant_events=plant_object)
    return [convert_event_into_dict(event) for event in events]
def generate_unique_name(name):
    """Normalise a unique name by turning hyphens into underscores."""
    return "_".join(name.split("-"))
def remove_paranthesis(name):
    """Return the text before the first '(' with surrounding whitespace stripped."""
    before_paren, _, _ = name.partition("(")
    return before_paren.strip()
def get_nepali_name(name):
    """Extract the Nepali name from "English (Nepali)"; fall back to *name* as-is."""
    parts = name.split("(")
    if len(parts) < 2:
        # No parenthesised part at all — same fallback the IndexError gave.
        return name
    return parts[1].split(")")[0].strip()
def get_json_from_plant(plant):
    """Serialize a Plant (plus its event timeline) into a dict.

    Returns False when serialization fails; the failure is appended to
    LOG_FILE with a timestamp so bad records can be diagnosed later.
    """
    import datetime
    try:
        return {
            'name': plant.name,
            'plantNepaliName': get_nepali_name(plant.name),
            'season': plant.season,
            'detailURL': plant.detailURL,
            'image': plant.image,
            'unique_name': generate_unique_name(plant.unique_name),
            # One DB hit; the original computed the timeline a second time
            # and discarded the result.
            'timeline': get_timeline_for_plant(plant),
        }
    except Exception as e:
        with open(LOG_FILE, "a") as f:
            f.write("Exception on {}".format(str(datetime.datetime.today())))
            f.write(str(e))
            f.write("\n")
        return False
def check_to_add(name):
    """True when *name* contains one of the whitelisted crop keywords."""
    keywords = ("Beans", "Cress", "mustard", "Fava", "Colocasia",
                "Coriander", "Cauliflower", "Bottle", "Sweet")
    return any(keyword in name for keyword in keywords)
def get_json_of_all_plants():
    """Serialize every Plant, skipping records that fail to serialize.

    get_json_from_plant() returns False on failure. The original called it
    twice per plant (once for the check, once for the append), doubling the
    serialization and DB work; reuse the first result instead.
    """
    all_plants = []
    for plant in Plant.objects.all():
        plant_json = get_json_from_plant(plant)
        if plant_json:
            all_plants.append(plant_json)
    return all_plants
def convert_fruit_into_dict(fruit_object):
    """Pick the API-facing fields off a Fruit model instance."""
    fields = ('name', 'image', 'unique_name', 'detailURL')
    return {field: getattr(fruit_object, field) for field in fields}
def get_dict_from_technique(technique_object):
    """Serialize a technique page: English/Nepali detail URLs plus names.

    The Nepali name is whatever sits between the first '(' and the next ')'
    of the title, or "" when the title has no parentheses.
    """
    name = technique_object.title.strip()
    detail_url = "http://barsabharitarkari.org/en/techniques/" + technique_object.slug
    title_parts = technique_object.title.split("(")
    if len(title_parts) > 1:
        nepali_name = title_parts[1].split(")")[0].strip()
    else:
        nepali_name = ""
    return {
        'name': name,
        'detailURL': detail_url,
        'detailNepaliURL': detail_url.replace("/en/", "/ne/"),
        'nepaliName': nepali_name,
    }
def get_json_of_all_techniques():
    """Serialize children of the "techniques" index page flagged as improved techniques."""
    technique_pages = ContentIndexPage.objects.get(slug="techniques").get_children()
    serialized = []
    for page in technique_pages:
        content_page = ContentPage.objects.get(slug=page.slug)
        if content_page.improved_technique:
            serialized.append(get_dict_from_technique(page))
    return serialized
def get_json_of_all_fruits():
    """Serialize every Fruit row for the API payload."""
    return [convert_fruit_into_dict(fruit) for fruit in Fruit.objects.all()]
def get_price_json_one_item(price_object):
    """Map a single crop's normalised name to its price (as a string)."""
    base_name = price_object.name.split("(")[0].strip()
    unique_name = base_name.lower().replace(" ", "_")
    return {unique_name: str(price_object.price)}
def get_json_of_all_prices():
    """Map every crop's normalised name to its price string."""
    def normalise(plant_name):
        return plant_name.strip().split("(")[0].strip().lower().replace(" ", "_")
    return {normalise(crop.name): str(crop.price) for crop in CropPricing.objects.all()}
def get_plants_data(request):
    """Single JSON endpoint bundling plants, fruits, techniques and prices."""
    return JsonResponse({
        "plants": get_json_of_all_plants(),
        "fruits": get_json_of_all_fruits(),
        "techniques": get_json_of_all_techniques(),
        "prices": get_json_of_all_prices(),
    })
| [
"mta.ankit@gmail.com"
] | mta.ankit@gmail.com |
231d481e63ca5e223979d6b4a158a15ed9294642 | 0e74ba41a89742cc81c6ffaab685ee7f991fd0dc | /gettingFileList/getFileListTtB.py | 36d448060c3422231d6f199b8056b3dc85179822 | [] | no_license | bigalex95/androidMalwareDetectionSystem | 661059acbb40ad56fb6ca99943c9a02a87e1362c | 542373ca7dc700fa4a569deb34d3d87ca80d4ecd | refs/heads/master | 2021-07-16T21:54:03.263225 | 2020-07-25T22:08:12 | 2020-07-25T22:08:12 | 192,614,718 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import glob
# Path where the benign test source files live.
BENIGN_PATH_TEST = '../Benign/Test/'
# Collect every .txt source file in the benign test directory.
class_benign_test = glob.glob(BENIGN_PATH_TEST + '*.txt')
# Write one file name per line. The with-block closes the file itself,
# so the original's explicit f.close() inside it was redundant.
with open('file_list_test_Benign.txt', 'w') as f:
    f.writelines("%s\n" % item for item in class_benign_test)
| [
"amanbayeva95@gmail.com"
] | amanbayeva95@gmail.com |
bf8e5d90cbc8364cd686f902be7a8ff4071e570c | 3ada098871f017f316209f0ff7751c5ac784121a | /queue/multicast/main.py | 654a82c5a59cd6f29bafbd62a650d9cd9c62072b | [
"Apache-2.0"
] | permissive | kubemq-io/python-sdk-cookbook | 1824d2da06fdf6f7d778c8ed465866a2d125266e | 0c843ec77e8923a79a0853c83915f7ee240c5ddb | refs/heads/master | 2023-07-13T06:27:36.167221 | 2021-08-21T11:29:17 | 2021-08-21T11:29:17 | 352,559,888 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | from kubemq.queue.message_queue import MessageQueue
from kubemq.queue.message import Message
def create_queue_message(meta_data, body, policy=None):
    """Build a queue Message with fixed demo tags and no attributes."""
    msg = Message()
    msg.metadata = meta_data
    msg.body = body
    msg.tags = [('key', 'value'), ('key2', 'value2')]
    msg.attributes = None
    msg.policy = policy
    return msg
def _receive_and_print(label, channel, client_id):
    """Poll one queue channel and print each received message (or the error)."""
    queue = MessageQueue(channel, client_id, "localhost:50000", 2, 1)
    try:
        res = queue.receive_queue_messages()
        if res.error:
            print(
                "'Error Received:'%s'" % (
                    res.error
                )
            )
        else:
            for message in res.messages:
                print(
                    "'Queue %s Received :%s ,Body: sending:'%s'" % (
                        label,
                        message.MessageID,
                        message.Body
                    )
                )
    except Exception as err:
        print(
            "'error sending:'%s'" % (
                err
            )
        )


if __name__ == "__main__":
    # Send one message multicast to three channels, then receive on each.
    channel = "queue.a;queue.b;queue.c"
    queue = MessageQueue(channel, "python-sdk-cookbook-queues-multicast-client", "localhost:50000")
    message = create_queue_message("queueName {}".format(channel),
                                   "some-simple-queue-multicast-message-1".encode('UTF-8'))
    try:
        sent = queue.send_queue_message(message)
        if sent.error:
            print('message enqueue error, error:' + sent.error)
        else:
            print('Send to Queue at: %d' % (
                sent.sent_at
            ))
    except Exception as err:
        print('message enqueue error, error:%s' % (
            err
        ))
    # The original repeated this receive block three times verbatim.
    _receive_and_print('A', 'queue.a', "python-sdk-cookbook-queues-multicast-client-receiver-A")
    _receive_and_print('B', 'queue.b', "python-sdk-cookbook-queues-multicast-client-receiver-B")
    _receive_and_print('C', 'queue.c', "python-sdk-cookbook-queues-multicast-client-receiver-C")
| [
"eitam.ring@kubemq.io"
] | eitam.ring@kubemq.io |
8a2e8a556542b2c6270c5ebb0463bb7eda92fe92 | e02366473ccd3ddbddd96e1fecd2f460bf07db95 | /lambdas.py | 703fd3139332731f05659d137a00f4ca418268cd | [
"MIT"
] | permissive | davidlares/python-overview | 03413e4b27107d71cc1ced122ba104a83c99e48c | 523a4fd59ecc356b95f091adbe609448f85e8aa5 | refs/heads/master | 2021-09-14T17:34:15.468977 | 2018-05-16T19:09:57 | 2018-05-16T19:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # asignar una funcion a una variable
def grados(grados):
    """Convert a Celsius temperature to Fahrenheit (C * 1.8 + 32)."""
    return grados * 1.8 + 32


# Functions are first-class objects: bind one to another name and call it.
function_variable = grados
resultado = function_variable(32)
print(resultado)

# Lambdas (anonymous functions) always return a value.
mifunc = lambda grados=0: grados * 1.8 + 32
resultado = mifunc(32)
print(resultado)
| [
"david.e.lares@gmail.com"
] | david.e.lares@gmail.com |
fc75c39c188f5c86376de80a21d363b87e28047e | a3e2201173a475c78c2b89456b916c919657ed25 | /core_contracts/rebalancing/utils/checks.py | af0638719f4a9748f3285a7ce9ac84ffdbf6a9d4 | [
"MIT"
] | permissive | subba72/balanced-contracts | a302994044dab909f9d5ef84bbee593e6e9695c7 | cd185fa831de18b4d9c634689a3c6e7b559bbabe | refs/heads/main | 2023-08-15T12:15:47.967642 | 2021-08-06T04:09:19 | 2021-08-06T04:09:19 | 396,687,527 | 0 | 0 | MIT | 2021-08-16T08:04:25 | 2021-08-16T08:04:24 | null | UTF-8 | Python | false | false | 1,361 | py | from iconservice import *
# ================================================
# Exceptions
# ================================================
class SenderNotScoreOwnerError(Exception):
    """Raised when the transaction sender is not the SCORE owner."""
    pass
class SenderNotAuthorized(Exception):
    """Raised when the transaction sender is not the configured admin."""
    pass
class SenderNotGovernance(Exception):
    """Raised when the transaction sender is not the governance address."""
    pass
class SenderNotRebalance(Exception):
    """Raised when the transaction sender is not the rebalancing contract."""
    pass
class NotAFunctionError(Exception):
    """Raised when an access-check decorator is applied to a non-function."""
    pass
def only_governance(func):
    # Decorator: only the governance address stored in self._governance may call.
    if not isfunction(func):
        raise NotAFunctionError
    @wraps(func)
    def __wrapper(self: object, *args, **kwargs):
        # msg.sender must equal the governance address read from the VarDB.
        if self.msg.sender != self._governance.get():
            raise SenderNotGovernance(self.msg.sender)
        return func(self, *args, **kwargs)
    return __wrapper
def only_owner(func):
    # Decorator: only the SCORE owner (deployer) may call.
    if not isfunction(func):
        raise NotAFunctionError
    @wraps(func)
    def __wrapper(self: object, *args, **kwargs):
        if self.msg.sender != self.owner:
            raise SenderNotScoreOwnerError(self.owner)
        return func(self, *args, **kwargs)
    return __wrapper
def only_admin(func):
    # Decorator: only the admin address stored in self._admin may call.
    if not isfunction(func):
        raise NotAFunctionError
    @wraps(func)
    def __wrapper(self: object, *args, **kwargs):
        if self.msg.sender != self._admin.get():
            raise SenderNotAuthorized(self.msg.sender)
        return func(self, *args, **kwargs)
    return __wrapper
| [
"adhikarisuyog49@gmail.com"
] | adhikarisuyog49@gmail.com |
c822aa27e0f3c129883fbe4282712e9357a385d1 | 0e1abae708fa0d0afc312bcfdc15b4d587d487e1 | /flask_library_app/models/book.py | 5546a38a4e11d46451bd9e0c0c7b12deb60a45b5 | [] | no_license | davidiakobidze/flask_library | 2ea156545401a5ba78441bcd5c3c28dd4b831446 | 92a1a15fe1fcb40513e665018dfce9ee6dae8dcd | refs/heads/master | 2023-05-11T16:02:15.743752 | 2019-06-05T11:25:00 | 2019-06-05T11:25:00 | 178,378,117 | 0 | 0 | null | 2023-05-01T20:57:26 | 2019-03-29T09:47:41 | Python | UTF-8 | Python | false | false | 1,319 | py | from flask_library_app.db import db
from flask_library_app.lib.exceptions import HandleException
# Association table for the many-to-many relation between books and authors.
book_authors = db.Table(
    'books_authors',
    db.Column('author_id', db.Integer, db.ForeignKey('authors.author_id')),
    db.Column('book_id', db.Integer, db.ForeignKey('books.book_id'))
)
class BookModel(db.Model):
    """SQLAlchemy model for a library book, linked to authors via book_authors."""
    __tablename__ = "books"
    book_id = db.Column(db.Integer, primary_key=True)
    isbn = db.Column(db.String(16))
    title = db.Column(db.String(80))
    language = db.Column(db.String(80))
    length = db.Column(db.Integer)  # page count, presumably — confirm with callers
    genre = db.Column(db.String(80))
    authors = db.relationship('AuthorModel', secondary=book_authors)
    def __init__(self, isbn, title, language, length, genre):
        self.isbn = isbn
        self.title = title
        self.language = language
        self.length = length
        self.genre = genre
    @classmethod
    def find_by_id_get(cls, book_id):
        """Return the book with *book_id*, or raise HandleException (HTTP 404)."""
        book = cls.query.filter_by(book_id=book_id).first()
        if not book:
            raise HandleException("Could not find book with id {}".format(book_id), status_code=404)
        return book
    def add_to_db(self):
        """Insert this book and commit; return its generated primary key."""
        db.session.add(self)
        db.session.commit()
        return self.book_id
    def delete_book(self):
        """Delete this book and commit."""
        db.session.delete(self)
        db.session.commit()
| [
"davidiakobidze1@gmail.com"
] | davidiakobidze1@gmail.com |
5b3165a574457eeb1f369cd70b0259bd520aec67 | 8e2404c7bcfd28329bed789839192b2c4e85ea1b | /LeetCode/Linked_List_Cycle_II.py | ca97be57324afaacc01727943d36debb9971ccae | [] | no_license | Pabitra-26/Problem-Solved | 408bd51bbffc69f8c5e1def92797c2e6f027f91d | c27de1dd6c4ad14444fa5ee911a16186c200a7f9 | refs/heads/master | 2023-07-30T16:51:28.062349 | 2021-09-27T06:06:54 | 2021-09-27T06:06:54 | 269,935,039 | 2 | 0 | null | 2021-09-27T06:06:55 | 2020-06-06T09:39:33 | Python | UTF-8 | Python | false | false | 886 | py | # Problem name: Linked List Cycle II
# Description: Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
# To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to.
# If pos is -1, then there is no cycle in the linked list.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def detectCycle(self, head: 'ListNode') -> 'ListNode':
        """Return the node where the cycle begins, or None when the list is acyclic.

        Walks the list once, remembering visited nodes in a set; the first
        node seen twice is the cycle entry. O(n) time, O(n) space.
        The original kept an unused counter and a redundant found-flag, and
        its unquoted ListNode annotations raised NameError when the class
        was imported without LeetCode's ListNode definition.
        """
        seen = set()
        node = head
        while node is not None:
            if node in seen:
                return node
            seen.add(node)
            node = node.next
        return None
"noreply@github.com"
] | Pabitra-26.noreply@github.com |
2809b47d249d56790cb08fb8a0c7d5f1fbdd146e | d53baf0a3aaa10521cfc28a7be8f2c498bc9e741 | /examples/CaffeModels/load-vgg16.py | 96780e85eac94a3b1709a479d22cf2e3faa232fd | [
"Apache-2.0"
] | permissive | qianlinjun/tensorpack | 8f6e99ba17095334de1163d6412e740642343752 | 7f505225cd41aaeee3a0b0688fe67afc0af8fb30 | refs/heads/master | 2020-03-29T22:38:22.269889 | 2018-09-25T07:20:48 | 2018-09-25T07:20:48 | 150,432,021 | 1 | 0 | Apache-2.0 | 2018-09-26T13:35:19 | 2018-09-26T13:35:18 | null | UTF-8 | Python | false | false | 3,493 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-vgg16.py
from __future__ import print_function
import cv2
import tensorflow as tf
import numpy as np
import os
import six
import argparse
from tensorpack import *
from tensorpack.dataflow.dataset import ILSVRCMeta
enable_argscope_for_module(tf.layers)
def tower_func(image):
    """Build the VGG16 graph (conv1_1 ... fc8) plus a 'prob' softmax output.

    Layer names mirror the Caffe VGG16 definition so weights converted to
    .npz map one-to-one onto the tf.layers variables created here.
    """
    # Dropout is only active while training.
    is_training = get_current_tower_context().is_training
    # All convs share 3x3 kernels, ReLU and 'same' padding via argscope.
    with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
        x = image
        x = tf.layers.conv2d(x, 64, name='conv1_1')
        x = tf.layers.conv2d(x, 64, name='conv1_2')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
        x = tf.layers.conv2d(x, 128, name='conv2_1')
        x = tf.layers.conv2d(x, 128, name='conv2_2')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool2')
        x = tf.layers.conv2d(x, 256, name='conv3_1')
        x = tf.layers.conv2d(x, 256, name='conv3_2')
        x = tf.layers.conv2d(x, 256, name='conv3_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool3')
        x = tf.layers.conv2d(x, 512, name='conv4_1')
        x = tf.layers.conv2d(x, 512, name='conv4_2')
        x = tf.layers.conv2d(x, 512, name='conv4_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool4')
        x = tf.layers.conv2d(x, 512, name='conv5_1')
        x = tf.layers.conv2d(x, 512, name='conv5_2')
        x = tf.layers.conv2d(x, 512, name='conv5_3')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool5')
        # Classifier head: two 4096-wide FC layers with dropout, then 1000 logits.
        x = tf.layers.flatten(x, name='flatten')
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
        x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
        x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
        logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
        # Added to the graph by name; fetched later via output_names=['prob'].
        tf.nn.softmax(logits, name='prob')
def run_test(path, input):
    """Load converted Caffe weights from *path*, classify image file *input*,
    and print the top-10 ImageNet predictions.

    NOTE(review): the parameter name `input` shadows the builtin; kept
    because it is part of the existing call signature.
    """
    param_dict = dict(np.load(path))
    # Caffe-converted names use /W and /b; tf.layers expects /kernel and /bias.
    param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
    predict_func = OfflinePredictor(PredictConfig(
        inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
        tower_func=tower_func,
        session_init=DictRestore(param_dict),
        input_names=['input'],
        output_names=['prob']  # prob:0 is the probability distribution
    ))
    im = cv2.imread(input)
    assert im is not None, input
    # OpenCV loads BGR; the network expects RGB at 224x224 with a batch axis.
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
    # VGG16 requires channelwise mean substraction
    VGG_MEAN = [103.939, 116.779, 123.68]
    # Means are listed BGR; reverse them to match the RGB image.
    im -= VGG_MEAN[::-1]
    outputs = predict_func(im)[0]
    prob = outputs[0]
    # Indices of the 10 highest probabilities, best first.
    ret = prob.argsort()[-10:][::-1]
    print("Top10 predictions:", ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
    # CLI: --load <weights.npz> --input <image> [--gpu <ids>]
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', required=True,
                        help='.npz model file generated by tensorpack.utils.loadcaffe')
    parser.add_argument('--input', help='an input image', required=True)
    args = parser.parse_args()
    if args.gpu:
        # Restrict TensorFlow to the requested GPUs before any session starts.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    run_test(args.load, args.input)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
b6a4a9e47571cdc8e1f355c4ff97f2f25ce41edb | ee7e42417d9d1e76b0e84e44dc6eb037adc3ebad | /.history/pet/api_20190703151654.py | 3b027aed09213348242bbcfd996055000b31003a | [] | no_license | web3-qa/pets-api | 4632127ee84a299f207d95754f409fc1e4c0013d | ee4a04e7291740ac8eb6147c305b41d27d5be29c | refs/heads/master | 2023-05-12T09:09:47.509063 | 2019-07-18T15:07:13 | 2019-07-18T15:07:13 | 197,611,701 | 0 | 0 | null | 2023-05-01T19:42:17 | 2019-07-18T15:19:59 | Python | UTF-8 | Python | false | false | 7 | py | from fl | [
"dcolmer@statestreet.com"
] | dcolmer@statestreet.com |
a29b5c5fd84534f37e17dd2410016807deff86f6 | 9c404f18c27297e5c6fe6dde50097765478e09bf | /src/blog/migrations/0007_contact.py | 48d4ef5c22e50203a8a429c09560e19633698873 | [] | no_license | rishav4101/Click_Galaxy | 6001619e25d41504cd7f27cc40a1dfd064bfd52c | 404482ce760f8422837438fbddc046575d41b351 | refs/heads/master | 2021-03-09T23:00:21.274638 | 2020-03-17T16:49:20 | 2020-03-17T16:49:20 | 246,389,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # Generated by Django 3.0.4 on 2020-03-10 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): creates the blog Contact
    # table with name/email/body columns. Do not edit by hand.
    dependencies = [
        ('blog', '0006_delete_feedback'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
            ],
        ),
    ]
| [
"rajkumarrishav4101@gmail.com"
] | rajkumarrishav4101@gmail.com |
2f9a0e5eb894f82c3246cbe316a3aaec12605ea4 | 49c715a71da32472e078be32c46ffe2c7315d253 | /TestREST_Framework/env/bin/pip2 | b6279f9dd630da82762490a0d1b35818b84ddd83 | [] | no_license | janicheen/KontrollrommetBETA | aaddc92a3b58ecb2bbed5e0e79f7d3b461fe79e4 | 00a78e5ca8079beb327ceacd7fb4a02a9011ca06 | refs/heads/master | 2020-06-24T13:57:57.848532 | 2017-07-07T23:47:47 | 2017-07-07T23:47:47 | 96,937,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/Users/Janic/Kontrollrommet/TestREST_Framework/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py"/".exe") from argv[0]
    # before delegating to pip's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"janic@online.no"
] | janic@online.no | |
f7d01c0eda3577f57ae5c0e2137ea657057871cc | 7c64785c00de294f1456a3d167727e0885af0f59 | /setup.py | 12d4c9d70662766fff071ed4bd294fb7978eeaaa | [
"Apache-2.0"
] | permissive | yh-luo/prosdk-addons-python | b5bc311d98b1c095bcf86c19d1f3c4228f27bd22 | 9335cf9a17da7673892c2b849f0884b89e8cdabf | refs/heads/master | 2020-03-19T14:49:10.745441 | 2018-06-08T16:16:18 | 2018-06-08T16:16:18 | 136,640,731 | 0 | 0 | null | 2018-06-08T16:09:54 | 2018-06-08T16:09:54 | null | UTF-8 | Python | false | false | 1,634 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='tobii_research_addons',
    version='0.1.0',
    description='Addons for the Tobii Pro SDK.',
    long_description=long_description,
    url='https://github.com/tobiipro/prosdk-addons-python',
    author='Tobii AB',
    author_email='tobiiprosdk@tobii.com',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        # "3 - Beta" is not a valid trove classifier (PyPI rejects unknown
        # classifiers); per the key above, Beta is development status 4.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Video :: Capture',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    keywords='tobii research eyetracking sdk tobiipro',
    py_modules=["tobii_research_addons"],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['tobii_research'],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    package_data={
        'sample': [],
    },
    project_urls={
        'Bug Reports': 'https://github.com/tobiipro/prosdk-addons-python/issues',
        'Source': 'https://github.com/tobiipro/prosdk-addons-python',
    },
)
| [
"pontus.nyman@tobii.com"
] | pontus.nyman@tobii.com |
16d79b54d69df57c653a5cc4fbe3d3bba8ccedce | 5f2d270bd8acddc6262a3be4e569e96e83bbf70f | /examples/h2o-classifier/train.py | 5ac44b25a4ab7a3db38a530dd61a4fc30b11f431 | [] | no_license | bchalamayya/promote-python | f42ee55f884b18da298749e01790de20aa5a4b84 | 906bf4b3ee80d5280129be048b2cd1ab83f9f8d2 | refs/heads/master | 2020-04-14T17:57:48.365691 | 2018-11-20T20:59:04 | 2018-11-20T20:59:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import h2o
from h2o.estimators import H2ORandomForestEstimator
import os
# Start (or attach to) a local H2O cluster.
h2o.init()
data = h2o.import_file('iris.csv')
# C1-C4 are the feature columns; C5 is the response (class label).
training_columns = ['C1', 'C2', 'C3', 'C4']
response_column = 'C5'
# 80/20 train/test split.
train, test = data.split_frame(ratios=[0.8])
model = H2ORandomForestEstimator(ntrees=50, max_depth=20, nfolds=10)
model.train(x=training_columns, y=response_column, training_frame=train)
# Persist the trained model under ./objects/.
save_path = os.path.realpath('.') + '/objects/'
h2o.save_model(model=model, path=save_path)
"colin.ristig@gmail.com"
] | colin.ristig@gmail.com |
754231c8d6e0ac524b966bfe25b565c8c101d363 | 79859d498c9fbb568f2eae19399a23558c3c6fd1 | /information/views/__init__.py | af1c64d210d93f1b5620c675925cedab6deefd12 | [] | no_license | Kiharaten/VirtualClassRoom | 7d04a57308587735e189d1d1c338b99fca6a3cbe | 170ecb3dbdd54a67496d0d95a8730804570c1a8b | refs/heads/master | 2023-03-13T19:57:24.925622 | 2021-02-19T03:41:50 | 2021-02-19T03:41:50 | 299,920,280 | 0 | 0 | null | 2021-02-19T03:41:51 | 2020-09-30T13:00:54 | Python | UTF-8 | Python | false | false | 367 | py | from django.shortcuts import get_object_or_404, render
# Shared template context: site name and page-title strings (Japanese UI).
context = {
    'fixed': {
        'sitename': '遠隔授業システム',
        'title': '- サイト情報 -',
    },
}
# Create your views here.
def index(request):
    # Landing page of the site-information section.
    return render(request, 'information/top.html', context)
def help(request):
    # NOTE(review): "help" shadows the builtin, but the name is referenced
    # from the URLconf, so it is kept as-is.
    return render(request, 'information/help.html', context)
"kiharaten1129@gmail.com"
] | kiharaten1129@gmail.com |
2392a6dd5a8bc0cc84ab0904642c9fb7c3252d87 | a38b90349f7e2bae1400d2db4cfc9200d369d7ba | /blog/migrations/0001_initial.py | 1f3616c21f11d71129d5f6ea46c20112985d6f94 | [] | no_license | merymeru/my-first-blog | 01d79f45a71344edfb2caba2f5c777ca9f0f6ff4 | 95ddfbd07513de9f8b1ea90aca83a58b6a937e67 | refs/heads/master | 2020-03-27T06:26:20.929721 | 2018-08-26T20:37:20 | 2018-08-26T20:37:20 | 146,105,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.0.8 on 2018-08-25 14:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog Post table, with
    # author as a FK to the project's user model. Do not edit by hand.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"merymeru@gmail.com"
] | merymeru@gmail.com |
adb77a3ed74b681351ac695e90d5e55ea1b00919 | 1798ab9a1116022e8517f77f840f93b7f3668198 | /euler056.py | d8d3f8b78c4ce727c0e93114fa55915f563cbba5 | [] | no_license | iynaix/eulerproject | c79e9b37b77fe5e14e1ed23e1fc24121cb757512 | 4d1ba226e2a6d4261ce2cf0d64ebd4b0e538e0b0 | refs/heads/master | 2021-01-19T14:30:17.960934 | 2014-09-29T01:28:39 | 2014-09-29T01:28:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from utils import digits
def euler56():
    """Project Euler 56: maximal digit sum of a**b for 1 <= a, b < 100."""
    return max(sum(digits(a ** b))
               for a in range(1, 100)
               for b in range(1, 100))
| [
"iynaix@gmail.com"
] | iynaix@gmail.com |
3e7df88f0417ba13618a0e02619b628a14db66c2 | f6a9634d65a24731f6c1ef39aaa8e59d974d79b1 | /python/exercises/flowcontrols_conditions.py | 40c8aeda2a920d4caecfe7e5accc6d8f3ee40c40 | [] | no_license | emersonmellado/devopsbc | 653a98526396c88da9cf0e9584d4a4048a9f173c | a99d84fd6569480e6ebf1d95da3844ae2dfafc26 | refs/heads/master | 2022-11-30T09:12:56.915799 | 2020-08-13T04:30:26 | 2020-08-13T04:30:26 | 266,922,507 | 1 | 0 | null | 2022-07-22T07:35:45 | 2020-05-26T02:09:37 | Python | UTF-8 | Python | false | false | 559 | py | """
Syntax:
if CONDITION:
# Condition is True
else:
# Condition is False
Usage: Condition is replaced with an expression
True path: you write logic for the true case
False path: you write logic for the false case
"""
value = input("Give me a number: ")
if isinstance(value, int):
print("All good, keep going")
else:
value = float(value)
if value == 100:
print("Value is equal to 100")
elif value>100:
print("Value is greater than 100")
elif value<100:
print("Value is less than 100")
else:
print("Value it NOT equal to 100") | [
"emersonmellado@gmail.com"
] | emersonmellado@gmail.com |
890e60f80c689b4b20df4c533f1250dfabacfa0e | f466c7d8dc1034df6dfd150b0468fe2fe45f8565 | /xls2pdf/xls2pdf.spec | 2edd20d4b1b8385715dfd8eeb6765e0d661b8c71 | [] | no_license | MarlboroLeo/utils | ebce4ca300ce4c5fe85c4a03930f12363e08627c | 7bc717cf46d8f39807dd9c294535bb16ece17f0d | refs/heads/master | 2021-06-21T01:39:11.147692 | 2021-03-25T15:51:35 | 2021-03-25T15:51:35 | 194,812,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | spec | # -*- mode: python -*-
block_cipher = None
# PyInstaller-generated spec. Analysis scans the entry script and collects
# its imports, binaries and data files.
a = Analysis(['xls2pdf.py'],
             pathex=['D:\\Leau\\code\\xls2doc'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the pure-Python modules found above.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Single-file console executable named xls2pdf (UPX-compressed).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='xls2pdf',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
| [
"mu_zhennan@126.com"
] | mu_zhennan@126.com |
50ddae41737c1856fdea70885af523908cdebab0 | d83fa072a084642ebaa40317dda61f7a2f660284 | /cleancoderscom/gateways/codecast_gateway.py | 89edbc42cf18f413af36449ce9f5bf8e0749df70 | [] | no_license | xstrengthofonex/CleanCodeCaseStudy | 479ca1f0c028f3f481635b23bf44363fd50dec18 | 312aeef9f2127033f2b9e0b4a2c41baf4e6cc01e | refs/heads/master | 2021-01-02T22:55:50.471384 | 2017-08-06T14:36:17 | 2017-08-06T14:36:17 | 99,425,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from abc import ABCMeta, abstractmethod
from typing import List, Optional
from cleancoderscom.entities.codecast import Codecast
class CodecastGateway(metaclass=ABCMeta):
    """Persistence boundary (gateway interface) for Codecast entities."""
    @abstractmethod
    def find_all_codecasts_ordered_by_date(self) -> List[Codecast]:
        """Return all codecasts; ordering by date is the implementation's contract."""
        pass
    @abstractmethod
    def find_codecast_by_title(self, title) -> Optional[Codecast]:
        """Return the codecast with the given title, or None when absent."""
        pass
| [
"xstrengthofonex@gmail.com"
] | xstrengthofonex@gmail.com |
48035def9dc27ef8655ec0557839d1a7558ed009 | 08bfc8a1f8e44adc624d1f1c6250a3d9635f99de | /SDKs/Qt/5.12.3_python_37/msvc2017_64/PySide/PySide2/scripts/uic.py | 1471f24152ba72980656c2caa300f5e965452b38 | [] | no_license | Personwithhat/CE_SDKs | cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02 | 7afbd2f7767c9c5e95912a1af42b37c24d57f0d4 | refs/heads/master | 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7342dc46431b086d9ffeed1ae7e528d3b0e53a3dc1ccd79003825db7ec8dad8e
size 2880
| [
"personwithhats2@Gmail.com"
] | personwithhats2@Gmail.com |
ac8fff68a489144a8de93d5312a8f51903d2b38c | dc5fd106270d1e81f9eefcc542695c4bb1f8c691 | /customers/migrations/0010_auto_20160411_0245.py | 643d4c1d61597f5db2eafdb5acd5ed8c4a6139eb | [] | no_license | iblogc/backend | 3bd134701cc2a6dbcf4438026693814524f07cc2 | e7a5f85eaa57765cf91a62f022ea64b61ce56ffd | refs/heads/master | 2021-01-21T08:57:36.300994 | 2016-09-21T05:34:35 | 2016-09-21T05:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-11 02:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.4: adds approvelog.action_user and
    # customeraccount.gender, and loosens two existing fields.
    # Do not edit by hand.
    dependencies = [
        ('customers', '0009_accountkey'),
    ]
    operations = [
        migrations.AddField(
            model_name='approvelog',
            name='action_user',
            field=models.CharField(blank=True, default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='customeraccount',
            name='gender',
            field=models.IntegerField(choices=[(0, b'\xe7\x94\xb7'), (1, b'\xe5\xa5\xb3')], default=0),
        ),
        migrations.AlterField(
            model_name='accountkey',
            name='account',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='key', to='customers.CustomerAccount'),
        ),
        migrations.AlterField(
            model_name='customeraccount',
            name='register_date',
            field=models.DateTimeField(blank=True, default=None, null=True),
        ),
    ]
| [
"14841787@qq.com"
] | 14841787@qq.com |
bc8c5f17ce1d0ec1610763eeda69ec813f6f4a2f | 4dbc1e4a7115b834bbf239fd5254adf293b61516 | /vfmflathub/__init__.py | 973ac4103d9eea129e43384235b067edb27a0f4f | [
"MIT",
"CC-BY-SA-4.0"
] | permissive | sharkwouter/vaporos-flatpak-manager | 32d398ec0b66494471d0c63eea088504efa65dd1 | da1dce2a806fdb51aa9366408ace50c28fbc3ff6 | refs/heads/master | 2020-07-06T05:34:16.634849 | 2019-10-11T18:23:06 | 2019-10-11T18:23:06 | 202,908,134 | 2 | 0 | MIT | 2019-09-10T14:58:50 | 2019-08-17T16:51:10 | Python | UTF-8 | Python | false | false | 194 | py | from vfmflathub.api import get_applications
from vfmflathub.application import Application
from vfmflathub.flatpak import add_flathub, get_installed_applications, install, uninstall, update_all
| [
"wwijsman@live.nl"
] | wwijsman@live.nl |
a11c6f9099d8b70366f5afa7c539fef9e9c2c750 | 32e97c6f83142d6fc365749a42356e689ea2fa70 | /mk/com/dragan/nupic/result_generators/AbstractResultGenerator.py | 94e470c3b50269e2745870f4e50a4d438e3c7bcd | [] | no_license | inside-dragan/magisterska | 7168f9f8864c2d214cb764abc79d42983c4c0d9b | 334408bc4a89371f8c63880a9865ea2f37054d50 | refs/heads/master | 2016-09-06T06:54:30.052230 | 2013-05-21T21:41:37 | 2013-05-21T21:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,780 | py | '''
Created on Jun 7, 2012
@author: dzaharie
'''
from mk.dragan.config.Params import Params
from mk.dragan.config.ResultContainer import ResultContainer
from mk.dragan.utils.CsvUtils import writeData
from mk.dragan.utils.DatabaseUtils import dbgetlist, dbsetlist
import logging
import os
import shutil
import traceback
log = logging.getLogger('Abstract Result Generator')
class AbstractResultGenerator(object):
    # Template-method base class: subclasses supply node validation and the
    # stopping criteria for the max-distance parameter sweep.
    __resultFolder = None
    _executor = None
    def __init__(self, executor):
        # The executor runs the NuPIC experiment; stored for subclass use.
        self._executor = executor
def _areValidNodes(self):
raise NotImplementedError()
def _shouldStopIncreasingMaxDistance(self):
raise NotImplementedError()
def _shouldStopDecreasingMaxDistance(self):
raise NotImplementedError()
def _getLevel1BottomUpOut(self):
raise NotImplementedError()
def _getSigmas(self):
raise NotImplementedError()
def _createData(self, dataCreator):
outPath = os.getcwd() + '/input'
if os.path.exists(outPath):
shutil.rmtree(outPath)
os.mkdir(outPath)
writeData(dataCreator.getCat(), outPath + '/cat.csv')
inputs = dataCreator.getInputs()
for i in range(0, len(inputs)):
writeData(inputs[i], outPath + '/input' + str(i+1) + '.csv')
def _strResult(self, dictionaryList):
result = ''
for dictionary in dictionaryList:
result += "level 1) %0.6f" % dictionary['max-distance'] + ' '
result += "%0.6f" % dictionary['sigma'] + ' '
result += "level 2) %0.6f" % dictionary['max-distance2'] + ' '
result += "%0.6f" % dictionary['sigma2'] + ' '
result += "result) %0.2f" % dictionary['correct'] + ' '
result += "%0.2f" % dictionary['unknown'] + ' '
result += '\n'
return result
def _isValidNode(self, nodeName):
coincidences = ResultContainer().coincidences[nodeName]['num']
largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
return coincidences >= 10 and largestGroupSize > 1
def _isValidResult(self):
perc = ResultContainer().getLatestResult()['correct']
unknown = ResultContainer().getLatestResult()['unknown']
return perc > 55 and unknown < 40
def _shouldStopIncreasingMaxDistanceForNode(self, nodeName):
if ResultContainer().isEmpty():
return False
coincidences = ResultContainer().coincidences[nodeName]['num']
largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
log.info("stop increasing? largestGroupSize=" + str(largestGroupSize) + " coincidences=" + str(coincidences))
return coincidences < 10 or largestGroupSize * 3 > coincidences;
def _shouldStopDecreasingMaxDistanceForNode(self, nodeName):
if ResultContainer().isEmpty():
return False
largestGroupSize = ResultContainer().coincidences[nodeName]['size-of-largest']
coincidences = ResultContainer().coincidences[nodeName]['num']
log.info("stop decreasing? largestGroupSize=" + str(largestGroupSize) + " coincidences=" + str(coincidences))
return largestGroupSize < 2 or coincidences > self._getLevel1BottomUpOut()
def _calculateMaxDistances(self):
result = []
maximal = 0.01
while not self._shouldStopIncreasingMaxDistance():
Params().MAX_DISTANCE = maximal
try:
log.info('calculating max distance. trying: ' + str(maximal))
self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
except RuntimeError:
log.error('error thrown for maxDistance=' + str(maximal))
maximal *= 2
minimal = 50.0
while not self._shouldStopDecreasingMaxDistance():
Params().MAX_DISTANCE = minimal
try:
log.info('calculating max distance. trying: ' + str(minimal))
self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
except RuntimeError:
log.error('error thrown for maxDistance=' + str(minimal))
log.error(traceback.format_exc())
break #error is thrown because so small maxDistance is not allowed any more
minimal /= 2
log.info("max distance calculated in the range of: " + str(minimal) + " - " + str(maximal))
if maximal > minimal:
step = (maximal - minimal) / 10
for i in range(0, 11):
result.append(minimal + step*i)
return result
def _initGenerateResult(self, dataCreator):
if dataCreator != None:
self._createData(dataCreator)
Params().BOTTOM_UP_OUT = self._getLevel1BottomUpOut()
ResultContainer().clear()
def generateResult(self, dataCreator):
self._initGenerateResult(dataCreator)
result = ''
pairs = []
maxDistances = dbgetlist('maxDistances', dataCreator.getDescription())
if not maxDistances:
maxDistances = self._calculateMaxDistances()
dbsetlist('maxDistances', dataCreator.getDescription(), maxDistances)
ResultContainer().clear()
for s in self._getSigmas():
for d in maxDistances:
log.info("trying: distance=%s sigma=%s" % (d, s))
Params().MAX_DISTANCE = d
Params().SIGMA = s
try:
self._executor.executeSupervised(execEval=True, execTest=False, execVal=False)
if (self._isValidResult() and self._areValidNodes()):
pairs.append((d, s))
except RuntimeError:
log.error('error thrown for maxDistance=' + str(d) + ' and sigma=' + str(s))
log.error(traceback.format_exc())
r = ResultContainer().result
result += 'training results: \n' + self._strResult(r)
ResultContainer().result = []
for pair in pairs:
d = pair[0]
s = pair[1]
log.info("testing combination: distance=%s sigma=%s" % (d, s))
Params().MAX_DISTANCE = d
Params().SIGMA = s
self._executor.executeSupervised(execEval=False, execTest=True, execVal=False)
r = ResultContainer().result
result += 'testing results: \n' + self._strResult(r)
found = []
for line in r:
if line['unknown'] < 40: #zemi gi samo tie so unknown pomalku od 40%
found.append(line)
if len(found) > 0:
maxx = found[0]
for x in found:
if x['correct'] > maxx['correct']:
maxx = x
result += 'best testing result: ' + self._strResult([maxx])
ResultContainer().result = []
Params().MAX_DISTANCE = maxx['max-distance']
Params().SIGMA = maxx['sigma']
self._executor.executeSupervised(execEval=False, execTest=False, execVal=True)
validationResult = ResultContainer().getLatestResult()
result += 'validation result: ' + self._strResult([validationResult])
info = dataCreator.getDescription()
print 'tesing combination: ' + str(info)
self.addToFile(str(info) + '\n')
self.addToFile(result + '\n\n\n')
print result
return result
| [
"dragan.zahariev@inside-solutions.ch"
] | dragan.zahariev@inside-solutions.ch |
fca8833ff2ffcf10a7e5395e8b705cd0a33fad29 | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/building/DistributedAnimDoor.py | 37bb7065eba4aa04a774aaff39c4ee732815e3bb | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,294 | py | from pandac.PandaModules import NodePath, VBase3
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Parallel, Sequence, Wait, HprInterval, LerpHprInterval, SoundInterval
from toontown.building import DistributedDoor
from toontown.building import DoorTypes
if __debug__:
import pdb
class DistributedAnimDoor(DistributedDoor.DistributedDoor):
    """Client-side door attached to an animated (Actor-based) building.

    Overrides the door open/close state handlers to drive the building
    Actor's door joints with lerp intervals instead of static geometry.
    NOTE(review): the control flow looks decompiled (e.g. a `continue`
    directly after `break`); code is kept byte-identical here.
    """
    def __init__(self, cr):
        DistributedDoor.DistributedDoor.__init__(self, cr)
        # debug hook: expose the latest anim door globally
        base.animDoor = self
    def getBuilding(self):
        """Find (and cache in self.building) the animated building node."""
        if 'building' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                searchStr = '**/??' + \
                    str(self.block) + ':animated_building_*_DNARoot;+s'
                self.notify.debug('searchStr=%s' % searchStr)
                self.building = self.cr.playGame.hood.loader.geom.find(
                    searchStr)
            else:
                self.notify.error(
                    'DistributedAnimDoor.getBuiding with doorType=%s' %
                    self.doorType)
        return self.building
    def getDoorNodePath(self):
        """Return a cached copy of the building's door_origin node.

        A standalone NodePath is created at the same pos/hpr so later code
        can parent things to it without touching the original joint.
        """
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            if hasattr(self, 'tempDoorNodePath'):
                return self.tempDoorNodePath
            else:
                building = self.getBuilding()
                doorNP = building.find('**/door_origin')
                self.notify.debug('creating doorOrigin at %s %s' % (str(
                    doorNP.getPos()), str(doorNP.getHpr())))
                otherNP = NodePath('doorOrigin')
                otherNP.setPos(doorNP.getPos())
                otherNP.setHpr(doorNP.getHpr())
                otherNP.reparentTo(doorNP.getParent())
                self.tempDoorNodePath = otherNP
        else:
            self.notify.error(
                'DistributedAnimDoor.getDoorNodePath with doorType=%s' %
                self.doorType)
        return otherNP
    def setTriggerName(self):
        """Rename the building's door trigger so collision events route to
        this distributed object's trigger name."""
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            building = self.getBuilding()
            if not building.isEmpty():
                doorTrigger = building.find('**/door_0_door_trigger')
                if not doorTrigger.isEmpty():
                    doorTrigger.node().setName(self.getTriggerName())
            else:
                self.notify.warning('setTriggerName failed no building')
        else:
            self.notify.error('setTriggerName doorTYpe=%s' % self.doorType)
    def getAnimBuilding(self):
        """Find (and cache) the AnimProp wrapper whose Actor parents this
        building's geometry, via the hood loader's animPropDict."""
        if 'animBuilding' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                bldg = self.getBuilding()
                key = bldg.getParent().getParent()
                animPropList = self.cr.playGame.hood.loader.animPropDict.get(
                    key)
                if animPropList:
                    for prop in animPropList:
                        if bldg == prop.getActor().getParent():
                            self.animBuilding = prop
                            break
                        continue
                else:
                    self.notify.error('could not find' + str(key))
            else:
                self.notify.error('No such door type as ' + str(self.doorType))
        return self.animBuilding
    def getBuildingActor(self):
        """Return the Actor that animates this building."""
        result = self.getAnimBuilding().getActor()
        return result
    def enterOpening(self, ts):
        """State handler: swing the right door leaf open (avatar entering)."""
        bldgActor = self.getBuildingActor()
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterOpening(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorOpen-%d' % self.doId
        # swing direction depends on which way the door is hinged
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Parallel(
            SoundInterval(self.openSfx, node=rightDoor),
            Sequence(
                HprInterval(rightDoor, VBase3(0, 0, 0)),
                Wait(0.40000000000000002),
                LerpHprInterval(
                    nodePath=rightDoor,
                    duration=0.59999999999999998,
                    hpr=VBase3(h, 0, 0),
                    startHpr=VBase3(0, 0, 0),
                    blendType='easeInOut')),
            name=trackName)
        self.doorTrack.start(ts)
    def enterClosing(self, ts):
        """State handler: swing the right door leaf closed; announce the
        door-done event once closed if a request is pending."""
        bldgActor = self.getBuildingActor()
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterClosing(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorClose-%d' % self.doId
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Sequence(
            LerpHprInterval(
                nodePath=rightDoor,
                duration=1.0,
                hpr=VBase3(0, 0, 0),
                startHpr=VBase3(h, 0, 0),
                blendType='easeInOut'),
            SoundInterval(self.closeSfx, node=rightDoor),
            name=trackName)
        self.doorTrack.start(ts)
        if hasattr(self, 'done'):
            request = self.getRequestStatus()
            messenger.send('doorDoneEvent', [request])
    def exitDoorEnterOpening(self, ts):
        """State handler: swing the left door leaf open (avatar exiting)."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorDoorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Parallel(
                SoundInterval(self.openSfx, node=leftDoor),
                Sequence(
                    LerpHprInterval(
                        nodePath=leftDoor,
                        duration=0.59999999999999998,
                        hpr=VBase3(h, 0, 0),
                        startHpr=VBase3(0, 0, 0),
                        blendType='easeInOut')),
                name=trackName)
            self.doorExitTrack.start(ts)
        else:
            self.notify.warning(
                'exitDoorEnterOpening(): did not find leftDoor')
    def exitDoorEnterClosing(self, ts):
        """State handler: swing the left door leaf closed after an exit."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Sequence(
                LerpHprInterval(
                    nodePath=leftDoor,
                    duration=1.0,
                    hpr=VBase3(0, 0, 0),
                    startHpr=VBase3(h, 0, 0),
                    blendType='easeInOut'),
                SoundInterval(self.closeSfx, node=leftDoor),
                name=trackName)
            self.doorExitTrack.start(ts)
| [
"47166977+peppythegod@users.noreply.github.com"
] | 47166977+peppythegod@users.noreply.github.com |
13728d7d3e4fd069f326f6493d706e6f0df8f729 | dbef97b46cbef9d2a2f9f89f5a4fec7f49875857 | /extract_short.py | 299e8198d35ab78fda0ab4665aebda2303e75829 | [] | no_license | ayu1992/MachineLearning | e1a3626bb60bed98866ea228e27f9310bb2d3102 | f885064160f9f01e1c48edb742f770d264fc645f | refs/heads/master | 2020-06-05T13:58:41.392529 | 2015-03-30T06:09:58 | 2015-03-30T06:09:58 | 33,105,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | with open("/Volumes/YUCHI/ML_anan/movies.txt.txt.txt", "r") as f:
with open("summary_rating.csv", "w") as of:
arr = ["", "", "", ""]
for i, line in enumerate(f):
if i % 100000 == 0:
print i
if line.startswith("product/productId"):
arr[0] = line.split()[1]
elif line.startswith("review/userId"):
arr[1] = line.split()[1]
elif line.startswith("review/score"):
arr[2] = line.split()[1]
elif line.startswith("review/summary"):
arr[3] = ' '.join(line.split()[1:])
of.write(", ".join(arr) + "\n")
| [
"yuxx0535@umn.edu"
] | yuxx0535@umn.edu |
100f6258f50963233c3177c141a8c294e712e957 | 4a5931556117ceb4cb1a770928a68454658c7bd0 | /Katakana/tests.py | 72105cac7bb31df68b5a4135660bf6a9a336ad1b | [] | no_license | zooyl/JapaneseMemo | 8ce21683eae305667e11b61acd7aeeba8867044a | 9e808bc39be3e9d3c1986eb4c8d8cd7819668a8c | refs/heads/master | 2022-12-15T11:39:40.448575 | 2021-08-11T11:23:38 | 2021-08-11T11:23:38 | 175,037,873 | 0 | 0 | null | 2022-12-08T04:59:08 | 2019-03-11T16:12:41 | Python | UTF-8 | Python | false | false | 5,587 | py | from django.test import TestCase
import django
from django.test import Client
from django.urls import reverse
from django.contrib.auth.models import User, Permission
# app imports
from Hiragana.models import Stats
# Create your tests here.
class PresetsTests(django.test.TestCase):
    """Permission gating for the katakana preset views.

    Every preset (easy/medium/hard/diacritics/mixed) must show the error page
    to a user without the matching permission and the question page to a user
    who holds it. The ten test methods were near-identical copy-paste; the
    shared flow now lives in two private assertion helpers, while the public
    test method names (used by the test runner) are unchanged.
    """
    fixtures = ['Katakana.json', 'Katakana_Levels.json']

    def setUp(self):
        # Fresh client + user with stats for every test.
        self.client = Client()
        self.user = User.objects.create_user(username='test_preset', password='12345')
        self.stats = Stats.objects.create(user=self.user)

    def _assert_permission_denied(self, url_name):
        """Log in without granting any permission and expect the error page."""
        self.client.force_login(self.user)
        response = self.client.get(reverse(url_name))
        self.assertTemplateUsed('error.html')
        self.assertContains(response, "<p>Not so fast</p>")
        self.assertContains(response, "You don't have permission to visit this page")

    def _assert_question_served(self, codename, url_name):
        """Grant ``codename`` to the user and expect the question page."""
        perm = Permission.objects.get(codename=codename)
        self.user.user_permissions.add(perm)
        self.client.force_login(self.user)
        response = self.client.get(reverse(url_name))
        self.assertTemplateUsed('question.html')
        self.assertContains(response, "Points:")
        self.assertContains(response, "Pronunciation:")

    def test_preset_easy_without_permission(self):
        self._assert_permission_denied('kata_easy')

    def test_preset_easy_with_permission(self):
        self._assert_question_served('easy_katakana', 'kata_easy')

    def test_preset_medium_without_permission(self):
        self._assert_permission_denied('kata_medium')

    def test_preset_medium_with_permission(self):
        self._assert_question_served('medium_katakana', 'kata_medium')

    def test_preset_hard_without_permission(self):
        self._assert_permission_denied('kata_hard')

    def test_preset_hard_with_permission(self):
        self._assert_question_served('hard_katakana', 'kata_hard')

    def test_preset_diacritics_without_permission(self):
        self._assert_permission_denied('kata_diacritics')

    def test_preset_diacritics_with_permission(self):
        self._assert_question_served('diacritics_katakana', 'kata_diacritics')

    def test_preset_mixed_without_permission(self):
        self._assert_permission_denied('kata_mixed')

    def test_preset_mixed_with_permission(self):
        self._assert_question_served('mixed_katakana', 'kata_mixed')
class KatakanaPageTest(django.test.TestCase):
    """Access-control checks for the katakana landing page."""

    def setUp(self):
        # One throwaway user (plus stats row) per test.
        self.client = Client()
        self.user = User.objects.create_user(username='test_katakana', password='12345')
        self.stats = Stats.objects.create(user=self.user)

    def test_not_authenticated_user(self):
        # Anonymous visitors are bounced to the login page.
        page = self.client.get(reverse('katakana'))
        self.assertRedirects(page, '/login/?next=/home/katakana', status_code=302, target_status_code=200)

    def test_authenticated_user_without_permission(self):
        # Logged in, but no katakana permission: the error page is shown.
        self.client.force_login(self.user)
        page = self.client.get(reverse('katakana'))
        self.assertTemplateUsed('error.html')
        self.assertContains(page, "<p>Not so fast</p>")
        self.assertContains(page, "You don't have permission to visit this page")

    def test_authenticated_user_with_permission(self):
        # Holding the easy-katakana permission unlocks the page.
        self.user.user_permissions.add(Permission.objects.get(codename='easy_katakana'))
        self.client.force_login(self.user)
        page = self.client.get(reverse('katakana'))
        self.assertTemplateUsed(page, 'katakana.html')
        self.assertContains(page, 'List of unlocked levels')
| [
"natoniewski.m@gmail.com"
] | natoniewski.m@gmail.com |
aa718ed8354abdea50f56b54e171775a136dd57a | dd116fe1e94191749ab7a9b00be25bfd88641d82 | /cairis/cairis/SearchDialog.py | c128364ca182e31bbb94073ecd249cd1315fc760 | [
"Apache-2.0"
] | permissive | RobinQuetin/CAIRIS-web | fbad99327707ea3b995bdfb4841a83695989e011 | 4a6822db654fecb05a09689c8ba59a4b1255c0fc | HEAD | 2018-12-28T10:53:00.595152 | 2015-06-20T16:53:39 | 2015-06-20T16:53:39 | 33,935,403 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import ARM
from SearchPanel import SearchPanel
from Borg import Borg
class SearchDialog(wx.Dialog):
  """Modal dialog for free-text searching of the CAIRIS model.

  Hosts a SearchPanel and populates its result list from the database
  proxy when the Find button is pressed.
  NOTE(review): Python 2 / classic wxPython code (`except X,e` syntax).
  """
  def __init__(self,parent):
    wx.Dialog.__init__(self,parent,armid.SEARCHMODEL_ID,'Search model',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(700,500))
    # database access goes through the shared Borg singleton's proxy
    b = Borg()
    self.dbProxy = b.dbProxy
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = SearchPanel(self)
    mainSizer.Add(self.panel,1,wx.EXPAND)
    self.SetSizer(mainSizer)
    wx.EVT_BUTTON(self,armid.SEARCHMODEL_BUTTONFIND_ID,self.onFind)

  def onFind(self,evt):
    """Find-button handler: validate the query, run the model search and
    fill the result list (name / type / context columns)."""
    ssCtrl = self.FindWindowById(armid.SEARCHMODEL_TEXTSEARCHSTRING_ID)
    ssValue = ssCtrl.GetValue()
    # reject empty or single-space queries up front
    if (len(ssValue) == 0) or (ssValue == ' '):
      dlg = wx.MessageDialog(self,'Search string empty','Search model',wx.OK)
      dlg.ShowModal()
      dlg.Destroy()
      return
    listCtrl = self.FindWindowById(armid.SEARCHMODEL_LISTRESULTS_ID)
    listCtrl.DeleteAllItems()
    searchOptionsCtrl = self.FindWindowById(armid.SEARCHOPTIONSPANEL_ID)
    searchOptions = searchOptionsCtrl.optionFlags()
    try:
      searchResults = self.dbProxy.searchModel(ssValue,searchOptions)
      for idx,result in enumerate(searchResults):
        # each result row: (environment, object type, object name)
        listCtrl.InsertStringItem(idx,result[0])
        listCtrl.SetStringItem(idx,1,result[1])
        listCtrl.SetStringItem(idx,2,result[2])
    except ARM.ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Search model',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return
| [
"shamal.faily@googlemail.com"
] | shamal.faily@googlemail.com |
aa0d2e6554684c54501f6f150d32cf14d1cc827e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/136/21959/submittedfiles/funcoes.py | efca9f8ab430ae8fca7e83512158b118f168e4d3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | #ARQUIVO COM SUAS FUNCOES
from __future__ import division
def calcula_valor_absoluto(x):
    """Return the absolute value of x (manual implementation, no abs())."""
    return -x if x < 0 else x
def calcula_pi(m):
    """Approximate pi with the first m terms of the Nilakantha series:
    pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...

    Bug fix: the original wrapped the loop-variable increments inside an
    ``if 1 <= m <= 2000`` guard, so any m > 2000 never advanced the loop
    counter and hung forever. The iteration now always advances; results
    for 1 <= m <= 2000 are unchanged, and larger m simply use more terms.

    Returns 3 (the zero-term value) when m <= 0.
    """
    soma = 0.0
    base = 2  # first factor of the current term's denominator
    for i in range(1, m + 1):
        termo = 4.0 / (base * (base + 1) * (base + 2))
        if i % 2 == 0:
            soma -= termo  # even-numbered terms are subtracted
        else:
            soma += termo  # odd-numbered terms are added
        base += 2
    return 3 + soma
def fatorial(n):
    """Return n! for a non-negative integer n (0! == 1).

    Bug fix: the original multiplied over ``range(0, n)``, so the factor 0
    made fatorial(n) == 0 for every n >= 1 — which in turn caused a
    ZeroDivisionError inside calcula_co_seno. The product now runs over
    2..n, which is the correct factorial.
    """
    resultado = 1
    for i in range(2, n + 1):
        resultado *= i
    return resultado
def calcula_co_seno(z, epsilon):
    """Approximate cos(z) with its Taylor series, adding terms until the
    most recently applied term is no larger than epsilon.

    Relies on the sibling fatorial() for the term denominators.
    """
    acumulado = 0
    sinal = -1  # the first term, z**2/2!, enters with a minus sign
    expoente = 2
    termo = (z ** expoente) / fatorial(expoente)
    while termo > epsilon:
        # recompute the term for the current exponent, then fold it in
        termo = (z ** expoente) / fatorial(expoente)
        acumulado = acumulado + sinal * termo
        sinal = -sinal
        expoente = expoente + 2
    # the series' leading 1 is added at the end
    return acumulado + 1
def calcula_razao_aurea(m, epsilon):
    """Golden ratio via the identity phi = 2 * cos(pi / 5), using the
    sibling series approximations for both pi and cosine."""
    return 2 * calcula_co_seno(calcula_pi(m) / 5, epsilon)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
eb8ac1065478d710684981c40c7244e68a8d4b27 | 8469dff9fcfae4ea79af33bf5663c0870e4cea77 | /socket/socket_local/sock_client.py | ab90946c8d20c1a30009df68c49091b2016f1228 | [] | no_license | mrliuminlong/note | 130de6f038fe6c7a7d6991beab4bf965bee8424f | f9b34e79b4d1a467e362a65350422c7fc870d205 | refs/heads/master | 2020-04-19T16:18:01.736696 | 2019-02-13T10:39:39 | 2019-02-13T10:39:39 | 168,300,257 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | #本地套接字
from socket import *
# Unix-domain (local) socket client: stream lines typed by the user to the
# server until an empty line is entered.
sockfd = socket(AF_UNIX, SOCK_STREAM)
# both ends must use the same socket file path
sockfd.connect("./sock")
while True:
    msg = input(">>")
    if not msg:
        # empty input terminates the session
        break
    sockfd.send(msg.encode())
sockfd.close()
# os.remove(file) deletes a file
# os.path.exists() checks whether a file exists
"liuminlong2010@sina.cn"
] | liuminlong2010@sina.cn |
e859ec2e54f53f7b9c6871255c5541097f1f8cc2 | 3f23eec5418587e6608af6b1b57a33e88046e750 | /7-gce/config.py | 6228544da2063b36407607663af6c53105b65d50 | [] | no_license | OlexiyVovnyuk/bookshelf | 5830327acb456cbf1947863936520b834aa611db | e53916ae45c9ea7e871a79812fcf5466d25dce9d | refs/heads/main | 2023-04-03T16:55:08.283035 | 2021-04-05T07:37:35 | 2021-04-05T07:37:35 | 354,753,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains all of the configuration values for the application.
Update this file with the values for your specific Google Cloud project.
You can create and manage projects at https://console.developers.google.com
"""
import os
# The secret key is used by Flask to encrypt session cookies.
# NOTE(review): this key and the DB password below are committed in plain
# text — presumably a sample/deployment-specific file, but confirm and move
# real secrets to environment variables if this repo is shared.
SECRET_KEY = '\xfd{H\xe5<\x95\xf9\xe3\x96.5\xd1\x01O<!\xd5\xa2\xa0\x9fR"\xa1\xa8'
# There are three different ways to store the data in the application.
# You can choose 'datastore', 'cloudsql', or 'mongodb'. Be sure to
# configure the respective settings for the one you choose below.
# You do not have to configure the other data backends. If unsure, choose
# 'datastore' as it does not require any additional configuration.
DATA_BACKEND = 'cloudsql'
# Google Cloud Project ID. This can be found on the 'Overview' page at
# https://console.developers.google.com
PROJECT_ID = 'bookshelf-309511'
# CloudSQL & SQLAlchemy configuration
# Replace the following values the respective values of your Cloud SQL
# instance.
CLOUDSQL_USER = 'root'
CLOUDSQL_PASSWORD = 'Jbo6x0b5k898pkyd'
CLOUDSQL_DATABASE = 'bookshelf'
# Set this value to the Cloud SQL connection name, e.g.
# "project:region:cloudsql-instance".
# You must also update the value in app.yaml.
CLOUDSQL_CONNECTION_NAME = 'bookshelf-309511:europe-central2:bookshelf-sql'
# The CloudSQL proxy is used locally to connect to the cloudsql instance.
# To start the proxy, use:
#
#   $ cloud_sql_proxy -instances=your-connection-name=tcp:3306
#
# Port 3306 is the standard MySQL port. If you need to use a different port,
# change the 3306 to a different port number.
# Alternatively, you could use a local MySQL instance for testing.
LOCAL_SQLALCHEMY_DATABASE_URI = (
    'mysql+pymysql://{user}:{password}@127.0.0.1:3306/{database}').format(
        user=CLOUDSQL_USER, password=CLOUDSQL_PASSWORD,
        database=CLOUDSQL_DATABASE)
# When running on App Engine a unix socket is used to connect to the cloudsql
# instance.
LIVE_SQLALCHEMY_DATABASE_URI = (
    'mysql+pymysql://{user}:{password}@localhost/{database}'
    '?unix_socket=/cloudsql/{connection_name}').format(
        user=CLOUDSQL_USER, password=CLOUDSQL_PASSWORD,
        database=CLOUDSQL_DATABASE, connection_name=CLOUDSQL_CONNECTION_NAME)
# GAE_INSTANCE is only set when running on App Engine, so this picks the
# socket-based URI in production and the TCP proxy URI locally.
if os.environ.get('GAE_INSTANCE'):
    SQLALCHEMY_DATABASE_URI = LIVE_SQLALCHEMY_DATABASE_URI
else:
    SQLALCHEMY_DATABASE_URI = LOCAL_SQLALCHEMY_DATABASE_URI
# Mongo configuration
# If using mongolab, the connection URI is available from the mongolab control
# panel. If self-hosting on compute engine, replace the values below.
# MONGO_URI = 'mongodb://user:password@host:27017/database'
# Google Cloud Storage and upload settings.
# Typically, you'll name your bucket the same as your project. To create a
# bucket:
#
#   $ gsutil mb gs://<your-bucket-name>
#
# You also need to make sure that the default ACL is set to public-read,
# otherwise users will not be able to see their upload images:
#
#   $ gsutil defacl set public-read gs://<your-bucket-name>
#
# You can adjust the max content length and allow extensions settings to allow
# larger or more varied file types if desired.
CLOUD_STORAGE_BUCKET = 'bookshelf-309511'
MAX_CONTENT_LENGTH = 8 * 1024 * 1024  # 8 MiB upload cap
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# OAuth2 configuration.
# This can be generated from the Google Developers Console at
# https://console.developers.google.com/project/_/apiui/credential.
# Note that you will need to add all URLs that your application uses as
# authorized redirect URIs. For example, typically you would add the following:
#
# * http://localhost:8080/oauth2callback
# * https://<your-app-id>.appspot.com/oauth2callback.
#
# If you receive a invalid redirect URI error review you settings to ensure
# that the current URI is allowed.
# NOTE(review): still the placeholder values — OAuth login presumably unused.
GOOGLE_OAUTH2_CLIENT_ID = \
    'your-client-id'
GOOGLE_OAUTH2_CLIENT_SECRET = 'your-client-secret'
| [
"noreply@github.com"
] | OlexiyVovnyuk.noreply@github.com |
1def8bfa91528ad23d33f5f84710747a8dc3cf57 | c0f86b926fc82baa633862896096c149dd9913cf | /Python/Numpy/Mean-Var-and-Std/Python2/solution.py | 74b8d96a55af697e4421abd696b485c3a4ebf3f7 | [] | no_license | qxzsilver1/HackerRank | 8df74dd0cd4a9dedd778cdecea395f4234eda767 | bcb1b74711a625d8ad329a3f9fdd9f49b1bebc54 | refs/heads/master | 2021-09-09T15:45:35.681284 | 2021-09-07T00:11:16 | 2021-09-07T00:11:16 | 75,671,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import numpy
# Python 2 HackerRank solution: read an n x m integer matrix from stdin and
# print its per-row mean, per-column variance, and overall standard deviation.
n, m = map(int, raw_input().split())
a = numpy.array([raw_input().split() for _ in xrange(n)], int)
print numpy.mean(a, axis=1)   # mean along each row
print numpy.var(a, axis=0)    # variance along each column
print numpy.std(a, None)      # std of the flattened array (axis=None)
| [
"noreply@github.com"
] | qxzsilver1.noreply@github.com |
fa196682cdbaa35f05d090a579a50930f5be698b | 1af44bdb5f59f5a58ead1094daea44f8d49e015c | /recursion.py | 3d9e66a67a862ee1b0c3a0fe2094d334c86496ce | [] | no_license | KrishnaRekapalli/out-think | 865bf2dba27ac220db084de9c0e5fbe7fc9db2e6 | 94fd32c04e8b4a5755c88dc180a3dc293392c62f | refs/heads/master | 2021-01-10T16:52:30.175292 | 2017-05-29T17:41:20 | 2017-05-29T17:41:20 | 53,183,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | def get_fib(position):
# Write your code here.
if position==0:
return 0
elif position==1:
return 1
else:
fib = [0 for i in range(position+1)]
fib[0] = 0
fib[1] = 1
for j in range(2,position+1):
fib[j] = fib[j-1]+fib[j-2]
return fib[position]
#n = int(raw_input())
print(get_fib(23))
| [
"noreply@github.com"
] | KrishnaRekapalli.noreply@github.com |
7b3e108a66ca87302ccf56e8cdf18d7fb50ce119 | 472f15abd5b889e96e554272e371208c63d044d2 | /blog/urls.py | 2556a4365076550a279151aab7999c127012f975 | [] | no_license | BjoernBerlin/my-first-blog | 499cafdc3c06d998fbbb5c3ad6d72033a1941cd6 | 5cae465941e1764041ed7c0125bccea2037b3725 | refs/heads/master | 2016-09-01T06:06:38.448887 | 2015-10-10T11:22:09 | 2015-10-10T11:22:09 | 43,352,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from django.conf.urls import url
from .import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),
url(r'^post/(?P<pk>[0-9]+)/publish/$', views.post_publish, name='post_publish'),
url(r'^post/(?P<pk>[0-9]+)/remove/$', views.post_remove, name='post_remove'),
] | [
"bjoern@lengers.de"
] | bjoern@lengers.de |
f4c6ce59efd1e1f03e2d9705d803ef33a713b166 | e1b0308dc4ba9e412d12e945c31f7f46f524daa4 | /project/image64/models.py | 947999279a351494ff5d2ca946b6ea023600b019 | [] | no_license | loressl/djangorestframework_image_base64 | 8d05d11af8c5802afe287be433714012dfa174e9 | a9078664cc5a3fe5044b65c00497f05fec811ab7 | refs/heads/master | 2022-11-17T16:57:33.727584 | 2020-07-13T22:51:00 | 2020-07-13T22:51:00 | 279,215,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.db import models
# Create your models here.
class Image_Base64(models.Model):
    """Stores a single image as a base64-encoded string."""
    # TextField because base64 payloads easily exceed CharField length limits.
    image= models.TextField()
    def __str__(self):
        # NOTE(review): returns the full base64 payload, which makes admin
        # listings huge — consider truncating.
        return self.image
| [
"loryssl@hotmail.com"
] | loryssl@hotmail.com |
98447ab158842379f6445b580543c5b19f094a29 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/engine/training_arrays.py | 5de18f2e9cb1d7dd00b968bd7ddef3a828ccaf01 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/engine/training_arrays.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
ac25d0db95dee1117ae6fe4b899083d273595bed | df41dbe7691f1b954057b8aa787c988ffcc6692a | /test.py | 94eeb3df1c071bfaf6ec15838838fe0a649e571c | [] | no_license | mago960806/RemoteCheck | 02e55622c1cd2ce1defb70fa64f40c54cd3eff0b | a46d279fa8bca30c29c12d28a445ca5814f76338 | refs/heads/master | 2020-04-01T22:31:00.400213 | 2019-05-09T02:02:10 | 2019-05-09T02:02:10 | 153,712,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from remotecheck import map_with_multi_thread_output_json
# Hosts to check; each row is presumably [ip_address, port, username, password]
# -- confirm against map_with_multi_thread_output_json's expected layout.
# NOTE(review): credentials are hard-coded in source.
host_list = [
    [
        '192.168.1.1',
        '22',
        'root',
        'admin123'
    ],
    [
        '192.168.1.2',
        '10022',
        'weblogic',
        'admin123'
    ]
]
# Run the remote checks over all hosts (multi-threaded, JSON output,
# per the helper's name) and print the result.
result = map_with_multi_thread_output_json(host_list)
print(result)
| [
"mago960806@hotmail.com"
] | mago960806@hotmail.com |
54905961f5da67d188acd3d289b59b48346852ab | ebac75f37d7afb53d63d82e173a1f9708e477961 | /rango/utilities.py | 78b927f273b5da82c6702e60af841a48a253534b | [] | no_license | mscienski/rango | adcef6f232aded43be3de0ea505666533ec92d53 | cdc8167f972ea0eb57169921f0159292c904ac19 | refs/heads/master | 2020-06-02T04:06:55.064207 | 2020-03-03T18:55:13 | 2020-03-03T18:55:13 | 21,962,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | __author__ = 'idfl'
def urlencoding(param):
    """Toggle a slug between URL form and display form.

    If the (stringified) value contains underscores they become spaces;
    otherwise any spaces become underscores; a value containing neither
    is returned unchanged.

    :param param: value to convert; non-strings are stringified first
    :return: the converted string
    """
    text = str(param)  # idiomatic spelling of param.__str__()
    if '_' in text:
        return text.replace('_', ' ')
    if ' ' in text:
        return text.replace(' ', '_')
    return text
"michal.scienski@idfl.com"
] | michal.scienski@idfl.com |
86e58c0836f1d5180acbfb2d7d40c1b45183e6e5 | e3f92d9157c5af78aa2ea0a4ea05027a04014b4c | /sampler.py | 18b33f76121e6094887932758f31531682ed5ca8 | [] | no_license | gdesjardins/smlpt | 10c7900ef62f02ca5fcb23313a7ace3f1bf9656c | facf90d522d056f150dfc8874ebf16e0a299fc5c | refs/heads/master | 2021-01-17T11:58:50.311901 | 2014-01-22T22:37:08 | 2014-01-22T22:37:08 | 16,154,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | import theano
import theano.tensor as T
import numpy
class BlockGibbsSampler(object):
    """Block Gibbs sampler over conditionally independent groups of
    theano shared variables.

    Python 2 code: relies on dict.iteritems/itervalues/iterkeys and xrange.
    One theano function is compiled per variable; calling it applies that
    variable's update expression in place.
    """
    def __init__(self, block_updates, n_steps=1):
        """
        :param block_updates: dictionary whose keys are conditionally independent (theano
        shared) variables, and whose values are the update expression to use for block gibbs
        sampling
        :param n_steps: number of block Gibbs steps to perform
        """
        self.block_updates = block_updates
        self.n_steps = n_steps
        self.sample_block = {}
        # Compile one no-argument update function per variable.
        for i, (k,v) in enumerate(block_updates.iteritems()):
            self.sample_block[k] = theano.function([],[],
                    updates={k:v},allow_input_downcast = False)
    def simulate(self, n_steps=None):
        """Run n_steps full sweeps over all blocks.

        Defaults to self.n_steps; note an explicit n_steps=0 also falls
        back to self.n_steps because of the truthiness test below.
        """
        n_steps = n_steps if n_steps else self.n_steps
        for n in xrange(n_steps):
            for fn in self.sample_block.itervalues():
                fn()
    def get_state(self):
        """Return {shared_variable: current value} for every sampled variable."""
        state = {}
        for v in self.block_updates.iterkeys():
            state[v] = v.value
        return state
    def draw(self, n_steps=None):
        """Advance the chain by n_steps sweeps and return the new state."""
        self.simulate(n_steps=n_steps)
        return self.get_state()
| [
"guillaume.desjardins@gmail.com"
] | guillaume.desjardins@gmail.com |
ff42b0afb739c60d4ad201d92376e6272401eeb7 | 03a878e126a4645e2ae0d814f7005a9d7eebf6e4 | /backend/schedules/migrations/0033_auto_20200919_1356.py | 7ff43226fbbe650af5d723a604b2aed508eadf21 | [] | no_license | nickfff-dev/GeneSys | d8a471734fe6afba8a968004a204a20bc1d6fcdc | e4972f735234bbf69e77b3cbfd9279e32558ede7 | refs/heads/master | 2023-04-06T05:53:32.842913 | 2021-04-02T14:08:12 | 2021-04-02T14:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # Generated by Django 3.0.5 on 2020-09-19 05:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops the separate start/end time
    columns from ClinicSchedulePatient and Event (presumably superseded
    by other fields -- confirm against the models/migration chain)."""
    dependencies = [
        # Must run after the 0032 appointment_type migration.
        ('schedules', '0032_clinicschedulepatient_appointment_type'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='clinicschedulepatient',
            name='time_end',
        ),
        migrations.RemoveField(
            model_name='clinicschedulepatient',
            name='time_start',
        ),
        migrations.RemoveField(
            model_name='event',
            name='end_time',
        ),
        migrations.RemoveField(
            model_name='event',
            name='start_time',
        ),
    ]
| [
"abdulmaula.nacan@gmail.com"
] | abdulmaula.nacan@gmail.com |
333ed64669224b879c5b0bc36d873a73ef3b7b12 | ecb8f796de591ed38a7a176f54182b074a59768d | /recusion_hanoi_four_pillar_tower.py | ed50cfd0cf376662273157fb7073e0c7ab5e1cfc | [] | no_license | maxianren/algorithm_python | 89d0ebc7e475875b97c5b25da2dc7c2118245af8 | 7218348831af059db69aa02637b85b9d9a799b6b | refs/heads/master | 2023-05-25T09:01:36.798743 | 2021-06-03T12:17:45 | 2021-06-03T12:17:45 | 313,368,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | '''
The issue of the Hanoi Tower originated from an ancient Indian legend.
For the original Hanoi Tower game, there are only three pillars available for players to operate.
As a result, the original legend requires more than 1.8*10^19 steps to solve the problem.
The number of required steps can be greatly reduced by adding columns.
Find the minimum number of steps to complete the migration under the restriction below:
- the number of plates is given,
- the number of pillars is 4 (that is, the limit is 4 pillars)
- the other rules of the original legend are not changed.
Input format:
A non-negative integer M, M represents the number of disks, M<=1000.
Output format:
A non-negative integer that represents the minimum number of steps to complete the migration.
Input sample:
3
Sample output:
5
'''
#the main function
def best_hanoi_4_tower(m, cache_4, cache_3):
    """Minimum number of moves for m disks with 4 pegs (Frame-Stewart).

    For every split n (1 <= n < m) the cost is the 3-peg cost of n disks
    (closed form 2**n - 1) plus twice the 4-peg cost of the remaining
    m - n disks; the cheapest split wins.

    :param m: number of disks, m >= 0
    :param cache_4: memo list, len >= m + 1, unknown entries None;
        filled in place with 4-peg answers (as before)
    :param cache_3: memo list, len >= m + 1, unknown entries None;
        filled in place with 3-peg answers (as before)
    :return: minimum move count for m disks

    Fixes: ``is None`` instead of ``== None``; memoised results are now
    returned immediately instead of being recomputed.
    """
    if m <= 1:
        # Base cases: 0 disks -> 0 moves, 1 disk -> 1 move.
        cache_4[m] = m
        return m
    if cache_4[m] is not None:
        return cache_4[m]
    best = None
    for n in range(1, m):
        if cache_3[n] is None:
            cache_3[n] = (1 << n) - 1  # classic 3-peg Hanoi cost, 2**n - 1
        moves = cache_3[n] + 2 * best_hanoi_4_tower(m - n, cache_4, cache_3)
        if best is None or moves < best:
            best = moves
    cache_4[m] = best
    return cache_4[m]
def hanoi_3_tower(n):
    """Minimum moves for the classic 3-peg Tower of Hanoi: 2**n - 1.

    :param n: number of disks
    :return: 2**n - 1 for n >= 1, 0 for n <= 0

    Fix: the original recursed on n - 1 with a base case only at n == 1,
    so any n <= 0 recursed forever; non-positive n now returns 0.
    """
    if n <= 0:
        return 0
    return (1 << n) - 1
if __name__ == "__main__":
    # m is hard-coded for the sample case; restore int(input()) to read it.
    m = 3#int(input())
    # Memo lists sized m + 1 (indexes 0..m), pre-filled with None.
    print(best_hanoi_4_tower(m,(m+1)*[None],(m+1)*[None]))
"maxianren@gmail.com"
] | maxianren@gmail.com |
f4627a9f0b0e5a3bc6856616a26598590fe7c8db | f9a96f02fb59ebb320d48ae7d266a1ba1bb2f7cc | /ex31.py | 9267bf4cd0e0abe51b98857e138015a5aaec168e | [] | no_license | virtualet/LPTHW | eb54eca5471c179652b1466e604419601a3a082c | e31b703e835640fc9f04ad99b027bcf6d6c1a746 | refs/heads/master | 2021-01-13T01:53:50.027232 | 2014-10-06T22:03:27 | 2014-10-06T22:03:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | __author__ = 'echoecho'
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
print "There's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake"
print "2. Scream at the bear"
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away" % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina"
print "1. Blueberries"
print "2. Yello jacket clothespins"
print "3. Understanding revolvers yelling melodies"
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good luck!"
else:
print "The insanity rots your eyes into a pool of muck. Good luck!"
else:
print "You stumble around and fall on a knife and die. Good job!"
| [
"echoecho@gmail.com"
] | echoecho@gmail.com |
15585c539acb0e4546ebbccb70364de39847516c | e57d7785276053332c633b57f6925c90ad660580 | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_sql_server_extended_blob_auditing_policies_operations.py | 516ad87a451dfc9680c799edc043f4ea896578f1 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 17,241 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations(object):
"""WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
blob_auditing_policy_name, # type: Union[str, "_models.BlobAuditingPolicyName"]
**kwargs # type: Any
):
# type: (...) -> "_models.ExtendedServerBlobAuditingPolicy"
"""Get server's extended blob auditing policy.
Get a workspace SQL server's extended blob auditing policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param blob_auditing_policy_name: The name of the blob auditing policy.
:type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedServerBlobAuditingPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedServerBlobAuditingPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
blob_auditing_policy_name, # type: Union[str, "_models.BlobAuditingPolicyName"]
parameters, # type: "_models.ExtendedServerBlobAuditingPolicy"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExtendedServerBlobAuditingPolicy"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExtendedServerBlobAuditingPolicy"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExtendedServerBlobAuditingPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
blob_auditing_policy_name, # type: Union[str, "_models.BlobAuditingPolicyName"]
parameters, # type: "_models.ExtendedServerBlobAuditingPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExtendedServerBlobAuditingPolicy"]
"""Create or Update server's extended blob auditing policy.
Create or Update a workspace managed sql server's extended blob auditing policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param blob_auditing_policy_name: The name of the blob auditing policy.
:type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
:param parameters: Properties of extended blob auditing policy.
:type parameters: ~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExtendedServerBlobAuditingPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedServerBlobAuditingPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
blob_auditing_policy_name=blob_auditing_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExtendedServerBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'blobAuditingPolicyName': self._serialize.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}'} # type: ignore
def list_by_workspace(
self,
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExtendedServerBlobAuditingPolicyListResult"]
"""List server's extended blob auditing policies.
List workspace managed sql server's extended blob auditing policies.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExtendedServerBlobAuditingPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.ExtendedServerBlobAuditingPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedServerBlobAuditingPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExtendedServerBlobAuditingPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings'} # type: ignore
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
3bb4a436ba047184d62d283d7b2b9e40cae5dd1a | c7d87b146913128fcc12dd4241f69a6b5b346235 | /week6/6_6_BMI.py | 71a33ddf3a4233c33c6832af7a7716835956133c | [] | no_license | PutkisDude/Developing-Python-Applications | cbe9fc169937087721440a378a912383ba7c2930 | 928c22bcb1b05408dc008c605c1c3a4b509a5536 | refs/heads/main | 2023-04-09T03:53:08.312225 | 2021-04-13T09:15:15 | 2021-04-13T09:15:15 | 336,190,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | #Author Lauri Putkonen
#6. Returns the BMI.
def bmi(weight, height):
    """Return the body-mass index for *weight* in kg and *height* in cm."""
    height_in_m = height / 100
    return weight / (height_in_m * height_in_m)
# Interactive entry point: prompt for weight/height and print the BMI.
weight = float(input("Type your weight(kg) : "))
height = float(input("Type height (cm): "))
print("Your BMI is %.2f" % bmi(weight, height))
#OUTPUT:
# Type your weight(kg) : 90
# Type height (cm): 180
# Your BMI is 27.78
| [
"putkis@gmail.com"
] | putkis@gmail.com |
fad45a86132e84bd2b36271cb1a2dfe8fc908e37 | 416bbc7b84b728950b1811ab310afa30ed652ec1 | /cBOW_skipGram.py | d30695371bff6023498d6913ace880a713de21b1 | [] | no_license | datacampmumbai/First-Project | b9b2acb86c2baeff9a7e11a01cf670b7a0254336 | 41772fa8017372b4dd696145eec3137603f2471e | refs/heads/master | 2020-04-05T17:36:05.126030 | 2018-11-14T10:20:18 | 2018-11-14T10:20:18 | 157,068,394 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 28 09:10:42 2018
@author: Sanmoy
"""
import os
import pandas as pd
import gensim
from string import punctuation
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
# Filter set: English stopwords plus punctuation plus one extra character
# seen in the source text.
custom=set(stopwords.words('english')+list(punctuation)+['»'])
# Hard-coded, machine-specific data directory.
path="C:/F/NMIMS/DataScience/Sem-3/TA/data"
os.chdir(path)
# 11-0.txt is presumably the Project Gutenberg "Alice in Wonderland" dump
# (the similarity probes below use 'alice'/'wonderland') -- TODO confirm.
data = open("11-0.txt", encoding="latin-1").read()
doc = data.replace("\n", " ")  # flatten newlines into one token stream
print(doc)
# Tokenize into per-sentence lists of lowercased, filtered words.
data = []
for sent in sent_tokenize(doc):
    temp = []
    for j in word_tokenize(sent):
        if j not in custom:
            temp.append(j.lower())
    data.append(temp)
# NOTE(review): the two bare expressions below are no-ops outside a REPL.
len(data)
data
##Create CBOW Model
# gensim Word2Vec with the old (<4.0) parameter names size/window; CBOW is
# the library default training mode.
model1 = gensim.models.Word2Vec(data, min_count=1, size=100, window=5)
print("Cosine similarity between 'alice' "+"and'wonderland'-CBOW: ", model1.similarity('alice', 'wonderland'))
print("Cosine similarity between 'alice' "+"and'machines'-CBOW: ", model1.similarity('alice', 'machines'))
# Second pass: TextBlob tokenization over the whole document (no sentence split).
from textblob import TextBlob as tb
blob = tb(doc)
blob_wor = list(blob.words)
blob_wor
# One flat token list, wrapped in [ ] so Word2Vec treats it as a single sentence.
data = [word.lower() for word in blob_wor if word not in custom]
model1 = gensim.models.Word2Vec([data], min_count=1, size=100, window=5)
print(model1.similarity('after', 'like'))
#data = [word for word in data if word not in custom]
| [
"noreply@github.com"
] | datacampmumbai.noreply@github.com |
4fa3f1d14d7a3874a09c7c5cc9abb92aad5c255c | 2b5f57510315d96de0ab5c374560adaac76a5abf | /Grade-Calculator.py | 72ebdc9b91c7de88322355ebcc3dc32be95bd502 | [] | no_license | zeem5/Grade-calculator | 92ad1e9fb8bd69370b250c60cc388ec7220b1f5a | ab0c955d603d1903e472d5b5b43ec36dacd13c92 | refs/heads/master | 2020-04-08T03:35:53.580214 | 2018-11-24T23:46:14 | 2018-11-24T23:46:14 | 158,982,476 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | x = int (input ("Enter your score"))
def _grade(score):
    """Return the letter grade for an integer score, or None when the
    score is outside 0-100.

    Bands: 0-39 F, 40-44 E, 45-49 D, 50-59 C, 60-69 B, 70-100 A.
    Fix: the original chain used strict < and > on both ends of every
    band, so the boundary scores 0, 39, 44, 50, 60, 70 and 100 all fell
    through to the out-of-range message.  Band edges are assigned to the
    band suggested by the neighbouring comparisons -- confirm against
    the intended grading scale.
    """
    if not 0 <= score <= 100:
        return None
    if score < 40:
        return "F"
    if score < 45:
        return "E"
    if score < 50:
        return "D"
    if score < 60:
        return "C"
    if score < 70:
        return "B"
    return "A"


# x is read from the input() prompt at the top of this script.
_letter = _grade(x)
if _letter is None:
    print("Please enter marks between 0-100! Thanks")
else:
    print(_letter)
| [
"noreply@github.com"
] | zeem5.noreply@github.com |
59019e4e0de44502a63c95bc121cf7f067510cda | 841a4906780c75fe72f0bea68e641bcab1fa19f5 | /2019/07/two.py | 75e4be895e0f2c8fba69cf1f2ce7e6abe32faa95 | [] | no_license | RobertMusser/Avent-of-Code | 87584a37e7d81b252affb2c04bda8abbc9ef9fd3 | 6bcdd866efaa1088b02f2ad50a125a453d41d7f5 | refs/heads/master | 2023-02-06T13:47:50.596782 | 2023-01-26T01:31:34 | 2023-01-26T01:31:34 | 224,293,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,787 | py | import math
import itertools
# Reads file into string, code adapted from ( https://github.com/imhoffman/advent/blob/master/2015/01/one.py )
def file_to_string(file_name):
    """Return the entire contents of *file_name* as one string.

    Fix: the original looped on fp.read() and only assigned the result
    when it was non-empty, so an empty file raised UnboundLocalError at
    the return; an empty file now yields "".
    """
    with open(file_name) as fp:
        return fp.read()
# finds commas in string
# could be done with .split(), but oh well
def comma_finder(string_opcode):
    """Return the indices of every ',' in *string_opcode*, in order."""
    return [position for position, character in enumerate(string_opcode)
            if character == ',']
# parses string into an array
# could be done with .split()
def string_to_array(opcode_string, comma_index):
    """Split *opcode_string* at the given comma positions and int() each piece.

    *comma_index* is the list of comma offsets (as produced by comma_finder);
    the segments between consecutive commas become the returned integers.
    """
    starts = [0] + [position + 1 for position in comma_index]
    ends = list(comma_index) + [len(opcode_string)]
    return [int(opcode_string[a:b]) for a, b in zip(starts, ends)]
# makes number str and back-fills with 0s
def yarnifier(number):
    """Return str(number) left-padded with zeros to width 5.

    Inputs already wider than 5 characters pass through unchanged.
    """
    return str(number).rjust(5, "0")
# returns true if number was a valid opcode, false if not
def opcode_checker(number):
    """Report whether *number* is a well-formed intcode instruction.

    Valid instructions are positive, at most five digits: up to three
    leading parameter-mode digits (each 0 or 1) followed by a two-digit
    operation that is either 01-08 or 99.
    """
    text = str(number)
    if len(text) > 5:       # more than 5 digits can't be an opcode
        return False
    if number < 1:          # zero and negatives can't be opcodes
        return False
    text = text.rjust(5, "0")  # zero-pad, just like yarnifier
    if any(mode not in "01" for mode in text[:3]):
        return False
    operation = int(text[3:])
    return operation == 99 or (text[3] == "0" and 1 <= int(text[4]) <= 8)
# given a pointer and a program, executes instructions and returns modified program + pointer
def opcode_processor(pointer, program, setting):
    """Execute the single intcode instruction found at program[pointer].

    Returns a 4-tuple (pointer, program, input_received, output):
      * pointer  -- address of the next instruction, or the string 'DONE'
                    on halt (opcode 99) or when the opcode fails validation;
      * program  -- the same list object, mutated in place by the write
                    instructions (opcodes 1, 2, 3, 7, 8);
      * input_received -- 1 if this instruction consumed *setting*
                    (opcode 3), otherwise the sentinel -1;
      * output   -- the value emitted by opcode 4, otherwise the sentinel -1.
    NOTE(review): -1 doubles as the "no output" sentinel, so a program
    that legitimately outputs -1 would be misread by callers -- confirm
    inputs never emit -1.
    """
    input_received = -1 # default falsyness
    output = -1
    opcode = program[pointer] # purely symbolic
    if opcode_checker(opcode): # this is only helpful for debugging
        # yarn is the zero-padded 5-char opcode; yarn[2]/yarn[1] are the
        # parameter modes of the first/second operand (0 = address, 1 = value).
        yarn = yarnifier(opcode)
        first = int(yarn[2])
        second = int(yarn[1])
        if int(yarn[4]) == 1:
            x = program[pointer + 1] # default set to value not address
            y = program[pointer + 2]
            if first == 0: # x and y updated if modes not 1
                x = program[x]
            if second == 0:
                y = program[y]
            program[program[pointer + 3]] = x + y # + rule
            pointer += 4
        elif int(yarn[4]) == 2:
            x = program[pointer + 1]
            y = program[pointer + 2]
            if first == 0:
                x = program[x]
            if second == 0:
                y = program[y]
            program[program[pointer + 3]] = x * y # * rule
            pointer += 4
        elif int(yarn[4]) == 3: # get input rule
            x = setting # always address mode
            program[program[pointer + 1]] = x
            input_received = 1
            pointer += 2
        elif int(yarn[4]) == 4: # print rule
            if first == 0:
                output = program[program[pointer + 1]]
            elif first == 1:
                output = program[pointer + 1]
            pointer += 2
        elif int(yarn[4]) == 5: # jump-if-true
            x = program[pointer+1]
            y = program[pointer+2]
            if first == 0:
                x = program[x]
            if second == 0:
                y = program[y]
            if x != 0:
                pointer = y
            else: # this might need to be something else
                pointer += 3
        elif int(yarn[4]) == 6: # jump-if-false
            x = program[pointer + 1]
            y = program[pointer + 2]
            if first == 0:
                x = program[x]
            if second == 0:
                y = program[y]
            if x == 0:
                pointer = y
            else: # this might need to be something else
                pointer += 3
        elif int(yarn[4]) == 7: # less-than: write 1/0 to the third operand
            x = program[pointer + 1]
            y = program[pointer + 2]
            if first == 0:
                x = program[x]
            if second == 0:
                y = program[y]
            if x < y:
                program[program[pointer+3]] = 1
            else:
                program[program[pointer + 3]] = 0
            pointer += 4
        elif int(yarn[4]) == 8: # equals: write 1/0 to the third operand
            x = program[pointer + 1]
            y = program[pointer + 2]
            if first == 0:
                x = program[x]
            if second == 0:
                y = program[y]
            if x == y:
                program[program[pointer + 3]] = 1
            else:
                program[program[pointer + 3]] = 0
            pointer += 4
        elif int(yarn[4]) == 9:
            # A ones digit of 9 only survives opcode_checker as part of 99: halt.
            return 'DONE', program, 0, output
    else:
        # Invalid opcode: report and abort. (The message typos are
        # pre-existing runtime text, left as-is.)
        print("--- ERORR ---")
        print("@ adress: ", pointer, "which is int: ", opcode)
        return 'DONE', 'ERROR', 0, 0
    return pointer, program, input_received, output
# runs one amp at specified phase setting and input signal
def single_amp(program, input_one, input_two):
    """Run one amplifier program to completion and return (memory, output).

    *input_one* (the phase setting) answers the first input instruction;
    every later input instruction receives *input_two* (the signal).

    Fixes: (1) the intcode is executed on a private copy of *program*,
    because the write opcodes self-modify the list and the original
    implementation let one run corrupt every later run sharing the list;
    (2) *output* is initialised, so a program that halts without ever
    executing an output instruction returns 0 instead of raising
    UnboundLocalError.
    """
    program = list(program)  # keep the caller's program pristine
    pointer = 0
    setting = input_one
    output = 0  # default when the program emits no output
    while True:
        pointer, program, input_received, maybe_output = opcode_processor(pointer, program, setting)
        if input_received != -1:
            # The phase setting has been consumed; later inputs get the signal.
            setting = input_two
        if maybe_output != -1:
            output = maybe_output
        if pointer == 'DONE':
            break
    return program, output
# runs all five amps, with specified phase settings
def test_amp_config(program, amp_setting):
_, output = single_amp(program, int(amp_setting[0]), 0)
_, output = single_amp(program, int(amp_setting[1]), output)
_, output = single_amp(program, int(amp_setting[2]), output)
_, output = single_amp(program, int(amp_setting[3]), output)
_, signal = single_amp(program, int(amp_setting[4]), output)
return signal
# generates all possible amp settings, and finds highest signal return
def phase_setting_checker(program):
# generates all possible settings
# https://stackoverflow.com/questions/104420/how-to-generate-all-permutations-of-a-list
x = list(itertools.permutations([0, 1, 2, 3, 4]))
all_settings = []
for c in x:
c = str(c)
all_settings.append(c[1] + c[4] + c[7] + c[10] + c[13])
highest_signal = 0
for setting in all_settings:
signal = test_amp_config(program, setting)
if signal > highest_signal:
highest_signal = signal
return highest_signal
# main program:
program = file_to_string('input.txt') # change file name here!
all_commas = comma_finder(program)
program = string_to_array(program, all_commas)
# done with file io / formatting
answer = phase_setting_checker(program)
print(answer)
| [
"robert.musser@questu.ca"
] | robert.musser@questu.ca |
76d07d0af3b66039cf6a45daa29221885fca4724 | a00a9591df0c32f12595ac7c1c07ffbfd1185642 | /punctatools/lib/preprocess.py | 57260029e06ed82f4183b50b1169c5a496ff91b0 | [
"Apache-2.0"
] | permissive | stjude/punctatools | 4bcc100620e45c6e1839035cc2f6227d93f3fe7f | 0630b67fdf2d81772b11b95b140468dca20a35de | refs/heads/main | 2023-04-18T04:33:24.882656 | 2022-12-13T17:05:57 | 2022-12-13T17:05:57 | 377,252,061 | 7 | 8 | NOASSERTION | 2022-12-13T17:05:58 | 2021-06-15T18:00:57 | Jupyter Notebook | UTF-8 | Python | false | false | 3,708 | py | import os
from typing import Union
import intake_io
import numpy as np
import pandas as pd
from am_utils.parallel import run_parallel
from am_utils.utils import walk_dir
from tqdm import tqdm
def compute_histogram(dataset):
"""
Compute intensity histogram for a give image.
Parameters
----------
img : xr.Dataset
Input image
Returns
-------
pd.DataFrame:
Histogram as pandas DataFrame
"""
imghist = pd.DataFrame()
for i in range(dataset.dims['c']):
img = dataset.loc[dict(c=dataset.coords['c'][i])]['image'].data
hist, bins = np.histogram(img, bins=np.max(img) + 1, range=(0, np.max(img) + 1))
chist = pd.DataFrame({
'values': bins[:-1],
'counts': hist
})
chist = chist[chist['counts'] > 0]
chist['channel'] = dataset.coords['c'][i].data
imghist = pd.concat([imghist, chist], ignore_index=True)
return imghist
def compute_histogram_batch(input_dir: str, output_dir: str):
"""
Compute intensity histograms for all images in a folder and save as csv.
Parameters
----------
input_dir : str
Input directory
output_dir : str
Output directory
"""
samples = walk_dir(input_dir)
all_hist = pd.DataFrame()
for sample in tqdm(samples):
dataset = intake_io.imload(sample)
imghist = compute_histogram(dataset)
imghist['Image name'] = sample
fn_out = sample.replace(input_dir, output_dir).replace(os.path.splitext(sample)[-1], '.csv')
os.makedirs(os.path.dirname(fn_out), exist_ok=True)
imghist.to_csv(fn_out, index=False)
all_hist = pd.concat([all_hist, imghist], ignore_index=True)
all_hist.to_csv(output_dir.rstrip('/') + '.csv', index=False)
def subtract_background(dataset, bg_value):
bg_value = np.array([bg_value]).ravel()
channels = dataset.coords['c'].data
if len(bg_value) >= len(channels):
for i in range(len(channels)):
img = dataset.loc[dict(c=channels[i])]['image'].data
img = np.clip(img, bg_value[i], None)
dataset['image'].loc[dict(c=channels[i])] = img - bg_value[i]
else:
img = dataset['image'].data
img = np.clip(img, bg_value[0], None)
dataset['image'].data = img - bg_value[0]
return dataset
def __subtract_bg_helper(item, **kwargs):
fn_in, fn_out = item
dataset = intake_io.imload(fn_in)
dataset = subtract_background(dataset, **kwargs)
os.makedirs(os.path.dirname(fn_out), exist_ok=True)
intake_io.imsave(dataset, fn_out)
def subtract_background_batch(input_dir: str, output_dir: str,
bg_value: Union[int, float, list, tuple], n_jobs: int = 8):
"""
Parameters
----------
input_dir : str
Input directory
output_dir : str
Output directory
bg_value : scalar or list
Background values for each channel.
If one value provided, it will be subtracted from all channels.
n_jobs : int, optional
Number of jobs to run in parallel if `parallel` is True
Default: 8
"""
run_parallel(items=[(sample,
sample.replace(input_dir, output_dir))
for sample in walk_dir(input_dir)],
process=__subtract_bg_helper,
max_threads=n_jobs,
bg_value=bg_value)
def rescale_intensity(x, quantiles=(0.0025, 0.9975)):
mn, mx = [np.percentile(x, p * 100) for p in quantiles]
if mx > mn + 5:
return np.clip((x.astype(np.float32) - mn) / (mx - mn), 0, 1)
else:
return np.zeros(x.shape, dtype=np.float32)
| [
"37274810+amedyukhina@users.noreply.github.com"
] | 37274810+amedyukhina@users.noreply.github.com |
cfbdf7c3da7f8b2699eaf24f527932d1c674b6d1 | 4e44c4bbe274b0a8ccca274f29c4140dfad16d5e | /Push2_MIDI_Scripts/decompiled 10.1.2b5 scripts/pushbase/touch_encoder_element.py | f9f76e3eeae43809b8f5db8daf6b10d1825bf8fa | [] | no_license | intergalacticfm/Push2_MIDI_Scripts | b48841e46b7a322f2673259d1b4131d2216f7db6 | a074e2337b2e5d2e5d2128777dd1424f35580ae1 | refs/heads/master | 2021-06-24T15:54:28.660376 | 2020-10-27T11:53:57 | 2020-10-27T11:53:57 | 137,673,221 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,161 | py | # uncompyle6 version 3.0.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.13 (default, Jan 19 2017, 14:48:08)
# [GCC 6.3.0 20170118]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\touch_encoder_element.py
# Compiled at: 2018-11-27 11:59:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface.elements import TouchEncoderElement as TouchEncoderElementBase
class TouchEncoderObserver(object):
u""" Interface for observing the state of one or more TouchEncoderElements """
def on_encoder_touch(self, encoder):
pass
def on_encoder_parameter(self, encoder):
pass
class TouchEncoderElement(TouchEncoderElementBase):
u""" Class representing an encoder that is touch sensitive """
def __init__(self, undo_step_handler=None, delete_handler=None, *a, **k):
super(TouchEncoderElement, self).__init__(*a, **k)
self._trigger_undo_step = False
self._undo_step_open = False
self._undo_step_handler = undo_step_handler
self._delete_handler = delete_handler
self.set_observer(None)
return
def set_observer(self, observer):
if observer is None:
observer = TouchEncoderObserver()
self._observer = observer
return
def on_nested_control_element_value(self, value, control):
self._trigger_undo_step = value
if value:
param = self.mapped_parameter()
if self._delete_handler and self._delete_handler.is_deleting and param:
self._delete_handler.delete_clip_envelope(param)
else:
self.begin_gesture()
self._begin_undo_step()
self._observer.on_encoder_touch(self)
self.notify_touch_value(value)
else:
self._end_undo_step()
self._observer.on_encoder_touch(self)
self.notify_touch_value(value)
self.end_gesture()
def connect_to(self, parameter):
if parameter != self.mapped_parameter():
self.last_mapped_parameter = parameter
super(TouchEncoderElement, self).connect_to(parameter)
self._observer.on_encoder_parameter(self)
def release_parameter(self):
if self.mapped_parameter() != None:
super(TouchEncoderElement, self).release_parameter()
self._observer.on_encoder_parameter(self)
return
def receive_value(self, value):
self._begin_undo_step()
super(TouchEncoderElement, self).receive_value(value)
def disconnect(self):
super(TouchEncoderElement, self).disconnect()
self._undo_step_handler = None
return
def _begin_undo_step(self):
if self._undo_step_handler and self._trigger_undo_step:
self._undo_step_handler.begin_undo_step()
self._trigger_undo_step = False
self._undo_step_open = True
def _end_undo_step(self):
if self._undo_step_handler and self._undo_step_open:
self._undo_step_handler.end_undo_step() | [
"ratsnake.cbs@gmail.com"
] | ratsnake.cbs@gmail.com |
acd9a985926faad6a4fcbdf4d441313cd62cd668 | b0741867b842fe177205c2fd714cabd34652ced4 | /crawling/mmtaobao/sexpic.py | dd4edbee55c824bc1e1e6a92158773afc91f5084 | [] | no_license | zdYng/python | 6737ea43b041f57e0d23598cfa2e5e23d5bd11ff | fd074f5700ec9733958e8640eb63af83aac3001f | refs/heads/master | 2021-07-22T13:50:24.745405 | 2020-04-02T02:15:29 | 2020-04-02T02:15:29 | 93,690,795 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # -*- coding: utf-8 -*-
import requests
import urllib2,re
import os
from mmtaobao.cons import headers
from lxml import etree
from parsel import Selector
import datetime
html =requests.get("http://cl.j4q.pw/htm_data/2/1709/2664044.html")
html.encoding = 'utf-8'
# req = urllib2.Request('http://cl.j4q.pw/htm_data/2/1709/2664044.html')
# req.add_header('user-agent', headers())
# html = urllib2.urlopen(req).read()
print html.content
# select = Selector(html.text)
# content =select.xpath('//div//img/@src')
regt = r'<img src="(.*?)" onclick="(?#...)" style="cursor:pointer>"'
hh = re.findall(regt, html)
print hh
# for imgurl in content:
#
# x=datetime.datetime.now()
#
# name = imgurl[-7:-1]
# os.chdir(r"D://pic")
# req = urllib2.Request(imgurl)
# req.add_header('User-agent', headers())
# #html = urllib2.urlopen(req).read().decode('gbk').encode('utf-8')
# response =urllib2.urlopen(req)
# f = open(name,'wb')
# f.write(response.read())
# f.close()
# y=datetime.datetime.now()
#
# print imgurl,(y-x).seconds
| [
"qianzhongdao@163.com"
] | qianzhongdao@163.com |
3d320782a9808236efa872d44247c0f6d4dd8806 | 246ee82e5e53770c71374e0bc781ccf7b7341634 | /aula6.py | 520c0bd4489a2827d36e79c69a01f7440b9cf398 | [] | no_license | Felipe-builder/Dio_Introducao_PYTHON | f3e281e391977cc34c15033b3dfc8465971408fd | 530ce4f11b5ce23dc78f6994dc5abc5e104e7644 | refs/heads/master | 2023-07-01T16:46:52.373154 | 2021-08-10T21:23:22 | 2021-08-10T21:23:22 | 394,672,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | conjunto = {1, 2, 3, 4, 5}
conjunto2 = {5, 6, 7, 8}
conjunto3 = {1, 2, 3, 4}
conjunto4 = {1, 2, 3, 5}
conjunto_uniao = conjunto.union(conjunto2)
print('União: {}'.format(conjunto_uniao))
conjunto_interseccao = conjunto.intersection(conjunto2)
print('Intersecção: {}'.format(conjunto_interseccao))
conjunto_diferenca1 = conjunto.difference(conjunto2)
conjunto_diferenca2 = conjunto2.difference(conjunto)
print('Diferença entre 1 e 2: {}'.format(conjunto_diferenca1))
print('Diferença entre 2 e 1: {}'.format(conjunto_diferenca2))
conjunto_diff_simetrica1 = conjunto.symmetric_difference(conjunto2)
conjunto_diff_simetrica2 = conjunto3.symmetric_difference(conjunto4)
print('Diferença simétrica 1: {}'.format(conjunto_diff_simetrica1))
print('Diferença simétrica 2: {}'.format(conjunto_diff_simetrica2))
conjunto_a = {1, 2, 3}
conjunto_b = {1, 2, 3, 4, 5}
conjunto_subset1 = conjunto_a.issubset(conjunto_b)
conjunto_subset2 = conjunto_b.issubset(conjunto_a)
print('A é subconjunto de B: {}'.format(conjunto_subset1))
print('B é subconjunto de A: {}'.format(conjunto_subset2))
conjunto_superset1 = conjunto_a.issuperset(conjunto_b)
conjunto_superset2 = conjunto_b.issuperset(conjunto_a)
print('A é superconjunto de B: {}'.format(conjunto_superset1))
print('B é superconjunto de A: {}'.format(conjunto_superset2))
lista = ['cachorro', 'cachorro', 'gato', 'gato', 'elefante']
print(lista)
conjunto_animais = set(lista)
print(conjunto_animais)
lista_animais = list(conjunto_animais)
print(lista_animais)
# conjunto = {1, 2, 3, 4, 4, 2}
# conjunto.add(5)
# print(type(conjunto))
# print(conjunto)
# conjunto.discard(2)
# print(conjunto)
| [
"felipesvascon@gmail.com"
] | felipesvascon@gmail.com |
e2b7d9b4825d95f7b92c5d81dd50fc3bdbd93371 | 13feec69e423695e650d018a1ceca1f6fa83d275 | /training/config.py | dae9c6a0f262c66987ac2e7df5872e677b787141 | [
"Apache-2.0"
] | permissive | OpenImageDenoise/oidn | 4da631f5d9ce32ee632538aa5819bba650a08995 | 5579cd99edfa0839f87ec6960d16dcafcfe0eb31 | refs/heads/master | 2023-09-04T19:03:14.242623 | 2023-06-24T12:06:59 | 2023-06-24T12:06:59 | 168,025,831 | 1,491 | 157 | Apache-2.0 | 2023-07-14T09:19:50 | 2019-01-28T19:48:52 | C++ | UTF-8 | Python | false | false | 10,880 | py | ## Copyright 2018 Intel Corporation
## SPDX-License-Identifier: Apache-2.0
import os
import sys
import argparse
import time
import torch
from util import *
# Returns the main feature from a list of features
def get_main_feature(features):
if len(features) > 1:
features = list(set(features) & {'hdr', 'ldr', 'sh1'})
if len(features) > 1:
error('multiple main features specified')
if not features:
error('no main feature specified')
return features[0]
# Returns the auxiliary features from a list of features
def get_aux_features(features):
main_feature = get_main_feature(features)
return list(set(features).difference([main_feature]))
# Returns the config filename in a directory
def get_config_filename(dir):
return os.path.join(dir, 'config.json')
# Loads the config from a directory
def load_config(dir):
filename = get_config_filename(dir)
cfg = load_json(filename)
return argparse.Namespace(**cfg)
# Saves the config to a directory
def save_config(dir, cfg):
filename = get_config_filename(dir)
save_json(filename, vars(cfg))
# Parses the config from the command line arguments
def parse_args(cmd=None, description=None):
def get_default_device():
return 'cuda' if torch.cuda.is_available() else 'cpu'
if cmd is None:
cmd, _ = os.path.splitext(os.path.basename(sys.argv[0]))
parser = argparse.ArgumentParser(description=description)
parser.usage = '\rIntel(R) Open Image Denoise - Training\n' + parser.format_usage()
advanced = parser.add_argument_group('optional advanced arguments')
parser.add_argument('--config', '-c', type=str, help='load configuration from JSON file (overrides command-line arguments)')
if cmd in {'preprocess', 'train', 'find_lr'}:
parser.add_argument('features', type=str, nargs='*',
choices=['hdr', 'ldr', 'sh1', 'albedo', 'alb', 'normal', 'nrm', []],
help='set of input features')
parser.add_argument('--clean_aux', action='store_true',
help='train with noise-free (reference) auxiliary features')
parser.add_argument('--filter', '-f', type=str,
choices=['RT', 'RTLightmap'],
help='filter to train (determines some default arguments)')
parser.add_argument('--preproc_dir', '-P', type=str, default='preproc',
help='directory of preprocessed datasets')
parser.add_argument('--train_data', '-t', type=str,
help='name of the training dataset')
advanced.add_argument('--transfer', '-x', type=str,
choices=['linear', 'srgb', 'pu', 'log'],
help='transfer function')
if cmd in {'preprocess', 'train'}:
parser.add_argument('--valid_data', '-v', type=str,
help='name of the validation dataset')
if cmd in {'preprocess', 'infer'}:
parser.add_argument('--data_dir', '-D', type=str, default='data',
help='directory of datasets (e.g. training, validation, test)')
if cmd in {'train', 'find_lr', 'infer', 'export', 'visualize'}:
parser.add_argument('--results_dir', '-R', type=str, default='results',
help='directory of training results')
parser.add_argument('--result', '-r', type=str, required=(not cmd in {'train', 'find_lr'}),
help='name of the training result')
if cmd in {'infer'}:
parser.add_argument('--aux_results', '-a', type=str, nargs='*', default=[],
help='prefilter auxiliary features using the specified training results')
if cmd in {'train', 'infer', 'export'}:
parser.add_argument('--num_epochs', '--epochs', '-e', type=int,
default=(2000 if cmd == 'train' else None),
help='number of training epochs')
if cmd in {'train'}:
parser.add_argument('--num_valid_epochs', '--valid_epochs', type=int, default=10,
help='perform validation every this many epochs')
parser.add_argument('--num_save_epochs', '--save_epochs', type=int, default=10,
help='save checkpoints every this many epochs')
parser.add_argument('--lr', '--learning_rate', type=float,
help='initial learning rate')
parser.add_argument('--max_lr', '--max_learning_rate', type=float,
help='maximum learning rate')
parser.add_argument('--lr_warmup', '--learning_rate_warmup', type=float, default=0.15,
help='the percentage of the cycle spent increasing the learning rate (warm-up)')
if cmd in {'find_lr'}:
parser.add_argument('--lr', '--learning_rate', type=float, default=1e-8,
help='minimum learning rate')
parser.add_argument('--max_lr', '--max_learning_rate', type=float, default=0.1,
help='maximum learning rate')
if cmd in {'train', 'find_lr'}:
parser.add_argument('--batch_size', '--bs', '-b', type=int, default=16,
help='mini-batch size (total batch size of all devices)')
parser.add_argument('--num_loaders', '--loaders', '-j', type=int, default=4,
help='number of data loader threads per device')
parser.add_argument('--precision', '-p', type=str, choices=['fp32', 'mixed'],
help='training precision')
advanced.add_argument('--model', '-m', type=str, choices=['unet'], default='unet',
help='network model')
advanced.add_argument('--loss', '-l', type=str,
choices=['l1', 'mape', 'smape', 'l2', 'ssim', 'msssim', 'l1_msssim', 'l1_grad'],
default='l1_msssim',
help='loss function')
advanced.add_argument('--msssim_weights', type=float, nargs='*',
help='MS-SSIM scale weights')
advanced.add_argument('--tile_size', '--ts', type=int, default=256,
help='size of the cropped image tiles')
advanced.add_argument('--seed', '-s', type=int,
help='seed for random number generation')
if cmd in {'infer', 'compare_image'}:
parser.add_argument('--metric', '-M', type=str, nargs='*',
choices=['psnr', 'mse', 'ssim', 'msssim'], default=['psnr', 'ssim'],
help='metrics to compute')
if cmd in {'infer'}:
parser.add_argument('--input_data', '-i', type=str, default='test',
help='name of the input dataset')
parser.add_argument('--output_dir', '-O', type=str, default='infer',
help='directory of output images')
parser.add_argument('--output_suffix', '-o', type=str,
help='suffix of the output image names')
parser.add_argument('--format', '-F', type=str, nargs='*', default=['exr'],
help='output image formats')
parser.add_argument('--save_all', action='store_true',
help='save input and target images too')
if cmd in {'export'}:
parser.add_argument('target', type=str, nargs='?',
choices=['weights', 'package'], default='weights',
help='what to export')
parser.add_argument('--output', '-o', type=str,
help='output file')
if cmd in {'convert_image', 'split_exr'}:
parser.add_argument('input', type=str,
help='input image')
if cmd in {'compare_image'}:
parser.add_argument('input', type=str, nargs=2,
help='input images')
if cmd in {'convert_image'}:
parser.add_argument('output', type=str,
help='output image')
if cmd in {'convert_image', 'compare_image'}:
parser.add_argument('--exposure', '-E', type=float, default=1.,
help='linear exposure scale for HDR image')
if cmd in {'split_exr'}:
parser.add_argument('--layer', type=str,
help='name of the image layer')
if cmd in {'preprocess', 'train', 'find_lr', 'infer', 'export'}:
parser.add_argument('--device', '-d', type=str,
choices=['cpu', 'cuda'], default=get_default_device(),
help='type of device(s) to use')
parser.add_argument('--device_id', '-k', type=int, default=0,
help='ID of the first device to use')
parser.add_argument('--num_devices', '-n', type=int, default=1,
help='number of devices to use (with IDs device_id .. device_id+num_devices-1)')
advanced.add_argument('--deterministic', '--det', action='store_true',
default=(cmd in {'preprocess', 'infer', 'export'}),
help='makes computations deterministic (slower performance)')
cfg = parser.parse_args()
# Load and apply configuration from file if specified
if cfg.config is not None:
cfg_dict = vars(cfg)
cfg_dict.update(load_json(cfg.config))
cfg = argparse.Namespace(**cfg_dict)
if cmd in {'preprocess', 'train', 'find_lr'}:
# Check the filter
if cfg.filter is None:
warning('filter not specified, using generic default arguments')
# Replace feature names with IDs
FEATURE_IDS = {'albedo' : 'alb', 'normal' : 'nrm'}
cfg.features = [FEATURE_IDS.get(f, f) for f in cfg.features]
# Remove duplicate features
cfg.features = list(dict.fromkeys(cfg.features).keys())
# Set the default transfer function
if cfg.transfer is None:
main_feature = get_main_feature(cfg.features)
if main_feature == 'hdr':
cfg.transfer = 'log' if cfg.filter == 'RTLightmap' else 'pu'
elif main_feature in {'ldr', 'alb'}:
cfg.transfer = 'srgb'
else:
cfg.transfer = 'linear'
# Set the default datasets
if cfg.train_data is None and (cmd == 'find_lr' or cfg.valid_data is None):
cfg.train_data = 'train'
if cmd != 'find_lr':
cfg.valid_data = 'valid'
if cmd in {'train', 'find_lr'}:
# Check the batch size
if cfg.batch_size % cfg.num_devices != 0:
parser.error('batch_size is not divisible by num_devices')
# Set the default result name (generated)
if cfg.result is None:
cfg.result = WORKER_UID
# Set the default MS-SSIM weights
if cfg.msssim_weights is None:
if cfg.filter == 'RT':
cfg.msssim_weights = [0.2, 0.2, 0.2, 0.2, 0.2]
if cmd in {'train'}:
# Set the default training precision
if cfg.precision is None:
cfg.precision = 'mixed' if cfg.device == 'cuda' else 'fp32'
# Set the default maximum learning rate
if cfg.max_lr is None:
cfg.max_lr = 3.125e-6 * cfg.batch_size
# Print PyTorch version
print('PyTorch:', torch.__version__)
return cfg | [
"attila.t.afra@intel.com"
] | attila.t.afra@intel.com |
0e3aebd5a6b8e7490e4f7f478497e0a2e46b2f3d | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/plone.app.upgrade-1.3.4-py2.7.egg/plone/app/upgrade/v40/tests.py | 5d20ec6119c77470822fbbc82a2aec777d5bd649 | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,186 | py | import time
from zope.component import getSiteManager, queryUtility
from zope.ramcache.interfaces.ram import IRAMCache
from Products.CMFCore.ActionInformation import Action
from Products.CMFCore.Expression import Expression
from Products.CMFCore.utils import getToolByName
from Products.MailHost.interfaces import IMailHost
from plone.app.upgrade.utils import loadMigrationProfile
from plone.app.upgrade.v40.alphas import _KNOWN_ACTION_ICONS
from plone.app.upgrade.v40.alphas import migrateActionIcons
from plone.app.upgrade.v40.alphas import migrateTypeIcons
from plone.app.upgrade.v40.alphas import addOrReplaceRamCache
from plone.app.upgrade.v40.alphas import changeWorkflowActorVariableExpression
from plone.app.upgrade.v40.alphas import changeAuthenticatedResourcesCondition
from plone.app.upgrade.v40.alphas import setupReferencebrowser
from plone.app.upgrade.v40.alphas import migrateMailHost
from plone.app.upgrade.v40.alphas import migrateFolders
from plone.app.upgrade.v40.alphas import renameJoinFormFields
from plone.app.upgrade.v40.alphas import updateLargeFolderType
from plone.app.upgrade.v40.alphas import addRecursiveGroupsPlugin
from plone.app.upgrade.v40.alphas import cleanUpClassicThemeResources
from plone.app.upgrade.v40.betas import repositionRecursiveGroupsPlugin
from plone.app.upgrade.v40.betas import updateIconMetadata
from plone.app.upgrade.v40.betas import removeLargePloneFolder
from plone.app.upgrade.tests.base import MigrationTest
class FakeSecureMailHost(object):
    """Minimal stand-in for Products.SecureMailHost's mailer.

    Carries just the attributes that the ``migrateMailHost`` upgrade step
    reads when converting a Secure Mail Host into a plain Mail Host.
    All settings are class-level constants so every instance looks alike.
    """

    # Identification, as Zope's object machinery expects it.
    meta_type = 'Secure Mail Host'
    id = 'MailHost'
    title = 'Fake MailHost'

    # SMTP connection settings copied over by the migration.
    smtp_host = 'smtp.example.com'
    smtp_port = 587
    smtp_userid = 'me'
    smtp_pass = 'secret'
    smtp_notls = False

    def manage_fixupOwnershipAfterAdd(self):
        """No-op hook; Zope calls this when the object is added."""
        return None
class TestMigrations_v4_0alpha1(MigrationTest):
profile = "profile-plone.app.upgrade.v40:3-4alpha1"
def afterSetUp(self):
self.atool = getToolByName(self.portal, 'portal_actions')
self.aitool = getToolByName(self.portal, 'portal_actionicons')
self.cptool = getToolByName(self.portal, 'portal_controlpanel')
self.wftool = getToolByName(self.portal, 'portal_workflow')
self.csstool = getToolByName(self.portal, 'portal_css')
self.jstool = getToolByName(self.portal, 'portal_javascripts')
def testProfile(self):
# This tests the whole upgrade profile can be loaded
self.setRoles(['Manager'])
loadMigrationProfile(self.portal, self.profile)
self.failUnless(True)
def testMigrateActionIcons(self):
_KNOWN_ACTION_ICONS['object_buttons'].extend(['test_id', 'test2_id'])
self.aitool.addActionIcon(
category='object_buttons',
action_id='test_id',
icon_expr='test.gif',
title='Test my icon',
)
self.aitool.addActionIcon(
category='object_buttons',
action_id='test2_id',
icon_expr='python:context.getIcon()',
title='Test my second icon',
)
test_action = Action('test_id',
title='Test me',
description='',
url_expr='',
icon_expr='',
available_expr='',
permissions=('View', ),
visible = True)
test2_action = Action('test2_id',
title='Test me too',
description='',
url_expr='',
icon_expr='',
available_expr='',
permissions=('View', ),
visible = True)
object_buttons = self.atool.object_buttons
if getattr(object_buttons, 'test_id', None) is None:
object_buttons._setObject('test_id', test_action)
if getattr(object_buttons, 'test2_id', None) is None:
object_buttons._setObject('test2_id', test2_action)
self.assertEqual(object_buttons.test_id.icon_expr, '')
self.assertEqual(object_buttons.test2_id.icon_expr, '')
self.assertEqual(
self.aitool.getActionIcon('object_buttons', 'test_id'),
'test.gif')
# Test it twice
for i in range(2):
migrateActionIcons(self.portal)
icons = [ic.getActionId() for ic in self.aitool.listActionIcons()]
self.failIf('test_id' in icons)
self.failIf('test2_id' in icons)
self.assertEqual(object_buttons.test_id.icon_expr,
'string:$portal_url/test.gif')
self.assertEqual(object_buttons.test2_id.icon_expr,
'python:context.getIcon()')
def testMigrateControlPanelActionIcons(self):
_KNOWN_ACTION_ICONS['controlpanel'].extend(['test_id'])
self.aitool.addActionIcon(
category='controlpanel',
action_id='test_id',
icon_expr='test.gif',
title='Test my icon',
)
self.cptool.registerConfiglet(
id='test_id',
name='Test Configlet',
action='string:${portal_url}/test',
permission='Manage portal',
category='Plone',
visible=True,
appId='',
icon_expr='',
)
action = self.cptool.getActionObject('Plone/test_id')
self.assertEqual(action.getIconExpression(), '')
self.assertEqual(self.aitool.getActionIcon('controlpanel', 'test_id'),
'test.gif')
# Test it twice
for i in range(2):
migrateActionIcons(self.portal)
icons = [ic.getActionId() for ic in self.aitool.listActionIcons()]
self.failIf('test_id' in icons)
self.assertEqual(action.getIconExpression(),
'string:$portal_url/test.gif')
def testContentTypeIconExpressions(self):
"""
FTIs should now be using icon_expr instead of content_icon.
(The former caches the expression object.)
"""
tt = getToolByName(self.portal, "portal_types")
tt.Document.icon_expr = None
loadMigrationProfile(self.portal, self.profile, ('typeinfo', ))
self.assertEqual(tt.Document.icon_expr,
"string:${portal_url}/document_icon.png")
def testMigrateTypeIcons(self):
"""
FTIs having content_icon should be upgraded to icon_expr.
"""
tt = getToolByName(self.portal, "portal_types")
del tt.Document.icon_expr
tt.Document.content_icon = 'document_icon.gif'
migrateTypeIcons(self.portal)
self.assertEqual(tt.Document.icon_expr,
"string:${portal_url}/document_icon.gif")
self.assertTrue(hasattr(tt.Document, 'icon_expr_object'))
#Don't upgrade if there is already an icon_expr.
tt.Document.icon_expr = "string:${portal_url}/document_icon.png"
tt.Document.content_icon = 'document_icon.gif'
migrateTypeIcons(self.portal)
self.assertEqual(tt.Document.icon_expr,
"string:${portal_url}/document_icon.png")
def testPngContentIcons(self):
tt = getToolByName(self.portal, "portal_types")
tt.Document.icon_expr = "string:${portal_url}/document_icon.gif"
loadMigrationProfile(self.portal, self.profile, ('typeinfo', ))
self.assertEqual(tt.Document.icon_expr,
"string:${portal_url}/document_icon.png")
def testAddRAMCache(self):
# Test it twice
for i in range(2):
sm = getSiteManager()
sm.unregisterUtility(provided=IRAMCache)
util = queryUtility(IRAMCache)
self.assertEqual(util.maxAge, 86400)
addOrReplaceRamCache(self.portal)
util = queryUtility(IRAMCache)
self.assertEqual(util.maxAge, 3600)
def testReplaceOldRamCache(self):
sm = getSiteManager()
# Test it twice
for i in range(2):
sm.unregisterUtility(provided=IRAMCache)
from zope.app.cache.interfaces.ram import IRAMCache as OldIRAMCache
from zope.app.cache.ram import RAMCache as OldRAMCache
sm.registerUtility(factory=OldRAMCache, provided=OldIRAMCache)
addOrReplaceRamCache(self.portal)
util = queryUtility(IRAMCache)
self.assertEqual(util.maxAge, 3600)
def testChangeWorkflowActorVariableExpression(self):
self.wftool.intranet_folder_workflow.variables.actor.setProperties('')
for i in range(2):
changeWorkflowActorVariableExpression(self.portal)
wf = self.wftool.intranet_folder_workflow
self.assertEqual(wf.variables.actor.getDefaultExprText(),
'user/getId')
wf = self.wftool.one_state_workflow
self.assertEqual(wf.variables.actor.getDefaultExprText(),
'user/getId')
wf = self.wftool.simple_publication_workflow
self.assertEqual(wf.variables.actor.getDefaultExprText(),
'user/getId')
# make sure it doesn't break if the workflow is missing
wf = self.wftool.intranet_folder_workflow
self.wftool._delOb('intranet_folder_workflow')
changeWorkflowActorVariableExpression(self.portal)
self.wftool._setOb('intranet_folder_workflow', wf)
def testChangeAuthenticatedResourcesCondition(self):
# make sure CSS resource is updated
res = self.csstool.getResource('member.css')
res.setAuthenticated(False)
res.setExpression('not: portal/portal_membership/isAnonymousUser')
# test it twice
for i in range(2):
changeAuthenticatedResourcesCondition(self.portal)
self.assertEqual(res.getExpression(), '')
self.failUnless(res.getAuthenticated())
# make sure it doesn't update it if the expression has been
# customized
res.setExpression('python:False')
changeAuthenticatedResourcesCondition(self.portal)
self.assertEqual(res.getExpression(), 'python:False')
def testAddedUseEmailProperty(self):
tool = getToolByName(self.portal, 'portal_properties')
sheet = getattr(tool, 'site_properties')
#self.assertEqual(sheet.getProperty('use_email_as_login'), False)
self.removeSiteProperty('use_email_as_login')
loadMigrationProfile(self.portal, self.profile, ('propertiestool', ))
self.assertEqual(sheet.getProperty('use_email_as_login'), False)
def testReplaceReferencebrowser(self):
self.setRoles(['Manager'])
skins_tool = getToolByName(self.portal, 'portal_skins')
sels = skins_tool._getSelections()
for skinname, layer in sels.items():
layers = layer.split(',')
self.failIf('ATReferenceBrowserWidget' in layers)
layers.remove('referencebrowser')
new_layers = ','.join(layers)
sels[skinname] = new_layers
loadMigrationProfile(self.portal, self.profile)
setupReferencebrowser(self.portal)
sels = skins_tool._getSelections()
for skinname, layer in sels.items():
layers = layer.split(',')
self.failUnless('referencebrowser' in layers)
def testInstallNewDependencies(self):
self.setRoles(['Manager'])
# test for running the TinyMCE profile by checking for the skin layer
# it installs (the profile is marked as noninstallable, so we can't
# ask the quick installer)
skins_tool = getToolByName(self.portal, 'portal_skins')
del skins_tool['tinymce']
for i in range(2):
loadMigrationProfile(self.portal, self.profile)
self.failUnless('tinymce' in skins_tool)
# sleep to avoid a GS log filename collision :-o
time.sleep(1)
def testNewJSIsInstalled(self):
    """All javascript resources introduced in this release must be
    registered in portal_javascripts after the upgrade.
    """
    installedScriptIds = self.jstool.getResourceIds()
    expected = [
        # js resources that are part of plone.app.jquerytools
        '++resource++plone.app.jquerytools.js',
        '++resource++plone.app.jquerytools.overlayhelpers.js',
        # js resource that is new in CMFPlone
        'popupforms.js']
    for e in expected:
        # pass the resource id as the msg so a failure names the culprit
        self.failUnless(e in installedScriptIds, e)
def testReplaceSecureMailHost(self):
    """migrateMailHost must replace a 'Secure Mail Host' with a plain
    'Mail Host', carrying over title/host/port/credentials.
    """
    portal = self.portal
    sm = getSiteManager(context=portal)
    # try it with an unmodified site to ensure it doesn't give any errors
    migrateMailHost(portal.portal_setup)
    portal._delObject('MailHost')
    # Run it with our MailHost replaced
    portal._setObject('MailHost', FakeSecureMailHost())
    self.assertEqual(portal.MailHost.meta_type, 'Secure Mail Host')
    # re-register the fake as the IMailHost utility so the migration finds it
    sm.unregisterUtility(provided=IMailHost)
    sm.registerUtility(portal.MailHost, provided=IMailHost)
    migrateMailHost(portal)
    new_mh = portal.MailHost
    self.failUnlessEqual(new_mh.meta_type, 'Mail Host')
    # all settings of the fake SMH must have been carried over
    self.failUnlessEqual(new_mh.title, 'Fake MailHost')
    self.failUnlessEqual(new_mh.smtp_host, 'smtp.example.com')
    self.failUnlessEqual(new_mh.smtp_port, 587)
    self.failUnlessEqual(new_mh.smtp_uid, 'me')
    self.failUnlessEqual(new_mh.smtp_pwd, 'secret')
    #Force TLS is always false, because SMH has no equivalent option
    self.failUnlessEqual(new_mh.force_tls, False)
def testFolderMigration(self):
    """migrateFolders must convert a pre-BTree folder into a sane
    BTree-based folder while preserving its id and title.
    """
    from plone.app.folder.tests.content import create
    from plone.app.folder.tests.test_migration import reverseMigrate
    from plone.app.folder.tests.test_migration import isSaneBTreeFolder
    # create a folder in an unmigrated state & check it's broken...
    folder = create('Folder', self.portal, 'foo', title='Foo')
    reverseMigrate(self.portal)
    self.failIf(isSaneBTreeFolder(self.portal.foo))
    # now run the migration step...
    migrateFolders(self.portal)
    folder = self.portal.foo
    self.failUnless(isSaneBTreeFolder(folder))
    # content metadata must survive the migration
    self.assertEqual(folder.getId(), 'foo')
    self.assertEqual(folder.Title(), 'Foo')
class TestMigrations_v4_0alpha2(MigrationTest):
    """Upgrade-step tests for Plone 4.0alpha1 -> 4.0alpha2."""

    def testMigrateJoinFormFields(self):
        """renameJoinFormFields must replace the old 'join_form_fields'
        property with 'user_registration_fields', dropping the obsolete
        'groups' entry in the process.
        """
        ptool = getToolByName(self.portal, 'portal_properties')
        sheet = getattr(ptool, 'site_properties')
        # recreate the pre-migration property layout
        self.removeSiteProperty('user_registration_fields')
        self.addSiteProperty('join_form_fields')
        sheet.join_form_fields = (
            'username', 'password', 'email', 'mail_me', 'groups')
        # NOTE(review): passes the test case itself rather than
        # self.portal — presumably the step only needs a context to
        # acquire tools from; confirm against the step's signature.
        renameJoinFormFields(self)
        self.assertEqual(sheet.hasProperty('join_form_fields'), False)
        self.assertEqual(sheet.hasProperty('user_registration_fields'), True)
        # 'groups' must not be carried over
        self.assertEqual(sheet.getProperty('user_registration_fields'),
                         ('username', 'password', 'email', 'mail_me'))
class TestMigrations_v4_0alpha3(MigrationTest):
    """Upgrade-step tests for Plone 4.0alpha2 -> 4.0alpha3."""

    profile = "profile-plone.app.upgrade.v40:4alpha2-4alpha3"

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)

    def testJoinActionURL(self):
        """The actions import step must reset the join action URL to the
        new @@register view, overwriting any existing expression.
        """
        self.portal.portal_actions.user.join.url_expr = 'foo'
        loadMigrationProfile(self.portal, self.profile, ('actions', ))
        self.assertEqual(self.portal.portal_actions.user.join.url_expr,
                         'string:${globals_view/navigationRootUrl}/@@register')
class TestMigrations_v4_0alpha5(MigrationTest):
    """Upgrade-step tests for Plone 4.0alpha4 -> 4.0alpha5."""

    profile = "profile-plone.app.upgrade.v40:4alpha4-4alpha5"

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)

    def testMigrateLargeFolderType(self):
        """updateLargeFolderType must turn 'Large Plone Folder' objects
        into plain 'Folder's, both on the objects and in the catalog.
        """
        portal = self.portal
        catalog = getToolByName(portal, 'portal_catalog')
        # set things up in the old way...
        ids = 'news', 'events', 'Members'
        for id in ids:
            obj = portal[id]
            obj._setPortalTypeName('Large Plone Folder')
            obj.reindexObject()
            self.assertEquals(obj.portal_type, 'Large Plone Folder')
            # Type falls back to meta_type since there's no
            # Large Plone Folder FTI
            self.assertEquals(obj.Type(), 'ATFolder')
            brain, = catalog(getId=id)
            self.assertEquals(brain.portal_type, 'Large Plone Folder')
            self.assertEquals(brain.Type, 'ATFolder')
        # migrate & check again...
        updateLargeFolderType(self.portal)
        for id in ids:
            obj = portal[id]
            self.assertEquals(obj.portal_type, 'Folder')
            self.assertEquals(obj.Type(), 'Folder')
            brain, = catalog(getId=id)
            self.assertEquals(brain.portal_type, 'Folder')
            self.assertEquals(brain.Type, 'Folder')

    def testAddRecursiveGroupsPlugin(self):
        """addRecursiveGroupsPlugin must install the PAS plugin and be
        safe to run more than once.
        """
        acl = getToolByName(self.portal, 'acl_users')
        addRecursiveGroupsPlugin(self.portal)
        self.failUnless('recursive_groups' in acl)
        # Now that we have an existing one, let's make sure it's handled
        # properly if this migration is run again.
        addRecursiveGroupsPlugin(self.portal)
        self.failUnless('recursive_groups' in acl)

    def testClassicThemeResourcesCleanUp(self):
        """Test that the plonetheme.classic product doesn't have any
        registered CSS resource in its metadata after migration.
        """
        portal = self.portal
        qi = getToolByName(portal, 'portal_quickinstaller')
        qi.installProduct('plonetheme.classic')
        classictheme = qi['plonetheme.classic']
        classictheme.resources_css = ['something']  # add a random resource
        cleanUpClassicThemeResources(portal)
        self.failUnlessEqual(classictheme.resources_css, [])

    def testGetObjPositionInParentIndex(self):
        """The profile must replace a plain FieldIndex for
        getObjPositionInParent with plone.app.folder's GopipIndex.
        """
        from plone.app.folder.nogopip import GopipIndex
        catalog = self.portal.portal_catalog
        # recreate the old index type first
        catalog.delIndex('getObjPositionInParent')
        catalog.addIndex('getObjPositionInParent', 'FieldIndex')
        self.failIf(isinstance(catalog.Indexes['getObjPositionInParent'],
                               GopipIndex))
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless('getObjPositionInParent' in catalog.indexes())
        self.failUnless(isinstance(catalog.Indexes['getObjPositionInParent'],
                                   GopipIndex))

    def testGetEventTypeIndex(self):
        """The profile must remove the obsolete getEventType index."""
        catalog = self.portal.portal_catalog
        catalog.addIndex('getEventType', 'KeywordIndex')
        self.failUnless('getEventType' in catalog.indexes())
        loadMigrationProfile(self.portal, self.profile)
        self.failIf('getEventType' in catalog.indexes())
class TestMigrations_v4_0beta1(MigrationTest):
    """Upgrade-step tests for Plone 4.0alpha5 -> 4.0beta1."""

    profile = "profile-plone.app.upgrade.v40:4alpha5-4beta1"

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)

    def testRepositionRecursiveGroupsPlugin(self):
        # Ensure that the recursive groups plugin is moved to the bottom
        # of the IGroups plugins list, if active.
        addRecursiveGroupsPlugin(self.portal)
        # Plugin is installed, but not active, run against this state.
        from Products.PluggableAuthService.interfaces.plugins import \
            IGroupsPlugin
        acl = getToolByName(self.portal, 'acl_users')
        plugins = acl.plugins
        # The plugin was originally moved to the top of the list of
        # IGroupsPlugin plugins by p.a.controlpanel. Recreate that state.
        while (plugins.getAllPlugins('IGroupsPlugin')['active'].index(
               'recursive_groups') > 0):
            plugins.movePluginsUp(IGroupsPlugin, ['recursive_groups'])
        active_groups = plugins.getAllPlugins('IGroupsPlugin')['active']
        self.assertEqual(active_groups[0], 'recursive_groups')
        # Rerun the migration, making sure that it's now the last item in the
        # list of IGroupsPlugin plugins.
        repositionRecursiveGroupsPlugin(self.portal)
        active_groups = plugins.getAllPlugins('IGroupsPlugin')['active']
        self.assertEqual(active_groups[-1], 'recursive_groups')
class TestMigrations_v4_0beta2(MigrationTest):
    """Upgrade-step tests for Plone 4.0beta1 -> 4.0beta2."""

    profile = "profile-plone.app.upgrade.v40:4beta1-4beta2"

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)

    def testCoreContentIconExprCleared(self):
        """updateIconMetadata must clear the catalog's getIcon column for
        core content types without touching the modification date.
        """
        types = getToolByName(self.portal, 'portal_types')
        catalog = getToolByName(self.portal, 'portal_catalog')
        # Reinstate the now-empty icon expression for the Document type
        doc_icon_expr = Expression('string:${portal_url}/document_icon.png')
        types['Document'].icon_expr_object = doc_icon_expr
        front = self.portal['front-page']
        catalog.reindexObject(front)
        old_modified = front.modified()
        # Make sure the getIcon metadata column shows the "original" value
        brains = catalog(id='front-page')
        self.assertEqual(brains[0].getIcon, 'document_icon.png')
        # Run the migration
        loadMigrationProfile(self.portal, self.profile)
        updateIconMetadata(self.portal)
        # The getIcon column should now be empty
        self.assertEqual(catalog(id='front-page')[0].getIcon, '')
        # ...and the object must not look modified by the migration
        self.assertEquals(front.modified(), old_modified)
class TestMigrations_v4_0beta4(MigrationTest):
    """Upgrade-step tests for Plone 4.0beta3 -> 4.0beta4."""

    profile = 'profile-plone.app.upgrade.v40:4beta3-4beta4'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)

    def testRemoveLargePloneFolder(self):
        """removeLargePloneFolder must purge every remaining reference to
        the 'Large Plone Folder' type (FTI, factory types, navtree and
        site properties), replacing it with 'Folder' where appropriate.
        Run twice to verify idempotence.
        """
        # re-create pre-migration settings
        ptool = self.portal.portal_properties
        nav_props = ptool.navtree_properties
        l = list(nav_props.parentMetaTypesNotToQuery)
        nav_props.parentMetaTypesNotToQuery = l + ['Large Plone Folder']
        site_props = ptool.site_properties
        l = list(site_props.typesLinkToFolderContentsInFC)
        site_props.typesLinkToFolderContentsInFC = l + ['Large Plone Folder']
        temp_folder_fti = self.portal.portal_types['TempFolder']
        l = list(temp_folder_fti.allowed_content_types)
        temp_folder_fti.allowed_content_types = l + ['Large Plone Folder']
        l = set(self.portal.portal_factory.getFactoryTypes())
        l.add('Large Plone Folder')
        ftool = self.portal.portal_factory
        ftool.manage_setPortalFactoryTypes(listOfTypeIds=list(l))
        for i in xrange(2):
            loadMigrationProfile(self.portal, self.profile)
            removeLargePloneFolder(self.portal)
            self.failIf('Large Plone Folder' in self.portal.portal_types)
            self.failIf('Large Plone Folder' in
                        temp_folder_fti.allowed_content_types)
            self.failUnless('Folder' in temp_folder_fti.allowed_content_types)
            self.failIf('Large Plone Folder' in ftool.getFactoryTypes())
            self.failUnless('Folder' in ftool.getFactoryTypes())
            self.failIf('Large Plone Folder' in
                        nav_props.parentMetaTypesNotToQuery)
            self.failUnless('TempFolder' in
                            nav_props.parentMetaTypesNotToQuery)
            self.failIf('Large Plone Folder' in
                        site_props.typesLinkToFolderContentsInFC)
            self.failUnless('Folder' in
                            site_props.typesLinkToFolderContentsInFC)
            # sleep to avoid a GS log filename collision :-o
            time.sleep(1)
class TestMigrations_v4_0beta5(MigrationTest):
    """Upgrade-step tests for Plone 4.0beta4 -> 4.0beta5."""

    profile = 'profile-plone.app.upgrade.v40:4beta4-4beta5'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0rc1(MigrationTest):
    """Upgrade-step tests for Plone 4.0beta5 -> 4.0rc1."""

    profile = 'profile-plone.app.upgrade.v40:4beta5-4rc1'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0(MigrationTest):
    """Upgrade-step tests for Plone 4.0rc1 -> 4.0 final."""

    profile = 'profile-plone.app.upgrade.v40:4rc1-4final'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_1(MigrationTest):
    """Upgrade-step tests for Plone 4.0 -> 4.0.1."""

    profile = 'profile-plone.app.upgrade.v40:4.0-4.0.1'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_2(MigrationTest):
    """Upgrade-step tests for Plone 4.0.1 -> 4.0.2."""

    profile = 'profile-plone.app.upgrade.v40:4.0.1-4.0.2'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_3(MigrationTest):
    """Upgrade-step tests for Plone 4.0.2 -> 4.0.3."""

    profile = 'profile-plone.app.upgrade.v40:4.0.2-4.0.3'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_4(MigrationTest):
    """Upgrade-step tests for Plone 4.0.3 -> 4.0.4."""

    profile = 'profile-plone.app.upgrade.v40:4.0.3-4.0.4'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
class TestMigrations_v4_0_5(MigrationTest):
    """Upgrade-step tests for Plone 4.0.4 -> 4.0.5."""

    profile = 'profile-plone.app.upgrade.v40:4.0.4-4.0.5'

    def testProfile(self):
        # This tests the whole upgrade profile can be loaded
        loadMigrationProfile(self.portal, self.profile)
        self.failUnless(True)
def test_suite():
    """Collect every test in this module for the Zope test runner."""
    import unittest
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
| [
"gso@abv.bg"
] | gso@abv.bg |
2a4891fc504a6b60e310d8e66dfe03173c3f98d5 | 6a2a6408be018ba2772a2888c8b3a7ee6838ddeb | /weechat/python/wee_slack.py | 820f99f2061d275436bb6a7bf7d249a53139de9d | [] | no_license | gicmo/dot-files | c5b4598ffa399936f7d149039e558a89f5de7239 | 6ca9343cad5612e3c6daa61a7c80aa8bbfa01e28 | refs/heads/master | 2023-04-06T07:48:14.453990 | 2023-03-27T14:20:27 | 2023-03-27T14:20:27 | 41,631,064 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 149,634 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from functools import wraps
from itertools import islice
import textwrap
import time
import json
import pickle
import sha
import os
import re
import urllib
import sys
import traceback
import collections
import ssl
import random
import string
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from websocket import create_connection, WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat
except:
pass
# --- weechat script registration metadata -------------------------------
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <rhuber@gmail.com>"
SCRIPT_VERSION = "2.0.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"

# Tunables. BACKLOG_SIZE/SCROLLBACK_SIZE presumably bound how much channel
# history is fetched/kept — confirm against the history-fetch call sites.
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500

# Directory used by the EventRouter's debug recorder to dump raw JSON events.
RECORD_DIR = "/tmp/weeslack-debug"
# Maps a channel kind ("channel"/"im"/"mpim"/"group"/"thread") to the Slack
# Web API method used for each common operation (history/join/leave/mark).
# Threads have no corresponding API calls, hence the None entries.
SLACK_API_TRANSLATOR = {
    "channel": {
        "history": "channels.history",
        "join": "conversations.join",
        "leave": "conversations.leave",
        "mark": "channels.mark",
        "info": "channels.info",
    },
    "im": {
        "history": "im.history",
        "join": "conversations.open",
        "leave": "conversations.close",
        "mark": "im.mark",
    },
    "mpim": {
        "history": "mpim.history",
        "join": "mpim.open",  # conversations.open lacks unread_count_display
        "leave": "conversations.close",
        "mark": "mpim.mark",
        "info": "groups.info",
    },
    "group": {
        "history": "groups.history",
        "join": "conversations.join",
        "leave": "conversations.leave",
        "mark": "groups.mark",
        "info": "groups.info"
    },
    "thread": {
        "history": None,
        "join": None,
        "leave": None,
        "mark": None,
    }
}
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
    """
    Decorator: run the wrapped weechat callback only when the current
    buffer belongs to slack; otherwise silently return OK.
    """
    @wraps(f)
    def only_on_slack_buffers(data, current_buffer, *args, **kwargs):
        if current_buffer in EVENTROUTER.weechat_controller.buffers:
            return f(data, current_buffer, *args, **kwargs)
        return w.WEECHAT_RC_OK
    return only_on_slack_buffers
def slack_buffer_required(f):
    """
    Decorator: run the wrapped weechat callback only when the current
    buffer belongs to slack; otherwise report an error return code.
    """
    @wraps(f)
    def only_on_slack_buffers(data, current_buffer, *args, **kwargs):
        if current_buffer in EVENTROUTER.weechat_controller.buffers:
            return f(data, current_buffer, *args, **kwargs)
        return w.WEECHAT_RC_ERROR
    return only_on_slack_buffers
def utf8_decode(f):
    """
    Decorator that decodes every positional and keyword argument from
    UTF-8 byte strings to unicode before calling the wrapped function.
    Apply it to functions invoked from outside this script, e.g. weechat
    callbacks.
    """
    @wraps(f)
    def decoding_wrapper(*args, **kwargs):
        decoded_args = decode_from_utf8(args)
        decoded_kwargs = decode_from_utf8(kwargs)
        return f(*decoded_args, **decoded_kwargs)
    return decoding_wrapper
# Nicklist group names; the numeric prefix controls sort order in weechat.
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"

# Detect the system CA bundle (when the ssl module exposes it) so the
# websocket connection can verify Slack's certificate.
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
    ssl_defaults = ssl.get_default_verify_paths()
    if ssl_defaults.cafile is not None:
        sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}

# Filled in later with the known emoji names (empty until loaded).
EMOJI = []
###### Unicode handling
def encode_to_utf8(data):
    """Recursively encode unicode strings to UTF-8 bytes.

    Containers (mappings and other iterables) are rebuilt as the same
    type with every element — and every mapping item — encoded. Anything
    else is returned untouched.
    """
    if isinstance(data, unicode):
        return data.encode('utf-8')
    if isinstance(data, bytes):
        return data
    if isinstance(data, collections.Mapping):
        # each (key, value) pair is a tuple, itself encoded recursively
        return type(data)(map(encode_to_utf8, data.iteritems()))
    if isinstance(data, collections.Iterable):
        return type(data)(map(encode_to_utf8, data))
    return data
def decode_from_utf8(data):
    """Recursively decode UTF-8 byte strings to unicode.

    Containers (mappings and other iterables) are rebuilt as the same
    type with every element — and every mapping item — decoded. Anything
    else is returned untouched.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, unicode):
        return data
    if isinstance(data, collections.Mapping):
        # each (key, value) pair is a tuple, itself decoded recursively
        return type(data)(map(decode_from_utf8, data.iteritems()))
    if isinstance(data, collections.Iterable):
        return type(data)(map(decode_from_utf8, data))
    return data
class WeechatWrapper(object):
    """Proxy around the ``weechat`` module that transparently converts
    between the unicode strings used internally by wee-slack and the
    UTF-8 byte strings expected/returned by weechat's API.
    """

    def __init__(self, wrapped_class):
        # wrapped_class is the raw weechat module (or a stand-in for tests)
        self.wrapped_class = wrapped_class

    # Helper method used to encode/decode method calls.
    def wrap_for_utf8(self, method):
        def hooked(*args, **kwargs):
            result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
            # Prevent wrapped_class from becoming unwrapped
            if result == self.wrapped_class:
                return self
            return decode_from_utf8(result)
        return hooked

    # Encode and decode everything sent to/received from weechat. We use the
    # unicode type internally in wee-slack, but has to send utf8 to weechat.
    def __getattr__(self, attr):
        orig_attr = self.wrapped_class.__getattribute__(attr)
        if callable(orig_attr):
            return self.wrap_for_utf8(orig_attr)
        else:
            return decode_from_utf8(orig_attr)

    # Ensure all lines sent to weechat specifies a prefix. For lines after the
    # first, we want to disable the prefix, which is done by specifying a space.
    def prnt_date_tags(self, buffer, date, tags, message):
        message = message.replace("\n", "\n \t")
        return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
##### Helpers
def get_nick_color_name(nick):
    """Return weechat's color name for *nick*.

    The info name lost its "irc_" prefix in weechat 1.5 (0x1050000), so
    pick the right one based on the running version.
    """
    if int(weechat_version) < 0x1050000:
        info_name = "irc_nick_color_name"
    else:
        info_name = "nick_color_name"
    return w.info_get(info_name, nick)
##### BEGIN NEW
# Event names the EventRouter should silently drop instead of dispatching.
# Currently empty; the commented entries show the intended format.
IGNORED_EVENTS = [
    # "pref_change",
    # "reconnect_url",
]
###### New central Event router
class EventRouter(object):
    """Central dispatcher for everything that happens in wee-slack:
    websocket traffic, HTTP request/reply pairs and locally generated
    work items all flow through its queues and are routed to handler
    functions discovered by name prefix at construction time.
    """

    def __init__(self):
        """
        complete
        Eventrouter is the central hub we use to route:
        1) incoming websocket data
        2) outgoing http requests and incoming replies
        3) local requests
        It has a recorder that, when enabled, logs most events
        to the location specified in RECORD_DIR.
        """
        self.queue = []            # main FIFO of pending events
        self.slow_queue = []       # retry queue, drained at most ~1/sec
        self.slow_queue_timer = 0
        self.teams = {}            # team_hash -> SlackTeam
        self.context = {}          # identifier -> SlackRequest (see store_context)
        self.weechat_controller = WeechatController(self)
        self.previous_buffer = ""
        self.reply_buffer = {}     # response_id -> StringIO of partial HTTP output
        # Discover handlers by naming convention from module globals;
        # the slice strips the prefix so keys match event/function names.
        self.cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
        self.proc = {k[8:]: v for k, v in globals().items() if k.startswith("process_")}
        self.handlers = {k[7:]: v for k, v in globals().items() if k.startswith("handle_")}
        self.local_proc = {k[14:]: v for k, v in globals().items() if k.startswith("local_process_")}
        self.shutting_down = False
        self.recording = False
        self.recording_path = "/tmp"

    def record(self):
        """
        complete
        Toggles the event recorder and creates a directory for data if enabled.
        """
        self.recording = not self.recording
        if self.recording:
            if not os.path.exists(RECORD_DIR):
                os.makedirs(RECORD_DIR)

    def record_event(self, message_json, file_name_field, subdir=None):
        """
        complete
        Called each time you want to record an event.
        message_json is a json in dict form
        file_name_field is the json key whose value you want to be part of the file name
        """
        now = time.time()
        if subdir:
            directory = "{}/{}".format(RECORD_DIR, subdir)
        else:
            directory = RECORD_DIR
        if not os.path.exists(directory):
            os.makedirs(directory)
        mtype = message_json.get(file_name_field, 'unknown')
        f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
        f.write("{}".format(json.dumps(message_json)))
        f.close()

    def store_context(self, data):
        """
        A place to store data and vars needed by callback returns. We need this because
        weechat's "callback_data" has a limited size and weechat will crash if you exceed
        this size.
        """
        # random 40-char key handed to weechat as the (small) callback_data
        identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
        self.context[identifier] = data
        dbg("stored context {} {} ".format(identifier, data.url))
        return identifier

    def retrieve_context(self, identifier):
        """
        A place to retrieve data and vars needed by callback returns. We need this because
        weechat's "callback_data" has a limited size and weechat will crash if you exceed
        this size.

        Returns None when the identifier is unknown.
        """
        data = self.context.get(identifier, None)
        if data:
            # dbg("retrieved context {} ".format(identifier))
            return data

    def delete_context(self, identifier):
        """
        Requests can span multiple requests, so we may need to delete this as a last step
        """
        if identifier in self.context:
            # dbg("deleted eontext {} ".format(identifier))
            del self.context[identifier]

    def shutdown(self):
        """
        complete
        This toggles shutdown mode. Shutdown mode tells us not to
        talk to Slack anymore. Without this, typing /quit will trigger
        a race with the buffer close callback and may result in you
        leaving every slack channel.
        """
        self.shutting_down = not self.shutting_down

    def register_team(self, team):
        """
        complete
        Adds a team to the list of known teams for this EventRouter.
        """
        if isinstance(team, SlackTeam):
            self.teams[team.get_team_hash()] = team
        else:
            raise InvalidType(type(team))

    def reconnect_if_disconnected(self):
        # Timer-driven: try to reconnect every known team that lost its socket.
        for team_id, team in self.teams.iteritems():
            if not team.connected:
                team.connect()
                dbg("reconnecting {}".format(team))

    def receive_ws_callback(self, team_hash):
        """
        incomplete (reconnect)
        This is called by the global method of the same name.
        It is triggered when we have incoming data on a websocket,
        which needs to be read. Once it is read, we will ensure
        the data is valid JSON, add metadata, and place it back
        on the queue for processing as JSON.
        """
        try:
            # Read the data from the websocket associated with this team.
            data = decode_from_utf8(self.teams[team_hash].ws.recv())
            message_json = json.loads(data)
            # tag the event with its team so handle_next can resolve objects
            metadata = WeeSlackMetadata({
                "team": team_hash,
            }).jsonify()
            message_json["wee_slack_metadata"] = metadata
            if self.recording:
                self.record_event(message_json, 'type', 'websocket')
            self.receive_json(json.dumps(message_json))
        except WebSocketConnectionClosedException:
            # TODO: handle reconnect here
            self.teams[team_hash].set_disconnected()
            return w.WEECHAT_RC_OK
        except ssl.SSLWantReadError:
            # Expected to happen occasionally on SSL websockets.
            return w.WEECHAT_RC_OK
        except Exception:
            dbg("socket issue: {}\n".format(traceback.format_exc()))
            return w.WEECHAT_RC_OK

    def receive_httprequest_callback(self, data, command, return_code, out, err):
        """
        complete
        Receives the result of an http request we previously handed
        off to weechat (weechat bundles libcurl). Weechat can fragment
        replies, so it buffers them until the reply is complete.
        It is then populated with metadata here so we can identify
        where the request originated and route properly.

        data is the context identifier from store_context; return_code
        follows weechat's hook_process convention (-1 = partial output,
        0 = finished, anything else = error).
        """
        request_metadata = self.retrieve_context(data)
        try:
            dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
        except:
            dbg(request_metadata)
            return
        if return_code == 0:
            if len(out) > 0:
                if request_metadata.response_id not in self.reply_buffer:
                    self.reply_buffer[request_metadata.response_id] = StringIO()
                self.reply_buffer[request_metadata.response_id].write(out)
                try:
                    j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
                except:
                    pass
                    # dbg("Incomplete json, awaiting more", True)
                try:
                    j["wee_slack_process_method"] = request_metadata.request_normalized
                    j["wee_slack_request_metadata"] = pickle.dumps(request_metadata)
                    self.reply_buffer.pop(request_metadata.response_id)
                    if self.recording:
                        self.record_event(j, 'wee_slack_process_method', 'http')
                    self.receive_json(json.dumps(j))
                    self.delete_context(data)
                except:
                    dbg("HTTP REQUEST CALLBACK FAILED", True)
                    pass
            # We got an empty reply and this is weird so just ditch it and retry
            else:
                dbg("length was zero, probably a bug..")
                self.delete_context(data)
                # requeue the original request for a retry
                self.receive(request_metadata)
        elif return_code != -1:
            # hard error: drop any partial output and forget the request
            self.reply_buffer.pop(request_metadata.response_id, None)
            self.delete_context(data)
        else:
            # -1 means "more output coming": accumulate the fragment
            if request_metadata.response_id not in self.reply_buffer:
                self.reply_buffer[request_metadata.response_id] = StringIO()
            self.reply_buffer[request_metadata.response_id].write(out)

    def receive_json(self, data):
        """
        complete
        Receives a raw JSON string from and unmarshals it
        as dict, then places it back on the queue for processing.
        """
        dbg("RECEIVED JSON of len {}".format(len(data)))
        message_json = json.loads(data)
        self.queue.append(message_json)

    def receive(self, dataobj):
        """
        complete
        Receives a raw object and places it on the queue for
        processing. Object must be known to handle_next or
        be JSON.
        """
        dbg("RECEIVED FROM QUEUE")
        self.queue.append(dataobj)

    def receive_slow(self, dataobj):
        """
        complete
        Receives a raw object and places it on the slow queue for
        processing. Object must be known to handle_next or
        be JSON.
        """
        dbg("RECEIVED FROM QUEUE")
        self.slow_queue.append(dataobj)

    def handle_next(self):
        """
        complete
        Main handler of the EventRouter. This is called repeatedly
        via callback to drain events from the queue. It also attaches
        useful metadata and context to events as they are processed.
        """
        # Promote at most one slow-queue item per second onto the main queue.
        if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
            # for q in self.slow_queue[0]:
            dbg("from slow queue", 0)
            self.queue.append(self.slow_queue.pop())
            # self.slow_queue = []
            self.slow_queue_timer = time.time()
        if len(self.queue) > 0:
            j = self.queue.pop(0)
            # Reply is a special case of a json reply from websocket.
            kwargs = {}
            if isinstance(j, SlackRequest):
                # outgoing request (possibly a retry)
                if j.should_try():
                    if j.retry_ready():
                        local_process_async_slack_api_request(j, self)
                    else:
                        self.slow_queue.append(j)
                else:
                    dbg("Max retries for Slackrequest")
            else:
                # incoming event: derive the handler name from the payload
                if "reply_to" in j:
                    dbg("SET FROM REPLY")
                    function_name = "reply"
                elif "type" in j:
                    dbg("SET FROM type")
                    function_name = j["type"]
                elif "wee_slack_process_method" in j:
                    dbg("SET FROM META")
                    function_name = j["wee_slack_process_method"]
                else:
                    dbg("SET FROM NADA")
                    function_name = "unknown"
                # Here we are passing the actual objects. No more lookups.
                meta = j.get("wee_slack_metadata", None)
                if meta:
                    try:
                        if isinstance(meta, basestring):
                            dbg("string of metadata")
                        team = meta.get("team", None)
                        if team:
                            kwargs["team"] = self.teams[team]
                            if "user" in j:
                                kwargs["user"] = self.teams[team].users[j["user"]]
                            if "channel" in j:
                                kwargs["channel"] = self.teams[team].channels[j["channel"]]
                    except:
                        dbg("metadata failure")
                if function_name not in IGNORED_EVENTS:
                    dbg("running {}".format(function_name))
                    # dispatch order: local_* handlers, then process_*, then handle_*
                    if function_name.startswith("local_") and function_name in self.local_proc:
                        self.local_proc[function_name](j, self, **kwargs)
                    elif function_name in self.proc:
                        self.proc[function_name](j, self, **kwargs)
                    elif function_name in self.handlers:
                        self.handlers[function_name](j, self, **kwargs)
                    else:
                        raise ProcessNotImplemented(function_name)
def handle_next(*args):
    """
    complete
    Module-level trampoline for the weechat timer hook: drains one step
    of the global EventRouter's queue. Exceptions are swallowed (printed
    only in debug mode) so a bad event can never kill the timer callback.
    This is a dirty hack. There must be a better way.
    """
    try:
        EVENTROUTER.handle_next()
    except:
        if config.debug_mode:
            traceback.print_exc()
    return w.WEECHAT_RC_OK
class WeechatController(object):
    """
    Encapsulates our interaction with weechat: tracks which weechat
    buffers belong to which slack channels and remembers buffer-list
    refresh state.
    """

    def __init__(self, eventrouter):
        self.eventrouter = eventrouter
        self.buffers = {}            # buffer pointer (str) -> channel object
        self.previous_buffer = None  # pointer of the last active buffer
        self.buffer_list_stale = False

    def iter_buffers(self):
        # Yields (buffer_ptr, channel) pairs for all registered buffers.
        for b in self.buffers:
            yield (b, self.buffers[b])

    def register_buffer(self, buffer_ptr, channel):
        """
        complete
        Adds a weechat buffer to the list of handled buffers for this EventRouter
        """
        if isinstance(buffer_ptr, basestring):
            self.buffers[buffer_ptr] = channel
        else:
            raise InvalidType(type(buffer_ptr))

    def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
        """
        complete
        Removes a weechat buffer from the list of handled buffers,
        destroying the associated channel state and optionally closing
        the weechat buffer itself. Unknown buffers are ignored.
        """
        if isinstance(buffer_ptr, basestring):
            try:
                self.buffers[buffer_ptr].destroy_buffer(update_remote)
                if close_buffer:
                    w.buffer_close(buffer_ptr)
                del self.buffers[buffer_ptr]
            except:
                dbg("Tried to close unknown buffer")
        else:
            raise InvalidType(type(buffer_ptr))

    def get_channel_from_buffer_ptr(self, buffer_ptr):
        # None when the buffer does not belong to slack.
        return self.buffers.get(buffer_ptr, None)

    def get_all(self, buffer_ptr):
        # NOTE: the buffer_ptr argument is unused; returns the whole mapping.
        return self.buffers

    def get_previous_buffer_ptr(self):
        return self.previous_buffer

    def set_previous_buffer(self, data):
        self.previous_buffer = data

    def check_refresh_buffer_list(self):
        # NOTE(review): self.last_buffer_list_update is never set in
        # __init__, so this raises AttributeError unless assigned
        # elsewhere — confirm before relying on this method.
        return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()

    def set_refresh_buffer_list(self, setting):
        self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
    """
    complete
    Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
    DEBUGGING!!! The context here cannot be very large. Weechat will crash.
    """
    if not event_router.shutting_down:
        weechat_request = 'url:{}'.format(request.request_string())
        # nonce defeats any URL-level caching of identical retried requests
        weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
        params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
        # mark the attempt so the retry accounting in handle_next works
        request.tried()
        # only the small context key is handed to weechat, not the request
        context = event_router.store_context(request)
        # TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
        w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
        w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def receive_httprequest_callback(data, command, return_code, out, err):
    """
    complete
    Module-level weechat hook_process callback; forwards the (possibly
    fragmented) HTTP output to the global EventRouter.
    This is a dirty hack. There must be a better way.
    """
    # def url_processor_cb(data, command, return_code, out, err):
    EVENTROUTER.receive_httprequest_callback(data, command, return_code, out, err)
    return w.WEECHAT_RC_OK
@utf8_decode
def receive_ws_callback(*args):
    """
    complete
    The first arg is all we want here. It contains the team
    hash which is set when we _hook the descriptor.
    This is a dirty hack. There must be a better way.
    """
    EVENTROUTER.receive_ws_callback(args[0])
    return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
    # Timer hook: ask the router to reconnect any disconnected teams.
    EVENTROUTER.reconnect_if_disconnected()
    return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
    """
    complete
    Receives a callback from weechat when a buffer is being closed.
    We pass the eventrouter variable name in as a string, as
    that is the only way we can do dependency injection via weechat
    callback, hence the eval.
    """
    # update_remote=True so Slack is told we left; close_buffer=False
    # because weechat is already closing the buffer itself.
    eval(signal).weechat_controller.unregister_buffer(data, True, False)
    return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
    """
    incomplete
    Handles everything a user types in the input bar. In our case
    this includes add/remove reactions ("+:name:" / "-:name:"),
    sed-style edits ("s/old/new/flags"), and plain messages. An
    optional leading number selects the Nth most recent message.
    """
    eventrouter = eval(signal)  # dependency injection via variable name
    channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
    if not channel:
        return w.WEECHAT_RC_ERROR

    # "<n>+:emoji:" or "<n>-:emoji:" toggles a reaction on message n back
    reaction = re.match("^(\d*)(\+|-):(.*):\s*$", data)
    # "<n>s/old/new/flags" edits message n back
    substitute = re.match("^(\d*)s/", data)
    if reaction:
        if reaction.group(2) == "+":
            channel.send_add_reaction(int(reaction.group(1) or 1), reaction.group(3))
        elif reaction.group(2) == "-":
            channel.send_remove_reaction(int(reaction.group(1) or 1), reaction.group(3))
    elif substitute:
        msgno = int(substitute.group(1) or 1)
        try:
            # split on unescaped slashes only
            old, new, flags = re.split(r'(?<!\\)/', data)[1:]
        except ValueError:
            pass
        else:
            # Replacement string in re.sub() is a string, not a regex, so get
            # rid of escapes.
            new = new.replace(r'\/', '/')
            old = old.replace(r'\/', '/')
            channel.edit_nth_previous_message(msgno, old, new, flags)
    else:
        # leading "//" or " " escapes the reaction/substitution syntax
        if data.startswith(('//', ' ')):
            data = data[1:]
        channel.send_message(data)
        # this is probably wrong channel.mark_read(update_remote=True, force=True)
    return w.WEECHAT_RC_OK
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
    # Only intercept slack buffers; everything else passes through untouched.
    if current_buffer not in EVENTROUTER.weechat_controller.buffers:
        return string
    message = decode_from_utf8(string)
    # Multiline non-command input is sent whole; returning "" stops
    # weechat from also splitting and re-sending it line by line.
    if not message.startswith("/") and "\n" in message:
        buffer_input_callback("EVENTROUTER", current_buffer, message)
        return ""
    return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
    """
    incomplete
    Every time we change channels in weechat, we call this to:
    1) set read marker 2) determine if we have already populated
    channel history data
    """
    eventrouter = eval(signal)

    prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
    # this is to see if we need to gray out things in the buffer list
    prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
    if prev:
        prev.mark_read()

    # fetch history lazily, the first time a channel's buffer is shown
    new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
    if new_channel:
        if not new_channel.got_history:
            new_channel.get_history()

    eventrouter.weechat_controller.set_previous_buffer(data)
    return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
    """
    incomplete
    Timer-based callback that refreshes every registered buffer. The
    timer is rate-limited (max ~1x/second) because a full refresh is
    cpu-heavy for minimal visual change. Short names encode typing state
    ("#channel" <-> ">channel") and DM presence (" name" <-> "+name").
    """
    eventrouter = eval(data)
    for _, channel in eventrouter.weechat_controller.iter_buffers():
        channel.refresh()
    return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
    """Hooked on weechat quit: shut down all slack connections cleanly."""
    stop_talking_to_slack()
    return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(signal, sig_type, data):
    """
    Sends a "typing" indicator to slack while the user is composing a
    message (more than 8 chars, not a /command), throttled to at most
    once every 4 seconds via the global typing_timer.
    """
    global typing_timer
    msg = w.buffer_get_string(data, "input")
    if len(msg) > 8 and msg[:1] != "/":
        now = time.time()
        if now > typing_timer + 4:
            channel = EVENTROUTER.weechat_controller.buffers.get(w.current_buffer(), None)
            # thread buffers have no channel-level typing indicator
            if channel and channel.type != "thread":
                payload = {"type": "typing", "channel": channel.identifier}
                channel.team.send_to_websocket(payload, expect_reply=False)
                typing_timer = now
    return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
    """Timer callback: redraw the bar item that shows who is typing."""
    w.bar_item_update("slack_typing_notice")
    return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
    """
    When the never_away option is enabled, periodically sends a fake
    "typing" event to each team's slackbot channel so slack keeps
    marking us as active.
    """
    if not config.never_away:
        return w.WEECHAT_RC_OK
    for team in EVENTROUTER.teams.values():
        slackbot_id = team.get_channel_map()['slackbot']
        channel = team.channels[slackbot_id]
        payload = {"type": "typing", "channel": channel.identifier}
        channel.team.send_to_websocket(payload, expect_reply=False)
    return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, current_buffer, args):
    """
    Provides a bar item indicating who is typing in the current channel AND
    who is typing a DM to you globally.

    Returns the colored "typing: ..." string for the bar, or "" when
    nobody is typing.
    """
    typers = []
    current_buffer = w.current_buffer()
    current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    # first look for people typing in this channel
    if current_channel:
        # this try is mostly because server buffers don't implement
        # is_someone_typing (was a bare except; narrowed to Exception)
        try:
            if current_channel.type != 'im' and current_channel.is_someone_typing():
                typers += current_channel.get_typing_list()
        except Exception:
            pass
    # here is where we notify you that someone is typing in DM
    # regardless of which buffer you are in currently
    for t in EVENTROUTER.teams.values():
        for channel in t.channels.values():
            if channel.type == "im" and channel.is_someone_typing():
                typers.append("D/" + channel.slack_name)
    typing = ", ".join(typers)
    if typing != "":
        typing = w.color('yellow') + "typing: " + typing
    return typing
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
    """
    Adds an @-prefixed completion entry for every member of the current
    channel.
    """
    channel = EVENTROUTER.weechat_controller.buffers.get(w.current_buffer(), None)
    if channel is None or channel.members is None:
        return w.WEECHAT_RC_OK
    for member_id in channel.members:
        user = channel.team.users.get(member_id, None)
        if user:
            w.hook_completion_list_add(completion, "@" + user.name, 1, w.WEECHAT_LIST_POS_SORT)
    return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
    """
    Adds a :name: completion entry for every emoji the current channel's
    team knows about.
    """
    channel = EVENTROUTER.weechat_controller.buffers.get(w.current_buffer(), None)
    if channel is None:
        return w.WEECHAT_RC_OK
    for emoji in channel.team.emoji_completions:
        w.hook_completion_list_add(completion, ":" + emoji + ":", 0, w.WEECHAT_LIST_POS_SORT)
    return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
    """Extract current word, if it is equal to a nick, prefix it with @ and
    rely on nick_completion_cb adding the @-prefixed versions to the
    completion lists, then let Weechat's internal completion do its
    thing
    """
    current_buffer = w.current_buffer()
    current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    # channel = channels.find(current_buffer)
    # the hasattr check also short-circuits the None case (hasattr(None, ...)
    # is False), so the explicit None test after it is redundant but harmless
    if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
        return w.WEECHAT_RC_OK
    line_input = w.buffer_get_string(current_buffer, "input")
    current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
    input_length = w.buffer_get_integer(current_buffer, "input_length")
    word_start = 0
    word_end = input_length
    # If we're on a non-word, look left for something to complete
    while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
        current_pos = current_pos - 1
    if current_pos < 0:
        current_pos = 0
    # scan left from the cursor for the start of the current word
    for l in range(current_pos, 0, -1):
        if line_input[l] != '@' and not line_input[l].isalnum():
            word_start = l + 1
            break
    # scan right from the cursor for the end of the current word
    for l in range(current_pos, input_length):
        if not line_input[l].isalnum():
            word_end = l
            break
    word = line_input[word_start:word_end]
    for m in current_channel.members:
        u = current_channel.team.users.get(m, None)
        if u and u.name == word:
            # Here, we cheat. Insert a @ in front and rely in the @
            # nicks being in the completion list
            w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
            w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
            return w.WEECHAT_RC_OK_EAT
    return w.WEECHAT_RC_OK
def script_unloaded():
    """Weechat unload hook for this script: disconnect from slack."""
    stop_talking_to_slack()
    return w.WEECHAT_RC_OK
def stop_talking_to_slack():
    """
    complete
    Shuts the EventRouter down. Prevents a race condition where quitting
    closes buffers, which would otherwise trigger leaving the channel
    because of how buffer-close is handled.
    """
    EVENTROUTER.shutdown()
    return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
    """
    complete
    Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
    makes a SHA of the request url and current time so we can re-tag this on the way back through.
    """
    def __init__(self, token, request, post_data=None, **kwargs):
        """
        token: slack api token; request: api method name (e.g. "chat.update");
        post_data: dict of POST parameters ("token" is added to it);
        kwargs: attached verbatim as attributes (team_hash, channel_identifier,
        retries, ...).
        """
        # Fix: the previous default of post_data={} was a mutable default
        # argument that we mutate below, so it leaked state across calls.
        if post_data is None:
            post_data = {}
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.tries = 0
        self.start_time = time.time()
        self.domain = 'api.slack.com'
        self.request = request
        # api method with non-word chars stripped, used as a handler key
        self.request_normalized = re.sub(r'\W+', '', request)
        self.token = token
        post_data["token"] = token
        self.post_data = post_data
        self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
        self.url = 'https://{}/api/{}?{}'.format(self.domain, request, urllib.urlencode(encode_to_utf8(post_data)))
        # tag responses back to this request object
        self.response_id = sha.sha("{}{}".format(self.url, self.start_time)).hexdigest()
        self.retries = kwargs.get('retries', 3)
    def request_string(self):
        """Return the fully-encoded request url."""
        return "{}".format(self.url)
    def tried(self):
        """Record an attempt and re-tag with a fresh response id."""
        self.tries += 1
        self.response_id = sha.sha("{}{}".format(self.url, time.time())).hexdigest()
    def should_try(self):
        """True while retry budget remains."""
        return self.tries < self.retries
    def retry_ready(self):
        """True once the quadratic backoff (tries**2 seconds) has elapsed."""
        return (self.start_time + (self.tries**2)) < time.time()
class SlackTeam(object):
    """
    incomplete
    Team object under which users and channels live.. Does lots.
    Creates its own server buffer, merges channels passed in with any
    already held, and owns the websocket connection to slack.
    """
    def __init__(self, eventrouter, token, websocket_url, subdomain, nick, myidentifier, users, bots, channels, **kwargs):
        self.ws_url = websocket_url
        self.connected = False
        self.connecting = False
        # self.ws = None
        self.ws_counter = 0
        self.ws_replies = {}
        self.eventrouter = eventrouter
        self.token = token
        self.team = self
        self.subdomain = subdomain
        self.domain = subdomain + ".slack.com"
        self.preferred_name = self.domain
        self.nick = nick
        self.myidentifier = myidentifier
        # On a reconnect self.channels already exists: keep known channels
        # and only add new ones. On first construction the attribute access
        # raises AttributeError and the except takes the incoming dict.
        try:
            if self.channels:
                for c in channels.keys():
                    if not self.channels.get(c):
                        self.channels[c] = channels[c]
        except:
            self.channels = channels
        self.users = users
        self.bots = bots
        self.team_hash = SlackTeam.generate_team_hash(self.nick, self.subdomain)
        self.name = self.domain
        self.channel_buffer = None
        self.got_history = True
        self.create_buffer()
        self.set_muted_channels(kwargs.get('muted_channels', ""))
        for c in self.channels.keys():
            channels[c].set_related_server(self)
            channels[c].check_should_open()
        #    self.channel_set_related_server(c)
        # Last step is to make sure my nickname is the set color
        self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
        # This highlight step must happen after we have set related server
        self.set_highlight_words(kwargs.get('highlight_words', ""))
        self.load_emoji_completions()
    def __repr__(self):
        return "domain={} nick={}".format(self.subdomain, self.nick)
    def __eq__(self, compare_str):
        # a team compares equal to its token, domain or subdomain string
        if compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain:
            return True
        else:
            return False
    def load_emoji_completions(self):
        # start from the static EMOJI list, then fetch custom team emoji
        self.emoji_completions = list(EMOJI)
        if self.emoji_completions:
            s = SlackRequest(self.token, "emoji.list", {}, team_hash=self.team_hash)
            self.eventrouter.receive(s)
    def add_channel(self, channel):
        self.channels[channel["id"]] = channel
        channel.set_related_server(self)
    # def connect_request_generate(self):
    #     return SlackRequest(self.token, 'rtm.start', {})
    # def close_all_buffers(self):
    #     for channel in self.channels:
    #         self.eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, update_remote=False, close_buffer=True)
    #     #also close this server buffer
    #     self.eventrouter.weechat_controller.unregister_buffer(self.channel_buffer, update_remote=False, close_buffer=True)
    def create_buffer(self):
        """Create the team's server buffer (once) and set its localvars."""
        if not self.channel_buffer:
            if config.short_buffer_names:
                self.preferred_name = self.subdomain
            elif config.server_aliases not in ['', None]:
                name = config.server_aliases.get(self.subdomain, None)
                if name:
                    self.preferred_name = name
            else:
                self.preferred_name = self.domain
            self.channel_buffer = w.buffer_new("{}".format(self.preferred_name), "buffer_input_callback", "EVENTROUTER", "", "")
            self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
            w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
            w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
            w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
            if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
                w.buffer_merge(self.channel_buffer, w.buffer_search_main())
    def set_muted_channels(self, muted_str):
        # comma-separated channel ids -> set
        self.muted_channels = {x for x in muted_str.split(',')}
    def set_highlight_words(self, highlight_str):
        # comma-separated words -> set; push down to every channel buffer
        self.highlight_words = {x for x in highlight_str.split(',')}
        if len(self.highlight_words) > 0:
            for v in self.channels.itervalues():
                v.set_highlights()
    def formatted_name(self, **kwargs):
        return self.domain
    def buffer_prnt(self, data):
        # print a line into the server buffer with a "team" tagset
        w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag("team"), data)
    def find_channel_by_members(self, members, channel_type=None):
        """Return the first channel whose member set matches exactly."""
        for channel in self.channels.itervalues():
            if channel.get_members() == members and (
                    channel_type is None or channel.type == channel_type):
                return channel
    def get_channel_map(self):
        # slack channel name -> channel id
        return {v.slack_name: k for k, v in self.channels.iteritems()}
    def get_username_map(self):
        # user name -> user id
        return {v.name: k for k, v in self.users.iteritems()}
    def get_team_hash(self):
        return self.team_hash
    @staticmethod
    def generate_team_hash(nick, subdomain):
        # stable identifier for this (nick, subdomain) pair, used as a
        # routing key for callbacks and requests
        return str(sha.sha("{}{}".format(nick, subdomain)).hexdigest())
    def refresh(self):
        self.rename()
    def rename(self):
        pass
    # def attach_websocket(self, ws):
    #     self.ws = ws
    def is_user_present(self, user_id):
        # NOTE(review): users.get() may return None for unknown ids, which
        # would raise AttributeError here -- callers appear to pass known ids.
        user = self.users.get(user_id)
        if user.presence == 'active':
            return True
        else:
            return False
    def mark_read(self, ts=None, update_remote=True, force=False):
        pass
    def connect(self):
        """Open the websocket (fast path) or fall back to a full rtm.start."""
        if not self.connected and not self.connecting:
            self.connecting = True
            if self.ws_url:
                try:
                    ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
                    # hand the socket fd to weechat's event loop
                    self.hook = w.hook_fd(ws.sock._sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
                    ws.sock.setblocking(0)
                    self.ws = ws
                    # self.attach_websocket(ws)
                    self.set_connected()
                    self.connecting = False
                except Exception as e:
                    dbg("websocket connection error: {}".format(decode_from_utf8(e)))
                    self.connecting = False
                    return False
            else:
                # The fast reconnect failed, so start over-ish
                for chan in self.channels:
                    self.channels[chan].got_history = False
                s = initiate_connection(self.token, retries=999)
                self.eventrouter.receive(s)
                self.connecting = False
                # del self.eventrouter.teams[self.get_team_hash()]
            self.set_reconnect_url(None)
    def set_connected(self):
        self.connected = True
    def set_disconnected(self):
        w.unhook(self.hook)
        self.connected = False
    def set_reconnect_url(self, url):
        self.ws_url = url
    def next_ws_transaction_id(self):
        # ids wrap at 1000 so the reply map stays small
        if self.ws_counter > 999:
            self.ws_counter = 0
        self.ws_counter += 1
        return self.ws_counter
    def send_to_websocket(self, data, expect_reply=True):
        """Serialize data, tag it with a transaction id, and send it."""
        data["id"] = self.next_ws_transaction_id()
        message = json.dumps(data)
        try:
            if expect_reply:
                self.ws_replies[data["id"]] = data
            self.ws.send(encode_to_utf8(message))
            dbg("Sent {}...".format(message[:100]))
        except:
            # NOTE(review): calling set_connected() after a send failure looks
            # suspicious -- confirm this shouldn't be set_disconnected().
            print "WS ERROR"
            dbg("Unexpected error: {}\nSent: {}".format(sys.exc_info()[0], data))
            self.set_connected()
    def update_member_presence(self, user, presence):
        # update the user and refresh nicklists of every channel they're in
        user.presence = presence
        for c in self.channels:
            c = self.channels[c]
            if user.id in c.members:
                c.update_nicklist(user.id)
    def subscribe_users_presence(self):
        # FIXME: There is a limitation in the API to the size of the
        # json we can send.
        # We should try to be smarter to fetch the users whom we want to
        # subscribe to.
        users = self.users.keys()[0:750]
        self.send_to_websocket({
            "type": "presence_sub",
            "ids": users,
        }, expect_reply=False)
class SlackChannel(object):
    """
    Represents an individual slack channel.
    Owns its weechat buffer, its message cache (an OrderedDict keyed by
    SlackTS), typing state, and the nicklist. Subclasses override naming
    and open/close behavior for DMs, groups and MPDMs.
    """
    def __init__(self, eventrouter, **kwargs):
        # We require these two things for a valid object,
        # the rest we can just learn from slack
        self.active = False
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.eventrouter = eventrouter
        self.slack_name = kwargs["name"]
        self.slack_purpose = kwargs.get("purpose", {"value": ""})
        self.topic = kwargs.get("topic", {}).get("value", "")
        self.identifier = kwargs["id"]
        self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
        self.channel_buffer = None
        self.team = kwargs.get('team', None)
        self.got_history = False
        self.messages = OrderedDict()
        self.hashed_messages = {}
        self.new_messages = False
        self.typing = {}
        self.type = 'channel'
        self.set_name(self.slack_name)
        # short name relates to the localvar we change for typing indication
        self.current_short_name = self.name
        self.set_members(kwargs.get('members', []))
        self.unread_count_display = 0
    def __eq__(self, compare_str):
        # a channel compares equal to any of its name forms
        if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
            return True
        else:
            return False
    def __repr__(self):
        return "Name:{} Identifier:{}".format(self.name, self.identifier)
    def set_name(self, slack_name):
        self.name = "#" + slack_name
    def refresh(self):
        return self.rename()
    def rename(self):
        """Re-render the sidebar short name; returns True if it changed."""
        if self.channel_buffer:
            new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
            if self.current_short_name != new_name:
                self.current_short_name = new_name
                w.buffer_set(self.channel_buffer, "short_name", new_name)
                return True
        return False
    def set_members(self, members):
        self.members = set(members)
        self.update_nicklist()
    def get_members(self):
        return self.members
    def set_unread_count_display(self, count):
        """Sync weechat's hotlist with slack's unread count for this channel."""
        self.unread_count_display = count
        self.new_messages = bool(self.unread_count_display)
        # bump the hotlist once per unread message; DMs use priority 2
        for c in range(self.unread_count_display):
            if self.type == "im":
                w.buffer_set(self.channel_buffer, "hotlist", "2")
            else:
                w.buffer_set(self.channel_buffer, "hotlist", "1")
    def formatted_name(self, style="default", typing=False, **kwargs):
        """Render the channel name in one of several display styles."""
        if typing and config.channel_name_typing_indicator:
            prepend = ">"
        elif self.type == "group":
            prepend = config.group_name_prefix
        else:
            prepend = "#"
        select = {
            "default": prepend + self.slack_name,
            "sidebar": prepend + self.slack_name,
            "base": self.slack_name,
            "long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
            "long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
        }
        return select[style]
    def render_topic(self):
        # fall back to the channel purpose when no topic is set
        if self.channel_buffer:
            if self.topic != "":
                topic = self.topic
            else:
                topic = self.slack_purpose['value']
            w.buffer_set(self.channel_buffer, "title", topic)
    def set_topic(self, value):
        self.topic = value
        self.render_topic()
    def update_from_message_json(self, message_json):
        for key, value in message_json.items():
            setattr(self, key, value)
    def open(self, update_remote=True):
        """Open the channel: optionally join remotely, then create the buffer."""
        if update_remote:
            if "join" in SLACK_API_TRANSLATOR[self.type]:
                s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
                self.eventrouter.receive(s)
        self.create_buffer()
        self.active = True
        self.get_history()
        # self.create_buffer()
    def check_should_open(self, force=False):
        """Decide at startup whether this channel's buffer should exist."""
        if hasattr(self, "is_archived") and self.is_archived:
            return
        if force:
            self.create_buffer()
            return
        # Only check is_member if is_open is not set, because in some cases
        # (e.g. group DMs), is_member should be ignored in favor of is_open.
        is_open = self.is_open if hasattr(self, "is_open") else self.is_member
        if is_open or self.unread_count_display:
            self.create_buffer()
            if config.background_load_all_history:
                self.get_history(slow_queue=True)
    def set_related_server(self, team):
        self.team = team
    def set_highlights(self):
        # highlight my own name and any set highlights
        if self.channel_buffer:
            highlights = self.team.highlight_words.union({'@' + self.team.nick, self.team.myidentifier, "!here", "!channel", "!everyone"})
            h_str = ",".join(highlights)
            w.buffer_set(self.channel_buffer, "highlight_words", h_str)
    def create_buffer(self):
        """
        incomplete (muted doesn't work)
        Creates the weechat buffer where the channel magic happens.
        """
        if not self.channel_buffer:
            self.active = True
            self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
            self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
            if self.type == "im":
                w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
            else:
                w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
            w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
            w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
            w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
            self.render_topic()
            self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
            if self.channel_buffer:
                # if self.team.server_alias:
                #     w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.server_alias)
                # else:
                w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
        # else:
        #     self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
        self.update_nicklist()
        if "info" in SLACK_API_TRANSLATOR[self.type]:
            s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
            self.eventrouter.receive(s)
        if self.type == "im":
            if "join" in SLACK_API_TRANSLATOR[self.type]:
                s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
                self.eventrouter.receive(s)
    def destroy_buffer(self, update_remote):
        """Drop buffer state and caches; optionally leave the channel remotely."""
        if self.channel_buffer is not None:
            self.channel_buffer = None
        self.messages = OrderedDict()
        self.hashed_messages = {}
        self.got_history = False
        # if update_remote and not eventrouter.shutting_down:
        self.active = False
        if update_remote and not self.eventrouter.shutting_down:
            s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
            self.eventrouter.receive(s)
    # NOTE(review): the default for `timestamp` is evaluated ONCE at class
    # definition time, so every call relying on the default gets the same
    # stale script-load time -- callers appear to always pass a timestamp,
    # but confirm before relying on the default.
    def buffer_prnt(self, nick, text, timestamp=str(time.time()), tagset=None, tag_nick=None, **kwargs):
        """Print one message line into the buffer with appropriate tags."""
        data = "{}\t{}".format(format_nick(nick), text)
        ts = SlackTS(timestamp)
        last_read = SlackTS(self.last_read)
        # without this, DMs won't open automatically
        if not self.channel_buffer and ts > last_read:
            self.open(update_remote=False)
        if self.channel_buffer:
            # backlog messages - we will update the read marker as we print these
            backlog = True if ts <= last_read else False
            if tagset:
                tags = tag(tagset, user=tag_nick)
                self.new_messages = True
            # we have to infer the tagset because we weren't told
            elif ts <= last_read:
                tags = tag("backlog", user=tag_nick)
            elif self.type in ["im", "mpdm"]:
                if tag_nick != self.team.nick:
                    tags = tag("dm", user=tag_nick)
                    self.new_messages = True
                else:
                    tags = tag("dmfromme")
            else:
                tags = tag("default", user=tag_nick)
                self.new_messages = True
            try:
                if config.unhide_buffers_with_activity and not self.is_visible() and (self.identifier not in self.team.muted_channels):
                    w.buffer_set(self.channel_buffer, "hidden", "0")
                w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
                modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
                if backlog:
                    self.mark_read(ts, update_remote=False, force=True)
            except:
                dbg("Problem processing buffer_prnt")
    def send_message(self, message, request_dict_ext={}):
        """Linkify and send a message over the team websocket."""
        # team = self.eventrouter.teams[self.team]
        message = linkify_text(message, self.team, self)
        dbg(message)
        request = {"type": "message", "channel": self.identifier, "text": message, "_team": self.team.team_hash, "user": self.team.myidentifier}
        request.update(request_dict_ext)
        self.team.send_to_websocket(request)
        self.mark_read(update_remote=False, force=True)
    def store_message(self, message, team, from_me=False):
        """Cache a message, trimming the cache to SCROLLBACK_SIZE entries."""
        if not self.active:
            return
        if from_me:
            message.message_json["user"] = team.myidentifier
        self.messages[SlackTS(message.ts)] = message
        sorted_messages = sorted(self.messages.items())
        messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
        messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
        # drop the short-hash index entries of evicted messages too
        for message_hash in [m[1].hash for m in messages_to_delete]:
            if message_hash in self.hashed_messages:
                del self.hashed_messages[message_hash]
        self.messages = OrderedDict(messages_to_keep)
    def change_message(self, ts, text=None, suffix=None):
        """Mutate a cached message and re-render its buffer line in place."""
        ts = SlackTS(ts)
        if ts in self.messages:
            m = self.messages[ts]
            if text:
                m.change_text(text)
            if suffix:
                m.change_suffix(suffix)
            text = m.render(force=True)
            modify_buffer_line(self.channel_buffer, text, ts.major, ts.minor)
        return True
    def edit_nth_previous_message(self, n, old, new, flags):
        """sed-style edit of my nth previous message; empty s/// deletes it."""
        message = self.my_last_message(n)
        if new == "" and old == "":
            s = SlackRequest(self.team.token, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
            self.eventrouter.receive(s)
        else:
            num_replace = 1
            if 'g' in flags:
                # count=0 means "replace all" for re.sub
                num_replace = 0
            new_message = re.sub(old, new, message["text"], num_replace)
            if new_message != message["text"]:
                s = SlackRequest(self.team.token, "chat.update", {"channel": self.identifier, "ts": message['ts'], "text": new_message}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
                self.eventrouter.receive(s)
    def my_last_message(self, msgno):
        """Return the json of my msgno-th most recent message.

        NOTE(review): returns None implicitly if I have fewer than msgno
        messages cached -- callers do not guard against that.
        """
        for key in self.main_message_keys_reversed():
            m = self.messages[key]
            if "user" in m.message_json and "text" in m.message_json and m.message_json["user"] == self.team.myidentifier:
                msgno -= 1
                if msgno == 0:
                    return m.message_json
    def is_visible(self):
        return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
    def get_history(self, slow_queue=False):
        """Request channel history once; optionally via the slow queue."""
        if not self.got_history:
            # we have probably reconnected. flush the buffer
            if self.team.connected:
                w.buffer_clear(self.channel_buffer)
            self.buffer_prnt('', 'getting channel history...', tagset='backlog')
            s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
            if not slow_queue:
                self.eventrouter.receive(s)
            else:
                self.eventrouter.receive_slow(s)
            self.got_history = True
    def send_add_reaction(self, msg_number, reaction):
        self.send_change_reaction("reactions.add", msg_number, reaction)
    def send_remove_reaction(self, msg_number, reaction):
        self.send_change_reaction("reactions.remove", msg_number, reaction)
    def send_change_reaction(self, method, msg_number, reaction):
        """Add/remove a reaction on the msg_number-th most recent message."""
        if 0 < msg_number < len(self.messages):
            keys = self.main_message_keys_reversed()
            # msg_number is 1-based from the most recent message
            timestamp = next(islice(keys, msg_number - 1, None))
            data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
            s = SlackRequest(self.team.token, method, data)
            self.eventrouter.receive(s)
    def main_message_keys_reversed(self):
        # newest-first generator over real messages (skips thread stubs etc.)
        return (key for key in reversed(self.messages)
                if type(self.messages[key]) == SlackMessage)
    # Typing related
    def set_typing(self, user):
        if self.channel_buffer and self.is_visible():
            self.typing[user] = time.time()
            self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
    def unset_typing(self, user):
        if self.channel_buffer and self.is_visible():
            u = self.typing.get(user, None)
            if u:
                self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
    def is_someone_typing(self):
        """
        Walks through dict of typing folks in a channel and fast
        returns if any of them is actively typing. If none are,
        nulls the dict and returns false.
        """
        for user, timestamp in self.typing.iteritems():
            if timestamp + 4 > time.time():
                return True
        if len(self.typing) > 0:
            self.typing = {}
            self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
        return False
    def get_typing_list(self):
        """
        Returns the names of everyone in the channel who is currently typing.

        NOTE(review): deleting from self.typing while iterating iteritems()
        raises RuntimeError on Python 2 when an entry has expired -- confirm
        and iterate over a copy (self.typing.items()) instead.
        """
        typing = []
        for user, timestamp in self.typing.iteritems():
            if timestamp + 4 > time.time():
                typing.append(user)
            else:
                del self.typing[user]
        return typing
    def mark_read(self, ts=None, update_remote=True, force=False):
        """Clear local unread state and optionally tell slack we've read."""
        if not ts:
            ts = next(self.main_message_keys_reversed(), SlackTS())
        if self.new_messages or force:
            if self.channel_buffer:
                w.buffer_set(self.channel_buffer, "unread", "")
                w.buffer_set(self.channel_buffer, "hotlist", "-1")
            if update_remote:
                s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": ts}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
                self.eventrouter.receive(s)
                self.new_messages = False
    def user_joined(self, user_id):
        # ugly hack - for some reason this gets turned into a list
        self.members = set(self.members)
        self.members.add(user_id)
        self.update_nicklist(user_id)
    def user_left(self, user_id):
        self.members.discard(user_id)
        self.update_nicklist(user_id)
    def update_nicklist(self, user=None):
        """Refresh the nicklist, incrementally for one user or completely."""
        if not self.channel_buffer:
            return
        if self.type not in ["channel", "group", "mpim"]:
            return
        w.buffer_set(self.channel_buffer, "nicklist", "1")
        # create nicklists for the current channel if they don't exist
        # if they do, use the existing pointer
        here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
        if not here:
            here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
        afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
        if not afk:
            afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
        if user and len(self.members) < 1000:
            user = self.team.users[user]
            if user.deleted:
                return
            nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
            # since this is a change just remove it regardless of where it is
            w.nicklist_remove_nick(self.channel_buffer, nick)
            # now add it back in to whichever..
            nick_group = afk
            if self.team.is_user_present(user.identifier):
                nick_group = here
            if user.identifier in self.members:
                w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
        # if we didn't get a user, build a complete list. this is expensive.
        else:
            if len(self.members) < 1000:
                try:
                    for user in self.members:
                        user = self.team.users[user]
                        if user.deleted:
                            continue
                        nick_group = afk
                        if self.team.is_user_present(user.identifier):
                            nick_group = here
                        w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
                except Exception as e:
                    dbg("DEBUG: {} {} {}".format(self.identifier, self.name, decode_from_utf8(e)))
            else:
                # too many members to render; show a placeholder instead
                w.nicklist_remove_all(self.channel_buffer)
                for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
                    w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
    def hash_message(self, ts):
        """Assign a short unique sha prefix to the message at ts.

        Grows the prefix length on collision; an existing message whose
        hash becomes ambiguous is re-hashed and re-rendered.
        """
        ts = SlackTS(ts)

        def calc_hash(msg):
            return sha.sha(str(msg.ts)).hexdigest()

        if ts in self.messages and not self.messages[ts].hash:
            message = self.messages[ts]
            tshash = calc_hash(message)
            hl = 3
            shorthash = tshash[:hl]
            while any(x.startswith(shorthash) for x in self.hashed_messages):
                hl += 1
                shorthash = tshash[:hl]
            if shorthash[:-1] in self.hashed_messages:
                col_msg = self.hashed_messages.pop(shorthash[:-1])
                col_new_hash = calc_hash(col_msg)[:hl]
                col_msg.hash = col_new_hash
                self.hashed_messages[col_new_hash] = col_msg
                self.change_message(str(col_msg.ts))
                if col_msg.thread_channel:
                    col_msg.thread_channel.rename()
            self.hashed_messages[shorthash] = message
            message.hash = shorthash
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
self.topic = create_user_status_string(users[dmuser].profile)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
else:
self.color = ""
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
if config.colorize_private_chats and enable_color:
print_color = self.color
else:
print_color = ""
if not present:
prepend = " "
else:
prepend = "+"
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return print_color + select[style]
def open(self, update_remote=True):
self.create_buffer()
# self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
    def refresh(self):
        # Refreshing a DM just re-applies the presence-aware short name.
        return self.rename()
class SlackGroupChannel(SlackChannel):
    """
    A group channel is a private discussion group.
    """

    def __init__(self, eventrouter, **kwargs):
        super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
        self.type = "group"
        self.set_name(self.slack_name)

    def set_name(self, slack_name):
        # Private groups are shown with the configured name prefix.
        self.name = config.group_name_prefix + slack_name
class SlackMPDMChannel(SlackChannel):
    """
    An MPDM channel is a special instance of a 'group' channel.
    We change the name to look less terrible in weechat.
    """

    def __init__(self, eventrouter, **kwargs):
        super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
        self.set_name(kwargs.get('name'))
        self.type = "mpim"

    def open(self, update_remote=True):
        """Create the buffer, print history, and optionally (re)open the
        conversation on the Slack side with the full member list."""
        self.create_buffer()
        self.active = True
        self.get_history()
        api = SLACK_API_TRANSLATOR[self.type]
        if "info" in api:
            request = SlackRequest(self.team.token, api["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
            self.eventrouter.receive(request)
        if update_remote and 'join' in api:
            request = SlackRequest(self.team.token, api['join'], {'users': ','.join(self.members)}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
            self.eventrouter.receive(request)

    @staticmethod
    def adjust_name(n):
        """Turn e.g. "mpdm-user1--user2--user3-1" into "user1|user2|user3"."""
        trimmed = "-".join(n.split("-")[1:-1])
        return "|".join(trimmed.split("--"))

    def set_name(self, n):
        self.name = self.adjust_name(n)

    def formatted_name(self, style="default", typing=False, **kwargs):
        """Return the adjusted channel name for the given display style."""
        shown = self.adjust_name(self.slack_name)
        prefix = ">" if (typing and config.channel_name_typing_indicator) else "@"
        variants = {
            "default": shown,
            "sidebar": prefix + shown,
            "base": shown,
            "long_default": "{}.{}".format(self.team.preferred_name, shown),
            "long_base": "{}.{}".format(self.team.preferred_name, shown),
        }
        return variants[style]

    def rename(self):
        # MPDM buffers keep their adjusted name; nothing to refresh.
        pass
class SlackThreadChannel(object):
    """
    A thread channel is a virtual channel. We don't inherit from
    SlackChannel, because most of how it operates will be different.
    """
    def __init__(self, eventrouter, parent_message):
        # parent_message is the thread's root SlackMessage; its channel and
        # team give this virtual channel its context.
        self.eventrouter = eventrouter
        self.parent_message = parent_message
        # weechat buffer pointer; created lazily by create_buffer().
        self.channel_buffer = None
        # self.identifier = ""
        # self.name = "#" + kwargs['name']
        self.type = "thread"
        self.got_history = False
        self.label = None
        self.members = self.parent_message.channel.members
        self.team = self.parent_message.team
        # self.set_name(self.slack_name)
    # def set_name(self, slack_name):
    #     self.name = "#" + slack_name
    def formatted_name(self, style="default", **kwargs):
        """Render the thread's display name from the parent message hash
        (falls back to the parent's ts while no hash is assigned)."""
        hash_or_ts = self.parent_message.hash or self.parent_message.ts
        styles = {
            "default": " +{}".format(hash_or_ts),
            "long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
            "sidebar": " +{}".format(hash_or_ts),
        }
        return styles[style]
    def refresh(self):
        self.rename()
    def mark_read(self, ts=None, update_remote=True, force=False):
        # Threads have no remote read marker; just clear local unread state.
        if self.channel_buffer:
            w.buffer_set(self.channel_buffer, "unread", "")
            w.buffer_set(self.channel_buffer, "hotlist", "-1")
    def buffer_prnt(self, nick, text, timestamp, **kwargs):
        """Print one line into the thread buffer, stamping it with the
        slack timestamp so the minor part can be re-attached."""
        data = "{}\t{}".format(format_nick(nick), text)
        ts = SlackTS(timestamp)
        if self.channel_buffer:
            # backlog messages - we will update the read marker as we print these
            # backlog = False
            # if ts <= SlackTS(self.last_read):
            #     tags = tag("backlog")
            #     backlog = True
            # elif self.type in ["im", "mpdm"]:
            #     tags = tag("dm")
            #     self.new_messages = True
            # else:
            tags = tag("default")
            # self.new_messages = True
            w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
            modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
            # if backlog:
            #     self.mark_read(ts, update_remote=False, force=True)
    def get_history(self):
        """Replay all submessages of the parent message into the buffer."""
        self.got_history = True
        for message in self.parent_message.submessages:
            # message = SlackMessage(message_json, team, channel)
            text = message.render()
            # print text
            suffix = ''
            if 'edited' in message.message_json:
                suffix = ' (edited)'
            # try:
            #     channel.unread_count += 1
            # except:
            #     channel.unread_count = 1
            self.buffer_prnt(message.sender, text + suffix, message.ts)
    def send_message(self, message):
        """Send a reply into this thread over the team websocket."""
        # team = self.eventrouter.teams[self.team]
        message = linkify_text(message, self.team, self)
        dbg(message)
        # thread_ts ties the outgoing message to the thread's root.
        request = {"type": "message", "channel": self.parent_message.channel.identifier, "text": message, "_team": self.team.team_hash, "user": self.team.myidentifier, "thread_ts": str(self.parent_message.ts)}
        self.team.send_to_websocket(request)
        self.mark_read(update_remote=False, force=True)
    def open(self, update_remote=True):
        """Open the thread buffer and print its history."""
        self.create_buffer()
        self.active = True
        self.get_history()
        # if "info" in SLACK_API_TRANSLATOR[self.type]:
        #     s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
        #     self.eventrouter.receive(s)
        # if update_remote:
        #     if "join" in SLACK_API_TRANSLATOR[self.type]:
        #         s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
        #         self.eventrouter.receive(s)
        self.create_buffer()
    def rename(self):
        # A user-set label takes precedence over the computed name.
        if self.channel_buffer and not self.label:
            w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
    def create_buffer(self):
        """
        incomplete (muted doesn't work)
        Creates the weechat buffer where the thread magic happens.
        """
        if not self.channel_buffer:
            self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
            self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
            w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
            w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
            w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
            w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
            # Title shows the root message: "<time> <sender> | <text>".
            time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
            parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
            topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.parent_message.render() )
            w.buffer_set(self.channel_buffer, "title", topic)
            # self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
            # try:
            #    if self.unread_count != 0:
            #        for c in range(1, self.unread_count):
            #            if self.type == "im":
            #                w.buffer_set(self.channel_buffer, "hotlist", "2")
            #            else:
            #                w.buffer_set(self.channel_buffer, "hotlist", "1")
            #    else:
            #        pass
            #        #dbg("no unread in {}".format(self.name))
            # except:
            #    pass
            #    dbg("exception no unread count")
        # if self.unread_count != 0 and not self.muted:
        #    w.buffer_set(self.channel_buffer, "hotlist", "1")
    def destroy_buffer(self, update_remote):
        """Forget the weechat buffer; history will be refetched on reopen."""
        if self.channel_buffer is not None:
            self.channel_buffer = None
        self.got_history = False
        # if update_remote and not eventrouter.shutting_down:
        self.active = False
class SlackUser(object):
    """
    Represents an individual slack user and their nick formatting.
    """

    def __init__(self, **kwargs):
        """Build a user from raw Slack user JSON.

        Only "id" is strictly required; every other key becomes an
        attribute on the object.
        """
        self.identifier = kwargs["id"]
        # Default so the profile lookups below are safe when slack omits it.
        self.profile = {}
        for attr, val in kwargs.items():
            setattr(self, attr, val)
        display_name = self.profile.get("display_name")
        if display_name:
            self.slack_name = display_name
            # Nicks can't contain spaces in weechat.
            self.name = display_name.replace(' ', '')
        else:
            # No display name set. Fall back to the deprecated username field.
            self.slack_name = kwargs["name"]
            self.name = self.slack_name
        self.update_color()

    def __repr__(self):
        return "Name:{} Identifier:{}".format(self.name, self.identifier)

    def force_color(self, color_name):
        """Pin this user's nick color to a specific weechat color name."""
        self.color_name = color_name
        self.color = w.color(self.color_name)

    def update_color(self):
        # get_nick_color_name yields none/"" when nick colourization is
        # disabled, so self.color ends up empty in that case.
        self.color_name = get_nick_color_name(self.name)
        self.color = w.color(self.color_name)

    def update_status(self, status_emoji, status_text):
        """Record the user's current custom status in their profile."""
        self.profile["status_emoji"] = status_emoji
        self.profile["status_text"] = status_text

    def formatted_name(self, prepend="", enable_color=True):
        """Return the nick, optionally prefixed and/or colorized."""
        if not enable_color:
            return prepend + self.name
        return self.color + prepend + self.name
class SlackBot(SlackUser):
    """
    A bot integration. Identical to SlackUser, but kept as its own type so
    bot senders can be distinguished (and extended) later.
    """
    def __init__(self, **kwargs):
        super(SlackBot, self).__init__(**kwargs)
class SlackMessage(object):
    """
    Represents a single slack message and associated context/metadata.

    These are modifiable and can be rerendered to change a message,
    delete a message, add a reaction, add a thread.
    Note: these can't be tied to a SlackUser object because users
    can be deleted, so we have to store sender in each one.
    """
    def __init__(self, message_json, team, channel, override_sender=None):
        """
        message_json: raw message payload from the Slack API/websocket.
        override_sender: use this sender string instead of resolving one
        from the payload (used e.g. for join/leave prefixes).
        """
        self.team = team
        self.channel = channel
        self.message_json = message_json
        self.submessages = []
        self.thread_channel = None
        self.hash = None
        if override_sender:
            self.sender = override_sender
            self.sender_plain = override_sender
        else:
            senders = self.get_sender()
            self.sender, self.sender_plain = senders[0], senders[1]
        self.suffix = ''
        self.ts = SlackTS(message_json['ts'])
        # Normalize whole-line italic messages ("_text_") into Slack's
        # me_message subtype, then make sure me_messages start with the nick.
        text = self.message_json.get('text')
        if text and text.startswith('_') and text.endswith('_') and 'subtype' not in message_json:
            message_json['text'] = text[1:-1]
            message_json['subtype'] = 'me_message'
        if message_json.get('subtype') == 'me_message' and not message_json['text'].startswith(self.sender):
            message_json['text'] = self.sender + ' ' + self.message_json['text']

    def __hash__(self):
        return hash(self.ts)

    def render(self, force=False):
        """Return the display text, appending a thread summary when the
        message has replies."""
        if len(self.submessages) > 0:
            return "{} {} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix, "{}[ Thread: {} Replies: {} ]".format(w.color(config.thread_suffix_color), self.hash or self.ts, len(self.submessages)))
        return "{} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix)

    def change_text(self, new_text):
        """Replace the raw message text (caller re-renders separately)."""
        self.message_json["text"] = new_text
        dbg(self.message_json)

    def change_suffix(self, new_suffix):
        """Set the rendered suffix, e.g. ' (edited)'."""
        self.suffix = new_suffix
        dbg(self.message_json)

    def get_sender(self):
        """Resolve (colorized_name, plain_name) for this message's sender.

        Bots get a " :]" decoration; integrations are wrapped in dashes.
        """
        name = ""
        name_plain = ""
        if 'user' in self.message_json:
            if self.message_json['user'] == self.team.myidentifier:
                u = self.team.users[self.team.myidentifier]
            elif self.message_json['user'] in self.team.users:
                u = self.team.users[self.message_json['user']]
            # NOTE(review): if the user id is neither ours nor in team.users,
            # `u` is unbound here and this raises NameError — confirm whether
            # that can happen in practice before relying on it.
            name = "{}".format(u.formatted_name())
            name_plain = "{}".format(u.formatted_name(enable_color=False))
        elif 'username' in self.message_json:
            u = self.message_json["username"]
            if self.message_json.get("subtype") == "bot_message":
                name = "{} :]".format(u)
                name_plain = "{}".format(u)
            else:
                name = "-{}-".format(u)
                name_plain = "{}".format(u)
        elif 'service_name' in self.message_json:
            name = "-{}-".format(self.message_json["service_name"])
            name_plain = "{}".format(self.message_json["service_name"])
        elif self.message_json.get('bot_id') in self.team.bots:
            name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
            name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
        else:
            name = ""
            name_plain = ""
        return (name, name_plain)

    def add_reaction(self, reaction, user):
        """Record that `user` reacted with `reaction` (an emoji name).

        Fix: an existing reaction entry counts as found whenever its name
        matches, so reacting again no longer appends a duplicate
        {"name": ..., "users": [...]} entry for the same emoji.
        """
        reactions = self.message_json.get('reactions', None)
        if reactions:
            found = False
            for r in reactions:
                if r["name"] == reaction:
                    if user not in r["users"]:
                        r["users"].append(user)
                    found = True
            if not found:
                self.message_json["reactions"].append({"name": reaction, "users": [user]})
        else:
            self.message_json["reactions"] = [{"name": reaction, "users": [user]}]

    def remove_reaction(self, reaction, user):
        """Remove `user` from the given reaction entry, if present."""
        reactions = self.message_json.get('reactions', None)
        if reactions:
            for r in reactions:
                if r["name"] == reaction and user in r["users"]:
                    r["users"].remove(user)
class SlackThreadMessage(SlackMessage):
    """
    A message that lives inside a thread rather than the main channel.
    """
    def __init__(self, parent_id, *args):
        # parent_id is the timestamp of the thread's root message.
        super(SlackThreadMessage, self).__init__(*args)
        self.parent_id = parent_id
class WeeSlackMetadata(object):
    """
    A simple picklable container for request metadata.
    """

    def __init__(self, meta):
        self.meta = meta

    def jsonify(self):
        """Return the wrapped payload unchanged."""
        return self.meta
class SlackTS(object):
    """
    A Slack message timestamp, "<unix seconds>.<sequence>".

    Comparable against other SlackTS instances and against raw timestamp
    strings; hashable so it can key channel.messages dicts. Rich comparison
    methods delegate to __cmp__ so ordering also works on Python 3, where
    __cmp__ itself is ignored.
    """
    def __init__(self, ts=None):
        """Parse "major.minor"; default to the current time, sequence 0."""
        if ts:
            self.major, self.minor = [int(x) for x in ts.split('.', 1)]
        else:
            self.major = int(time.time())
            self.minor = 0

    def __cmp__(self, other):
        """Three-way compare against a SlackTS or a raw "ts" string."""
        if isinstance(other, SlackTS):
            # Numeric compare: seconds first, then the sequence part.
            if self.major < other.major:
                return -1
            elif self.major > other.major:
                return 1
            elif self.minor < other.minor:
                return -1
            elif self.minor > other.minor:
                return 1
            else:
                return 0
        else:
            # Fall back to comparing our zero-padded string form.
            s = self.__str__()
            if s < other:
                return -1
            elif s > other:
                return 1
            else:
                return 0

    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __le__(self, other):
        return self.__cmp__(other) <= 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __ne__(self, other):
        return self.__cmp__(other) != 0

    def __gt__(self, other):
        return self.__cmp__(other) > 0

    def __ge__(self, other):
        return self.__cmp__(other) >= 0

    def __hash__(self):
        # Must stay consistent with __eq__: equal (major, minor) pairs
        # produce the same string and therefore the same hash.
        return hash("{}.{}".format(self.major, self.minor))

    def __repr__(self):
        return str("{0}.{1:06d}".format(self.major, self.minor))

    def split(self, *args, **kwargs):
        """String-API shim: behave like "ts".split('.') for callers."""
        return [self.major, self.minor]

    def majorstr(self):
        return str(self.major)

    def minorstr(self):
        return str(self.minor)
###### New handlers
def handle_rtmstart(login_data, eventrouter):
    """
    This handles the main entry call to slack, rtm.start: builds (or
    reuses) the SlackTeam with all of its users, bots and channels, then
    connects the websocket and prints a connection summary.
    """
    metadata = pickle.loads(login_data["wee_slack_request_metadata"])
    if not login_data["ok"]:
        w.prnt("", "ERROR: Failed connecting to Slack with token starting with {}: {}"
               .format(metadata.token[:15], login_data["error"]))
        return
    # Let's reuse a team if we have it already.
    th = SlackTeam.generate_team_hash(login_data['self']['name'], login_data['team']['domain'])
    if not eventrouter.teams.get(th):
        # First connection for this team: build the full object graph.
        users = {}
        for item in login_data["users"]:
            users[item["id"]] = SlackUser(**item)
        bots = {}
        for item in login_data["bots"]:
            bots[item["id"]] = SlackBot(**item)
        channels = {}
        for item in login_data["channels"]:
            channels[item["id"]] = SlackChannel(eventrouter, **item)
        for item in login_data["ims"]:
            channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
        # Slack reports MPDMs as groups whose name starts with "mpdm-".
        for item in login_data["groups"]:
            if item["name"].startswith('mpdm-'):
                channels[item["id"]] = SlackMPDMChannel(eventrouter, **item)
            else:
                channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
        t = SlackTeam(
            eventrouter,
            metadata.token,
            login_data['url'],
            login_data["team"]["domain"],
            login_data["self"]["name"],
            login_data["self"]["id"],
            users,
            bots,
            channels,
            muted_channels=login_data["self"]["prefs"]["muted_channels"],
            highlight_words=login_data["self"]["prefs"]["highlight_words"],
        )
        eventrouter.register_team(t)
    else:
        # Reconnect: keep the existing team, just refresh the websocket URL.
        t = eventrouter.teams.get(th)
        t.set_reconnect_url(login_data['url'])
    t.connect()
    t.buffer_prnt('Connected to Slack')
    t.buffer_prnt('{:<20} {}'.format("Websocket URL", login_data["url"]))
    t.buffer_prnt('{:<20} {}'.format("User name", login_data["self"]["name"]))
    t.buffer_prnt('{:<20} {}'.format("User ID", login_data["self"]["id"]))
    t.buffer_prnt('{:<20} {}'.format("Team name", login_data["team"]["name"]))
    t.buffer_prnt('{:<20} {}'.format("Team domain", login_data["team"]["domain"]))
    t.buffer_prnt('{:<20} {}'.format("Team id", login_data["team"]["id"]))
    dbg("connected to {}".format(t.domain))
def handle_emojilist(emoji_json, eventrouter, **kwargs):
    """Feed the team's custom emoji names into its completion list."""
    if not emoji_json["ok"]:
        return
    request_metadata = pickle.loads(emoji_json["wee_slack_request_metadata"])
    team = eventrouter.teams[request_metadata.team_hash]
    team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, **kwargs):
    """Apply unread count and member list from a channels.info response."""
    metadata = pickle.loads(channel_json["wee_slack_request_metadata"])
    channel = eventrouter.teams[metadata.team_hash].channels[metadata.channel_identifier]
    payload = channel_json['channel']
    channel.set_unread_count_display(payload['unread_count_display'])
    channel.set_members(payload['members'])
def handle_groupsinfo(group_json, eventrouter, **kwargs):
    """Apply the unread count from a groups.info response.

    (Removed the `group_id` local that the original computed but never
    used.)
    """
    request_metadata = pickle.loads(group_json["wee_slack_request_metadata"])
    team = eventrouter.teams[request_metadata.team_hash]
    group = team.channels[request_metadata.channel_identifier]
    group.set_unread_count_display(group_json['group']['unread_count_display'])
def handle_conversationsopen(conversation_json, eventrouter, object_name='channel', **kwargs):
    """After opening a conversation, sync its unread count.

    Only applies when the request metadata carries a channel_identifier,
    i.e. the channel already existed locally rather than being brand new.
    """
    metadata = pickle.loads(conversation_json["wee_slack_request_metadata"])
    if not hasattr(metadata, 'channel_identifier'):
        return
    team = eventrouter.teams[metadata.team_hash]
    conversation = team.channels[metadata.channel_identifier]
    conversation.set_unread_count_display(conversation_json[object_name]['unread_count_display'])
def handle_mpimopen(mpim_json, eventrouter, object_name='group', **kwargs):
    # mpim.open responses look like conversations.open, but under "group".
    handle_conversationsopen(mpim_json, eventrouter, object_name, **kwargs)
def handle_groupshistory(message_json, eventrouter, **kwargs):
    # groups.history responses share the generic history handler.
    handle_history(message_json, eventrouter, **kwargs)
def handle_channelshistory(message_json, eventrouter, **kwargs):
    # channels.history responses share the generic history handler.
    handle_history(message_json, eventrouter, **kwargs)
def handle_imhistory(message_json, eventrouter, **kwargs):
    # im.history responses share the generic history handler.
    handle_history(message_json, eventrouter, **kwargs)
def handle_mpimhistory(message_json, eventrouter, **kwargs):
    # mpim.history responses share the generic history handler.
    handle_history(message_json, eventrouter, **kwargs)
def handle_history(message_json, eventrouter, **kwargs):
    """Print a batch of fetched history messages, oldest first.

    Clears the buffer first when the originating request asked for it.
    """
    request_metadata = pickle.loads(message_json["wee_slack_request_metadata"])
    kwargs['team'] = eventrouter.teams[request_metadata.team_hash]
    kwargs['channel'] = kwargs['team'].channels[request_metadata.channel_identifier]
    # Older requests may not carry a "clear" attribute; default to False
    # rather than swallowing every exception with a bare except.
    clear = getattr(request_metadata, 'clear', False)
    dbg(clear)
    kwargs['output_type'] = "backlog"
    if clear:
        w.buffer_clear(kwargs['channel'].channel_buffer)
    # Slack returns newest-first; print in chronological order.
    for message in reversed(message_json["messages"]):
        process_message(message, eventrouter, **kwargs)
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, **kwargs):
    # First event on a fresh websocket: start presence tracking for the team.
    kwargs['team'].subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, **kwargs):
    # Slack periodically pushes a fresher URL to use when reconnecting.
    kwargs['team'].set_reconnect_url(message_json['url'])
def process_manual_presence_change(message_json, eventrouter, **kwargs):
    # A manual change carries the same payload as an automatic one.
    process_presence_change(message_json, eventrouter, **kwargs)
def process_presence_change(message_json, eventrouter, **kwargs):
    """Update presence for a single user ("user" kwarg) or for a batched
    list of user ids ("users" in the event payload)."""
    if "user" in kwargs:
        # TODO: remove once it's stable
        user = kwargs["user"]
        team = kwargs["team"]
        team.update_member_presence(user, message_json["presence"])
    if "users" in message_json:
        team = kwargs["team"]
        for user_id in message_json["users"]:
            user = team.users[user_id]
            team.update_member_presence(user, message_json["presence"])
def process_pref_change(message_json, eventrouter, **kwargs):
    """React to Slack preference changes we mirror locally."""
    team = kwargs["team"]
    pref_name = message_json['name']
    pref_value = message_json['value']
    if pref_name == 'muted_channels':
        team.set_muted_channels(pref_value)
    elif pref_name == 'highlight_words':
        team.set_highlight_words(pref_value)
    else:
        dbg("Preference change not implemented: {}\n".format(pref_name))
def process_user_change(message_json, eventrouter, **kwargs):
    """
    Currently only used to update status, but lots here we could do.
    """
    user = message_json['user']
    profile = user.get("profile")
    team = kwargs["team"]
    team.users[user["id"]].update_status(profile.get("status_emoji"), profile.get("status_text"))
    # Mirror the new status into the topic of the DM with that user, if any.
    dmchannel = team.find_channel_by_members({user["id"]}, channel_type='im')
    if dmchannel:
        dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, **kwargs):
    """Record the typing user on the channel and refresh the bar item."""
    channel = kwargs["channel"]
    team = kwargs["team"]
    if channel:
        channel.set_typing(team.users.get(message_json["user"]).name)
        w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, **kwargs):
    # A new member joined the workspace; start tracking them.
    user = message_json['user']
    team = kwargs["team"]
    team.users[user["id"]] = SlackUser(**user)
def process_pong(message_json, eventrouter, **kwargs):
    # Server reply to our keepalive ping; nothing to do.
    pass
def process_message(message_json, eventrouter, store=True, **kwargs):
    """Render and print an incoming message to its channel buffer.

    Messages with a known subtype (threads, joins/leaves, edits, deletes,
    topic changes) are dispatched to the matching subprocess_<subtype>
    handler instead. store=False suppresses caching the message on the
    channel (used for replays).
    """
    channel = kwargs["channel"]
    team = kwargs["team"]
    # Subtypes that have a dedicated subprocess_ handler below.
    known_subtypes = [
        'thread_message',
        'message_replied',
        'message_changed',
        'message_deleted',
        'channel_join',
        'channel_leave',
        'channel_topic',
        # 'group_join',
        # 'group_leave',
    ]
    # A reply that isn't a thread root is treated as a thread message.
    if "thread_ts" in message_json and "reply_count" not in message_json:
        message_json["subtype"] = "thread_message"
    subtype = message_json.get("subtype", None)
    if subtype and subtype in known_subtypes:
        # Look the handler up by name; the whitelist above keeps this safe
        # and avoids eval() on a constructed string.
        f = globals()['subprocess_' + subtype]
        f(message_json, eventrouter, channel, team)
    else:
        message = SlackMessage(message_json, team, channel)
        text = message.render()
        dbg("Rendered message: %s" % text)
        dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
        # Handle actions (/me).
        # We don't use `subtype` here because creating the SlackMessage may
        # have changed the subtype based on the detected message contents.
        if message.message_json.get('subtype') == 'me_message':
            try:
                channel.unread_count_display += 1
            except (AttributeError, TypeError):
                # Counter not initialized yet on this channel.
                channel.unread_count_display = 1
            channel.buffer_prnt(w.prefix("action").rstrip(), text, message.ts, tag_nick=message.sender_plain, **kwargs)
        else:
            suffix = ' (edited)' if 'edited' in message_json else ''
            try:
                channel.unread_count_display += 1
            except (AttributeError, TypeError):
                channel.unread_count_display = 1
            channel.buffer_prnt(message.sender, text + suffix, message.ts, tag_nick=message.sender_plain, **kwargs)
        if store:
            channel.store_message(message, team)
        dbg("NORMAL REPLY {}".format(message_json))
def subprocess_thread_message(message_json, eventrouter, channel, team):
    """Attach an incoming thread reply to its parent message and, if the
    thread buffer is open, print it there too."""
    # print ("THREADED: " + str(message_json))
    parent_ts = message_json.get('thread_ts', None)
    if parent_ts:
        parent_message = channel.messages.get(SlackTS(parent_ts), None)
        if parent_message:
            message = SlackThreadMessage(parent_ts, message_json, team, channel)
            parent_message.submessages.append(message)
            # Give the parent a short hash so the thread gets a usable name.
            channel.hash_message(parent_ts)
            channel.store_message(message, team)
            # Re-render the parent so its reply count updates.
            channel.change_message(parent_ts)
            text = message.render()
            # channel.buffer_prnt(message.sender, text, message.ts, **kwargs)
            if parent_message.thread_channel:
                parent_message.thread_channel.buffer_prnt(message.sender, text, message.ts)
    # channel = channels.find(message_json["channel"])
    # server = channel.server
    # #threadinfo = channel.get_message(message_json["thread_ts"])
    # message = Message(message_json, server=server, channel=channel)
    # dbg(message, main_buffer=True)
    #
    # orig = channel.get_message(message_json['thread_ts'])
    # if orig[0]:
    #     channel.get_message(message_json['thread_ts'])[2].add_thread_message(message)
    # else:
    #     dbg("COULDN'T find orig message {}".format(message_json['thread_ts']), main_buffer=True)
    # if threadinfo[0]:
    #     channel.messages[threadinfo[1]].become_thread()
    #     message_json["item"]["ts"], message_json)
    # channel.change_message(message_json["thread_ts"], None, message_json["text"])
    # channel.become_thread(message_json["item"]["ts"], message_json)
def subprocess_channel_join(message_json, eventrouter, channel, team):
    """Print a join notice and add the user to the channel nicklist."""
    joinprefix = w.prefix("join")
    message = SlackMessage(message_json, team, channel, override_sender=joinprefix)
    channel.buffer_prnt(joinprefix, message.render(), message_json["ts"], tagset='joinleave')
    channel.user_joined(message_json['user'])
def subprocess_channel_leave(message_json, eventrouter, channel, team):
    """Print a part notice and drop the user from the channel nicklist."""
    leaveprefix = w.prefix("quit")
    message = SlackMessage(message_json, team, channel, override_sender=leaveprefix)
    channel.buffer_prnt(leaveprefix, message.render(), message_json["ts"], tagset='joinleave')
    channel.user_left(message_json['user'])
    # channel.update_nicklist(message_json['user'])
    # channel.update_nicklist()
def subprocess_message_replied(message_json, eventrouter, channel, team):
    # Thread bookkeeping happens in subprocess_thread_message; ignore.
    pass
def subprocess_message_changed(message_json, eventrouter, channel, team):
    """Handle a message_changed event: merge the nested "message" payload
    into the outer event, re-apply attachment text, and update the printed
    line (adding " (edited)" when Slack marked it as an edit).

    Fix: events without a nested "message" used to hit a NameError on
    new_message; we now bail out early instead.
    """
    m = message_json.get("message", None)
    if not m:
        return
    new_message = m
    # message = SlackMessage(new_message, team, channel)
    if "attachments" in m:
        message_json["attachments"] = m["attachments"]
    if "text" in m:
        if "text" in message_json:
            message_json["text"] += m["text"]
            dbg("added text!")
        else:
            message_json["text"] = m["text"]
    if "fallback" in m:
        if "fallback" in message_json:
            message_json["fallback"] += m["fallback"]
        else:
            message_json["fallback"] = m["fallback"]
    new_message["text"] += unwrap_attachments(message_json, new_message["text"])
    if "edited" in new_message:
        channel.change_message(new_message["ts"], new_message["text"], ' (edited)')
    else:
        channel.change_message(new_message["ts"], new_message["text"])
def subprocess_message_deleted(message_json, eventrouter, channel, team):
    # Replace the printed line with a deletion marker.
    channel.change_message(message_json["deleted_ts"], "(deleted)", '')
def subprocess_channel_topic(message_json, eventrouter, channel, team):
    """Print the topic-change notice and update the stored topic."""
    text = unhtmlescape(unfurl_refs(message_json["text"], ignore_alt_text=False))
    channel.buffer_prnt(w.prefix("network").rstrip(), text, message_json["ts"], tagset="muted")
    channel.set_topic(unhtmlescape(message_json["topic"]))
def process_reply(message_json, eventrouter, **kwargs):
    """Handle the websocket ack for a message we sent: attach the server
    timestamp to the stashed outgoing payload and print it ourselves."""
    dbg('processing reply')
    team = kwargs["team"]
    identifier = message_json["reply_to"]
    try:
        # The outgoing payload was stashed under its reply id when sent.
        original_message_json = team.ws_replies[identifier]
        del team.ws_replies[identifier]
        if "ts" in message_json:
            original_message_json["ts"] = message_json["ts"]
        else:
            dbg("no reply ts {}".format(message_json))
        c = original_message_json.get('channel', None)
        channel = team.channels[c]
        m = SlackMessage(original_message_json, team, channel)
        # if "type" in message_json:
        #     if message_json["type"] == "message" and "channel" in message_json.keys():
        #         message_json["ts"] = message_json["ts"]
        #         channels.find(message_json["channel"]).store_message(m, from_me=True)
        #         channels.find(message_json["channel"]).buffer_prnt(server.nick, m.render(), m.ts)
        process_message(m.message_json, eventrouter, channel=channel, team=team)
        channel.mark_read(update_remote=True, force=True)
        dbg("REPLY {}".format(message_json))
    except KeyError:
        dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, **kwargs):
    """
    complete

    Another client marked the channel read; sync our local read marker.
    """
    channel = kwargs["channel"]
    ts = message_json.get("ts", None)
    if not ts:
        dbg("tried to mark something weird {}".format(message_json))
        return
    channel.mark_read(ts=ts, force=True, update_remote=False)
def process_group_marked(message_json, eventrouter, **kwargs):
    # Same handling as channel_marked.
    process_channel_marked(message_json, eventrouter, **kwargs)
def process_im_marked(message_json, eventrouter, **kwargs):
    # Same handling as channel_marked.
    process_channel_marked(message_json, eventrouter, **kwargs)
def process_mpim_marked(message_json, eventrouter, **kwargs):
    # Same handling as channel_marked.
    process_channel_marked(message_json, eventrouter, **kwargs)
def process_channel_joined(message_json, eventrouter, **kwargs):
    """We joined a channel: refresh its state and open its buffer."""
    item = message_json["channel"]
    joined_channel = kwargs['team'].channels[item["id"]]
    joined_channel.update_from_message_json(item)
    joined_channel.open()
def process_channel_created(message_json, eventrouter, **kwargs):
    """Register a newly created channel and announce it on the team buffer."""
    item = message_json["channel"]
    c = SlackChannel(eventrouter, team=kwargs["team"], **item)
    kwargs['team'].channels[item["id"]] = c
    kwargs['team'].buffer_prnt('Channel created: {}'.format(c.slack_name))
def process_channel_rename(message_json, eventrouter, **kwargs):
    """Record a channel's new slack name."""
    item = message_json["channel"]
    kwargs['team'].channels[item["id"]].slack_name = item['name']
def process_im_created(message_json, eventrouter, **kwargs):
    """Register a newly created DM channel and announce it."""
    team = kwargs['team']
    item = message_json["channel"]
    c = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
    team.channels[item["id"]] = c
    kwargs['team'].buffer_prnt('IM channel created: {}'.format(c.name))
def process_im_open(message_json, eventrouter, **kwargs):
    """A DM was opened (possibly from another client); open it here too."""
    channel = kwargs['channel']
    item = message_json
    kwargs['team'].channels[item["channel"]].check_should_open(True)
    # NOTE(review): "2" appears to set private-message hotlist priority —
    # confirm against the weechat buffer_set documentation.
    w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, **kwargs):
    """A DM was closed elsewhere; detach its local buffer."""
    item = message_json
    cbuf = kwargs['team'].channels[item["channel"]].channel_buffer
    # NOTE(review): the two flags look like (update_remote, close_buffer) —
    # confirm against unregister_buffer's definition.
    eventrouter.weechat_controller.unregister_buffer(cbuf, False, True)
def process_group_joined(message_json, eventrouter, **kwargs):
    """We joined a private group (or MPDM): register it and open it."""
    item = message_json["channel"]
    # MPDMs arrive as groups whose name carries the "mpdm-" prefix.
    if item["name"].startswith("mpdm-"):
        new_channel = SlackMPDMChannel(eventrouter, team=kwargs["team"], **item)
    else:
        new_channel = SlackGroupChannel(eventrouter, team=kwargs["team"], **item)
    kwargs['team'].channels[item["id"]] = new_channel
    new_channel.open()
def process_reaction_added(message_json, eventrouter, **kwargs):
    """Attach a reaction to the referenced message and re-render it."""
    channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
    if message_json["item"].get("type") == "message":
        ts = SlackTS(message_json['item']["ts"])
        # Silently ignored when the message isn't in our local cache.
        message = channel.messages.get(ts, None)
        if message:
            message.add_reaction(message_json["reaction"], message_json["user"])
            channel.change_message(ts)
    else:
        dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, **kwargs):
    """Remove a reaction from the referenced message and re-render it."""
    channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
    if message_json["item"].get("type") == "message":
        ts = SlackTS(message_json['item']["ts"])
        # Silently ignored when the message isn't in our local cache.
        message = channel.messages.get(ts, None)
        if message:
            message.remove_reaction(message_json["reaction"], message_json["user"])
            channel.change_message(ts)
    else:
        dbg("Reaction to item type not supported: " + str(message_json))
def process_emoji_changed(message_json, eventrouter, **kwargs):
    # Custom emoji were added/removed; refresh the completion list.
    team = kwargs['team']
    team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
    """Convert Slack *bold* and _italic_ markup into weechat attributes."""
    markup = [
        (r'(^| )\*([^*]+)\*([^a-zA-Z0-9_]|$)', config.render_bold_as),
        (r'(^| )_([^_]+)_([^a-zA-Z0-9_]|$)', config.render_italic_as),
    ]
    for pattern, attr in markup:
        replacement = r'\1{}\2{}\3'.format(w.color(attr), w.color('-' + attr))
        text = re.sub(pattern, replacement, text)
    return text
def render(message_json, team, channel, force=False):
    """Produce (and cache on the payload) the display text for a message:
    unfurled refs, attachments, markdown formatting and reactions."""
    # If we already have a rendered version in the object, just return that.
    if not force and message_json.get("_rendered_text", ""):
        return message_json["_rendered_text"]
    else:
        # server = servers.find(message_json["_server"])
        # Prefer the fallback text when present; guard against a null text.
        if "fallback" in message_json:
            text = message_json["fallback"]
        elif "text" in message_json:
            if message_json['text'] is not None:
                text = message_json["text"]
            else:
                text = ""
        else:
            text = ""
        text = unfurl_refs(text)
        text += unfurl_refs(unwrap_attachments(message_json, text))
        text = text.lstrip()
        text = unhtmlescape(text.replace("\t", "    "))
        if message_json.get('mrkdwn', True):
            text = render_formatting(text)
        # if self.threads:
        #     text += " [Replies: {} Thread ID: {} ] ".format(len(self.threads), self.thread_id)
        #     #for thread in self.threads:
        text += create_reaction_string(message_json.get("reactions", ""))
        # Cache so subsequent renders are free until the message changes.
        message_json["_rendered_text"] = text
        return text
def linkify_text(message, team, channel):
    """Prepare an outgoing message for the Slack API.

    IRC formatting characters are translated to Slack's, characters special
    to Slack are entity-encoded, and @nick / #channel references are
    rewritten to Slack's <@UID> / <#CID|name> form.

    The get_username_map function is a bit heavy, but this whole
    function is only called on message send.
    """
    usernames = team.get_username_map()
    channels = team.get_channel_map()
    message = (message
        # Replace IRC formatting chars with Slack formatting chars.
        .replace('\x02', '*')
        .replace('\x1D', '_')
        .replace('\x1F', config.map_underline_to)
        # Escape chars that have special meaning to Slack. Note that we do not
        # (and should not) perform full HTML entity-encoding here.
        # See https://api.slack.com/docs/message-formatting for details.
        # Bug fix: these replaces were identity no-ops; Slack requires
        # &, < and > to be sent as entities.
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .split(' '))
    for i, word in enumerate(message):
        targets = re.match(r'^\s*([@#])([\w.-]+[\w. -])(\W*)', word)
        if targets and targets.groups()[0] == '@':
            named = targets.groups()
            if named[1] in ["group", "channel", "here"]:
                message[i] = "<!{}>".format(named[1])
            else:
                try:
                    if usernames[named[1]]:
                        message[i] = "<@{}>{}".format(usernames[named[1]], named[2])
                except KeyError:
                    # Unknown nick: leave it as plain @name text.
                    message[i] = "@{}{}".format(named[1], named[2])
        if targets and targets.groups()[0] == '#':
            named = targets.groups()
            try:
                if channels[named[1]]:
                    message[i] = "<#{}|{}>{}".format(channels[named[1]], named[1], named[2])
            except KeyError:
                # Unknown channel: leave it as plain #name text.
                message[i] = "#{}{}".format(named[1], named[2])
    return " ".join(message)
def unfurl_refs(text, ignore_alt_text=None, auto_link_display=None):
    """
    input : <@U096Q7CQM|someuser> has joined the channel
    output: someuser has joined the channel
    """
    # Find all strings enclosed by <>
    # - <https://example.com|example with spaces>
    # - <#C2147483705|#otherchannel>
    # - <@U2147483697|@othernick>
    # Test patterns live in ./_pytest/test_unfurl.py
    if ignore_alt_text is None:
        ignore_alt_text = config.unfurl_ignore_alt_text
    if auto_link_display is None:
        auto_link_display = config.unfurl_auto_link_display
    for enclosed in re.findall(r"(<[@#]?(?:[^>]*)>)", text):
        # Replace each reference with its human readable rendering.
        rendered = unfurl_ref(enclosed[1:-1], ignore_alt_text, auto_link_display)
        text = text.replace(enclosed, rendered)
    return text
def unfurl_ref(ref, ignore_alt_text, auto_link_display):
    """Render one Slack reference (the contents of <...>) as display text."""
    id = ref.split('|')[0]
    if ref.find('|') == -1:
        # No alt text at all: resolve the bare reference.
        return resolve_ref(ref)
    if ignore_alt_text:
        return resolve_ref(id)
    if id.startswith("#C"):
        return "#{}".format(ref.split('|')[1])
    if id.startswith("@U"):
        return ref.split('|')[1]
    # Plain link with alt text: decide between URL, alt text, or both.
    url, desc = ref.split('|', 1)
    # Does the alt text equal the URL minus its scheme?
    match_url = r"^\w+:(//)?{}$".format(re.escape(desc))
    url_matches_desc = re.match(match_url, url)
    if url_matches_desc and auto_link_display == "text":
        return desc
    if url_matches_desc and auto_link_display == "url":
        return url
    return "{} ({})".format(url, desc)
def unhtmlescape(text):
    """Decode the three HTML entities that the Slack API escapes in text.

    Slack only escapes &, < and > (see
    https://api.slack.com/docs/message-formatting). &amp; must be decoded
    last so that e.g. "&amp;lt;" round-trips to "&lt;" and not "<".
    The previous replace calls were identity no-ops (the entity strings
    had been lost), so escaped text was shown verbatim.
    """
    return text.replace("&lt;", "<") \
               .replace("&gt;", ">") \
               .replace("&amp;", "&")
def unwrap_attachments(message_json, text_before):
    """Render a message's attachments as text to append after the body.

    Returns one newline-joined string; when text_before is non-empty an
    initial empty element produces a leading newline so attachments start
    on their own line. Links already present in text_before are skipped.
    """
    text_before_unescaped = unhtmlescape(text_before)
    attachment_texts = []
    a = message_json.get("attachments", None)
    if a:
        if text_before:
            attachment_texts.append('')
        for attachment in a:
            # Attachments should be rendered roughly like:
            #
            # $pretext
            # $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
            # $author: (if no $author on previous line) $text
            # $fields
            t = []
            prepend_title_text = ''
            if 'author_name' in attachment:
                prepend_title_text = attachment['author_name'] + ": "
            if 'pretext' in attachment:
                t.append(attachment['pretext'])
            title = attachment.get('title', None)
            title_link = attachment.get('title_link', '')
            # Drop the link when the message body already contains it.
            if title_link in text_before_unescaped:
                title_link = ''
            if title and title_link:
                t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
                prepend_title_text = ''
            elif title and not title_link:
                t.append('%s%s' % (prepend_title_text, title,))
                prepend_title_text = ''
            from_url = attachment.get('from_url', '')
            if from_url not in text_before_unescaped and from_url != title_link:
                t.append(from_url)
            atext = attachment.get("text", None)
            if atext:
                # Collapse runs of blank/indented lines inside the attachment text.
                tx = re.sub(r' *\n[\n ]+', '\n', atext)
                t.append(prepend_title_text + tx)
                prepend_title_text = ''
            fields = attachment.get("fields", None)
            if fields:
                for f in fields:
                    if f['title'] != '':
                        t.append('%s %s' % (f['title'], f['value'],))
                    else:
                        t.append(f['value'])
            # When nothing else rendered, fall back to the attachment's fallback text.
            fallback = attachment.get("fallback", None)
            if t == [] and fallback:
                t.append(fallback)
            attachment_texts.append("\n".join([x.strip() for x in t if x]))
    return "\n".join(attachment_texts)
def resolve_ref(ref):
    """Translate an @U.../#C... id into a human readable name, when known.

    TODO: this hack to use the global eventrouter needs to go; the resolver
    should probably live on the slack team or the eventrouter itself.
    """
    if 'EVENTROUTER' not in globals():
        return ref
    e = EVENTROUTER
    if ref.startswith('@U') or ref.startswith('@W'):
        for t in e.teams.keys():
            if ref[1:] in e.teams[t].users:
                return "@{}".format(e.teams[t].users[ref[1:]].name)
    elif ref.startswith('#C'):
        for t in e.teams.keys():
            if ref[1:] in e.teams[t].channels:
                return "{}".format(e.teams[t].channels[ref[1:]].name)
    # Something else (or unknown id): return as-is.
    return ref
def create_user_status_string(profile):
    """Format "real_name | emoji text" for a user profile.

    The status part is omitted when the profile has neither a status emoji
    nor a status text.
    """
    real_name = profile.get("real_name")
    emoji = profile.get("status_emoji")
    text = profile.get("status_text")
    if not (emoji or text):
        return real_name
    return "{} | {} {}".format(real_name, emoji, text)
def create_reaction_string(reactions):
    """Render a message's reactions as e.g. " [:thumbsup:2 :eyes:(alice)]".

    Returns '' whenever no reaction has any remaining users (including the
    legacy non-list input case, which never counts users).
    """
    count = 0
    if not isinstance(reactions, list):
        reaction_string = " [{}]".format(reactions)
    else:
        rendered = []
        for r in reactions:
            if len(r["users"]) > 0:
                count += 1
                if config.show_reaction_nicks:
                    nicks = [resolve_ref("@{}".format(u)) for u in r["users"]]
                    users = "({})".format(",".join(nicks))
                else:
                    users = len(r["users"])
                rendered.append(":{}:{}".format(r["name"], users))
        reaction_string = ' [' + ' '.join(rendered) + ']'
    if count == 0:
        reaction_string = ''
    return reaction_string
def modify_buffer_line(buffer, new_line, timestamp, time_id):
    """Rewrite the text of an already-printed message in place.

    Walks the buffer's lines backwards to find those whose date/date_printed
    match (timestamp, time_id) -- a multi-line message occupies several
    buffer lines -- then overwrites them with new_line, padding with empty
    strings when the new text has fewer lines. Returns WEECHAT_RC_OK, or
    WEECHAT_RC_ERROR when a line without data is encountered.
    """
    # get a pointer to this buffer's lines
    own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
    if own_lines:
        # get a pointer to the last line
        line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
        # hold the structure of a line and of line data
        struct_hdata_line = w.hdata_get('line')
        struct_hdata_line_data = w.hdata_get('line_data')
        # keep track of the number of lines with the matching time and id
        number_of_matching_lines = 0
        while line_pointer:
            # get a pointer to the data in line_pointer via layout of struct_hdata_line
            data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
            if data:
                line_timestamp = w.hdata_time(struct_hdata_line_data, data, 'date')
                # date_printed is overloaded to carry the slack sub-second id
                # (see modify_print_time).
                line_time_id = w.hdata_integer(struct_hdata_line_data, data, 'date_printed')
                if timestamp == int(line_timestamp) and int(time_id) == line_time_id:
                    number_of_matching_lines += 1
                elif number_of_matching_lines > 0:
                    # since number_of_matching_lines is non-zero, we have
                    # already reached the message and can stop traversing
                    break
            else:
                dbg(('Encountered line without any data while trying to modify '
                    'line. This is not handled, so aborting modification.'))
                return w.WEECHAT_RC_ERROR
            # move backwards one line and try again - exit the while if you hit the end
            line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
        # split the message into at most the number of existing lines
        lines = new_line.split('\n', number_of_matching_lines - 1)
        # updating a line with a string containing newlines causes the lines to
        # be broken when viewed in bare display mode
        lines = [line.replace('\n', ' | ') for line in lines]
        # pad the list with empty strings until the number of elements equals
        # number_of_matching_lines
        lines += [''] * (number_of_matching_lines - len(lines))
        # line_pointer now sits just before the message's first line; walk
        # forward writing each replacement line.
        if line_pointer:
            for line in lines:
                line_pointer = w.hdata_move(struct_hdata_line, line_pointer, 1)
                data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
                w.hdata_update(struct_hdata_line_data, data, {"message": line})
    return w.WEECHAT_RC_OK
def modify_print_time(buffer, new_id, time):
    """
    This overloads the time printed field to let us store the slack
    per message unique id that comes after the "." in a slack ts

    Stamps date_printed = new_id onto every buffer line of the most
    recently printed message so modify_buffer_line() can find it later.
    Returns WEECHAT_RC_OK, or WEECHAT_RC_ERROR on a line without data.
    """
    # get a pointer to this buffer's lines
    own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
    if own_lines:
        # get a pointer to the last line
        line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
        # hold the structure of a line and of line data
        struct_hdata_line = w.hdata_get('line')
        struct_hdata_line_data = w.hdata_get('line_data')
        prefix = ''
        # Only the first line of a multi-line message carries a prefix, so
        # keep walking backwards until one is seen.
        while not prefix and line_pointer:
            # get a pointer to the data in line_pointer via layout of struct_hdata_line
            data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
            if data:
                prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
                w.hdata_update(struct_hdata_line_data, data, {"date_printed": new_id})
            else:
                dbg('Encountered line without any data while setting message id.')
                return w.WEECHAT_RC_ERROR
            # move backwards one line and repeat, so all the lines of the message are set
            # exit when you reach a prefix, which means you have reached the
            # first line of the message, or if you hit the end
            line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
    return w.WEECHAT_RC_OK
def format_nick(nick):
    """Wrap a nick in the user's configured nick prefix/suffix, colorized
    the way weechat renders IRC nicks."""
    nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
    nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
    nick_prefix_color = w.color(nick_prefix_color_name)
    nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
    # Bug fix: this previously re-read chat_nick_prefix, so the suffix was
    # rendered in the prefix color.
    nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_suffix'))
    nick_suffix_color = w.color(nick_suffix_color_name)
    return nick_prefix_color + nick_prefix + w.color("reset") + nick + nick_suffix_color + nick_suffix + w.color("reset")
def tag(tagset, user=None):
    """Build the weechat tag string for a message in the given tagset,
    including a nick_* attribution tag when a user is supplied."""
    tagsets = {
        # messages in the team/server buffer, e.g. "new channel created"
        "team": "no_highlight,log3",
        # when replaying something old
        "backlog": "irc_privmsg,no_highlight,notify_none,logger_backlog",
        # when posting messages to a muted channel
        "muted": "irc_privmsg,no_highlight,notify_none,log1",
        # when receiving a direct message
        "dm": "irc_privmsg,notify_private,log1",
        "dmfromme": "irc_privmsg,no_highlight,notify_none,log1",
        # when this is a join/leave, attach for smart filter ala:
        # if user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]
        "joinleave": "irc_smart_filter,no_highlight,log4",
        # catchall ?
        "default": "irc_privmsg,notify_message,log1",
    }
    default_tag = "nick_" + user.replace(" ", "_") if user else 'nick_unknown'
    return "{},slack_{},{}".format(default_tag, tagset, tagsets[tagset])
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
    """Close the current channel buffer, or the named one (/part [channel])."""
    e = EVENTROUTER
    tokens = args.split()
    if len(tokens) > 1:
        team = e.weechat_controller.buffers[current_buffer].team
        cmap = team.get_channel_map()
        channel = "".join(tokens[1:])
        # Only close when the named channel is actually known to the team.
        if channel in cmap:
            buffer_ptr = team.channels[cmap[channel]].channel_buffer
            e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
    else:
        e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
    return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
    """Parse "/topic [#channel] [topic...]" into (channel_name, topic).

    channel_name is None when no #channel argument was given. topic is
    None when no topic text was given, and '' when the topic is '-delete'
    (which signals deletion to the caller).
    """
    args = command.split()[1:]
    channel_name = None
    if args and args[0].startswith('#'):
        channel_name = args[0][1:]
        args = args[1:]
    topic = ' '.join(args) if args else None
    if topic == '-delete':
        topic = ''
    return channel_name, topic
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
    """
    Change the topic of a channel
    /topic [<channel>] [<topic>|-delete]

    With no topic text, prints the current topic instead of changing it.
    """
    channel_name, topic = parse_topic_command(command)
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    if channel_name:
        channel = team.channels.get(team.get_channel_map().get(channel_name))
    else:
        # No channel argument: operate on the current buffer's channel.
        channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
    if not channel:
        w.prnt(team.channel_buffer, "#{}: No such channel".format(channel_name))
        return w.WEECHAT_RC_OK_EAT
    if topic is None:
        # Display-only mode.
        w.prnt(channel.channel_buffer, 'Topic for {} is "{}"'.format(channel.name, channel.topic))
    else:
        # topic == '' (from -delete) clears the topic on the Slack side.
        s = SlackRequest(team.token, "channels.setTopic", {"channel": channel.identifier, "topic": topic}, team_hash=team.team_hash)
        EVENTROUTER.receive(s)
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
    """
    Get real name of user
    /whois <display_name>
    """
    args = command.split()
    if len(args) < 2:
        w.prnt(current_buffer, "Not enough arguments")
        return w.WEECHAT_RC_OK_EAT
    user = args[1]
    if user.startswith('@'):
        user = user[1:]
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    u = team.users.get(team.get_username_map().get(user))
    if u:
        team.buffer_prnt("[{}]: {}".format(user, u.real_name))
        if u.profile.get("status_text"):
            # Bug fix: profile is a dict, so use .get() -- the previous
            # attribute access (u.profile.status_emoji) raised AttributeError.
            team.buffer_prnt("[{}]: {} {}".format(
                user, u.profile.get("status_emoji", ""), u.profile.get("status_text", "")))
        team.buffer_prnt("[{}]: Real name: {}".format(user, u.profile.get('real_name_normalized', '')))
        team.buffer_prnt("[{}]: Title: {}".format(user, u.profile.get('title', '')))
        team.buffer_prnt("[{}]: Email: {}".format(user, u.profile.get('email', '')))
        team.buffer_prnt("[{}]: Phone: {}".format(user, u.profile.get('phone', '')))
    else:
        team.buffer_prnt("[{}]: No such user".format(user))
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
    """Send "/me <message>" as an italicized (_..._) Slack message."""
    # args is the whole command line ("/me rest..."); guard against a bare
    # /me, which previously raised IndexError.
    split_args = args.split(' ', 1)
    if len(split_args) < 2 or not split_args[1]:
        return w.WEECHAT_RC_OK_EAT
    message = "_{}_".format(split_args[1])
    buffer_input_callback("EVENTROUTER", current_buffer, message)
    return w.WEECHAT_RC_OK_EAT
def command_register(data, current_buffer, args):
    """Obtain a Slack API token via OAuth and store it in the plugin config.

    /slack register          -- print step-by-step OAuth instructions
    /slack register <code>   -- exchange the OAuth code for an API token
    """
    CLIENT_ID = "2468770254.51917335286"
    CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
    if args == 'register':
        message = textwrap.dedent("""
        #### Retrieving a Slack token via OAUTH ####
        1) Paste this into a browser: https://slack.com/oauth/authorize?client_id=2468770254.51917335286&scope=client
        2) Select the team you wish to access from wee-slack in your browser.
        3) Click "Authorize" in the browser **IMPORTANT: the redirect will fail, this is expected**
        4) Copy the "code" portion of the URL to your clipboard
        5) Return to weechat and run `/slack register [code]`
        """)
        w.prnt("", message)
        return
    try:
        _, oauth_code = args.split()
    except ValueError:
        w.prnt("",
               "ERROR: wrong number of arguments given for register command")
        return
    uri = (
        "https://slack.com/api/oauth.access?"
        "client_id={}&client_secret={}&code={}"
    ).format(CLIENT_ID, CLIENT_SECRET, oauth_code)
    # Python 2 urllib; this blocks weechat until Slack responds.
    ret = urllib.urlopen(uri).read()
    d = json.loads(ret)
    if not d["ok"]:
        w.prnt("",
               "ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
        return
    if config.is_default('slack_api_token'):
        w.config_set_plugin('slack_api_token', d['access_token'])
    else:
        # Add new token to existing set, joined by comma.
        tok = config.get_string('slack_api_token')
        w.config_set_plugin('slack_api_token',
                            ','.join([tok, d['access_token']]))
    w.prnt("", "Success! Added team \"%s\"" % (d['team_name'],))
    w.prnt("", "Please reload wee-slack with: /python reload slack")
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
    """Handle /msg: open a conversation with a user and optionally send text.

    /msg <user>|* [text...] -- "*" targets the current buffer's channel.
    """
    dbg("msg_command_cb")
    aargs = args.split(None, 2)
    who = aargs[1]
    if who == "*":
        who = EVENTROUTER.weechat_controller.buffers[current_buffer].slack_name
    else:
        # Open (or focus) the buffer for the target before sending.
        command_talk(data, current_buffer, "talk " + who)
    if len(aargs) > 2:
        message = aargs[2]
        team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
        cmap = team.get_channel_map()
        # Message is only sent when the target resolves to a known channel.
        if who in cmap:
            channel = team.channels[cmap[who]]
            channel.send_message(message)
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
    """List the names of all channels on the current team (/slack channels)."""
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    team.buffer_prnt("Channels:")
    for channel_name in team.get_channel_map():
        team.buffer_prnt("    {}".format(channel_name))
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
    """List every user on the current team with their presence (/slack users)."""
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    team.buffer_prnt("Users:")
    for user in team.users.values():
        team.buffer_prnt("    {:<25}({})".format(user.name, user.presence))
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def command_talk(data, current_buffer, args):
    """
    Open a chat with the specified user(s)
    /slack talk <user>[,<user2>[,<user3>...]]

    Accepts a #channel name too; for users, finds or creates the matching
    DM (one user) or MPDM (several users).
    """
    e = EVENTROUTER
    team = e.weechat_controller.buffers[current_buffer].team
    channel_name = args.split(' ')[1]
    if channel_name.startswith('#'):
        channel_name = channel_name[1:]
    # Try finding the channel by name
    chan = team.channels.get(team.get_channel_map().get(channel_name))
    # If the channel doesn't exist, try finding a DM or MPDM instead
    if not chan:
        # Get the IDs of the users
        u = team.get_username_map()
        users = set()
        for user in channel_name.split(','):
            if user.startswith('@'):
                user = user[1:]
            if user in u:
                users.add(u[user])
        if users:
            if len(users) > 1:
                channel_type = 'mpim'
                # Add the current user since MPDMs include them as a member
                users.add(team.myidentifier)
            else:
                channel_type = 'im'
            chan = team.find_channel_by_members(users, channel_type=channel_type)
            # If the DM or MPDM doesn't exist, create it
            if not chan:
                s = SlackRequest(team.token, SLACK_API_TRANSLATOR[channel_type]['join'], {'users': ','.join(users)}, team_hash=team.team_hash)
                EVENTROUTER.receive(s)
    # Open and (optionally) focus the buffer when the channel was found.
    if chan:
        chan.open()
        if config.switch_buffer_on_join:
            w.buffer_set(chan.channel_buffer, "display", "1")
        return w.WEECHAT_RC_OK_EAT
    return w.WEECHAT_RC_OK_EAT
def command_showmuted(data, current_buffer, args):
    """Print the current team's muted channel ids to the team buffer."""
    team = EVENTROUTER.weechat_controller.buffers[w.current_buffer()].team
    w.prnt(team.channel_buffer, str(team.muted_channels))
@utf8_decode
def thread_command_callback(data, current_buffer, args):
    """Handle /thread and /reply.

    /thread <ts-or-hash>  -- open a thread buffer for the given message
    /reply <count> <msg>  -- reply to the count-th most recent message
                             (1-based) in the current channel's thread
    """
    current = w.current_buffer()
    channel = EVENTROUTER.weechat_controller.buffers.get(current)
    if channel:
        args = args.split()
        if args[0] == '/thread':
            if len(args) == 2:
                try:
                    pm = channel.messages[SlackTS(args[1])]
                except:
                    # Argument was not a raw timestamp; fall back to the
                    # short message hash index.
                    pm = channel.hashed_messages[args[1]]
                tc = SlackThreadChannel(EVENTROUTER, pm)
                pm.thread_channel = tc
                tc.open()
                if config.switch_buffer_on_join:
                    w.buffer_set(tc.channel_buffer, "display", "1")
                return w.WEECHAT_RC_OK_EAT
        elif args[0] == '/reply':
            count = int(args[1])
            msg = " ".join(args[2:])
            mkeys = channel.main_message_keys_reversed()
            # Skip count-1 messages from the newest to find the parent.
            parent_id = str(next(islice(mkeys, count - 1, None)))
            channel.send_message(msg, request_dict_ext={"thread_ts": parent_id})
            return w.WEECHAT_RC_OK_EAT
        w.prnt(current, "Invalid thread command.")
    return w.WEECHAT_RC_OK_EAT
@utf8_decode
def rehistory_command_callback(data, current_buffer, args):
    """Clear the current buffer and re-fetch its history from Slack."""
    channel = EVENTROUTER.weechat_controller.buffers.get(w.current_buffer())
    channel.got_history = False
    w.buffer_clear(channel.channel_buffer)
    channel.get_history()
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def hide_command_callback(data, current_buffer, args):
    """Hide the current channel buffer if it is on the distracting list."""
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    if channel:
        full_name = channel.formatted_name(style='long_default')
        if full_name in config.distracting_channels:
            w.buffer_set(channel.channel_buffer, "hidden", "1")
    return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
    """Dispatch "/slack <subcommand> ..." to the matching command_* handler.

    The full argument string (subcommand included) is passed through,
    because the handlers re-split it themselves.
    """
    # The previous if/else had two identical branches (dead code); the
    # subcommand name is simply the first word of args.
    function_name = args.split(' ', 1)[0]
    try:
        EVENTROUTER.cmds[function_name]("", current_buffer, args)
    except KeyError:
        w.prnt("", "Command not found: " + function_name)
    return w.WEECHAT_RC_OK
@slack_buffer_required
def command_distracting(data, current_buffer, args):
    """Toggle the current channel's membership in the distracting channel
    list and persist the result (/slack distracting)."""
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    if channel:
        fullname = channel.formatted_name(style="long_default")
        # Idiomatic toggle; replaces the old count()==0 / pop(index()) dance.
        if fullname in config.distracting_channels:
            config.distracting_channels.remove(fullname)
        else:
            config.distracting_channels.append(fullname)
    # Note: saved unconditionally, matching the original control flow.
    save_distracting_channels()
def save_distracting_channels():
    """Persist the distracting channel list to the plugin configuration."""
    serialized = ','.join(config.distracting_channels)
    w.config_set_plugin('distracting_channels', serialized)
@slack_buffer_required
def command_slash(data, current_buffer, args):
    """
    Support for custom slack commands
    /slack slash /customcommand arg1 arg2 arg3
    """
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    if not channel:
        return
    team = channel.team
    if args == 'slash':
        w.prnt("", "Usage: /slack slash /someslashcommand [arguments...].")
        return
    split_args = args.split(None, 2)
    command = split_args[1]
    text = split_args[2] if len(split_args) > 2 else ""
    s = SlackRequest(team.token, "chat.command", {"command": command, "text": text, 'channel': channel.identifier}, team_hash=team.team_hash, channel_identifier=channel.identifier)
    EVENTROUTER.receive(s)
@slack_buffer_required
def command_mute(data, current_buffer, args):
    """Toggle mute on the current channel and sync the preference to Slack."""
    current = w.current_buffer()
    channel_id = EVENTROUTER.weechat_controller.buffers[current].identifier
    team = EVENTROUTER.weechat_controller.buffers[current].team
    if channel_id in team.muted_channels:
        team.muted_channels.discard(channel_id)
    else:
        team.muted_channels.add(channel_id)
    s = SlackRequest(team.token, "users.prefs.set", {"name": "muted_channels", "value": ",".join(team.muted_channels)}, team_hash=team.team_hash, channel_identifier=channel_id)
    EVENTROUTER.receive(s)
@slack_buffer_required
def command_openweb(data, current_buffer, args):
    """Print a slack.com archive URL for the current channel to the team buffer.

    From the team/server buffer itself, prints the plain https://<domain>
    URL instead.
    """
    # if done from server buffer, open slack for reals
    channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
    if isinstance(channel, SlackTeam):
        url = "https://{}".format(channel.team.domain)
    else:
        now = SlackTS()
        # p<major-ts>000000 is Slack's archive permalink format; this links
        # to "now" in the channel's archive.
        url = "https://{}/archives/{}/p{}000000".format(channel.team.domain, channel.slack_name, now.majorstr())
    w.prnt_date_tags(channel.team.channel_buffer, SlackTS().major, "openweb,logger_backlog_end,notify_none", url)
def command_nodistractions(data, current_buffer, args):
    """Toggle hiding of every buffer on the distracting_channels list."""
    global hide_distractions
    hide_distractions = not hide_distractions
    # An unset option reads back as [''], so skip that sentinel.
    if config.distracting_channels != ['']:
        for channel in config.distracting_channels:
            dbg('hiding channel {}'.format(channel))
            for c in EVENTROUTER.weechat_controller.buffers.itervalues():
                # NOTE(review): this compares a channel object with the
                # configured name string -- presumably the channel class
                # defines __eq__ against names; confirm before touching.
                if c == channel:
                    dbg('found channel {} to hide'.format(channel))
                    w.buffer_set(c.channel_buffer, "hidden", str(int(hide_distractions)))
@slack_buffer_required
def command_upload(data, current_buffer, args):
    """Upload a file to the current channel via curl (/slack upload <path>).

    NOTE(review): only spaces in the path are backslash-escaped for
    weechat's command parsing -- other metacharacters are not -- and the
    API token appears on the curl command line.
    """
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
    url = 'https://slack.com/api/files.upload'
    # args is "upload <path>"; everything after the first space is the path.
    fname = args.split(' ', 1)
    file_path = os.path.expanduser(fname[1])
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    if ' ' in file_path:
        file_path = file_path.replace(' ', '\ ')
    command = 'curl -F file=@{} -F channels={} -F token={} {}'.format(file_path, channel.identifier, team.token, url)
    w.hook_process(command, config.slack_timeout, '', '')
@utf8_decode
def away_command_cb(data, current_buffer, args):
    """Dispatch /away: with a message set presence to away, without one return to auto."""
    # TODO: reimplement all.. maybe
    away_all, message = re.match(r"^/away(?:\s+(-all))?(?:\s+(.+))?", args).groups()
    if message is not None:
        command_away(data, current_buffer, args)
    else:
        command_back(data, current_buffer, args)
    return w.WEECHAT_RC_OK
@slack_buffer_required
def command_away(data, current_buffer, args):
    """
    Sets your status as 'away'
    /slack away
    """
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    request = SlackRequest(team.token, "users.setPresence",
                           {"presence": "away"}, team_hash=team.team_hash)
    EVENTROUTER.receive(request)
@slack_buffer_required
def command_status(data, current_buffer, args):
    """
    Lets you set your Slack Status (not to be confused with away/here)
    /slack status [emoji] [status_message]
    """
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
    if not channel:
        return
    team = channel.team
    split_args = args.split(None, 2)
    emoji = split_args[1] if len(split_args) > 1 else ""
    text = split_args[2] if len(split_args) > 2 else ""
    profile = {"status_text": text, "status_emoji": emoji}
    request = SlackRequest(team.token, "users.profile.set",
                           {"profile": profile}, team_hash=team.team_hash)
    EVENTROUTER.receive(request)
@slack_buffer_required
def command_back(data, current_buffer, args):
    """
    Sets your status as 'back'
    /slack back
    """
    team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
    request = SlackRequest(team.token, "users.setPresence",
                           {"presence": "auto"}, team_hash=team.team_hash)
    EVENTROUTER.receive(request)
@slack_buffer_required
@utf8_decode
def label_command_cb(data, current_buffer, args):
    """Rename a thread buffer: /label <name> sets its short name to " +name"."""
    channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
    if channel and channel.type == 'thread':
        new_name = " +" + args.split(None, 2)[1]
        channel.label = new_name
        w.buffer_set(channel.channel_buffer, "short_name", new_name)
@utf8_decode
def set_unread_cb(data, current_buffer, command):
    """Mark every Slack channel buffer as read."""
    buffers = EVENTROUTER.weechat_controller.buffers
    for chan in buffers.values():
        chan.mark_read()
    return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
    """Mark only the current channel buffer as read."""
    EVENTROUTER.weechat_controller.buffers.get(current_buffer).mark_read()
    return w.WEECHAT_RC_OK
def command_p(data, current_buffer, args):
    """Debug helper: evaluate a Python expression and print the result.

    /slack p <expression>

    NOTE: deliberately uses eval() on user-typed input -- this is a
    developer/debug command and must never be fed untrusted text.
    """
    args = args.split(' ', 1)[1]
    w.prnt("", "{}".format(eval(args)))
###### NEW EXCEPTIONS
class ProcessNotImplemented(Exception):
    """Raised when a process_(something) handler is looked up but no such
    function has been defined."""

    def __init__(self, function_name):
        super(ProcessNotImplemented, self).__init__(function_name)
class InvalidType(Exception):
    """Raised by type checks to prevent objects of the wrong type from
    being used improperly."""

    def __init__(self, type_str):
        super(InvalidType, self).__init__(type_str)
###### New but probably old and need to migrate
def closed_slack_debug_buffer_cb(data, buffer):
    """Buffer-close callback: forget the debug buffer pointer so a fresh one
    can be created later."""
    global slack_debug
    slack_debug = None
    return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
    """Create the slack-debug buffer, or focus it if it already exists."""
    global slack_debug, debug_string
    if slack_debug is not None:
        w.buffer_set(slack_debug, "display", "1")
        return
    debug_string = None
    slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
    w.buffer_set(slack_debug, "notify", "0")
def load_emoji():
    """Load the emoji name list from weemoji.json in the weechat directory.

    Returns an empty list (logging to the debug buffer) when the file is
    missing or malformed, so emoji completion is simply disabled.
    """
    try:
        weechat_dir = w.info_get("weechat_dir", "")
        with open('{}/weemoji.json'.format(weechat_dir), 'r') as ef:
            # json.load reads straight from the file object instead of
            # buffering the whole file through read().
            return json.load(ef)["emoji"]
    except Exception as e:
        dbg("Couldn't load emoji list: {}".format(e), 5)
    return []
def setup_hooks():
    """Register all weechat hooks: bar item, timers, signals, the /slack
    command with its subcommands, command overrides and completions."""
    # Every module-level command_* function becomes a /slack subcommand.
    cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
    w.bar_item_new('slack_typing_notice', 'typing_bar_item_cb', '')
    w.hook_timer(1000, 0, 0, "typing_update_cb", "")
    w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
    w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
    # Poke Slack every 5 minutes when never_away is enabled.
    w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
    w.hook_signal('buffer_closing', "buffer_closing_callback", "EVENTROUTER")
    w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
    w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
    w.hook_signal('quit', "quit_notification_callback", "")
    if config.send_typing_notice:
        w.hook_signal('input_text_changed', "typing_notification_cb", "")
    w.hook_command(
        # Command name and description
        'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
        # Usage
        '[command] [command options]',
        # Description of arguments
        'Commands:\n' +
        '\n'.join(cmds.keys()) +
        '\nUse /slack help [command] to find out more\n',
        # Completions
        '|'.join(cmds.keys()),
        # Function name
        'slack_command_cb', '')
    # w.hook_command('me', '', 'stuff', 'stuff2', '', 'me_command_cb', '')
    # Override core IRC-style commands so they act on Slack buffers.
    w.hook_command_run('/me', 'me_command_cb', '')
    w.hook_command_run('/query', 'command_talk', '')
    w.hook_command_run('/join', 'command_talk', '')
    w.hook_command_run('/part', 'part_command_cb', '')
    w.hook_command_run('/leave', 'part_command_cb', '')
    w.hook_command_run('/topic', 'topic_command_cb', '')
    w.hook_command_run('/thread', 'thread_command_callback', '')
    w.hook_command_run('/reply', 'thread_command_callback', '')
    w.hook_command_run('/rehistory', 'rehistory_command_callback', '')
    w.hook_command_run('/hide', 'hide_command_callback', '')
    w.hook_command_run('/msg', 'msg_command_cb', '')
    w.hook_command_run('/label', 'label_command_cb', '')
    w.hook_command_run("/input complete_next", "complete_next_cb", "")
    w.hook_command_run("/input set_unread", "set_unread_cb", "")
    w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
    w.hook_command_run('/away', 'away_command_cb', '')
    w.hook_command_run('/whois', 'whois_command_cb', '')
    w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
    w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
    # Hooks to fix/implement
    # w.hook_signal('buffer_opened', "buffer_opened_cb", "")
    # w.hook_signal('window_scrolled', "scrolled_cb", "")
    # w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
    """
    send debug output to the slack-debug buffer and optionally write to a file.

    Messages below config.debug_level are dropped. With main_buffer the
    message goes to the core buffer; otherwise it goes to the slack-debug
    buffer (if open), optionally filtered by the debug_string substring.
    """
    # TODO: do this smarter
    if level >= config.debug_level:
        global debug_string
        message = "DEBUG: {}".format(message)
        if fout:
            # Use a context manager so the handle is closed; the previous
            # file(...).writelines(...) leaked the file object (and file()
            # does not exist on Python 3).
            with open('/tmp/debug.log', 'a+') as log:
                log.write(message + '\n')
        if main_buffer:
            w.prnt("", "slack: " + message)
        else:
            if slack_debug and (not debug_string or debug_string in message):
                w.prnt(slack_debug, message)
###### Config code
# (default, desc) pair used to declare each plugin setting in
# PluginConfig.default_settings below.
Setting = collections.namedtuple('Setting', ['default', 'desc'])
class PluginConfig(object):
    """Typed accessor for the plugin's settings, backed by weechat's
    plugins.var.python config store. Settings are reachable as attributes
    (e.g. ``config.debug_level``) via ``__getattr__``."""

    # Default settings.
    # These are, initially, each a (default, desc) tuple; the former is the
    # default value of the setting, in the (string) format that weechat
    # expects, and the latter is the user-friendly description of the setting.
    # At __init__ time these values are extracted, the description is used to
    # set or update the setting description for use with /help, and the default
    # value is used to set the default for any settings not already defined.
    # Following this procedure, the keys remain the same, but the values are
    # the real (python) values of the settings.
    default_settings = {
        'background_load_all_history': Setting(
            default='false',
            desc='Load history for each channel in the background as soon as it'
            ' opens, rather than waiting for the user to look at it.'),
        'channel_name_typing_indicator': Setting(
            default='true',
            desc='Change the prefix of a channel from # to > when someone is'
            ' typing in it. Note that this will (temporarily) affect the sort'
            ' order if you sort buffers by name rather than by number.'),
        'colorize_private_chats': Setting(
            default='false',
            desc='Whether to use nick-colors in DM windows.'),
        'debug_mode': Setting(
            default='false',
            desc='Open a dedicated buffer for debug messages and start logging'
            ' to it. How verbose the logging is depends on log_level.'),
        'debug_level': Setting(
            default='3',
            desc='Show only this level of debug info (or higher) when'
            ' debug_mode is on. Lower levels -> more messages.'),
        'distracting_channels': Setting(
            default='',
            desc='List of channels to hide.'),
        'group_name_prefix': Setting(
            default='&',
            desc='The prefix of buffer names for groups (private channels).'),
        'map_underline_to': Setting(
            default='_',
            desc='When sending underlined text to slack, use this formatting'
            ' character for it. The default ("_") sends it as italics. Use'
            ' "*" to send bold instead.'),
        'never_away': Setting(
            default='false',
            desc='Poke Slack every five minutes so that it never marks you "away".'),
        'record_events': Setting(
            default='false',
            desc='Log all traffic from Slack to disk as JSON.'),
        'render_bold_as': Setting(
            default='bold',
            desc='When receiving bold text from Slack, render it as this in weechat.'),
        'render_italic_as': Setting(
            default='italic',
            desc='When receiving bold text from Slack, render it as this in weechat.'
            ' If your terminal lacks italic support, consider using "underline" instead.'),
        'send_typing_notice': Setting(
            default='true',
            desc='Alert Slack users when you are typing a message in the input bar '
            '(Requires reload)'),
        'server_aliases': Setting(
            default='',
            desc='A comma separated list of `subdomain:alias` pairs. The alias'
            ' will be used instead of the actual name of the slack (in buffer'
            ' names, logging, etc). E.g `work:no_fun_allowed` would make your'
            ' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
        'short_buffer_names': Setting(
            default='false',
            desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
            ' internal name for Slack buffers. Overrides server_aliases.'),
        'show_reaction_nicks': Setting(
            default='false',
            desc='Display the name of the reacting user(s) alongside each reactji.'),
        'slack_api_token': Setting(
            default='INSERT VALID KEY HERE!',
            desc='List of Slack API tokens, one per Slack instance you want to'
            ' connect to. See the README for details on how to get these.'),
        'slack_timeout': Setting(
            default='20000',
            desc='How long (ms) to wait when communicating with Slack.'),
        'switch_buffer_on_join': Setting(
            default='true',
            desc='When /joining a channel, automatically switch to it as well.'),
        'thread_suffix_color': Setting(
            default='lightcyan',
            desc='Color to use for the [thread: XXX] suffix on messages that'
            ' have threads attached to them.'),
        'unfurl_ignore_alt_text': Setting(
            default='false',
            desc='When displaying ("unfurling") links to channels/users/etc,'
            ' ignore the "alt text" present in the message and instead use the'
            ' canonical name of the thing being linked to.'),
        'unfurl_auto_link_display': Setting(
            default='both',
            desc='When displaying ("unfurling") links to channels/users/etc,'
            ' determine what is displayed when the text matches the url'
            ' without the protocol. This happens when Slack automatically'
            ' creates links, e.g. from words separated by dots or email'
            ' addresses. Set it to "text" to only display the text written by'
            ' the user, "url" to only display the url or "both" (the default)'
            ' to display both.'),
        'unhide_buffers_with_activity': Setting(
            default='false',
            desc='When activity occurs on a buffer, unhide it even if it was'
            ' previously hidden (whether by the user or by the'
            ' distracting_channels setting).'),
    }

    # Set missing settings to their defaults. Load non-missing settings from
    # weechat configs.
    def __init__(self):
        self.settings = {}
        # Set all descriptions, replace the values in the dict with the
        # default setting value rather than the (setting,desc) tuple.
        # Use items() rather than iteritems() so we don't need to worry about
        # invalidating the iterator.
        for key, (default, desc) in self.default_settings.items():
            w.config_set_desc_plugin(key, desc)
            self.settings[key] = default

        # Migrate settings from old versions of Weeslack...
        self.migrate()
        # ...and then set anything left over from the defaults.
        # FIX: items() instead of the Python-2-only iteritems(); it behaves
        # identically here and also works on Python 3.
        for key, default in self.settings.items():
            if not w.config_get_plugin(key):
                w.config_set_plugin(key, default)
        self.config_changed(None, None, None)

    def __str__(self):
        return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])

    def config_changed(self, data, key, value):
        """Weechat config-change callback: re-read every setting."""
        for key in self.settings:
            self.settings[key] = self.fetch_setting(key)
        if self.debug_mode:
            create_slack_debug_buffer()
        return w.WEECHAT_RC_OK

    def fetch_setting(self, key):
        """Read one setting from weechat, using a get_<key> parser if defined."""
        if hasattr(self, 'get_' + key):
            try:
                return getattr(self, 'get_' + key)(key)
            # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
            # only parse errors should fall back to the current value.
            except Exception:
                return self.settings[key]
        else:
            # Most settings are on/off, so make get_boolean the default
            return self.get_boolean(key)

    def __getattr__(self, key):
        return self.settings[key]

    def get_boolean(self, key):
        return w.config_string_to_boolean(w.config_get_plugin(key))

    def get_string(self, key):
        return w.config_get_plugin(key)

    def get_int(self, key):
        return int(w.config_get_plugin(key))

    def is_default(self, key):
        """True when the stored value equals the shipped default."""
        default = self.default_settings.get(key).default
        return w.config_get_plugin(key) == default

    # Per-setting parsers; anything not listed here is parsed as a boolean.
    get_debug_level = get_int
    get_group_name_prefix = get_string
    get_map_underline_to = get_string
    get_render_bold_as = get_string
    get_render_italic_as = get_string
    get_slack_timeout = get_int
    get_thread_suffix_color = get_string
    get_unfurl_auto_link_display = get_string

    def get_distracting_channels(self, key):
        return [x.strip() for x in w.config_get_plugin(key).split(',')]

    def get_server_aliases(self, key):
        # NOTE: returns None when the setting is empty (implicit fall-through).
        alias_list = w.config_get_plugin(key)
        if len(alias_list) > 0:
            return dict(item.split(":") for item in alias_list.split(","))

    def get_slack_api_token(self, key):
        token = w.config_get_plugin("slack_api_token")
        # Allow the token to be stored in weechat's secured data.
        if token.startswith('${sec.data'):
            return w.string_eval_expression(token, {}, {}, {})
        else:
            return token

    def migrate(self):
        """
        This is to migrate the extension name from slack_extension to slack
        """
        if not w.config_get_plugin("migrated"):
            for k in self.settings.keys():
                if not w.config_is_set_plugin(k):
                    p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
                    data = w.config_string(p)
                    if data != "":
                        w.config_set_plugin(k, data)
            w.config_set_plugin("migrated", "true")
# to Trace execution, add `setup_trace()` to startup
# and to a function and sys.settrace(trace_calls) to a function
def setup_trace():
    """Open a timestamped trace file in RECORD_DIR and bind it to global *f*."""
    global f
    timestamp = time.time()
    f = open('{}/{}-trace.json'.format(RECORD_DIR, timestamp), 'w')
def trace_calls(frame, event, arg):
    """sys.settrace callback: log every function call to the global trace file *f*.

    Returns None so nested scopes are not traced.
    """
    global f
    if event != 'call':
        return
    co = frame.f_code
    func_name = co.co_name
    if func_name == 'write':
        # Ignore write() calls from print statements
        return
    func_line_no = frame.f_lineno
    func_filename = co.co_filename
    caller = frame.f_back
    caller_line_no = caller.f_lineno
    caller_filename = caller.f_code.co_filename
    # FIX: `print >> f` is Python-2-only syntax; f.write of the same text plus
    # a newline is byte-identical output and works on Python 3 as well.
    f.write('Call to %s on line %s of %s from line %s of %s\n' %
            (func_name, func_line_no, func_filename,
             caller_line_no, caller_filename))
    f.flush()
    return
def initiate_connection(token, retries=3):
    """Build the initial rtm.start SlackRequest for *token*."""
    request_args = {"batch_presence_aware": 1}
    return SlackRequest(token, 'rtm.start', request_args, retries=retries)
# Main
if __name__ == "__main__":
    # Wrap the raw weechat module so call results are decoded consistently.
    w = WeechatWrapper(weechat)

    if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
                  SCRIPT_DESC, "script_unloaded", ""):
        weechat_version = w.info_get("version_number", "") or 0
        if int(weechat_version) < 0x1030000:
            w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))

        else:
            # NOTE(review): `global` at module level is a no-op; kept as-is.
            global EVENTROUTER
            EVENTROUTER = EventRouter()
            # setup_trace()

            # WEECHAT_HOME = w.info_get("weechat_dir", "")

            # Global var section
            slack_debug = None
            config = PluginConfig()
            config_changed_cb = config.config_changed

            typing_timer = time.time()
            # domain = None
            # previous_buffer = None
            # slack_buffer = None

            # never_away = False
            hide_distractions = False
            # hotlist = w.infolist_get("hotlist", "", "")
            # main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))

            # Re-read settings whenever any of this plugin's config vars change.
            w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
            w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")

            EMOJI.extend(load_emoji())
            setup_hooks()

            # attach to the weechat hooks we need
            # One connection is initiated per configured Slack API token.
            tokens = config.slack_api_token.split(',')
            for t in tokens:
                s = initiate_connection(t)
                EVENTROUTER.receive(s)
            if config.record_events:
                EVENTROUTER.record()
            EVENTROUTER.handle_next()
            # Drive the event router from weechat's timer (every 10 ms).
            w.hook_timer(10, 0, 0, "handle_next", "")
            # END attach to the weechat hooks we need
| [
"christian@kellner.me"
] | christian@kellner.me |
2c2c8cb78bcf9652ef11f20e9933579b3cc83471 | 40425604bbd709a80e273e02d62b5925d493fdc0 | /servidor/tests/testsServidor.py | bc06fe590e7e5cb264caeb93cb316140b4f45554 | [] | no_license | mafvidal/UdriveTPTaller2 | b93daa9a44b37048f953f640805b7c67e98d59c6 | 1a1bc28fb7bc2e2e58d36263d99d41fc1ea73f27 | HEAD | 2016-09-06T16:43:35.822346 | 2015-11-24T02:41:00 | 2015-11-24T02:41:00 | 42,020,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,573 | py | #! /usr/bin/python
import requests
import unittest
class TestServidor(unittest.TestCase):
def test_01registrarUsuarioCorrectamente(self):
#No le envio los metadatos del usuario, para simplificar el tests
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro el usuario
salida = requests.post('http://localhost:8000/usuarios/usu1', json=registrarUsuarioJson)
salidaJson = salida.json()
self.assertEqual("Se registro correctamente el usuario", salidaJson["Mensaje"])
self.assertEqual("OK", salidaJson["Estado"])
def test_02iniciarSesionUsuarioCorrectamente(self):
iniciarSesionJson = {'Clave': 'MiClave'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu2', json=registrarUsuarioJson)
#Inicio sesion con el usuario
salida = requests.post('http://localhost:8000/iniciarsesion/usu2', json=iniciarSesionJson)
salidaJson = salida.json()
self.assertEqual("Inicio existoso", salidaJson["Mensaje"])
self.assertEqual("OK", salidaJson["Estado"])
def test_03iniciarSesionUsuarioConClaveIncorrectaReciboError(self):
iniciarSesionJson = {'Clave': 'otraClave'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu3', json=registrarUsuarioJson)
#Inicio sesion con el usuario
salida = requests.post('http://localhost:8000/iniciarsesion/usu3', json=iniciarSesionJson)
salidaJson = salida.json()
self.assertEqual("Usuario o clave incorrecta", salidaJson["Mensaje"])
self.assertEqual("ERROR", salidaJson["Estado"])
def test_04registrarUsuarioExistenteReciboQueElUsuarioYaExiste(self):
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500}
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu1', json=registrarUsuarioJson)
salidaJson = salida.json()
self.assertEqual("Error usuario existente", salidaJson["Mensaje"])
self.assertEqual("ERROR", salidaJson["Estado"])
def test_05ObtenerDatosUsuarioRegistrado(self):
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu5', json=registrarUsuarioJson)
#Obtengo los datos del usuario
salida = requests.get('http://localhost:8000/usuarios/usu5')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("pepe@mail.com", salidaJson["Email"])
def test_06ActualizarDatosUsuario(self):
#Metadatos originales
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
#Metadatos para actualizar
MetaDatosActualizados = {'Email': 'pepito@mail.com','Foto': 'otraFoto','Nombre': 'carlos','UltimaUbicacion': 'China'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
actualizarUsuarioJson = {'Clave': 'otraClave','Cuota': 100500,'MetaDatos': MetaDatosActualizados }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu6', json=registrarUsuarioJson)
#Se actualizan los datos del usuario
salida = requests.put('http://localhost:8000/usuarios/usu6', json=actualizarUsuarioJson)
#Se obtienen los datos del usuario
salida = requests.get('http://localhost:8000/usuarios/usu6')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("pepito@mail.com", salidaJson["Email"])
self.assertEqual("otraFoto", salidaJson["Foto"])
self.assertEqual("China", salidaJson["UltimaUbicacion"])
def test_07AlCrearArchivoElUsuarioDebeTenerlo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu7','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu7', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu7/archivos', json=archivoJson)
#Se obtiene el archivo del usuario
salida = requests.get('http://localhost:8000/usuarios/usu7/archivos')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu7", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_08AlEliminarArchivoEsteDebeEstarEnLaPapelera(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu8','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos necesarios para eliminar archivo
eliminarArchivoJson = {'Propietario': 'usu8','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin' }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu8', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu8/archivos', json=archivoJson)
#Se elimina el archivo
salida = requests.delete('http://localhost:8000/usuarios/usu8/archivos', json= eliminarArchivoJson)
#Se obtiene el archivo de la papelera
salida = requests.get('http://localhost:8000/usuarios/usu8/papelera')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu8", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_09AlCompartirUnArchivoConOtroUsuarioEsteDebeTenerlo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu9','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos necesarios para compartir archivo
archivoCompartirJson = {'Propietario': 'usu9','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin', 'Usuarios': ['usu9_2'] }
#Registro de dos usuarios
salida = requests.post('http://localhost:8000/usuarios/usu9', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu9_2', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu9/archivos', json=archivoJson)
#Se comparte el archivo al segundo usuario
salida = requests.put('http://localhost:8000/usuarios/usu9/archivos/compartir', json= archivoCompartirJson)
#Se obtiene el archivo compartido del segundo usuario
salida = requests.get('http://localhost:8000/usuarios/usu9_2/archivos/compartidos')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu9", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_10BuscarArchivoPorEtiquetas(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu10','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registro al usuario
salida = requests.post('http://localhost:8000/usuarios/usu10', json=registrarUsuarioJson)
#Se crea el archivo
salida = requests.post('http://localhost:8000/usuarios/usu10/archivos', json=archivoJson)
#Se busca el archivo por etiqueta
salida = requests.get('http://localhost:8000/usuarios/usu10/archivos/etiquetas/saludo')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu10", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_11BuscarArchivoPorNombre(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu11','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu11', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu11/archivos', json=archivoJson)
#Se busca el archivo por nombre
salida = requests.get('http://localhost:8000/usuarios/usu11/archivos/nombre/hola')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu11", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_12BuscarArchivoPorPropietario(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu12','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu12', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu12/archivos', json=archivoJson)
#Se busca el archivo por propietario
salida = requests.get('http://localhost:8000/usuarios/usu12/archivos/propietario/usu12')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu12", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_13BuscarArchivoPorExtension(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu13','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
salida = requests.post('http://localhost:8000/usuarios/usu13', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu13/archivos', json=archivoJson)
#Se busca el archivo por extension
salida = requests.get('http://localhost:8000/usuarios/usu13/archivos/extension/txt')
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("usu13", salidaJson["Archivos"][0]["Propietario"])
self.assertEqual("hola", salidaJson["Archivos"][0]["Nombre"])
def test_14ActualizarArchivo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu14','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos del Archivo a actualizar
actualizacionArchivoJson = {'Propietario': 'usu14', 'DirectorioOriginal' : 'documentos/bin', 'NombreOriginal': 'hola', 'ExtensionOriginal': 'txt','DirectorioNuevo': 'doc/','NombreNuevo': 'saludo', 'ExtensionNueva': 'bat','Etiquetas': ['hola','saludo'] }
#Se registra el usuario
salida = requests.post('http://localhost:8000/usuarios/usu14', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu14/archivos', json=archivoJson)
#Se actualiza el archivo
salida = requests.put('http://localhost:8000/usuarios/usu14/archivos/actualizar',json= actualizacionArchivoJson)
#Obtengo el archivo actualizado
archivoActualizado = requests.get('http://localhost:8000/usuarios/usu14/archivos')
archivoActualizadoJson = archivoActualizado.json()
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
#Verifico que se actualizo el nombre
self.assertEqual("saludo", archivoActualizadoJson["Archivos"][0]["Nombre"])
def test_15RestaurarArchivo(self):
#Datos del usuario
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu15','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Datos del Archivo a actualizar
actualizacionArchivoJson = {'Propietario': 'usu15', 'DirectorioOriginal' : 'documentos/bin', 'NombreOriginal': 'hola', 'ExtensionOriginal': 'txt','DirectorioNuevo': 'doc/','NombreNuevo': 'saludo', 'ExtensionNueva': 'bat','FechaDeModificacion' : '2015/08/03','UsuarioQueModifico' : 'pepe300','Etiquetas': ['hola','saludo'] }
#Datos del archivo a restaurar
archivoRestaurarJson = {'Propietario': 'usu15','Nombre': 'saludo','Extension': 'bat','Directorio': 'doc/','FechaDeModificacion' : '2015/08/03','UsuarioQueModifico' : 'usu15' }
#Se registra el usuario
salida = requests.post('http://localhost:8000/usuarios/usu15', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu15/archivos', json=archivoJson)
#Se actualiza el archivo
salida = requests.put('http://localhost:8000/usuarios/usu15/archivos/actualizar',json= actualizacionArchivoJson)
#Se restaura el archivo
salida = requests.put('http://localhost:8000/usuarios/usu15/archivos/restaurar',json= archivoRestaurarJson)
#Obtengo el archivo restaurado
archivoRestaurado = requests.get('http://localhost:8000/usuarios/usu15/archivos')
archivoRestauradoJson = archivoRestaurado.json()
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
#Verifico que se actualizo el nombre
self.assertEqual("hola", archivoRestauradoJson["Archivos"][0]["Nombre"])
def test_16CrearArchivoFisico(self):
MetaDatos = {'Email': 'pepe@mail.com','Foto': 'miFoto','Nombre': 'carlos','UltimaUbicacion': 'Bs As'}
registrarUsuarioJson = {'Clave': 'MiClave','Cuota': 100500,'MetaDatos': MetaDatos }
#Datos del Archivo
archivoJson = {'Propietario': 'usu16','Nombre': 'hola','Extension': 'txt','Directorio': 'documentos/bin','Etiquetas': ['hola','saludo'] }
#Registramos al usuario y agregamos el archivo logico
salida = requests.post('http://localhost:8000/usuarios/usu16', json=registrarUsuarioJson)
salida = requests.post('http://localhost:8000/usuarios/usu16/archivos', json=archivoJson)
salidaJson = salida.json()
#Obtengo el ID del archivo
idArchivo = salidaJson["Mensaje"]
#Abro el archivo
files = {'file': open('mainTest.cpp')}
#Envio el archivo fisico
salida = requests.post('http://localhost:8000/usuarios/usu16/archivofisico/'+idArchivo, files=files)
salidaJson = salida.json()
self.assertEqual("OK", salidaJson["Estado"])
self.assertEqual("Archivo creado correctamente", salidaJson["Mensaje"])
# Run the whole suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"mafvidal@gmail.com"
] | mafvidal@gmail.com |
ecefc0e5c68e1e71c4d81babea0adbd873cc0749 | b0680a0905161b5b8c8539f5021eb8dfcd33576a | /String Examples/ex14.py | c6ba2641e8aca25a72a144ed773bfe8b4f737999 | [] | no_license | FerruccioSisti/LearnPython3 | 32a78f3b63a985b7a42dcf69ae3ac432ec3dea76 | a4c0d238041836d22e99cf9f2cde80381daa91b9 | refs/heads/master | 2020-05-02T14:23:35.182958 | 2020-01-21T16:14:29 | 2020-01-21T16:14:29 | 178,009,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #This example is using both argv and input to get information from the user
from sys import argv

# Get the name of the script and the user
# (expects exactly one command-line argument: the user's name)
script, user_name = argv

# Prompt string shown before each input() call
prompt = "> "

print (f"Hi {user_name}, I'm the {script} script")
print ("I'd like to ask you a few questions.")
print (f"Do you like me {user_name}?")
likes = input(prompt)

# Entering creepville right now
print (f"Where do you live {user_name}?")
lives = input(prompt)

print ("What kind of computer do you have?")
pc = input(prompt)

# Echo back all three answers
print (f"Alright, so you said {likes} about liking me.\nI've also noted you live in {lives}")
print (f"Finally, you said that you own a {pc}")
| [
"ferrucciosisti@gmail.com"
] | ferrucciosisti@gmail.com |
0588e6013bc4ccd0a97c815853df716c9fa6e040 | c0ea89d58fd6f780a23f10a0b5535b3feada5a1a | /anchore_engine/services/policy_engine/api/models/image_selection_rule.py | e0f9abbea332fcca8e57209b3916beb1d02c3c34 | [
"Apache-2.0"
] | permissive | longfeide2008/anchore-engine | b62acbab8c7ebbf7fa67a2503768c677942220e4 | 622786ec653531f4fb216cb33e11ffe31fe33a29 | refs/heads/master | 2022-11-08T10:02:51.988961 | 2020-06-15T18:00:37 | 2020-06-15T18:00:37 | 274,068,878 | 1 | 0 | Apache-2.0 | 2020-06-22T07:27:39 | 2020-06-22T07:27:38 | null | UTF-8 | Python | false | false | 4,764 | py | # coding: utf-8
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api.models.image_ref import ImageRef # noqa: F401,E501
from anchore_engine.services.policy_engine.api import util
class ImageSelectionRule(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, registry=None, repository=None, image=None): # noqa: E501
"""ImageSelectionRule - a model defined in Swagger
:param id: The id of this ImageSelectionRule. # noqa: E501
:type id: str
:param name: The name of this ImageSelectionRule. # noqa: E501
:type name: str
:param registry: The registry of this ImageSelectionRule. # noqa: E501
:type registry: str
:param repository: The repository of this ImageSelectionRule. # noqa: E501
:type repository: str
:param image: The image of this ImageSelectionRule. # noqa: E501
:type image: ImageRef
"""
self.swagger_types = {
'id': str,
'name': str,
'registry': str,
'repository': str,
'image': ImageRef
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'registry': 'registry',
'repository': 'repository',
'image': 'image'
}
self._id = id
self._name = name
self._registry = registry
self._repository = repository
self._image = image
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ImageSelectionRule of this ImageSelectionRule. # noqa: E501
:rtype: ImageSelectionRule
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this ImageSelectionRule.
:return: The id of this ImageSelectionRule.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ImageSelectionRule.
:param id: The id of this ImageSelectionRule.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ImageSelectionRule.
:return: The name of this ImageSelectionRule.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ImageSelectionRule.
:param name: The name of this ImageSelectionRule.
:type name: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def registry(self):
"""Gets the registry of this ImageSelectionRule.
:return: The registry of this ImageSelectionRule.
:rtype: str
"""
return self._registry
@registry.setter
def registry(self, registry):
"""Sets the registry of this ImageSelectionRule.
:param registry: The registry of this ImageSelectionRule.
:type registry: str
"""
if registry is None:
raise ValueError("Invalid value for `registry`, must not be `None`") # noqa: E501
self._registry = registry
@property
def repository(self):
"""Gets the repository of this ImageSelectionRule.
:return: The repository of this ImageSelectionRule.
:rtype: str
"""
return self._repository
@repository.setter
def repository(self, repository):
"""Sets the repository of this ImageSelectionRule.
:param repository: The repository of this ImageSelectionRule.
:type repository: str
"""
if repository is None:
raise ValueError("Invalid value for `repository`, must not be `None`") # noqa: E501
self._repository = repository
@property
def image(self):
"""Gets the image of this ImageSelectionRule.
:return: The image of this ImageSelectionRule.
:rtype: ImageRef
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this ImageSelectionRule.
:param image: The image of this ImageSelectionRule.
:type image: ImageRef
"""
if image is None:
raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
self._image = image
| [
"zach@anchore.com"
] | zach@anchore.com |
e3dd831fccc3a95a952dbdc11ecb63ba2363ac4a | 06cd596e0f49d1e5de09a3de56be504453881413 | /graphm/matrix.py | 251c4584e075f168eb420e2b6fea01820f7b76c4 | [] | no_license | aguytech/graphm | 630b0e8b252d286c91a3c2429f344952a3513b7b | c4f58dabced17be83bb89da2c8bf5eb554a69ea2 | refs/heads/master | 2023-04-11T23:17:09.014633 | 2021-05-11T11:00:31 | 2021-05-11T11:00:31 | 365,999,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | '''
Created on Apr 26, 2021
@author: salem Aguemoun
'''
import functools
import graphm.matrixboolean
class Matrix(graphm.matrixboolean.MatrixBoolean):
""" Manage a arithmetic matrix
.. CAUTION:: Instance variables
:var list matrix: matrix with real numbers
:var int dimM: number of rows
:var int dimN: number of columns
"""
    def __init__(self, **d) -> 'Matrix':
        """Set the matrix properties with type given by one option in:

        :matrix: get a matrix
        :empty: get 2 dimensions of an empty matrix
        :random: get 2 dimensions of randomized matrix
        :unity: get the dimension of unity matrix

        :param dict \*\*d: options to specify the type of matrix

            :matrix: (list) matrix in [[int,...], ...] or ((int,...), ...)
            :empty: dimensions for matrix (dimM: int, dimN: (tuple) int)
            :random: dimensions for matrix (dimM: int, dimN: (tuple) int)
            :unity: (int) dimensions for square matrix

        :return: the matrix
        :rtype: Matrix
        """
        # All construction logic lives in the boolean base class; this subclass
        # only changes the arithmetic of __add__/__mul__.
        super().__init__(**d)
def __add__(self, matrix: 'Matrix') -> 'Matrix':
""" Return the result of the sum of this instance and that given in argument
:param Matrix matrix: matrix to be added to the instance
:return: the result of the sum of this instance and that given in argument
:rtype: Matrix
>>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7]])
>>> m2 = Matrix(matrix=[[4,5,8,2], [10,5,7,4]])
>>> print(m + m2)
dim 2,4
4,15,12,4
11,8,12,11
"""
# wrong dimensions
if matrix.dimM != self.dimM or matrix.dimN != self.dimN:
raise ValueError("Matrix have wrong dimensions")
r = Matrix(empty=(self.dimM, self.dimN))
for m in range(self.dimM):
for n in range(self.dimN):
r.matrix[m][n] = self.get_value(m, n) + matrix.get_value(m, n)
return r
def __mul__(self, matrix: 'Matrix') -> 'Matrix':
""" Return the matrix multiplication with a logical '&'
between instance and that passed in argument
:param Matrix matrix: matrix to be added to the instance
:return: the result of the multiplication of this instance and that given in argument
:rtype: Matrix
>>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7], [2,-1,5,3]])
>>> m2 = Matrix(matrix=[[4,2], [1,2], [2,3], [1,1]])
>>> print(m * m2)
dim 3,2
20,34
24,30
20,20
"""
# wrong dimensions
if matrix.dimM != self.dimN:
raise ValueError("Matrix have wrong dimensions")
r = Matrix(empty=(self.dimM, matrix.dimN))
for m in range(self.dimM):
for n in range(matrix.dimN):
l = (self.get_value(m, i) * matrix.get_value(i, n) for i in range(self.dimN))
# with functools package
r.matrix[m][n] = functools.reduce(lambda x, y: x + y, l)
#r.matrix[n][m] = 1 if sum(l) > 0 else 0
#r.matrix[n][m] = sum(self.matrix[n][i] * matrix.matrix[i][m] for i in range(self.dimN))
return r
def __repr__(self) -> str:
""" Return a linear representation of matrix
:return: a linear representation of the matrix separated by comma
>>> m = Matrix(matrix=['00001', '00100', '00010'])
>>> repr(m)
'0,0,0,0,1 0,0,1,0,0 0,0,0,1,0'
"""
return " ".join(",".join(str(n) for n in m) for m in self.matrix)
def __str__(self) -> str:
""" Return dimensions of matrix and matrix in 2 dimensions
:return: a 2 dimensions representation of the matrix
>>> m = Matrix(matrix=['00001', '00100', '00010'])
>>> print(m)
dim 3,5
0,0,0,0,1
0,0,1,0,0
0,0,0,1,0
"""
return f"dim {self.dimM},{self.dimN}" +"\n" \
+ "\n".join(",".join(str(n) for n in m) for m in self.matrix)
def __sub__(self, matrix: 'Matrix') -> 'Matrix':
""" Return the result of the substraction of this instance and that given in argument
:param Matrix matrix: matrix to be added to the instance
:return: the result of the sum of this instance and that given in argument
:rtype: Matrix
>>> m = Matrix(matrix=[[0,10,4,2], [1,3,5,7]])
>>> m2 = Matrix(matrix=[[4,5,8,2], [10,5,7,4]])
>>> print(m - m2)
dim 2,4
-4,5,-4,0
-9,-2,-2,3
"""
# wrong dimensions
if matrix.dimM != self.dimM or matrix.dimN != self.dimN:
raise ValueError("Matrix have wrong dimensions")
r = Matrix(empty=(self.dimM, self.dimN))
for m in range(self.dimM):
for n in range(self.dimN):
r.matrix[m][n] = self.get_value(m, n) - matrix.get_value(m, n)
return r
| [
"aguytech@free.fr"
] | aguytech@free.fr |
6eb5cb0c208022350e4de33e4e9a311131f2b321 | 6f8a0685ecba9540ee5aefb44b3b09fb0e68ba14 | /src/repeating_key_XOR.py | ec0f0b481a377301806a7e1ffcef6b2e8017c31f | [] | no_license | Auguste0904/CAESAR | def873cc6c965f1b0f92e7b1a560b4cd82935c6b | 0f58741f40582b59b5923532fa199fc8876b2bbd | refs/heads/master | 2023-03-17T18:58:50.165218 | 2021-03-11T08:54:19 | 2021-03-11T08:54:19 | 346,637,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | #!/usr/bin/env python3
##
## EPITECH PROJECT, 2020
## B-SEC-500-PAR-5-1-caesar-lucas.moritel
## File description:
## repeating_key_XOR.py
##
import os
import sys
import codecs
def error_gestion_arg(argv):
    """Validate the command line: exactly one argument, which must be a file.

    Prints an error message and exits with status 84 on failure; returns
    None on success.

    :param list argv: the raw sys.argv list (program name + arguments)
    """
    if len(argv) != 2:
        print("Error: Invalid number of arguments")
        exit(84)
    # idiomatic truthiness test instead of '== False'
    if not os.path.isfile(argv[1]):
        print("Error: The argument is not a file")
        exit(84)
def repeating_key_xor(key, text):
    """XOR *text* against *key*, repeating the key as often as needed.

    :param bytes key: the XOR key, cycled over the text
    :param bytes text: the bytes to encrypt/decrypt
    :return: the XOR-combined bytes (same length as *text*)
    :rtype: bytes
    """
    key_length = len(key)
    return bytes(
        byte ^ key[position % key_length]
        for position, byte in enumerate(text))
def main():
    """Read a hex-encoded key and message from the file named in argv[1],
    apply repeating-key XOR and print the result as upper-case hex.

    Exits with status 84 on any validation error; the error messages are
    kept byte-identical to the original implementation.
    """
    error_gestion_arg(sys.argv)
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(sys.argv[1], "r") as file:
        encoded_key = file.readline().strip('\n')
        encoded_text = file.readline().strip('\n')
    if len(encoded_key) == 0:
        print("Error: There is no key in your file")
        exit(84)
    if len(encoded_text) == 0:
        print("Error: There is no text to decrypt in your file")
        exit(84)
    # hex strings must have an even number of digits to decode to bytes
    if len(encoded_key) % 2 != 0:
        print("Error: Length of the encoded key content is not even but odd")
        exit(84)
    # NOTE: a further emptiness check existed here but was unreachable
    # (both fields are already verified non-empty above) and was removed.
    decoded_text = codecs.decode(encoded_text.encode(), 'hex')
    decoded_key = codecs.decode(encoded_key.encode(), 'hex')
    ciphertext = repeating_key_xor(decoded_key, decoded_text)
    print(ciphertext.hex().upper())
if __name__ == "__main__":
main()
| [
"auguste.alexandre@epitech.eu"
] | auguste.alexandre@epitech.eu |
97dff6c9bea41dde6e6d416fe20ab4804a53ed50 | 9757f47db825c61fd7180462be97e59909d183fc | /env/bin/python-config | 0eedffec046f906eddb2944d98f3b1f0b97d70c6 | [] | no_license | dignacio0815/translatorrepo | dcf449eadc17bc3d89a111dec3120dfbb1a31cc7 | f1beaf0fe378363e597db153976ccf1d46c79910 | refs/heads/master | 2020-03-25T15:44:18.681461 | 2018-08-10T19:38:56 | 2018-08-10T19:38:56 | 143,899,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | #!/home/ubuntu/workspace/translator_project/env/bin/python
# Virtualenv copy of CPython's `python-config` helper: prints the compiler
# and linker settings needed to embed or extend this Python installation.
# Output format is consumed verbatim by build tools, so it must not change.
import sys
import getopt
import sysconfig
# Options recognised on the command line; version-dependent ones are
# appended below so the script matches the running interpreter's features.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # insert before the trailing 'help' entry to keep the usage line tidy
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print a usage line listing every supported --flag and terminate.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    # no flags at all is also a usage error
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Each requested flag prints one line; multiple flags may be combined.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # older interpreters exposed this as the 'SO' config variable
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"denizeignacio@gmail.com"
] | denizeignacio@gmail.com | |
cafc587d98b94559ed1c700073ef8df288023c8a | a2f08f07c5a8473fc6d65b54844948524a8d56a5 | /codigwithsilentsec/src/account/admin.py | b72b53b9d4b00e41e1423fa4da64b110879754f5 | [] | no_license | silnetmagar321/etenderwebsite | 9fd94e11d4103b68397a2af6179453f659591bb5 | 39521808442c63cc2ade17602430f625d091f213 | refs/heads/main | 2023-01-07T13:25:56.174456 | 2020-10-20T09:08:14 | 2020-10-20T09:08:14 | 304,482,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import Account
# Register your models here.
class AccountAdmin(UserAdmin):
    # Admin configuration for the custom Account user model.
    # Columns shown in the changelist view.
    list_display = ('email', 'username', 'date_joined', 'last_login', 'is_admin', 'is_staff')
    # Fields matched by the admin search box.
    search_fields = ('email', 'username',)
    # Timestamps are managed by the model, never edited by hand.
    readonly_fields = ('date_joined', 'last_login')
    # UserAdmin's default group/permission widgets, filters and fieldsets are
    # cleared -- presumably Account does not use them; confirm against model.
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
admin.site.register(Account, AccountAdmin) | [
"56421032+silnetmagar321@users.noreply.github.com"
] | 56421032+silnetmagar321@users.noreply.github.com |
8ea4f2d22296c8763d073634a3bd564ee230dd68 | 7167c7acbebabc66f3e04959161c720e8091560b | /tour/tests/tour_tests.py | daeba4dec330067ede160de46ba55da76c130ea6 | [
"MIT"
] | permissive | ambitioninc/django-tour | 2a260ae3f6d59218b0cee9a7a9bc8b0a11c3d80f | f0181d71ebd6c66e11dd921ad5e602192fc621cc | refs/heads/develop | 2016-09-05T21:48:39.161270 | 2015-10-05T19:44:58 | 2015-10-05T19:44:58 | 18,072,292 | 26 | 12 | null | 2015-10-05T19:45:15 | 2014-03-24T17:53:55 | Python | UTF-8 | Python | false | false | 10,375 | py | from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import G
from mock import patch
from tour.models import Tour, Step, TourStatus
class BaseTourTest(TestCase):
    """
    Provides basic setup for tour tests like creating users.

    Creates two users, two tours and five steps (step5 deliberately has no
    url) via django-dynamic-fixture's G(); subclasses wire the steps
    together per test.
    """
    def setUp(self):
        super(BaseTourTest, self).setUp()
        self.test_user = User.objects.create_user('test', 'test@gmail.com', 'test')
        self.test_user2 = User.objects.create_user('test2', 'test2@gmail.com', 'test2')
        self.tour1 = G(
            Tour, display_name='Mock Tour', name='tour1', complete_url='mock_complete1',
            tour_class='tour.tests.mocks.MockTour')
        self.tour2 = G(
            Tour, display_name='Mock Tour 2', name='tour2', complete_url='mock_complete2',
            tour_class='tour.tests.mocks.MockTour2')
        # steps start unattached (parent_step=None); sort_order drives ordering
        self.step1 = G(
            Step, step_class='tour.tests.mocks.MockStep1', display_name='Mock Step 1', name='mock1',
            url='mock1', parent_step=None, sort_order=0)
        self.step2 = G(
            Step, step_class='tour.tests.mocks.MockStep2', display_name='Mock Step 2', name='mock2',
            url='mock2', parent_step=None, sort_order=1)
        self.step3 = G(
            Step, step_class='tour.tests.mocks.MockStep3', display_name='Mock Step 3', name='mock3',
            url='mock3', parent_step=None, sort_order=2)
        self.step4 = G(
            Step, step_class='tour.tests.mocks.MockStep4', display_name='Mock Step 4', name='mock4',
            url='mock4', parent_step=None, sort_order=3)
        # step5 has no url, so it is skipped by url-list logic
        self.step5 = G(
            Step, step_class='tour.tours.BaseStep', display_name='Mock Step 5', name='mock5',
            url=None, parent_step=None, sort_order=4)
    def login_user1(self):
        # Convenience: authenticate the test client as the first user.
        self.client.login(username='test', password='test')
class TourTest(BaseTourTest):
    """
    Tests the functionality of the BaseTour class.

    Step completion is controlled by patching the mock steps' is_complete
    methods; the decorators apply bottom-up, so the bottom-most @patch
    corresponds to the first mock argument.
    """
    def test_init(self):
        """
        Verifies that the tour object is properly set when loaded
        """
        self.assertEqual(self.tour1.load_tour_class().tour, self.tour1)
    def test_get_steps_flat(self):
        """
        Verifies that the steps are loaded in the correct order
        """
        # reverse the default sort so ordering (not insertion) is exercised
        self.step1.sort_order = 1
        self.step1.save()
        self.step2.sort_order = 0
        self.step2.save()
        self.tour1.steps.add(self.step1, self.step2)
        expected_steps = [self.step2, self.step1]
        self.assertEqual(expected_steps, self.tour1.load_tour_class().get_steps())
    def test_get_steps_nested(self):
        """
        Verifies that the nested steps are loaded correctly
        """
        self.tour1.steps.add(self.step1, self.step2)
        self.step1.steps.add(self.step3, self.step4)
        self.step3.sort_order = 1
        self.step3.save()
        self.step4.sort_order = 0
        self.step4.save()
        # children are flattened depth-first directly after their parent
        expected_steps = [self.step1, self.step4, self.step3, self.step2]
        self.assertEqual(expected_steps, self.tour1.load_tour_class().get_steps())
    def test_get_url_list(self):
        """
        Verifies that the tour returns the correct step url list
        """
        # step5 has url=None and must be omitted from the list
        self.tour1.steps.add(self.step1, self.step5, self.step2)
        expected_url_list = ['mock1', 'mock2']
        self.assertEqual(expected_url_list, self.tour1.load_tour_class().get_url_list())
    def test_add_user(self):
        """
        Verifies that a user is linked to a tour properly and that the correct tour is returned
        """
        # add user to tour
        tour_status = self.tour1.load_tour_class().add_user(self.test_user)
        # try to add again and make sure it returns the same status
        self.assertEqual(tour_status, self.tour1.load_tour_class().add_user(self.test_user))
        # make sure only one status
        self.assertEqual(1, TourStatus.objects.count())
        # mark status as complete
        tour_status.complete = True
        tour_status.save()
        # make sure another tour is created
        self.tour1.load_tour_class().add_user(self.test_user)
        self.assertEqual(2, TourStatus.objects.count())
        self.assertEqual(1, TourStatus.objects.filter(complete=False).count())
    def test_mark_complete(self):
        """
        Verifies that a tour status record will be marked as complete for a user
        """
        # add multiple users to multiple tours
        tour1_class = self.tour1.load_tour_class()
        tour2_class = self.tour2.load_tour_class()
        tour1_class.add_user(self.test_user)
        tour1_class.add_user(self.test_user2)
        tour2_class.add_user(self.test_user)
        tour2_class.add_user(self.test_user2)
        # make sure there are 4 records
        self.assertEqual(4, TourStatus.objects.count())
        # complete the tour for user1
        self.assertTrue(tour1_class.mark_complete(self.test_user))
        # make sure it is complete
        self.assertEqual(1, TourStatus.objects.filter(complete=True).count())
        # try to complete the same tour
        self.assertFalse(tour1_class.mark_complete(self.test_user))
        # add the user to the tour again
        tour1_class.add_user(self.test_user)
        # make sure there are 5 records
        self.assertEqual(5, TourStatus.objects.count())
    @patch('tour.tests.mocks.MockStep4.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep3.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep2.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_get_current_step(
            self, mock_step1_is_complete, mock_step2_is_complete, mock_step3_is_complete, mock_step4_is_complete):
        """
        Verifies that the tour class returns the first incomplete step
        :type mock_step1_is_complete: Mock
        :type mock_step2_is_complete: Mock
        :type mock_step3_is_complete: Mock
        :type mock_step4_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        mock_step2_is_complete.return_value = False
        mock_step3_is_complete.return_value = False
        mock_step4_is_complete.return_value = False
        self.tour1.steps.add(self.step1, self.step2)
        self.step1.steps.add(self.step3, self.step4)
        tour1_class = self.tour1.load_tour_class()
        self.assertEqual(self.step1, tour1_class.get_current_step(self.test_user))
        mock_step1_is_complete.return_value = True
        mock_step3_is_complete.return_value = True
        self.assertEqual(self.step4, tour1_class.get_current_step(self.test_user))
        mock_step4_is_complete.return_value = True
        mock_step2_is_complete.return_value = True
        # all steps complete -> no current step
        self.assertIsNone(tour1_class.get_current_step(self.test_user))
    @patch('tour.tests.mocks.MockStep4.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep3.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep2.is_complete', spec_set=True)
    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_get_next_url(
            self, mock_step1_is_complete, mock_step2_is_complete, mock_step3_is_complete, mock_step4_is_complete):
        """
        Verifies that the url is returned for the current step
        :type mock_step1_is_complete: Mock
        :type mock_step2_is_complete: Mock
        :type mock_step3_is_complete: Mock
        :type mock_step4_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        mock_step2_is_complete.return_value = False
        mock_step3_is_complete.return_value = False
        mock_step4_is_complete.return_value = False
        # url-less step5 sits between step1 and step2 and holds steps 3 and 4
        self.step5.sort_order = 1
        self.step5.save()
        self.step2.sort_order = 3
        self.step2.save()
        self.tour1.steps.add(self.step1, self.step2, self.step5)
        self.step5.steps.add(self.step3, self.step4)
        tour1_class = self.tour1.load_tour_class()
        self.assertEqual('mock1', tour1_class.get_next_url(self.test_user))
        mock_step1_is_complete.return_value = True
        self.assertEqual('mock3', tour1_class.get_next_url(self.test_user))
        mock_step3_is_complete.return_value = True
        self.assertEqual('mock4', tour1_class.get_next_url(self.test_user))
        mock_step4_is_complete.return_value = True
        self.assertEqual('mock2', tour1_class.get_next_url(self.test_user))
        mock_step2_is_complete.return_value = True
        # everything complete -> the tour's complete_url is returned
        self.assertEqual('mock_complete1', tour1_class.get_next_url(self.test_user))
    @patch('tour.tests.mocks.MockStep1.is_complete', spec_set=True)
    def test_is_complete(self, mock_step1_is_complete):
        """
        Verifies that a tour returns true when complete and false when incomplete
        :type mock_step1_is_complete: Mock
        """
        mock_step1_is_complete.return_value = False
        self.tour1.steps.add(self.step1)
        tour1_class = self.tour1.load_tour_class()
        self.assertFalse(tour1_class.is_complete(self.test_user))
        mock_step1_is_complete.return_value = True
        self.assertTrue(tour1_class.is_complete(self.test_user))
class StepTest(BaseTourTest):
    """
    Tests the functionality of the BaseStep class.

    Uses the step fixtures created in BaseTourTest.setUp.
    """
    def test_init(self):
        """
        Verifies that the step object is properly set when loaded
        """
        self.assertEqual(self.step1.load_step_class().step, self.step1)
    def test_is_complete(self):
        """
        Verifies that a step returns true by default
        """
        # the base step class has no completion condition, so always complete
        step1_class = self.step1.load_step_class()
        self.assertTrue(step1_class.is_complete(self.test_user))
    def test_get_steps_flat(self):
        """
        Verifies that the steps are loaded in the correct order
        """
        self.step1.steps.add(self.step2, self.step3)
        expected_steps = [self.step2, self.step3]
        self.assertEqual(expected_steps, self.step1.load_step_class().get_steps())
    def test_get_steps_nested(self):
        """
        Verifies that the nested steps are loaded correctly
        """
        self.step1.steps.add(self.step2)
        self.step2.steps.add(self.step3, self.step4)
        # depth-first flattening: child steps follow their parent
        expected_steps = [self.step2, self.step3, self.step4]
        self.assertEqual(expected_steps, self.step1.load_step_class().get_steps())
| [
"wes.okes@gmail.com"
] | wes.okes@gmail.com |
41f4b127bfbd6b75174719694a023c07f6cca470 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.TZO/Sun-ExtA_16/pdf_to_json_test_Latn.TZO_Sun-ExtA_16.py | 1ec5da78381362fbe785a67e34d5996d974a7995 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TZO/Sun-ExtA_16/udhr_Latn.TZO_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
d7e882092e4b190087f4548e9372a44995255bcf | d3737731634ee3f6fa2b19f6806d42ecc27d21a5 | /wals3/scripts/initializedb.py | 273185799ef7e0f763aec0421b0141b6c83648e1 | [] | no_license | Maggi12/wals3 | 3ad2475714b2d0bd1a7e5bb52baac1070eb07a5f | e66f08766ef67f51cae3d9656bcd4da1a8cf63c8 | refs/heads/master | 2021-01-22T20:02:56.225183 | 2014-07-25T15:42:39 | 2014-07-25T15:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,628 | py | from __future__ import unicode_literals
import sys
import transaction
from itertools import groupby, cycle
import re
from datetime import date, datetime
from collections import defaultdict
from pytz import utc
from sqlalchemy import create_engine
from sqlalchemy.orm import joinedload_all
from path import path
from bs4 import BeautifulSoup
from clld.db.meta import DBSession, VersionedDBSession
from clld.db.models import common
from clld.db.util import compute_language_sources
from clld.scripts.util import initializedb, Data, gbs_func
from clld.lib.bibtex import EntryType
from clld.lib.dsv import reader
from clld.util import LGR_ABBRS
import wals3
from wals3 import models
from wals3.scripts import uncited
from wals3.scripts import issues
# lower-cased citekey -> refdb id, for sources not cited in any chapter
UNCITED_MAP = {}
for k, v in uncited.MAP.items():
    UNCITED_MAP[k.lower()] = v
# start with what's online right now:
# NOTE(review): hard-coded local postgres DSNs; this script only runs on the
# original maintainer's machine.
DB = create_engine('postgresql://robert@/wals-vm42')
REFDB = create_engine('postgresql://robert@/walsrefs')
GC = create_engine('postgresql://robert@/glottolog3')
ABBRS = {
"A": "agent-like argument",
"ACCOMP": "accompanied ",
"ACR": "actor",
"ACT": "actual",
"ADEL": "adelative",
"ADVZ": "adverbializer",
"AFF": "affirmative",
"AGT": "agent",
"ALL": "allative",
"AN": "action nominal",
"ANC": "action nominal construction",
"ANIM": "animate",
"ANTIP": "antipassive",
"APPL": "applicative",
"AS": "asseverative",
"ASSOC": "associative",
"ASY": "asymmetric",
"ATTR": "attributive",
"AUD": "auditory evidential",
"AUG": "augmented",
"C": "common gender",
"CL": "class (= noun class, gender)",
"CLF": "classifier",
"CMPL": "completive",
"CNTR": "contrary to expectation marker",
"COLL": "collective",
"COM": "comitative",
"COMPR": "comparative",
"CONN": "connective",
"CONNEG": "connegative",
"CONSTR": "construct",
"CONT": "continuative, continous",
"CONTEMP": "contemporative",
"COP": "copula",
"CPW": "categories per word",
"CRS": "currently relevant state",
"DECL": "declarative",
"DEG": "degree word",
"DEP": "dependent marker",
"DES": "desire",
"DESID": "desiderative",
"DIM": "diminutive",
"DIR": "direct",
"DIR.EVD": "direct evidential",
"DIRL": "directional",
"DIST.PST": "distant past",
"DOBJ": "direct object",
"DS": "different subject",
"EMPH": "emphatic",
"EPENTH": "epenthetic",
"EPV": "expletive verbal suffix",
"EVD": "evidential",
"FACT": "fact",
"FAM": "familiar",
"FIN": "finite",
"FIN.AOR": "finite aorist",
"FV": "verb-final vowel",
"HAB": "habitual",
"HEST": "hesternal past",
"HHON": "super honorific",
"HOD": "hodiernal past",
"HON": "honorific",
"HORT": "hortative",
"HUM": "human",
"IE": "Indo-European",
"ILL": "illative",
"IMM.PRET": "immediate preterite",
"IMM.PST": "immediate past",
"IMPERS": "impersonal",
"INAN": "inanimate",
"INCEP": "inceptive",
"INCOMPL": "incompletive",
"IND": "indicative",
"INDIR.EVD": "indirect evidential",
"INFER": "inferential evidential",
"INGR": "ingressive",
"INTENT": "intentional",
"INTER": "interrogative",
"INTF": "intensifier",
"INTGEN": "intended genitive",
"INV": "inverse",
"IO": "indirect object ",
"IRR": "irrealis",
"ITER": "iterative",
"LIG": "ligature",
"LOCUT": "locutor person marker",
"MED": "medial",
"NARR": "narrative",
"NC": "noun class",
"NEC": "necessity",
"NHON": "non-honorific",
"NOMIN": "nominalization",
"NON.F": "non-feminine ",
"NONFIN": "non-finite ",
"NONFIN.AOR": "non-finite aorist",
"NP": "noun phrase",
"NPST": "non-past",
"NSG": "non-singular",
"NUM": "numeral",
"O": "object pronominal marker",
"OBV": "obviative",
"OPT": "optative",
"P": "patient-like argument",
"PAT": "patient",
"PATH": "path locative",
"PCL": "particle",
"PERS": "personal",
"PHR.TERM": "phrase terminal marker",
"PLUPERF": "pluperfect",
"POS": "possibility",
"POSTP": "postposition",
"POT": "potential",
"PP": "prepositional/postpositional phrase",
"PRECONTEMP": "precontemporal",
"PRED": "predicative",
"PREF": "prefix",
"PREP": "preposition",
"PREV": "preverb",
"PROL": "prolative",
"PRON": "pronoun",
"PROP": "proper name",
"PRTV": "partitive",
"PST.CONT": "past continuous",
"PST.PUNCT": "past punctiliar",
"PSTBEFOREYEST": "past before yesterday (= prehesternal)",
"PUNCT": "punctual stem",
"Q": "question-marker",
"QUOT": "quotative",
"RDP": "reduplication",
"REAL": "realis",
"REC": "recent (past)",
"RECP": "reciprocal",
"REM.PST": "remote past",
"REMOTE": "remote",
"REPET": "repetitive",
"RLZ": "realized",
"RNR": "result nominalizer",
"S": "sole argument of the intransitive verb",
"SBJV": "subjunctive",
"SENS": "sensory evidential",
"SPEC": "specific",
"SR": "switch Reference",
"SS": "same subject",
"STAT": "stative",
"SUBORD": "subordination",
"SUFF": "suffix",
"SUP": "superessive",
"SYM": "symmetric",
"SymAsy": "symmetric and asymmetric",
"T/A": "tense/ aspect",
"TD": "time depth/ proximality marker",
"TELIC": "telic",
"TEMPRY": "temporary",
"TH": "thematic suffix",
"THM": "theme (i.e. the semantic role)",
"TOD.PST": "today past",
"TRASL": "traslative",
"TRI": "trial",
"UNSP": "unspecified",
"VBLZ": "verbalizer",
"VENT": "ventive",
"VIS": "visual evidential",
"VP": "verb phrase",
}
for k, v in LGR_ABBRS.items():
ABBRS.setdefault(k, v)
def get_source(id):  # pragma: no cover
    """Retrieve a source record from wals_refdb.

    :param str id: the citekey of the reference
    :return: a dict of bibtex-style fields suitable for creating a
        common.Source instance; empty dict if the reference is unknown.
    """
    # refdb column name -> bibtex/Source field; 'jsondata' fields are folded
    # into one dict, with the three list-valued keys accumulated.
    field_map = {
        'onlineversion': 'url',
        'gbs_id': 'google_book_search_id',
        'doi': 'jsondata',
        'cited': 'jsondata',
        'conference': 'jsondata',
        'iso_code': 'jsondata',
        'olac_field': 'jsondata',
        'wals_code': 'jsondata',
    }
    res = {'id': id, 'jsondata': {'iso_code': [], 'olac_field': [], 'wals_code': []}}
    refdb_id = UNCITED_MAP.get(id.lower())
    if not refdb_id:
        # ids come from our own database, so string interpolation is
        # tolerated here; do not reuse this pattern for untrusted input.
        for row in REFDB.execute("""\
select id, genre from ref_record, ref_recordofdocument
where id = id_r_ref and citekey = '%s'""" % id
        ):
            res['bibtex_type'] = row['genre']
            refdb_id = row['id']
            break
    if not refdb_id:
        # citekeys may carry an 'a'/'b'/... disambiguation suffix; retry without
        if id[-1] in ['a', 'b', 'c', 'd']:
            refdb_id = UNCITED_MAP.get(id[:-1].lower())
        if not refdb_id:
            print('missing ref %s' % id)
            return {}
    res['pk'] = int(refdb_id)
    if 'bibtex_type' not in res:
        for row in REFDB.execute("select genre from ref_record where id = %s" % refdb_id):
            res['bibtex_type'] = row['genre']
            break
    for row in REFDB.execute(
        "select * from ref_recfields where id_r_ref = %s" % refdb_id
    ):
        field = field_map.get(row['id_name'], row['id_name'])
        if field == 'jsondata':
            if row['id_name'] in ['iso_code', 'olac_field', 'wals_code']:
                res['jsondata'][row['id_name']].append(row['id_value'])
            else:
                res['jsondata'][row['id_name']] = row['id_value']
        else:
            res[field] = row['id_value']
    if res['bibtex_type'] == 'thesis':
        # BUGFIX: these three lines used '==' (a no-op comparison) instead of
        # '=', so every thesis ended up as EntryType.misc.
        if res['format'] == 'phd':
            res['bibtex_type'] = 'phdthesis'
            del res['format']
        elif res['format'] == 'ma':
            res['bibtex_type'] = 'mastersthesis'
            del res['format']
        else:
            res['bibtex_type'] = 'misc'
    if res['bibtex_type'] == 'online':
        res['howpublished'] = 'online'
    # map the genre string onto the EntryType enum, defaulting to misc
    res['bibtex_type'] = getattr(EntryType, res['bibtex_type'], EntryType.misc)
    if 'format' in res:
        res['type'] = res['format']
        del res['format']
    # assemble the author string, honoring the stored ordering
    authors = ''
    for row in REFDB.execute(
        "select * from ref_recauthors where id_r_ref = %s order by ord" % refdb_id
    ):
        if row['type'] == 'etal':
            authors += ' et al.'
        else:
            if authors:
                authors += ' and '
            authors += row['value']
    res['author'] = authors
    for row in REFDB.execute(
        "select * from ref_recjournal where id_r_ref = %s" % refdb_id
    ):
        res['journal'] = row['name']
        break
    return res
def parse_igt(html):  # pragma: no cover
    """Parse an interlinear glossed text (IGT) example table like:

    <table class="IGT">
    <caption>
    <div class="translation">I want the white one.</div>
    </caption>
    <tbody>
    <tr class="phrase">
    <td class="morpheme"><i>Pojne-j-ben </i></td>
    <td class="morpheme"><i>lew-din </i></td>
    <td class="morpheme"><i>erd'-ije. </i></td>
    </tr>
    <tr class="gloss">
    <td class="morpheme">white-PTCP-NMLZ</td>
    <td class="morpheme">eat-INF</td>
    <td class="morpheme">want-1SG.INTR</td>
    </tr>
    </tbody>
    </table>

    Returns a dict with keys description, name, analyzed, markup_analyzed,
    gloss, markup_gloss (tab-separated morpheme columns).
    """
    def get_text(e):
        # normalize a single tag to a list, then join the stripped text
        if not isinstance(e, list):
            e = [e]
        return ' '.join(' '.join(ee.stripped_strings) for ee in e)
    res = {}
    soup = BeautifulSoup(html)
    e = soup.find('caption')
    if e:
        res['description'] = get_text(e)
    e = soup.find('tr', attrs={'class': 'phrase'})
    if e:
        morphemes = e.find_all('td', attrs={'class': 'morpheme'})
        res['name'] = get_text(morphemes)
        res['analyzed'] = '\t'.join(get_text(m) for m in morphemes)
        # markup_* keeps the raw inner HTML of each cell (py2 unicode())
        res['markup_analyzed'] = '\t'.join(
            ''.join(unicode(c) for c in m.contents) for m in morphemes)
    e = soup.find('tr', attrs={'class': 'gloss'})
    if e:
        morphemes = e.find_all('td', attrs={'class': 'morpheme'})
        # '. ' -> '.' repairs glosses split like '1SG. INTR'
        res['gloss'] = '\t'.join(get_text(m).replace('. ', '.') for m in morphemes)
        res['markup_gloss'] = '\t'.join(
            ''.join(unicode(c) for c in m.contents) for m in morphemes)
    # gloss and analyzed rows must align morpheme-for-morpheme
    assert len(res.get('gloss', '').split('\t')) == len(res.get('analyzed', '').split('\t'))
    return res
def teaser(html):  # pragma: no cover
    """Return roughly the first 100 characters of *html*'s text content,
    with fragments joined by single spaces.
    """
    pieces = []
    total = 0
    for fragment in BeautifulSoup(html).stripped_strings:
        pieces.append(fragment)
        # +1 accounts for the joining space, matching the original's
        # accumulated-string length check
        total += len(fragment) + 1
        if total > 100:
            break
    return ' '.join(pieces)
def get_vs2008(args):  # pragma: no cover
    """Read the 2008 datapoints CSV into a dict keyed by
    (language id, feature id + 'A') with integer values.
    """
    return {
        (row[0], '%sA' % row[1]): int(row[2])
        for row in reader(args.data_file('datapoints_2008.csv'), delimiter=',')}
# publication timestamps (UTC) of the WALS editions, used as the
# created/updated defaults for migrated records
E2008 = utc.localize(datetime(2008, 4, 21))
E2011 = utc.localize(datetime(2011, 4, 28))
E2013 = utc.localize(datetime(2013, 11, 15))
data = Data(created=E2008, updated=E2008)
def migrate(from_, to_, converter):  # pragma: no cover
    """Copy every row of table *from_* into model *to_*.

    *converter* maps a raw row to either a kwargs dict (added directly to
    the session) or a (key, kwargs) pair (registered in the ``data``
    cache); falsy results are skipped.
    """
    for row in DB.execute("select * from %s" % from_):
        converted = converter(row)
        if not converted:
            continue
        if isinstance(converted, dict):
            DBSession.add(to_(**converted))
        else:
            key, kwargs = converted
            data.add(to_, key, **kwargs)
    DBSession.flush()
def main(args): # pragma: no cover
glottocodes = {}
for row in GC.execute('select ll.hid, l.id from language as l, languoid as ll where ll.pk = l.pk'):
if row[0] and len(row[0]) == 3:
glottocodes[row[0]] = row[1]
icons = issues.Icons()
old_db = DB
vs2008 = get_vs2008(args)
missing_sources = []
refdb_ids = {}
max_id = 7350
with open('/home/robert/venvs/clld/data/wals-data/missing_source.py', 'w') as fp:
for row in old_db.execute("select * from reference"):
try:
author, year = row['id'].split('-')
except:
author, year = None, None
bibdata = get_source(row['id'])
if not bibdata:
fp.write('"%s",\n' % row['id'])
missing_sources.append(row['id'])
bibdata['pk'] = max_id
max_id += 1
if bibdata['pk'] in refdb_ids:
print 'already seen:', row['id'], 'as', refdb_ids[bibdata['pk']]
data['Source'][row['id']] = data['Source'][refdb_ids[bibdata['pk']]]
continue
refdb_ids[bibdata['pk']] = row['id']
bibdata.update({
'id': row['id'],
'name': row['name'],
'description': bibdata.get('title', bibdata.get('booktitle')),
'google_book_search_id': row['gbs_id'] or None,
})
data.add(common.Source, row['id'], **bibdata)
#
# TODO: add additional bibdata as data items
#
print('sources missing for %s refs' % len(missing_sources))
for id, name in ABBRS.items():
DBSession.add(common.GlossAbbreviation(id=id, name=name))
migrate(
'country',
models.Country,
lambda r: (r['id'], dict(id=r['id'], name=r['name'], continent=r['continent'])))
migrate(
'family',
models.Family,
lambda r: (r['id'], dict(id=r['id'], name=r['name'], description=r['comment'])))
for row, icon in zip(
list(old_db.execute("select * from genus order by family_id")),
cycle(iter(icons))
):
genus = data.add(
models.Genus, row['id'],
id=row['id'], name=row['name'], icon=icon, subfamily=row['subfamily'])
genus.family = data['Family'][row['family_id']]
DBSession.flush()
migrate(
'altname',
common.Identifier,
lambda r: (
(r['name'], r['type']), dict(name=r['name'], type='name', description=r['type'])))
# names for isolanguages are not unique!
enames = {}
for r in DB.execute("select * from isolanguage"):
id_ = 'ethnologue-%s' % r['id']
if r['name'] in enames:
data['Identifier'][id_] = enames[r['name']]
else:
enames[r['name']] = data.add(
common.Identifier, id_,
id=id_,
name=r['name'],
type='name',
description='ethnologue')
DBSession.flush()
migrate(
'isolanguage',
common.Identifier,
lambda r: (
r['id'],
dict(
id=r['id'],
name=r['id'],
type=common.IdentifierType.iso.value,
description=r['name'])))
migrate(
'isolanguage',
common.Identifier,
lambda r: None if r['id'] not in glottocodes else (
'gc-%s' % r['id'],
dict(
id='gc-%s' % r['id'],
name=glottocodes[r['id']],
type=common.IdentifierType.glottolog.value,
description=r['name'])))
migrate(
'language',
models.WalsLanguage,
lambda r: (
r['id'],
dict(
id=r['id'],
name=r['name'],
latitude=r['latitude'],
longitude=r['longitude'],
ascii_name=r['ascii_name'],
genus=data['Genus'][r['genus_id']],
samples_100=r['samples_100'] != 0,
samples_200=r['samples_200'] != 0)))
migrate(
'author',
common.Contributor,
lambda r: (
r['id'],
dict(name=r['name'], url=r['www'], id=r['id'], description=r['note'])))
dataset = common.Dataset(
id='wals',
name='WALS Online',
description='The World Atlas of Language Structures Online',
domain='wals.info',
published=date(2013, 8, 15),
contact='contact.wals@livingreviews.org',
license='http://creativecommons.org/licenses/by-nc-nd/2.0/de/deed.en',
jsondata={
'license_icon': 'http://wals.info/static/images/cc_by_nc_nd.png',
'license_name': 'Creative Commons Attribution-NonCommercial-NoDerivs 2.0 Germany'})
DBSession.add(dataset)
for i, editor in enumerate(['dryerms', 'haspelmathm']):
common.Editor(dataset=dataset, contributor=data['Contributor'][editor], ord=i + 1)
migrate(
'country_language',
models.CountryLanguage,
lambda r: dict(
language_pk=data['WalsLanguage'][r['language_id']].pk,
country_pk=data['Country'][r['country_id']].pk))
migrate(
'altname_language',
common.LanguageIdentifier,
lambda r: dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier'][(r['altname_name'], r['altname_type'])],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier'][r['isolanguage_id']],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: None if 'ethnologue-%s' % r['isolanguage_id'] not in data['Identifier'] else dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier']['ethnologue-%s' % r['isolanguage_id']],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: None if 'gc-%s' % r['isolanguage_id'] not in data['Identifier'] else dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier']['gc-%s' % r['isolanguage_id']],
description=r['relation']))
migrate(
'area',
models.Area,
lambda r: (
r['id'],
dict(name=r['name'], dbpedia_url=r['dbpedia_url'], id=str(r['id']))))
def migrate_chapter(row):
kw = dict(
id=row['id'],
name=row['name'],
wp_slug=row['blog_title'],
sortkey=int(row['id']),
area=data['Area'][row['area_id']])
if int(row['id']) in [143, 144]:
kw['created'] = E2011
kw['updated'] = E2011
return row['id'], kw
migrate('chapter', models.Chapter, migrate_chapter)
def migrate_supplement(row):
if row['name'] not in ['Help', 'Abbreviations']:
sortkey = 990 + int(row['id']) if row['name'] != 'Introduction' else 0
id_ = 's%s' % row['id']
kw = dict(id=id_, name=row['name'], sortkey=sortkey)
return id_, kw
migrate('supplement', models.Chapter, migrate_supplement)
migrate(
'chapter_reference',
common.ContributionReference,
lambda r: dict(
contribution=data['Chapter'][r['chapter_id']],
source=data['Source'][r['reference_id']]))
migrate(
'reference_supplement',
common.ContributionReference,
lambda r: dict(
contribution=data['Chapter']['s%s' % r['supplement_id']],
source=data['Source'][r['reference_id']]))
def migrate_feature(row):
kw = dict(id=row['id'], name=row['name'], ordinal_qualifier=row['id'][-1])
if row['id'].startswith('143') or row['id'].startswith('144'):
kw['created'] = E2011
kw['updated'] = E2011
kw['chapter'] = data['Chapter'][row['chapter_id']]
return row['id'], kw
migrate('feature', models.Feature, migrate_feature)
def migrate_value(row):
desc = row['description']
if desc == 'SOV & NegV/VNeg':
if row['icon_id'] != 's9ff':
desc += ' (a)'
else:
desc += ' (b)'
kw = dict(
id='%s-%s' % (row['feature_id'], row['numeric']),
name=desc,
description=row['long_description'],
jsondata=dict(icon=issues.Icons.id(row['icon_id'])),
number=row['numeric'],
parameter=data['Feature'][row['feature_id']])
return (row['feature_id'], row['numeric']), kw
migrate('value', common.DomainElement, migrate_value)
same = 0
added = 0
for row in old_db.execute("select * from datapoint"):
parameter = data['Feature'][row['feature_id']]
language = data['WalsLanguage'][row['language_id']]
id_ = '%s-%s' % (parameter.id, language.id)
created = E2008
updated = E2008
value_numeric = row['value_numeric']
if (language.id, parameter.id) in vs2008:
if vs2008[(language.id, parameter.id)] != row['value_numeric']:
print '~~~', id_, vs2008[(language.id, parameter.id)], '-->', row['value_numeric']
value_numeric = vs2008[(language.id, parameter.id)]
else:
same += 1
else:
updated = E2011
created = E2011
if parameter.id[-1] == 'A' and not (parameter.id.startswith('143') or parameter.id.startswith('144')):
added += 1
kw = dict(id=id_, updated=updated, created=created)
valueset = data.add(
common.ValueSet, row['id'],
language=language,
parameter=parameter,
contribution=parameter.chapter,
**kw)
data.add(
common.Value, id_,
domainelement=data['DomainElement'][(row['feature_id'], value_numeric)],
valueset=valueset,
**kw)
print same, 'datapoints did not change'
print added, 'datapoints added to existing features'
DBSession.flush()
migrate(
'datapoint_reference',
common.ValueSetReference,
lambda r: dict(
valueset=data['ValueSet'][r['datapoint_id']],
source=data['Source'][r['reference_id']],
description=r['note']))
migrate(
'author_chapter',
common.ContributionContributor,
lambda r: dict(
ord=r['order'],
primary=r['primary'] != 0,
contributor_pk=data['Contributor'][r['author_id']].pk,
contribution_pk=data['Chapter'][r['chapter_id']].pk))
migrate(
'author_supplement',
common.ContributionContributor,
lambda r: dict(
ord=r['order'],
primary=r['primary'] != 0,
contributor_pk=data['Contributor'][r['author_id']].pk,
contribution_pk=data['Chapter']['s%s' % r['supplement_id']].pk))
igts = defaultdict(lambda: [])
for row in old_db.execute("select * from igt"):
d = {'id': 'igt-%s' % row['id']}
d.update(parse_igt(row['xhtml']))
igts[row['example_id']].append(d)
for row in old_db.execute("select * from example"):
if not row['language_id']:
print 'example without language:', row['id']
continue
_igts = igts[row['id']]
if _igts:
for igt in _igts:
data.add(
common.Sentence, igt['id'],
markup_comment=row['xhtml'],
language=data['WalsLanguage'][row['language_id']],
**igt)
else:
name = teaser(row['xhtml'])
if name:
data.add(
common.Sentence, row['id'],
id=str(row['id']),
name=name,
xhtml=row['xhtml'],
language=data['WalsLanguage'][row['language_id']])
missing = {}
for row in old_db.execute("select * from example_feature"):
_igts = igts[row['example_id']]
if _igts:
for igt in _igts:
try:
sentence = data['Sentence'][igt['id']]
except KeyError:
print 'missing sentence:', row['example_id']
continue
try:
value = data['Value']['%s-%s' % (row['feature_id'], sentence.language.id)]
DBSession.add(common.ValueSentence(sentence=sentence, value=value))
except KeyError:
missing[(row['feature_id'], sentence.language.id)] = 1
#print 'missing datapoint:', '%s-%s' % (row['feature_id'], sentence.language.id)
else:
try:
sentence = data['Sentence'][row['example_id']]
except KeyError:
print 'missing sentence:', row['example_id']
continue
try:
value = data['Value']['%s-%s' % (row['feature_id'], sentence.language.id)]
DBSession.add(common.ValueSentence(sentence=sentence, value=value))
except KeyError:
missing[(row['feature_id'], sentence.language.id)] = 1
#print 'missing datapoint:', '%s-%s' % (row['feature_id'], sentence.language.id)
print len(missing), 'missing datapoints for example_feature relations'
def prime_cache(args): # pragma: no cover
"""
Recompute cached/derived values after the migration.

We use a versioned session so the changes in value assignment are recorded
as explicit new versions (2011/2013 editions) instead of silent updates.

NOTE(review): this is Python 2 code (print statements). The names DB,
DBSession, VersionedDBSession, common, E2011, E2013, issues, transaction,
get_vs2008, reader, groupby, joinedload_all, compute_language_sources and
gbs_func come from module-level imports outside this view.
"""
#
# compute the changes from 2008 to 2011:
#
# Replay every datapoint whose 2008 value differs from the current one:
# point the Value at the new domain element and stamp it with E2011.
vs2008 = get_vs2008(args)
for row in DB.execute("select * from datapoint"):
key = (row['language_id'], row['feature_id'])
old_value = vs2008.get(key)
new_value = row['value_numeric']
if old_value and old_value != new_value:
valueset = VersionedDBSession.query(common.ValueSet)\
.join(common.Language)\
.join(common.Parameter)\
.filter(common.Parameter.id == row['feature_id'])\
.filter(common.Language.id == row['language_id'])\
.one()
value = valueset.values[0]
# Sanity check: the stored value must still be the 2008 one.
assert value.domainelement.number == old_value
for de in valueset.parameter.domain:
if de.number == new_value:
value.domainelement = de
break
assert value.domainelement.number == new_value
valueset.updated = E2011
value.updated = E2011
VersionedDBSession.flush()
# Apply the hand-curated 2013 corrections; the tab file has columns
# feature, wals_code, old, new.
for row in reader(args.data_file('corrections_2013.tab'), namedtuples=True, newline='\r'):
valueset = VersionedDBSession.query(common.ValueSet)\
.join(common.Language)\
.join(common.Parameter)\
.filter(common.Parameter.id == row.feature)\
.filter(common.Language.id == row.wals_code)\
.one()
value = valueset.values[0]
if value.domainelement.number == int(row.new):
# Correction already applied upstream; nothing to do.
print '**** old news', valueset.language.id, valueset.parameter.id
continue
if value.domainelement.number != int(row.old):
# Current value differs from the expected 'old' value; log, then
# apply the correction anyway.
print '--->', valueset.language.id, valueset.parameter.id, value.domainelement.number
for de in valueset.parameter.domain:
if de.number == int(row.new):
value.domainelement = de
break
assert value.domainelement.number == int(row.new)
valueset.updated = E2013
value.updated = E2013
VersionedDBSession.flush()
print 'corrections 2013 done'
# Run the scripted fixups issues.issue<N> under the 2013 edition stamp.
# Note: the loop variable 'issue' is rebound from the id string to the
# fixup callable on the first line of the body.
for issue in ['0', '9', '10', '11', '13', '14', '15', '16', '17', '19', '20', '24', '26', '27', '28']:
issue = getattr(issues, 'issue' + issue)
issue(VersionedDBSession, E2013)
VersionedDBSession.flush()
transaction.commit()
transaction.begin()
#
# TODO: these must be recomputed as well, after migrations!
#
# cache number of languages for a parameter:
# groupby relies on the order_by(parameter_pk): rows must arrive already
# grouped by parameter for a single pass to work.
for parameter, valuesets in groupby(
DBSession.query(common.ValueSet).order_by(common.ValueSet.parameter_pk),
lambda vs: vs.parameter):
parameter.representation = str(len(set(v.language_pk for v in valuesets)))
print 'recomputation of representation done'
transaction.commit()
transaction.begin()
# cache iso codes for languages: denormalize the ISO identifiers into one
# sorted, comma-separated string per language.
for language in DBSession.query(common.Language).options(joinedload_all(
common.Language.languageidentifier, common.LanguageIdentifier.identifier
)):
iso_codes = []
for identifier in language.identifiers:
if identifier.type == common.IdentifierType.iso.value:
iso_codes.append(identifier.name)
language.iso_codes = ', '.join(sorted(set(iso_codes)))
print 'recomputation of iso codes done'
transaction.commit()
transaction.begin()
compute_language_sources()
transaction.commit()
transaction.begin()
# presumably refreshes Google Book Search data for sources -- confirm.
gbs_func('update', args)
# Script entry point: hand the data-load step (main) and the cache step
# (prime_cache) to the clld initializedb driver, which parses CLI args
# and runs them in order.
if __name__ == '__main__': # pragma: no cover
initializedb(create=main, prime_cache=prime_cache)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
313596f03b52d8f926d39f82a8d8f88c0c0a19bf | 829d1c828e4e90b151718c4902413700e60db512 | /Day_4.py | cfa06f63b2f8ba1598788baaed64f8084c15565c | [] | no_license | SoadB/100DaysOfCode-Python | 2f8bcd337dc317bc33e8814ea1aeb24f78e4974f | 9482e26825bf1bd5e7520371736896208086b185 | refs/heads/master | 2020-07-11T08:45:11.838852 | 2019-11-26T22:56:03 | 2019-11-26T22:56:03 | 204,493,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py |
import random

# Demo of Python's numeric types; every section prints the runtime type of
# a few sample values, separated by a dashed rule.
SEPARATOR = "--------------------------"


def show_types(*values):
    # Print the runtime type of each value, one per line.
    for value in values:
        print(type(value))


# Ex1: one literal of each core numeric type (int, float, complex).
show_types(5, 4.5, 2j)
print(SEPARATOR)

# Ex2: Python 3 ints have arbitrary precision, so huge literals are fine.
show_types(-72836309174392816438)
print(SEPARATOR)

# Ex: a negative float literal.
show_types(-5.721)
print(SEPARATOR)

# Ex: scientific notation ("e"/"E" power of 10) always yields float.
show_types(35e3, 12E4, -87.7e100)
print(SEPARATOR)

# Ex: complex literals use a j/J suffix.
show_types(3 + 5j, 5J, -72j)
print(SEPARATOR)

# Ex: converting between the numeric types.
converted_float = float(43)   # int -> float
truncated_int = int(-1.29)    # float -> int (truncates toward zero)
as_complex = complex(43)      # int -> complex
print(converted_float)
print(truncated_int)
print(as_complex)
show_types(converted_float, truncated_int, as_complex)
print(SEPARATOR)

# Ex: pseudo-random integer in [1, 10); import statement sits at file top.
print(random.randrange(1, 10))
# NOTE(review): everything below repeats the preceding demo section of this
# file verbatim -- it looks like an accidental paste duplication; confirm
# before deleting (removing it halves the script's output).
import random
# Ex1
x = 5
y = 4.5
z = 2j
print(type(x)), print(type(y)), print(type(z))
print("--------------------------")
# Ex2. int number (Python 3 ints are arbitrary precision)
long = -72836309174392816438
print(type(long))
print("--------------------------")
# Ex. float number
num = -5.721
print(type(num))
print("--------------------------")
# Ex. power of 10 (scientific notation always produces float)
a = 35e3
b = 12E4
c = -87.7e100
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Complex number (j/J suffix)
val_1 = 3+5j
val_2 = 5J
val_3 = -72j
print(type(val_1)), print(type(val_2)), print(type(val_3))
print("--------------------------")
# Ex. Convert between the numbers
x = 43
y = 61j  # NOTE(review): assigned but never used below
z = -1.29
a = float(x)
b = int(z)
c = complex(x)
print(a), print(b), print(c)
print(type(a)), print(type(b)), print(type(c))
print("--------------------------")
# Ex. Random number, import statement at a top file
print(random.randrange(1, 10))
| [
"soadb321@gmail.com"
] | soadb321@gmail.com |
e4c15e1ee609db1dfee0dcf2cb6a825074785e3c | 3e9259daf292e924b0f114b3fa2d4249f103de1a | /AI/DEAD_pyvona_test.py | 411d52ac7fb166aaa6486fd4c8ba8199b7210379 | [] | no_license | rajonali/HorribleAI | bd692479dc11114f525b7232e8b442e14ee27cf0 | 6644c931652802244b231df47c68cf4b2d6b673b | refs/heads/master | 2021-01-25T09:04:08.814758 | 2019-02-01T04:53:08 | 2019-02-01T04:53:08 | 83,951,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import pyvona
# Minimal pyvona (Ivona text-to-speech client, third-party) smoke test.
# NOTE(review): 'filename' is assigned but never used -- presumably it was
# meant for v.fetch_voice(...) output; confirm. The create_voice() arguments
# are placeholder access/secret keys.
filename = "/home/rajonali/AI/filename.ogg"
v = pyvona.create_voice("something", "something")
v.speak("Hello World")
| [
"noreply@github.com"
] | rajonali.noreply@github.com |
e3d05428e8745778fea2949c845fbc7da34a2630 | 6fd76f3bec4aa0784be93cfbd0f6fa72a00bbf5c | /accounts/views.py | efaf95511e52581717a16d54a73d3ba008f78bba | [] | no_license | ihor-nahuliak/Python-Django-Website | 70e43ceadac36c745c6a50fc8635e01872c433e2 | af4338325a9b741a7f047738049218d8384d6183 | refs/heads/master | 2020-12-08T03:35:25.046570 | 2019-11-01T12:07:46 | 2019-11-01T12:07:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
def register(request):
    """Handle user sign-up.

    GET renders the registration form; POST validates the submitted fields
    and either re-redirects to the form with a flash error, or creates the
    User and redirects to the login page with a success message.
    """
    if request.method != 'POST':
        return render(request, 'accounts/register.html')

    # Get form values
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    username = request.POST['username']
    email = request.POST['email']
    password = request.POST['password']
    password2 = request.POST['password2']

    # Check if passwords match
    if password != password2:
        messages.error(request, 'Passwords do not match')
        return redirect('register')

    # Reject a username that is already taken
    if User.objects.filter(username=username).exists():
        messages.error(request, 'That username is taken')
        return redirect('register')

    # Reject an email address that is already registered
    # (fixed user-facing typo: "beign" -> "being")
    if User.objects.filter(email=email).exists():
        messages.error(request, 'That email is being used')
        return redirect('register')

    # All checks passed: create the account and send the user to log in.
    # create_user() already persists the new user, so no extra save() call
    # is needed.
    User.objects.create_user(
        username=username, password=password, email=email,
        first_name=first_name, last_name=last_name)
    messages.success(request, 'You are now registered and can log in')
    return redirect('login')
def login(request):
    """Authenticate a user from posted credentials.

    GET renders the login form; POST checks the credentials and redirects
    to the dashboard on success or back to the form on failure.
    """
    if request.method != 'POST':
        return render(request, 'accounts/login.html')

    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)

    if user is None:
        messages.error(request, 'Invalid Credentials')
        return redirect('login')

    auth.login(request, user)
    messages.success(request, 'You are now logged in')
    return redirect('dashboard')
def logout(request):
    """End the current session; only acts on POST requests."""
    if request.method == 'POST':
        auth.logout(request)
        messages.success(request, 'You are logged out')
        return redirect('index')
    # NOTE(review): a non-POST request falls through and implicitly returns
    # None, which Django reports as "view didn't return an HttpResponse" --
    # confirm whether GET should redirect (e.g. to 'index') instead.
def dashboard(request):
    """Render the signed-in user's contact inquiries, newest first."""
    contacts_for_user = Contact.objects.order_by(
        '-contact_date').filter(user_id=request.user.id)
    return render(
        request,
        'accounts/dashboard.html',
        {'contacts': contacts_for_user},
    )
| [
"salmanmoazam08@gmail.com"
] | salmanmoazam08@gmail.com |
124ef503647674d5954b7b9f0a1783d787153177 | 9b3a3a84d51b46f576b12ebf4da42f9c7244fc95 | /introduction-to-python/02-datatype.py | a2b4764e02b72c09cf67285dc8434db055c74fd9 | [] | no_license | Markweell/introduction-to-python | bce97e40a1553e7caa90e7b82d7e1510a816c3a1 | ea3ceb38e482cab265c2fe8a638225ba1dc47e26 | refs/heads/master | 2022-06-20T02:07:37.597644 | 2020-05-09T18:34:46 | 2020-05-09T18:34:46 | 262,634,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #string
print(type("Hello word")) # str
print(type('Hello word')) # str
print(type('''Hello word''')) # str
print(type("""Hello word""")) # str
# Number
print(type(30)) # int
print(type(30.3 )) # float
# Bolean
print(type(False)) # bool
# List
print(type([12,123,123,123])) # List
print(type([12,False,123,'string'])) # List
# Tuplas
print(type((10,12,12,'123'))) # Tupla, como una lista, pero inmutable
# Dictionaries.
print(type({"name": "Marcos"})) # dict
# None
print(type(None)) | [
"marcosgallardoperez@gmail.com"
] | marcosgallardoperez@gmail.com |
cc0c0b02bbebc632dd806ce1cb000e302ef11030 | 1c5444654ab9756378b19b633f89c34703b789f7 | /workspace/dcmfinder.py | e75d9fd564c0d479b40b9e839829e74dcb5f54fc | [
"MIT"
] | permissive | ythackerCS/DCMFinder-Container | 4f0834288becf5500d9072c75e33943e667539a1 | cd37fe8ffb01f067c9b3876f35293c123ccaf644 | refs/heads/main | 2023-08-15T19:59:31.669406 | 2021-10-08T23:08:05 | 2021-10-08T23:08:05 | 415,128,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,228 | py | import sys, os
import re
import subprocess
import csv
from numpy.core.numeric import count_nonzero
import pydicom
from tqdm import tqdm
# NOTE(review): this flag is read nowhere in the visible file -- presumably
# it was meant to control whether files whose DICOM header lacks a filter
# tag are kept; confirm the intended missing-tag policy before relying on it.
keepIfTagNotFound = True
def findDicoms(FilterForArray, FilterAgainstArray):
    """Recursively find .dcm files under /input/ and write the matches to CSV.

    Every experiment folder directly under /input/ is walked recursively and
    any file ending in '.dcm' is a candidate.  Candidates are then filtered:

    FilterForArray     -- (tag, value) pairs; when non-empty, a file is kept
                          only if ANY pair matches its DICOM header (OR).
    FilterAgainstArray -- (tag, value) pairs; a kept file is dropped when
                          ANY pair matches its header.

    One row per experiment is appended to
    /output/dcmwithoutClassification.csv as [experimentnumber, kept_paths].

    NOTE(review): a header that lacks a requested tag still makes getattr()
    raise AttributeError (same as before this rewrite); the module flag
    keepIfTagNotFound is never consulted -- confirm the missing-tag policy.
    """
    print("RUNNING DCM FINDER")
    # Input directory (main directory) searched for all candidate DICOM files.
    dataDir = "/input/"
    experimentNumbers = os.listdir(dataDir)
    originalCount = 0
    tagnotFoundTime = 0  # never incremented; kept so the stats line is unchanged
    filteredCount = 0
    with open('/output/dcmwithoutClassification.csv', 'w') as dcm_csv:
        csv_writer = csv.writer(dcm_csv, delimiter=',')
        csv_writer.writerow(["experimentnumber", "dcmsArray"])
        # Log the active filters once, up front.
        for keepFilter in FilterForArray:
            print("Filtering for: ", keepFilter[0], "==", keepFilter[1])
        for removeFilter in FilterAgainstArray:
            print("Filtering for: ", removeFilter[0], "!=", removeFilter[1])
        for experimentNumber in tqdm(experimentNumbers):
            dataFolder = os.path.join(dataDir, experimentNumber)
            # NOTE: recursive search -- every directory and subdirectory is
            # scanned for files with a '.dcm' extension.
            dcmFiles = [os.path.join(dp, f)
                        for dp, dn, filenames in os.walk(dataFolder)
                        for f in filenames
                        if os.path.splitext(f)[1] == '.dcm']
            originalCount += len(dcmFiles)
            # Resolve symlinks one file at a time.  The previous
            # all-or-nothing try/except left EVERY path unresolved as soon
            # as a single readlink() failed.
            resolvedDCMFiles = []
            for path in dcmFiles:
                try:
                    resolvedDCMFiles.append(os.readlink(path))
                except OSError:
                    resolvedDCMFiles.append(path)  # not a symlink (or unreadable)
            if FilterForArray or FilterAgainstArray:
                kept = []
                for filePath in resolvedDCMFiles:
                    # Read each header exactly once (previously it was
                    # re-read for every individual filter pair).
                    header = pydicom.read_file(filePath)
                    if FilterForArray and not any(
                            getattr(header, kf[0]) == kf[1]
                            for kf in FilterForArray):
                        continue
                    if any(getattr(header, rf[0]) == rf[1]
                           for rf in FilterAgainstArray):
                        continue
                    kept.append(filePath)
            else:
                # No filters at all: keep everything without opening files.
                kept = resolvedDCMFiles
            # BUGFIX: the original aliased the keep/drop lists
            # (filteredAgainstDCM = filteredForDCM) and then called
            # list.remove() while iterating that same list, silently
            # skipping elements; the filtering above has no aliasing.
            filteredCount += len(kept)
            csv_writer.writerow([experimentNumber, kept])
    print("Stats \n", "original lenth", originalCount, "\n tag(s) not found for time filters", tagnotFoundTime, "\n filteredLenth", filteredCount)
| [
"46691555+ythackerCS@users.noreply.github.com"
] | 46691555+ythackerCS@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.