blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a0f2cb630520d7d754dab75c74fc1a264fc997f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03378/s701099054.py | 973d6f5da00380a9f78771557ce253738abc986c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | n, m, x = map(int, input().split())
A = list(map(int, input().split()))
start = 0
end = 0
for i in range(x):
if i in A:
start += 1
for i in range(x,n+1):
if i in A:
end += 1
print(min(start, end)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e8dbd929840258d578174ce40685fd6ebdaa89b1 | f3dbb5b2bdbb4b45bb6548935e7873736c7adc68 | /python/dvalley_core_TestEngineer/.svn/pristine/e8/e8dbd929840258d578174ce40685fd6ebdaa89b1.svn-base | 84058cade8f93e499ed8d350d3df254da5415353 | [] | no_license | lw000/new_python_demo | ece955bdb7df2c5499e4fb582503c13081c89649 | 3ca55b2c3106db75e31676e45e6c5c1f5a6fd714 | refs/heads/master | 2020-03-26T21:09:33.082625 | 2018-08-20T05:37:54 | 2018-08-20T05:37:54 | 145,370,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,954 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2018年05月31日
@author: Administrator
'''
import sys
import demjson
import logging
import dvcore
from base_api import BaseApi
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class UserApi(BaseApi):
def __init__(self, debug = 0):
super(UserApi, self).__init__(debug=debug)
def start(self):
self.__post_user_resetPassword(method='/user/resetPassword')
self.__post_user_info(method='/user/info')
self.__post_user_baseinfo(method='/user/info')
self.__post_user_nearUser(method='/user/nearUser')
def stop(self):
pass
def __post_user_login(self, method):
params = {'phone':'13632767233', 'pwd':dvcore.webRsaEncrypt('lwstarr23133')}
r = self._send_post(method=method, token=None, params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
self.login_result_data = result['data']
print('result:' + str(result['result']))
print('data:' + str(result['data']))
return True
else:
print(str(result))
else:
log.debug(r)
return False
def __post_user_resetPassword(self, method):
params = dict(uid=self.login_result_data['uid'], old_pwd=dvcore.webRsaEncrypt('lwstarr23133'), pwd=dvcore.webRsaEncrypt('lwstarr23133'))
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#获取用户信息
def __post_user_info(self, method):
params = dict(uid=self.login_result_data['uid'], to_uid=self.login_result_data['uid'])
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#获取用户基本信息
def __post_user_baseinfo(self, method):
params = dict(uid=self.login_result_data['uid'], to_uid=self.login_result_data['uid'])
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#附近的用户
def __post_user_nearUser(self, method):
params = dict(uid=self.login_result_data['uid'], lng='', lat='', sex=2, page=1, limit=20, distance=2000)
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r) | [
"liwei@liweideiMac.local"
] | liwei@liweideiMac.local | |
7a99d10927155a58b60057bd7c103dbb1d282c98 | 486a7eda8f492b0607b262d7128c78427b180269 | /src/profileapp/models.py | ddb452673a3cbd06eebbd2af1cc4548f9c766c9f | [] | no_license | Komilcoder/orm | f6d09206d0e9a9b911657cf4c4c3405696d3a39d | 432d7121088aaf53a97a3c12dcf371494262c38a | refs/heads/master | 2023-03-16T06:25:11.561958 | 2021-03-15T07:07:44 | 2021-03-15T07:07:44 | 347,868,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
client = models.ForeignKey(User,on_delete=models.CASCADE)
name = models.CharField(max_length=256)
def __str__(self):
return self.name
class Subject(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Products(models.Model):
subject = models.ForeignKey(Subject,on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=255)
cost = models.IntegerField(default=0)
def __str__(self):
return self.name
class Order(models.Model):
product = models.ForeignKey(Products,on_delete=models.CASCADE)
amount = models.PositiveIntegerField(default=0)
total_price = models.IntegerField()
client = models.ForeignKey(Client,on_delete=models.CASCADE, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.product)
| [
"yaxshilikovkomil@gmail.com"
] | yaxshilikovkomil@gmail.com |
3e02c0db5013c15ac08acaf6e6509dea8a7ff612 | 20f89f49400feb9d2885dc2daf3ea3ca189556e7 | /day05/proctice/三元表达式.py | 5e0bb8bef2856ba4cf186a6e9284acb76a7fb6c2 | [] | no_license | xiaobaiskill/python | 201511b1b1bddec8c33c4efa7ca2cc4afed24a89 | 540693baad757369ff811fb622a949c99fb6b4ba | refs/heads/master | 2021-04-12T03:43:30.308110 | 2018-07-13T01:41:19 | 2018-07-13T01:41:19 | 125,884,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
# Author Jmz
'''
语法:
[成立1 if condition1 else
成立2 if condition2 else ...
if 成立N conditionN else 不成立]
'''
sex = 'man'
print('正确' if sex == 'man' else '错误')
# 正确
'''
语句解析:
sex = 'man'
if sex == 'man':
print('正确')
else:
print('错误')
'''
age = 23
res = '猜大了' if age > 23 else '猜小了' if age < 23 else '猜对了'
print(res)
# '猜对了'
'''
语句解析:
age = 23
if age >23:
res = '猜大了'
elif age <23:
res = '猜小了'
else:
res = '猜对了'
''' | [
"1125378902@qq.com"
] | 1125378902@qq.com |
c29200633e547e1e14f2c3a49bc8b32333405810 | 86d499787fb35024db798b0c1dbfa7a6936854e9 | /py_tools/example/http_demo/02-根据用户需要返回相应的页面.py | cef6b306a0e0db13d4dc1c3b733418c609ce81ea | [] | no_license | Tomtao626/python-note | afd1c82b74e2d3a488b65742547f75b49a11616e | e498e1e7398ff66a757e161a8b8c32c34c38e561 | refs/heads/main | 2023-04-28T08:47:54.525440 | 2023-04-21T17:27:25 | 2023-04-21T17:27:25 | 552,830,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | #coding:utf-8
import socket
import re
def service_client(new_socket):
'''为这个客户端返回数据'''
# 接受浏览器发送过来的请求,http请求
request = new_socket.recv(1024)
# print(request)
request_lines = request.splitlines()
print("")
print(">"*20)
print(request_lines)
'''
GET /index.html HTTP/1.1
'''
file_name = ""
ret = re.match(r"[^/]+(/[^]*)",request_lines[0])
if ret:
file_name = ret.group(1)
if file_name == "/":
file_name = "/index.html"
try:
f = open("./html"+file_name, "rb")
except:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "------file not found------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 返回http格式的数据f给浏览器
# 准备要发送给浏览器的数据---header
response = "HTTP/1.1 200 OK"
response += "\r\n"
# 准备发送给浏览器的数据---body
# response += "hhhhhhhh"
new_socket.send(response.encode("utf-8"))
new_socket.send(html_content)
new_socket.close()
def main():
#创建套接字
tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
#绑定
tcp_server.bind(("",8001))
#变为监听套接字
tcp_server.listen(128)
while True:
#等待新客户端的连接
new_socket, client_addr = tcp_server.accept()
#为这个客户端服务
service_client(new_socket)
# 关闭监听套接字
tcp_server.close()
if __name__ == "__main__":
main()
| [
"gogs@fake.local"
] | gogs@fake.local |
4572a666a39fd92088aeb374ea763180721e82f5 | 16e26dfa651770b51a7879eab5e2ab8d3d6cff92 | /src/data/make_dataset.py | b448390f2975f564afbb864c4610b03a179c80fa | [
"MIT"
] | permissive | carlosgalvez-tiendeo/ue_master-TFM | 108879efc56b01ed64ddbc0346359efcaf66a5e8 | 97ac4d3029b1eef8e95ea8cca846812cafa87d0a | refs/heads/main | 2023-06-09T11:20:59.913197 | 2021-06-23T18:14:00 | 2021-06-23T18:14:00 | 379,906,271 | 0 | 0 | MIT | 2021-06-24T11:45:31 | 2021-06-24T11:45:31 | null | UTF-8 | Python | false | false | 1,476 | py | # -*- coding: utf-8 -*-
import click
import logging
import os
import pandas as pd
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from sklearn.model_selection import train_test_split
PROJECT_DIR = Path(__file__).resolve().parents[2]
RANDOM_STATE = 288
def get_dataset():
df = pd.read_csv('src/data/ds_job.csv')
df.set_index('empleado_id', inplace=True)
X, y = df.drop('target', axis=1), df['target']
return train_test_split(X, y, test_size=0.2, random_state=RANDOM_STATE)
@click.command()
@click.argument('output_filepath', type=click.Path())
def main(output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
if not os.path.exists(output_filepath):
click.ClickException('Path doesn\'t exists').show()
return
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data...')
get_dataset(output_filepath)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| [
"carlos.galvez@tiendeo.com"
] | carlos.galvez@tiendeo.com |
bbfb3e803109460939ad25a2601cb97c67d78f1d | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/traits/ui/menu.py | 3aa7fcd16d7341bc54f84c36dffaa1a4a8e89bcf | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 43 | py | # proxy module
from traitsui.menu import *
| [
"ischnell@enthought.com"
] | ischnell@enthought.com |
decd3605c3e8f3451b507837fc9e80d6e6f84b18 | b659e99f89cf17ae886857383cb5b708847fe3f1 | /Think Python Book/caseStudy-wordPlay/exercise9.3.py | 9014f3fb5e4d0edbc08f63fb7211540e2e2a2b34 | [] | no_license | nitheeshmavila/practice-python | bea06cc4b2b9247b926e07fd5a3987552e531242 | f54bf8934a4cf160cdfc9dc43176f1eea3bc7a41 | refs/heads/master | 2021-07-03T17:24:29.450939 | 2021-06-16T08:40:48 | 2021-06-16T08:40:48 | 100,113,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | '''
Exercise 9.2
--------------
Write a function named avoids that takes a word and a string of forbidden letters, and
that returns True if the word doesn’t use any of the forbidden letters.
Modify your program to prompt the user to enter a string of forbidden letters and then
print the number of words that don’t contain any of them. Can you find a combination
of 5 forbidden letters that excludes the smallest number of words?'''
def avoids(forbidden, word):
for ch in word:
if ch in forbidden:
return False
return True
count = 0
words = open('words.txt','r')
forbidden = input('enter the forbidden string\n')
for word in words:
if avoids(word, forbidden):
count += 1
print("%d words that don’t contain any of letters in %s"%(count,forbidden))
| [
"mail2nitheeshmavila@gmail.com"
] | mail2nitheeshmavila@gmail.com |
d84c3a1d543303d754b7448bdcaed968e11c7e93 | 12971fc2b1426f3d3a52039f21c4c2d7bb820f68 | /Exercises4Programmers_57Challenges/01_saying_hello/python/hello_challenge_01.py | dd10b80c3287cb06046aadc6a60c2a3755dfa048 | [
"MIT"
] | permissive | adrianogil/AlgoExercises | 29b3c64e071008bffbfe9273130f980100381deb | be1d8d22eedade2e313458e8d89185452d9da194 | refs/heads/main | 2023-08-18T23:31:51.463767 | 2023-07-22T18:02:46 | 2023-07-22T18:02:46 | 86,254,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # The “Hello, World” program is the first program you learn
# to write in many languages, but it doesn’t involve any input.
# So create a program that prompts for your name and prints a greeting using your name.
# Example Output
# What is your name? Brian
# Hello, Brian, nice to meet you!
# Challenge:
# - Write a new version of the program without using any variables.
print(f"Hello, {input('What is your name? ')}, nice to meet you!")
| [
"adrianogil.san@gmail.com"
] | adrianogil.san@gmail.com |
f044703c469fe0463f500e944f0e552a812c0673 | 1d7618d3f1a9ffcabb18caa65d09ea48b632b80b | /tests/opt/projectors/test_proj_lpballs.py | 156fb71b1de5c9cc8e4a0c07b95fb25edf5259ab | [
"Apache-2.0"
] | permissive | carnotresearch/cr-sparse | 668b607f0edd55644db6a879ec6b24a3171b264e | c43496bc54ec32b80b5a64901b04d0a12ac6687f | refs/heads/main | 2023-05-23T19:13:06.283727 | 2022-10-08T06:37:37 | 2022-10-08T06:37:37 | 323,566,858 | 70 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,337 | py | from projectors_setup import *
# L2 Balls
@pytest.mark.parametrize("x,q,outside", [
[[3,4], 5, 0],
[[3,4], 4, 1],
])
def test_l2_ball(x, q, outside):
ball = projectors.proj_l2_ball(q)
v = ball(x)
if outside:
# second projection should not change it
assert_array_equal(ball(v), v)
else:
# projection should not change it
assert_array_equal(v, x)
@pytest.mark.parametrize("x,q,b,outside", [
[[3,4], 5, 0, 0],
[[3,4], 4, 0, 1],
[[3,4], 5, [0,0], 0],
[[3,4], 4, [0,0], 1],
[[3,4], 5, [1,1], 0],
[[4,5], 4, [1,1], 1],
])
def test_l2_ball_b(x, q, b, outside):
ball = projectors.proj_l2_ball(q, b=b)
v = ball(x)
if outside:
# second projection should not change it
assert_array_equal(ball(v), v)
else:
# projection should not change it
assert_array_equal(v, x)
# @pytest.mark.parametrize("x,q,b,outside", [
# [[3,4], 5, 0, 0],
# [[3,4], 4, 0, 1],
# [[3,4], 5, [0,0], 0],
# [[3,4], 4, [0,0], 1],
# [[3,4], 5, [1,1], 0],
# [[4,5], 4, [1,1], 1],
# ])
# def test_l2_ball_b_a(x, q, b, outside):
# n = len(x)
# A = jnp.eye(n)
# ball = projectors.proj_l2_ball(q, b=b, A=A)
# v = ball(x)
# if outside:
# # second projection should not change it
# assert_array_equal(ball(v), outside)
# else:
# # projection should not change it
# assert_array_equal(v, x)
# # L1 Balls
@pytest.mark.parametrize("x,q,outside", [
[[3,4], 7, 0],
[[3,4], 4, 1],
])
def test_l1_ball(x, q, outside):
ball = projectors.proj_l1_ball(q)
v = ball(x)
if outside:
# second projection should not change it
assert_array_equal(ball(v), v)
else:
# projection should not change it
assert_array_equal(v, x)
@pytest.mark.parametrize("x,q,b,outside", [
[[3,4], 7, 0, 0],
[[3,4], 4, 0, 1],
[[3,4], 7, [0,0], 0],
[[3,4], 4, [0,0], 1],
[[3,4], 5, [1,1], 0],
[[4,5], 4, [1,1], 1],
])
def test_l1_ball_b(x, q, b, outside):
ball = projectors.proj_l1_ball(q, b=b)
v = ball(x)
if outside:
# second projection should not change it
assert_array_equal(ball(v), v)
else:
# projection should not change it
assert_array_equal(v, x)
| [
"shailesh@indigits.com"
] | shailesh@indigits.com |
6ffec8c541799771f50bd871e898069f9467c6d2 | 14aa22b09485b40bd1983b6b00f6b8d2f743e0c9 | /120.Triangle/main.py | fea13da0a7ed334583985b15b216c63755b3ffc8 | [] | no_license | ZhihaoJun/leetcode | 02030539815b6353b6a5588c64eebb4a10882b9d | 3cbb3f8ec81aa688ca9d90903a49cdf4a6130c4c | refs/heads/master | 2021-10-25T22:55:45.336738 | 2019-04-08T04:21:41 | 2019-04-08T04:21:41 | 33,469,539 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | class Solution(object):
def get(self, x, y):
return self.f[y][x]
def set(self, x, y, v):
if y not in self.f:
self.f[y] = {}
self.f[y][x] = v
def value(self, x, y):
return self.triangle[y][x]
def cal(self, x, y):
if y == self.size-1:
self.set(x, y, self.value(x, y))
else:
m = min(self.get(x, y+1), self.get(x+1, y+1))
self.set(x, y, m+self.value(x, y))
return self.get(x, y)
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
self.size = len(triangle)
self.triangle = triangle
self.f = {}
for y in xrange(self.size-1, -1, -1):
for x in xrange(y+1):
self.cal(x, y)
return self.cal(0, 0)
def main():
triangle = [
[2],
[3, 4],
[6, 5, 7],
[4, 1, 8, 3]
]
triangle = [[0]]
print Solution().minimumTotal(triangle)
if __name__ == '__main__':
main()
| [
"zhao11fs@gmail.com"
] | zhao11fs@gmail.com |
56577c16868cd793762ec33a8905362318f541f3 | b3f063ba06a7b4786463cc77756dde766da2b687 | /setup/load_data/load_csv.py | 1f79989374914f1f7d6295fee1dbad5e1dd66a9b | [] | no_license | Lothilius/fun-with-containers | 594b6e9c0470021fc659eda8ef182bb8a0960cc6 | fbc433323f49b13ab13b06aacda0aa57ccbd84b0 | refs/heads/master | 2022-12-22T06:42:34.069508 | 2020-10-06T09:28:30 | 2020-10-06T09:28:30 | 300,806,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | import pandas as pd
from os import environ
import logging
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
import traceback
import time
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(message)s')
logger = logging.getLogger(__name__)
def connect_to_db():
alchemy_engine = create_engine('postgresql+psycopg2://%s:%s@%s/postgres' % (environ['POSTGRES_USER'],
environ['POSTGRES_PASSWORD'],
environ['POSTGRES_HOST']),
pool_recycle=3600)
return alchemy_engine.connect()
def load_csv(table_name="users"):
""" Load a CSV file in to a predefined postgres DB by creating a table with the name provided. Postgres and csv
are predefined in environment variable.
:param table_name: string of the table name to be created
:return: None
"""
# Create the Connection to the Postgres Database
try:
postgres_connection = connect_to_db()
except OperationalError:
logger.warning("DB not available. Trying again in 5 seconds")
time.sleep(5)
postgres_connection = connect_to_db()
# Load User file in to a Dataframe
users_df = pd.read_csv(environ['CSV_FILE'])
# Split id column in to components
users_df = pd.concat([users_df,
users_df['id'].apply(lambda x: pd.Series({'user_id': int(x.split(' ')[0]),
'last_four': x.split(' ')[1]}))], axis=1)
# Set User Id and last four as index
users_df.set_index(['user_id', 'last_four'], inplace=True)
# Set type for date column
users_df['visit_date'] = pd.to_datetime(arg=users_df['visit_date'])
logger.info("Attempting to create table %s", table_name)
# Create Table in Postgres DB
results = users_df[['first_name', 'last_name', 'age', 'gender', 'visit_date']].to_sql(table_name,
postgres_connection,
if_exists='replace')
logger.info("Results: %s", results)
postgres_connection.close()
logger.info("Connection to DB Closed")
if __name__ == '__main__':
try:
load_csv()
except:
logger.error(traceback.format_exc()) | [
"martin.valenzuela@bazaarvoice.com"
] | martin.valenzuela@bazaarvoice.com |
83060bc4714b82ea8cc3e7bba7f4ce4411fdd011 | bc10a5df6d3f7e59cea6bf364ac614d39d04ac75 | /submodule/template_lib/pytorch/fgsm_tutorial.py | 45008790ddb86792173526de3d373db99817e2af | [
"MIT"
] | permissive | AnonymousGFR/wbgan.pytorch | f65efdb1f5a6678b7344a6e4186f6e78d446d53c | d75cb6599852e901df0136db87520e3314f8ca71 | refs/heads/master | 2023-04-14T06:22:34.542167 | 2019-10-08T14:28:46 | 2019-10-08T14:28:46 | 210,862,958 | 1 | 0 | MIT | 2023-03-24T23:08:30 | 2019-09-25T14:17:04 | Python | UTF-8 | Python | false | false | 5,192 | py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
outdir = 'results/fgsm'
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = f"{outdir}/data/lenet_mnist_model.pth"
use_cuda = True
# LeNet Model definition
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# MNIST Test dataset and dataloader declaration
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(f'{outdir}/data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
])),
batch_size=1, shuffle=True)
# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device(
"cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
# Initialize the network
model = Net().to(device)
# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
# Collect the element-wise sign of the data gradient
sign_data_grad = data_grad.sign()
# Create the perturbed image by adjusting each pixel of the input image
perturbed_image = image + epsilon * sign_data_grad
# Adding clipping to maintain [0,1] range
perturbed_image = torch.clamp(perturbed_image, 0, 1)
# Return the perturbed image
return perturbed_image
def test(model, device, test_loader, epsilon):
# Accuracy counter
correct = 0
adv_examples = []
# Loop over all examples in test set
for data, target in test_loader:
# Send the data and label to the device
data, target = data.to(device), target.to(device)
# Set requires_grad attribute of tensor. Important for Attack
data.requires_grad = True
# Forward pass the data through the model
output = model(data)
init_pred = output.max(1, keepdim=True)[
1] # get the index of the max log-probability
# If the initial prediction is wrong, dont bother attacking, just move on
if init_pred.item() != target.item():
continue
# Calculate the loss
loss = F.nll_loss(output, target)
# Zero all existing gradients
model.zero_grad()
# Calculate gradients of model in backward pass
loss.backward()
# Collect datagrad
data_grad = data.grad.data
# Call FGSM Attack
perturbed_data = fgsm_attack(data, epsilon, data_grad)
# Re-classify the perturbed image
output = model(perturbed_data)
# Check for success
final_pred = output.max(1, keepdim=True)[
1] # get the index of the max log-probability
if final_pred.item() == target.item():
correct += 1
# Special case for saving 0 epsilon examples
if (epsilon == 0) and (len(adv_examples) < 5):
adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
else:
# Save some adv examples for visualization later
if len(adv_examples) < 5:
adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
# Calculate final accuracy for this epsilon
final_acc = correct / float(len(test_loader))
print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct,
len(test_loader),
final_acc))
# Return the accuracy and an adversarial example
return final_acc, adv_examples
accuracies = []
examples = []
# Run test for each epsilon
for eps in epsilons:
acc, ex = test(model, device, test_loader, eps)
accuracies.append(acc)
examples.append(ex)
plt.figure(figsize=(5, 5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples of adversarial samples at each epsilon
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
for j in range(len(examples[i])):
cnt += 1
plt.subplot(len(epsilons), len(examples[0]), cnt)
plt.xticks([], [])
plt.yticks([], [])
if j == 0:
plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
orig, adv, ex = examples[i][j]
plt.title("{} -> {}".format(orig, adv))
plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
| [
"you@example.com"
] | you@example.com |
ed8fd8a7e250c616bfaaad5bd06f39a58258aad1 | e6d862a9df10dccfa88856cf16951de8e0eeff2b | /VMS/core/python/setup.py | 145c7ec74b08da59c3c13b1c966f6979da01ad6e | [] | no_license | AllocateSoftware/API-Stubs | c3de123626f831b2bd37aba25050c01746f5e560 | f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0 | refs/heads/master | 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 | C# | UTF-8 | Python | false | false | 1,110 | py | # coding: utf-8
"""
VMS API
## Description API to be impemented by VMS systems for integration into HealthSuite business processes # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@allocatesoftware.com
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "api-server"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="VMS API",
author="API support",
author_email="support@allocatesoftware.com",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "VMS API"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
long_description="""\
## Description API to be impemented by VMS systems for integration into HealthSuite business processes # noqa: E501
"""
)
| [
"nigel.magnay@gmail.com"
] | nigel.magnay@gmail.com |
6f55d1a15017e4db87d7d41770a35d0bedbffe61 | 31fd6c73c37b5065221427d8272e92afc5a79a53 | /src/uniform-tilings/coxeter/integer.py | afcbbb6ec064ff1d47a0d7422bfb08a8eff48433 | [
"MIT"
] | permissive | GenerousMan/pywonderland | 2b28d59aea65fc402dcbea8be585c4e30c7fed3b | 28b7611066fde2590ab9f60971248eb47809ec3e | refs/heads/master | 2020-12-28T09:07:27.086833 | 2020-02-04T11:48:08 | 2020-02-04T11:48:08 | 238,259,252 | 1 | 0 | MIT | 2020-02-04T17:04:20 | 2020-02-04T17:04:19 | null | UTF-8 | Python | false | false | 785 | py | """
Helper functions for integer arithmetic.
"""
from collections import defaultdict
def lcm(m, n):
if m * n == 0:
return 0
q, r = m, n
while r != 0:
q, r = r, q % r
return abs((m * n) // q)
def decompose(n):
"""Decompose an integer `n` into a product of primes.
The result is stored in a dict {prime: exponent}.
This function is used for generating cyclotomic polynomials.
"""
n = abs(n)
primes = defaultdict(int)
# factor 2
while n % 2 == 0:
primes[2] += 1
n = n // 2
# odd prime factors
for i in range(3, int(n**0.5) + 1, 2):
while n % i == 0:
primes[i] += 1
n = n // i
# if n itself is prime
if n > 2:
primes[n] += 1
return primes
| [
"mathzhaoliang@gmail.com"
] | mathzhaoliang@gmail.com |
fce36d11f95cef58d768edb14803b464408f7355 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/shared_code/central_comp/nonfatal/epic/workflow.py | 14eeadc18f3575e30003dbd499a1b7f2bb582802 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,126 | py | import os
import datetime
import getpass
import json
import logging
import shutil
import numpy as np
import networkx as nx
import pandas as pd
from dataframe_io.io_queue import RedisServer
from db_queries import get_demographics
from gbd_artifacts.severity_prop import SeverityPropMetadata
import gbd.constants as gbd
from hierarchies.dbtrees import loctree
from jobmon.client.swarm.workflow.workflow import Workflow
from jobmon.client.swarm.workflow.python_task import PythonTask
from epic.maps.create_digraph import CombineMaps
from epic.maps.generators.generators import generate_maps
from epic.tasks.split_task import SevSplitTaskFactory
from epic.tasks.ex_adjust_task import ExAdjustFactory
from epic.tasks.super_squeeze_task import SuperSqueezeFactory
from epic.tasks.save_task import SaveFactory
from epic.util.common import get_dependencies, validate_decomp_step
from epic.util.constants import DAG, FilePaths, Params
class EpicWorkFlow(object):
    """Builds and runs the jobmon workflow for one EPIC (nonfatal) run.

    Walks the process graph produced by the map builder in topological order
    and registers one task-factory-built task per node (severity split,
    exclusivity adjust, or super squeeze), each followed by save tasks.
    """

    # NOTE(review): "FILEPATH"/'PATH' look like scrubbed placeholders from the
    # public release of this code; confirm real paths before running.
    CODE_DIR = os.path.dirname(os.path.realpath(__file__))
    USERNAME = getpass.getuser()
    DATA_DIR = "FILEPATH"
    LOG_DIR = os.path.join('PATH', USERNAME)
    YEAR_IDS = [1990, 1995, 2000, 2005, 2010, 2015, 2017, 2019]
    N_DRAWS = 1000

    def __init__(self, version, mapbuilder, decomp_step, gbd_round_id, resume):
        """Set up directories, factories, the jobmon Workflow, and all tasks.

        :param version: run version; becomes a subdirectory of DATA_DIR
        :param mapbuilder: CombineMaps instance providing the process graph
        :param decomp_step: GBD decomp step string (validated below)
        :param gbd_round_id: GBD round id
        :param resume: when True, reuse existing outputs and resume jobmon
        """
        # validate decomp_step
        validate_decomp_step("EPIC", decomp_step, gbd_round_id)

        self.DATA_DIR = os.path.join(self.DATA_DIR, str(version))

        if not os.path.exists(self.DATA_DIR):
            os.makedirs(self.DATA_DIR)
            os.makedirs(
                os.path.join(self.DATA_DIR, FilePaths.INPUT_FILES_DIR)
            )

        self.decomp_step = decomp_step
        self.gbd_round_id = gbd_round_id
        self.resume = resume

        # create epic json map
        self.emap = mapbuilder.downstream_only("como")

        # instantiate the factories
        self._task_registry = {}
        self._sev_split_fac = SevSplitTaskFactory(self._task_registry)
        self._ex_adjust_fac = ExAdjustFactory(self._task_registry)
        self._super_squeeze_fac = SuperSqueezeFactory(self._task_registry)
        self._save_fac = SaveFactory()

        self.workflow = Workflow(
            workflow_args="epic_v{version}".format(version=version),
            name="EPIC Central Machinery",
            project=DAG.Tasks.PROJECT,
            stdout=os.path.join(self.LOG_DIR, "output"),
            stderr=os.path.join(self.LOG_DIR, "errors"),
            resume=resume,
            seconds_until_timeout=435600
        )

        if not resume:
            # Save best input models as csv for posting to EPIC tracker HUB
            # page then separate into individual json files for use in
            # downstream scripts. Take care that downstream processes do not
            # pick up a model_version_id from a previous run. Only
            # collect the best models once per run so we know exactly what
            # was a available at the start of the run and what was
            # consequently used in the rest of the workflow
            best_models = mapbuilder.best_models
            inputs = [int(x) for x in mapbuilder.inputs]
            best_models = best_models.loc[
                best_models[Params.MODELABLE_ENTITY_ID].isin(inputs)
            ]
            best_models.to_csv(
                os.path.join(
                    self.DATA_DIR,
                    FilePaths.INPUT_FILES_DIR,
                    FilePaths.BEST_MODELS_FILE_PATTERN
                ),
                index=False,
                encoding="utf8"
            )
            for index, row in best_models.iterrows():
                SaveFactory.save_model_metadata(
                    self.DATA_DIR,
                    row.modelable_entity_id,
                    row.model_version_id,
                    row.decomp_step
                )

        # dispatch table: process-node name fragment -> task builder
        self._task_map = {
            DAG.Tasks.SPLIT: self._add_sev_split_task,
            DAG.Tasks.SUPER_SQUEEZE: self._add_super_squeeze_task,
            DAG.Tasks.EX_ADJUST: self._add_ex_adjust_task
        }

        # run every process in the pipeline regardless of whether or not
        # there is already a model saved
        self.pgraph = mapbuilder.P

        # get process nodes and build out jobmon workflow
        # create a subgraph from the process nodes
        top_sort = nx.topological_sort(self.pgraph)
        for node in top_sort:
            if node == mapbuilder.start_node:
                pass
            elif DAG.Tasks.SPLIT in node:
                self._task_map[DAG.Tasks.SPLIT](node)
            elif DAG.Tasks.SUPER_SQUEEZE in node:
                self._task_map[DAG.Tasks.SUPER_SQUEEZE](node)
            else:
                self._task_map[DAG.Tasks.EX_ADJUST](node)

    def _create_output_directories(self, meid_list):
        """Create one output directory per modelable entity id.

        On a fresh (non-resume) run an existing directory is wiped first so
        stale draws cannot leak into the new version.
        """
        for meid in meid_list:
            directory = os.path.join(self.DATA_DIR, str(meid))

            if os.path.exists(directory) and not self.resume:
                shutil.rmtree(directory)
                os.makedirs(directory)
            elif os.path.exists(directory) and self.resume:
                logging.info(
                    f"Directory exists for modelable_entity_id {meid} "
                    f"and resume is {self.resume}. Do not delete anything. "
                    f"Continue workflow."
                )
            else:
                os.makedirs(directory)

    def _add_sev_split_task(self, node):
        """Add one severity-split task plus save tasks for its children."""
        logging.info(f"Adding {node} task")
        split_map = self.emap[node]
        split_id = int(split_map["kwargs"]["split_id"])
        split_meta = SeverityPropMetadata(split_id=split_id,
            decomp_step=self.decomp_step, gbd_round_id=self.gbd_round_id)
        split_version_id = split_meta.best_version
        meta_version = split_meta.get_metadata_version(split_version_id)
        parent_meid = int(meta_version.parent_meid())
        children_meids = [int(x) for x in meta_version.child_meid().split(",")]

        # make output directories
        self._create_output_directories(children_meids)

        split_task = self._sev_split_fac.get_task(
            node=node,
            process_graph=self.pgraph,
            split_version_id=split_version_id,
            output_dir=self.DATA_DIR,
            decomp_step=self.decomp_step,
            year_id=self.YEAR_IDS,
            n_draws=self.N_DRAWS)
        self.workflow.add_task(split_task)
        self._task_registry[
            SevSplitTaskFactory.get_task_name(node)] = split_task
        description = (
            f"Central_severity_split_{Params.DESCRIPTION_MAP[self.N_DRAWS]}"
        )
        for meid in children_meids:
            measure_id = [gbd.measures.PREVALENCE, gbd.measures.INCIDENCE]
            self._add_save_task(meid, "{location_id}.h5",
                                description, measure_id,
                                self.YEAR_IDS, self.N_DRAWS,
                                split_task)

    def _add_save_task(self, meid, input_file_pattern, description,
                       measure_id, year_id, n_draws, upstream_task):
        """Register a save-results task for *meid* downstream of its producer(s).

        *upstream_task* may be a single task or a list of tasks.
        """
        logging.info(f"Adding {meid} save task")
        args = {
            Params.PARENT_DIR: self.DATA_DIR,
            Params.INPUT_DIR: os.path.join(self.DATA_DIR, str(meid)),
            Params.INPUT_FILE_PATTERN: input_file_pattern,
            Params.MODELABLE_ENTITY_ID: meid,
            Params.DESCRIPTION: description,
            Params.MEASURE_ID: measure_id,
            Params.YEAR_ID: year_id,
            Params.DECOMP_STEP: self.decomp_step,
            Params.N_DRAWS: n_draws
        }
        save_task = self._save_fac.get_task(**args)
        # np.atleast_1d lets callers pass one task or a list uniformly
        for upt in list(np.atleast_1d(upstream_task)):
            save_task.add_upstream(upt)
        self.workflow.add_task(save_task)
        self._task_registry[SaveFactory.get_task_name(meid)] = save_task

    def _add_ex_adjust_task(self, node):
        """Add one exclusivity-adjustment task plus save tasks for its outputs."""
        logging.info(f"Adding {node} task")
        # compile submission arguments
        kwargs = self.emap[node]["kwargs"]
        try:
            copy_env_inc = kwargs.pop("copy_env_inc")
            measure_id = [
                gbd.measures.PREVALENCE, gbd.measures.INCIDENCE]
        except KeyError:
            # no copy_env_inc flag in the map: prevalence only
            copy_env_inc = False
            measure_id = [gbd.measures.PREVALENCE]

        # make output directories
        self._create_output_directories(self.pgraph.nodes[node]["outs"])

        ex_adj_task = self._ex_adjust_fac.get_task(
            node=node,
            process_graph=self.pgraph,
            output_dir=self.DATA_DIR,
            decomp_step=self.decomp_step,
            year_id=self.YEAR_IDS,
            n_draws=self.N_DRAWS)
        self.workflow.add_task(ex_adj_task)
        self._task_registry[ExAdjustFactory.get_task_name(node)] = ex_adj_task
        description = (
            f"Exclusivity_adjustment_auto_mark_"
            f"{Params.DESCRIPTION_MAP[self.N_DRAWS]}"
        )
        for meid in self.pgraph.nodes[node]["outs"]:
            self._add_save_task(meid, "{location_id}.h5",
                                description, measure_id,
                                self.YEAR_IDS, self.N_DRAWS,
                                ex_adj_task)

    def _add_super_squeeze_task(self, node):
        """Add super-squeeze tasks (one per location/year/sex) plus save tasks."""
        logging.info(f"Adding {node} task")

        # make output directories
        self._create_output_directories(self.pgraph.nodes[node]["outs"])

        # get dependency_list before parallelizing since the
        # dependencies are the same for each parallelized demographic
        dep_list = get_dependencies(node, self.pgraph, self._task_registry)

        epi_demo = get_demographics("epi", gbd_round_id=self.gbd_round_id)
        for location_id in epi_demo[Params.LOCATION_ID]:
            for year_id in self.YEAR_IDS:
                for sex_id in epi_demo[Params.SEX_ID]:
                    ss_task = self._super_squeeze_fac.get_task(
                        node=node,
                        output_dir=self.DATA_DIR,
                        location_id=location_id,
                        year_id=year_id,
                        sex_id=sex_id,
                        decomp_step=self.decomp_step,
                        n_draws=self.N_DRAWS,
                        dependency_list=dep_list)
                    self.workflow.add_task(ss_task)
                    self._task_registry[
                        SuperSqueezeFactory.get_task_name(
                            node, location_id, year_id, sex_id)] = ss_task

        # every registered super-squeeze task is upstream of the save tasks
        ss_upstream = [
            self._task_registry[t] for t in list(
                self._task_registry.keys()) if DAG.Tasks.SUPER_SQUEEZE in t]
        description = (
            f"Super_Squeeze_auto_mark_{Params.DESCRIPTION_MAP[self.N_DRAWS]}"
        )
        measure_id = [gbd.measures.PREVALENCE]
        for meid in self.pgraph.nodes[node]["outs"]:
            self._add_save_task(
                meid, "{location_id}/{measure_id}_{year_id}_{sex_id}.h5",
                description, measure_id,
                self.YEAR_IDS, self.N_DRAWS,
                ss_upstream)
if __name__ == "__main__":
    # Manual creation of version for now.
    VERSION = 8
    DECOMP_STEP = "step4"
    GBDRID = 6
    RESUME = False

    # Regenerate the COMO / severity-split / super-squeeze maps, then combine
    # them into one process graph and hand it to the workflow.
    logging.info("Generating COMO, Severity Split, and Super Squeeze maps")
    generate_maps(decomp_step=DECOMP_STEP, gbd_round_id=GBDRID)
    logging.info("Maps created and stored in code directory!")
    logging.info("Combining all maps")
    cm = CombineMaps(
        decomp_step=DECOMP_STEP,
        gbd_round_id=GBDRID,
        include_como=False
    )
    ewf = EpicWorkFlow(VERSION, cm, DECOMP_STEP, GBDRID, RESUME)
    # Blocks until the jobmon workflow finishes; success is the exit status.
    success = ewf.workflow.run()
| [
"cheth@uw.edu"
] | cheth@uw.edu |
f6a8ae1541c9a1aea0034d32f065444a664ebed5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/111/22852/submittedfiles/av2_p3_civil.py | 1f8bc8ba880d7c45429f3ca8df20c55e347dfe9a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def peso(a, x, y):
    """Return the sum of the trailing submatrix of *a* anchored at (x, y),
    excluding the anchor element itself.

    NOTE(review): x and y are taken as 1-indexed coordinates (the script
    prompts for "posicao x"/"posicao y"); confirm against the assignment.

    Fixes over the original: the two `if x==a[i+1]` lines were missing colons
    and compared a scalar to a whole matrix row, and the summation loops ran
    to shape+1, indexing out of bounds.
    """
    c = x - 1
    d = y - 1
    soma = 0
    for i in range(c, a.shape[0]):
        for j in range(d, a.shape[1]):
            soma = soma + a[i, j]
    # exclude the anchor element from the total
    return soma - a[c, d]
linhas=input('Digite a quantidade de linhas: ')
colunas=input('Digite a quantidade de colunas: ')
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
a[i,j]=input('Digite os elementos da matriz: ')
n1=input('Digite o valor da posição x: ')
n2=input('Digite o valor da posição y: ')
matriz=a[i,j]
resultado=peso(a,x,y)
print matriz
print resultado
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e7433943b06f1d78b9a7fd9a4c54496f2fd4de75 | 6987dfe763caed29613ae255c00821ffb25f44c9 | /utils/ltp/ltp_xml.py | b9c9c1205fdcd7370b8f2fc55893503643bb2607 | [] | no_license | sapnajayavel/FakeReview-Detector | 70a4f9decc7214f796f362b1e53736dd4c3ef71f | ab3ebef3cc0bb6c483c18d39bff1277d2e98c9c0 | refs/heads/master | 2021-01-22T13:30:58.090821 | 2013-06-16T23:42:37 | 2013-06-16T23:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python2.7
#encoding=utf-8
"""
"""
from xml.dom.minidom import parse,parseString
class ltpXML:
    """Thin wrapper around an LTP dependency-parse XML result.

    Eagerly extracts the word surface forms (``cont``) and dependency
    relations (``relate``) from every ``<word>`` element of *xml_str*.
    """

    def __init__(self, sentence, xml_str):
        self.sentence = sentence
        self.xml_str = xml_str
        self.relate_list = self.get_relate_list()
        self.word_list = self.get_word_list()

    def get_clause_relate_path(self):
        """Join the relation labels with '@', without a trailing separator."""
        pieces = [label + "@" for label in self.relate_list]
        return "".join(pieces).rstrip("@")

    def get_word_list(self):
        """Return the ``cont`` attribute of every <word> element."""
        words = []
        for node in parseString(self.xml_str).getElementsByTagName("word"):
            words.append(node.getAttribute("cont"))
        return words

    def get_relate_list(self):
        """Return the ``relate`` attribute of every <word> element."""
        relations = []
        for node in parseString(self.xml_str).getElementsByTagName("word"):
            relations.append(node.getAttribute("relate"))
        return relations
| [
"yangxue00.yxmn@gmail.com"
] | yangxue00.yxmn@gmail.com |
a063e399690c0d581577baa5acc8bb0f037038f6 | 698c383b77111d53495268b64d45f39ffcba126d | /homebooks/accounts/utils/make_user_yaml.py | dd16087b243bf1cb875dca3c6c2651217cc629e0 | [] | no_license | noname2048/ab-homebooks-django | 5f42080d17c9e16a68210766bab11cff56ef74dd | 702e2957ea48eb4fdee8d00558d2ded1434e170d | refs/heads/main | 2023-07-07T15:28:36.000023 | 2021-08-10T12:27:46 | 2021-08-10T12:27:46 | 363,648,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import yaml
from faker import Factory
from yaml import load, dump
from pathlib import Path
# Fixture file lives next to this script. Bug fixes versus the original:
# Path(__file__).resolve() is the script *file*, so joining a filename onto it
# never exists -> use .parent; the else-branch opened the file with mode "w",
# truncating the fixture before trying to load it; and yaml.load takes
# `Loader=` (capital L), not `loader=`.
USERS_FILE = Path(__file__).resolve().parent / "test_users.yaml"

# Prefer the C-accelerated loader/dumper when libyaml is available.
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

if not USERS_FILE.exists():
    # Generate 100 deterministic fake users and persist them.
    fake = Factory.create("en_US")
    fake.seed(1)  # NOTE(review): newer Faker uses Faker.seed(); confirm version
    user_list = [{"username": fake.user_name(), "email": fake.email()}
                 for _ in range(100)]
    with USERS_FILE.open("w") as f:
        dump(user_list, f, Dumper=Dumper)
else:
    with USERS_FILE.open("r") as f:
        user_list = load(f, Loader=Loader)
    print(user_list)
| [
"sungwook.csw@gmail.com"
] | sungwook.csw@gmail.com |
40058b6bf2a6a6dbfac1e5619d0b93b834b79775 | 1bb2a9150de01c618163bbb8f872bdce6f14df4f | /Puzzle/030_set_tap.py | f618728840144e90b9c2fb12dd4b874157cd6e85 | [] | no_license | whyj107/Algorithm | a1c9a49a12a067366bd0f93abf9fa35ebd62102e | aca83908cee49ba638bef906087ab3559b36b146 | refs/heads/master | 2023-04-14T12:59:52.761752 | 2021-05-01T03:53:31 | 2021-05-01T03:53:31 | 240,014,212 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | # 멀티 탭으로 만든 문어 다리 배선
# 문제
# n=20일 때 몇 가지의 멀티 탭 배치를 생각할 수 있는지 구해 보세요
# (단, 전원 용량은 생각하지 않기로 한다.)
N = 20
def set_tap1(remain):
    """Count the distinct power-strip trees wiring *remain* outlets.

    An arrangement is either a single outlet, a 2-socket strip feeding two
    sub-arrangements, or a 3-socket strip feeding three. Equal-sized parts
    use multiset coefficients so mirrored wirings are not double counted.
    This is the unmemoised variant; see set_tap2 for the memoised one.

    Bug fix: the all-parts-equal test used `remain - (i - j) == i`, which can
    never hold for a valid 3-way split, so the triple-equal case fell through
    to the pair formula and overcounted (first wrong at remain == 9).
    """
    if remain == 1:
        return 1
    cnt = 0
    # 2-socket strips: split into parts i <= remain - i
    for i in range(1, remain // 2 + 1):
        if remain - i == i:
            # identical halves: multiset of 2 shapes -> C(f+1, 2)
            cnt += set_tap1(i) * (set_tap1(i) + 1) // 2
        else:
            cnt += set_tap1(remain - i) * set_tap1(i)
    # 3-socket strips: parts i <= j <= k with i + j + k == remain
    for i in range(1, remain // 3 + 1):
        for j in range(i, (remain - i) // 2 + 1):
            k = remain - (i + j)
            if k == i and i == j:
                # all three parts equal: multiset of 3 -> C(f+2, 3)
                cnt += set_tap1(i) * (set_tap1(i) + 1) * (set_tap1(i) + 2) // 6
            elif k == i:
                cnt += set_tap1(i) * (set_tap1(i) + 1) * set_tap1(j) // 2
            elif i == j:
                cnt += set_tap1(k) * set_tap1(i) * (set_tap1(i) + 1) // 2
            elif k == j:
                cnt += set_tap1(j) * (set_tap1(j) + 1) * set_tap1(i) // 2
            else:
                cnt += set_tap1(k) * set_tap1(j) * set_tap1(i)
    return cnt
#print(set_tap1(N))
# Cache of already-computed counts; a lone outlet has exactly one wiring.
memo = {1: 1}


def set_tap2(remain):
    """Memoised count of the distinct power-strip trees for *remain* outlets."""
    if remain in memo:
        return memo[remain]
    total = 0
    # 2-socket strips: unordered split (small, large)
    for small in range(1, remain // 2 + 1):
        large = remain - small
        if large == small:
            ways = set_tap2(small)
            total += ways * (ways + 1) // 2      # multiset of 2 equal parts
        else:
            total += set_tap2(large) * set_tap2(small)
    # 3-socket strips: unordered parts a <= b <= c
    for a in range(1, remain // 3 + 1):
        for b in range(a, (remain - a) // 2 + 1):
            c = remain - a - b
            fa, fb, fc = set_tap2(a), set_tap2(b), set_tap2(c)
            if a == b == c:
                total += fa * (fa + 1) * (fa + 2) // 6   # multiset of 3
            elif c == a:
                total += fa * (fa + 1) * fb // 2
            elif a == b:
                total += fc * fa * (fa + 1) // 2
            elif c == b:
                total += fb * (fb + 1) * fa // 2
            else:
                total += fc * fb * fa
    memo[remain] = total
    return total
# Entry point: print the memoised count for N outlets.
print(set_tap2(N))
| [
"60024292+whyj107@users.noreply.github.com"
] | 60024292+whyj107@users.noreply.github.com |
174d29bde1166d77b5ea8428c53b1229672719c3 | 3cc2f47de6d78d610a2887f92bfba150b2994888 | /application/utils/helper.py | a55fae66e681c0efa5cc87d0be94b0835edd7f75 | [] | no_license | fengliu222/blogbar | c8a66df586187d0a16063e4536e76d155863fe17 | ff6e7182f000777112101eed12ae9e2ca4298d25 | refs/heads/master | 2021-01-15T08:59:51.478354 | 2014-12-20T08:13:53 | 2014-12-20T08:13:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # coding: utf-8
def parse_int(integer, default=None):
"""提取整数,若失败则返回default值"""
try:
return int(integer)
except Exception, e:
return default
| [
"hustlzp@qq.com"
] | hustlzp@qq.com |
5489fd638a7f97687db13c5888518bf9f2fde953 | 6206ad73052b5ff1b6690c225f000f9c31aa4ff7 | /Code/Successful Pairs of Spells and Potions.py | 616d663a7d534aef45f065602b19b50c662efbad | [] | no_license | mws19901118/Leetcode | 7f9e3694cb8f0937d82b6e1e12127ce5073f4df0 | 752ac00bea40be1e3794d80aa7b2be58c0a548f6 | refs/heads/master | 2023-09-01T10:35:52.389899 | 2023-09-01T03:37:22 | 2023-09-01T03:37:22 | 21,467,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | class Solution:
def successfulPairs(self, spells: List[int], potions: List[int], success: int) -> List[int]:
potions.sort() #Sort potions in asending order.
return [len(potions) - bisect_left(potions, ceil(success / x)) for x in spells] #For each spell x, binary search the index to insert ceil(success / x) in potions, and all the potions at index or to the right can form a success pair with x. Let's say it's y and y >= ceil(success / x), then x * y >= x * ceil(success / x) >= x * success / x = success.
| [
"noreply@github.com"
] | mws19901118.noreply@github.com |
5dc8c4505c10729f9eba9ecc3c9bdcb476c5dbda | 43e0cfda9c2ac5be1123f50723a79da1dd56195f | /python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py | 12546ea463a84ac9d8defa21eb099c6346b2678d | [
"Apache-2.0"
] | permissive | jiangjiajun/Paddle | 837f5a36e868a3c21006f5f7bb824055edae671f | 9b35f03572867bbca056da93698f36035106c1f3 | refs/heads/develop | 2022-08-23T11:12:04.503753 | 2022-08-11T14:40:07 | 2022-08-11T14:40:07 | 426,936,577 | 0 | 0 | Apache-2.0 | 2022-02-17T03:43:19 | 2021-11-11T09:09:28 | Python | UTF-8 | Python | false | false | 5,489 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from operator import __add__, __sub__, __mul__, __truediv__
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
op_list = [__add__, __sub__, __mul__, __truediv__]
def get_actual_res(x, y, op):
    """Apply the sparse counterpart of the dense operator *op* to x and y."""
    dispatch = {
        __add__: paddle.incubate.sparse.add,
        __sub__: paddle.incubate.sparse.subtract,
        __mul__: paddle.incubate.sparse.multiply,
        __truediv__: paddle.incubate.sparse.divide,
    }
    if op not in dispatch:
        raise ValueError("unsupported op")
    return dispatch[op](x, y)
class TestSparseElementWiseAPI(unittest.TestCase):
    """
    test paddle.sparse.add, subtract, multiply, divide
    """

    def setUp(self):
        # retain grads on intermediate tensors so the sparse grads can be
        # compared against the dense reference below
        paddle.fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        np.random.seed(2022)
        self.op_list = op_list
        self.csr_shape = [128, 256]
        self.coo_shape = [4, 8, 3, 5]
        self.support_dtypes = ['float32', 'float64', 'int32', 'int64']

    def func_test_csr(self, op):
        """Check CSR forward values and gradients against the dense op."""
        for dtype in self.support_dtypes:
            x = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)
            y = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)

            # separate tensors for the dense reference and the sparse path so
            # each accumulates its own gradients
            dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
            dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)

            s_dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
            s_dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)
            csr_x = s_dense_x.to_sparse_csr()
            csr_y = s_dense_y.to_sparse_csr()

            actual_res = get_actual_res(csr_x, csr_y, op)
            actual_res.backward(actual_res)

            expect_res = op(dense_x, dense_y)
            expect_res.backward(expect_res)

            self.assertTrue(
                np.allclose(expect_res.numpy(),
                            actual_res.to_dense().numpy(),
                            equal_nan=True))
            # integer truediv has no meaningful gradient; skip grad checks
            if not (op == __truediv__ and dtype in ['int32', 'int64']):
                self.assertTrue(
                    np.allclose(dense_x.grad.numpy(),
                                csr_x.grad.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_y.grad.numpy(),
                                csr_y.grad.to_dense().numpy(),
                                equal_nan=True))

    def func_test_coo(self, op):
        """Check COO forward values and gradients against the dense op."""
        for sparse_dim in range(len(self.coo_shape) - 1, len(self.coo_shape)):
            for dtype in self.support_dtypes:
                x = np.random.randint(-255, 255,
                                      size=self.coo_shape).astype(dtype)
                y = np.random.randint(-255, 255,
                                      size=self.coo_shape).astype(dtype)

                dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
                dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)

                s_dense_x = paddle.to_tensor(x,
                                             dtype=dtype,
                                             stop_gradient=False)
                s_dense_y = paddle.to_tensor(y,
                                             dtype=dtype,
                                             stop_gradient=False)
                coo_x = s_dense_x.to_sparse_coo(sparse_dim)
                coo_y = s_dense_y.to_sparse_coo(sparse_dim)

                actual_res = get_actual_res(coo_x, coo_y, op)
                actual_res.backward(actual_res)

                expect_res = op(dense_x, dense_y)
                expect_res.backward(expect_res)

                self.assertTrue(
                    np.allclose(expect_res.numpy(),
                                actual_res.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_x.grad.numpy(),
                                coo_x.grad.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_y.grad.numpy(),
                                coo_y.grad.to_dense().numpy(),
                                equal_nan=True))

    def test_support_dtypes_csr(self):
        # CPU-only: sparse CSR kernels are exercised on the CPU device here
        paddle.device.set_device('cpu')
        if paddle.device.get_device() == "cpu":
            for op in op_list:
                self.func_test_csr(op)

    def test_support_dtypes_coo(self):
        # CPU-only: sparse COO kernels are exercised on the CPU device here
        paddle.device.set_device('cpu')
        if paddle.device.get_device() == "cpu":
            for op in op_list:
                self.func_test_coo(op)
if __name__ == "__main__":
    # Pin to CPU before unittest discovery runs the suite.
    paddle.device.set_device('cpu')
    unittest.main()
| [
"noreply@github.com"
] | jiangjiajun.noreply@github.com |
4946b6aece4fd26a815a69b1f0b8b0e7d424b1f3 | 843ca2944c5a92dc3d13d132c34545d3b18a9eb9 | /scripts/cell/interfaces/Avatar/DialogSystem.py | fc3f3e99bb9671019e60d6fc59f6cc818c81ed2c | [] | no_license | m969/wanfadalu_server | 550dac97795bfc15c1d47f0c18bfab9c18fe6a76 | 08f5bec5ed4a17f4f1fcc40a27311aa03906e5ef | refs/heads/master | 2021-07-03T10:21:21.331204 | 2020-08-14T14:56:42 | 2020-08-14T14:56:42 | 138,024,157 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | # -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import GlobalConst
import PyDatas.level_data as level_data
import PyDatas.dialog_config_Table as dialog_config_Table
from DIALOG_ITEM import TDialogItem
from DIALOG_ITEM import TDialogItemList
# Generated dialog-config rows, keyed by dialog id.
dialogDatas = dialog_config_Table.datas

# Maps the numeric "type" field of a dialog row to a handler-class name; used
# only by the commented-out eval dispatch in selectDialogItem below.
dialogTypeMap = {
    1: "Arena",
    2: "Store",
    3: "Sect",
    4: "Task",
}
class DialogSystem:
    """Cell-side avatar mixin handling NPC dialogs (KBEngine interface class).

    Methods taking an *exposed* argument are client-callable; KBEngine passes
    the caller's entity id, which is checked against self.id to reject calls
    made on someone else's behalf.
    """

    def __init__(self):
        # DEBUG_MSG("DialogSystem:__init__")
        pass

    def onTimer(self, timerHandle, userData):
        # no timers used by this interface
        pass

    def requestBuyGoods(self, exposed, spaceID, npcName, goodsID):
        """Client request to buy goods from an NPC (body not implemented yet)."""
        DEBUG_MSG("DialogSystem:requestBuyGoods")
        if exposed != self.id:
            return

    def giveGoods(self, goodsID):
        # stub: grant an item to the avatar
        DEBUG_MSG("DialogSystem:giveGoods")

    def deleteGoods(self, goodsID):
        """
        Delete an item from the backpack (inventory).
        """
        DEBUG_MSG("DialogSystem:deleteGoods")

    def deductMoney(self, num):
        # stub; log tag says "getMoney" -- presumably copy/paste leftover
        DEBUG_MSG("getMoney")

    def requestDialog(self, exposed, npcEntityID):
        """Client request to open a dialog with the NPC entity *npcEntityID*.

        Builds a TDialogItemList whose contents depend on the NPC type and
        sends it back via client.OnDialogItemsReturn. Item id 0 is the
        "decline" option; non-zero ids are handled in selectDialogItem.
        """
        DEBUG_MSG("DialogSystem:requestDialog")
        if exposed != self.id:
            return
        npc = KBEngine.entities.get(npcEntityID)
        if npc is None:
            return
        # remember which NPC this dialog belongs to for selectDialogItem
        self.dialogNpc = npc
        dialogItems = TDialogItemList()
        dialogItems["values"] = []
        dialogItems["npcName"] = ""
        dialogItems["npcDialog"] = ""
        if npc.npcType == GlobalConst.NpcType_Arena:
            dialogItems["npcName"] = "守擂人"
            dialogItems["npcDialog"] = "你要上擂台吗?"
            item = TDialogItem()
            item["id"] = 1001
            item["content"] = "我要上擂台"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,怂"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)
        elif npc.npcType == GlobalConst.NpcType_Store:
            dialogItems["npcName"] = "商人"
            dialogItems["npcDialog"] = "你要购买道具吗?"
            item = TDialogItem()
            item["id"] = 1002
            item["content"] = "我要购买道具"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,穷"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)
        elif npc.npcType == GlobalConst.NpcType_Sect:
            dialogItems["npcName"] = "守宗人"
            dialogItems["npcDialog"] = "你要加入云灵宗吗?"
            item = TDialogItem()
            item["id"] = 1002
            item["content"] = "我要加入宗门"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,流浪挺好"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)

    def selectDialogItem(self, exposed, dialogID):
        """Client picked dialog option *dialogID*; dispatch on the NPC type.

        dialogID 0 is the decline option and does nothing.
        """
        DEBUG_MSG("DialogSystem:selectDialogItem")
        if exposed != self.id:
            return
        if dialogID == 0:
            return
        dialogData = dialogDatas[dialogID]
        dialogType = dialogData["type"]
        # dialogScript = eval(dialogTypeMap[dialogType])()
        # dialogScript.execute(self)
        # NOTE(review): dialogType is looked up but unused; dispatch is done
        # on the NPC type instead of the table-driven script above.
        npcType = self.dialogNpc.npcType
        if npcType == GlobalConst.NpcType_Arena:
            self.requestEnterArena(self.id, self.dialogNpc.arenaID)
        elif npcType == GlobalConst.NpcType_Store:
            self.requestPullStorePropList(self.id, self.dialogNpc.id)
        elif npcType == GlobalConst.NpcType_Sect:
            self.base.requestJoinSect(self.dialogNpc.sectID)

    def getTaskInfo(self, npcName):
        # stub: task-system hooks below are not implemented yet
        DEBUG_MSG("DialogSystem:getTaskInfo")

    def setTaskFinish(self, npcName, taskIndex, watcherIndex):
        DEBUG_MSG("DialogSystem:setTaskFinish")

    def isTaskFinish(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:isTaskFinish")

    def giveAward(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:giveAward")

    def giveTask(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:giveTask")
| [
"969041327@qq.com"
] | 969041327@qq.com |
fc79f953728d1619a0608c81d27284876bdce5b6 | 16f61285e902285ecade358870a2ab5720bfc3b4 | /utilities/make_nsr_database.py | 350eb90bfb64c5d88363c94b7f0d50bbba4e90cf | [] | no_license | naturalis/galaxy-tool-taxonmatcher | 56dbd56264392fb3f2c477ea7aea65a0cd919a19 | 4e9e337c7156d624db60842ed9c7304d909f479d | refs/heads/master | 2022-11-17T18:35:23.176173 | 2022-11-15T10:15:18 | 2022-11-15T10:15:18 | 149,607,894 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | #!/usr/bin/python3
import sqlite3
import csv
# Module-level SQLite connection/cursor shared by all helpers below.
# NOTE(review): the database filename has no extension; presumably a file
# named "nsr_taxonmatcher" in the working directory -- confirm.
db = sqlite3.connect('nsr_taxonmatcher')
cursor = db.cursor()
def make_database():
    """Create the ``nsr`` table holding one row per taxon of the NSR export."""
    cursor.execute('''CREATE TABLE nsr(id INTEGER PRIMARY KEY, source TEXT, taxonID INTEGER, acceptedNameUsageID INTEGER, taxonomicStatus TEXT, species_rank TEXT, genus_rank TEXT, family_rank TEXT, order_rank TEXT, class_rank TEXT, phylum_rank TEXT, kingdom_rank TEXT, metadata TEXT)''')
    db.commit()
def check_unknowns(data):
    """Replace falsy values in *data* with an "unknown <key>" placeholder.

    Mutates *data* in place and returns it for convenience.

    Bug fix: the original indexed ``data[str(x)]`` while iterating the keys,
    which raises KeyError for any non-string key; iterate items instead.
    """
    for key, value in data.items():
        if not value:
            data[key] = "unknown " + str(key)
    return data
def add_nsr_taxonomy():
    """Load the NSR taxonomy export (Taxa.txt) into the ``nsr`` table.

    The export is comma-separated and Latin-1 encoded. Column layout is
    assumed to be: 0 taxonID, 1 acceptedNameUsageID, 2 full scientific name,
    3 author citation, 4 taxonomicStatus, 6-11 kingdom..genus ranks, and
    optional 15/16 extra metadata -- TODO confirm against the NSR export spec.
    """
    with open("Taxa.txt", "r", encoding='latin-1') as csv_file:
        nsr = csv.reader(csv_file, delimiter=',')
        for line in nsr:
            # Species epithet: strip the author citation from the full name.
            species = line[2].replace(line[3].strip(), "")
            print(line)      # debug output; consider removing for large imports
            print(species)
            # Columns 15/16 are optional; older exports may lack them.
            try:
                metadata = line[15] + ";" + line[16] + ";" + line[3]
            except IndexError:  # was a bare except:, which also hid real errors
                metadata = ""
            data = {"source": "nsr", "taxonID": line[0],
                    "acceptedNameUsageID": line[1],
                    "taxonomicStatus": line[4],
                    "species_rank": species.strip(),
                    "genus_rank": line[11].strip(),
                    "family_rank": line[10], "order_rank": line[9],
                    "class_rank": line[8], "phylum_rank": line[7],
                    "kingdom_rank": line[6], "metadata": metadata}
            cursor.execute('''INSERT INTO nsr(source, taxonID, acceptedNameUsageID, taxonomicStatus, species_rank, genus_rank, family_rank, order_rank, class_rank, phylum_rank, kingdom_rank, metadata)VALUES(:source, :taxonID, :acceptedNameUsageID, :taxonomicStatus, :species_rank, :genus_rank, :family_rank, :order_rank, :class_rank, :phylum_rank, :kingdom_rank, :metadata)''', data)
    db.commit()
def main():
    """Build the schema, load the taxonomy, and index the lookup column."""
    make_database()
    add_nsr_taxonomy()
    # species_rank is the column the taxon matcher queries against.
    cursor.execute("CREATE INDEX index_nsr_species ON nsr (species_rank);")

if __name__ == "__main__":
    main()
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com |
6b24dbec72e959374d2977d74015853eeddb3e20 | 1cae1f15f4957efc42b011929cfdba0308887ecc | /beerfinder/sighting/south_migrations/0001_initial.py | 776df5146290be6488131e756855dfab78188fa9 | [] | no_license | jmichalicek/Beerfinder-Web | 983cbcfb37b889b8bf85b3d6c6da6f745c9da921 | f48d9983343463541611752c5aefe0bd10c3907f | refs/heads/master | 2016-09-06T06:10:17.088864 | 2015-05-01T01:40:03 | 2015-05-01T01:40:03 | 15,567,057 | 4 | 0 | null | 2015-05-01T01:40:03 | 2014-01-01T17:04:50 | CSS | UTF-8 | Python | false | false | 13,672 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create Sighting, SightingConfirmation and
        Comment tables plus the Sighting<->ServingType M2M table.
        (South auto-generated; edit with care.)"""
        # Adding model 'Sighting'
        db.create_table(u'sighting_sighting', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_sighted', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('venue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['venue.Venue'])),
            ('beer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['beer.Beer'])),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.User'], blank=True)),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'sighting', ['Sighting'])

        # Adding M2M table for field serving_types on 'Sighting'
        m2m_table_name = db.shorten_name(u'sighting_sighting_serving_types')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('sighting', models.ForeignKey(orm[u'sighting.sighting'], null=False)),
            ('servingtype', models.ForeignKey(orm[u'beer.servingtype'], null=False))
        ))
        db.create_unique(m2m_table_name, ['sighting_id', 'servingtype_id'])

        # Adding model 'SightingConfirmation'
        db.create_table(u'sighting_sightingconfirmation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('sighting', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sighting.Sighting'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.User'], blank=True)),
            ('is_available', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ))
        db.send_create_signal(u'sighting', ['SightingConfirmation'])

        # Adding model 'Comment'
        db.create_table(u'sighting_comment', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.User'])),
            ('sighting', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['sighting.Sighting'])),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('text', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'sighting', ['Comment'])
def backwards(self, orm):
    """Revert the forwards migration: drop every table created for the
    'sighting' app (Sighting, its serving_types M2M join table,
    SightingConfirmation and Comment)."""
    # Deleting model 'Sighting'
    db.delete_table(u'sighting_sighting')
    # Removing M2M table for field serving_types on 'Sighting'
    db.delete_table(db.shorten_name(u'sighting_sighting_serving_types'))
    # Deleting model 'SightingConfirmation'
    db.delete_table(u'sighting_sightingconfirmation')
    # Deleting model 'Comment'
    db.delete_table(u'sighting_comment')
models = {
u'accounts.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'send_watchlist_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_name_on_sightings': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '50', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'beer.beer': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'brewery'),)", 'object_name': 'Beer'},
'brewery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['beer.Brewery']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'normalized_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '75', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['beer.Style']", 'null': 'True', 'blank': 'True'})
},
u'beer.brewery': {
'Meta': {'ordering': "('name',)", 'object_name': 'Brewery'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'normalized_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '75', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '75'})
},
u'beer.servingtype': {
'Meta': {'ordering': "('name',)", 'object_name': 'ServingType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '25', 'blank': 'True'})
},
u'beer.style': {
'Meta': {'ordering': "('name',)", 'object_name': 'Style'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sighting.comment': {
'Meta': {'ordering': "('-date_created', 'sighting')", 'object_name': 'Comment'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sighting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['sighting.Sighting']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"})
},
u'sighting.sighting': {
'Meta': {'ordering': "('-date_sighted', 'beer', 'venue__name')", 'object_name': 'Sighting'},
'beer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['beer.Beer']"}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_sighted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'serving_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['beer.ServingType']", 'symmetrical': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']", 'blank': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venue.Venue']"})
},
u'sighting.sightingconfirmation': {
'Meta': {'ordering': "('-date_created', 'sighting')", 'object_name': 'SightingConfirmation'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'sighting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sighting.Sighting']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']", 'blank': 'True'})
},
u'venue.venue': {
'Meta': {'object_name': 'Venue'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'foursquare_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['sighting'] | [
"jmichalicek@gmail.com"
] | jmichalicek@gmail.com |
58a9216257324f092630be979ba310cc8f359ca1 | 149c90e6366d8adf9cbfd9332612736a41e6dc25 | /__manifest__.py | 224e05af8799187aeda5ad199cb0481c624444e3 | [] | no_license | brahim94/formation | 02549ef1fee7d7432987248e87627b22158a12f6 | 1cd96a6ccfac1d61e524a20b84d4c30d395f5a35 | refs/heads/master | 2023-03-06T16:19:15.883896 | 2021-02-25T18:13:16 | 2021-02-25T18:13:16 | 341,509,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # -*- coding: utf-8 -*-
{
'name': "formation",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/13.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'egov_rh_ma'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/formation.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
| [
"brahim-ayad@hotmail.com"
] | brahim-ayad@hotmail.com |
207e5e3c5fa9d53bb95381ed966da7a673f28161 | c33496682b760deac61fedecba3e82ce4e41dfde | /scripts/e216.py | 153ce21f2c414603d711d9470b62ef7e81559416 | [
"MIT"
] | permissive | ferasalsaab/neuralnilm_prototype | c5e9cde02d475ac499b15fea62143e76adff07d0 | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | refs/heads/master | 2020-04-16T14:38:03.615279 | 2018-01-29T15:30:43 | 2018-01-29T15:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,233 | py | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
# Experiment name derived from this script's filename (e.g. "e216").
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
# Directory where figures/results are written.
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250  # passed to Net as the plot-saving interval
GRADIENT_STEPS = 100  # truncated-gradient length used by the LSTM layers
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0  # target-power threshold separating "on" from "off" samples

def scaled_cost(x, t):
    """Balanced MSE: average of the MSE over targets above THRESHOLD and
    the MSE over targets at/below it, so rare above-threshold samples get
    equal weight to the common below-threshold ones.

    Args:
        x: network output (theano tensor).
        t: target (theano tensor, same shape as x).

    Returns:
        Scalar theano expression for the cost.
    """
    sq_error = (x - t) ** 2
    def mask_and_mean_sq_error(mask):
        # Mean squared error over masked elements only.  An empty mask
        # yields NaN, which ifelse maps to 0 so the total stays finite.
        masked_sq_error = sq_error[mask.nonzero()]
        mean = masked_sq_error.mean()
        mean = ifelse(T.isnan(mean), 0.0, mean)
        return mean
    above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
    below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
    return (above_thresh_mean + below_thresh_mean) / 2.0
def exp_a(name):
    """Build experiment 'a': a RealApplianceSource (UK-DALE building 1;
    fridge/freezer, hair straighteners, television) feeding a stack of
    three LSTM layers with Conv1D + feature-pool downsampling between the
    recurrent layers, ending in a linear dense output layer.

    Args:
        name: Full experiment name, used by Net for saved outputs.

    Returns:
        The configured (untrained) Net.
    """
    # lag 64, peepholes
    # NOTE(review): the comment above says "lag 64" but lag=32 is passed
    # below -- confirm which was intended.
    global source
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=None,#[500] * 5,
        on_power_thresholds=[5] * 5,
        max_input_power=500,
        min_on_durations=[60, 60, 60, 1800, 1800],
        min_off_durations=[12, 12, 12, 1800, 600],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1500,
        output_one_appliance=False,
        boolean_targets=False,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0.7,
        n_seq_per_batch=25,
        subsample_target=4,
        input_padding=3,
        include_diff=False,
        clip_appliance_power=False,
        lag=32
    )
    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=250,
        loss_function=scaled_cost,
        updates=partial(nesterov_momentum, learning_rate=0.0001, clip_range=(-1, 1)),
        layers_config=[
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': True
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1) # (batch, features, time)
            },
            {
                'type': Conv1DLayer, # convolve over the time axis
                'num_filters': 50,
                'filter_length': 2,
                'stride': 1,
                'nonlinearity': sigmoid,
                'W': Uniform(1)
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1) # back to (batch, time, features)
            },
            {
                'type': FeaturePoolLayer,
                'ds': 2, # number of feature maps to be pooled together
                'axis': 1 # pool over the time axis
            },
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': True
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1) # (batch, features, time)
            },
            {
                'type': Conv1DLayer, # convolve over the time axis
                'num_filters': 50,
                'filter_length': 2,
                'stride': 1,
                'nonlinearity': sigmoid,
                'W': Uniform(1)
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1) # back to (batch, time, features)
            },
            {
                'type': FeaturePoolLayer,
                'ds': 2, # number of feature maps to be pooled together
                'axis': 1 # pool over the time axis
            },
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(1),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': True
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': None,
                'W': Uniform(25)
            }
        ]
    )
    return net
def init_experiment(experiment):
    """Build the network for one experiment.

    Args:
        experiment: One-letter experiment id ('a', ...), resolved to the
            module-level builder function ``exp_<letter>``.

    Returns:
        The constructed (untrained) Net.

    Raises:
        KeyError: If no ``exp_<letter>`` function exists for this id.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # Fix: look the builder up directly instead of assembling a source
    # string and eval()-ing it -- same behaviour, no dynamic code execution.
    exp_func = globals()['exp_' + experiment]
    net = exp_func(full_exp_name)
    return net
def main():
    """Run every experiment in sequence, saving results under PATH.

    KeyboardInterrupt stops the whole run; a TrainingError aborts only the
    current experiment; any other exception is reported and re-raised.
    """
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            break
        except TrainingError as exception:
            print("EXCEPTION:", exception)
        except Exception as exception:
            # Fix: the original executed `raise` *before* its print and
            # `import ipdb` lines, leaving them as unreachable dead code.
            # Report the exception first, then re-raise it.
            print("EXCEPTION:", exception)
            raise


if __name__ == "__main__":
    main()
| [
"jack-list@xlk.org.uk"
] | jack-list@xlk.org.uk |
bc5fd4f18e4cea5cd86a9459e97322e8707e5aa0 | 53e2254b83ac5ac71ff390a7c77070ff97b31c0b | /large_substr1_lex.py | 602d31574147c303229b67c7c2185b18776e1843 | [] | no_license | Ponkiruthika112/codekataset1 | 83a02b96a6b35c33ae7c5a6d6b21c34e63a7eef4 | 4f164864a59e22122b647dd62d36d24e7ace7dac | refs/heads/master | 2020-04-15T04:58:44.427824 | 2019-09-06T10:10:12 | 2019-09-06T10:10:12 | 164,404,367 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | # your code goes here
# Print the first suffix of the input whose first character is strictly
# greater than the string's first character; print nothing otherwise.
text = input()
for pos in range(1, len(text)):
    if text[pos] > text[0]:
        print(text[pos:])
        break
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
cf7283cf94a1136d00a4c9a0df38866b2d50be20 | 6ecff67d6103ddbd787f78c35182722b83b8a37e | /백준/Python/카테고리/우선순위 큐/11286(절대값 힙).py | c06ae059aafbe6c660e2deb3dda5c19f3f852654 | [] | no_license | jsungmin6/Algorithm | 9ef2339aa00921e7df756a8dff569954a008c118 | bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf | refs/heads/master | 2023-05-27T06:24:16.123307 | 2021-06-11T09:22:21 | 2021-06-11T09:22:21 | 259,299,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | '''
분류
우선순위 큐
근거
절대값 힙, 작은수부터 출력해 배열에서 제거
풀이과정
1.절대값이 가장 작은 값을 출력 하는데 같을경우에는 -를 출력한다.
힙을 튜블로 저장해서 -,+ 여부를 파악하고 순서를 줘야할 것 같다.
2.다른 풀이를 보니 우선순위큐를 두개써서 음수와 양수를 나누어 저장해 깔끔하게 구했다.
시간복잡도
우선순위 큐는 삽입과 삭제가 O(log(N)) 이다 . N번 수행하니
O(Nlog(N))
'''
import heapq
import sys
input = sys.stdin.readline
N=int(input())
heap=[]
for _ in range(N):
data = int(input())
if data == 0:
if not heap:
print(0)
else:
print(heapq.heappop(heap)[1])
else:
heapq.heappush(heap,(abs(data),data))
# Alternative solution: two priority queues, one for positives and one for
# negatives (stored negated, so Python's min-heap yields the value closest
# to zero first).
import sys
import heapq
inp=sys.stdin.readline
n=int(inp())
hp=[]  # min-heap of positive values
hn=[]  # min-heap of negated negative values
for _ in range(n):
    x=int(inp())
    if x==0:
        if hp and hn:
            # On a tie (hp[0] == hn[0]) the else branch runs, printing the
            # negative number as the problem requires.
            if hp[0]<hn[0]:
                print(heapq.heappop(hp))
            else:
                print(-heapq.heappop(hn))
        elif hp:
            print(heapq.heappop(hp))
        elif hn:
            print(-heapq.heappop(hn))
        else:
            print(0)
    else:
        if x>0:
            heapq.heappush(hp,x)
        else:
            heapq.heappush(hn,-x)
"jsungmin506@gmail.com"
] | jsungmin506@gmail.com |
9229c9844bad464a609cd06d4a743ce10d40f974 | 1fb1c41a7b916da23f02db059526f93f071c5479 | /django12/src/blog/views.py | 9f847f4e748441c0399614978594313e2c220119 | [] | no_license | jm40108/1 | 1a70d90ad8f7bfb0e4da55960b075a70766482c5 | 2f11ad1751aa4d5df01c00fcd07fd2c68a93a425 | refs/heads/master | 2020-04-14T01:20:18.752824 | 2018-12-30T03:23:12 | 2018-12-30T03:23:12 | 163,557,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | from django.shortcuts import render
from django.views.generic.list import ListView
from .models import *
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from .forms import *
from django.http.response import HttpResponseRedirect
from django.urls.base import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
#Generic views
#View classes subclass Django's generic views and override attributes/methods as needed
#Post list view (index)
class Index(ListView):
    """Paginated list of Post objects."""
    template_name = 'blog/index.html' # path of the template to render
    model = Post # model class whose objects are listed
    context_object_name = 'post_list'  # name of the queryset in the template context
    paginate_by = 5  # posts per page
#Post detail page (detail)
class Detail(DetailView):
    """Detail page for a single Post."""
    template_name = 'blog/detail.html'
    model = Post
    context_object_name = 'obj'  # name of the Post in the template context
#Post creation page (postRegister)
class PostRegister(LoginRequiredMixin,FormView):
    """Post creation page; login required."""
    template_name='blog/postregister.html'
    form_class=PostForm
    context_object_name = 'form'
    def form_valid(self, form):
        """Save the post, attach uploaded images/files, then redirect to
        the new post's detail page."""
        obj = form.save(commit=False)# obj = unsaved Post instance
        obj.author = self.request.user
        obj.save()
        for f in self.request.FILES.getlist('images'):
            # f: uploaded image; wrap it in a PostImage row linked to the post
            image = PostImage(post = obj, image = f)
            image.save()
        for f in self.request.FILES.getlist('files'):
            # f: uploaded attachment; stored as a PostFile row
            file = PostFile(post = obj, file = f)
            file.save()
        return HttpResponseRedirect(reverse('blog:detail', args=(obj.id,)))
#View class implementing the search feature
class SearchP(FormView):
    """Search page: shows a form and, on valid submission, the posts whose
    headline contains the submitted search word."""
    template_name = 'blog/searchP.html'
    form_class = SearchForm
    context_object_name = 'form'
    # form_valid is overridden to handle submissions that passed validation.
    def form_valid(self, form):
        """Render the same template with every Post whose headline contains
        the search word."""
        # Extract the text the user typed.
        search_word = form.cleaned_data['search_word']
        # Find the Post objects containing that text.
        post_list = Post.objects.filter(headline__contains=search_word)
        # Hand the results to the template.
        return render(self.request, self.template_name, {'form':form, 'search_word':search_word, 'postlist':post_list})
"user@DESKTOP-7EQF3M8"
] | user@DESKTOP-7EQF3M8 |
99f41f0320101cd077bdd50a4a2cb1db34c8fa1e | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/25.ReverseNodesInK-Group.py | 6607facb67916c01b66962b629574b88293be6ce | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | # 25. Reverse Nodes in k-Group
# Hard
# 4406
# 427
# Add to List
# Share
# Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
# k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes, in the end, should remain as it is.
# You may not alter the values in the list's nodes, only nodes themselves may be changed.
# Example 1:
# Input: head = [1,2,3,4,5], k = 2
# Output: [2,1,4,3,5]
# Example 2:
# Input: head = [1,2,3,4,5], k = 3
# Output: [3,2,1,4,5]
# Example 3:
# Input: head = [1,2,3,4,5], k = 1
# Output: [1,2,3,4,5]
# Example 4:
# Input: head = [1], k = 1
# Output: [1]
# Constraints:
# The number of nodes in the list is in the range sz.
# 1 <= sz <= 5000
# 0 <= Node.val <= 1000
# 1 <= k <= sz
# Follow-up: Can you solve the problem in O(1) extra memory space?
# This solution works:
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
        """Reverse the list in groups of k nodes; a trailing group shorter
        than k keeps its original order.  Node values are never changed,
        only the links between nodes."""
        # Flatten the linked list into a Python list of nodes.
        nodes = []
        node = head
        while node:
            nodes.append(node)
            node = node.next

        # Reverse every complete k-sized chunk; leave a short tail as-is.
        reordered = []
        for start in range(0, len(nodes), k):
            chunk = nodes[start:start + k]
            if len(chunk) == k:
                chunk.reverse()
            reordered.extend(chunk)

        # Relink the nodes in their new order and terminate the list.
        for prev, nxt in zip(reordered, reordered[1:]):
            prev.next = nxt
        if not reordered:
            return None
        reordered[-1].next = None
        return reordered[0]
| [
"akimi.mimi.yano@gmail.com"
] | akimi.mimi.yano@gmail.com |
4e09d800c43d33123b8450392a65415a90949257 | ddc38e9a10e31c122e19d8fe132e9d73ac954c50 | /tests/conftest.py | 4338383926352a7245060ae3a6035ede61b2a92b | [
"MIT"
] | permissive | Tinche/pyrseia | 8734bddc5f0efcc5ceb901fd80a7bd239306577f | 5abf2eda9be06b7417395a50ef676454bbd8f667 | refs/heads/master | 2023-09-03T17:38:05.059968 | 2020-05-10T01:05:20 | 2020-05-10T01:05:20 | 256,872,895 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | from typing import Callable, TypeVar, Type
from pytest import fixture # type: ignore
from pyrseia import server, Server
from .calculator import Calculator
C = TypeVar("C")  # context type carried by the server


@fixture
def calculator_server_creator() -> Callable[[Type[C]], Server[Calculator, C]]:
    """Pytest fixture returning a factory that builds a ``Server``
    implementing the ``Calculator`` API.

    The factory takes the context class and an optional middleware list,
    and registers trivial implementations of the three operations
    (call_one echoes its argument; add and multiply do the arithmetic).
    """
    def create_server(
        ctx_cls: Type[C], middleware=None
    ) -> Server[Calculator, C]:
        # Fix: the default used to be a mutable `[]`, shared across every
        # call of this factory; use None and substitute a fresh list.
        serv: Server[Calculator, C] = server(
            Calculator, ctx_cls=ctx_cls,
            middleware=middleware if middleware is not None else []
        )

        @serv.implement(Calculator.call_one)
        async def impl_test_call_one(i: int) -> int:
            return i

        @serv.implement(Calculator.add)
        async def add(a: int, b: int) -> int:
            return a + b

        @serv.implement(Calculator.multiply)
        async def multiply(a: int, b: int) -> int:
            return a * b

        return serv

    return create_server
| [
"tinchester@gmail.com"
] | tinchester@gmail.com |
3d64cb96441e32368bf07fa0be0d9af7d52bd0e5 | 0d37147ac9ae4f24823dc0be62c1bb8e6c610e08 | /bip_utils/bip32/bip32_keys.py | 6c5e3c66ba6d0ca598cee268678eb8b109ecfa89 | [
"MIT"
] | permissive | tesseract13666/bip_utils | 7c1e5f6415054291473340a62f66cb68f10e563a | 5c39cc990fb514cb1d88845e1fb0fa8fa393250c | refs/heads/master | 2023-06-27T13:57:19.037879 | 2021-07-25T21:06:06 | 2021-07-25T21:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,614 | py | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
from __future__ import annotations
from functools import lru_cache
from bip_utils.bip32.bip32_ex import Bip32KeyError
from bip_utils.bip32.bip32_key_ser import Bip32PrivateKeySerializer, Bip32PublicKeySerializer
from bip_utils.bip32.bip32_key_data import Bip32FingerPrint, Bip32KeyData
from bip_utils.ecc import EllipticCurveGetter, EllipticCurveTypes, IPoint, IPrivateKey, IPublicKey
from bip_utils.utils import CryptoUtils, DataBytes
class Bip32PublicKey:
    """ BIP32 public key class.
    It represents a public key used by BIP32 with all the related data (e.g. depth, chain code, etc...).
    Expensive derived values (raw encodings, identifier, fingerprint,
    extended serialization) are memoized per instance.
    """

    @classmethod
    def FromBytes(cls,
                  key_bytes: bytes,
                  key_data: Bip32KeyData,
                  curve_type: EllipticCurveTypes) -> Bip32PublicKey:
        """ Create from bytes.

        Args:
            key_bytes (bytes)              : Key bytes
            key_data (Bip32KeyData object) : Key data
            curve_type (EllipticCurveTypes): Elliptic curve type

        Raises:
            Bip32KeyError: If the key constructed from the bytes is not valid
        """
        return cls(cls.__KeyFromBytes(key_bytes, curve_type),
                   key_data)

    def __init__(self,
                 pub_key: IPublicKey,
                 key_data: Bip32KeyData) -> None:
        """ Construct class.

        Args:
            pub_key (IPublicKey object)   : Key object
            key_data (Bip32KeyData object): Key data
        """
        self.m_pub_key = pub_key
        self.m_key_data = key_data
        # Per-instance memoization cache.
        # Fix: the previous implementation decorated instance methods with
        # @lru_cache(), which keys a module-level cache on `self` and thus
        # keeps every instance alive for the process lifetime (ruff B019).
        self.m_cache = {}

    def _Memoized(self,
                  name: str,
                  compute):
        """ Return the value cached under name, computing it on first access.

        Args:
            name (str)        : Cache key
            compute (callable): Zero-argument callable producing the value

        Returns:
            The cached (or freshly computed) value
        """
        if name not in self.m_cache:
            self.m_cache[name] = compute()
        return self.m_cache[name]

    def CurveType(self) -> EllipticCurveTypes:
        """ Return key elliptic curve type.

        Returns:
            EllipticCurveTypes: Elliptic curve type
        """
        return self.m_pub_key.CurveType()

    def KeyObject(self) -> IPublicKey:
        """ Return the key object.

        Returns:
            IPublicKey object: Key object
        """
        return self.m_pub_key

    def Data(self) -> Bip32KeyData:
        """ Return key data.

        Returns:
            BipKeyData object: BipKeyData object
        """
        return self.m_key_data

    def RawCompressed(self) -> DataBytes:
        """ Return raw compressed public key.

        Returns:
            DataBytes object: DataBytes object
        """
        return self._Memoized("raw_compressed",
                              lambda: self.m_pub_key.RawCompressed())

    def RawUncompressed(self) -> DataBytes:
        """ Return raw uncompressed public key.

        Returns:
            DataBytes object: DataBytes object
        """
        return self._Memoized("raw_uncompressed",
                              lambda: self.m_pub_key.RawUncompressed())

    def Point(self) -> IPoint:
        """ Get public key point.

        Returns:
            IPoint object: IPoint object
        """
        return self.m_pub_key.Point()

    def FingerPrint(self) -> Bip32FingerPrint:
        """ Get key fingerprint.

        Returns:
            Bip32FingerPrint object: Fingerprint (first bytes of the key identifier)
        """
        return self._Memoized("fingerprint",
                              lambda: Bip32FingerPrint(self.KeyIdentifier()))

    def KeyIdentifier(self) -> bytes:
        """ Get key identifier (HASH160 of the compressed public key).

        Returns:
            bytes: Key identifier bytes
        """
        return self._Memoized(
            "key_identifier",
            lambda: CryptoUtils.Hash160(self.m_pub_key.RawCompressed().ToBytes()))

    def ToExtended(self) -> str:
        """ Return key in serialized extended format.

        Returns:
            str: Key in serialized extended format
        """
        return self._Memoized(
            "extended",
            lambda: Bip32PublicKeySerializer.Serialize(self.m_pub_key,
                                                       self.m_key_data))

    @staticmethod
    def __KeyFromBytes(key_bytes: bytes,
                       curve_type: EllipticCurveTypes) -> IPublicKey:
        """ Construct key from bytes.

        Args:
            key_bytes (bytes)              : Key bytes
            curve_type (EllipticCurveTypes): Elliptic curve type

        Returns:
            IPublicKey object: IPublicKey object

        Raises:
            Bip32KeyError: If the key constructed from the bytes is not valid
        """
        try:
            curve = EllipticCurveGetter.FromType(curve_type)
            return curve.PublicKeyClass().FromBytes(key_bytes)
        except ValueError as ex:
            raise Bip32KeyError("Invalid public key") from ex
class Bip32PrivateKey:
    """ BIP32 private key class.
    It represents a private key used by BIP32 with all the related data (e.g. depth, chain code, etc...).
    Expensive derived values (raw encoding, public key, extended
    serialization) are memoized per instance.
    """

    @classmethod
    def FromBytes(cls,
                  key_bytes: bytes,
                  key_data: Bip32KeyData,
                  curve_type: EllipticCurveTypes) -> Bip32PrivateKey:
        """ Create from bytes.

        Args:
            key_bytes (bytes)              : Key bytes
            key_data (Bip32KeyData object) : Key data
            curve_type (EllipticCurveTypes): Elliptic curve type

        Raises:
            Bip32KeyError: If the key constructed from the bytes is not valid
        """
        return cls(cls.__KeyFromBytes(key_bytes, curve_type),
                   key_data)

    def __init__(self,
                 priv_key: IPrivateKey,
                 key_data: Bip32KeyData) -> None:
        """ Construct class.

        Args:
            priv_key (IPrivateKey object) : Key object
            key_data (Bip32KeyData object): Key data
        """
        self.m_priv_key = priv_key
        self.m_key_data = key_data
        # Per-instance memoization cache.
        # Fix: the previous implementation decorated instance methods with
        # @lru_cache(), which keys a module-level cache on `self` and thus
        # keeps every instance alive for the process lifetime (ruff B019).
        self.m_cache = {}

    def _Memoized(self,
                  name: str,
                  compute):
        """ Return the value cached under name, computing it on first access.

        Args:
            name (str)        : Cache key
            compute (callable): Zero-argument callable producing the value

        Returns:
            The cached (or freshly computed) value
        """
        if name not in self.m_cache:
            self.m_cache[name] = compute()
        return self.m_cache[name]

    def CurveType(self) -> EllipticCurveTypes:
        """ Return key elliptic curve type.

        Returns:
            EllipticCurveTypes: Elliptic curve type
        """
        return self.m_priv_key.CurveType()

    def KeyObject(self) -> IPrivateKey:
        """ Return the key object.

        Returns:
            IPrivateKey object: Key object
        """
        return self.m_priv_key

    def Data(self) -> Bip32KeyData:
        """ Return key data.

        Returns:
            BipKeyData object: BipKeyData object
        """
        return self.m_key_data

    def Raw(self) -> DataBytes:
        """ Return raw private key.

        Returns:
            DataBytes object: DataBytes object
        """
        return self._Memoized("raw", lambda: self.m_priv_key.Raw())

    def PublicKey(self) -> Bip32PublicKey:
        """ Get the public key correspondent to the private one.

        Returns:
            Bip32PublicKey object: Bip32PublicKey object
        """
        return self._Memoized(
            "public_key",
            lambda: Bip32PublicKey(self.m_priv_key.PublicKey(),
                                   self.m_key_data))

    def ToExtended(self) -> str:
        """ Return key in serialized extended format.

        Returns:
            str: Key in serialized extended format
        """
        return self._Memoized(
            "extended",
            lambda: Bip32PrivateKeySerializer.Serialize(self.m_priv_key,
                                                        self.m_key_data))

    @staticmethod
    def __KeyFromBytes(key_bytes: bytes,
                       curve_type: EllipticCurveTypes) -> IPrivateKey:
        """ Construct key from bytes.

        Args:
            key_bytes (bytes)              : Key bytes
            curve_type (EllipticCurveTypes): Elliptic curve type

        Returns:
            IPrivateKey object: IPrivateKey object

        Raises:
            Bip32KeyError: If the key constructed from the bytes is not valid
        """
        try:
            curve = EllipticCurveGetter.FromType(curve_type)
            return curve.PrivateKeyClass().FromBytes(key_bytes)
        except ValueError as ex:
            raise Bip32KeyError("Invalid private key") from ex
| [
"54482000+ebellocchia@users.noreply.github.com"
] | 54482000+ebellocchia@users.noreply.github.com |
e35e31b0309865823176429707b8d45bae480e5d | aaa108af22012ce93db0177aa38cea698ba500ec | /lab_3/f_math2.py | d7405da1d9c8f9fb0c8f809bd43a2d06390b9c2e | [] | no_license | minhduc9699/PhamMinhDuc-lab-c4e16 | 74519ca0e1552ee890710d42e34d6b54b60954a0 | 957e56509f861514b51873f267edc47d40ed6225 | refs/heads/master | 2021-09-10T15:00:42.141530 | 2018-03-28T06:18:45 | 2018-03-28T06:18:45 | 125,954,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | #from eval import *
import eval
res = eval.calc(5, +, 6)
print(res)
| [
"minhduc.096.99@gmail.com"
] | minhduc.096.99@gmail.com |
61a0e08eee109f4b7a51ef5accb0820c64fd186e | d2dbc4eeba22f308634d8accde50062739fb50d1 | /sd/algorithms/partitional.py | 004f4e4ff78310c3a207d2e1a97987346704b6a9 | [
"Apache-2.0"
] | permissive | shibaji7/SuperDARN-Clustering | bf2d793a23dfae105bd4ca5bbac09fd64e60182a | d7427ba609fb7f5e50c26f52364e5e9e118bbc31 | refs/heads/master | 2022-12-24T07:08:52.308469 | 2020-10-06T16:46:08 | 2020-10-06T16:46:08 | 267,057,983 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | #!/usr/bin/env python
"""
partitional.py: module is deddicated to run different partitional algorithms.
Partitional clustering algorithms aim to discover the groupings present in the data by optimizing
a specific objective function and iteratively improving the quality of the partitions. These algorithms
generally require certain user parameters to choose the prototype points that represent each cluster.
For this reason they are also called prototype-based clustering algorithms.
- kMeans
- kMediods
- kMedians
- kModes
- fuzzykMeans
- Mean Shift
- Kernel kMeans
"""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import sys
sys.path.append("extra/")
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from kmodes.kmodes import KModes
from kmedians import KMedians
from kmedoids import KMedoids
from fuzzykmeans import FuzzyKMeans
from kernelkmeans import KernelKMeans
class Partition(object):
"""All partitoned algorithms are implemened here."""
def __init__(self, method, data, n_clusters=2, random_state=0):
"""
Initialize all the parameters.
method: Name of the algorithms (lower case joined by underscore)
data: Data (2D Matrix)
n_clusters: Number of clusters
random_state: Random initial state
"""
self.method = method
self.data = data
self.n_clusters = n_clusters
np.random.seed(random_state)
return
def setup(self, keywords={}):
"""
Setup the algorithms
"""
for key in keywords.keys():
setattr(self, key, keywords[key])
if self.method == "kmeans": self.obj = KMeans(n_clusters=self.n_clusters, **keywords)
if self.method == "kmedoids": self.obj = KMedoids(n_clusters=self.n_clusters, **keywords)
if self.method == "kmodes": self.obj = KModes(n_clusters=self.n_clusters, init="Huang", **keywords)
if self.method == "kmedians": self.obj = KMedians(n_clusters=self.n_clusters, **keywords)
if self.method == "fuzzykmeans": self.obj = FuzzyKMeans(n_clusters=self.n_clusters, **keywords)
if self.method == "meanshift": self.obj = MeanShift(n_jobs=10, **keywords)
if self.method == "kernelkmeans": self.obj = KernelKMeans(n_clusters=self.n_clusters, **keywords)
return
def run(self):
"""
Run the models
"""
if self.method == "kmeans": self.obj.fit(self.data)
if self.method == "kmedoids": self.obj.fit(self.data)
if self.method == "kmodes": self.obj.fit(self.data)
if self.method == "kmedians": self.obj.fit(self.data)
if self.method == "fuzzykmeans": self.obj.fit(self.data)
if self.method == "meanshift": self.obj.fit(self.data)
if self.method == "kernelkmeans": self.obj.fit(self.data)
return
| [
"shibaji7@vt.edu"
] | shibaji7@vt.edu |
9182b833c33a0c031a4b6abd00195b321488a6ba | 35fa8925e63f2b0f62ef6bfc1ff4e03cf42bd923 | /django_analyses/serializers/analysis.py | 5686c2648bdc0f3b2920d148bc7a04ed4cdb06cb | [
"Apache-2.0"
] | permissive | TheLabbingProject/django_analyses | 9e6f8b9bd2a84e8efe6dda6a15de6a3ecdf48ec1 | 5642579660fd09dde4a23bf02ec98a7ec264bceb | refs/heads/master | 2023-02-26T07:53:53.142552 | 2023-02-17T08:12:17 | 2023-02-17T08:12:17 | 225,623,958 | 1 | 2 | Apache-2.0 | 2023-02-17T08:12:18 | 2019-12-03T13:15:29 | Python | UTF-8 | Python | false | false | 662 | py | from django_analyses.models.analysis import Analysis
from django_analyses.models.category import Category
from rest_framework import serializers
class AnalysisSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="analyses:analysis-detail")
category = serializers.HyperlinkedRelatedField(
view_name="analyses:category-detail", queryset=Category.objects.all(),
)
class Meta:
model = Analysis
fields = (
"id",
"title",
"description",
"category",
"created",
"modified",
"url",
)
| [
"z.baratz@gmail.com"
] | z.baratz@gmail.com |
766e6c727fc7adb01aeabb92bfcb815182f6aae6 | 81f3f88c6b1a687726b75b8f53c2fb29d72df584 | /aispider/crawler/auto_crawler.py | d998aaa9ed30476aae87695ddcafe35746857691 | [
"Apache-2.0"
] | permissive | East196/aispider | 1f256ee1eea991d49d41373f2b4734a65b264b7d | 6b42ff5c4bda89cbd923a86f3aa3fdf4a224dbb9 | refs/heads/master | 2020-04-07T10:14:14.222722 | 2018-04-21T04:55:11 | 2018-04-21T04:55:11 | 124,202,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,020 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
爬取器
"""
from __future__ import print_function
import json
import re
from xml.sax.saxutils import escape
from bs4 import BeautifulSoup
from lxml import etree
from aispider.core.http_request import HttpRequest
from aispider.core.request import get_soup, get_json, get_domain
class AutoCrawler(object):
"""
自动爬取器,从xml rule中读取任务执行
"""
def __init__(self, http_request=None):
if http_request is None:
self.http_request = HttpRequest()
else:
self.http_request = http_request
def crawl(self, url, rule=None):
"""
爬取操作
:param url: 页面url
:param rule: 页面解析rule的定义XML文档
:type rule: string
:return: 提取对象
:rtype: dict
"""
if rule is None:
print("rule must be set")
html = self.http_request.get_html(url)
return self.extract(html, rule)
def extract(self, html, rule):
"""
根据rule指定的抓取规则,从页面内容中提取所需的对象
:param html: 原始内容
:param rule: 抓取规则
:type rule: string
:return: 提取对象
:rtype: dict
"""
doc = html.replace('\n', '')
doc_data = (doc, {})
root = etree.XML(rule)
for child in root:
print(child.tag, child.attrib['type'])
doc_data = [sub for sub in get_doc(child, doc_data)]
return doc_data
def urls(self, url_template="http://www.baidu.com", **kwargs):
# TODO 2 or 3 list
# TODO 使用pop强化kwargs的限定
"""
从url模板获取urls
:param url_template: url模板
:param kwargs: url定义的
"""
for list_item in self._get_list_arg(kwargs)[1]:
sub_kwargs = dict(kwargs)
sub_kwargs['page'] = list_item
yield url_template.format(**sub_kwargs)
@staticmethod
def _get_list_arg(kwargs):
for key, value in kwargs.items():
if type(value) is list:
return key, value
def get_doc(root, doc_data):
"""
:type root: beautifulsoup对象
:param root: 上层解析器xml的结点
:param doc_data: 是一个元组,第一个doc代表传入数据,第二个代表输出数据
:return:是一个元组,第一个doc代表传入数据,第二个代表输出数据
"""
if root.tag != 'parse':
return
if type(doc_data) != list:
doc_data = [doc_data]
# TODO 带第二参数,一直传第二参数内的数据
if root.attrib['type'] == 'json':
print("json")
print(doc_data)
for sub_doc, data in doc_data:
sub_doc_json = json.loads(sub_doc.strip())
# TODO 目前只有1级,TODO多级的jsonpath
jsonpath = root.attrib['jsonpath']
yield sub_doc_json.get(jsonpath), data
if root.attrib['type'] == 're':
print("re")
print(doc_data)
for sub_doc, data in doc_data:
for item in re.findall(root.attrib['restr'], sub_doc):
print("re :" + str(item))
if root.attrib['name']:
data = dict(data, **{root.attrib['name']: "ok"})
yield item.replace('\n', '').strip(), data
if root.attrib['type'] == 'soup':
print("soup")
print(doc_data)
for sub_doc, data in doc_data:
print(sub_doc, data)
soup = BeautifulSoup(sub_doc, 'lxml')
cssselector = root.attrib['cssselector']
print(cssselector)
if root.attrib['list'] == 'False':
print("list false")
a = soup.select_one(cssselector)
yield from rule(root, a, data)
if root.attrib['list'] == 'True':
print("list true")
for a in soup.select(cssselector):
yield from rule(root, a, data)
def rule(root, a, data):
if root.attrib['name']:
data = dict(data, **{root.attrib['name']: a.get_text()})
if len(root):
print(root)
items = item_extract(a, root)
data = dict(data, **items)
yield data, data
else:
yield a.get_text(), data
def item_extract(soup, root):
"""
:param soup:上层html数据的soup
:param root:上层解析器xml的结点
:return: 根据解析器抽取的items
"""
items = {}
for child in root:
if child.tag != 'parse':
continue
if child.attrib['type'] == 'soup':
sub_cssselector = child.attrib['cssselector']
print(soup, sub_cssselector)
if child.get('attr'):
items[child.attrib['name']] = soup.select_one(sub_cssselector)[child.get('attr')]
else:
items[child.attrib['name']] = soup.select_one(sub_cssselector).string
return items
if __name__ == '__main__':
crawler = AutoCrawler()
for url in crawler.urls(url_template="http://a.b.cn/{type}?page={page}", page=[1, 2, 3], type=1):
print(url)
print(escape("<script>FM.view\((.+?)\);</script>"))
rule = '''
<root>
<parse type='re' restr="%s" name='abc' list='True' />
<parse type='json' jsonpath='jjj' list='False'/>
<parse type='soup' name='aname' cssselector='div' list='True'>
<parse type='soup' name='sub' cssselector='a.sub'/>
<parse type='soup' name='sup' cssselector='a.sup'/>
</parse>
</root>
''' % escape("<script>FM.view\((.+?)\);</script>")
doc = """<script>FM.view(
{"jjj":"<div>1<a class='sub' href='a.html'>111</a><a class='sup' href='a.html'>222</a></div>
<div>1<a class='sub' href='a.html'>111</a><a class='sup' href='a.html'>222</a></div>","xxx":2}
);</script>"""
doc_data = AutoCrawler().extract(doc, rule)
for d in doc_data:
print(1, 1, d)
| [
"2901180515@qq.com"
] | 2901180515@qq.com |
710eb29d865055eab7e561ae6bcc477dc460ff18 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/537_ComplexNumberMultiplication.py | 094cbd3b7c1735c92ca7bc620c1998bdb3a74513 | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | """
Given two strings representing two complex numbers.
You need to return a string representing their multiplication.
Note i2 = -1 according to the definition.
Example 1:
Input: "1+1i", "1+1i"
Output: "0+2i"
Explanation: (1 + i) * (1 + i) = 1 + i2 + 2 * i = 2i, and you
need convert it to the form of 0+2i.
Example 2:
Input: "1+-1i", "1+-1i"
Output: "0+-2i"
Explanation: (1 - i) * (1 - i) = 1 + i2 - 2 * i = -2i, and you
need convert it to the form of 0+-2i.
Note:
The input strings will not have extra blank.
The input strings will be given in the form of a+bi, where the
integer a and b will both belong to the range of [-100, 100].
And the output should be also in this form.
Your runtime beats 49.77 % of python submissions
"""
class Solution(object):
def complexNumberMultiply(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
# #Method 1
r1, im1 = a.split("+")
r2, im2 = b.split("+")
r1, r2 = int(r1), int(r2)
im1, im2 = int(im1[:-1]), int(im2[:-1])
return str((r1*r2) - (im1*im2)) + str("+") + str((r1*im2) + (r2*im1)) + "i"
| [
"kartikkannapur@gmail.com"
] | kartikkannapur@gmail.com |
db82c1c496fe6537c6a8907b9536571681a9557c | 98879590858368d5c32c389db31b761e479a0ab8 | /wsgiserver/test_server2.py | 65acfc0ccd5cfba74d4dc697f67fa500d68a87c0 | [] | no_license | zhiruchen/get_hands_dirty | 0bbf3719113dcf474baae571ecd55e5c234072a3 | af98a11bbeb8183428fe41cb7c9fa9a2354983e9 | refs/heads/master | 2020-04-17T12:00:44.275247 | 2017-06-24T16:28:43 | 2017-06-24T16:28:43 | 66,988,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # -*- coding: utf-8 -*-
import re
from cgi import escape
def index(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ['''Hello World Application
This is the hello world application:
`continue <hello/>`_
''']
def hello(environ, start_response):
args = environ['myapp.url_args']
if args:
subject = escape(args[0])
else:
subject = 'World'
start_response('200 OK', [('Content-Type', 'text/html')])
return ['''Hello %(subject)s
Hello %(subject)s!
''' % {'subject': subject}]
def not_found(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
return ['NOT FOUND']
# url到函数的映射
urls = [
(r'^$', index),
(r'hello/?$', hello),
(r'hello/(.+)$', hello)
]
def application(environ, start_response):
"""将当前请求的路径分发给不同的函数"""
path = environ.get('PATH_INFO', '').lstrip('/')
for regex, callback in urls:
match = re.search(regex, path)
if match is not None:
environ['myapp.url_args'] = match.groups()
return callback(environ, start_response)
return not_found(environ, start_response)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
srv = make_server('localhost', 8080, application)
srv.serve_forever() | [
"zhiruchen1992@foxmail.com"
] | zhiruchen1992@foxmail.com |
3e3a6e0c7c8a80543715611c08a374cc4e38615b | 114ea96f201cf3de825900fc632b6696433ae240 | /my_selenium/scroll.py | 538bf4ca52bdd1ef18230e06d1660bc08b65d708 | [] | no_license | iorilan/py_tiny_apps_playground | 2662133ec97aee0d495329a355fd22f7fb14ad2e | 70fb7965c1c27419ee83b6e9fd94d27ad47e4040 | refs/heads/master | 2023-02-08T12:06:47.406728 | 2020-12-31T15:08:24 | 2020-12-31T15:08:24 | 282,785,377 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """
http://the-internet.herokuapp.com/infinite_scroll
"""
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
def go():
y=100
# to keep browser open by passing in below option
ops = Options()
ops.add_experimental_option('detach',True)
browser = webdriver.Chrome(ChromeDriverManager().install(),chrome_options=ops)
browser.get("http://the-internet.herokuapp.com/infinite_scroll")
while(True):
time.sleep(0.3)
browser.execute_script(f'window.scrollTo(0, {y})')
y+=10
if __name__ == "__main__":
go() | [
"iorilan@hotmail.com"
] | iorilan@hotmail.com |
3f5d03e017f3cbb1e790acea900ca9157adbf6c2 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/type_field_py3.py | d522b7658ebd52d0c6959e7716a83589d1124bef | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,042 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TypeField(Model):
"""Information about a field of a type.
:param name: Gets or sets the name of the field.
:type name: str
:param type: Gets or sets the type of the field.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None:
super(TypeField, self).__init__(**kwargs)
self.name = name
self.type = type
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
e68925dd548561cc823aaf960ced2ccccccc2fda | 7e96ba20c25c6fb56af6ccd36b3b6d68df6a081c | /Shivi_khanuja/Django/belt_reviewer/apps/reviews/apps.py | 074ef3493d7c37a3c7566d311a3355dc3c20d54f | [] | no_license | CodingDojoDallas/python_september_2017 | 9d8cd74131a809bc6b13b7f465594cf8b1e2fd75 | f9f2f7b39bf9c4fceda3df5dc7424164aa5d5df5 | refs/heads/master | 2021-01-23T08:52:22.899994 | 2017-10-30T17:00:55 | 2017-10-30T17:00:55 | 102,558,291 | 2 | 14 | null | 2018-01-13T05:28:34 | 2017-09-06T03:28:38 | Python | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ReviewsConfig(AppConfig):
name = 'reviews'
| [
"shivalikhanuja.net@gmail.com"
] | shivalikhanuja.net@gmail.com |
b98467ba8ab928b8fe67d4607f0eaa6093bece31 | 14d1b3b0d2f4b21a62ae11bb30adaafca4aa0a76 | /demo_2.py | 106f3e70eebbcab59aad22dcb7825bc9f53da156 | [] | no_license | bdrummo6/CSPT13_Algorithms_GP | 9d64f83cdf566588b988d4108be655997d3b0dd0 | 907dc29d245ebb88d9ce07cf60249bf1ca862c50 | refs/heads/main | 2022-12-26T17:48:29.633204 | 2020-10-06T02:29:58 | 2020-10-06T02:29:58 | 301,584,067 | 0 | 0 | null | 2020-10-06T01:35:29 | 2020-10-06T01:35:28 | null | UTF-8 | Python | false | false | 4,375 | py | '''Explorer's Dilemna - aka the Knapsack Problem
After spending several days exploring a deserted island out in the Pacific,
you stumble upon a cave full of pirate loot! There are coins, jewels,
paintings, and many other types of valuable objects.
However, as you begin to explore the cave and take stock of what you've
found, you hear something. Turning to look, the cave has started to flood!
You'll need to get to higher ground ASAP.
There IS enough time for you to fill your backpack with some of the items
in the cave. Given that...
- you have 60 seconds until the cave is underwater
- your backpack can hold up to 50 pounds
- you want to maximize the value of the items you retrieve (since you can
only make one trip)
HOW DO YOU DECIDE WHICH ITEMS TO TAKE?
'''
import random
import time
from itertools import combinations
class Item:
def __init__(self, name, weight, value):
self.name = name
self.weight = weight
self.value = value
self.efficiency = 0
def __str__(self):
return f'{self.name}, {self.weight} lbs, ${self.value}'
small_cave = []
medium_cave = []
large_cave = []
def fill_cave_with_items():
'''Randomly generates Item objects and
creates caves of different sizes for testing
'''
names = ["painting", "jewel", "coin", "statue", "treasure chest",
"gold", "silver", "sword", "goblet", "hat"]
for _ in range(5):
n = names[random.randint(0,4)]
w = random.randint(1, 25)
v = random.randint(1, 100)
small_cave.append(Item(n, w, v))
for _ in range(15):
n = names[random.randint(0,4)]
w = random.randint(1, 25)
v = random.randint(1, 100)
medium_cave.append(Item(n, w, v))
for _ in range(25):
n = names[random.randint(0,4)]
w = random.randint(1, 25)
v = random.randint(1, 100)
large_cave.append(Item(n, w, v))
def print_results(items, knapsack):
'''Print out contents of what the algorithm
calculated should be added to the knapsack
'''
# print(f'\nItems in the cave:')
# for i in items:
# print(i)
print('\nBest items to put in knapsack: ')
for item in knapsack:
print(f'-{item}')
print(f'\nResult calculated in {time.time()-start:.5f} seconds\n')
print('\n-------------------------')
def naive_fill_knapsack(sack, items):
'''
Put highest value items in knapsack until full
'''
# TODO - sort items by value
# TODO - put most valuable items in knapsack until full
return sack
def brute_force_fill_knapsack(sack, items):
''' Try every combination to find the best'''
# TODO - generate all possible combinations of items
# TODO - calculate the value of all combinations
# find the combo with the highest value
return sack
def greedy_fill_knapsack(sack, items):
'''Use ratio of [value] / [weight]
to choose items for knapsack
'''
# TODO - calculate efficiencies
# TODO - sort items by efficiency
# TODO - put items in knapsack until full
return sack
# TESTS -
# Below are a series of tests that can be utilized to demonstrate
# the differences between each approach. Timing is included to give
# students an idea of how poorly some approaches scale. However,
# efficiency should also be formalized using Big O notation.
fill_cave_with_items()
knapsack = []
# Test 1 - Naive
print('\nStarting test 1, naive approach...')
items = large_cave
start = time.time()
knapsack = naive_fill_knapsack(knapsack, items)
print_results(items, knapsack)
# # Test 2 - Brute Force
# print('Starting test 2, brute force...')
# items = medium_cave
# start = time.time()
# knapsack = brute_force_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# Test 3 - Brute Force
# print('Starting test 3, brute force...')
# items = large_cave
# start = time.time()
# knapsack = brute_force_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# # Test 4 - Greedy
# print('Starting test 4, greedy approach...')
# items = medium_cave
# start = time.time()
# greedy_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# Test 5 - Greedy
# print('Starting test 5, greedy approach...')
# items = large_cave
# start = time.time()
# greedy_fill_knapsack(knapsack, items)
# print_results(items, knapsack) | [
"tomtarpeydev@gmail.com"
] | tomtarpeydev@gmail.com |
8f06b68aecddec25becf243ccd0383fb3198050f | a4ac55fca45c6d958513af43a1916c66fb25b931 | /pyatool/extras.py | 685dbf8e9c9ea0f853f63f2f2bd2fe94754931ad | [
"MIT"
] | permissive | guolong123/pyatool | d4393ebbfe3160400b0060e52f0d73c457212999 | 3d7e1bad67d990909dc590c1132c0f3811a2eee9 | refs/heads/master | 2020-04-24T02:19:07.642579 | 2019-02-20T05:59:01 | 2019-02-20T05:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,097 | py | import requests
import tempfile
import os
import re
import platform
SYSTEM_TYPE = platform.system()
def hello_world(toolkit=None):
"""
test only
:param toolkit:
:return:
"""
# toolkit contains some tools for development
# get device id
device_id = toolkit.device_id
print(device_id)
# use adb
toolkit.adb.run(['shell', 'ps'])
def install_from(url=None, path=None, toolkit=None):
"""
根据url或path安装apk
:param url:
:param path:
:param toolkit:
:return:
"""
if (not (url or path)) or (url and path):
raise TypeError('need url or path for installation, not both or none')
if url and not path:
return _install_from_url(url, toolkit)
else:
return _install_from_path(path, toolkit)
def _install_from_url(url, toolkit=None):
resp = requests.get(url)
if not resp.ok:
return False
with tempfile.NamedTemporaryFile('wb+', suffix='.apk', delete=False) as temp:
temp.write(resp.content)
temp.close()
toolkit.adb.run(['install', '-r', '-d', '-t', temp.name])
os.remove(temp.name)
return True
def _install_from_path(path, toolkit=None):
return toolkit.adb.run(['install', '-r', '-d', '-t', path])
def get_current_activity(toolkit=None):
"""
获取设备的当前activity名称
:param toolkit:
:return:
"""
# TODO if sh has installed in windows, command is same as linux ..
# filter_name = 'findstr' if SYSTEM_TYPE == 'Windows' else 'grep'
return toolkit.adb.run(['shell', 'dumpsys', 'activity', 'top', '|', 'grep', 'ACTIVITY'])
def is_installed(package_name, toolkit=None):
"""
检测包是否已被安装到设备上
:param package_name:
:param toolkit:
:return:
"""
return package_name in show_package(toolkit)
def show_package(toolkit=None):
"""
展示设备上所有已安装的包
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'pm', 'list', 'package'])
def clean_cache(package_name, toolkit=None):
"""
清理对应包的缓存
:param package_name:
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'pm', 'clear', package_name])
def uninstall(package_name, toolkit=None, save_data=None):
"""
卸载指定包
:param package_name:
:param toolkit:
:param save_data:
:return:
"""
if save_data:
cmd_list = ['uninstall', '-k', package_name]
else:
cmd_list = ['uninstall', package_name]
return toolkit.adb.run(cmd_list)
def switch_airplane(status, toolkit=None):
"""
切换飞行模式的开关
:param status: true or false
:param toolkit:
:return:
"""
base_setting_cmd = ["shell", "settings", "put", "global", "airplane_mode_on"]
base_am_cmd = ["shell", "am", "broadcast", "-a", "android.intent.action.AIRPLANE_MODE", "--ez", "state"]
if status:
base_setting_cmd += ['1']
base_am_cmd += ['true']
else:
base_setting_cmd += ['0']
base_am_cmd += ['false']
toolkit.adb.run(base_setting_cmd)
toolkit.adb.run(base_am_cmd)
def switch_wifi(status, toolkit=None):
"""
切换wifi开关
:param status: true or false
:param toolkit:
:return:
"""
base_cmd = ['shell', 'svc', 'wifi']
cmd_dict = {
True: base_cmd + ['enable'],
False: base_cmd + ['disable'],
}
toolkit.adb.run(cmd_dict[status])
def switch_screen(status, toolkit=None):
"""
点亮/熄灭 屏幕
:param status: true or false
:param toolkit:
:return:
"""
base_cmd = ['shell', 'input', 'keyevent']
cmd_dict = {
True: base_cmd + ['224'],
False: base_cmd + ['223'],
}
toolkit.adb.run(cmd_dict[status])
def input_text(content, toolkit=None):
"""
输入文字(不支持中文)
# TODO 中文输入 可以利用ADBKeyBoard (https://github.com/senzhk/ADBKeyBoard)
:param content:
:param toolkit:
:return:
"""
toolkit.adb.run(['shell', 'input', 'text', content])
def start_activity(package_name, activity_name=None, flag=None, toolkit=None):
"""
根据包名/活动名 启动应用/活动
:param package_name:
:param activity_name:
:param flag:
:param toolkit:
:return:
"""
base_cmd = ['shell', 'am', 'start']
if flag:
base_cmd.append(flag)
if not activity_name:
return toolkit.adb.run(base_cmd + [package_name])
return toolkit.adb.run(base_cmd + ['{}/.{}'.format(package_name, activity_name)])
def force_stop(package_name, toolkit=None):
"""
根据包名/活动名 停止应用
:param package_name:
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'am', 'force-stop', package_name])
def _clean_backstage(toolkit=None):
"""
(无效)清理后台应用/进程
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'am', 'kill-all'])
def send_broadcast(broadcast_name, flag=None, toolkit=None):
"""
发送广播
:param broadcast_name:
:param flag:
:param toolkit:
:return:
"""
base_cmd = ['shell', 'am', 'start']
if flag:
base_cmd.append(flag)
return toolkit.adb.run(base_cmd + [broadcast_name])
def input_key_event(key_code, toolkit=None):
"""
send key event
:param key_code:
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'input', 'keyevent', str(key_code)])
def swipe(x1, y1, x2, y2, toolkit=None):
"""
swipe from (x1, y1) to (x2, y2)
:param x1:
:param y1:
:param x2:
:param y2:
:param toolkit:
:return:
"""
x1, y1, x2, y2 = map(str, (x1, y1, x2, y2))
return toolkit.adb.run(['shell', 'input', 'swipe', x1, y1, x2, y2])
def click(x, y, toolkit=None):
"""
click (x, y)
:param x:
:param y:
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'input', 'tap', str(x), str(y)])
def get_ip_address(toolkit=None):
"""
获取android设备ip地址
:param toolkit:
:return:
"""
# TODO better design?
result = toolkit.adb.run(['shell', 'ifconfig', 'wlan0'])
return re.findall(r'inet\s*addr:(.*?)\s', result, re.DOTALL)[0]
def set_ime(ime_name, toolkit=None):
"""
设置输入法(需要使用adb shell ime list -a 获取输入法包名)
:param ime_name: 输入法包名 eg:com.android.inputmethod.pinyin/.PinyinIME
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'ime', 'set', ime_name])
def pull(src, target, toolkit=None):
"""
adb pull
:param src:
:param target:
:param toolkit:
:return:
"""
return toolkit.adb.run(['pull', src, target])
def push(src, target, toolkit=None):
"""
adb push
:param src:
:param target:
:param toolkit:
:return:
"""
return toolkit.adb.run(['push', src, target])
def is_connected(toolkit=None):
"""
check if device is connected
:param toolkit:
:return:
"""
try:
toolkit.adb.run(['shell', 'echo', '"hello"'])
except RuntimeError:
return False
return True
def make_dir(target_dir, toolkit=None):
"""
make empty dir
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'mkdir', target_dir])
def remove_dir(target, toolkit=None):
"""
clean dir, by running 'rm -rf'
:param target:
:param toolkit:
:return:
"""
return toolkit.adb.run(['shell', 'rm', '-rf', target])
__all__ = [
'hello_world',
'install_from',
'show_package',
'get_current_activity',
'is_installed',
'clean_cache',
'uninstall',
'switch_airplane',
'switch_wifi',
'input_text',
'start_activity',
'get_ip_address',
'set_ime',
'push',
'pull',
'send_broadcast',
'force_stop',
'input_key_event',
'swipe',
'click',
'is_connected',
'make_dir',
'remove_dir',
]
| [
"178894043@qq.com"
] | 178894043@qq.com |
9ec0d07f9d5c5478b3c58d1c523384b8f2a6aa8e | 1b3402ff6f4b531d13add47d94fa497666bdd3f1 | /authentication/serializers.py | 0cab463ae50c55a50877768f7d80bf9ebf71a3f8 | [] | no_license | falled10/breaking_brain_api | b4287ed8986986dea5621deb45529c6986af0f32 | b16324f5a0a6d102797944a02c3194d6f86b049e | refs/heads/master | 2023-06-03T07:25:11.862023 | 2021-06-20T16:00:43 | 2021-06-20T16:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | from django.utils.encoding import force_text, force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.conf import settings
from rest_framework import serializers
from authentication.models import User
from authentication.tokens import TokenGenerator
from breaking_brain_api.tasks import send_email
class SignUpSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'password')
extra_kwargs = {'username': {'required': True}}
def create(self, validated_data):
user = User(email=validated_data['email'], username=validated_data['username'])
user.set_password(validated_data['password'])
user.save()
token = f'{urlsafe_base64_encode(force_bytes(user.email))}.{TokenGenerator.make_token(user)}'
url = f'{settings.USER_ACTIVATION_URL}?token={token}'
context = {
'url': url,
'email': user.email
}
template = 'notifications/activate_user.html'
send_email.delay(
subject="Activate your ChooseOne account",
template=template,
recipients=[user.email],
context=context
)
return user
class ActivateUserSerializer(serializers.Serializer):
token = serializers.CharField()
def validate(self, attrs):
token = attrs['token']
error_text = f"Provided activation token '{token}' is not valid"
try:
email, token = token.split('.')
email = force_text(urlsafe_base64_decode(email))
except (TypeError, ValueError):
raise serializers.ValidationError(error_text)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
raise serializers.ValidationError(error_text)
if not TokenGenerator.check_token(user, token):
raise serializers.ValidationError(error_text)
attrs['email'] = email
return attrs
def activate_user(self):
user = User.objects.get(email=self.validated_data['email'])
user.is_active = True
user.save()
return user
| [
"jurakulek@gmail.com"
] | jurakulek@gmail.com |
dc2ed23ce665cf327bd85c3c52419c99ed15d825 | dd0f95e4b20112b3c8ec4ded3e2baa9acaa5f71e | /tests/tests.py | e1669df1959e52168d30d20d954360fad2da7d33 | [] | no_license | scotm/postcode_locator | 1cf6f0e075ff38a16806be82be4cb6b345cf6d73 | e9d6a486dc66cc05bc53157f91a71477abc1f449 | refs/heads/master | 2021-01-21T10:13:23.944029 | 2019-03-26T16:47:21 | 2019-03-26T16:47:21 | 31,371,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | from django.contrib.gis.geos import Point
from django.core.urlresolvers import reverse
from django.test import TestCase
from postcode_locator.models import PostcodeMapping
from postcode_locator.tests.factories import PostcodeMappingFactory
class MatchPostcodeTest(TestCase):
def setUp(self):
pass
def test_match_postcode(self):
point = Point(-3.1627269999999998, 55.9735760000000013)
p = PostcodeMappingFactory(postcode='EH67HQ', point=point)
with self.assertRaises(PostcodeMapping.DoesNotExist):
PostcodeMapping.match_postcode('')
with self.assertRaises(PostcodeMapping.DoesNotExist):
PostcodeMapping.match_postcode('AS2SAD')
self.assertEqual(unicode(p), 'EH67HQ')
self.assertEqual(PostcodeMapping.match_postcode('AS2SAD',raise_exceptions=False), None)
self.assertEqual(PostcodeMapping.match_postcode('EH6 7HQ').point, point)
self.assertEqual(PostcodeMapping.match_postcode('EH67HQ').point, point)
self.assertEqual(PostcodeMapping.match_postcode('EH6 7HQ').point, point)
def test_postcodemappingfactory(self):
p = PostcodeMappingFactory.create()
q = PostcodeMappingFactory.create()
self.assertNotEqual(p.point, q.point)
def test_page(self):
import json
p = PostcodeMappingFactory.create()
response = self.client.get(reverse('postcode_point'), data={'postcode':p.pk})
data = json.loads(response.content)
self.assertAlmostEqual(data['point'][0], p.point.x,places=6)
self.assertAlmostEqual(data['point'][1], p.point.y,places=6)
| [
"scott.scotm@gmail.com"
] | scott.scotm@gmail.com |
da0b1ea947109a1a2d20982c3c045252fa82861c | 9b14cd92abcae4e26df1afc3cc595e1edeffda82 | /ATLASalertsservice/alerts.py | 79452ceab9de5b49e82a4533e4f45dd73f8e03d2 | [] | no_license | ATLAS-Analytics/AnalyticsNotebooks | 8d374363c3a55067614a802c1928743e990bd419 | 6656470cf42c5211891013a9f3a0f9036749abe1 | refs/heads/master | 2020-04-15T15:50:44.881217 | 2018-10-19T18:00:05 | 2018-10-19T18:00:05 | 55,799,185 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import requests, httplib2, json, time
from oauth2client.service_account import ServiceAccountCredentials
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
import os
from subprocess import Popen, PIPE
class alerts:
def __init__(self):
SCOPE = ["https://spreadsheets.google.com/feeds"]
SECRETS_FILE = "AlertingService-879d85ad058f.json"
credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRETS_FILE, SCOPE)
http = credentials.authorize(httplib2.Http())
discoveryUrl = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
self.service = discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=discoveryUrl)
return
def addAlert(self, test, email, text):
spreadsheetId = '19bS4cxqBEwr_cnCEfAkbaLo9nCLjWnTnhQHsZGK9TYU'
rangeName = test + '!A1:C1'
myBody = {u'range': rangeName, u'values': [[time.strftime("%Y/%m/%d %H:%M:%S"), email, text]], u'majorDimension': u'ROWS'}
cells = self.service.spreadsheets().values().append(spreadsheetId=spreadsheetId, range=rangeName,valueInputOption='RAW', insertDataOption='INSERT_ROWS', body=myBody).execute()
return
def sendMail(self, test, to, body):
msg = MIMEText(body)
msg['Subject'] = test
msg['From'] = 'AAAS@mwt2.org'
msg['To'] = to
p = Popen(["/usr/sbin/sendmail", "-t", "-oi", "-r AAAS@mwt2.org"], stdin=PIPE)
print(msg.as_string())
p.communicate(msg.as_string().encode('utf-8'))
def send_HTML_mail(self, test, to, body, subtitle="", images=[]):
msg = MIMEMultipart('related')
msg['Subject'] = test
msg['From'] = 'AAAS@mwt2.org'
msg['To'] = to
msgAlternative = MIMEMultipart('alternative')
msg.attach(msgAlternative)
html = open("index.htm", "r").read()
image_template = open("image_template.htm", "r").read()
html = html.replace('TheMainTitle',test)
html = html.replace('TheSubtitle',subtitle)
html = html.replace('MyBody', body)
html = html.replace('TheImagePlaceholder1', image_template * int( (len(images)+1) / 2 ) )
html = html.replace('TheImagePlaceholder2', image_template * int(len(images)/2) )
for ind,i in enumerate(images):
#print("Adding image:", i)
html = html.replace('FigureTitle',i['Title'],2) #appears twice per figure
html = html.replace('FigureFilename',"cid:image"+str(ind),1)
html = html.replace('FigureDescription',i['Description'],1)
link=''
if 'Link' in i:
link=i['Link']
html = html.replace('FigureLink',link,1)
img_data = open(i['Filename'], 'rb').read()
image = MIMEImage(img_data, name=i['Filename'])
image.add_header('Content-ID', '<image'+str(ind)+'>')
msg.attach(image)
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(body, 'plain')
part2 = MIMEText(html, 'html')
msgAlternative.attach(part1)
msgAlternative.attach(part2)
p = Popen(["/usr/sbin/sendmail", "-t", "-oi", "-r AAAS@mwt2.org"], stdin=PIPE)
#print(msg.as_string())
p.communicate(msg.as_string().encode('utf-8'))
| [
"ivukotic@cern.ch"
] | ivukotic@cern.ch |
c143278afc362b186e45a61344e467ce938f73cf | ddd7e91dae17664505ea4f9be675e125337347a2 | /unused/2014/preprocess/preprocess_ndpi.py | e183a7517dc9cfbe7be7907701c28afb1afe6eb4 | [] | no_license | akurnikova/MouseBrainAtlas | 25c4134bae53827167e4b54ba83f215aec9f2d85 | ed1b5858467febdaed0a58a1a742764d214cc38e | refs/heads/master | 2021-07-15T17:17:19.881627 | 2019-02-22T06:00:17 | 2019-02-22T06:00:17 | 103,425,463 | 0 | 0 | null | 2018-04-27T19:08:02 | 2017-09-13T16:45:56 | Jupyter Notebook | UTF-8 | Python | false | false | 5,345 | py | #!/usr/bin/python
def execute_command(cmd):
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError as e:
print >>sys.stderr, "Execution failed:", e
raise e
import os
import sys
import shutil
import glob
import argparse
sys.path.append('/home/yuncong/morphsnakes')
import morphsnakes
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from skimage.morphology import remove_small_objects
from skimage.measure import regionprops, label
ndpi_dir = os.environ['GORDON_NDPI_DIR']
temp_dir = os.environ['GORDON_TEMP_DIR']
data_dir = os.environ['GORDON_DATA_DIR']
repo_dir = os.environ['GORDON_REPO_DIR']
result_dir = os.environ['GORDON_RESULT_DIR']
labeling_dir = os.environ['GORDON_LABELING_DIR']
ndpisplit = '/oasis/projects/nsf/csd181/yuncong/ndpisplit'
def foreground_mask_morphsnakes(img):
gI = morphsnakes.gborders(img, alpha=20000, sigma=1)
mgac = morphsnakes.MorphGAC(gI, smoothing=2, threshold=0.3, balloon=-3)
mgac.levelset = np.ones_like(img)
mgac.levelset[:3,:] = 0
mgac.levelset[-3:,:] = 0
mgac.levelset[:,:3] = 0
mgac.levelset[:,-3:] = 0
num_iters = 1000
for i in xrange(num_iters):
msnake.step()
if np.sum(msnake.levelset - previous_levelset) < 3:
break
previous_levelset = msnake.levelset
blob_labels, n_labels = label(msnake.levelset, neighbors=4, return_num=True)
blob_props = regionprops(blob_labels + 1)
largest_blob = np.argmax([p.area for p in blob_props])
mask = np.zeros_like(msnake.levelset, dtype=np.bool)
mask[blob_labels == largest_blob] = 1
min_size = 40
mask = remove_small_objects(mask, min_size=min_size, connectivity=1, in_place=False)
return mask
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("stack", type=str, help="choose what stack of images to crop and resolution, ex: RS141")
parser.add_argument("-r","--rotation",type=str,help="how each slice will be rotated",default = None)
parser.add_argument("-m","--mirror",type=str,help="to mirror horizontal type 'flop', for vertical type 'flip'",default=None)
args = parser.parse_args()
stack = args.stack
stack_temp_dir = os.path.join(temp_dir, stack)
if not os.path.exists(stack_temp_dir):
os.makedirs(stack_temp_dir)
# Split ndpi files
for ndpi_file in os.listdir(os.path.join(ndpi_dir, stack)):
if not ndpi_file.endswith('ndpi'): continue
execute_command(ndpisplit + ' ' + ndpi_file)
for level in ['macro', 'x0.078125', 'x0.3125', 'x1.25', 'x5', 'x20']:
res_temp_dir = os.path.join(stack_temp_dir, level)
os.mkdir(res_temp_dir)
for f in glob.glob('*_%s_z0.tif'%level):
shutil.move(f, res_temp_dir)
map_dir = os.mkdir(os.path.join(stack_temp_dir, 'map'))
for f in glob.glob('*map*'%level):
shutil.move(f, res_temp_dir)
# Crop sections out of whole-slide images, according to manually produced bounding boxes information
stack_data_dir = os.path.join(data_dir, stack)
if not os.path.exists(stack_data_dir):
os.makedirs(stack_data_dir)
for resol in ['x0.3125', 'x1.25', 'x5', 'x20']:
res_data_dir = os.path.join(stack_data_dir, resol)
if not os.path.exists(res_data_dir):
os.makedirs(res_data_dir)
section_ind = 0
res_temp_dir = os.path.join(stack_temp_dir, resol)
for slide_im_filename in os.listdir(res_temp_dir):
_, slide_str, _ = slide_im_filename.split('_')[:3]
slide_im_path = os.path.join(res_temp_dir, slide_im_filename)
img_id = subprocess.check_output(["identify", slide_im_path]).split()
tot_w, tot_h = map(int, img_id[2].split('x'))
bb_txt = os.path.join(repo_dir, 'preprocessing/bounding_box_data', stack, stack + '_' + slide_str + '.txt')
for x_perc, y_perc, w_perc, h_perc in map(split, open(bb_txt, 'r').readlines()):
(x,y,w,h) = (int(tot_w * float(x_perc)), int(tot_h * float(y_perc)),
int(tot_w * float(w_perc)), int(tot_h * float(h_perc)))
geom = str(w) + 'x' + str(h) + '+' + str(x) + '+' + str(y)
section_ind += 1
section_im_filename = '_'.join([stack, resol, '%04d' % section_ind]) +'.tif'
section_im_path = os.path.join(res_data_dir, section_im_filename)
# Crops the image according to bounding box data
cmd1 = "convert %s -crop %s %s" % (slide_im_path, geom, section_im_path)
execute_command(cmd1)
# Rotates the cropped image if specified
if args.rotation is not None:
cmd2 = "convert %s -page +0+0 -rotate %s %s" % (section_im_path, args.rotation, section_im_path)
execute_command(cmd2)
# Reflects the rotated image if specified
if args.mirror is not None:
cmd3 = "convert %s -%s %s" % (section_im_path, args.mirror, section_im_path)
execute_command(cmd3)
print "Processed %s" % section_im_filename
section_im = imread(section_im_path)
mask = foreground_mask_morphsnakes(section_im)
mask_filename = '_'.join([stack, resol, '%04d' % section_ind]) +'_mask.png'
mask_path = os.path.join(res_data_dir, mask_filename)
imsave(mask_path, mask) | [
"cyc3700@gmail.com"
] | cyc3700@gmail.com |
5dd5ce0022a9d3e8a1091fad2365d5c27b5e5451 | 34fea597d7327536987b2b8342e4ff26f294e9ed | /Source/DataDictionary/txt2xml.py | 8f1b9a8871488c1a4a03bb96f9c2668ac16be8b8 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hsorby/gdcm | bce8bcf436682bb8a5f1ff2b549a07d10943cf62 | cd1b5fc2872893d17eb3fe7abc17850e1fdff120 | refs/heads/master | 2021-01-20T22:35:25.037034 | 2011-12-15T22:31:03 | 2011-12-15T22:31:03 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 15,070 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import re,os
"""
GEMS
This parser parse a table formatted like this:
Attribute Name Tag VR VM
"""
class TextParser:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Za-z0-9&{}=+ «»%;#()./,_:<>-]+)\s+\(?([0-9A-Fa-fn]+),\s?([0-9A-Fa-fyxX]+)\)?\s+([A-Z][A-Z])\s+([0-9Nn-]+)\s*$")
patt1 = re.compile("^\s*([A-Za-z0-9&{}=+ ;%#\[\]()./,_:<>-]+)\s+\(?([0-9A-Fa-f]+),\s?([0-9A-Fa-fyxX]+)\)?\s+([1-3C]+)\s+([A-Z][A-Z])\s+([0-9Nn-]+)\s*$")
patt2 = re.compile( "^\s*([Table ]*[A-Z1-9.:-]+)\s+([A-Za-z -]+)\s+\(([A-Z0-9_]+)\)\s*$")
#patt3 = re.compile( '^\s*Private Creator Identification\s*\((["A-Za-z0-9() ./])\)\s*$' )
patt3 = re.compile( '^\s*Private Creator Identification\s*\("?(.*)"?\)\)?\s*$' )
patt4 = re.compile( '^\s*Private Creator Identification\s*([A-Z0-9_]+)\s*$' )
m = patt.match(line)
m1 = patt1.match(line)
m2 = patt2.match(line)
m3 = patt3.match(line)
m4 = patt4.match(line)
#print line
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" name=\"%s\"/>"%(m.group(2),m.group(3),m.group(4),m.group(5),m.group(1).rstrip())
#dicom = m.group(1) + ' ' + m.group(2) + ' ' + m.group(3) + ' ' + m.group(4)
#print dicom
outLines.append( dicom )
elif m1:
# <entry group="0001" element="0001" vr="LO" vm="1" type="1C"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" type=\"%s\" name=\"%s\"/>"%(m1.group(2),m1.group(3),m1.group(5),m1.group(6),m1.group(4),m1.group(1).rstrip())
#dicom = m.group(1) + ' ' + m.group(2) + ' ' + m.group(3) + ' ' + m.group(4)
#print dicom
outLines.append( dicom )
elif m2:
# <dict edition="2007" url="http://??" ref="Table A-16" name="Private Creator Identification - Xeleris" owner="GEMS_GENIE_1">
s = "</dict><dict ref=\"%s\" name=\"%s\" owner=\"%s\">"%(m2.group(1),m2.group(2).rstrip(),m2.group(3))
s += '\n'
outLines.append( s )
elif m3:
s = "</dict><dict ref=\"%s\" name=\"%s\" owner=\"%s\">"%("??","??",m3.group(1))
s += '\n'
outLines.append( s )
elif m4:
s = "</dict><dict ref=\"%s\" name=\"%s\" owner=\"%s\">"%("??","??",m4.group(1))
s += '\n'
outLines.append( s )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
GEMS
This parser parse a table formatted like this:
Grp Elm VR VM Type Definition
"""
class TextParser2:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([0-9A-Z]+)\s+([0-9A-Zx]+)\s+([A-Z][A-Z])\s+([1-9SNn-]+)\s+([1-9])\s+([A-Za-z0-9 ()._,/#>-]+)\s*$")
patt2 = re.compile( "^\s*([A-Z1-9.-]+)\s*([A-Za-z -]+)\s*$")
m = patt.match(line)
m2 = patt2.match(line)
#print line
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" type=\"%s\" name=\"%s\"/>"%(m.group(1),m.group(2),m.group(3),m.group(4),m.group(5),m.group(6).rstrip())
#dicom = m.group(1) + ' ' + m.group(2) + ' ' + m.group(3) + ' ' + m.group(4)
#print dicom
outLines.append( dicom )
elif m2:
# <dict edition="2007" url="http://??" ref="Table A-16" name="Private Creator Identification - Xeleris" owner="GEMS_GENIE_1">
s = "<dict ref=\"%s\" name=\"%s\" owner=\"%s\">"%(m2.group(1),m2.group(2).rstrip(),"")
s += '\n'
outLines.append( s )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
SIEMENS:
This parser parse a table formatted like this:
Tag Private Owner Code Name VR VM
"""
class TextParser3:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*\(([0-9A-Z]+),([0-9A-Zx]+)\)\s+([A-Za-z0-9./:_ -]+)\s+\|\s+([A-Za-z0-9 ()._,/#>-]+)\s+([A-Z][A-Z]_?O?W?)\s+([0-9n-]+)\s*$")
patt2 = re.compile( "^\s*([A-Z1-9.-]+)\s*([A-Za-z -]+)\s*$")
m = patt.match(line)
m2 = patt2.match(line)
#print line
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" owner=\"%s\" name=\"%s\"/>"%(m.group(1),m.group(2),m.group(5),m.group(6),m.group(3).rstrip(),m.group(4).rstrip())
#dicom = m.group(1) + ' ' + m.group(2) + ' ' + m.group(3) + ' ' + m.group(4)
#print dicom
outLines.append( dicom )
elif m2:
# <dict edition="2007" url="http://??" ref="Table A-16" name="Private Creator Identification - Xeleris" owner="GEMS_GENIE_1">
s = "<dict ref=\"%s\" name=\"%s\" owner=\"%s\">"%(m2.group(1),m2.group(2).rstrip(),"")
s += '\n'
outLines.append( s )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
PHILIPS: (see mr91.pdf)
Diffusion B-Factor 2001,xx03 VR = FL, VM = 1 Dimension: s/mm2
Indicates the Diffusion coefficient.
"""
class TextParser4:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Za-z0-9> -]+)\s+([0-9]+),([0-9A-Fx]+)\s+VR = ([A-Z][A-Z]), VM = ([0-9n-]+)\s+(.*)\s*$")
patt1 = re.compile("^\s*([A-Za-z0-9()> -]+)\s+([0-9]+),([0-9A-Fx]+)\s+Value Representation = ([A-Z][A-Z]), Multiplicity = ([0-9n-]+)(.*)\s*$")
patt2 = re.compile("^\s*[STUDYSERIES]+\s+\(([0-9]+),([0-9]+)\)\s+([A-Za-z ]+)\s*$")
m = patt.match(line)
m1 = patt1.match(line)
m2 = patt2.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" name=\"%s\"/>"%(m.group(2),m.group(3),m.group(4),m.group(5),m.group(1).rstrip())
#print dicom
outLines.append( dicom )
elif m1:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" name=\"%s\"/>"%(m1.group(2),m1.group(3),m1.group(4),m1.group(5),m1.group(1).rstrip())
#print dicom
outLines.append( dicom )
elif m2:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" name=\"%s\" />"%(m2.group(1),m2.group(2),m2.group(3).rstrip())
#print dicom
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
PHILIPS: (see 453567994381_B.pdf)
7053,0010 LO Private Creator Data element 1
"""
class TextParser5:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^([\s>]*)([0-9]+),([0-9A-Fx]+)\s+([A-Z][A-Z])\s+([A-Za-z0-9.?(,)> -]+)\s+([0-9n-]+)\s*$")
m = patt.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" name=\"%s%s\"/>\n"%(m.group(2),m.group(3),m.group(4),m.group(6),m.group(1).lstrip(),m.group(5).rstrip())
#print dicom
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
PHILIPS: (see 9605_0132RevC.pdf)
Attribute Tag Type VR VM
ADAC Header Signature 0019, 0010 3 LO 2
"""
class TextParser6:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Za-z0-9 #()./,_:>-]+)\s+([0-9A-Z]+),\s?([0-9A-ZxX]+)\s+([1-3C]+)\s+([A-Z][A-Z])\s+([0-9Nn-]+)\s*$")
m = patt.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" type=\"%s\" name=\"%s\" />"%(m.group(2),m.group(3),m.group(5),m.group(6),m.group(4),m.group(1).rstrip())
#print dicom
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
PHILIPS: (see MR_System_R1_5_dcs.pdf
Number of PC Directions 2001,1016 SS 2, USER -
"""
class TextParser7:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Za-z0-9'./> -]+)\s+\(?([0-9A-F]+),([0-9A-FxXY]+)\)?\s+([A-Z][A-Z])\s+([1-3C]+)?,?.*\s*$")
m = patt.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" type=\"%s\" name=\"%s\" />"%(m.group(2),m.group(3),m.group(4),m.group(5),m.group(1).rstrip())
#print dicom
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
AGFA
IMPAX object document (0029,xx00) OB 1 Mitra Object Document 1.0
"""
class TextParser8:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Za-z0-9()> -]+)\s+\(([0-9]+),([0-9A-Fx]+)\)\s+([A-Z][A-Z])\s+([1-9n-]+)\s+([A-Za-z_0-9. ]+)\s*$")
m = patt.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" owner=\"%s\" name=\"%s\" />"%(m.group(2),m.group(3),m.group(4),m.group(5),m.group(6),m.group(1).rstrip())
#print dicom
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
SIEMENS
Parse a diction.pfl file
Pixel Overflow Flag 1 Pixel Overflow,7FE3,SIEMENS MED NM,1B,SS,1
"""
class TextParser9:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^([A-Z0-9a-z()=/:%. -]+),([0-9A-F]+),([A-Za-z0-9. -]+),([0-9A-F][0-9A-F]),([A-Z][A-Z]),([1-9N-]+)$")
patt1 = re.compile("^[^,]+,([0-9A-F]+),.*$")
m = patt.match(line)
m1 = patt1.match(line)
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" vm=\"%s\" owner=\"%s\" name=\"%s\" />"%(m.group(2),m.group(4),m.group(5),m.group(6),m.group(3),m.group(1).rstrip())
#print dicom
outLines.append( dicom )
else:
#print line
n = eval( '0x' + m1.group(1) )
#print m1.group(1)
if( not (n % 2 == 0) ):
print n
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
"""
Storage.pdf
Attribute Name Group Byte Type VR Attribute Description
"""
class TextParser10:
def __init__(self, inputfilename, outputfilename):
self._InputFilename = ''
self._OutputFilename = ''
def Parse(self):
infile = file(inputfilename, 'r')
outLines = []
for line in infile.readlines():
patt = re.compile("^\s*([A-Z.a-z -]+[1-2]?)\s+([0-9A-Z]+)\s+([0-9A-Zx]+)\s+([1-3])\s+([A-Z][A-Z])\s+.*$")
m = patt.match(line)
#print line
if m:
# <entry group="0001" element="0001" vr="LO" vm="1" owner="Private Creator"/>
dicom = "<entry group=\"%s\" element=\"%s\" vr=\"%s\" type=\"%s\">"%(m.group(2),m.group(3),m.group(5),m.group(4))
#dicom = m.group(1) + ' ' + m.group(2) + ' ' + m.group(3) + ' ' + m.group(4)
#print dicom
dicom += '\n'
dicom += "<description>%s</description>\n</entry>\n"%m.group(1).rstrip()
outLines.append( dicom )
else:
print line
#print self.Reformat(line)
#outLines.append( self.Reformat(line) + '\n' )
outfile = file(outputfilename, 'w')
outfile.writelines( outLines )
outfile.close()
if __name__ == "__main__":
argc = len(os.sys.argv )
if ( argc < 3 ):
print "Sorry, wrong list of args"
os.sys.exit(1) #error
inputfilename = os.sys.argv[1]
outputfilename = os.sys.argv[2]
tp = TextParser10(inputfilename,outputfilename);
tp.Parse()
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
77058cba020de410735849d1908ef6510f24db88 | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /company/amazon/graphs/connected_graphs.py | e8044bd1e1dd19d5a95ccc77e3a4796e68f2fa77 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | class Solution:
def dfs(self, isConnected):
visited = [0] * len(isConnected)
res = 0
def traverse(visited, i):
for j in range(len(isConnected)):
if isConnected[i][j] == 1 and visited[j] == 0:
visited[j] = 1
traverse(visited, j)
for i in range(len(isConnected)):
if visited[i] == 0:
traverse(visited, i)
res += 1
return res
def findCircleNum(self, isConnected: List[List[int]]) -> int:
return self.dfs(isConnected)
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
13866286b72f821ce21e6968b0f7be80736b3dc1 | 24b2f3f5f49ed19cf7fd3dcd433d6b72806e08cf | /python/sorting_and_searching/0363_Max_Sum_of_Rectangle_No_Larger_Than_K.py | 680321f332bc9186ac9d8af3bffac598b0194d6a | [] | no_license | lizzzcai/leetcode | 97089e4ca8c3c53b5a4a50de899591be415bac37 | 551cd3b4616c16a6562eb7c577ce671b419f0616 | refs/heads/master | 2021-06-23T05:59:56.928042 | 2020-12-07T03:07:58 | 2020-12-07T03:07:58 | 162,840,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | '''
27/05/2020
363. Max Sum of Rectangle No Larger Than K - Hard
Tag: Binary Search, Dynamic Programming, Queue
Given a non-empty 2D matrix matrix and an integer k, find the max sum of a rectangle in the matrix such that its sum is no larger than k.
Example:
Input: matrix = [[1,0,1],[0,-2,3]], k = 2
Output: 2
Explanation: Because the sum of rectangle [[0, 1], [-2, 3]] is 2,
and 2 is the max number no larger than k (k = 2).
Note:
The rectangle inside the matrix must have an area > 0.
What if the number of rows is much larger than the number of columns?
'''
from typing import List
import collections
import math
import bisect
# Solution
class Solution1:
def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
P = []
for row in matrix:
p = [0]
for x in row:
p.append(p[-1]+x)
P.append(p)
res = -math.inf
for c1 in range(len(P[0])):
for c2 in range(c1+1, len(P[0])):
# 2-D prefix sum between c1 and c2
curr_sum, curr_P = 0, [0]
for r in range(len(P)):
curr_sum += P[r][c2]-P[r][c1]
idx = bisect.bisect_left(curr_P, curr_sum - k)
if idx < len(curr_P):
res = max(res, curr_sum - curr_P[idx])
if res == k:
return res
bisect.insort_left(curr_P, curr_sum)
return res
class Solution2:
def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
P = []
for row in matrix:
p = [0]
for x in row:
p.append(p[-1]+x)
P.append(p)
res = -math.inf
for c1 in range(len(P[0])):
for c2 in range(c1+1, len(P[0])):
# 2-D prefix sum between c1 and c2
curr_sum, curr_P = 0, [0]
for r in range(len(P)):
curr_sum += P[r][c2]-P[r][c1]
idx = bisect.bisect_left(curr_P, curr_sum - k)
if idx < len(curr_P):
res = max(res, curr_sum - curr_P[idx])
if res == k:
return res
bisect.insort_left(curr_P, curr_sum)
return res
# Unit Test
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_testCase(self):
for Sol in [Solution1()]:
func = Sol.maxSumSubmatrix
self.assertEqual(func([[1,0,1],[0,-2,3]], 2), 2)
self.assertEqual(func([[2,2,-1]], 0), -1)
self.assertEqual(func([[-9,-6,-1,-7,-6,-5,-4,-7,-6,0],[-4,-9,-4,-7,-7,-4,-4,-6,-6,-6],[-2,-2,-6,-7,-7,0,-1,-1,-8,-2],[-5,-3,-1,-6,-1,-1,-6,-3,-4,-8],[-4,-1,0,-8,0,-9,-8,-7,-2,-4],[0,-3,-1,-7,-2,-5,-5,-5,-8,-7],[-2,0,-8,-2,-9,-2,0,0,-9,-6],[-3,-4,-3,-7,-2,-1,-9,-5,-7,-2],[-8,-3,-2,-8,-9,0,-7,-8,-9,-3],[-7,-4,-3,-3,-3,-1,0,-1,-8,-2]], -321), -323)
if __name__ == '__main__':
unittest.main() | [
"lilcolinn@gmail.com"
] | lilcolinn@gmail.com |
9b06082fd362d65269019bd114f3fabdf83ff4c5 | 740ed147112eddc2581504f5a8c5c4cb4dbe32f6 | /pprof/projects/pprof/sqlite3.py | e407453388a8fe510e87f19c8ee2998d4bd0e9c8 | [] | no_license | CIB/pprof-study | 3765499c8111dfcf6f690ea192b9ce235f1f28c4 | 9d6b995ba21ced3fa39327eff6dc34274e3b3b56 | refs/heads/master | 2021-01-22T01:33:53.452195 | 2016-02-15T14:45:14 | 2016-02-15T14:45:14 | 37,426,320 | 0 | 0 | null | 2015-06-14T19:28:01 | 2015-06-14T19:28:00 | null | UTF-8 | Python | false | false | 2,569 | py | from pprof.projects.pprof.group import PprofGroup
from os import path
from plumbum import local
class SQLite3(PprofGroup):
""" SQLite3 """
NAME = 'sqlite3'
DOMAIN = 'database'
src_dir = "sqlite-amalgamation-3080900"
src_file = src_dir + ".zip"
src_uri = "http://www.sqlite.org/2015/" + src_file
def download(self):
from pprof.utils.downloader import Wget
from plumbum.cmd import unzip
with local.cwd(self.builddir):
Wget(self.src_uri, self.src_file)
unzip(self.src_file)
self.fetch_leveldb()
def configure(self):
pass
def build(self):
from pprof.utils.compiler import lt_clang
from pprof.utils.run import run
with local.cwd(self.builddir):
sqlite_dir = path.join(self.builddir, self.src_dir)
clang = lt_clang(self.cflags, self.ldflags,
self.compiler_extension)
with local.cwd(sqlite_dir):
run(clang["-fPIC", "-I.", "-c", "sqlite3.c"])
run(clang["-shared", "-Wl,-soname,libsqlite3.so.0", "-o",
"libsqlite3.so", "sqlite3.o", "-ldl"])
with local.cwd(self.builddir):
self.build_leveldb()
def fetch_leveldb(self):
src_uri = "https://github.com/google/leveldb"
with local.cwd(self.builddir):
from pprof.utils.downloader import Git
Git(src_uri, "leveldb.src")
def build_leveldb(self):
from pprof.utils.compiler import lt_clang, lt_clang_cxx
from pprof.utils.run import run
from plumbum.cmd import make
sqlite_dir = path.join(self.builddir, self.src_dir)
leveldb_dir = path.join(self.builddir, "leveldb.src")
# We need to place sqlite3 in front of all other flags.
self.ldflags = ["-L", sqlite_dir] + self.ldflags
self.cflags = ["-I", sqlite_dir] + self.cflags
clang_cxx = lt_clang_cxx(self.cflags, self.ldflags)
clang = lt_clang(self.cflags, self.ldflags)
with local.cwd(leveldb_dir):
with local.env(CXX=str(clang_cxx), CC=str(clang)):
run(make["clean", "out-static/db_bench_sqlite3"])
def run_tests(self, experiment):
from pprof.project import wrap
from pprof.utils.run import run
leveldb_dir = path.join(self.builddir, "leveldb.src")
with local.cwd(leveldb_dir):
sqlite = wrap(
path.join(leveldb_dir, "out-static", "db_bench_sqlite3"), experiment)
run(sqlite)
| [
"simbuerg@fim.uni-passau.de"
] | simbuerg@fim.uni-passau.de |
4b2cd183f9317d769c5c7ab8699d92dc79d30b09 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /grow_bert/lowcost/layers/resolution_layer.py | b35bb95842ab66c6af0c41c742b726d6721f1191 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 4,229 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pooling layer to reduce input sequence length."""
import tensorflow as tf
from official.modeling import tf_utils
class MaskPoolLayer(tf.keras.layers.Layer):
"""Mask pooling layer."""
def __init__(self, pool_size, nocls=True, **kwargs):
super(MaskPoolLayer, self).__init__(**kwargs)
self.nocls = nocls
self.pool_size = pool_size
assert self.pool_size > 0
def call(self, input_tensor, unpooled_len=0):
if self.pool_size == 1:
return input_tensor
batch_size, seq_len = tf_utils.get_shape_list(input_tensor, expected_rank=2)
# reshape tensor in order to use tf.nn.pool
reshaped_tensor = tf.reshape(input_tensor, [batch_size, seq_len, 1])
if self.nocls:
tensor_to_pool = reshaped_tensor[:, 1:, :]
else:
tensor_to_pool = reshaped_tensor
if unpooled_len > 0:
tensor_to_pool = tensor_to_pool[:, :-unpooled_len, :]
pooled_tensor = tf.nn.max_pool(
tensor_to_pool,
ksize=self.pool_size,
strides=self.pool_size,
padding='SAME')
if self.nocls:
pooled_tensor = tf.concat([reshaped_tensor[:, 0:1, :], pooled_tensor],
axis=1)
if unpooled_len > 0:
pooled_tensor = tf.concat(
[pooled_tensor, reshaped_tensor[:, -unpooled_len:, :]], axis=1)
pooled_tensor = tf.reshape(pooled_tensor, [batch_size, -1])
return pooled_tensor
class EmbedPoolLayer(tf.keras.layers.Layer):
"""Embedding pooling layer."""
def __init__(self, hidden_size, pool_size, pool_name=None, **kwargs):
super(EmbedPoolLayer, self).__init__(**kwargs)
self.pool_name = pool_name
self.pool_size = pool_size
self.hidden_size = hidden_size
if self.pool_name == 'concat':
self.embedding_projection_dense = tf.keras.layers.Dense(
self.hidden_size, name='resolution/projection_dense')
def call(self, input_tensor, unpooled_len=0):
if self.pool_size <= 1 or self.pool_name is None:
return input_tensor
if self.pool_name == 'concat':
if unpooled_len == 0:
tensor_to_pool = input_tensor
else:
tensor_to_pool = input_tensor[:, :-unpooled_len, :]
else:
if unpooled_len == 0:
tensor_to_pool = input_tensor[:, 1:, :]
else:
tensor_to_pool = input_tensor[:, 1:-unpooled_len, :]
if self.pool_name == 'mean':
pooled_tensor = tf.nn.avg_pool(
tensor_to_pool,
ksize=self.pool_size,
strides=self.pool_size,
padding='SAME')
pooled_tensor = tf.concat([input_tensor[:, 0:1, :], pooled_tensor],
axis=1)
elif self.pool_name == 'max':
pooled_tensor = tf.nn.max_pool(
tensor_to_pool,
ksize=self.pool_size,
strides=self.pool_size,
padding='SAME')
pooled_tensor = tf.concat([input_tensor[:, 0:1, :], pooled_tensor],
axis=1)
elif self.pool_name == 'concat':
batch_size, seq_len, embed_dim = tensor_to_pool.shape
assert seq_len % self.pool_size == 0, (f'seqlen: {seq_len}, poolsize: '
f'{self.pool_size}')
pooled_len = seq_len // self.pool_size
pooled_tensor = tf.reshape(
tensor_to_pool, [batch_size, pooled_len, self.pool_size * embed_dim])
pooled_tensor = self.embedding_projection_dense(pooled_tensor)
elif self.pool_name is not None:
raise NotImplementedError
if unpooled_len > 0:
pooled_tensor = tf.concat(
[pooled_tensor, input_tensor[:, -unpooled_len:, :]], axis=1)
return pooled_tensor
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
e3444263f858ef21d7e3d42b693a6d4a0113c448 | e122b7aa4f2f0b09962c2a3580b9a440ed1dbcce | /aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/BatchGetEdgeInstanceDriverConfigsRequest.py | fc162a96be2081c218889caa3105e475c54d4f0c | [
"Apache-2.0"
] | permissive | laashub-soa/aliyun-openapi-python-sdk | 0a07805972fb5893f933951960d0f53fc2938df9 | c0d98a5363225fc8ad4aee74e30d10f62403319b | refs/heads/master | 2023-02-28T00:17:05.934082 | 2021-02-05T09:46:35 | 2021-02-05T09:46:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class BatchGetEdgeInstanceDriverConfigsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'BatchGetEdgeInstanceDriverConfigs','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DriverIds(self):
return self.get_query_params().get('DriverIds')
def set_DriverIds(self, DriverIdss):
for depth1 in range(len(DriverIdss)):
if DriverIdss[depth1] is not None:
self.add_query_param('DriverIds.' + str(depth1 + 1) , DriverIdss[depth1])
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
52020455331440cfe02942db01afd18aed00716a | 717558d6a075163294054bd5aea4ef3234df23ad | /models_all/st_test6.py | 020b56f9d7347dd2d7a7fd204cf82942113dc054 | [
"MIT"
] | permissive | RomeoV/pyomo-MINLP-benchmarking | 1270766397fbc4e57ea1bd0c2285fb7edf64062d | 996d2c8ee1cb9b03fe00c6246f52294337d8b92c | refs/heads/master | 2021-07-11T17:54:25.284712 | 2020-08-13T23:43:14 | 2020-08-13T23:43:14 | 185,664,992 | 8 | 1 | MIT | 2019-05-10T19:07:05 | 2019-05-08T19:09:05 | Python | UTF-8 | Python | false | false | 2,229 | py | # MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 6 1 0 5 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 11 1 0 10 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 57 47 10 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i7 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i8 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i9 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i10 = Var(within=Integers,bounds=(0,1),initialize=0)
m.obj = Objective(expr=50*m.i1*m.i1 + 48*m.i1 + 50*m.i2*m.i2 + 42*m.i2 + 50*m.i3*m.i3 + 48*m.i3 + 50*m.i4*m.i4 + 45*m.i4
+ 50*m.i5*m.i5 + 44*m.i5 + 50*m.i6*m.i6 + 41*m.i6 + 50*m.i7*m.i7 + 47*m.i7 + 50*m.i8*m.i8 + 42*
m.i8 + 50*m.i9*m.i9 + 45*m.i9 + 50*m.i10*m.i10 + 46*m.i10, sense=minimize)
m.c1 = Constraint(expr= - 2*m.i1 - 6*m.i2 - m.i3 - 3*m.i5 - 3*m.i6 - 2*m.i7 - 6*m.i8 - 2*m.i9 - 2*m.i10 <= -4)
m.c2 = Constraint(expr= 6*m.i1 - 5*m.i2 + 8*m.i3 - 3*m.i4 + m.i6 + 3*m.i7 + 8*m.i8 + 9*m.i9 - 3*m.i10 <= 22)
m.c3 = Constraint(expr= - 5*m.i1 + 6*m.i2 + 5*m.i3 + 3*m.i4 + 8*m.i5 - 8*m.i6 + 9*m.i7 + 2*m.i8 - 9*m.i10 <= -6)
m.c4 = Constraint(expr= 9*m.i1 + 5*m.i2 - 9*m.i4 + m.i5 - 8*m.i6 + 3*m.i7 - 9*m.i8 - 9*m.i9 - 3*m.i10 <= -23)
m.c5 = Constraint(expr= - 8*m.i1 + 7*m.i2 - 4*m.i3 - 5*m.i4 - 9*m.i5 + m.i6 - 7*m.i7 - m.i8 + 3*m.i9 - 2*m.i10 <= -12)
| [
"bernalde@cmu.edu"
] | bernalde@cmu.edu |
4d3e3795de4f0d14456e7c3b20dbe59dd3f65d27 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-1006.py | 9963ac98670192801ddc8a73ed6f01ffdc49ab9c | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,293 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", $TypedVar) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
669b4b361e5ae6e31c58326b71d172bdfac6fa46 | 1d0613fb401e92b6861ea3f615561df854603db6 | /KiBuzzard/deps/fonttools/Tests/ttLib/tables/otBase_test.py | ce0416e429bf1d1d925f0deb9a77811759bbcc93 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | gregdavill/KiBuzzard | 8c84a4339108c9942e1ec0e05e4110bba49fd265 | 88c4129b3fbed2cad718c01e5e2d29204e2f2071 | refs/heads/main | 2023-09-01T19:46:45.146077 | 2023-08-31T11:55:10 | 2023-08-31T11:55:10 | 328,686,533 | 358 | 36 | MIT | 2023-08-31T12:12:45 | 2021-01-11T14:16:42 | Python | UTF-8 | Python | false | false | 3,188 | py | from fontTools.misc.textTools import deHexStr
from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter
import unittest
class OTTableReaderTest(unittest.TestCase):
def test_readShort(self):
reader = OTTableReader(deHexStr("CA FE"))
self.assertEqual(reader.readShort(), -13570)
self.assertEqual(reader.pos, 2)
def test_readLong(self):
reader = OTTableReader(deHexStr("CA FE BE EF"))
self.assertEqual(reader.readLong(), -889274641)
self.assertEqual(reader.pos, 4)
def test_readUInt8(self):
reader = OTTableReader(deHexStr("C3"))
self.assertEqual(reader.readUInt8(), 0xC3)
self.assertEqual(reader.pos, 1)
def test_readUShort(self):
reader = OTTableReader(deHexStr("CA FE"))
self.assertEqual(reader.readUShort(), 0xCAFE)
self.assertEqual(reader.pos, 2)
def test_readUShortArray(self):
reader = OTTableReader(deHexStr("DE AD BE EF CA FE"))
self.assertEqual(list(reader.readUShortArray(3)),
[0xDEAD, 0xBEEF, 0xCAFE])
self.assertEqual(reader.pos, 6)
def test_readUInt24(self):
reader = OTTableReader(deHexStr("C3 13 37"))
self.assertEqual(reader.readUInt24(), 0xC31337)
self.assertEqual(reader.pos, 3)
def test_readULong(self):
reader = OTTableReader(deHexStr("CA FE BE EF"))
self.assertEqual(reader.readULong(), 0xCAFEBEEF)
self.assertEqual(reader.pos, 4)
def test_readTag(self):
reader = OTTableReader(deHexStr("46 6F 6F 64"))
self.assertEqual(reader.readTag(), "Food")
self.assertEqual(reader.pos, 4)
def test_readData(self):
reader = OTTableReader(deHexStr("48 65 6C 6C 6F"))
self.assertEqual(reader.readData(5), b"Hello")
self.assertEqual(reader.pos, 5)
def test_getSubReader(self):
reader = OTTableReader(deHexStr("CAFE F00D"))
sub = reader.getSubReader(2)
self.assertEqual(sub.readUShort(), 0xF00D)
self.assertEqual(reader.readUShort(), 0xCAFE)
class OTTableWriterTest(unittest.TestCase):
def test_writeShort(self):
writer = OTTableWriter()
writer.writeShort(-12345)
self.assertEqual(writer.getData(), deHexStr("CF C7"))
def test_writeLong(self):
writer = OTTableWriter()
writer.writeLong(-12345678)
self.assertEqual(writer.getData(), deHexStr("FF 43 9E B2"))
def test_writeUInt8(self):
writer = OTTableWriter()
writer.writeUInt8(0xBE)
self.assertEqual(writer.getData(), deHexStr("BE"))
def test_writeUShort(self):
writer = OTTableWriter()
writer.writeUShort(0xBEEF)
self.assertEqual(writer.getData(), deHexStr("BE EF"))
def test_writeUInt24(self):
writer = OTTableWriter()
writer.writeUInt24(0xBEEF77)
self.assertEqual(writer.getData(), deHexStr("BE EF 77"))
def test_writeULong(self):
writer = OTTableWriter()
writer.writeULong(0xBEEFCAFE)
self.assertEqual(writer.getData(), deHexStr("BE EF CA FE"))
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
| [
"greg.davill@gmail.com"
] | greg.davill@gmail.com |
e66cb54f081ee6a1b66de4c70b0070575006b270 | caf135d264c4c1fdd320b42bf0d019e350938b2d | /04_Algorithms/Leetcode/JZ23 二叉搜索树的后序遍历序列.py | 9fd12c4174b4cea3111d6a7932fcc8b955ad6e21 | [] | no_license | coolxv/DL-Prep | 4243c51103bdc38972b8a7cbe3db4efa93851342 | 3e6565527ee8479e178852fffc4ccd0e44166e48 | refs/heads/master | 2022-12-31T22:42:20.806208 | 2020-10-23T10:19:19 | 2020-10-23T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | # -*- coding:utf-8 -*-
class Solution1:
def VerifySquenceOfBST(self, sequence):
def helper(seq):
if 0 < len(seq) < 2:
return True
elif len(seq) == 0:
return False
else:
pivot = seq[-1]
place = -1
small, big = False, False
for idx, num in enumerate(seq[:-1]):
if num < pivot:
idx += 1
if big:
return False
elif num > pivot:
if place == -1:
place = idx
big = True
else:
return False
return helper(seq[:place]) and helper(seq[place:-1])
return helper(sequence)
# -*- coding:utf-8 -*-
# 根据跟左右分成左右两部分呢,只要右边都大于跟就可以
class Solution:
def VerifySquenceOfBST(self, sequence):
def helper(seq):
if 0 <= len(seq) < 2:
return True
pivot = seq[-1]
place = len(seq) - 1
for idx, num in enumerate(seq[:-1]):
if num > pivot:
place = idx
break
elif num == pivot:
return False
for num in seq[place:-1]:
if num <= pivot:
return False
return helper(seq[:place]) and helper(seq[place:-1])
if not sequence:
return False
return helper(sequence)
sol = Solution()
print(sol.VerifySquenceOfBST([4, 7, 5, 12, 10]))
print(sol.VerifySquenceOfBST([4, 9, 3, 12, 10]))
print(sol.VerifySquenceOfBST([4, 8, 6, 12, 16, 14, 10]))
print(sol.VerifySquenceOfBST([5, 4, 3, 2, 1]))
| [
"1574572981@qq.com"
] | 1574572981@qq.com |
e8fb944f87186041b706d37c50f6956045ae2213 | 7c39cc529a292967d8fc52718a227fccc0da53e4 | /src/crawler/crawler/items.py | 8341771ba3b5981723f73e6ed2a721d274f4581e | [] | no_license | carrie0307/parking-domain-detection | 860b5d1ad5e0e0344c4667e2e91149257a1068a6 | 9ff9f4f2b31c73599d0ac46eeb62634e2a8d940a | refs/heads/master | 2020-04-10T18:03:27.496758 | 2018-12-11T00:28:18 | 2018-12-11T00:28:18 | 161,192,700 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
'''
oldDomain = scrapy.Field()
newDomain = scrapy.Field()
url_links = scrapy.Field()
'''
label = scrapy.Field()
name = scrapy.Field()
down_link = scrapy.Field()
apk_name = scrapy.Field()
| [
"cst_study@163.com"
] | cst_study@163.com |
6e78a0942b241bb27c202408d2d7248f3628cdbc | 9689ebc06e7c9a5c1b5b19d34dbcf0f5b5b82cb6 | /speech/migrations/0125_auto_20190121_1023.py | b2e25a07b6d803abbd5e98a66ad8e95273c52cfe | [] | no_license | tigrezhito1/Ramas | 94fe57dc4235616522aa50b36f5a655861ecbb9f | fa894fa69f6bf2a645179cadc11fb8809e82700a | refs/heads/master | 2020-05-02T07:03:03.564208 | 2019-03-26T14:55:29 | 2019-03-26T14:55:29 | 177,808,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-21 10:23
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('speech', '0124_auto_20190121_0906'),
]
operations = [
migrations.AlterField(
model_name='agente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 421338)),
),
migrations.AlterField(
model_name='api',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 420012)),
),
migrations.AlterField(
model_name='base',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 418687)),
),
migrations.AlterField(
model_name='campania',
name='fecha',
field=models.DateTimeField(db_column='fecha cargada', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 416723)),
),
migrations.AlterField(
model_name='cliente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 414579)),
),
migrations.AlterField(
model_name='estado',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 416022)),
),
migrations.AlterField(
model_name='supervisor',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 413576)),
),
]
| [
"you@example.com"
] | you@example.com |
877180851854964f8c776d432dcf5da1e3a1f906 | 4a7494457d4ffcc32fd663835ceaa6360488f9e6 | /largest-subarray.py | 8f90cd5e9901bd472435d910a0b9fe416b59f539 | [] | no_license | eliyahuchaim/randomCodeChallenges | c5e35c7afc80f6c2acc482878256bb4710bbbe55 | 780e4404bcad20c6ccaba7a11c52c8d67c96cd59 | refs/heads/master | 2021-05-01T07:18:19.164784 | 2018-02-11T18:21:17 | 2018-02-11T18:21:17 | 121,152,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | array = [1, -1, 5, 3, -7, 4, 5, 6, -100, 4]
def largestSubarray(array):
    """Kadane's algorithm: largest sum over contiguous subarrays.

    The empty subarray counts, so the result is never below 0 (e.g. for an
    all-negative input this returns 0).
    """
    best = 0       # best subarray sum seen so far
    running = 0    # best sum of a subarray ending at the current element
    for value in array:
        running = max(running + value, 0)
        best = max(best, running)
    return best
# print(largestSubarray(array))
def test(n):
if n < 2:
return n
return n * test(n-1)
print(test(5))
| [
"github email address"
] | github email address |
d217fb6e18d40c8667f11db7401b0d973258cfa1 | 5f2b22d4ffec7fc1a4e40932acac30256f63d812 | /mathematical-modeling/test/canadian_temperature.py | fcb568f1f5270207fcff7d06d953597f12abea66 | [] | no_license | Thpffcj/Python-Learning | 45734dd31e4d8d047eec5c5d26309bc7449bfd0d | 5dacac6d33fcb7c034ecf5be58d02f506fd1d6ad | refs/heads/master | 2023-08-04T21:02:36.984616 | 2021-09-21T01:30:04 | 2021-09-21T01:30:04 | 111,358,872 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,834 | py | # -*- coding: UTF-8 -*-
# Created by thpffcj on 2019/9/20.
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 40)
pd.set_option('display.width', 1000)
'''
[839 rows x 25 columns]
"Date/Time","Year","Month","Mean Max Temp (°C)","Mean Max Temp Flag","Mean Min Temp (°C)","Mean Min Temp Flag","Mean Temp (°C)","Mean Temp Flag","Extr Max Temp (°C)","Extr Max Temp Flag","Extr Min Temp (°C)","Extr Min Temp Flag","Total Rain (mm)","Total Rain Flag","Total Snow (cm)","Total Snow Flag","Total Precip (mm)","Total Precip Flag","Snow Grnd Last Day (cm)","Snow Grnd Last Day Flag","Dir of Max Gust (10's deg)","Dir of Max Gust Flag","Spd of Max Gust (km/h)","Spd of Max Gust Flag"
[366 rows x 27 columns]
"Date/Time","Year","Month","Day","Data Quality","Max Temp (°C)","Max Temp Flag","Min Temp (°C)","Min Temp Flag","Mean Temp (°C)","Mean Temp Flag","Heat Deg Days (°C)","Heat Deg Days Flag","Cool Deg Days (°C)","Cool Deg Days Flag","Total Rain (mm)","Total Rain Flag","Total RaiTotal Snow (cm)","Total Snow Flag","Total Precip (mm)","Total Precip Flag","Snow on Grnd (cm)","Snow on Grnd Flag","Dir of Max Gust (10s deg)","Dir of Max Gust Flag","Spd of Max Gust (km/h)","Spd of Max Gust Flag"
'''
# Long-run monthly summary for Winnipeg (header/metadata rows are skipped).
# NOTE(review): loaded but not used below -- presumably kept for a later
# comparison step; confirm before deleting.
data_1938_2007 = pd.read_csv("卡纳达气候数据/Manitoba/WINNIPEG_1938_2007.csv", skiprows=18)

# One year of daily observations.
file_name = "卡纳达气候数据/Manitoba/WINNIPEG_daily_2009.csv"
data = pd.read_csv(file_name, skiprows=24)

max_temp_data = data["Max Temp (°C)"]
min_temp_data = data["Min Temp (°C)"]
mean_temp_data = data["Mean Temp (°C)"]

# BUG FIX: the original column list was missing a comma after "Month", so the
# two adjacent string literals were concatenated into a single bogus column
# name "MonthMean Max Temp (°C)".
result = pd.DataFrame(columns=["Date/Time", "Year", "Month",
                               "Mean Max Temp (°C)",
                               "Mean Min Temp (°C)",
                               "Mean Temp (°C)",
                               "Total Rain (mm)",
                               "Total Snow (cm)",
                               "Total Precip (mm)"])

for i in range(1, 13):
    # Monthly aggregates over the daily records; days with missing values are
    # dropped before averaging/summing.
    month_rows = data[data["Month"] == i]
    mean_max_temp = month_rows["Max Temp (°C)"].dropna().values.mean()
    mean_min_temp = month_rows["Min Temp (°C)"].dropna().values.mean()
    mean_temp = month_rows["Mean Temp (°C)"].dropna().values.mean()
    total_rain = month_rows["Total Rain (mm)"].dropna().values.sum()
    total_snow = month_rows["Total Snow (cm)"].dropna().values.sum()
    total_precip = month_rows["Total Precip (mm)"].dropna().values.sum()
    # TODO(review): these aggregates are computed but never written into
    # `result`, so the DataFrame printed below is always empty -- confirm
    # whether a row (including a "Date/Time" value) should be appended here.

print(result)
# result.to_csv('卡纳达气候数据/Manitoba/2009.csv', sep=',', header=True, index=True)
| [
"1441732331@qq.com"
] | 1441732331@qq.com |
5573cd63e7ebaa8b444ea121a417072282d3ba39 | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /tests/sentry/api/endpoints/test_project_environments.py | 3279e53b271d1c8c31363bcb8df014286d498391 | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,174 | py | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Environment
from sentry.testutils import APITestCase
class ProjectEnvironmentsTest(APITestCase):
    """API test: listing a project's environments returns every attached
    environment, 'production' before 'staging'."""

    def test_simple(self):
        project = self.create_project()

        # Attach two environments to the project.
        for env_name in ('production', 'staging'):
            environment = Environment.objects.create(
                project_id=project.id,
                organization_id=project.organization_id,
                name=env_name,
            )
            environment.add_project(project)

        self.login_as(user=self.user)
        endpoint = reverse('sentry-api-0-project-environments', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
        })
        response = self.client.get(endpoint, format='json')

        assert response.status_code == 200, response.content
        names = [env['name'] for env in response.data]
        assert names == ['production', 'staging']
| [
"jeyce@github.com"
] | jeyce@github.com |
cb2f83b86946560fd8c598dfd20a0e7a84f4d13c | 6a0abe2f4172f680415d83f1946baaf85e5711b7 | /aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/DetectImageFacesRequest.py | 470f7c6aad17d81be0dde9d91e70745af3e23bed | [
"Apache-2.0"
] | permissive | brw123/aliyun-openapi-python-sdk | 905556b268cbe4398f0f57b48422b713d9e89a51 | 8c77db6fd6503343cffa3c86fcb9d11770a64ca2 | refs/heads/master | 2020-05-01T16:26:49.291948 | 2019-03-21T09:11:55 | 2019-03-21T09:11:55 | 177,572,187 | 1 | 0 | null | 2019-03-25T11:21:59 | 2019-03-25T11:21:59 | null | UTF-8 | Python | false | false | 1,290 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DetectImageFacesRequest(RpcRequest):
	"""Auto-generated RPC request for the 'DetectImageFaces' action of the
	'imm' product, API version 2017-09-06.  The getters/setters below proxy
	the 'ImageUri' and 'Project' query parameters.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'imm', '2017-09-06', 'DetectImageFaces','imm')
	# Accessors for the 'ImageUri' query parameter (the image to analyse).
	def get_ImageUri(self):
		return self.get_query_params().get('ImageUri')
	def set_ImageUri(self,ImageUri):
		self.add_query_param('ImageUri',ImageUri)
	# Accessors for the 'Project' query parameter.
	def get_Project(self):
		return self.get_query_params().get('Project')
	def set_Project(self,Project):
self.add_query_param('Project',Project) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
43765cb5e4883a166f855945d77778bc48262c7f | 4c489dadcc7d5b59e95eb1991085509372233d53 | /backend/home/migrations/0001_load_initial_data.py | 54607830076275e75394db8f9cbd3dc37bd32255 | [] | no_license | crowdbotics-apps/test-26812 | 1a304728cd5230c7f7a843bb37be5b1f2a65f923 | dedd52fc3ab5cb0d5d49f1dafd93232b246beb87 | refs/heads/master | 2023-05-07T09:40:39.041733 | 2021-05-17T18:41:05 | 2021-05-17T18:41:05 | 368,288,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Create or refresh the default ``Site`` row (pk=1) with this app's
    name and custom domain.  ``schema_editor`` is unused but required by the
    RunPython signature."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "test-26812.botics.co"
    defaults = {"name": "test"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Initial data migration: seeds the default ``Site`` row via ``create_site``."""
    # Runs after the sites framework has made Site.domain unique.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1752eb5d8c161c3697676c8be791bea383a7832b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/f0v.py | 658e4418beac1328261dee4dfa89ecdbb068d26d | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens found between a leading and a trailing '"' token.

    ``lineRemaining`` is a list of whitespace-split tokens.  Output happens
    only when both the first and the last token are a bare '"'; if there are
    no tokens in between, a blank line is printed.

    PY3 FIX: the original used Python 2 ``print`` statements, which are
    syntax errors under Python 3; single-argument ``print(...)`` calls
    behave identically under both interpreters.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: strip the surrounding quote tokens
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            # quotes with nothing between them -> blank line
            print('')
def main(fileName):
    """Interpret ``fileName`` line by line.

    Every non-empty line must start with the keyword 'f0V'; the remaining
    tokens are handed to printFunction.  Any other keyword prints 'ERROR'
    and aborts.

    PY3 FIX: ``print 'ERROR'`` was Python 2 syntax; the parenthesised form
    behaves identically under Python 2 and 3.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                # BUG FIX: a blank line used to raise IndexError on data[0].
                continue
            if data[0] == 'f0V':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
42092263ef3f5e333a75f78220560c9da11a25c8 | 4ca853aeabbd3e33f5c0f28699b3de7206ad83a7 | /ML With python/ASSIGNMENT/p41.py | 657bbf07ebc5b1d9c5524a377bc2fbccd8239179 | [] | no_license | kundan4U/ML-With-Python- | 9fe1ce5049591040521b0809e9cd154158364c72 | 5376693ae3f52721663adc713f6926c0ccccbf75 | refs/heads/main | 2023-06-17T20:27:30.411104 | 2021-07-21T18:47:23 | 2021-07-21T18:47:23 | 388,214,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py |
# Read ten integers typed by the user, then report their sum; the average is
# printed by the statement that follows this block.
list = []
print("plese Enter 10 integer number")
for _ in range(10):
    list.append(int(input()))
print("Sum of list is :", sum(list))
print("Average of list is :",sum(list)/10) | [
"kc946605@gmail.com"
] | kc946605@gmail.com |
d5a0b82e66988683dc1a7a08141b7edbbd417a8e | 20aadf6ec9fd64d1d6dffff56b05853e0ab26b1f | /l6/L6_pbm10.py | 3ae3245b6a76d7e97b2c8dab28e13dd0e95b16a6 | [] | no_license | feminas-k/MITx---6.00.1x | 9a8e81630be784e5aaa890d811674962c66d56eb | 1ddf24c25220f8b5f78d36e2a3342b6babb40669 | refs/heads/master | 2021-01-19T00:59:57.434511 | 2016-06-13T18:13:17 | 2016-06-13T18:13:17 | 61,058,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def howMany(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
'''
templist = aDict.values()
result = 0
for elmnt in aDict.values():
result += len(elmnt)
return result
| [
"femi1991@gmail.com"
] | femi1991@gmail.com |
b8766a10748c69ca7f71c5a655acc68584b7a37c | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-3190.py | 2fb8e0e6a673125695c09fac111c25d0e89336b5 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,349 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
    # Number of characters in the argument.
    length:int = len(s)
    return length
def foo2(s: str, s2: str) -> int:
    # Length of the first argument; s2 is ignored.
    length:int = len(s)
    return length
def foo3(s: str, s2: str, s3: str) -> int:
    # Length of the first argument; the rest are ignored.
    length:int = len(s)
    return length
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
    # Length of the first argument; the rest are ignored.
    length:int = len(s)
    return length
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
    # Length of the first argument; the rest are ignored.
    length:int = len(s)
    return length
# Synthetic coverage class: exercises a nested function with nonlocal, global
# mutation, list aliasing and attribute updates; `p` is scratch state.
class bar(object):
    p: bool = True
    # Tallies every element of xx into the global `count`, then juggles
    # xx[0]/xx[1] while p is set.  NOTE(review): the elif arm changes only
    # self.p and leaves x unchanged, so the while-loop may not terminate
    # once p stays False with x <= 0.
    def baz(self:"bar", xx: [int]) -> str:
        global count
        x:int = 0
        y:int = 1
        # Resets the enclosing x to -1 whenever it exceeds the argument.
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
# Two-flag variant of `bar`: adds baz2, which declares one extra parameter,
# locals and nested helper but runs the same loop skeleton.
class bar2(object):
    p: bool = True
    p2: bool = True
    # Same loop as bar.baz.  NOTE(review): the elif arm leaves x unchanged,
    # so the while-loop may not terminate once p stays False with x <= 0.
    def baz(self:"bar2", xx: [int]) -> str:
        global count
        x:int = 0
        y:int = 1
        # Resets the enclosing x to -1 whenever it exceeds the argument.
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton as baz; xx2, x2/y2 and qux2 are declared but the loop
    # logic is identical.
    def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        y:int = 1
        y2:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
# Three-flag variant: baz/baz2/baz3 share one loop skeleton; each variant
# adds one more (otherwise unused) parameter, local pair, and qux helper.
class bar3(object):
    p: bool = True
    p2: bool = True
    p3: bool = True
    # Same loop as bar.baz.  NOTE(review): the elif arm leaves x unchanged,
    # so the while-loop may not terminate once p stays False with x <= 0.
    def baz(self:"bar3", xx: [int]) -> str:
        global count
        x:int = 0
        y:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton as baz with one extra parameter/local pair/helper.
    def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        y:int = 1
        y2:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton again; qux3 widens the nonlocal ladder to x..x3.
    def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
# Four-flag variant: baz..baz4 share one loop skeleton; each variant adds one
# more (otherwise unused) parameter, local pair, and qux helper.
class bar4(object):
    p: bool = True
    p2: bool = True
    p3: bool = True
    p4: bool = True
    # Same loop as bar.baz.  NOTE(review): the elif arm leaves x unchanged,
    # so the while-loop may not terminate once p stays False with x <= 0.
    def baz(self:"bar4", xx: [int]) -> str:
        global count
        x:int = 0
        y:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton as baz with one extra parameter/local pair/helper.
    def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        y:int = 1
        y2:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton; qux3 widens the nonlocal ladder to x..x3.
    def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton; qux4 widens the nonlocal ladder to x..x4.
    def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        x4:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        y4:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        def qux4(y: int, y2: int, y3: int, y4: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            nonlocal x4
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            # FIX: this line read `elif $Var("Long"[0]) == 1:` -- an
            # unexpanded template placeholder (a syntax error).  Every
            # sibling baz method in this file calls `foo` here.
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
# Largest synthetic coverage class: five flag fields and five baz variants,
# each adding one more (otherwise unused) parameter, local pair, and qux
# helper on top of the same loop skeleton.
class bar5(object):
    p: bool = True
    p2: bool = True
    p3: bool = True
    p4: bool = True
    p5: bool = True
    # Same loop as bar.baz.  NOTE(review): the elif arm leaves x unchanged,
    # so the while-loop may not terminate once p stays False with x <= 0.
    def baz(self:"bar5", xx: [int]) -> str:
        global count
        x:int = 0
        y:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton as baz with one extra parameter/local pair/helper.
    def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        y:int = 1
        y2:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton; qux3 widens the nonlocal ladder to x..x3.
    def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton; qux4 widens the nonlocal ladder to x..x4.
    def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        x4:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        y4:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        def qux4(y: int, y2: int, y3: int, y4: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            nonlocal x4
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    # Same skeleton; qux5 widens the nonlocal ladder to x..x5.
    def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
        global count
        x:int = 0
        x2:int = 0
        x3:int = 0
        x4:int = 0
        x5:int = 0
        y:int = 1
        y2:int = 1
        y3:int = 1
        y4:int = 1
        y5:int = 1
        def qux(y: int) -> object:
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        def qux4(y: int, y2: int, y3: int, y4: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            nonlocal x4
            if x > y:
                x = -1
        def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
            nonlocal x
            nonlocal x2
            nonlocal x3
            nonlocal x4
            nonlocal x5
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
f08f4762946aef8780f1f38b462d4d8626e4a64d | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/edmundzhang/emsemble-method-voting-lr-gb-rf-svc/emsemble-method-voting-lr-gb-rf-svc.py | a3d99dc1eba7755790009666723177c68666a38c | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,628 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# pandas
import pandas as pd
from pandas import Series,DataFrame
# numpy, matplotlib, seaborn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
titanic_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
# preview the data
titanic_df.head()
# In[ ]:
titanic_df.info()
print("----------------------------")
test_df.info()
# In[ ]:
# drop unnecessary columns, these columns won't be useful in analysis and prediction
titanic_df = titanic_df.drop(['PassengerId'], axis=1)
#test_df = test_df.drop(['Ticket'], axis=1)
# In[ ]:
#Name
titanic_df_title = [i.split(",")[1].split(".")[0].strip() for i in titanic_df["Name"]]
test_df_title = [i.split(",")[1].split(".")[0].strip() for i in test_df["Name"]]
titanic_df["Title"] = pd.Series(titanic_df_title)
test_df["Title"] = pd.Series(test_df_title)
titanic_df.head()
# In[ ]:
g = sns.countplot(x="Title",data=titanic_df)
# easy to read
g = plt.setp(g.get_xticklabels(), rotation=45)
# In[ ]:
titanic_df["Title"] = titanic_df["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
test_df["Title"] = test_df["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
titanic_df["Title"] = titanic_df["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
test_df["Title"] = test_df["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
titanic_df["Title"] = titanic_df["Title"].astype(int)
test_df["Title"] = test_df["Title"].astype(int)
# In[ ]:
g = sns.countplot(titanic_df["Title"])
g = g.set_xticklabels(["Master","Miss/Ms/Mme/Mlle/Mrs","Mr","Rare"])
# In[ ]:
g = sns.factorplot(x="Title",y="Survived",data=titanic_df,kind="bar")
g = g.set_xticklabels(["Master","Miss-Mrs","Mr","Rare"])
g = g.set_ylabels("survival probability")
# In[ ]:
titanic_df.head()
# In[ ]:
# convert to indicator values Title
titanic_df = pd.get_dummies(titanic_df, columns = ["Title"])
test_df = pd.get_dummies(test_df, columns = ["Title"])
titanic_df.head()
# In[ ]:
# Drop Name variable
titanic_df.drop(labels = ["Name"], axis = 1, inplace = True)
test_df.drop(labels = ["Name"], axis = 1, inplace = True)
# In[ ]:
titanic_df.head()
# In[ ]:
test_df.head()
# In[ ]:
# Embarked
# only in titanic_df, fill the two missing values with the most occurred value, which is "S".
titanic_df["Embarked"] = titanic_df["Embarked"].fillna("S")
# plot
sns.factorplot('Embarked','Survived', data=titanic_df,size=4,aspect=3)
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
# sns.factorplot('Embarked',data=titanic_df,kind='count',order=['S','C','Q'],ax=axis1)
# sns.factorplot('Survived',hue="Embarked",data=titanic_df,kind='count',order=[1,0],ax=axis2)
sns.countplot(x='Embarked', data=titanic_df, ax=axis1)
sns.countplot(x='Survived', hue="Embarked", data=titanic_df, order=[1,0], ax=axis2)
# group by embarked, and get the mean for survived passengers for each value in Embarked
embark_perc = titanic_df[["Embarked", "Survived"]].groupby(['Embarked'],as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=embark_perc,order=['S','C','Q'],ax=axis3)
embark_dummies_titanic = pd.get_dummies(titanic_df['Embarked'])
embark_dummies_test = pd.get_dummies(test_df['Embarked'])
titanic_df = titanic_df.join(embark_dummies_titanic)
test_df = test_df.join(embark_dummies_test)
titanic_df.drop(['Embarked'], axis=1,inplace=True)
test_df.drop(['Embarked'], axis=1,inplace=True)
# In[ ]:
# Fare
# only for test_df, since there is a missing "Fare" values
test_df["Fare"].fillna(test_df["Fare"].median(), inplace=True)
# convert from float to int
titanic_df['Fare'] = titanic_df['Fare'].astype(int)
test_df['Fare'] = test_df['Fare'].astype(int)
# get fare for survived & didn't survive passengers
fare_not_survived = titanic_df["Fare"][titanic_df["Survived"] == 0]
fare_survived = titanic_df["Fare"][titanic_df["Survived"] == 1]
# get average and std for fare of survived/not survived passengers
avgerage_fare = DataFrame([fare_not_survived.mean(), fare_survived.mean()])
std_fare = DataFrame([fare_not_survived.std(), fare_survived.std()])
# plot
titanic_df['Fare'].plot(kind='hist', figsize=(15,3),bins=100, xlim=(0,50))
avgerage_fare.index.names = std_fare.index.names = ["Survived"]
avgerage_fare.plot(yerr=std_fare,kind='bar',legend=False)
# In[ ]:
# Age
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4))
axis1.set_title('Original Age values - Titanic')
axis2.set_title('New Age values - Titanic')
# axis3.set_title('Original Age values - Test')
# axis4.set_title('New Age values - Test')
# get average, std, and number of NaN values in titanic_df
average_age_titanic = titanic_df["Age"].mean()
std_age_titanic = titanic_df["Age"].std()
count_nan_age_titanic = titanic_df["Age"].isnull().sum()
# get average, std, and number of NaN values in test_df
average_age_test = test_df["Age"].mean()
std_age_test = test_df["Age"].std()
count_nan_age_test = test_df["Age"].isnull().sum()
# generate random numbers between (mean - std) & (mean + std)
rand_1 = np.random.randint(average_age_titanic - std_age_titanic, average_age_titanic + std_age_titanic, size = count_nan_age_titanic)
rand_2 = np.random.randint(average_age_test - std_age_test, average_age_test + std_age_test, size = count_nan_age_test)
# plot original Age values
# NOTE: drop all null values, and convert to int
titanic_df['Age'].dropna().astype(int).hist(bins=70, ax=axis1)
# test_df['Age'].dropna().astype(int).hist(bins=70, ax=axis1)
# fill NaN values in Age column with random values generated
titanic_df["Age"][np.isnan(titanic_df["Age"])] = rand_1
test_df["Age"][np.isnan(test_df["Age"])] = rand_2
# convert from float to int
titanic_df['Age'] = titanic_df['Age'].astype(int)
test_df['Age'] = test_df['Age'].astype(int)
# plot new Age Values
titanic_df['Age'].hist(bins=70, ax=axis2)
# test_df['Age'].hist(bins=70, ax=axis4)
# In[ ]:
# .... continue with plot Age column
# peaks for survived/not survived passengers by their age
facet = sns.FacetGrid(titanic_df, hue="Survived",aspect=4)
facet.map(sns.kdeplot,'Age',shade= True)
facet.set(xlim=(0, titanic_df['Age'].max()))
facet.add_legend()
# average survived passengers by age
fig, axis1 = plt.subplots(1,1,figsize=(18,4))
average_age = titanic_df[["Age", "Survived"]].groupby(['Age'],as_index=False).mean()
sns.barplot(x='Age', y='Survived', data=average_age)
# In[ ]:
# Cabin&Ticket
titanic_len = len(titanic_df)
all_dataset = pd.concat(objs=[titanic_df, test_df], axis=0).reset_index(drop=True)
# Replace the Cabin number by the type of cabin 'X' if not
all_dataset["Cabin"] = pd.Series([i[0] if not pd.isnull(i) else 'X' for i in all_dataset['Cabin'] ])
Ticket = []
for i in list(all_dataset.Ticket):
if not i.isdigit() :
Ticket.append(i.replace(".","").replace("/","").strip().split(' ')[0]) #Take prefix
else:
Ticket.append("X")
all_dataset["Ticket"] = Ticket
#g = sns.countplot(all_dataset["Cabin"],order=['A','B','C','D','E','F','G','T','X'])
#g = sns.factorplot(y="Survived",x="Cabin",data=all_dataset,kind="bar",order=['A','B','C','D','E','F','G','T','X'])
#g = g.set_ylabels("Survival Probability")
all_dataset = pd.get_dummies(all_dataset, columns = ["Cabin"],prefix="Cabin")
all_dataset = pd.get_dummies(all_dataset, columns = ["Ticket"], prefix="T")
titanic_df = all_dataset[:titanic_len]
titanic_df = titanic_df.drop(["PassengerId"],axis=1)
titanic_df["Survived"] = titanic_df["Survived"].astype(int)
test_df = all_dataset[titanic_len:]
test_df = test_df.drop(["Survived"],axis=1)
test_df["PassengerId"] = test_df["PassengerId"].astype(int)
# In[ ]:
# Family
# Instead of having two columns Parch & SibSp,
# we can have only one column represent if the passenger had any family member aboard or not,
# Meaning, if having any family member(whether parent, brother, ...etc) will increase chances of Survival or not.
titanic_df['Family'] = titanic_df["Parch"] + titanic_df["SibSp"]+1
test_df['Family'] = test_df["Parch"] + test_df["SibSp"]+1
# drop Parch & SibSp
titanic_df = titanic_df.drop(['SibSp','Parch'], axis=1)
test_df = test_df.drop(['SibSp','Parch'], axis=1)
g = sns.factorplot(x="Family",y="Survived",data = titanic_df)
g = g.set_ylabels("Survival Probability")
# Create new feature of family size
titanic_df['Single'] = titanic_df['Family'].map(lambda s: 1 if s == 1 else 0)
titanic_df['SmallF'] = titanic_df['Family'].map(lambda s: 1 if s == 2 else 0)
titanic_df['MedF'] = titanic_df['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0)
titanic_df['LargeF'] = titanic_df['Family'].map(lambda s: 1 if s >= 5 else 0)
test_df['Single'] = test_df['Family'].map(lambda s: 1 if s == 1 else 0)
test_df['SmallF'] = test_df['Family'].map(lambda s: 1 if s == 2 else 0)
test_df['MedF'] = test_df['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0)
test_df['LargeF'] = test_df['Family'].map(lambda s: 1 if s >= 5 else 0)
g = sns.factorplot(x="Single",y="Survived",data=titanic_df,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="SmallF",y="Survived",data=titanic_df,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="MedF",y="Survived",data=titanic_df,kind="bar")
g = g.set_ylabels("Survival Probability")
g = sns.factorplot(x="LargeF",y="Survived",data=titanic_df,kind="bar")
g = g.set_ylabels("Survival Probability")
# In[ ]:
# Sex
# As we see, children(age < ~16) on aboard seem to have a high chances for Survival.
# So, we can classify passengers as males, females, and child
def get_person(passenger):
    """Classify an (age, sex) record: under-16s are 'child', otherwise the sex label."""
    age, sex = passenger
    if age < 16:
        return 'child'
    return sex
titanic_df['Person'] = titanic_df[['Age','Sex']].apply(get_person,axis=1)
test_df['Person'] = test_df[['Age','Sex']].apply(get_person,axis=1)
# No need to use Sex column since we created Person column
titanic_df.drop(['Sex'],axis=1,inplace=True)
test_df.drop(['Sex'],axis=1,inplace=True)
# create dummy variables for Person column, & drop Male as it has the lowest average of survived passengers
person_dummies_titanic = pd.get_dummies(titanic_df['Person'])
person_dummies_titanic.columns = ['Child','Female','Male']
person_dummies_titanic.drop(['Male'], axis=1, inplace=True)
person_dummies_test = pd.get_dummies(test_df['Person'])
person_dummies_test.columns = ['Child','Female','Male']
person_dummies_test.drop(['Male'], axis=1, inplace=True)
titanic_df = titanic_df.join(person_dummies_titanic)
test_df = test_df.join(person_dummies_test)
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(10,5))
# sns.factorplot('Person',data=titanic_df,kind='count',ax=axis1)
sns.countplot(x='Person', data=titanic_df, ax=axis1)
# average of survived for each Person(male, female, or child)
person_perc = titanic_df[["Person", "Survived"]].groupby(['Person'],as_index=False).mean()
sns.barplot(x='Person', y='Survived', data=person_perc, ax=axis2, order=['male','female','child'])
titanic_df.drop(['Person'],axis=1,inplace=True)
test_df.drop(['Person'],axis=1,inplace=True)
# In[ ]:
# Pclass
# sns.factorplot('Pclass',data=titanic_df,kind='count',order=[1,2,3])
sns.factorplot('Pclass','Survived',order=[1,2,3], data=titanic_df,size=5)
# create dummy variables for Pclass column, & drop 3rd class as it has the lowest average of survived passengers
pclass_dummies_titanic = pd.get_dummies(titanic_df['Pclass'])
pclass_dummies_titanic.columns = ['Class_1','Class_2','Class_3']
pclass_dummies_titanic.drop(['Class_3'], axis=1, inplace=True)
pclass_dummies_test = pd.get_dummies(test_df['Pclass'])
pclass_dummies_test.columns = ['Class_1','Class_2','Class_3']
pclass_dummies_test.drop(['Class_3'], axis=1, inplace=True)
titanic_df.drop(['Pclass'],axis=1,inplace=True)
test_df.drop(['Pclass'],axis=1,inplace=True)
titanic_df = titanic_df.join(pclass_dummies_titanic)
test_df = test_df.join(pclass_dummies_test)
# In[ ]:
# define training and testing sets
X_train = titanic_df.drop("Survived",axis=1)
Y_train = titanic_df["Survived"]
X_test = test_df.drop("PassengerId",axis=1).copy()
# In[ ]:
titanic_df.info()
print("----------------------------")
test_df.info()
# In[ ]:
# Logistic Regression
#class_weight ='balanced',
logreg = LogisticRegression(penalty='l2',solver='liblinear',multi_class='ovr')
logreg.fit(X_train, Y_train)
Y_pred_logreg = logreg.predict(X_test)
logreg.score(X_train, Y_train)
# In[ ]:
#GradientBoosting
GradientBoostingTree = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,max_depth=1, random_state=0).fit(X_train, Y_train)
GradientBoostingTree_score=GradientBoostingTree.score(X_train, Y_train)
GradientBoostingTree_score
# In[ ]:
# Random Forests
#数据集比较简单,模型较为复杂,设置max_depth和min_samples_split参数,防止过拟合
random_forest = RandomForestClassifier(n_estimators=100,max_features=9,max_depth = 6, min_samples_split=20)
random_forest.fit(X_train, Y_train)
Y_pred_random_forest = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
# In[ ]:
#SVC
SVC_Model = SVC(C=2.5,cache_size=200, class_weight=None, coef0=0.0,decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',max_iter=-1, probability=False, random_state=None, shrinking=True,tol=0.001, verbose=False)
SVC_Model.fit(X_train, Y_train)
SVC_Model_score=SVC_Model.score(X_train, Y_train)
SVC_Model_score
# In[ ]:
#MLP
#MLP_model = MLPClassifier(activation='relu', solver='adam', alpha=0.0001)
#MLP_model = MLPClassifier(activation='relu', solver='lbfgs', alpha=0.0001)
#MLP_model.fit(X_train, Y_train)
#MLP_model_score=SVC_Model.score(X_train, Y_train)
#MLP_model_score
# In[ ]:
#voting_final = VotingClassifier(estimators=[('GB', GradientBoostingTree), ('RF', random_forest),('LR',logreg),('SVC',SVC_Model),('MLP',MLP_model)], voting='hard', n_jobs=1)
voting_final = VotingClassifier(estimators=[('GB', GradientBoostingTree), ('RF', random_forest),('LR',logreg),('SVC',SVC_Model)], voting='hard', n_jobs=1)
voting_final = voting_final.fit(X_train, Y_train)
votingY_pred = voting_final.predict(X_test)
voting_final.score(X_train, Y_train)
# In[ ]:
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": votingY_pred
})
submission.to_csv('titanic.csv', index=False)
# kfold = StratifiedKFold(n_splits=10)
# # Cross validate model with Kfold stratified cross val
# kfold = StratifiedKFold(n_splits=10)
#
#
# #compare different algorithms
# random_state = 2
# classifiers = []
# classifiers.append(SVC(random_state=random_state))
# classifiers.append(DecisionTreeClassifier(random_state=random_state))
# classifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),random_state=random_state,learning_rate=0.1))
# classifiers.append(RandomForestClassifier(random_state=random_state))
# classifiers.append(ExtraTreesClassifier(random_state=random_state))
# classifiers.append(GradientBoostingClassifier(random_state=random_state))
# classifiers.append(MLPClassifier(random_state=random_state))
# classifiers.append(KNeighborsClassifier())
# classifiers.append(LogisticRegression(random_state = random_state))
# classifiers.append(LinearDiscriminantAnalysis())
#
# cv_results = []
# for classifier in classifiers :
# cv_results.append(cross_val_score(classifier, X_train, y = Y_train, scoring = "accuracy", cv = kfold, n_jobs=1))
#
# cv_means = []
# cv_std = []
# for cv_result in cv_results:
# cv_means.append(cv_result.mean())
# cv_std.append(cv_result.std())
#
# cv_res = pd.DataFrame({"CrossValMeans":cv_means,"CrossValerrors": cv_std,"Algorithm":["SVC","DecisionTree","AdaBoost",
# "RandomForest","ExtraTrees","GradientBoosting","MultipleLayerPerceptron","KNeighboors","LogisticRegression","LinearDiscriminantAnalysis"]})
#
# g = sns.barplot("CrossValMeans","Algorithm",data = cv_res, palette="Set3",orient = "h",**{'xerr':cv_std})
# g.set_xlabel("Mean Accuracy")
# g = g.set_title("Cross validation scores")
# RFC = RandomForestClassifier()
# rf_param_grid = {"max_depth": [1,2,3,4,5],
# "max_features": [1, 5, 10],
# "min_samples_leaf": [1, 3, 10],
# "bootstrap": [False],
# "min_samples_split": [2, 50, 100],
# "n_estimators" :[10,100,1000],
# "criterion": ["gini"]}
# gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= 1, verbose = 1)
# gsRFC.fit(X_train,Y_train)
# RFC_best = gsRFC.best_estimator_
# gsRFC.best_score_
# train_sizes, train_scores, test_scores = learning_curve(random_forest, X_train, Y_train, cv=kfold, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5))
#
# train_scores_mean = np.mean(train_scores, axis=1)
# train_scores_std = np.std(train_scores, axis=1)
# test_scores_mean = np.mean(test_scores, axis=1)
# test_scores_std = np.std(test_scores, axis=1)
#
# plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
# train_scores_mean + train_scores_std, alpha=0.1,
# color="r")
# plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
# test_scores_mean + test_scores_std, alpha=0.1, color="g")
#
# plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
# label="Training score")
#
# plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
# label="Cross-validation score")
| [
"bitsorific@gmail.com"
] | bitsorific@gmail.com |
2e8b923f0207d66b8e635ac23e77072882adefa1 | 62261d7fbd2feab54b1b5f9f0fef33fd784873f9 | /src/results/deepfool/merge.py | 3ca3a046015f8a2c7f425357dd4043e353ce924f | [] | no_license | ychnlgy/DeepConsensus-experimental-FROZEN | 50ebfe25b33ce8715fb9f24285969831cef311f2 | 904ae3988fee1df20273e002ba53a49a0d811192 | refs/heads/master | 2020-03-31T06:18:38.029959 | 2018-12-02T05:04:52 | 2018-12-02T05:04:52 | 151,976,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import scipy.misc, os, collections, numpy
ORI = "original.png"
PER = "perturbed.png"
def pair(folder):
    """Merge original/perturbed image pairs from *folder* into merged.png.

    Expects files named "<id>-original.png" and "<id>-perturbed.png"; each
    pair occupies one slot along the first axis, with the original image in
    the first half of the second axis and the perturbed image in the second.

    NOTE(review): the canvas is indexed (width, height[, channels]), i.e.
    transposed relative to the usual (H, W) numpy image convention, so the
    per-slot assignments only line up when the images are square -- confirm
    against the actual input data.
    NOTE: scipy.misc.imread/imsave were removed in SciPy >= 1.2; this code
    requires an old SciPy (or migration to imageio).
    """
    files = os.listdir(folder)
    # Group the two variants of each image under their shared id prefix.
    sep = collections.defaultdict(dict)
    for f in files:
        fid, lab = f.split("-")
        im = scipy.misc.imread(os.path.join(folder, f))
        sep[fid][lab] = im
    # Canvas dimensions: one slot per pair, two images stacked per slot.
    # (im is the last image read; all images are assumed the same shape.)
    h = 2 * im.shape[0]
    w = len(files) // 2 * im.shape[1]
    if len(im.shape) == 3:
        size = (w, h, im.shape[2])
    else:
        size = (w, h)
    # Single allocation path; previously the color branch duplicated the
    # shape tuple in a literal numpy.zeros((w, h, c)) call.
    out = numpy.zeros(size)
    for i, (k, v) in enumerate(sep.items()):
        out[i * im.shape[1]: (i + 1) * im.shape[1], :im.shape[0]] = v[ORI]
        out[i * im.shape[1]: (i + 1) * im.shape[1], im.shape[0]:] = v[PER]
    scipy.misc.imsave("merged.png", out)
if __name__ == "__main__":
    # CLI entry point: merge the image pairs found in the given directory.
    import sys
    pair(sys.argv[1])
| [
"ychnlgy@gmail.com"
] | ychnlgy@gmail.com |
e3063f03197b9586200a1b0b57c839c13178ae36 | 8f7dee55fd89be91f5cc0f3539099d49575fe3a9 | /profile/context_processors.py | a5e74883e9f039415a0d319a2c201278b3bad380 | [] | no_license | qoin-open-source/samen-doen | f9569439a5b715ed08b4a4dadc3e58a89f9e1d70 | 9feadb90fe14d458102ff3b55e60c56e0c8349e4 | refs/heads/master | 2020-05-03T23:59:43.794911 | 2019-04-02T21:15:18 | 2019-04-02T21:15:18 | 178,876,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django.conf import settings
#from cms.models import Page
def top_menu_items(request):
    """Context processor exposing the configured top-menu reverse ids.

    Reads the list from settings.TOP_MENU_ITEMS on every request.
    """
    items = settings.TOP_MENU_ITEMS
    return {'top_menu_items': items}
def tandc_url(request):
    """Context processor providing the terms-and-conditions URL.

    The CMS Page lookup is currently disabled, so the URL is always empty.
    """
    # Disabled implementation looked up the published CMS page:
    #     Page.objects.get(reverse_id='tandc',
    #                      publisher_is_draft=False).get_absolute_url()
    # (guarded by Page.DoesNotExist)
    return {'tandc_url': ''}
| [
"stephen.wolff@qoin.com"
] | stephen.wolff@qoin.com |
7714c67660d779859e7a8c5e203bd5850c7d3fcc | 092dd56a1bf9357466c05d0f5aedf240cec1a27b | /tests/fullscale/eqinfo/TestEqInfoTri.py | 90cb696cfd95f9f891ecb9e9d0875d78fd19a316 | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | c5f872c6afff004a06311d36ac078133a30abd99 | refs/heads/main | 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 2,571 | py | #!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/eqinfo/TestEqInfoTri.py
#
# @brief Test suite for testing pylith_eqinfo with tri fault meshes.
import numpy
from TestEqInfo import TestEqInfo, run_eqinfo
class TestEqInfoTri(TestEqInfo):
    """Test suite for testing pylith_eqinfo with tri3 meshes.
    """

    def setUp(self):
        """Run pylith_eqinfo on the tri mesh before each test.
        """
        run_eqinfo("tri", ["tri.cfg"])
        return

    def test_stats(self):
        """Check fault stats against hand-computed expected values.
        """
        import stats_tri
        timestamp = numpy.array([0.0, 1.0], dtype=numpy.float64)

        # Expected stats for fault 'one': two cells with areas 1.5 and 2.0.
        oneE = stats_tri.RuptureStats()
        oneE.timestamp = timestamp
        oneE.ruparea = numpy.array([1.5 + 2.0, 1.5 + 2.0], dtype=numpy.float64)
        # Slip magnitudes for the first timestamp (two slip components each).
        slip0 = (0.2**2 + 0.5**2)**0.5
        slip1 = (0.5**2 + 0.4**2)**0.5
        # Potency = sum(slip * area); moment additionally weights by shear
        # modulus (1.0e+10 and 2.0e+10 for the two cells).
        oneE.potency = numpy.array(
            [slip0 * 1.5 + slip1 * 2.0, 0.1 * 1.5 + 0.2 * 2.0], dtype=numpy.float64)
        oneE.moment = numpy.array([slip0 * 1.5 * 1.0e+10 + slip1 * 2.0 * 2.0e+10,
                                   0.1 * 1.5 * 1.0e+10 + 0.2 * 2.0 * 2.0e+10], dtype=numpy.float64)
        self._check(oneE, stats_tri.one)

        # Expected stats for fault 'two': one cell, no slip at second timestamp.
        twoE = stats_tri.RuptureStats()
        twoE.timestamp = timestamp
        twoE.ruparea = numpy.array([1.5, 0.0], dtype=numpy.float64)
        twoE.potency = numpy.array([0.1 * 1.5, 0.0], dtype=numpy.float64)
        twoE.moment = numpy.array(
            [0.1 * 1.5 * 1.0e+10, 0.0], dtype=numpy.float64)
        self._check(twoE, stats_tri.two)

        # Aggregate stats are the sum over both faults.
        allE = stats_tri.RuptureStats()
        allE.timestamp = timestamp
        allE.ruparea = oneE.ruparea + twoE.ruparea
        allE.potency = oneE.potency + twoE.potency
        allE.moment = oneE.moment + twoE.moment
        self._check(allE, stats_tri.all)
        return
# ----------------------------------------------------------------------
if __name__ == '__main__':
    # Stand-alone entry point: run this suite under the unittest text runner.
    import unittest
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestEqInfoTri))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
3d16707eb061763a8a198215cb3901205f667840 | 918b8b356abdaed27ee2dc1ad45503e32d8d8080 | /twisted/words/im/pbsupport.py | 6db05b771f1f133ce630598d31b06ddb7c36ec9b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hitzjd/Balance-Simulate | 683c7b424195131e4ec5691e930e0ed909631d0d | 22f06f34b0e4dbbf887f2075823dcdf4429e4b8e | refs/heads/master | 2020-03-12T09:51:50.704283 | 2018-04-22T11:27:18 | 2018-04-22T11:27:18 | 130,561,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,673 | py | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""L{twisted.words} support for Instance Messenger."""
from __future__ import nested_scopes
from twisted.internet import defer
from twisted.internet import error
from twisted.python import log, components
from twisted.python.failure import Failure
from twisted.spread import pb
from twisted.words.im.locals import ONLINE, OFFLINE, AWAY
from twisted.words.im import basesupport, interfaces
from zope.interface import implements
class TwistedWordsPerson(basesupport.AbstractPerson):
    """A facade for a person you can talk to through a twisted.words service.
    """

    def __init__(self, name, wordsAccount):
        basesupport.AbstractPerson.__init__(self, name, wordsAccount)
        # Contacts start out offline until the server reports otherwise.
        self.status = OFFLINE

    def isOnline(self):
        # AWAY still counts as reachable.
        return ((self.status == ONLINE) or
                (self.status == AWAY))

    def getStatus(self):
        return self.status

    def sendMessage(self, text, metadata):
        """Send a direct message; returns a Deferred.

        If metadata is supplied, try the metadata-aware remote call first
        and fall back to a plain message via metadataFailed.
        """
        if metadata:
            d=self.account.client.perspective.directMessage(self.name,
                                                            text, metadata)
            d.addErrback(self.metadataFailed, "* "+text)
            return d
        else:
            return self.account.client.perspective.callRemote('directMessage',self.name, text)

    def metadataFailed(self, result, text):
        # Fallback: resend as a plain message when metadata is unsupported.
        print "result:",result,"text:",text
        return self.account.client.perspective.directMessage(self.name, text)

    def setStatus(self, status):
        self.status = status
        # Keep the contacts-list UI in sync with the new status.
        self.chat.getContactsList().setContactStatus(self)
class TwistedWordsGroup(basesupport.AbstractGroup):
    """A chat group (channel) reached through a twisted.words service."""

    implements(interfaces.IGroup)

    def __init__(self, name, wordsClient):
        basesupport.AbstractGroup.__init__(self, name, wordsClient)
        # Tracks whether we are currently joined to this group (0/1).
        self.joined = 0

    def sendGroupMessage(self, text, metadata=None):
        """Send a message to the group; returns a Deferred.
        """
        #for backwards compatibility with older twisted.words servers.
        if metadata:
            d=self.account.client.perspective.callRemote(
                'groupMessage', self.name, text, metadata)
            d.addErrback(self.metadataFailed, "* "+text)
            return d
        else:
            return self.account.client.perspective.callRemote('groupMessage',
                                                              self.name, text)

    def setTopic(self, text):
        # Topic changes are propagated to the server as group metadata.
        self.account.client.perspective.callRemote(
            'setGroupMetadata',
            {'topic': text, 'topic_author': self.client.name},
            self.name)

    def metadataFailed(self, result, text):
        # Fallback: resend as a plain group message when metadata fails.
        print "result:",result,"text:",text
        return self.account.client.perspective.callRemote('groupMessage',
                                                          self.name, text)

    def joining(self):
        self.joined = 1

    def leaving(self):
        self.joined = 0

    def leave(self):
        # Ask the server to remove us from the group; returns a Deferred.
        return self.account.client.perspective.callRemote('leaveGroup',
                                                          self.name)
class TwistedWordsClient(pb.Referenceable, basesupport.AbstractClientMixin):
    """In some cases, this acts as an Account, since it is a source of text
    messages (multiple Words instances may be on a single PB connection).

    The remote_* methods are invoked by the server over Perspective Broker;
    their names are part of the wire protocol and must not be renamed.
    """

    def __init__(self, acct, serviceName, perspectiveName, chatui,
                 _logonDeferred=None):
        self.accountName = "%s (%s:%s)" % (acct.accountName, serviceName, perspectiveName)
        self.name = perspectiveName
        print "HELLO I AM A PB SERVICE", serviceName, perspectiveName
        self.chat = chatui
        self.account = acct
        # Fired in connected() once the perspective attaches.
        self._logonDeferred = _logonDeferred

    def getPerson(self, name):
        return self.chat.getPerson(name, self)

    def getGroup(self, name):
        return self.chat.getGroup(name, self)

    def getGroupConversation(self, name):
        return self.chat.getGroupConversation(self.getGroup(name))

    def addContact(self, name):
        self.perspective.callRemote('addContact', name)

    # --- server-initiated callbacks (PB remote_* protocol) ---

    def remote_receiveGroupMembers(self, names, group):
        print 'received group members:', names, group
        self.getGroupConversation(group).setGroupMembers(names)

    def remote_receiveGroupMessage(self, sender, group, message, metadata=None):
        print 'received a group message', sender, group, message, metadata
        self.getGroupConversation(group).showGroupMessage(sender, message, metadata)

    def remote_memberJoined(self, member, group):
        print 'member joined', member, group
        self.getGroupConversation(group).memberJoined(member)

    def remote_memberLeft(self, member, group):
        print 'member left'
        self.getGroupConversation(group).memberLeft(member)

    def remote_notifyStatusChanged(self, name, status):
        self.chat.getPerson(name, self).setStatus(status)

    def remote_receiveDirectMessage(self, name, message, metadata=None):
        self.chat.getConversation(self.chat.getPerson(name, self)).showMessage(message, metadata)

    def remote_receiveContactList(self, clist):
        # clist is a sequence of (name, status) pairs.
        for name, status in clist:
            self.chat.getPerson(name, self).setStatus(status)

    def remote_setGroupMetadata(self, dict_, groupName):
        # Only the topic (and optionally its author) is surfaced to the UI.
        if dict_.has_key("topic"):
            self.getGroupConversation(groupName).setTopic(dict_["topic"], dict_.get("topic_author", None))

    # --- client-initiated actions ---

    def joinGroup(self, name):
        self.getGroup(name).joining()
        return self.perspective.callRemote('joinGroup', name).addCallback(self._cbGroupJoined, name)

    def leaveGroup(self, name):
        self.getGroup(name).leaving()
        return self.perspective.callRemote('leaveGroup', name).addCallback(self._cbGroupLeft, name)

    def _cbGroupJoined(self, result, name):
        groupConv = self.chat.getGroupConversation(self.getGroup(name))
        groupConv.showGroupMessage("sys", "you joined")
        # Request the member list; arrives via remote_receiveGroupMembers.
        self.perspective.callRemote('getGroupMembers', name)

    def _cbGroupLeft(self, result, name):
        print 'left',name
        groupConv = self.chat.getGroupConversation(self.getGroup(name), 1)
        groupConv.showGroupMessage("sys", "you left")

    def connected(self, perspective):
        # Called once the PB perspective is attached; completes logon.
        print 'Connected Words Client!', perspective
        if self._logonDeferred is not None:
            self._logonDeferred.callback(self)
        self.perspective = perspective
        self.chat.getContactsList()
# Maps PB service type names to their local client front-end classes.
# twisted.reality has no front end implemented here (None).
pbFrontEnds = {
    "twisted.words": TwistedWordsClient,
    "twisted.reality": None
}
class PBAccount(basesupport.AbstractAccount):
    """An instant-messenger account reached over a Perspective Broker
    connection; one connection may carry several service perspectives.
    """

    implements(interfaces.IAccount)
    gatewayType = "PB"
    _groupFactory = TwistedWordsGroup
    _personFactory = TwistedWordsPerson

    def __init__(self, accountName, autoLogin, username, password, host, port,
                 services=None):
        """
        @param username: The name of your PB Identity.
        @type username: string
        """
        basesupport.AbstractAccount.__init__(self, accountName, autoLogin,
                                             username, password, host, port)
        # Each entry: [front-end class, service name, perspective name].
        self.services = []
        if not services:
            services = [('twisted.words', 'twisted.words', username)]
        for serviceType, serviceName, perspectiveName in services:
            self.services.append([pbFrontEnds[serviceType], serviceName,
                                  perspectiveName])

    def logOn(self, chatui):
        """
        @returns: this breaks with L{interfaces.IAccount}
        @returntype: DeferredList of L{interfaces.IClient}s
        """
        # Overriding basesupport's implementation on account of the
        # fact that _startLogOn tends to return a deferredList rather
        # than a simple Deferred, and we need to do registerAccountClient.
        if (not self._isConnecting) and (not self._isOnline):
            self._isConnecting = 1
            d = self._startLogOn(chatui)
            d.addErrback(self._loginFailed)
            def registerMany(results):
                # results is a DeferredList payload: (success, result) pairs,
                # one per attached service.
                for success, result in results:
                    if success:
                        chatui.registerAccountClient(result)
                        self._cb_logOn(result)
                    else:
                        log.err(result)
            d.addCallback(registerMany)
            return d
        else:
            raise error.ConnectionError("Connection in progress")

    def _startLogOn(self, chatui):
        # Step 1: obtain the remote root object.
        print 'Connecting...',
        d = pb.getObjectAt(self.host, self.port)
        d.addCallbacks(self._cbConnected, self._ebConnected,
                       callbackArgs=(chatui,))
        return d

    def _cbConnected(self, root, chatui):
        # Step 2: authenticate the PB identity.
        print 'Connected!'
        print 'Identifying...',
        d = pb.authIdentity(root, self.username, self.password)
        d.addCallbacks(self._cbIdent, self._ebConnected,
                       callbackArgs=(chatui,))
        return d

    def _cbIdent(self, ident, chatui):
        # Step 3: attach a handler for every configured service; each
        # handler fires its Deferred in connected().
        if not ident:
            print 'falsely identified.'
            return self._ebConnected(Failure(Exception("username or password incorrect")))
        print 'Identified!'
        dl = []
        for handlerClass, sname, pname in self.services:
            d = defer.Deferred()
            dl.append(d)
            handler = handlerClass(self, sname, pname, chatui, d)
            ident.callRemote('attach', sname, pname, handler).addCallback(handler.connected)
        return defer.DeferredList(dl)

    def _ebConnected(self, error):
        # Pass the failure through so callers' errbacks still see it.
        print 'Not connected.'
        return error
| [
"hitwhzjd@163.com"
] | hitwhzjd@163.com |
121e27cfcaef5efab2c56974139a1bafd566952e | edbf8601ae771031ad8ab27b19c2bf450ca7df76 | /19-Remove-Nth-Node-From-End-of-List/RemoveNthNodeFromEndOfList.py3 | 8a93b5636d86f21ee4c093c44be72653cf74c6a3 | [] | no_license | gxwangdi/Leetcode | ec619fba272a29ebf8b8c7f0038aefd747ccf44a | 29c4c703d18c6ff2e16b9f912210399be427c1e8 | refs/heads/master | 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 | Java | UTF-8 | Python | false | false | 670 | py3 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Remove the n-th node from the end of a singly linked list.

        Uses two pointers separated by a gap of n: when the lead pointer
        falls off the end, the trailing pointer sits on the victim node.
        """
        if head == None or n < 0:
            return None
        sentinel = ListNode(0)
        sentinel.next = head
        # Advance the lead pointer n steps (or to the end, if shorter).
        lead = head
        steps = 0
        while steps < n and lead != None:
            lead = lead.next
            steps += 1
        # Walk all three pointers until the lead runs off the list.
        before = sentinel
        victim = head
        while lead != None:
            lead = lead.next
            victim = victim.next
            before = before.next
        # Unlink the victim node.
        before.next = victim.next
        return sentinel.next
| [
"gxwangdi@gmail.com"
] | gxwangdi@gmail.com |
755afec3b1065969f31399dc2818065df4c4ce1b | e3fe234510d19c120d56f9a2876b7d508d306212 | /tool/add_user_info.py | cf7926ddc05cd92b753d4b9fb6fff028de2b4ba3 | [
"Apache-2.0"
] | permissive | KEVINYZY/python-tutorial | 78b348fb2fa2eb1c8c55d016affb6a9534332997 | ae43536908eb8af56c34865f52a6e8644edc4fa3 | refs/heads/master | 2020-03-30T02:11:03.394073 | 2019-12-03T00:52:10 | 2019-12-03T00:52:10 | 150,617,875 | 0 | 0 | Apache-2.0 | 2018-09-27T16:39:29 | 2018-09-27T16:39:28 | null | UTF-8 | Python | false | false | 863 | py | # -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Data: 17/8/30
# Brief:
import sys
# Command-line arguments: user/cdc/client mapping file, input file, output path.
path_user_cdc_client = sys.argv[1]
path_file = sys.argv[2]
path_output = sys.argv[3]

# Build userid -> full mapping line (first tab-separated field is the userid;
# the mapping file is decoded as gb18030).
userid_map = {}
with open(path_user_cdc_client, "r")as f:
    for line in f:
        userid = line.strip().split("\t")[0]
        userid = userid.decode("gb18030")
        userid_map[userid] = line.strip().decode("gb18030")

# Join each input userid with its mapping record; a set de-duplicates output.
# NOTE(review): the input file is decoded as utf-8 while the mapping file used
# gb18030 -- userids must be representable identically in both for the lookup
# to match; confirm with the actual data.
content = set()
with open(path_file, "r") as f:
    for line in f:
        # parts = (line.strip()).decode("utf-8").split("\t")
        # userid = parts[0]
        userid = (line.strip()).decode("utf8")
        if userid in userid_map:
            content.add((line.strip()).decode("utf-8") + "\t" + userid_map[userid])

# Write joined records, one per line, re-encoded as utf-8.
with open(path_output, "w") as f:
    for line in content:
        f.write((line.strip()).encode("utf-8"))
        f.write("\n")
| [
"507153809@qq.com"
] | 507153809@qq.com |
d1faa712aa4511eb6b3eab04b47070bb02eb3a92 | 1eddc123709611cf2ddccb0b7d48b722b2a09a5d | /plugins/modules/fmgr_firewall_gtp_messageratelimit.py | d76a992b2291e11e40fa13858bb313926d01ad54 | [] | no_license | fortinet-ansible-dev/ansible-galaxy-fortimanager-collection | bfb2014a72007358b491bb1d27c0fa3191ec62a8 | 63b65abce410ed4d6b76e3dd1dcf7a4341cc173d | refs/heads/main | 2023-07-09T04:33:38.304263 | 2023-06-21T21:11:57 | 2023-06-21T21:11:57 | 242,629,431 | 10 | 18 | null | 2022-12-16T15:57:16 | 2020-02-24T02:28:03 | Python | UTF-8 | Python | false | false | 111,403 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2023 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_gtp_messageratelimit
short_description: Message rate limiting.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.0.0"
author:
- Xinwei Du (@dux-fortinet)
- Xing Li (@lix-fortinet)
- Jie Xue (@JieX19)
- Link Zheng (@chillancezen)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
access_token:
description: The token to access FortiManager without using username and password.
required: false
type: str
bypass_validation:
        description: Only set to True when the module schema differs from the FortiManager API structure; the module then continues to execute without validating parameters.
required: false
type: bool
default: false
enable_log:
description: Enable/Disable logging for task.
required: false
type: bool
default: false
forticloud_access_token:
description: Authenticate Ansible client with forticloud API access token.
required: false
type: str
proposed_method:
description: The overridden method for the underlying Json RPC request.
required: false
type: str
choices:
- update
- set
- add
rc_succeeded:
description: The rc codes list with which the conditions to succeed will be overriden.
type: list
required: false
elements: int
rc_failed:
description: The rc codes list with which the conditions to fail will be overriden.
type: list
required: false
elements: int
workspace_locking_adom:
description: The adom to lock for FortiManager running in workspace mode, the value can be global and others including root.
required: false
type: str
workspace_locking_timeout:
description: The maximum time in seconds to wait for other user to release the workspace lock.
required: false
type: int
default: 300
adom:
description: the parameter (adom) in requested url
type: str
required: true
gtp:
description: the parameter (gtp) in requested url
type: str
required: true
firewall_gtp_messageratelimit:
description: the top level parameters set
required: false
type: dict
suboptions:
create-aa-pdp-request:
type: int
description: Rate limit for create AA PDP context request
create-aa-pdp-response:
type: int
description: Rate limit for create AA PDP context response
create-mbms-request:
type: int
description: Rate limit for create MBMS context request
create-mbms-response:
type: int
description: Rate limit for create MBMS context response
create-pdp-request:
type: int
description: Rate limit for create PDP context request
create-pdp-response:
type: int
description: Rate limit for create PDP context response
delete-aa-pdp-request:
type: int
description: Rate limit for delete AA PDP context request
delete-aa-pdp-response:
type: int
description: Rate limit for delete AA PDP context response
delete-mbms-request:
type: int
description: Rate limit for delete MBMS context request
delete-mbms-response:
type: int
description: Rate limit for delete MBMS context response
delete-pdp-request:
type: int
description: Rate limit for delete PDP context request
delete-pdp-response:
type: int
description: Rate limit for delete PDP context response
echo-reponse:
type: int
description: Rate limit for echo response
echo-request:
type: int
description: Rate limit for echo requests
error-indication:
type: int
description: Rate limit for error indication
failure-report-request:
type: int
description: Rate limit for failure report request
failure-report-response:
type: int
description: Rate limit for failure report response
fwd-reloc-complete-ack:
type: int
description: Rate limit for forward relocation complete acknowledge
fwd-relocation-complete:
type: int
description: Rate limit for forward relocation complete
fwd-relocation-request:
type: int
description: Rate limit for forward relocation request
fwd-relocation-response:
type: int
description: Rate limit for forward relocation response
fwd-srns-context:
type: int
description: Rate limit for forward SRNS context
fwd-srns-context-ack:
type: int
description: Rate limit for forward SRNS context acknowledge
g-pdu:
type: int
description: Rate limit for G-PDU
identification-request:
type: int
description: Rate limit for identification request
identification-response:
type: int
description: Rate limit for identification response
mbms-de-reg-request:
type: int
description: Rate limit for MBMS de-registration request
mbms-de-reg-response:
type: int
description: Rate limit for MBMS de-registration response
mbms-notify-rej-request:
type: int
description: Rate limit for MBMS notification reject request
mbms-notify-rej-response:
type: int
description: Rate limit for MBMS notification reject response
mbms-notify-request:
type: int
description: Rate limit for MBMS notification request
mbms-notify-response:
type: int
description: Rate limit for MBMS notification response
mbms-reg-request:
type: int
description: Rate limit for MBMS registration request
mbms-reg-response:
type: int
description: Rate limit for MBMS registration response
mbms-ses-start-request:
type: int
description: Rate limit for MBMS session start request
mbms-ses-start-response:
type: int
description: Rate limit for MBMS session start response
mbms-ses-stop-request:
type: int
description: Rate limit for MBMS session stop request
mbms-ses-stop-response:
type: int
description: Rate limit for MBMS session stop response
note-ms-request:
type: int
description: Rate limit for note MS GPRS present request
note-ms-response:
type: int
description: Rate limit for note MS GPRS present response
pdu-notify-rej-request:
type: int
description: Rate limit for PDU notify reject request
pdu-notify-rej-response:
type: int
description: Rate limit for PDU notify reject response
pdu-notify-request:
type: int
description: Rate limit for PDU notify request
pdu-notify-response:
type: int
description: Rate limit for PDU notify response
ran-info:
type: int
description: Rate limit for RAN information relay
relocation-cancel-request:
type: int
description: Rate limit for relocation cancel request
relocation-cancel-response:
type: int
description: Rate limit for relocation cancel response
send-route-request:
type: int
description: Rate limit for send routing information for GPRS request
send-route-response:
type: int
description: Rate limit for send routing information for GPRS response
sgsn-context-ack:
type: int
description: Rate limit for SGSN context acknowledgement
sgsn-context-request:
type: int
description: Rate limit for SGSN context request
sgsn-context-response:
type: int
description: Rate limit for SGSN context response
support-ext-hdr-notify:
type: int
description: Rate limit for support extension headers notification
update-mbms-request:
type: int
description: Rate limit for update MBMS context request
update-mbms-response:
type: int
description: Rate limit for update MBMS context response
update-pdp-request:
type: int
description: Rate limit for update PDP context request
update-pdp-response:
type: int
description: Rate limit for update PDP context response
version-not-support:
type: int
description: Rate limit for version not supported
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Message rate limiting.
fmgr_firewall_gtp_messageratelimit:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
gtp: <your own value>
firewall_gtp_messageratelimit:
create-aa-pdp-request: <value of integer>
create-aa-pdp-response: <value of integer>
create-mbms-request: <value of integer>
create-mbms-response: <value of integer>
create-pdp-request: <value of integer>
create-pdp-response: <value of integer>
delete-aa-pdp-request: <value of integer>
delete-aa-pdp-response: <value of integer>
delete-mbms-request: <value of integer>
delete-mbms-response: <value of integer>
delete-pdp-request: <value of integer>
delete-pdp-response: <value of integer>
echo-reponse: <value of integer>
echo-request: <value of integer>
error-indication: <value of integer>
failure-report-request: <value of integer>
failure-report-response: <value of integer>
fwd-reloc-complete-ack: <value of integer>
fwd-relocation-complete: <value of integer>
fwd-relocation-request: <value of integer>
fwd-relocation-response: <value of integer>
fwd-srns-context: <value of integer>
fwd-srns-context-ack: <value of integer>
g-pdu: <value of integer>
identification-request: <value of integer>
identification-response: <value of integer>
mbms-de-reg-request: <value of integer>
mbms-de-reg-response: <value of integer>
mbms-notify-rej-request: <value of integer>
mbms-notify-rej-response: <value of integer>
mbms-notify-request: <value of integer>
mbms-notify-response: <value of integer>
mbms-reg-request: <value of integer>
mbms-reg-response: <value of integer>
mbms-ses-start-request: <value of integer>
mbms-ses-start-response: <value of integer>
mbms-ses-stop-request: <value of integer>
mbms-ses-stop-response: <value of integer>
note-ms-request: <value of integer>
note-ms-response: <value of integer>
pdu-notify-rej-request: <value of integer>
pdu-notify-rej-response: <value of integer>
pdu-notify-request: <value of integer>
pdu-notify-response: <value of integer>
ran-info: <value of integer>
relocation-cancel-request: <value of integer>
relocation-cancel-response: <value of integer>
send-route-request: <value of integer>
send-route-response: <value of integer>
sgsn-context-ack: <value of integer>
sgsn-context-request: <value of integer>
sgsn-context-response: <value of integer>
support-ext-hdr-notify: <value of integer>
update-mbms-request: <value of integer>
update-mbms-response: <value of integer>
update-pdp-request: <value of integer>
update-pdp-response: <value of integer>
version-not-support: <value of integer>
'''
RETURN = '''
meta:
description: The result of the request.
type: dict
returned: always
contains:
request_url:
description: The full url requested.
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request.
returned: always
type: int
sample: 0
response_data:
description: The api response.
type: list
returned: always
response_message:
description: The descriptive message of the api response.
type: str
returned: always
sample: OK.
system_information:
description: The information of the target system.
type: dict
returned: always
rc:
description: The status the request.
type: int
returned: always
sample: 0
version_check_warning:
description: Warning if the parameters used in the playbook are not supported by the current FortiManager version.
type: list
returned: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/message-rate-limit',
'/pm/config/global/obj/firewall/gtp/{gtp}/message-rate-limit'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/message-rate-limit/{message-rate-limit}',
'/pm/config/global/obj/firewall/gtp/{gtp}/message-rate-limit/{message-rate-limit}'
]
url_params = ['adom', 'gtp']
module_primary_key = None
module_arg_spec = {
'access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'rc_succeeded': {
'required': False,
'type': 'list',
'elements': 'int'
},
'rc_failed': {
'required': False,
'type': 'list',
'elements': 'int'
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'adom': {
'required': True,
'type': 'str'
},
'gtp': {
'required': True,
'type': 'str'
},
'firewall_gtp_messageratelimit': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.0': True,
'6.2.1': True,
'6.2.2': True,
'6.2.3': True,
'6.2.5': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.0': True,
'6.4.1': True,
'6.4.2': True,
'6.4.3': True,
'6.4.4': True,
'6.4.5': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.0': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.0': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'options': {
'create-aa-pdp-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'create-aa-pdp-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'create-mbms-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'create-mbms-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'create-pdp-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'create-pdp-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-aa-pdp-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-aa-pdp-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-mbms-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-mbms-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-pdp-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'delete-pdp-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'echo-reponse': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'echo-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'error-indication': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'failure-report-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'failure-report-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-reloc-complete-ack': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-relocation-complete': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-relocation-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-relocation-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-srns-context': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'fwd-srns-context-ack': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'g-pdu': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'identification-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'identification-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-de-reg-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-de-reg-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-notify-rej-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-notify-rej-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-notify-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-notify-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-reg-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-reg-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-ses-start-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-ses-start-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-ses-stop-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'mbms-ses-stop-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'note-ms-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'note-ms-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'pdu-notify-rej-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'pdu-notify-rej-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'pdu-notify-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'pdu-notify-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'ran-info': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'relocation-cancel-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'relocation-cancel-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'send-route-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'send-route-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'sgsn-context-ack': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'sgsn-context-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'sgsn-context-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'support-ext-hdr-notify': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'update-mbms-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'update-mbms-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'update-pdp-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'update-pdp-response': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
},
'version-not-support': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True,
'6.2.0': True,
'6.2.2': True,
'6.2.6': True,
'6.2.7': True,
'6.2.8': True,
'6.2.9': True,
'6.2.10': True,
'6.4.1': True,
'6.4.3': True,
'6.4.4': True,
'6.4.6': True,
'6.4.7': True,
'6.4.8': True,
'6.4.9': True,
'6.4.10': True,
'6.4.11': True,
'7.0.1': True,
'7.0.2': True,
'7.0.3': True,
'7.0.4': True,
'7.0.5': True,
'7.0.6': True,
'7.0.7': True,
'7.2.1': True,
'7.2.2': True,
'7.4.0': True
},
'type': 'int'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_gtp_messageratelimit'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('access_token', module.params['access_token'] if 'access_token' in module.params else None)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"hq-devops-admin@fortinet.com"
] | hq-devops-admin@fortinet.com |
a7cb90a23bc7cad769ce94384e03a03fa159fdf6 | fd8ef75bb06383538cdb21ed2a0ef88e570179b7 | /src/openfermion/resource_estimates/thc/compute_lambda_thc.py | 16fa0ef8d1fcf0fdb71359635f9c43ccda4892ed | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | quantumlib/OpenFermion | d1147383f99573d19005bd0f3e0120e9e9bed04c | 788481753c798a72c5cb3aa9f2aa9da3ce3190b0 | refs/heads/master | 2023-09-04T11:00:32.124157 | 2023-08-24T21:54:30 | 2023-08-24T21:54:30 | 104,403,768 | 1,481 | 406 | Apache-2.0 | 2023-08-24T21:54:31 | 2017-09-21T22:10:28 | Python | UTF-8 | Python | false | false | 2,973 | py | #coverage:ignore
"""
Compute lambdas for THC according to
PRX QUANTUM 2, 030305 (2021) Section II. D.
"""
import numpy as np
from openfermion.resource_estimates.molecule import pyscf_to_cas
def compute_lambda(pyscf_mf,
etaPp: np.ndarray,
MPQ: np.ndarray,
use_eri_thc_for_t=False):
"""
Compute lambda thc
Args:
pyscf_mf - PySCF mean field object
etaPp - leaf tensor for THC that is dim(nthc x norb). The nthc and norb
is inferred from this quantity.
MPQ - central tensor for THC factorization. dim(nthc x nthc)
Returns:
"""
nthc = etaPp.shape[0]
# grab tensors from pyscf_mf object
h1, eri_full, _, _, _ = pyscf_to_cas(pyscf_mf)
# computing Least-squares THC residual
CprP = np.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
BprQ = np.tensordot(CprP, MPQ, axes=([2], [0]))
Iapprox = np.tensordot(CprP, np.transpose(BprQ), axes=([2], [0]))
deri = eri_full - Iapprox
res = 0.5 * np.sum((deri)**2)
# NOTE: remove in future once we resolve why it was used in the first place.
# NOTE: see T construction for details.
eri_thc = np.einsum("Pp,Pr,Qq,Qs,PQ->prqs",
etaPp,
etaPp,
etaPp,
etaPp,
MPQ,
optimize=True)
# projecting into the THC basis requires each THC factor mu to be nrmlzd.
# we roll the normalization constant into the central tensor zeta
SPQ = etaPp.dot(
etaPp.T) # (nthc x norb) x (norb x nthc) -> (nthc x nthc) metric
cP = np.diag(np.diag(
SPQ)) # grab diagonal elements. equivalent to np.diag(np.diagonal(SPQ))
# no sqrts because we have two normalized THC vectors (index by mu and nu)
# on each side.
MPQ_normalized = cP.dot(MPQ).dot(cP) # get normalized zeta in Eq. 11 & 12
lambda_z = np.sum(np.abs(MPQ_normalized)) * 0.5 # Eq. 13
# NCR: originally Joonho's code add np.einsum('llij->ij', eri_thc)
# NCR: I don't know how much this matters.
if use_eri_thc_for_t:
# use eri_thc for second coulomb contraction. This was in the original
# code which is different than what the paper says.
T = h1 - 0.5 * np.einsum("illj->ij", eri_full) + np.einsum(
"llij->ij", eri_thc) # Eq. 3 + Eq. 18
else:
T = h1 - 0.5 * np.einsum("illj->ij", eri_full) + np.einsum(
"llij->ij", eri_full) # Eq. 3 + Eq. 18
#e, v = np.linalg.eigh(T)
e = np.linalg.eigvalsh(T) # only need eigenvalues
lambda_T = np.sum(
np.abs(e)) # Eq. 19. NOTE: sum over spin orbitals removes 1/2 factor
lambda_tot = lambda_z + lambda_T # Eq. 20
#return nthc, np.sqrt(res), res, lambda_T, lambda_z, lambda_tot
return lambda_tot, nthc, np.sqrt(res), res, lambda_T, lambda_z
| [
"noreply@github.com"
] | quantumlib.noreply@github.com |
308fa8a66e05dc307f31798d5666ddc5f04c04b9 | a6d8465aed280c36fb7129e1fa762535bae19941 | /embroidery365/builder/models.py | 2a66c14eedd56fbd8017ed36c96924943f213b2b | [] | no_license | rahuezo/365digitizing_and_embroidery | c61c53f567e73163a67d3fd568a20551a3681ccd | 41a22b6ff8bd83238219f2d34ce13b5a8ef9bb57 | refs/heads/master | 2020-09-02T11:59:07.702947 | 2017-11-11T02:40:01 | 2017-11-11T02:40:01 | 98,377,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class BaseItem(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Style(models.Model):
name = models.CharField(max_length=255)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return self.name
class Size(models.Model):
size = models.CharField(max_length=3)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return "{0}-{1}".format(self.base_item.name, self.size)
class Placement(models.Model):
position = models.CharField(max_length=255)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return "{0}-{1}".format(self.base_item.name, self.position)
class Order(models.Model):
customer = models.ForeignKey(User, on_delete=models.CASCADE)
order_base_item = models.CharField(max_length=255)
order_style = models.CharField(max_length=255)
order_logo = models.ImageField(blank=True)
order_item_placement = models.CharField(max_length=255)
order_logo_width = models.DecimalField(max_digits=10, decimal_places=2)
order_logo_height = models.DecimalField(max_digits=10, decimal_places=2)
order_details = models.TextField(blank=True)
total = models.DecimalField(max_digits=10, decimal_places=2)
extra_details = models.TextField(blank=True, null=True)
logo_colors = models.TextField(blank=True, null=True)
created = models.DateField(auto_now_add=True)
def __str__(self):
return "{0} {1} {2}".format(self.customer, self.order_base_item, self.order_style)
| [
"rahuezo@ucdavis.edu"
] | rahuezo@ucdavis.edu |
0c681f2924ffd76c5f9ae78985c972d4343bc44c | 0d2f636592dc12458254d793f342857298c26f12 | /11-1(tag).py | 734ce229ca7758695bc91f180e55aa7f25671b3e | [] | no_license | chenpc1214/test | c6b545dbe13e672f11c58464405e024394fc755b | 8610320686c499be2f5fa36ba9f11935aa6d657b | refs/heads/master | 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
def absolute(n):
""" 絕對值設計 """
if n < 0:
n = -n
print("絕對值是 ", n)
x = int(input("請輸入數值 = "))
absolute(x)
"""自己做的"""
"""n = input("請輸入數值=")
def absolute(n):
return abs(n)"""
| [
"kkbuger1523@gmail.com"
] | kkbuger1523@gmail.com |
acc6ecdc5245f8d6a5f64b44c64aaf65b596ec4f | 7b4820948845f55274b211d676ab8a6253a6298b | /addons/plugin.video.onlinemovies/default.py | 85dfea3da113da4f3a25fa280fcd2dfecd7eab42 | [] | no_license | bopopescu/mw | 524c57d4b859751e298b907a12e44e9711ef72a6 | 5ef2acea0fb4150578e53201463c6bc5da37be20 | refs/heads/master | 2021-05-30T19:33:11.750160 | 2016-01-11T05:28:46 | 2016-01-11T05:28:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,475 | py | import urllib,urllib2,re,xbmcplugin,xbmcgui,urlresolver,sys,xbmc,xbmcaddon,os,urlparse
from t0mm0.common.addon import Addon
from metahandler import metahandlers
addon_id = 'plugin.video.onlinemovies'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon(addon_id, sys.argv)
ADDON2=xbmcaddon.Addon(id='plugin.video.onlinemovies')
fanart = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id , 'fanart.jpg'))
icon = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id, 'icon.png'))
metaset = selfAddon.getSetting('enable_meta')
def CATEGORIES():
addDir2('Latest Cinema Releases','http://onlinemovies.pro/category/most-popular-new-movies/?filtre=date',1,icon,'',fanart)
addDir2('Recently Added','http://onlinemovies.pro/category/genre/?filtre=date',1,icon,'',fanart)
addDir2('Most Viewed','http://onlinemovies.pro/category/genre/?filtre=views',1,icon,'',fanart)
addDir2('Highest Rated','http://onlinemovies.pro/category/genre/?filtre=rate',1,icon,'',fanart)
addDir2('HD Movies','http://onlinemovies.pro/category/hd-movies/?filtre=random',1,icon,'',fanart)
addDir2('Christmas Movies','http://onlinemovies.pro/category/christmas-movies/',1,icon,'',fanart)
addDir2('Disney','http://onlinemovies.pro/category/disneys/',1,icon,'',fanart)
addDir2('Latest TV Episodes','http://onlinemovies.pro/category/serials/?filtre=date',1,icon,'',fanart)
addDir2('Search','url',3,icon,'',fanart)
xbmc.executebuiltin('Container.SetViewMode(50)')
def GETMOVIES(url,name):
metaset = selfAddon.getSetting('enable_meta')
link = open_url(url)
if 'category/serials/' in url: metaset='false'
match=re.compile('<a href="(.+?)" title="(.+?)">').findall(link)[:-12]
for url,name in match:
name=cleanHex(name)
if metaset=='false':
addLink(name,url,100,icon,'',fanart)
else: addDir(name,url,100,'',len(match),isFolder=False)
try:
url=re.compile("<link rel='next' href='(.+?)'/>").findall(link)[0]
addDir2('Next Page>>',url,1,icon,'',fanart)
except: pass
if metaset=='true':
setView('movies', 'MAIN')
else: xbmc.executebuiltin('Container.SetViewMode(50)')
def cleanHex(text):
def fixup(m):
text = m.group(0)
if text[:3] == "&#x": return unichr(int(text[3:-1], 16)).encode('utf-8')
else: return unichr(int(text[2:-1])).encode('utf-8')
return re.sub("(?i)&#\w+;", fixup, text.decode('ISO-8859-1').encode('utf-8'))
def SEARCH():
search_entered =''
keyboard = xbmc.Keyboard(search_entered, 'Search Online Movies Pro')
keyboard.doModal()
if keyboard.isConfirmed():
search_entered = keyboard.getText().replace(' ','+')
if len(search_entered)>1:
url = 'http://onlinemovies.pro/?s='+ search_entered
link = open_url(url)
GETMOVIES(url,name)
def PLAYLINK(name,url,iconimage):
link = open_url(url)
try: url=re.compile('src="(.+?)" allowFullScreen></iframe>').findall(link)[0]
except: url=re.compile("src='(.+?)' allowFullScreen></iframe>").findall(link)[0]
ua='|User-Agent=Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36'
#### THANKS TO LAMBDA ####
import client
import jsunpack
url = urlparse.urlparse(url).query
url = urlparse.parse_qsl(url)[0][1]
url = 'http://videomega.tv/cdn.php?ref=%s' % url
result = client.request(url)
unpacked = ''
packed = result.split('\n')
for i in packed:
try: unpacked += jsunpack.unpack(i)
except: unpacked += i
result = unpacked
result = re.sub('\s\s+', ' ', result)
url = re.compile('"video".+?"src"\s*\,\s*"(.+?)"').findall(result)
url += client.parseDOM(result, 'source', ret='src', attrs = {'type': 'video.+?'})
url = url[0]+ua
#### THANKS TO LAMBDA ####
ok=True
liz=xbmcgui.ListItem(name, iconImage=icon,thumbnailImage=icon); liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
xbmc.Player ().play(url, liz, False)
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def addLink(name,url,mode,iconimage,description,fanart):
xbmc.executebuiltin('Container.SetViewMode(50)')
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
liz.setProperty('fanart_image', fanart)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
return ok
def addDir2(name,url,mode,iconimage,description,fanart):
xbmc.executebuiltin('Container.SetViewMode(50)')
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
liz.setProperty('fanart_image', fanart)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addDir(name,url,mode,iconimage,itemcount,isFolder=False):
if metaset=='true':
splitName=name.partition('(')
simplename=""
simpleyear=""
if len(splitName)>0:
simplename=splitName[0]
simpleyear=splitName[2].partition(')')
if len(simpleyear)>0:
simpleyear=simpleyear[0]
mg = metahandlers.MetaData()
meta = mg.get_meta('movie', name=simplename ,year=simpleyear)
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&site="+str(site)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage=meta['cover_url'], thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels= meta )
contextMenuItems = []
contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
if not meta['backdrop_url'] == '': liz.setProperty('fanart_image', meta['backdrop_url'])
else: liz.setProperty('fanart_image', fanart)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=isFolder,totalItems=itemcount)
return ok
else:
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&site="+str(site)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=icon)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('fanart_image', fanart)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=isFolder)
return ok
def open_url(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
def setView(content, viewType):
if content:
xbmcplugin.setContent(int(sys.argv[1]), content)
if ADDON2.getSetting('auto-view')=='true':
xbmc.executebuiltin("Container.SetViewMode(%s)" % ADDON2.getSetting(viewType) )
params=get_params(); url=None; name=None; mode=None; site=None; iconimage=None
try: site=urllib.unquote_plus(params["site"])
except: pass
try: url=urllib.unquote_plus(params["url"])
except: pass
try: name=urllib.unquote_plus(params["name"])
except: pass
try: mode=int(params["mode"])
except: pass
try: iconimage=urllib.unquote_plus(params["iconimage"])
except: pass
print "Site: "+str(site); print "Mode: "+str(mode); print "URL: "+str(url); print "Name: "+str(name)
print params
if mode==None or url==None or len(url)<1: CATEGORIES()
elif mode==1: GETMOVIES(url,name)
elif mode==2: GETTV(url,name)
elif mode==3: SEARCH()
elif mode==100: PLAYLINK(name,url,iconimage)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| [
"bialagary@Garys-Mac-mini.local"
] | bialagary@Garys-Mac-mini.local |
fbd130a61a01bd9a6823abfaf12fe6aaabe8bdfa | 7e80b503e6563f190147e5e6bc2b47d5d8020510 | /xpdan/tests/test_main_pipeline.py | d48ca0a8d228efbdc4760025b3eb6e1f3beab1e4 | [] | no_license | eaculb/xpdAn | a687c15175fdff82a0f2092509faa7c2da288357 | 82e16ba50ddbfb9dcd0dba9b3b181354c329d58a | refs/heads/master | 2021-09-04T06:07:48.173024 | 2017-10-11T14:24:59 | 2017-10-11T14:24:59 | 106,735,663 | 0 | 0 | null | 2017-10-12T19:20:47 | 2017-10-12T19:20:47 | null | UTF-8 | Python | false | false | 1,230 | py | import os
import time
from xpdan.pipelines.main import conf_main_pipeline
def test_main_pipeline(exp_db, fast_tmp_dir, start_uid3):
"""Decider between pipelines"""
source = conf_main_pipeline(exp_db, fast_tmp_dir,
vis=True,
write_to_disk=True,
mask_setting=None,
verbose=True)
# source.visualize('/home/christopher/dev/xpdAn/examples/mystream.png')
t0 = time.time()
for nd in exp_db[-1].documents(fill=True):
source.emit(nd)
t1 = time.time()
print(t1 - t0)
for root, dirs, files in os.walk(fast_tmp_dir):
level = root.replace(fast_tmp_dir, '').count(os.sep)
indent = ' ' * 4 * level
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
assert 'Au' in os.listdir(fast_tmp_dir)
assert 'Au_{:.6}.yml'.format(start_uid3) in os.listdir(
os.path.join(fast_tmp_dir, 'Au'))
for f in ['dark_sub', 'mask', 'iq_q', 'iq_tth', 'pdf']:
assert f in os.listdir(
os.path.join(fast_tmp_dir, 'Au'))
| [
"cjwright4242@gmail.com"
] | cjwright4242@gmail.com |
e14f2fe359cddfd679bf0d49b1f8bd0071ee606d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rvm.py | e7aa454725a371fb08c6f2504204562064724860 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rVM':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
66edbd7ed1b95691bb48c4cba65b30a6f7bba699 | 523239f9fd7aa76b246e7fdf16f3298b0e178878 | /Old_Code/code/miscc/loss_back.py | fe2c264abc08624dd34fbcfd76d04e2cf696c16e | [] | no_license | Trccc/SBA-GAN | d1e69421255f56d7558fbbb13478eff51c6ff5e2 | 581d0ced59b0b00900ad68cc927d9ba39cd58671 | refs/heads/master | 2020-11-25T03:02:00.822588 | 2019-12-03T03:41:00 | 2019-12-03T03:41:00 | 228,468,462 | 2 | 0 | null | 2019-12-16T20:19:32 | 2019-12-16T20:19:31 | null | UTF-8 | Python | false | false | 8,193 | py | # Created by Chirong Nov 4th 21:01
import torch
import torch.nn as nn
import numpy as np
from miscc.config import cfg
from GlobalAttention import func_attention
# ##################Loss for matching text-image###################
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
"""Returns cosine similarity between x1 and x2, computed along dim.
"""
w12 = torch.sum(x1 * x2, dim)
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
def sent_loss(cnn_code, rnn_code, labels, class_ids,
batch_size, eps=1e-8):
# ### Mask mis-match samples ###
# that come from the same class as the real sample ###
masks = []
if class_ids is not None:
for i in range(batch_size):
mask = (class_ids == class_ids[i]).astype(np.uint8)
mask[i] = 0
masks.append(mask.reshape((1, -1)))
masks = np.concatenate(masks, 0)
# masks: batch_size x batch_size
masks = torch.ByteTensor(masks)
if cfg.CUDA:
masks = masks.cuda()
# --> seq_len x batch_size x nef
if cnn_code.dim() == 2:
cnn_code = cnn_code.unsqueeze(0)
rnn_code = rnn_code.unsqueeze(0)
# cnn_code_norm / rnn_code_norm: seq_len x batch_size x 1
cnn_code_norm = torch.norm(cnn_code, 2, dim=2, keepdim=True)
rnn_code_norm = torch.norm(rnn_code, 2, dim=2, keepdim=True)
# scores* / norm*: seq_len x batch_size x batch_size
scores0 = torch.bmm(cnn_code, rnn_code.transpose(1, 2))
norm0 = torch.bmm(cnn_code_norm, rnn_code_norm.transpose(1, 2))
scores0 = scores0 / norm0.clamp(min=eps) * cfg.TRAIN.SMOOTH.GAMMA3
# --> batch_size x batch_size
scores0 = scores0.squeeze()
if class_ids is not None:
scores0.data.masked_fill_(masks, -float('inf'))
scores1 = scores0.transpose(0, 1)
if labels is not None:
loss0 = nn.CrossEntropyLoss()(scores0, labels)
loss1 = nn.CrossEntropyLoss()(scores1, labels)
else:
loss0, loss1 = None, None
return loss0, loss1
def words_loss(img_features, words_emb, labels,
cap_lens, class_ids, batch_size):
"""
words_emb(query): batch x nef x seq_len
img_features(context): batch x nef x 17 x 17
"""
masks = []
att_maps = []
similarities = []
cap_lens = cap_lens.data.tolist()
for i in range(batch_size):
if class_ids is not None:
mask = (class_ids == class_ids[i]).astype(np.uint8)
mask[i] = 0
masks.append(mask.reshape((1, -1)))
# Get the i-th text description
words_num = cap_lens[i]
# -> 1 x nef x words_num
word = words_emb[i, :, :words_num].unsqueeze(0).contiguous()
# -> batch_size x nef x words_num
word = word.repeat(batch_size, 1, 1)
# batch x nef x 17*17
context = img_features
"""
word(query): batch x nef x words_num
context: batch x nef x 17 x 17
weiContext: batch x nef x words_num
attn: batch x words_num x 17 x 17
"""
weiContext, attn = func_attention(word, context, cfg.TRAIN.SMOOTH.GAMMA1)
att_maps.append(attn[i].unsqueeze(0).contiguous())
# --> batch_size x words_num x nef
word = word.transpose(1, 2).contiguous()
weiContext = weiContext.transpose(1, 2).contiguous()
# --> batch_size*words_num x nef
word = word.view(batch_size * words_num, -1)
weiContext = weiContext.view(batch_size * words_num, -1)
#
# -->batch_size*words_num
row_sim = cosine_similarity(word, weiContext)
# --> batch_size x words_num
row_sim = row_sim.view(batch_size, words_num)
# Eq. (10)
row_sim.mul_(cfg.TRAIN.SMOOTH.GAMMA2).exp_()
row_sim = row_sim.sum(dim=1, keepdim=True)
row_sim = torch.log(row_sim)
# --> 1 x batch_size
# similarities(i, j): the similarity between the i-th image and the j-th text description
similarities.append(row_sim)
# batch_size x batch_size
similarities = torch.cat(similarities, 1)
if class_ids is not None:
masks = np.concatenate(masks, 0)
# masks: batch_size x batch_size
masks = torch.ByteTensor(masks)
if cfg.CUDA:
masks = masks.cuda()
similarities = similarities * cfg.TRAIN.SMOOTH.GAMMA3
if class_ids is not None:
similarities.data.masked_fill_(masks, -float('inf'))
similarities1 = similarities.transpose(0, 1)
if labels is not None:
loss0 = nn.CrossEntropyLoss()(similarities, labels)
loss1 = nn.CrossEntropyLoss()(similarities1, labels)
else:
loss0, loss1 = None, None
return loss0, loss1, att_maps
# ##################Loss for G and Ds##############################
def discriminator_loss(netD, real_imgs, fake_imgs, conditions,
                       real_labels, fake_labels):
    """Binary cross-entropy loss for one discriminator.

    Combines a conditional term (features paired with their sentence
    conditions), a "wrong pairing" term (real features paired with shifted,
    mismatched conditions, labelled fake), and — when the discriminator has
    an unconditional head — an unconditional real/fake term.
    """
    bce = nn.BCELoss()

    # Discriminator features for real and (detached) generated images.
    feat_real = netD(real_imgs)
    feat_fake = netD(fake_imgs.detach())

    # Conditional terms: real pairs should score real, fake pairs fake.
    err_cond_real = bce(netD.COND_DNET(feat_real, conditions), real_labels)
    err_cond_fake = bce(netD.COND_DNET(feat_fake, conditions), fake_labels)

    # Wrong-pairing term: real images matched with the next sample's
    # condition must be classified as fake.
    n = feat_real.size(0)
    wrong_logits = netD.COND_DNET(feat_real[:(n - 1)], conditions[1:n])
    err_cond_wrong = bce(wrong_logits, fake_labels[1:n])

    if netD.UNCOND_DNET is None:
        return err_cond_real + (err_cond_fake + err_cond_wrong) / 2.

    # Unconditional terms, averaged together with the conditional ones.
    err_real = bce(netD.UNCOND_DNET(feat_real), real_labels)
    err_fake = bce(netD.UNCOND_DNET(feat_fake), fake_labels)
    return ((err_real + err_cond_real) / 2. +
            (err_fake + err_cond_fake + err_cond_wrong) / 3.)
# Nov 4th 21:26 Chirong
def generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                   words_embs, sent_emb, match_labels,
                   cap_lens, class_ids):
    """Adversarial generator loss summed over every discriminator, plus the
    word- and sentence-level matching losses computed on the final
    (highest-resolution) generated image.

    Returns (total_loss, log_string).
    """
    batch_size = real_labels.size(0)
    bce = nn.BCELoss()
    errG_total = 0
    logs = ''

    last = len(netsD) - 1
    for i, netD in enumerate(netsD):
        feats = netD(fake_imgs[i])
        # Conditional adversarial term: fakes should fool D into "real".
        g_loss = bce(netD.COND_DNET(feats, sent_emb), real_labels)
        if netD.UNCOND_DNET is not None:
            # Add the unconditional adversarial term when the head exists.
            g_loss = bce(netD.UNCOND_DNET(feats), real_labels) + g_loss
        errG_total += g_loss
        # err_img = errG_total.data[0]
        logs += 'g_loss%d: %.2f ' % (i, g_loss.data[0])

        if i == last:
            # Matching losses on the final-stage image:
            # region (word-level) and global (sentence-level) features.
            region_features, cnn_code = image_encoder(fake_imgs[i])
            w_loss0, w_loss1, _ = words_loss(region_features, words_embs,
                                             match_labels, cap_lens,
                                             class_ids, batch_size)
            w_loss = (w_loss0 + w_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            s_loss0, s_loss1 = sent_loss(cnn_code, sent_emb,
                                         match_labels, class_ids, batch_size)
            s_loss = (s_loss0 + s_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            errG_total += w_loss + s_loss
            logs += 'w_loss: %.2f s_loss: %.2f ' % (w_loss.data[0], s_loss.data[0])
    return errG_total, logs
##################################################################
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
| [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
33ffbc046e4b0ac1d10d366ef83a771449e1ddc9 | 5ba2ea4694d9423bc5435badba93b7b8fedfadd0 | /webapp/common/form_filter.py | b8a0c53c890bb381213adb1227e781f2a1f1a292 | [] | no_license | Digital-Botschafter-und-mehr/mein-stadtarchiv | bdf480d82b366253afd27c697143ad5d727f652f | a9876230edac695710d4ec17b223e065fa61937c | refs/heads/master | 2023-02-05T18:43:13.159174 | 2021-01-01T09:35:46 | 2021-01-01T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | # encoding: utf-8
"""
Copyright (c) 2017, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
def json_filter(value):
    """Template filter: deserialize the JSON string *value* into Python data."""
    parsed = json.loads(value)
    return parsed
| [
"mail@ernestoruge.de"
] | mail@ernestoruge.de |
ba73735c7237f4e48b4b1fbd2aa067c357f01d0e | c2be187155aabf59a4c0d3f5065bc26239c0b827 | /get_products.py | f2882901593542808abf671d139350736cc370d0 | [] | no_license | dankCodeNugs/tmtext | 1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d | 8e2d834775f440def7f57294674b8109b46ee191 | refs/heads/master | 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/python
# extract products from a certain output file, that belong to a certain category/department
# and write their respective category ids to a file
# usage: site given as first argument, category as second argument
import json
import codecs
import re
import sys
from pprint import pprint
def get_products(filename, category):
    """Return the product names in JSON-lines file *filename* that belong to
    *category*, matched against either the 'department' or 'category' field.

    Each non-blank line is expected to be a JSON object carrying a
    'product_name' key.
    """
    products = []
    # 'with' guarantees the file is closed even if a line fails to parse
    # (the original left the handle open on any exception).
    with codecs.open(filename, "r", "utf-8") as output_all:
        for line in output_all:
            line = line.strip()
            if not line:
                continue
            item = json.loads(line)
            # Match on either field with a single append, so an item whose
            # 'department' AND 'category' both equal *category* is no longer
            # counted twice.
            if item.get('department') == category or item.get('category') == category:
                products.append(item['product_name'])
    return products
# Command-line usage: <script> <site> <category>
site = sys.argv[1]
category = sys.argv[2]

# Build the path to the site's bestsellers output file and pretty-print the
# product names found in the requested category/department.
filename = "sample_output/" + site + "_bestsellers_dept.jl"
prods = get_products(filename, category)
pprint(prods)
| [
"life.long.learner127@outlook.com"
] | life.long.learner127@outlook.com |
5ccb3f3b16bf9a927d6c3b37a551c8127225de2e | 696b9b8963a6b26776849f69263e50860317a37d | /PyPoll/main.py | 61a19cdc1d53b9a47dffc15695d5372dc380ec3e | [] | no_license | neelarka/python-challenge | 64a7099470b2885511b568625c8c0e320801da82 | 9060c30ed9a319807fd5bd8756b3bc36f522bbc8 | refs/heads/master | 2020-03-24T22:00:26.874430 | 2018-08-05T04:23:11 | 2018-08-05T04:23:11 | 143,061,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | py | import os
import csv
from pathlib import Path
filepath = Path("../../Desktop/election_data.csv")
# Read the ballots; each data row is (Voter ID, County, Candidate).
with open(filepath, newline="", encoding='utf-8') as csvfile:
    reader = csv.reader(csvfile, delimiter=",")
    next(reader, None)  # skip the header row
    ballots = list(reader)

total_votes = len(ballots)

# Tally every candidate in a single pass over the ballots instead of one
# full scan per candidate (the original rebuilt a list per name).
candidate_names = ("Khan", "Correy", "Li", "O'Tooley")
counts = dict.fromkeys(candidate_names, 0)
for row in ballots:
    name = row[2]
    if name in counts:
        counts[name] += 1

# Vote shares as fractions of the total, printed with three decimals to
# match the original report format (multiply by 100 for true percentages).
shares = {name: counts[name] / total_votes for name in candidate_names}

# max() over the tuple keeps the original tie-break order
# (Khan, Correy, Li, O'Tooley).
winner = max(candidate_names, key=lambda name: counts[name])

# Build the report once and reuse it for both the console and the file.
# (The original wrote Khan, Li and O'Tooley to the file but dropped Correy.)
report_lines = [
    "Election Results",
    "--------------------",
    "The Total Votes : %d" % total_votes,
    "--------------------",
]
for name in candidate_names:
    report_lines.append("%s: %.3f (%d)" % (name, shares[name], counts[name]))
report_lines.append("--------------------")
report_lines.append(" Winner : " + winner)
report_lines.append("--------------------")

print("\n".join(report_lines))

# 'with' guarantees the output file is closed even if a write fails.
with open("Output_PyPoll.txt", "w") as text_file:
    text_file.write("\n".join(report_lines) + "\n")
| [
"you@example.com"
] | you@example.com |
5e08cd42df0284a8dbe8ab6c59c652b94834a5ae | b2d4c5b7738f3b53126d73bcac6165cbb32445eb | /05_数据存储/02-关系型数据库存储/_01_MySQL的存储/_05_删除数据.py | c378a6ab681dba229320434e26d188cf3b8b0411 | [] | no_license | xuelang201201/Python3Spider | 933911abb5056bc7864d3c6bfaf1d7f75ca6ac98 | 0b190f11f74f66058eda6a40a000c5d6076764ea | refs/heads/master | 2022-07-02T11:15:42.032997 | 2020-05-16T13:01:08 | 2020-05-16T13:01:08 | 259,358,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import pymysql
db = pymysql.connect(host='localhost', user='root', password='123456', port=3306, db='spiders')
cursor = db.cursor()
table = 'students'
condition = 'age > 20'
sql = 'DELETE FROM {table} WHERE {condition}'.format(table=table, condition=condition)
try:
cursor.execute(sql)
print('Successful')
db.commit()
except Exception as reason:
print('Failed: ' + str(reason))
db.rollback()
db.close()
| [
"xuelang201201@gmail.com"
] | xuelang201201@gmail.com |
903c419d5b5ada8e3f10bf8af828028c8b21c111 | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/flight_price_analysis/conf/conf.py | 03dee15c55f02670e982b0b01d56b2fd80b699f2 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #coding=utf-8
__author__ = 'zhangc'
import ConfigParser
import os
import sys
# Project root: two directories above this module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Test-environment paths (kept for reference):
# conf_path=path+"/conf/db.conf"
# conf_path=path+"/conf/db3.conf"
# Deployment paths (kept for reference):
# conf_path="conf/db3.conf"
# conf_path="db.conf"
conf_path = BASE_DIR+"/conf/db.conf"

class DBConf(object):
    """Reader for the INI-style database config file at ``conf_path``.

    Intended to be used as a singleton via :meth:`getInst`; note that
    calling ``DBConf()`` directly still creates a fresh, independent
    instance — the constructor does not enforce the singleton.
    """
    # Cached singleton instance used by getInst().
    _inst=None
    def __init__(self):
        """Parse the config file once and keep the parser on the instance."""
        self.config=ConfigParser.ConfigParser()
        with open(conf_path,'r') as conf_file:
        # with open(conf_path,'r') as conf_file:
            self.config.readfp(conf_file)

    @staticmethod
    def getInst():
        """Return the shared DBConf instance, creating it on first use."""
        if not DBConf._inst:
            # Bypass __call__ and initialise the instance manually.
            DBConf._inst = object.__new__(DBConf)
            DBConf._inst.__init__()
        return DBConf._inst

    def get_mysql(self, key):
        """Return the value of *key* from the [mysql] section."""
        return self.config.get('mysql', key)

if __name__=="__main__":
    pass
    # Smoke test: read one known key from the [mysql] section.
    test=DBConf()
    # print(test.get_mysql("databasegtgj"))
    print(test.get_mysql("databasebi"))
| [
"744996162@qq.com"
] | 744996162@qq.com |
4c67b10133e699217a67331d1ee268eb65d7d2c7 | 34cab614568d4ce3cf28167450d6d2bc2bf7bfbf | /importers/cunia.py | 287229bfda6b4aace79d72da50a213322351c454 | [] | no_license | Halicea/ArmandDictionar | 07949936efd3a55edfa1e7a1e12d1ed8c48c4bdf | a82f77065e03cafa6c6b50c163fa53858ab356b8 | refs/heads/master | 2016-09-05T14:03:41.261258 | 2013-04-29T10:27:27 | 2013-04-29T10:27:27 | 964,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | # -*- coding: utf-8 -*-
import codecs
import re
import os
# Languages for which translations are extracted from each entry.
langs = ['ro', 'en', 'fr']
# Running totals updated by parse_line() while processing the dictionary.
err_count = 0  # entries whose '{lang:' marker has no closing '}'
tr_not_found = {'ro':0, 'en':0, 'fr':0}  # entries missing that language's marker
with_ref=0  # entries that are cross-references ('vedz ...')

def parse_line(l, index):
    """Parse one dictionary entry line into a list of word records.

    An entry may hold several synonyms separated by the section sign (§);
    each synonym becomes a dict with the headword ('rmn'), per-language
    translation lists, the raw text, and an optional cross-reference.
    Updates the module-level err_count / tr_not_found / with_ref counters.

    NOTE(review): the *index* argument is overwritten inside the loop, so
    the value passed by the caller is never used — confirm intent.
    """
    global err_count
    global tr_not_found
    global with_ref
    results = []
    # synonyms or similar meanings are separated by '§'
    words = l.split(u'§')
    for i in range(0, len(words)):
        res = {}
        index = 0
        w = words[i].strip()
        if ' ' in w:
            # The headword is everything up to the first space.
            res['rmn']=w[:w.index(' ')]
            index = w.index(' ')+1
        # find translations of the form '{lang: a, b, c}'
        for lang in langs:
            key = '{%s:'%lang
            if( key in w):
                lindex = w.index(key)+len(lang)+2
                try:
                    rindex = w.index('}', lindex)
                    res[lang] = w[lindex:rindex].split(',')
                except:
                    # No closing '}' found — count it as a parse error.
                    err_count+=1
                    #print w.encode('utf-8', errors='ignore')
            elif not ('vedz' in w):
                # No marker and not a cross-reference: translation missing.
                tr_not_found[lang] +=1
                res[lang]=[]
        if 'vedz' in w:
            # 'vedz' marks a cross-reference to another entry.
            with_ref+=1
            # NOTE(review): 'referece' looks like a typo for 'reference';
            # kept as-is because it is the stored runtime key.
            res['referece']= w[w.index('vedz')+4:]
        res['raw'] = w
        res['index'] = index
        results.append(res)
    return results
# --- Stage 1: read the raw dictionary dump and stitch wrapped lines ---
directory = '/Users/kostamihajlov/Desktop/cunia'
d =[]
merge_count = 0
warn_merge = 0
max_merge =10
current_let = None
line = 0
lines = tuple(codecs.open(os.path.join(directory,'cunia.txt'),'r', 'utf16'))
lang_matcher = "\{[fr: en: ro:].*\}"
# A physical line ending in one of these markers continues on the next line.
merge_required = [u'unã cu', u'vedz', u'tu-aestu', '{ro:','{en:','{fr:']
merge_required_count = 0
# NOTE(review): the inner loop below reuses the name 'k', shadowing the
# outer loop variable — harmless here, but worth renaming.
for k in lines:
    clean= k.strip().replace(u'\ufffc', '')
    if len(clean)==1:
        # Single-character lines are section headers: the current letter.
        current_let = clean
        print 'Starting with letter:%s'%current_let
    elif not clean:
        pass
    elif u"Dictsiunar a Limbãljei Armãneascã" in clean:
        # Skip the running page header.
        pass
    else:
        merged = False
        if d:
            for k in merge_required:
                if(d[-1].endswith(k)):
                    d[-1] = d[-1]+' '+clean
                    merged = True
                    merge_required_count+=1
                    break;
        if not merged:
            # Lines that do not start with the current letter are treated
            # as wrapped continuations of the previous entry.
            if(clean[0].lower()==current_let.lower()):
                d.append(clean)
                merge_count = 0
            else:
                d[-1] = d[-1]+' '+clean
                merge_count+=1
                #print u'Merging line %s and merge count is %s'%(line, merge_count)
                if merge_count>=max_merge:
                    #print 'Max Merge received on line %s'%line
                    pass
    line+=1
# --- Stage 2: parse each stitched entry into word records ---
wc = 0
final = []
index = 0
for w in d:
    final.extend(parse_line(w, index))
    index+=1
# --- Stage 3: write one output file per starting letter ---
current_letter = None
prev_letter = None
f = None
for w in final:
    try:
        current_letter = w['rmn'][0]
        if current_letter!=prev_letter:
            # New letter: close the previous file and open '<letter>.txt'.
            prev_letter = current_letter
            if f: f.close()
            f = codecs.open(os.path.join(directory, current_letter+'.txt'), 'w', 'utf-16')
        f.write('%s %s en:%s fr:%s ro:%s\n'%(w['index'], w['rmn'], w['en'], w['fr'], w['ro']))
    except:
        # Records missing 'rmn' or a translation key are just reported.
        print w
# Final summary counters collected during parsing.
print 'Regular Merges:', merge_required_count
print 'Total Words', len(final)
print 'Total Lines', len(d)
print 'Errors', err_count
print 'References', with_ref
print 'Without translations', tr_not_found
c8dee9b1d52c4575ef88af44d875106e2a851a69 | 1f620140538728b25fd0181e493975534aa0e1fb | /project/basis/admin.py | 29ffeb36206b3b503e22a6c5ab29970a5e48bb91 | [] | no_license | YukiUmetsu/recipe-app-api-python-django | 2a22f63871489cd073d5c312e20fd9fe49eee5a5 | abaf4a0826e840e990781b20aaa5d7f0577c54c5 | refs/heads/master | 2022-11-30T03:11:16.129881 | 2020-03-03T20:04:00 | 2020-03-03T20:04:00 | 244,045,701 | 0 | 0 | null | 2022-11-22T05:21:23 | 2020-02-29T21:41:25 | Python | UTF-8 | Python | false | false | 947 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from basis import models
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""
    ordering = ['id']
    list_display = ['email', 'name']
    # Field layout for the change-user page.
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {
                'fields': (
                    'is_active',
                    'is_staff',
                    'is_superuser',
                )
            }
        ),
        (_('Important dates'), {'fields': ('last_login',)}),
    )
    # Field layout for the add-user page (password entered twice).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )

admin.site.register(models.User, UserAdmin)
# Tag and Ingredient use the default ModelAdmin.
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
| [
"yuuki.umetsu@gmail.com"
] | yuuki.umetsu@gmail.com |
f8e645fdf821b4a8a13e40bf4c64379b0006bd1f | 8af71789222675dddd541bafba681143162f4206 | /apps/entidades/admin.py | 0036ab360b549a2fda945f3612c3020591d73f18 | [] | no_license | RubenAlvarenga/nhakanina | b82d23d80e06aaf49693c8fb65a70ee73e130994 | 3e39a522029c9a6cbb455b2e736ce335ebc4bf1d | refs/heads/master | 2021-01-10T15:32:01.550423 | 2016-03-07T17:34:09 | 2016-03-07T17:34:09 | 43,449,047 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Persona, Alumno
from apps.finanzas.models import PlanPago
class AlumnoAdmin(admin.ModelAdmin):
    """Admin list view for Alumno, searchable by ID number and name parts."""
    list_display = ('codigo', 'cedula', 'get_full_name')
    search_fields = ['cedula', 'apellido1', 'apellido2', 'nombre1', 'nombre2']
    list_display_links = ('get_full_name',)

class PersonaAdmin(admin.ModelAdmin):
    """Admin list view for Persona, searchable by ID number and name parts."""
    list_display = ('id', 'cedula', 'get_full_name')
    search_fields = ['cedula', 'apellido1', 'apellido2', 'nombre1', 'nombre2']
    list_display_links = ('get_full_name',)

class PlanPagoInine(admin.TabularInline):
    """Read-only inline of PlanPago rows rendered with a custom template."""
    # NOTE(review): class name looks like a typo for 'PlanPagoInline';
    # renaming would require updating any admin that references it.
    model = PlanPago
    readonly_fields = ('concepto', 'estado', 'vencimiento', 'secuencia', 'monto')
    template = 'catedras/planpago_inline.html'

admin.site.register(Persona, PersonaAdmin)
admin.site.register(Alumno, AlumnoAdmin)
| [
"rubenalvarengan@gmail.com"
] | rubenalvarengan@gmail.com |
c5b809b7fc38a9ff5c2f441e16cef7229f172c5c | 019fd2c29b8239d7b0a3906cfbdddfd440362417 | /automl/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py | d93ca92f8ed2cfa20ca2620e5999b95da2b81014 | [
"Apache-2.0"
] | permissive | tswast/google-cloud-python | 1334d26cdb994293f307d889251d7daef5fcb826 | d897d56bce03d1fda98b79afb08264e51d46c421 | refs/heads/master | 2021-06-10T17:40:06.968584 | 2020-01-11T17:41:29 | 2020-01-11T17:41:29 | 58,775,221 | 1 | 1 | Apache-2.0 | 2019-04-10T17:09:46 | 2016-05-13T22:06:37 | Python | UTF-8 | Python | false | false | 1,173 | py | config = {
"interfaces": {
"google.cloud.automl.v1beta1.PredictionService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000,
}
},
"methods": {
"Predict": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"BatchPredict": {
"timeout_millis": 20000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
| [
"noreply@github.com"
] | tswast.noreply@github.com |
3b260f7d947e059cd8f8835dcc76c8a3a680903b | f889bc01147869459c0a516382e7b95221295a7b | /test/test_error_parameters_item.py | f28c7deb40b2328947bfa816d2d8f5b8c4d4729a | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.error_parameters_item import ErrorParametersItem
class TestErrorParametersItem(unittest.TestCase):
    """ ErrorParametersItem unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        pass

    def testErrorParametersItem(self):
        """
        Test ErrorParametersItem
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.error_parameters_item.ErrorParametersItem()
        pass


if __name__ == '__main__':
    unittest.main()
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
36085263fc74873d7a489b295947c34b95ac2da9 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/misc/editgrid.py | 88352f19f837e68d04a636a2ed4eefcea4deadab | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 2,557 | py | # -*- coding: utf-8 -*-
import wx
import wx.grid
import numpy as np
class EditGrid(wx.grid.Grid):
    """wx.grid.Grid supporting Ctrl+V paste and numpy array import/export."""

    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        wx.EVT_KEY_DOWN(self, self.OnKeyDown)

    def OnKeyDown(self, event):
        """Handle Ctrl+V as paste; pass every other key through."""
        key = event.GetKeyCode()
        # ControlDown must be *called*: the bare bound method is always
        # truthy, which made any plain 'V' keypress trigger a paste.
        if event.ControlDown() and key == ord('V'):
            self.OnPaste(event)
        else:
            event.Skip()

    def toarray(self, selection=None):
        """Return cell values as a float64 array.

        selection: optional (x0, y0, x1, y1) half-open bounds; defaults to
        the contiguous non-empty region anchored at the top-left cell.
        """
        if selection:
            x0, y0, x1, y1 = selection
        else:
            x0, y0, x1, y1 = self._getvalidbounds()
        out = np.zeros([x1 - x0, y1 - y0], 'd')
        for i in range(x0, x1):
            for j in range(y0, y1):
                # Index relative to the selection origin so selections that
                # do not start at (0, 0) stay inside the output array.
                out[i - x0, j - y0] = float(self.GetCellValue(i, j))
        return out

    def _getvalidbounds(self):
        """Extent of the contiguous non-empty cells along row 0 / column 0."""
        x0 = 0
        y0 = 0
        x1 = 0
        y1 = 0
        # '<' (not '<=') keeps GetCellValue from being asked for a cell one
        # past the grid's last row/column.
        while y1 < self.GetNumberCols() and not self.GetCellValue(0, y1) == '':
            y1 += 1
        while x1 < self.GetNumberRows() and not self.GetCellValue(x1, 0) == '':
            x1 += 1
        return x0, y0, x1, y1

    def setarray(self, data, x0=0, y0=0):
        """Write the 2-D array *data* into the grid starting at (x0, y0)."""
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                self.SetCellValue(i + x0, j + y0, '%s' % data[i, j])

    def tostring(self, selection=None):
        """Return the (optionally selected) cells as tab-separated text."""
        from cStringIO import StringIO
        sb = StringIO()
        np.savetxt(sb, self.toarray(selection), delimiter='\t')
        return sb.getvalue()

    def setfromstring(self, data, x0=0, y0=0):
        """Fill the grid from tab-separated text, starting at (x0, y0)."""
        from cStringIO import StringIO
        #print repr(data)
        sb = StringIO(data.encode())
        self.setarray(np.loadtxt(sb, delimiter='\t'), x0, y0)

    def OnPaste(self, event):
        """Paste tab-separated clipboard text into the grid at (0, 0)."""
        cb = wx.TextDataObject()
        wx.TheClipboard.Open()
        wx.TheClipboard.GetData(cb)
        wx.TheClipboard.Close()
        self.setfromstring(cb.GetText())
class EntryGrid(wx.Frame):
    """Top-level frame hosting a 100x5 EditGrid for manual data entry."""
    def __init__(self, parent=None):
        wx.Frame.__init__(self, parent, size=(500, 500))
        self.grid = EditGrid(self)
        self.grid.CreateGrid(100, 5)

    @property
    def data(self):
        """Entered values as a numpy array (see EditGrid.toarray)."""
        return self.grid.toarray()
def ShowDataGrid():
    """Create, show, and return a new EntryGrid frame."""
    frame = EntryGrid()
    frame.Show()
    return frame
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
9b6987d7cb66e6ddd8024c55a1385a3fdea8a190 | 1cccad3f2b8cb9872fd47360486f43ed90f57c9b | /config/snippets/models.py | fcd342d2d7995fc13300285bb1aba732f10a9e9a | [] | no_license | moorekwon/rest-api | bf377cd98aa07792fd08bda70ff2621c9ca3bf9b | 5455c021dd2796968cd035f41ad7de44ec4201c4 | refs/heads/master | 2021-01-02T15:04:18.630201 | 2020-03-02T15:55:30 | 2020-03-02T15:55:30 | 239,673,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from django.db import models
# Create your models here.
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from config.settings import AUTH_USER_MODEL
# Pygments lexers with at least one alias; used to build the
# (alias, display name) choices for the language field.
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted([(item, item) for item in get_all_styles()])


class Snippet(models.Model):
    """A highlighted code snippet owned by a user, ordered by creation time."""
    author = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE)
    # db index option 1 (Field.db_index):
    # created = models.DateTimeField(auto_now_add=True, db_index=True)
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)

    class Meta:
        ordering = ['created']
        # db index option 2 (Model.Meta.indexes):
        indexes = [
            models.Index(fields=['created'])
        ]
| [
"raccoonhj33@gmail.com"
] | raccoonhj33@gmail.com |
1f2f281bc5d31a4d6b6acc05d5758e652471300c | 2d4ab8e3ea9fd613ec0ae0c1956b68874c9b5f06 | /paip/pipelines/variant_calling/index_alignment.py | 5237bd5c232a9b2a4000d6340f70cca0582b76b0 | [] | no_license | biocodices/paip | 4abd39cbbd372a68592da87177c70c403d5a661d | 040a62c11e5bae306e2de4cc3e0a78772ee580b3 | refs/heads/master | 2021-01-17T20:48:28.642255 | 2019-07-26T14:30:58 | 2019-07-26T14:30:58 | 62,604,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from paip.task_types import SampleTask
from paip.pipelines.variant_calling import MarkDuplicates
from paip.helpers.create_cohort_task import create_cohort_task
class IndexAlignment(SampleTask):
    """
    Takes a BAM and creates its BAI (index) companion.
    """
    REQUIRES = MarkDuplicates
    OUTPUT = "dupmarked_alignment.bai"

    def run(self):
        """Run 'picard BuildBamIndex' on the duplicate-marked BAM."""
        # Presumably a luigi-style temporary path that is moved into place
        # only on success, so a failed Picard run leaves no partial .bai
        # behind — confirm against the task framework.
        with self.output().temporary_path() as temp_bai:
            program_name = 'picard BuildBamIndex'
            program_options = {
                'input_bam': self.input()['dupmarked_bam'].path,
                'output_bai': temp_bai,
            }
            self.run_program(program_name, program_options)

# Cohort-level variant that runs IndexAlignment for every sample.
IndexAlignmentCohort = create_cohort_task(IndexAlignment)
| [
"juanmaberros@gmail.com"
] | juanmaberros@gmail.com |
af319fa3841c5cb66f3ce96349db1ab481a2337d | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Risk/FK_FKYW_PG_KCB_212.py | 06c0ff949a1a41fb9928d88cc150b43d6ed2101e | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os
sys.path.append("/home/yhl2/workspace/xtp_test/Autocase_Result/Risk/service")
from order import f
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from QueryStkPriceQty import *
from log import *
from utils import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import SqlData_Transfer
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import clear_data_and_restart_sh
class FK_FKYW_PG_212(xtp_test_case):
    """Risk-control test: submit allotment (rights issue) orders in bulk and
    verify that rule 25 is triggered as expected."""
    def setUp(self):
        # Reset the current risk data, load this case's risk rules, restart
        # the service with clean data, then re-login to get a fresh session.
        sql_transfer = SqlData_Transfer()
        sql_transfer.delete_cur_risk()
        sql_transfer.insert_cur_risk('FK_FKYW_GPWT_624')
        clear_data_and_restart_sh()
        Api.trade.Logout()
        time.sleep(2)
        Api.trade.Login()

    def test_FK_FKYW_PG_212(self):
        title = '默认rule25,rule0=0'
        # Define the expected values for this test case.
        # Possible expected states: initial, not traded, partially traded,
        # fully traded, partial-cancel reported, partially cancelled,
        # reported pending cancel, cancelled, invalid order, cancel
        # rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '未成交',
            'errorID':0,
            'errorMSG': '',
            '是否生成报单':'是',
            '是否是撤废':'否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Define the order (entrust) parameters ---------------------------
        wt_reqs = {
            'business_type': Api.const.XTP_BUSINESS_TYPE[
                'XTP_BUSINESS_TYPE_ALLOTMENT'],
            'order_client_id': 4,
            'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
            'ticker': '700001',
            'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
            'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
            'quantity': 2580,
            'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
        }
        # First batch: many orders under client id 4.
        count = 1
        max_count = 101
        filename = 'FK_FKYW_PG_212_order'
        insert_orders_sleep(count, max_count, Api, case_goal, wt_reqs, filename)
        time.sleep(3)
        # Second batch: a single order under client id 1.
        max_count = 1
        wt_reqs['order_client_id'] = 1
        insert_orders_sleep(count, max_count, Api, case_goal, wt_reqs, filename)
        time.sleep(3)
        file_reorder(filename)
        # Verify the orders triggered risk-control rule 25 as expected.
        rule25_check2(filename)


if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
38e4fda8720303f4e1bd2540c6a20bf972c60ed9 | f546248daf7fd64aeff6517e9fea668b459f9b62 | /yatwin/interfaces/onvif/wsdl/wsdl/errors.py | 557ffa417b37c79dad41bfc06a4d843c207004bc | [] | no_license | andre95d/python-yatwin | 2310b6c6b995771cea9ad53f61ad37c7b10d52d0 | 7d370342f34e26e6e66718ae397eb1d81253cd8a | refs/heads/master | 2023-03-16T18:06:17.141826 | 2020-05-12T23:04:53 | 2020-05-12T23:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | """
Library which contains errors
All errors inherit from <Exception>, then pass
... essentially just renaming the <Exception>
Contains:
<FileDoesNotExist>
<InvalidArgument>
<ParseError>
"""
class FileDoesNotExist(Exception):
    """Raised when a referenced file does not exist."""


class InvalidArgument(Exception):
    """Raised when a caller supplies an invalid argument."""


class ParseError(Exception):
    """Raised when a document cannot be parsed."""
| [
"26026015+tombulled@users.noreply.github.com"
] | 26026015+tombulled@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.