blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
210d512330e8abd0fb7ad24944d2c9940b3f89de | 6440a113597191d3b78aa6b5cae6cea9fb057e2e | /August-Challenge/10.py | b21c3a313bebbab53721eccdafa5fcea44128377 | [] | no_license | DarshanGowda0/LC-Grind | 40c162d8894df81ea7124f66daf20f86f327b6cb | f7b9a86797d52ab1057f0300352c0c5670a59bd5 | refs/heads/master | 2023-01-28T01:27:45.195164 | 2020-12-06T03:58:14 | 2020-12-06T03:58:14 | 277,024,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from collections import deque
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the minutes until every fresh orange rots, or -1 if some
        fresh orange is unreachable (LeetCode 994, multi-source BFS).

        Note: ``grid`` is mutated in place — fresh cells become rotten.
        """
        rows, cols = len(grid), len(grid[0])
        # Seed the frontier with every initially-rotten orange at minute 0.
        frontier = deque(
            (r, c, 0)
            for r in range(rows)
            for c in range(cols)
            if grid[r][c] == 2
        )
        minutes = 0
        while frontier:
            r, c, t = frontier.popleft()
            minutes = max(minutes, t)
            # Spread rot to the four orthogonal neighbours.
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                    grid[nr][nc] = 2
                    frontier.append((nr, nc, t + 1))
        # Any surviving fresh orange could never be reached by the rot.
        if any(cell == 1 for row in grid for cell in row):
            return -1
        return minutes
| [
"darshan.gowda008@gmail.com"
] | darshan.gowda008@gmail.com |
e9b4d2a23744022658a9abdb3e3dd716b50ad8c4 | 747755833862b8e9d0f58ebc62879d6ef47c23c8 | /python-master (5)/python-master/zhilian_new.py | b2c2f638b5b2230c54dc3553c300bdd1b683b5cb | [] | no_license | tangsong41/stu_py | 98a06730dbca6e158cf81c18d98fe1317c1ae512 | d41507cd8dd9e8a54084872dfa15c36da443c02b | refs/heads/master | 2022-12-11T23:53:57.530946 | 2019-01-15T18:29:19 | 2019-01-15T18:29:19 | 163,953,100 | 3 | 0 | null | 2022-12-07T23:24:01 | 2019-01-03T09:41:29 | Jupyter Notebook | UTF-8 | Python | false | false | 5,314 | py | # encoding: utf-8
"""Zhaopin.com resume scraper (Python 2).

Logs in, lists the account's resumes, picks the "best" one per the PRD
rules, scrapes its profile sections and avatar, and saves the result as
JSON under static/.

SECURITY NOTE(review): account name and password are hard-coded below —
move them to a config file or environment variables before sharing.
"""
__author__ = 'zhanghe'
import requests
import json
import logging
from pyquery import PyQuery as Pq
from tools import zhilian
logging.basicConfig(level=logging.DEBUG, filename='zhilian.log', filemode='w')
# URL of the login page
url = 'https://passport.zhaopin.com/'
# Some sites block scrapers; these headers disguise the request as a browser
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36'}
# Form fields submitted with the login request
form_data = {
    'bkurl': '',
    'LoginName': '2627498748@qq.com', # the site account name
    'Password': '825716', # the site account password
    'RememberMe': 'false'
}
s = requests.session()
login_response = s.post(url, data=form_data, headers=header)
print login_response.text
# The new site logs in over HTTPS and redirects on success:
# window.location.href = "http://i.zhaopin.com";
if 'window.location.href = "http://i.zhaopin.com"' in login_response.text:
    i_zhaopin = s.get('http://i.zhaopin.com/')
    # landing page shown after a successful login
    print i_zhaopin.text
    pass
if 'window.location.href = "http://i.zhaopin.com"' in login_response.text:
    resume_list_page = s.get('http://my.zhaopin.com/myzhaopin/resume_list.asp')
    # resume management page (the resume list)
    print type(resume_list_page)
    print type(resume_list_page.text)
    # print resume_list.text
    with open('static/html/test.html', 'wb') as f:
        f.write(resume_list_page.text.encode('utf-8'))
    # Extract the resume list entries
    rows = Pq(resume_list_page.text).find('div.emailL_tab')
    i = 0
    resume_list = []
    for row in Pq(rows):
        j = 0
        item_list = []
        item_dict = {}
        for item in Pq(row).find('.email5'):
            item_text = Pq(item).text()
            print "[%s-%s]" % (i, j) + item_text
            # only columns 0, 1, 2 and 6 carry useful information
            if j in (0, 1, 2, 6):
                item_list.append(item_text)
            # (Python has no ++ operator; += 1 is the idiom)
            j += 1
        # extract the resume completeness percentage
        if len(item_list) == 4:
            item_list[2] = item_list[2].split()[-1].rstrip('%')
        # append the resume URL to item_list
        url = Pq(row).find('.email5 .iconHover_2').attr('href')
        item_list.append('http://my.zhaopin.com/myzhaopin/' + url)
        print json.dumps(item_list).decode('raw_unicode_escape')
        i += 1
        # convert the positional list into a readable dict
        item_dict['update_date'] = item_list[0]
        item_dict['language'] = item_list[1]
        item_dict['integrity'] = item_list[2]
        item_dict['openness'] = item_list[3]
        item_dict['url'] = item_list[4]
        resume_list.append(item_dict)
    print json.dumps(resume_list, indent=4).decode('raw_unicode_escape')
    if resume_list is None:
        print('简历不存在')
        exit()
    # Drop English-language resumes.
    # NOTE(review): removing items while iterating the same list can skip
    # consecutive matches — iterate over a copy (resume_list[:]) to be safe.
    for item in resume_list:
        if item['language'] == u'英文':
            resume_list.remove(item)
    print json.dumps(resume_list, indent=4).decode('raw_unicode_escape')
    # Pick the best resume according to the rules described in the PRD
    best_resume = zhilian.select_best_resume(resume_list)
    print best_resume
    # Open the resume management page and collect each section's URL
    if best_resume is not None:
        module_url_dict = {}
        module_list_page = s.get(best_resume['resume'])
        module_url_rows = Pq(module_list_page.text).find('.left .leftRow .leftRowCon ul.leftRowB')
        for module_url_row in Pq(module_url_rows).find('li.ok'):
            print Pq(module_url_row).html()
            title = Pq(module_url_row).text()
            url = zhilian.url_join(Pq(module_url_row).find('a').attr('href'), 'http://my.zhaopin.com')
            module_url_dict[title] = url
        print json.dumps(module_url_dict, indent=4).decode('raw_unicode_escape')
        # Fetch each section's data (u'个人信息' = "personal information")
        if u'个人信息' in module_url_dict:
            profile = s.get(module_url_dict[u'个人信息'])
            print profile.text
            profile_items_list = [
                'username',
                'gender',
                'birth_date_y',
                'birth_date_m',
                'experience',
                'experience_month',
                'hukou',
                'hukou_p',
                'residence',
                'residence_p',
                'residence_district',
                'contact_num',
                'email1',
            ]
            profile_items_dict = {}
            for item in profile_items_list:
                profile_items_dict[item] = Pq(profile.text).find('input[name="' + item + '"]').attr('value')
            # marital status is rendered client-side by JS, so it is not scraped here
            print json.dumps(profile_items_dict, indent=4).decode('raw_unicode_escape')
            pass
    # Save the avatar image
    if best_resume is not None:
        module_url_dict = {}
        module_list_page = s.get(best_resume['resume'])
        avatar_url = Pq(module_list_page.text).find('.rightRow1 p.f_right a').attr('href')
        avatar = s.get(avatar_url)
        with open('static/avatar/test.jpg', 'wb') as f:
            for item in avatar:
                f.write(item)
    # field-mapping conversion
    # save the JSON to the server
    import os
    filepath = 'static/json/'
    if not os.path.isdir(filepath):
        os.mkdir(filepath)
    filename = 'static/json/test.json'
    result_json = json.dumps(profile_items_dict, indent=4, ensure_ascii=False)
with open(filename, 'wb') as f:
f.write(result_json.encode('utf-8')) | [
"369223985@qq.com"
] | 369223985@qq.com |
699244c39d0cfa6d0b34047b5368788825556e8d | c4ddbcb4b02c9ba487c5f128db30ac9b5cae6d92 | /vizdoomgymmaze/envs/vizdoommazeone11.py | 1615d82bff249e486a614a7cdfd0724899f32f6b | [
"MIT"
] | permissive | fanyuzeng/vizdoomgymmaze | 11d1a5b2e05ff1eb2606015e0eae128ea42425cb | 51f750405a762e3f19193c09ef34380786c11efe | refs/heads/master | 2020-07-22T20:20:29.780544 | 2019-08-27T09:51:51 | 2019-08-27T09:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from vizdoomgymmaze.envs.vizdoomenv import VizdoomEnv
class VizdoomMazeOne11(VizdoomEnv):
def __init__(self):
super(VizdoomMazeOne11, self).__init__(24) | [
"wangchen100@163.com"
] | wangchen100@163.com |
fe95ba1affa130825a886104f8a5791e5ceae0ac | 46f87eae767602d40b3c39ae069d33669bbd2584 | /python-prgject/venv/Scripts/easy_install-3.7-script.py | bb4e37ef0cf3542682bde0b14e6146ba865b5fc9 | [] | no_license | ADSL-Plucky/learngit | 3f59e80d13546f9e0be8aa8ba30a23cf67ff4c4d | d4aace0cf4ee249f6b29c352859644eb8f4c9236 | refs/heads/master | 2021-02-18T10:22:03.734093 | 2020-06-26T06:49:22 | 2020-06-26T06:49:22 | 244,944,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #!E:\Python\python-prgject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim created by setuptools; do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so the entry point sees the plain program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"836046238@qq.com"
] | 836046238@qq.com |
8ddbe4a4979a7f7cba769558ef23c929ef0a570b | 20abf1e776e7d95277999398985d60df34970140 | /可视化/散点图.py | 4714ecc585870c8fa46d42835918eb7442870731 | [] | no_license | NoraCarl/Python | 5f931974e3b82d5362ca41ff8a7d1ee64b07328e | 2cbdabfa16d2457d9fa2c2095774d5d7c4f36abe | refs/heads/master | 2022-12-15T11:28:42.109466 | 2020-09-21T10:17:33 | 2020-09-21T10:17:33 | 296,377,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Configure Chinese font support (SimHei) so CJK labels render correctly
plt.rcParams['font.sans-serif']=['Simhei']
plt.rcParams['axes.unicode_minus']=False
# Generate random sample values
N = 10000
x = np.random.randn(N)
y = np.random.randn(N)
if __name__ == "__main__":
    plt.figure(figsize=(12,6))
    plt.scatter(x, y,s=100,alpha=0.5,edgecolors= 'white')
    plt.title('complex',fontsize=30)# chart title
    plt.xlabel('这里是x轴')# x-axis label ("this is the x axis")
    plt.ylabel('这里是y轴')# y-axis label ("this is the y axis")
    plt.grid(False)# gridlines disabled (False turns them off)
plt.show() | [
"17623461751@163.com"
] | 17623461751@163.com |
43f838411752aaa7b70c2c2bc2e9885eeb519a7f | 9e637c9b101015263071526ef666f7ef3eb11831 | /scripts/wds_debug_off.py | 5d3fe843b5227464eb371fa9f3cf12882513f1a3 | [] | no_license | qiaoshouxing/crtcfg | 3314c472a650da075bf83be088f947876c4c3037 | b4d14fcb76246c17f2b7c8829cb543fc447d7af0 | refs/heads/master | 2021-01-25T11:03:28.924290 | 2017-06-10T02:15:16 | 2017-06-10T02:15:16 | 93,909,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # $language = "Python"
# $interface = "1.0"
'''
SecureCRT script: back out to the CLI prompt, enter privileged/config mode,
and turn off terminal debugging and terminal logging on the device.
'''
import os
__author__ = 'sxqiao'
# Synchronous mode makes Screen.Send block until the device has consumed it.
crt.Screen.Synchronous = True
def main():
    # Leave any current menu/context before reconfiguring.
    crt.Screen.Send('\r\n')
    crt.Screen.Send('exit\r\n')
    crt.Screen.Send('\r\n')
    crt.Screen.Send('quit\r\n')
    crt.Screen.Send('\r\n')
    # Enter privileged mode ('admin' is the enable password) and disable debug output.
    crt.Screen.Send('enable\n')
    crt.Screen.Send('admin\n')
    crt.Screen.Send('configure terminal\r\n')
    crt.Screen.Send('no terminal debugging\n')
    crt.Screen.Send('no terminal logging\n')
main()
| [
"qiaoshouxingsdd@163.com"
] | qiaoshouxingsdd@163.com |
79cfe48d6912952f15b68446a795e5a636f8fc3a | fb88cacbaf6a983f73672ea0eb243a146d2e491f | /web_socket/__init__.py | ba62b7d7a85e8ade38692b6e74ba5d5a7596de4e | [
"MIT"
] | permissive | dtkcs01/project-loader | 860d9e114c9cffc9fba6f848cd4d73836fed297b | c33f91281d0eebd719ce43a80ad9a614735681f3 | refs/heads/master | 2023-08-17T00:30:26.094693 | 2020-08-05T15:43:46 | 2020-08-05T15:43:46 | 258,460,593 | 0 | 0 | MIT | 2023-08-14T22:15:12 | 2020-04-24T09:02:46 | Python | UTF-8 | Python | false | false | 131 | py | import os
import sys
# Make this package's own directory importable so the sibling module below
# resolves regardless of the caller's working directory.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# Re-export the handler as the package's public API.
from web_socket_handler import WebSocketHandler
| [
"dtkcs01@gmail.com"
] | dtkcs01@gmail.com |
11eaf6953c9bfd9b109db46d19ac79e6b3682fdc | a03303e46f21697c9da87d0bb0f7b0a3077aba5c | /siswa_potongan_biaya/models/calon_siswa.py | ea4fd1771e5abc6c983546bd01d6ddd0b5a81a34 | [] | no_license | butirpadi/flectra_app_sek | fccd3e47ef261e116478e6da7f0cc544ee67f127 | 00fa36d9176511f8ffe3c7636a8434ee2ed8c756 | refs/heads/master | 2020-04-06T10:26:37.053024 | 2018-11-19T23:59:34 | 2018-11-20T00:17:02 | 157,380,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,405 | py | # -*- coding: utf-8 -*-
from flectra import models, fields, api, exceptions, _
from pprint import pprint
from datetime import datetime, date
import calendar
class calon_siswa(models.Model):
    """Extension of the admission-candidate (calon siswa) model.

    Adds a quantity column to payment lines and overrides payment
    generation/confirmation so monthly fees (biaya bulanan) are billed
    twelve times and fee discounts (potongan biaya) are applied.
    """
    _inherit = 'siswa_psb_ocb11.calon_siswa'
    # How many times a fee line is charged (set to 12 for monthly fees).
    qty = fields.Integer('Qty', default=1)
    # override on generate pembayaran default
    def generate_pembayaran_default(self):
        """Recompute each payment line's total after the base generation."""
        super(calon_siswa, self).generate_pembayaran_default()
        # recalculate the line total: jumlah_harga = qty * discounted price
        for by in self.payment_lines:
            if by.biaya_id.is_bulanan:
                by.qty = 12
            by.jumlah_harga = (by.harga - by.potongan_harga) * by.qty
            # by.dibayar = by.jumlah_harga
            by.dibayar = (by.harga - by.potongan_harga) * by.qty
    def generate_biaya_optional(self):
        """Create the optional-fee line, then recompute its totals."""
        newid = super(calon_siswa, self).generate_biaya_optional()
        # print('tampilkan biaya optional')
        # pprint(newid)
        if newid.biaya_id.is_bulanan:
            newid.qty = 12
        newid.jumlah_harga = (newid.harga - newid.potongan_harga) * newid.qty
        # newid.dibayar = newid.jumlah_harga
        newid.dibayar = (newid.harga - newid.potongan_harga) * newid.qty
    # override calculate bayar tunai, (replace)
    def calculate_bayar_tunai(self):
        """Distribute the cash amount (bayar_tunai) across payment lines
        in order until the cash runs out; unfunded lines get dibayar = 0."""
        print('inside inherited calculate_bayar_tunai')
        tunai = self.bayar_tunai
        for pay_line in self.payment_lines:
            if tunai > 0 :
                if tunai > pay_line.jumlah_harga :
                    # dibayar = (pay_line.qty * pay_line.harga) - pay_line.potongan_harga
                    dibayar = pay_line.qty * (pay_line.harga - pay_line.potongan_harga)
                    pay_line.dibayar = dibayar
                    tunai -= pay_line.dibayar
                else:
                    pay_line.dibayar = tunai
                    tunai = 0
            else:
                # originally an unfunded payment line was deleted:
                # pay.unlink()
                # revised: when the cash is insufficient, set the paid amount to 0
                pay_line.dibayar = 0
    # override action confirm (replace)
    def action_confirm(self):
        """Confirm the registration: create/update the res.partner record,
        generate the student's fee lines, apply discounts, and register the
        initial payment. Raises if no payment lines were prepared."""
        # check whether a payment has been prepared
        if len(self.payment_lines) > 0:
            # register siswa to res.partner
            if self.is_siswa_lama:
                # existing student: just flag the partner record
                self.env['res.partner'].search([('id','=',self.siswa_id.id)]).write({
                    # 'rombels' : [(0, 0, { 'rombel_id' : self.rombel_id.id, 'tahunajaran_id' : self.tahunajaran_id.id })],
                    # 'active_rombel_id' : self.rombel_id.id,
                    'is_siswa_lama' : True,
                    'calon_siswa_id' : self.id,
                })
            else:
                # new student: insert into res_partner
                new_siswa = self.env['res.partner'].create({
                    'is_customer' : 1,
                    'name' : self.name,
                    'calon_siswa_id' : self.id,
                    'street' : self.street,
                    'street2' : self.street2,
                    'zip' : self.zip,
                    'city' : self.city,
                    'state_id' : self.state_id.id,
                    'country_id' : self.country_id.id,
                    'phone' : self.phone,
                    'mobile' : self.mobile,
                    'tanggal_registrasi' : self.tanggal_registrasi,
                    'tahunajaran_id' : self.tahunajaran_id.id,
                    'nis' : self.nis,
                    'panggilan' : self.panggilan,
                    'jenis_kelamin' : self.jenis_kelamin,
                    'tanggal_lahir' : self.tanggal_lahir,
                    'tempat_lahir' : self.tempat_lahir,
                    'alamat' : self.alamat,
                    'telp' : self.telp,
                    'ayah' : self.ayah,
                    'pekerjaan_ayah_id' : self.pekerjaan_ayah_id.id,
                    'telp_ayah' : self.telp_ayah,
                    'ibu' : self.ibu,
                    'pekerjaan_ibu_id' : self.pekerjaan_ibu_id.id,
                    'telp_ibu' : self.telp_ibu,
                    # 'rombels' : [(0, 0, { 'rombel_id' : self.rombel_id.id, 'tahunajaran_id' : self.tahunajaran_id.id })],
                    # 'active_rombel_id' : self.rombel_id.id,
                    'is_siswa' : True,
                    'anak_ke' : self.anak_ke,
                    'dari_bersaudara' : self.dari_bersaudara
                })
                # self.siswa_id = new_siswa.id
                self.registered_siswa_id = new_siswa.id
                # self.siswa_id = new_siswa.id
            # update state
            self.state = 'reg'
            # assign fees (biaya) to the student
            # look up the year/level fee configuration (tahunajaran_jenjang)
            ta_jenjang = self.env['siswa_ocb11.tahunajaran_jenjang'].search([('tahunajaran_id', '=', self.tahunajaran_id.id),
                                                                            ('jenjang_id', '=', self.jenjang_id.id)
                                                                            ])
            # create one siswa_biaya record per fee (12 for monthly fees)
            total_biaya = 0.0
            if self.is_siswa_lama:
                id_siswa = self.siswa_id.id
            else:
                id_siswa = new_siswa.id
            for by in ta_jenjang.biayas:
                # check whether the fee is optional and was selected in payment_lines
                by_found = False
                if by.biaya_id.is_optional:
                    for by_in_pay in self.payment_lines:
                        if by.biaya_id.id == by_in_pay.biaya_id.id:
                            by_found = True
                    if not by_found:
                        continue
                if self.is_siswa_lama and by.biaya_id.is_siswa_baru_only:
                    print('skip')
                    continue
                else:
                    print('JENJANG ID : ' + str(self.jenjang_id.id))
                    if by.biaya_id.is_bulanan:
                        for bulan_index in range(1,13):
                            harga = by.harga
                            if by.is_different_by_gender:
                                if self.jenis_kelamin == 'perempuan':
                                    harga = by.harga_alt
                            self.env['siswa_keu_ocb11.siswa_biaya'].create({
                                'name' : by.biaya_id.name + ' ' + calendar.month_name[bulan_index],
                                'siswa_id' : id_siswa,
                                'tahunajaran_id' : self.tahunajaran_id.id,
                                'biaya_id' : by.biaya_id.id,
                                'bulan' : bulan_index,
                                'harga' : harga,
                                'amount_due' : harga,
                                'jenjang_id' : self.jenjang_id.id
                            })
                            total_biaya += harga
                    else:
                        harga = by.harga
                        if by.is_different_by_gender:
                            if self.jenis_kelamin == 'perempuan':
                                harga = by.harga_alt
                        self.env['siswa_keu_ocb11.siswa_biaya'].create({
                            'name' : by.biaya_id.name,
                            'siswa_id' : id_siswa,
                            'tahunajaran_id' : self.tahunajaran_id.id,
                            'biaya_id' : by.biaya_id.id,
                            'harga' : harga,
                            'amount_due' : harga,
                            'jenjang_id' : self.jenjang_id.id
                        })
                        total_biaya += harga
            # apply fee discounts (potongan biaya)
            for pay_a in self.payment_lines:
                if pay_a.potongan_harga > 0:
                    if pay_a.biaya_id.is_bulanan:
                        print('inside biaya is bulanan')
                        # a monthly fee gets one discount record per month
                        biaya_bulanan_ids = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                            ('siswa_id','=',id_siswa),
                            ('tahunajaran_id','=',self.tahunajaran_id.id),
                            ('biaya_id','=',pay_a.biaya_id.id),
                            # ('tahunajaran_id','=',self.tahunajaran_id.id),
                        ])
                        for by_bul in biaya_bulanan_ids:
                            a_pot = self.env['siswa.potongan_biaya'].create({
                                'siswa_id' : id_siswa,
                                'siswa_biaya_id' : by_bul.id,
                                'jumlah_potongan' : pay_a.potongan_harga
                            })
                            a_pot.action_confirm()
                    else:
                        print('inside biaya not bulanan')
                        print(id_siswa)
                        print(pay_a.biaya_id.id)
                        print(self.tahunajaran_id.id)
                        siswa_by_id = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                            ('siswa_id','=',id_siswa),
                            ('tahunajaran_id','=',self.tahunajaran_id.id),
                            ('biaya_id','=',pay_a.biaya_id.id),
                            # ('tahunajaran_id','=',self.tahunajaran_id.id),
                        ]).id
                        # siswa_by_id = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                        #     ('id','=',id_siswa),
                        #     ('biaya_id','=',pay_a.biaya_id.id),
                        #     ('tahunajaran_id','=',self.tahunajaran_id.id),
                        # ])
                        pprint(siswa_by_id)
                        a_pot = self.env['siswa.potongan_biaya'].create({
                            'siswa_id' : id_siswa,
                            'siswa_biaya_id' : siswa_by_id,
                            'jumlah_potongan' : pay_a.potongan_harga
                        })
                        a_pot.action_confirm()
            # # set total_biaya dan amount_due
            # # total_biaya = sum(by.harga for by in self.biayas)
            # print('ID SISWA : ' + str(id_siswa))
            # res_partner_siswa = self.env['res.partner'].search([('id','=',id_siswa)])
            # self.env['res.partner'].search([('id','=',id_siswa)]).write({
            #     'total_biaya' : total_biaya,
            #     'amount_due_biaya' : res_partner_siswa.amount_due_biaya + total_biaya,
            # })
            # register the initial payment (pembayaran)
            pembayaran = self.env['siswa_keu_ocb11.pembayaran'].create({
                'tanggal' : self.tanggal_registrasi ,
                'tahunajaran_id' : self.tahunajaran_id.id,
                'siswa_id' : id_siswa,
            })
            # reset pembayaran_lines
            pembayaran.pembayaran_lines.unlink()
            pembayaran.total = 0
            total_bayar = 0.0
            for pay in self.payment_lines:
                print('Payment Lines : ')
                print('harga : ' + str(pay.harga))
                print('dibayar : ' + str(pay.dibayar))
                print('biaya_id : ' + str(pay.biaya_id.id))
                # get siswa_biaya
                if pay.dibayar > 0: # skip lines whose paid amount is 0
                    if pay.biaya_id:
                        if pay.biaya_id.is_bulanan:
                            # pay_biaya_id = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                            #     ('siswa_id','=',id_siswa),
                            #     ('tahunajaran_id','=',self.tahunajaran_id.id),
                            #     ('biaya_id','=',pay.biaya_id.id),
                            #     ('tahunajaran_id','=',self.tahunajaran_id.id),
                            #     ('bulan','=',pay.bulan),
                            #     ]).id
                            # this section was reworked to follow the fee-discount rules
                            # NOTE(review): the domain below repeats the
                            # tahunajaran_id term twice — redundant; confirm intent.
                            biaya_bulanan_ids = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                                ('siswa_id','=',id_siswa),
                                ('tahunajaran_id','=',self.tahunajaran_id.id),
                                ('biaya_id','=',pay.biaya_id.id),
                                ('tahunajaran_id','=',self.tahunajaran_id.id),
                            ])
                            # spread the paid amount across the monthly fee records
                            dibayar_untuk_biaya_ini = pay.dibayar
                            for by_bln in biaya_bulanan_ids:
                                if dibayar_untuk_biaya_ini > 0:
                                    if dibayar_untuk_biaya_ini > by_bln.harga:
                                        pembayaran.pembayaran_lines = [(0, 0, {
                                            'biaya_id' : by_bln.id,
                                            'jumlah_potongan' : pay.potongan_harga,
                                            'amount_due' : by_bln.amount_due,
                                            'bayar' : by_bln.harga - pay.potongan_harga
                                        })]
                                        dibayar_untuk_biaya_ini -= (by_bln.harga - pay.potongan_harga)
                                        total_bayar += by_bln.harga
                                    else:
                                        pembayaran.pembayaran_lines = [(0, 0, {
                                            'biaya_id' : by_bln.id,
                                            'jumlah_potongan' : pay.potongan_harga,
                                            'amount_due' : by_bln.amount_due,
                                            'bayar' : dibayar_untuk_biaya_ini
                                        })]
                                        total_bayar += dibayar_untuk_biaya_ini
                                        dibayar_untuk_biaya_ini = 0
                        else:
                            pay_biaya_id = self.env['siswa_keu_ocb11.siswa_biaya'].search([
                                ('siswa_id','=',id_siswa),
                                ('tahunajaran_id','=',self.tahunajaran_id.id),
                                ('biaya_id','=',pay.biaya_id.id),
                                # ('tahunajaran_id','=',self.tahunajaran_id.id),
                            ])
                            # this was changed as well
                            pembayaran.pembayaran_lines = [(0, 0, {
                                'biaya_id' : pay_biaya_id.id,
                                'jumlah_potongan' : pay.potongan_harga,
                                'amount_due' : pay_biaya_id.amount_due,
                                'bayar' : pay.dibayar
                            })]
                            total_bayar += pay.dibayar
                            print('pay_biaya_id : ' + str(pay_biaya_id))
                print('-------------------')
            # raise exceptions.except_orm(_('Warning'), _('TEST ERROR'))
            # confirm the payment
            pembayaran.action_confirm()
            # set the amount-in-words (terbilang) field
            if total_bayar == 0:
                self.terbilang = 'nol'
            else:
                t = self.terbilang_(total_bayar)
                while '' in t:
                    t.remove('')
                self.terbilang = ' '.join(t)
                self.terbilang += ' Rupiah'
            # set total
            self.total = total_bayar
            # raise exceptions.except_orm(_('Warning'), _('You can not delete Done state data'))
        else:
            raise exceptions.except_orm(_('Warning'), _('Can not confirm this registration, complete payment first!'))
| [
"butirpadi@gmail.com"
] | butirpadi@gmail.com |
31b8ef762b0ceea39277622cf231f8ad06a54314 | b64bdb3d5e15694fceadf2dc3e303107574af3e3 | /app01/models.py | 4f51b80d79171430982f847aae640a87b92e8309 | [
"Apache-2.0"
] | permissive | lixiang30/restdemo | 31624ebe944e9d21e3856f2e74891e6287213847 | 1a01ed14a99ce15d16bc857541d0882f6727447c | refs/heads/master | 2020-04-05T12:03:21.782491 | 2018-11-22T07:31:34 | 2018-11-22T07:31:34 | 156,856,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | from django.db import models
# Create your models here.
class Book(models.Model):
    """A book with one publisher and many authors."""
    title = models.CharField(max_length=32)
    price = models.IntegerField()
    pub_date = models.DateField()
    # String references avoid ordering issues between model definitions.
    # NOTE(review): no on_delete — valid only on Django < 2.0; confirm target version.
    publish = models.ForeignKey("Publish")
    authors = models.ManyToManyField("Author")
    def __str__(self):
        return self.title
class Publish(models.Model):
    """A publishing house; referenced by Book.publish."""
    name = models.CharField(max_length=12)
    email = models.EmailField()
    def __str__(self):
        return self.name
class Author(models.Model):
    """A book author; referenced by Book.authors (many-to-many)."""
    name = models.CharField(max_length=32)
    age = models.IntegerField()
    def __str__(self):
        return self.name
# class User(models.Model):
# name = models.CharField(max_length=32)
# pwd = models.CharField(max_length=32)
class User(models.Model):
    """An application user with a tier (normal / VIP / SVIP)."""
    name = models.CharField(max_length=32)
    # SECURITY NOTE(review): password appears to be stored in plain text —
    # hash it (e.g. Django's make_password) before persisting.
    pwd = models.CharField(max_length=32)
    # (1, "普通用户"=regular user), (2, "VIP"), (3, "SVIP")
    type_choice = ((1,"普通用户"),(2,"VIP"),(3,"SVIP"))
    user_type = models.IntegerField(choices=type_choice,default=1)
class Token(models.Model):
    """Per-user auth token; one token per User (one-to-one)."""
    user = models.OneToOneField("User")
    token = models.CharField(max_length=128)
    def __str__(self):
        return self.token
| [
"534551900@qq.com"
] | 534551900@qq.com |
edf3214aa2632cb707d9e76ef68704be436c3a8b | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/ad_group_ad_asset_view_service/transports/base.py | 02b9a075f24439fca6a2439e85ffce2d60f99f10 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import ad_group_ad_asset_view
from google.ads.googleads.v7.services.types import ad_group_ad_asset_view_service
# Report the installed google-ads-googleads version in the user-agent;
# fall back to an empty ClientInfo when the distribution is not installed
# (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-ads-googleads',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupAdAssetViewServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for AdGroupAdAssetViewService."""
    # OAuth scope required for all Google Ads API calls.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/adwords',
    )
    def __init__(
            self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods
        # wrap_method adds retry/timeout/user-agent handling around the RPC.
        self._wrapped_methods = {
            self.get_ad_group_ad_asset_view: gapic_v1.method.wrap_method(
                self.get_ad_group_ad_asset_view,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    # Signature of the GetAdGroupAdAssetView RPC; concrete transports
    # (e.g. the gRPC transport) override this property with a real stub.
    @property
    def get_ad_group_ad_asset_view(self) -> typing.Callable[
            [ad_group_ad_asset_view_service.GetAdGroupAdAssetViewRequest],
            ad_group_ad_asset_view.AdGroupAdAssetView]:
        raise NotImplementedError
__all__ = (
'AdGroupAdAssetViewServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
20b1fc1feee6f9f40bc5791f54856c882f9619b7 | 6ac36f3c59e7a65fccf90e8ab54bbfc75f655c64 | /buildURL.py | a280b96c67f50c5e2c0e365a2210ab8dfef271a6 | [] | no_license | crhodes2/JAMA-ID-Checker | 76658b020f807419423e72b29ed9c2eb98043676 | 5ad0bf4e7eddebd1f38ea91d2ef1169c737dd891 | refs/heads/master | 2020-03-28T15:00:16.759007 | 2018-09-14T21:20:41 | 2018-09-14T21:20:41 | 148,545,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | # !/usr/bin/env python3
# -*- mode: python -*-
##############################
''' BUILD URL '''
##############################
# LIST OF MODULES #########################################################################
# VARIABLES #########################################################################
id = ''
# FUNCTIONS #########################################################################
def templateURL(id):
url='https://www.jamaland.com/rest/latest/abstractitems?documentKey={0}'. format(id)
return url | [
"crhodes@jama.com"
] | crhodes@jama.com |
dc4ceb94123ac84ffc8ecc8d3b1356c819f7e211 | b36ec0cf0fe0165e643af2095d13ebaeca366d12 | /plugin.py | 0f4c5bd693c028d84a7c3597662384003cc508ae | [] | no_license | k45734/downloader_video | a90f4354cb2112ced9cb8ddfcaa66dbb1f896643 | fc656e2657756be1e969ce02b17d0074ada6b14f | refs/heads/master | 2022-12-31T03:54:02.430596 | 2020-10-17T04:12:10 | 2020-10-17T04:12:10 | 295,620,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | # -*- coding: utf-8 -*-
# python
import os, traceback
# third-party
from flask import Blueprint
# sjva 공용
from framework.logger import get_logger
from framework import app, path_data
from framework.util import Util
from framework.common.plugin import get_model_setting, Logic, default_route
# 패키지
#########################################################
class P(object):
    """Plugin descriptor: holds the package name, logger, Flask blueprint,
    menu layout and plugin metadata consumed by the SJVA framework."""
    package_name = __name__.split('.')[0]
    logger = get_logger(package_name)
    blueprint = Blueprint(package_name, package_name, url_prefix='/%s' % package_name, template_folder=os.path.join(os.path.dirname(__file__), 'templates'))
    # Menu tree: 'main' label is Korean for "video download"; sub2 entries are
    # the ani365 tabs (settings / request / queue / list).
    menu = {
        'main' : [package_name, '비디오 다운로드'],
        'sub' : [
            ['ani365', 'ani365'], ['log', '로그']
        ],
        'category' : 'vod',
        'sub2' : {
            'ani365' : [
                ['setting', '설정'], ['request', '요청'], ['queue', '큐'], ['list', '목록']
            ],
        }
    }
    # Metadata shown in the plugin manager (also dumped to info.json on init).
    plugin_info = {
        'version' : '0.2.0.0',
        'name' : 'downloader_ani',
        'category_name' : 'vod',
        'icon' : '',
        'developer' : 'soju6jan',
        'description' : '비디오 다운로드',
        'home' : 'https://github.com/soju6jan/downloader_ani',
        'more' : '',
    }
    ModelSetting = get_model_setting(package_name, logger)
    # Populated by initialize() below.
    logic = None
    module_list = None
    home_module = 'ani365'
def initialize():
    """Wire the plugin into the framework: register its SQLite DB bind,
    dump plugin_info to info.json, instantiate the ani365 module, and
    install the default routes. Errors are logged, never raised."""
    try:
        app.config['SQLALCHEMY_BINDS'][P.package_name] = 'sqlite:///%s' % (os.path.join(path_data, 'db', '{package_name}.db'.format(package_name=P.package_name)))
        from framework.util import Util
        Util.save_from_dict_to_json(P.plugin_info, os.path.join(os.path.dirname(__file__), 'info.json'))
        # Imported here to avoid a circular import at module load time.
        from .logic_ani365 import LogicAni365
        P.module_list = [LogicAni365(P)]
        P.logic = Logic(P)
        default_route(P)
    except Exception as e:
        P.logger.error('Exception:%s', e)
        P.logger.error(traceback.format_exc())
initialize()
| [
"noreply@github.com"
] | k45734.noreply@github.com |
5b730de8bdcae3b2658e59b3950333c0adb23829 | 649640025ff32274b44ab8c9982a262e31d70586 | /LJstream.py | 2fe7f15d15b8cadf3f2bb8c46818287d042e9f09 | [] | no_license | jalcok1/python | 1f31275dabc8153c8431d52336a2ec1855ce4f9b | deda9b28e76b0d97291275e1ac6e47dc73732d45 | refs/heads/master | 2021-01-23T07:16:58.538247 | 2014-07-06T23:17:30 | 2014-07-06T23:17:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | import matplotlib.pyplot as plt
import numpy as np
import u3
from time import sleep
from datetime import datetime
import struct
import traceback
# Python 2 script: stream analog input AIN0 from a LabJack U3 and live-plot
# a 2-second rolling window with matplotlib, then print stream statistics.
plt.ion() #Turn on interaction mode for faster plotting via draw()
###############################################################################
## U3
# Uncomment these lines to stream from a U3
###############################################################################
# At high frequencies ( >5 kHz), the number of samples will be MAX_REQUESTS
# times 48 (packets per request) times 25 (samples per packet).
#
# n = MAX_REQUESTS*48*25
d = u3.U3()
d.configU3()
d.getCalibrationData()
d.configIO(FIOAnalog = 2) #Set the FIO0 to Analog
fs = 5000.0 #SampleFrequency Hz
T = 1.0/fs #SampleInterval
MAX_REQUESTS = 20 #MAX_REQUESTS is the number of packets to be read.
x = np.linspace(0.0, 2.0, num = 2*fs) #Initializing x-array
y = np.zeros(2*fs) #Initializing y-array
line, = plt.plot(x,y) #creating a line to plot
plt.ylim((-2.0,2.0))
plt.draw()
print "configuring U3 stream"
d.streamConfig( NumChannels = 1,
                PChannels = [ 0 ],
                NChannels = [ 31 ],
                Resolution = 3,
                ScanFrequency = fs
                )
try:
    print "start stream",
    d.streamStart()
    start = datetime.now()
    print start
    missed = 0
    dataCount = 0
    packetCount = 0
    for r in d.streamData():
        if r is not None:
            # Our stop condition
            if dataCount >= MAX_REQUESTS:
                break
            if r['errors'] != 0:
                print "Error: %s ; " % r['errors'], datetime.now()
            if r['numPackets'] != d.packetsPerRequest:
                print "----- UNDERFLOW : %s : " % r['numPackets'], datetime.now()
            if r['missed'] != 0:
                missed += r['missed']
                print "+++ Missed ", r['missed']
            y1 = np.array(r['AIN0']) #data pulled from labjack
            r_index = len(y1) #length of data pulled from labjack
            y = y[r_index:len(x)] #data shift left, move out old data
            y = np.hstack((y,y1)) #shift in new data from labjack
            line.set_ydata(y) #update stream data
            plt.draw()
            dataCount += 1
            packetCount += r['numPackets']
        else:
            # Got no data back from our read.
            # This only happens if your stream isn't faster than the
            # the USB read timeout, ~1 sec.
            print "No data", datetime.now()
# NOTE(review): bare except hides KeyboardInterrupt etc.; it prints the
# traceback below, but narrowing to Exception would be safer.
except:
    print "".join(i for i in traceback.format_exc())
finally:
    # Always stop the stream and close the device, then report statistics.
    stop = datetime.now()
    d.streamStop()
    print "stream stopped."
    d.close()
    sampleTotal = packetCount * d.streamSamplesPerPacket
    # NOTE(review): NumChannels is 1 in streamConfig above, but this divides
    # by 2 — looks inherited from a 2-channel example; confirm the math.
    scanTotal = sampleTotal / 2 #sampleTotal / NumChannels
    print "%s requests with %s packets per request with %s samples per packet = %s samples total." % ( dataCount, (float(packetCount) / dataCount), d.streamSamplesPerPacket, sampleTotal )
    print "%s samples were lost due to errors." % missed
    sampleTotal -= missed
    print "Adjusted number of samples = %s" % sampleTotal
    runTime = (stop-start).seconds + float((stop-start).microseconds)/1000000
    print "The experiment took %s seconds." % runTime
    print "Scan Rate : %s scans / %s seconds = %s Hz" % ( scanTotal, runTime, float(scanTotal)/runTime )
    print "Sample Rate : %s samples / %s seconds = %s Hz" % ( sampleTotal, runTime, float(sampleTotal)/runTime )
| [
"ryanalcoke@gmail.com"
] | ryanalcoke@gmail.com |
66899219064fcede1534a8b9bb9447668ae49104 | be2393c9f3ce2e443e48a03902280177d3f2ae0e | /v1/Core_2_playbook.py | f98859c3954420cebe889d7c73d9df62dfac79ac | [] | no_license | cucy/ansible_learn | 6d08a0fbbb09e68f186d0151c828efbbd8b93717 | 12967ecf804adb64fefcfa206a23ef8741f935cd | refs/heads/master | 2022-12-10T12:52:07.002644 | 2021-05-05T06:47:18 | 2021-05-05T06:47:18 | 125,711,644 | 0 | 1 | null | 2022-12-08T00:43:01 | 2018-03-18T09:56:37 | Python | UTF-8 | Python | false | false | 1,588 | py | # 核心类
from collections import namedtuple
from optparse import Values
from ansible.parsing.dataloader import DataLoader
from ansible.playbook import Playbook
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
from ansible import context
from ansible.module_utils.common.collections import ImmutableDict
###############################
# InventoryManager class
###############################
# Load the inventory (asset list) from disk.
loader = DataLoader()
"""
sources 可以是绝对和相对路径
"""
# (The bare string above is an inline note: `sources` accepts both absolute
# and relative paths.)
inventory_manager = InventoryManager(loader=loader, sources=['my_hots.txt'])
###############################
# VariableManager class
###############################
# Resolves host/group variables against the inventory above.
variable_manager = VariableManager(loader=loader, inventory=inventory_manager)
# Method 2: supply run options through the global ansible context
# (replaces the old Options namedtuple approach).
context.CLIARGS = ImmutableDict(
    connection='smart', module_path=None, verbosity=5,
    forks=10, become=None, become_method=None,
    become_user=None, check=False, diff=False,
    syntax=None, start_at_task=None
)
# No vault/connection passwords are needed for this run.
passwords = dict()
play_book = PlaybookExecutor(playbooks=['testplaybook.yml'],
                             inventory=inventory_manager,
                             variable_manager=variable_manager,
                             loader=loader,
                             passwords=passwords,
                             )
play_book.run()
| [
"noreply@github.com"
] | cucy.noreply@github.com |
9c2c290abf5daf75f79551d53e07a51de7e7df53 | c60be77c2805ad4e792acb11b222974562017158 | /server.py | bfd372e77910af9bcc2efa9d920900618421cfa3 | [] | no_license | sahilarora3117/SensitiveContentFiltering | 3efdfcfef04d86af7b50cac0b36f79df0393abf0 | 2a60e2face864c9c906c7bb099d78f3b0eed7379 | refs/heads/main | 2023-08-26T09:36:29.130127 | 2021-11-02T17:36:04 | 2021-11-02T17:36:04 | 408,773,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,448 | py | from fastapi import FastAPI, File, UploadFile
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import tokenizer_from_json
from fastapi.middleware.cors import CORSMiddleware
from keras.preprocessing import image
import numpy as np
from io import BytesIO
from PIL import Image
import json
# FastAPI application serving the text / image sensitivity classifiers.
app = FastAPI()
# Pre-processing constants used by the /textclass endpoint. Several of them
# (vocab_size, embedding_dim, training_size, oov_tok) are not referenced in
# this file — presumably copied from the training script; verify before removing.
vocab_size = 3000
embedding_dim = 32
max_length = 60
truncation_type='post'
padding_type='post'
oov_tok = "<OOV>"
training_size = 20000
# Sample inputs kept for manual testing; not used at runtime.
sentence = ["My credit card no is 124345346", "game of thrones season finale showing this sunday night"]
# Front-end origins allowed to call this API via CORS.
origins = [
    "http://localhost.tiangolo.com",
    "https://localhost.tiangolo.com",
    "http://localhost",
    "http://localhost:3000",
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def predict(image: Image.Image):
    """Classify a PIL image with the saved image model.

    The image is resized to 150x150, trimmed to its first three channels
    (presumably RGB, dropping any alpha — confirm against the training data)
    and given a leading batch axis before being fed to the network.
    Returns the first prediction row rendered as a string.
    """
    resized = image.resize((150, 150))
    rgb = np.asarray(resized)[..., :3]
    batch = np.vstack([np.expand_dims(rgb, axis=0)])
    # NOTE(review): the model is re-loaded from disk on every call — confirm
    # whether caching it at module level is acceptable before changing this.
    model = keras.models.load_model('model/image_model.h5')
    scores = model.predict(batch, batch_size=1)
    return str(scores[0])
def read_imagefile(file) -> Image.Image:
    """Decode raw uploaded bytes into a PIL image object."""
    buffer = BytesIO(file)
    return Image.open(buffer)
@app.get("/textclass")
async def text_class(needy: str):
    """Classify the query string `needy` as sensitive / non-sensitive.

    Loads the saved text model and tokenizer, pads the tokenized input and
    runs it through the network. Returns a set containing a human-readable
    summary string plus a boolean reflecting the last prediction
    (True = sensitive; stays True when there are no predictions).
    """
    texts = [needy]
    model = keras.models.load_model('model/text_model.h5')
    with open('tokenizer.json') as config_file:
        tokenizer = tokenizer_from_json(json.load(config_file))
    sequences = tokenizer.texts_to_sequences(texts)
    padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=truncation_type)
    predictions = model.predict(padded)
    summary = ""
    flagged = True
    for row in predictions:
        score = row[0]
        # Scores above 0.5 are treated as sensitive content.
        if score > 0.5:
            summary += "Sensitive - " + str(score) + "\n"
            flagged = True
        else:
            summary += "Non-Sensitive - " + str(score) + "\n"
            flagged = False
    return {summary, flagged}
@app.post("/imageclass")
async def predict_api(file: UploadFile = File(...)):
    """Classify an uploaded image via the image model.

    Only .jpg/.jpeg/.png uploads are accepted; anything else gets a plain
    error string back instead of a prediction.
    """
    # True when the filename's final extension is one of the supported types.
    extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
    if not extension:
        return "Image must be jpg or png format!"
    image = read_imagefile(await file.read())
    sens = predict(image)
    return {sens} | [
"iamsahil@protonmail.com"
] | iamsahil@protonmail.com |
2e1f5d6e080d3901d259f5438b5d089b0e036633 | 0144dfb3e42b0c0b253ebe2e9828126a19c6744b | /manage.py | 500042698c3a7945eb131e7285c5ae262debfaf6 | [] | no_license | codejoncode/Intro-Django | 253d9dc5ce31f82c21308dd2ca2acea80d70ae76 | c8eb0ee3b50a5dbcfdde45ecdd2f1e5817e2228f | refs/heads/master | 2020-04-02T08:02:24.496719 | 2018-10-25T23:41:44 | 2018-10-25T23:41:44 | 154,226,575 | 0 | 0 | null | 2018-10-22T22:38:12 | 2018-10-22T22:38:11 | null | UTF-8 | Python | false | false | 555 | py | #!/usr/bin/env python
import os
import sys
import sqlite3
if __name__ == '__main__':
    # Django reads its settings module from the environment; default it here
    # so management commands work without extra shell setup.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jonathan.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
| [
"jonathanjholloway@gmail.com"
] | jonathanjholloway@gmail.com |
b2e5a4581a5999e0b3fdecf534691bf4d60b7d4a | 22b862e2f51e1f492ce8119349f006d255d97d54 | /python/opencv-test.py | d221a91089709efa983e9190f1c94f4a65190e8a | [] | no_license | matlab-user/code-test-site | f85c214d85098763421b2539c070c805423f73c4 | c32e4dfa66879b3a6ff92266205d1c7287dcb4b0 | refs/heads/master | 2021-01-18T22:51:13.869824 | 2015-08-23T08:47:31 | 2015-08-23T08:47:31 | 38,184,158 | 0 | 1 | null | 2016-06-13T10:03:19 | 2015-06-28T03:01:01 | C | UTF-8 | Python | false | false | 321 | py | import cv2
import numpy as np
import sys
if __name__ == '__main__':
    winName = 'display image'
    imagePath = 'default.jpg'
    # cv2.imread() returns None when the file is missing or unreadable, so
    # test for None explicitly. BUG FIX: the old `if np.any(image)` check
    # would also report an all-black (all-zero) image as "no image found".
    image = cv2.imread( imagePath )
    if image is not None:
        # Only create the window once we actually have something to show.
        cv2.namedWindow( winName )
        cv2.imshow( winName, image )
        cv2.waitKey( 0 )
        cv2.destroyWindow( winName )
    else:
        # print() call form keeps the script runnable under Python 2 and 3
        # (the old bare print statement was Python-2 only).
        print("no image found!")
| [
"bugs_anywhere@sina.com"
] | bugs_anywhere@sina.com |
0617bf496c31811ec5ac7c8d34a4b5d9c1611aca | 89510ffa453f22f1748087d74dfb88b86c354b35 | /task/action_flow.py | c36570f27b407408a303bdd4c01c23dc764c02f4 | [
"MIT"
] | permissive | TimothyGood/unicom-task | 81568c61bf5fb779e0cfd2e8962b25da2cabe8a0 | 47b7abc6ee3950be82f23e70ced75699134f4abd | refs/heads/main | 2023-08-14T06:23:56.929425 | 2021-09-29T16:09:16 | 2021-09-29T16:09:16 | 411,735,353 | 0 | 0 | MIT | 2021-09-29T15:46:14 | 2021-09-29T15:46:13 | null | UTF-8 | Python | false | false | 4,669 | py | # -*- coding: utf-8 -*-
# @Time : 2021/08/14 16:30
# @Author : srcrs
# @Email : srcrs@foxmail.com
import requests,json,time,re,login,logging,traceback,os,random,notify,datetime,util
from lxml.html import fromstring
# Activate data ("flow") packages that are about to expire.
class action_flow:
#获得我的礼包页面对象
def getQuerywinning(self, client, username):
#获得我的礼包页面
querywinninglist = client.get(
'http://m.client.10010.com/myPrizeForActivity/querywinninglist.htm?yw_code=&desmobile='+str(username)+'&version=android@8.0100')
querywinninglist.encoding = 'utf-8'
#将页面格式化
doc = f"""{querywinninglist.text}"""
#转换为html对象
html = fromstring(doc)
return html
#获得流量包的还剩多长时间结束,返回形式时间戳
def getflowEndTime(self, client, username):
#获得中国时间戳
now = util.getTimezone()
#获得我的礼包页面对象
html = self.getQuerywinning(client, username)
#获得流量包到期的时间戳
endStamp = []
endTime = html.xpath('/html/body/div[1]/div[7]/ul/li[*]/div[2]/p[3]')
for end in endTime:
#寻找起止时间间隔位置
#end为空,可能无到期时间和开始时间
end = end.text
if end != None:
index = end.find('-')+1
#切割得到流量包失效时间
end = end[index:index+10] + ' 23:59:59'
end = end.replace('.','-')
#将时间转换为时间数组
timeArray = time.strptime(end, "%Y-%m-%d %H:%M:%S")
#得到时间戳
timeStamp = int(time.mktime(timeArray))
endStamp.append(timeStamp-now)
else:
#将找不到结束时间的流量包设置为不激活
endStamp.append(86401)
return endStamp
#存储并返回未使用的流量包
def getStorageFlow(self, client, username):
#获得我的礼包页面
html = self.getQuerywinning(client, username)
#寻找ul下的所有li,在未使用流量包栏页面
ul = html.xpath('/html/body/div[1]/div[7]/ul/li')
#存储流量包数据
datas = []
#获得所有流量包的标识并存储
for li in ul:
print(li.text)
data = {
'activeCode': None,
'prizeRecordID': None,
'phone': None
}
tran = {1:'activeCode',2:'prizeRecordID',3:'phone'}
line = li.attrib.get('onclick')
print(line)
#正则匹配字符串 toDetailPage('2534','20210307073111185674422127348889','18566669999');
pattern = re.finditer(r'\'[\dA-Za-z]+\'',line)
i = 1
for match in pattern:
data[tran[i]] = match.group()[1:-1]
i = i + 1
datas.append(data)
return datas
def run(self, client, user):
try:
#获得所有未使用的流量包
datas = self.getStorageFlow(client, user['username'])
#获得流量包还剩多长时间到期时间戳
endTime = self.getflowEndTime(client, user['username'])
#流量包下标
i = 0
flag = True
for end in endTime:
#如果时间小于1天就激活
#程序早上7:30运行,正好当天可使用
if end < 86400:
flag = False
activeData = {
'activeCode': datas[i]['activeCode'],
'prizeRecordID': datas[i]['prizeRecordID'],
'activeName': '做任务领奖品'
}
#激活流量包
res = client.post('http://m.client.10010.com/myPrizeForActivity/myPrize/activationFlowPackages.htm',data=activeData)
res.encoding = 'utf-8'
res = res.json()
if res['status'] == '200':
logging.info('【即将过期流量包】: ' + '激活成功')
else:
logging.info('【即将过期流量包】: ' + '激活失败')
time.sleep(8)
i = i + 1
if flag:
logging.info('【即将过期流量包】: 暂无')
except Exception as e:
print(traceback.format_exc())
logging.error('【即将过期流量包】: 错误, ' + stat['msgStr']) | [
"srcrs@foxmail.com"
] | srcrs@foxmail.com |
c0cbc2e765c78914980d687ff4a1a88f5fef91d6 | 225543bcaa194360aa66c738a99b7ad5c291434b | /PythonFEB/main_210225.py | bbf257e6aae1ea6c38eeccc57df86da41f5555fc | [] | no_license | m0100434/zendlijsten | f0eecf12ab3fc90c1db9b5c22f1163a92dcdf6f7 | 171e1c427db71dad01408072081c85035c57a2b2 | refs/heads/main | 2023-06-19T05:04:31.619139 | 2021-07-17T07:51:46 | 2021-07-17T07:51:46 | 349,770,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,370 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 11:56:59 2021
@author: ArxXi
"""
from selenium import webdriver
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import pickle
from datetime import date
def save_cookie(driver, path):
    """Serialize the driver's current session cookies to *path* via pickle."""
    cookies = driver.get_cookies()
    with open(path, 'wb') as out_file:
        pickle.dump(cookies, out_file)
def load_cookie(driver, path):
    """Restore previously pickled cookies from *path* into the session."""
    with open(path, 'rb') as cookie_file:
        for cookie in pickle.load(cookie_file):
            driver.add_cookie(cookie)
def remove_entry(index):
    """Drop the broadcast start time at *index* from the module-global
    `ourtime` list, compensating for entries removed earlier in the same
    pass (the module-global `entries_deleted` counts those removals).
    """
    ourtime.pop(index-entries_deleted)
    # print("time which is going to be deleted = "+ ourtime[index])
    # ourtime[index] = "-"
"""
Een v
VTM v
Vier v
Canvas v
Vitaya = vtm 4 v
Q2 v
Vijf v
CAZ = vtm 3 v
Zes v
Ketnet v
La Une v
RTL-TVI v
AB3 ?
La Deux v
Club RTL v
Plug RTL ?
La Trois v
Nickelodeon FR ?
"""
def channel_identifier(anchor_link):
    """Map a broadcast detail URL to the channel label used in the CSV.

    The channel slug is the 5th "/"-separated segment of the URL
    (e.g. https://www.demorgen.be/tv-gids/<slug>/...). Returns "null" for
    channels we do not track, or when the URL has too few segments.
    """
    # Slug -> display label. Replaces the original 16-branch if-chain.
    channel_map = {
        "een": "een",
        "canvas": "canvas",
        "vtm": "vtm",
        "vier": "vier",
        "vijf": "vijf",
        "zes": "zes",
        # BUG FIX: the label was misspelled "RTI TVI HD" — the channel is
        # RTL-TVI (see the slug).
        "rtl-tvi-hd": "RTL TVI HD",
        "la-une": "LA UNE",
        "la-deux": "LA DEUX",
        "ketnet": "KETNET",
        "vtm2": "vtm2",
        "vtm3": "vtm3",
        "club-rtl": "club-rtl",
        "vtm4": "vtm4",
        "caz-2": "caz-2",
        "la-trois": "la-trois",
    }
    parts = anchor_link.split("/")
    # Guard against short URLs (the original raised IndexError on them).
    if len(parts) <= 4:
        return "null"
    return channel_map.get(parts[4], "null")
# options = FirefoxOptions()
# options.add_arguments("--headless")
# driver = webdriver.Firefox(options=options)
#0 click een, canvas,vtm, vier
#1 click vjtf
#2 click zes
#9 click la une , la deux, ketnet, la trois
#14 click
# Date label for the CSV rows: taken from the guide URL when it ends in a
# dd-mm-yyyy day segment, otherwise today's date formatted dd/mm/yy.
date_of_movie = ""
links_traveresed = 0
default_link = "https://www.demorgen.be/tv-gids/dag/25-02-2021"
if(len(default_link.split("/")) ==6):
    date_of_movie =default_link.split("/")[5]
    print("got true")
else:
    date_of_movie = date.today()
    date_of_movie = date_of_movie.strftime('%d/%m/%y')
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(default_link)
# driver.implicitly_wait(15)
delay = 10 # seconds
# Dismiss the cookie-consent dialog, which lives inside an iframe.
try:
    myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'sp_message_iframe_404503')))
    print("Iframe element ready")
except TimeoutException:
    print("Iframe not loaded issue")
a = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(1)
print("switching to iframe done")
# Click the green "accept" button inside the consent iframe.
green_button = driver.find_element_by_xpath("/html/body/div/div[3]/div[3]/button[1]")
green_button.click()
time.sleep(10)
print("It will be on schedule website")
driver.switch_to.default_content()
# Declarations: one parallel list per CSV column, filled while scraping.
iteration = 0
ourtime = []
channel_names = []
ad_index = 82
associated_channel_name = []
production_date = []
show_title = []
current_episode = []
total_episode = []
season_number = []
myepisode_number = ""
description = []
genre = []
series_movie = []
actors = []
episode_text = " "
entries_deleted = 0
# How many times to press the "next channels" button on each scraping pass.
number_of_clicks = [0,1,2,6,9,14]
links = []
# One pass per entry in number_of_clicks: each pass advances the channel
# carousel, then scrapes every broadcast link that is new since the last pass.
while (iteration != (len(number_of_clicks))):
    try:
        myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, '/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]')))
        next_button = driver.find_element_by_xpath("/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]")
        for i in range(0, number_of_clicks[iteration]):
            print("next button should be clicked")
            next_button.click()
            driver.implicitly_wait(2)
        print("Next Button located")
    except TimeoutException:
        print("Next Button Not Located")
    a = driver.find_elements_by_class_name("tvgm-channel__logo-placeholder")
    # Getting channel names on the current page (slug is the 5th URL segment).
    for i in range(0,len(a)):
        ourlink = a[i].get_property("href")
        distributed = ourlink.split("/")
        channel = distributed[4]
        channel_names.append(channel)
    # Start times of the shows currently visible.
    b = driver.find_elements_by_class_name("tvgm-broadcast-teaser__time")
    for i in range(0,len(b)):
        ourtime.append(b[i].text)
    # Collect every broadcast detail link, de-duplicated.
    c = driver.find_elements_by_class_name("tvgm-broadcast-teaser__link")
    for i in range(0,len(c)):
        if((c[i].get_property("href")) not in links):
            links.append(c[i].get_property("href"))
    # Visit every link collected this pass (links_traveresed marks where the
    # previous pass stopped) and scrape the detail-page fields.
    for i in range(links_traveresed,len(links)):
        tmp = links[i]
        episode_text = " "
        if(channel_identifier(tmp) != "null"):
            associated_channel_name.append(channel_identifier(tmp))
            driver.get(tmp)
            # Page visited — every field falls back to "-" when absent so
            # the parallel lists stay the same length.
            try:
                production_date.append(driver.find_element_by_class_name("tvgm-broadcast-detail__productionyear").text)
            except NoSuchElementException:
                print("Production Date not found")
                production_date.append("-")
            try:
                show_title.append(driver.find_element_by_class_name("tvgm-broadcast-detail__title").text)
            except NoSuchElementException:
                print("Show title not found")
                show_title.append("-")
            try:
                description.append(driver.find_element_by_class_name("tvgm-broadcast-detail__description").text)
            except NoSuchElementException:
                print("Description not found")
                description.append("-")
            try:
                actors.append(driver.find_element_by_class_name("tvgm-broadcast-detail__castandcrew").text)
            except NoSuchElementException:
                print("Actors not found")
                actors.append("-")
            # "info-playable" text is comma separated — apparently
            # "<series/movie>, <genre>" or just "<series/movie>".
            try:
                temp = driver.find_element_by_class_name("tvgm-broadcast-detail__info-playable").text
                temp = temp.split(",")
                if(len(temp) == 2):
                    series_movie.append(temp[0])
                    genre.append(temp[1])
                    print("This got executed (Genre)")
                if (len(temp) == 1):
                    series_movie.append(temp[0])
                    genre.append("-")
            except NoSuchElementException:
                print("Series/Movie not found")
                series_movie.append("-")
                genre.append("-")
            # Episode text is space separated: word 2 is the season number,
            # word 4 (when present) looks like "<current>/<total>" —
            # TODO confirm against the live page markup.
            try:
                driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers")
                myepisode_number = driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers").text
                tmp = myepisode_number.split(" ")
                season_number.append(tmp[1])
                if(len(tmp)>2):
                    combined_episode_number = tmp[3].split("/")
                    if(len(combined_episode_number) ==2):
                        current_episode.append(combined_episode_number[0])
                        total_episode.append(combined_episode_number[1])
                        print("This got executed (Episodes)")
                    if (len(combined_episode_number) == 1):
                        current_episode.append(combined_episode_number[0])
                        total_episode.append("-")
                else:
                    # Neither current nor total episode number available.
                    total_episode.append("-")
                    current_episode.append("-")
                print("Epsisode starting and ending exist ")
            except NoSuchElementException:
                print("Starting ending Episode not exist")
                season_number.append("-")
                current_episode.append("-")
                total_episode.append("-")
        else:
            # Channel we do not track: drop its start time so the parallel
            # lists stay aligned with ourtime.
            remove_entry(i)
            entries_deleted = entries_deleted +1
            print("****** ENTRY SKIPPED ********")
    links_traveresed = len(links)
    # if(i == ad_index):
    #     break
    driver.get(default_link)
    iteration = iteration+1
driver.close()
# Debug helpers for inspecting a single scraped row (ad_index):
# print("Starting time = " + ourtime[ad_index])
# print("Actors = " + actors[ad_index])
# print("Associated Channel Name = " + associated_channel_name[ad_index])
# print("Production Date = " + production_date[ad_index])
# print("Show title = " + show_title[ad_index])
# print("Current Episode = " + current_episode[ad_index])
# print("Total Episode = " + total_episode[ad_index])
# print("Genre = " + genre[ad_index])
# print("Series_Movie = " + series_movie[ad_index])
# print("Season Number = " + season_number[ad_index])
# for i in range(0,len(ourtime)):
#     if(ourtime[i] == "-"):
#         del(ourtime[i])
# Dump every scraped column and its length — the lengths must all match for
# the CSV rows below to line up.
print(ourtime)
print(actors)
print(associated_channel_name)
print(production_date)
print(show_title)
print(current_episode)
print(total_episode)
print(genre)
print(series_movie)
print(season_number)
print(len(ourtime))
print(len(actors))
print(len(associated_channel_name))
print(len(production_date))
print(len(show_title))
print(len(current_episode))
print(len(total_episode))
print(len(genre))
print(len(series_movie))
print(len(season_number))
import csv
# Write one CSV row per broadcast; the header goes out before the first row.
with open('channel_data_210225.csv', mode='w',newline='') as employee_file:
    employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for i in range(0,len(ourtime)):
        if(i==0):
            employee_writer.writerow(["Date of Movie","Starting Time","Actors","Channel Name","Production Date","Title of Show","Current Episode","Total Episodes","Genre","Series/Movie","Season Number"])
        employee_writer.writerow([date_of_movie,ourtime[i],actors[i],associated_channel_name[i],production_date[i],show_title[i],current_episode[i],total_episode[i],genre[i],series_movie[i],season_number[i]])
| [
"m.verschuere@ailvis.com"
] | m.verschuere@ailvis.com |
8d9383180262776e736956682035080bb7976b3c | a80159b91c24941d33dfb141562eb9b188060d9a | /leetcode/二分查找/旋转数组的最小数字.py | 4d8b71ddba5396cd50c8698b1a1852e4eb4e3fb6 | [] | no_license | rookie-LeeMC/Data_Structure_and_Algorithm | ec216e2fe30e83ec11539719c50e995cd951d5a9 | 125fd69f4968ad44757d529ba7701af5bb2092bb | refs/heads/main | 2023-06-26T14:27:54.249192 | 2021-07-25T09:22:02 | 2021-07-25T09:22:02 | 367,633,358 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # -*- coding:UTF-8 -*-
'''
https://leetcode-cn.com/problems/xuan-zhuan-shu-zu-de-zui-xiao-shu-zi-lcof/
'''
def minArray(numbers):
    """Return the smallest element of a rotated sorted array that may
    contain duplicates; None for an empty input.

    Binary search against the right endpoint: O(log n) typically, O(n)
    worst case when duplicates force the linear `hi -= 1` fallback.
    """
    if not numbers:
        return None
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        pivot, right = numbers[mid], numbers[hi]
        if pivot > right:
            # Minimum lies strictly to the right of mid.
            lo = mid + 1
        elif pivot < right:
            # Minimum is at mid or to its left.
            hi = mid
        else:
            # pivot == right: cannot decide which side — shrink safely.
            hi -= 1
    return numbers[lo]
# WRONG implementation — kept on purpose as a counter-example; it compares
# against numbers[start] instead of numbers[end] (see the inline note).
def minArray2(numbers):
    if not numbers: return None
    if len(numbers) == 1: return numbers[0]
    start, end = 0, len(numbers) - 1
    while start < end:
        mid = start + (end - start) // 2
        if numbers[mid] > numbers[start]:
            start = mid + 1
        elif numbers[mid] < numbers[start]:
            end = mid
        elif numbers[mid] == numbers[start]:
            start += 1 # bug: when start and end are adjacent, mid == start and numbers[start] == numbers[end], so this steps past mid — if mid held the minimum it gets skipped
    return numbers[start]
# Quick manual checks: the minimum is 1 for the first pair and 0 for the
# second; minArray2 prints a wrong value (2) for the first input,
# demonstrating the bug described above.
print(minArray([3, 4, 5, 1, 2]))
print(minArray2([3, 4, 5, 1, 2]))
print(minArray([2, 2, 2, 0, 1]))
print(minArray2([2, 2, 2, 0, 1]))
# print(minArray([1, 3, 5]))
# print(minArray2([1, 3, 5]))
| [
"limengchen@jd.com"
] | limengchen@jd.com |
c41686bf719d07a62eb8b42429a30b9acd99e4de | 8bd0896f8b7a201954959c82bee63701ca75b300 | /configurer.py | 376a09449ec8be3635d80b1a6faa755a28ab2a55 | [] | no_license | yellams/Setlist-to-Playlist-Generator | 201755f72285e89bfffd2c61e69ceca9076d4bac | 0e0f6f51875943eb888fab678f99e995e392e1ff | refs/heads/master | 2021-01-18T17:48:52.160798 | 2017-04-06T00:46:23 | 2017-04-06T00:46:23 | 86,815,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | import re
import configparser
import spotipy.util as util
import os
# One-time interactive setup: collects local/Spotify/Google Music settings
# and writes them to config.txt for the PlaylistGenerator.
music_dir = ''
# BUG FIX: playlist_dir must be defaulted like the other settings —
# previously it was only assigned inside the m3u branch, so answering "No"
# to the first question crashed with a NameError at the config['Local'] line.
playlist_dir = ''
spotify_user = ''
gpm_user = ''
gpm_pw = ''
config = configparser.ConfigParser()
print('This script only needs to be run once, before your first run of the program')
print('It will generate config.txt in this directory, a file required to run the PlaylistGenerator')
print('This script will remove any config.txt in this directory, resetting all options before asking for new input')
input('Press Enter to continue...')
# Start from a clean slate: drop any previous configuration.
if os.path.isfile('config.txt'):
    os.remove('config.txt')
resp = input('Do you want to make m3u playlists from a local music directory? [Y/N]: ')
if re.match('[Yy]', resp):
    music_dir = input('Type the directory of your local music directory: ')
    playlist_dir = input('Type the directory to store your playlist m3u files: ')
resp = input('Do you want to make spotify playlists? [Y/N]: ')
if re.match('[Yy]', resp):
    spotify_user = input('Type your spotify username: ')
    print('A browser will launch a tab, allow the app access and copy the return url here')
    scope = 'playlist-read-private playlist-modify-public playlist-modify-private'
    # Caches the OAuth token so the main program can reuse it.
    util.prompt_for_user_token(spotify_user, scope, '892896528c1c4849bab75b493377d83e',
                               '287db3f1e9c64d6aa8de0724a3b09573', 'https://example.com/callback/')
resp = input('Do you want to make google music playlists? [Y/N]: ')
if re.match('[Yy]', resp):
    gpm_user = input('Type your google music username (email): ')
    print('app-specific password suggested! google api has no authentication like spotify, '
          'so the app-specific password is stored in the config file')
    gpm_pw = input('Type your google app-specific password: ')
# Unanswered sections are written with empty values.
config['Local'] = {'Music Directory': music_dir, 'Playlist Directory': playlist_dir}
config['Spotify'] = {'Username': spotify_user}
config['Google Music'] = {'Username': gpm_user, 'Password': gpm_pw}
with open('config.txt', 'w') as configfile:
    config.write(configfile)
| [
"nick_smalley@hotmail.co.uk"
] | nick_smalley@hotmail.co.uk |
87bbcfce9578a68f80b05ea38464493f4f179a40 | 4703b990612ece92825eec4ad7608ce8fd563f4e | /src/server_handler/change_ip.py | 30d195a7a1a007ef597869ef4d57154b1d95988c | [
"MIT"
] | permissive | robertpardillo/Funnel | 69a270ee722f05e6a23eb27d7b3268f916a0d9f8 | f45e419f55e085bbb95e17c47b4c94a7c625ba9b | refs/heads/master | 2020-03-08T10:15:24.458019 | 2018-04-04T13:52:04 | 2018-04-04T13:52:04 | 128,068,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py |
import re
def change_handler_IP(new_ip):
    """Rewrite the `var IP = ...` declaration in the front-end
    static/js/webSocket.js so the client connects to *new_ip*.

    The file is edited in place: the "// IP" comment line plus the
    following `var IP = ...` line are replaced with a freshly formatted
    declaration.
    """
    with open('static/js/webSocket.js', 'r+') as f:
        text0 = f.read()
        string = '// IP \nvar IP = "{}";'.format(new_ip)
        text = re.sub('.*//.*IP.*\\n.*var.*IP.*=.*', string, text0)
        f.seek(0)
        f.write(text)
        # BUG FIX: truncate after rewriting — with mode 'r+' a shorter
        # replacement would otherwise leave stale bytes from the old
        # contents at the end of the file. (The explicit f.close() was
        # redundant inside the `with` block and has been dropped.)
        f.truncate()
| [
"robertpardillo93@gmail.com"
] | robertpardillo93@gmail.com |
d3a786c7b7bd8e77a8951d9b735d4c1bef26eb7e | 4971735e54cb52189022d2f0fa6d5618856fe561 | /env/bin/genwallet | c16177e4d67d53b7d24a524aeed8f4ec68bd7fdc | [
"MIT"
] | permissive | Organizational-Proof-Of-Work/clearinghoused_build | 551d832f1b8bc5adecbf6124a3ca9d234cc65cf0 | 7bab4ccb516015913bad41cfdc9eb15d3fbfcaf4 | refs/heads/master | 2020-12-27T09:07:23.148765 | 2020-02-02T21:44:46 | 2020-02-02T21:44:46 | 237,843,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/xch/clearinghoused_build/env/bin/python3.4
# -*- coding: utf-8 -*-
import re
import sys
from pycoin.scripts.genwallet import main
# Auto-generated console-script shim: normalize argv[0] (strip the
# "-script.py" / ".exe" suffix that setuptools adds on some platforms),
# then delegate to pycoin's genwallet entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"vcgato29@gmail.com"
] | vcgato29@gmail.com | |
635d2fd6ec0cbfccb05a30d49d971ce0dd8ed9a9 | b0384631c98537f1891a1cb3251a28c2e3816cda | /python/tests/test_insertion_sort.py | 23081784c74d838ac2c41439364addb10bb0091a | [] | no_license | okayjones/data-structures-and-algorithms | b14022e554bf875d5cf36748d0f1a1922d64b11e | d923132849f799985440dd5c510e932d731b82d1 | refs/heads/master | 2023-03-31T07:41:01.276358 | 2021-04-06T03:39:56 | 2021-04-06T03:39:56 | 297,821,846 | 0 | 2 | null | 2021-04-06T03:39:56 | 2020-09-23T01:48:25 | JavaScript | UTF-8 | Python | false | false | 750 | py | from challenges.insertion_sort.insertion_sort import insertion_sort
def test_insertion_sort_simple():
    """A shuffled list comes back in ascending order."""
    result = insertion_sort([8, 4, 23, 42, 16, 15])
    assert result == [4, 8, 15, 16, 23, 42]
def test_insertion_sort_reverse():
    """A strictly descending list (including a negative) gets reversed."""
    result = insertion_sort([20, 18, 12, 8, 5, -2])
    assert result == [-2, 5, 8, 12, 18, 20]
def test_insertion_sort_few_unique():
    """Duplicates are kept and grouped correctly."""
    result = insertion_sort([5, 12, 7, 5, 5, 7])
    assert result == [5, 5, 5, 7, 7, 12]
def test_insertion_sort_nearly_sorted():
    """An almost-sorted list with one swapped pair is repaired."""
    result = insertion_sort([2, 3, 5, 7, 13, 11])
    assert result == [2, 3, 5, 7, 11, 13]
| [
"loganemilyjones@mac.com"
] | loganemilyjones@mac.com |
2c5501bfeeb3e261c19ee83f877b202c512ea266 | 1b7d5036985f3e8f080b8cf8a5511ad73dfa1e00 | /image_recognition.py | ee165aec22d4c767fc2d989f718a501b627947fa | [] | no_license | mycityyumi/Deep-learning-by-keras | 30d5da452cf6809470fd956053a9f8f66dcb2320 | 28dd53a709a0a7699c94c578788e82f289e48cd9 | refs/heads/master | 2020-03-26T15:08:22.679921 | 2018-08-16T18:29:38 | 2018-08-16T18:29:38 | 145,024,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import numpy as np
from keras.preprocessing import image
from keras.applications import resnet50
# Load Keras' ResNet50 model that was pre-trained against the ImageNet database
model = resnet50.ResNet50()
# Load the image file, resizing it to 224x224 pixels (required by ResNet50)
img = image.load_img("bay2.jpg", target_size=(224, 224))
# Convert the image to a numpy array of shape (height, width, channel),
# the last axis holding the [red, green, blue] values.
x = image.img_to_array(img)
# Add a fourth (batch) dimension since Keras expects a list of images
x = np.expand_dims(x, axis=0)
# Scale the input image to the range used in the trained network
x = resnet50.preprocess_input(x)
# Run the image through the deep neural network to make a prediction
predictions = model.predict(x)
# Look up the names of the predicted classes. Index zero is the results for the first image.
predicted_classes = resnet50.decode_predictions(predictions, top=9)
print("This is an image of:")
for imagenet_id, name, likelihood in predicted_classes[0]:
    # BUG FIX: the format spec was "{:2f}" (a minimum field *width* of 2,
    # which still prints six decimal places); "{:.2f}" gives the intended
    # two-decimal likelihood.
    print(" - {}: {:.2f} likelihood".format(name, likelihood))
| [
"noreply@github.com"
] | mycityyumi.noreply@github.com |
21f206752ebfdcc133190f1ba94d60ffc4cda891 | 8eeaecd4c66741717bafda4dae869ef11a66d7f5 | /plugins/modules/ibm_compute_vm_instance_info.py | 4011be8e64f58b52b1767e5d7c80d3ddbd84950c | [
"Apache-2.0"
] | permissive | pachvasu/ansible-collection-ibm | 13327fc968d27a1dee0c621e7f5d7a6a8f66caa4 | 3de32f396d0427b68bac114b1ebb72bb6b15837e | refs/heads/master | 2021-01-14T20:52:22.445194 | 2020-02-21T04:41:26 | 2020-02-21T04:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,037 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ibm_compute_vm_instance_info
short_description: Retrieve IBM Cloud 'ibm_compute_vm_instance' resource
version_added: "2.8"
description:
- Retrieve an IBM Cloud 'ibm_compute_vm_instance' resource
requirements:
- IBM-Cloud terraform-provider-ibm v1.2.1
- Terraform v0.12.20
options:
datacenter:
description:
- Datacenter in which the virtual guest is deployed
required: False
type: str
last_known_power_state:
description:
- The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline.
required: False
type: str
power_state:
description:
- The current power state of a virtual guest.
required: False
type: str
public_subnet_id:
description:
- None
required: False
type: int
ipv6_address_id:
description:
- None
required: False
type: int
public_ipv6_subnet:
description:
- None
required: False
type: str
hostname:
description:
- The hostname of the virtual guest
required: True
type: str
domain:
description:
- The domain of the virtual guest
required: True
type: str
status:
description:
- The VSI status
required: False
type: str
private_interface_id:
description:
- None
required: False
type: int
most_recent:
description:
- If true and multiple entries are found, the most recently created virtual guest is used. If false, an error is returned
required: False
type: bool
default: False
private_subnet_id:
description:
- None
required: False
type: int
ipv4_address:
description:
- None
required: False
type: str
public_interface_id:
description:
- None
required: False
type: int
ip_address_id_private:
description:
- None
required: False
type: int
public_ipv6_subnet_id:
description:
- None
required: False
type: str
secondary_ip_addresses:
description:
- None
required: False
type: list
elements: str
secondary_ip_count:
description:
- None
required: False
type: int
cores:
description:
- Number of cpu cores
required: False
type: int
ipv4_address_private:
description:
- None
required: False
type: str
ip_address_id:
description:
- None
required: False
type: int
ipv6_address:
description:
- None
required: False
type: str
ibmcloud_api_key:
description:
- The API Key used for authentification. This can also be provided
via the environment variable 'IC_API_KEY'.
required: True
ibmcloud_region:
description:
- Denotes which IBM Cloud region to connect to
default: us-south
required: False
author:
- Jay Carman (@jaywcarman)
'''
# Top level parameter keys required by Terraform module
# (these are the options declared required: True in DOCUMENTATION above)
TL_REQUIRED_PARAMETERS = [
    ('hostname', 'str'),
    ('domain', 'str'),
]
# All top level parameter keys supported by Terraform module
# (must stay in sync with the options section of DOCUMENTATION above)
TL_ALL_PARAMETERS = [
    'datacenter',
    'last_known_power_state',
    'power_state',
    'public_subnet_id',
    'ipv6_address_id',
    'public_ipv6_subnet',
    'hostname',
    'domain',
    'status',
    'private_interface_id',
    'most_recent',
    'private_subnet_id',
    'ipv4_address',
    'public_interface_id',
    'ip_address_id_private',
    'public_ipv6_subnet_id',
    'secondary_ip_addresses',
    'secondary_ip_count',
    'cores',
    'ipv4_address_private',
    'ip_address_id',
    'ipv6_address',
]
# define available arguments/parameters a user can pass to the module
# (Ansible argument_spec; mirrors the options section of DOCUMENTATION above)
from ansible.module_utils.basic import env_fallback
module_args = dict(
    datacenter=dict(
        required=False,
        type='str'),
    last_known_power_state=dict(
        required=False,
        type='str'),
    power_state=dict(
        required=False,
        type='str'),
    public_subnet_id=dict(
        required=False,
        type='int'),
    ipv6_address_id=dict(
        required=False,
        type='int'),
    public_ipv6_subnet=dict(
        required=False,
        type='str'),
    hostname=dict(
        required=True,
        type='str'),
    domain=dict(
        required=True,
        type='str'),
    status=dict(
        required=False,
        type='str'),
    private_interface_id=dict(
        required=False,
        type='int'),
    most_recent=dict(
        default=False,
        type='bool'),
    private_subnet_id=dict(
        required=False,
        type='int'),
    ipv4_address=dict(
        required=False,
        type='str'),
    public_interface_id=dict(
        required=False,
        type='int'),
    ip_address_id_private=dict(
        required=False,
        type='int'),
    public_ipv6_subnet_id=dict(
        required=False,
        type='str'),
    secondary_ip_addresses=dict(
        required=False,
        # NOTE(review): elements='' looks unintended — DOCUMENTATION declares
        # "elements: str" for this option; confirm before changing.
        elements='',
        type='list'),
    secondary_ip_count=dict(
        required=False,
        type='int'),
    cores=dict(
        required=False,
        type='int'),
    ipv4_address_private=dict(
        required=False,
        type='str'),
    ip_address_id=dict(
        required=False,
        type='int'),
    ipv6_address=dict(
        required=False,
        type='str'),
    # API key is secret: no_log keeps it out of Ansible logs; it can also be
    # supplied via the IC_API_KEY environment variable.
    ibmcloud_api_key=dict(
        type='str',
        no_log=True,
        fallback=(env_fallback, ['IC_API_KEY']),
        required=True),
    ibmcloud_region=dict(
        type='str',
        fallback=(env_fallback, ['IC_REGION']),
        default='us-south')
)
def run_module():
    """Build the AnsibleModule, run the Terraform data lookup, and report.

    Fails the module with the parsed Terraform stderr when the wrapper
    returns a non-zero rc; otherwise exits successfully with the result.
    """
    from ansible.module_utils.basic import AnsibleModule
    import ansible.module_utils.ibmcloud as ibmcloud

    ansible_module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False)

    outcome = ibmcloud.ibmcloud_terraform(
        resource_type='ibm_compute_vm_instance',
        tf_type='data',
        parameters=ansible_module.params,
        ibm_provider_version='1.2.1',
        tl_required_params=TL_REQUIRED_PARAMETERS,
        tl_all_params=TL_ALL_PARAMETERS)

    if outcome['rc'] > 0:
        ansible_module.fail_json(
            msg=ibmcloud.Terraform.parse_stderr(outcome['stderr']), **outcome)
    ansible_module.exit_json(**outcome)
def main():
    """Entry point for the Ansible module; delegates to run_module()."""
    run_module()
if __name__ == '__main__':
main() | [
"amallak1@in.ibm.com"
] | amallak1@in.ibm.com |
ffc8510fad9f4ace2d2bd04157cac3c8f167d580 | ac20ee49a06ad58279d28801d821fbd1aa6e9504 | /tests/test_stack.py | 78f1fd23a3caeb36d4673acdfba428b8fd093e64 | [] | no_license | gummoe/practice | 150ad4bd018102f2e7d6bb6cb0396c0c75199e7f | fb44a87569d06b83ab3756888f6a78c61fc3ab33 | refs/heads/master | 2023-08-19T00:23:10.264157 | 2023-08-12T20:43:47 | 2023-08-12T20:43:47 | 145,779,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | import unittest
from Stacks_and_Queues.stack import Stack
class StackTest(unittest.TestCase):
    """Unit tests for the Stack implementation."""

    def test_is_empty_is_true(self):
        # A freshly constructed stack reports empty.
        self.assertTrue(Stack().is_empty())

    def test_is_empty_is_false(self):
        s = Stack()
        s.stack = [1, 2, 3]
        self.assertFalse(s.is_empty())

    def test_pop_empty(self):
        # Popping an empty stack yields None rather than raising.
        self.assertIsNone(Stack().pop())

    def test_pop_not_empty(self):
        s = Stack()
        s.stack = [1, 2, 3]
        self.assertEqual(s.pop(), 1)
        self.assertEqual(len(s.stack), 2)

    def test_push(self):
        # Pushed items land at the front of the underlying list.
        s = Stack()
        s.push(1)
        self.assertEqual(s.stack[0], 1)
        s.push(2)
        self.assertEqual(s.stack[0], 2)

    def test_peek_is_empty(self):
        self.assertIsNone(Stack().peek())

    def test_peek_is_not_empty(self):
        s = Stack()
        s.stack = ['bananas']
        self.assertEqual(s.peek(), 'bananas')
| [
"b.gummoe@gmail.com"
] | b.gummoe@gmail.com |
8c3a3ad6824443f6589a63360797ccb846b92ad8 | 5b3c10d432d7a88bfe86637c286d3bf3a55732c1 | /solutions/python3/0724.py | e4a540435f56ec6d513ba6054063da2d17f2c236 | [] | no_license | leewwe/LeetCode-1 | c0ac8ff16b925cab833dbae554a51b613450aa6c | e42bd45d134870d9a8ece3564236705c4479747f | refs/heads/master | 2021-02-12T20:55:17.363375 | 2020-03-02T14:04:35 | 2020-03-02T14:04:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | class Solution:
def pivotIndex(self, nums: List[int]) -> int:
sum_ = sum(nums)
prefixSum = 0
for i, num in enumerate(nums):
if prefixSum == sum_ - prefixSum - num:
return i
prefixSum += num
return -1
| [
"walkccray@gmail.com"
] | walkccray@gmail.com |
dd762817cc3638ccc38f499401078b5017b3d5ef | 81d6af46887bcd166fb99015a68ca001f25cd639 | /MAS location parser/Scraper.py | 26b98275663c0e0994427b684b5919dea4714568 | [] | no_license | stephenFox1911/Group-N-Go | 73ab1269b2b4f1e53de3ccbfc0c5e140181bb45a | feb8b90220d4a38ab530f6c37b9f77e895641cac | refs/heads/master | 2021-01-01T17:05:47.003666 | 2014-12-29T02:11:01 | 2014-12-29T02:11:01 | 24,345,748 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | __author__ = 'Adam'
import bs4
import urllib.request
import re
def getBuildingList():
    """Scrape the campus occupants page and return 'name | href' entries.

    Entries are returned in first-seen order with duplicates removed.
    """
    url = "http://www.facilities.gatech.edu/map/alpha_occupants.php"
    page = urllib.request.urlopen(url).read()
    soup = bs4.BeautifulSoup(page)
    entries = []
    for ul in soup.find(id="newsRelease").findAll('ul'):
        for anchor in ul.findAll('a'):
            entry = anchor.getText() + " | " + anchor['href']
            if entry not in entries:
                entries.append(entry)
    return entries
def get_Address(building_info):
    """Fetch a building's detail page and return 'name | street address'.

    building_info is a 'name | relative-link' entry as produced by
    getBuildingList(). Returns None when no street address is found
    (preserving the original implicit-None behavior).
    """
    parts = re.split(r'\s+\|\s+', building_info)
    name = parts[0]
    link = "http://www.facilities.gatech.edu/map/" + re.sub(r'\s+', "", parts[1])
    bldg_page = urllib.request.urlopen(link).read()
    bldg_soup = bs4.BeautifulSoup(bldg_page)
    centerdiv = bldg_soup.find(id="columnPrimary")
    address = ""
    for line in centerdiv.findAll('h4'):
        text = line.getText().strip()
        # Bug fix: the original indexed text[0] without checking for empty
        # <h4> elements, which raised IndexError. A leading digit marks the
        # street-address line.
        if text and re.match(r'[0-9]', text):
            address = text + ", Atlanta, GA 30313"
    # Bug fix: the original used `address is not ""` (identity comparison on
    # a string literal), which is unreliable; test truthiness instead.
    if address:
        return name + " | " + address
    return None
if __name__ == "__main__":
    # Scrape the full building list, resolve each entry to a street
    # address, de-duplicate, and write the results to building_info.txt.
    rawlist = getBuildingList()
    finallist = []
    for bldg in rawlist:
        bldgtext = get_Address(bldg)
        # get_Address returns None when no street address was found.
        if (bldgtext is not None) and (bldgtext not in finallist):
            finallist.append(bldgtext)
    f = open('building_info.txt', 'w')
    # NOTE(review): the file handle is never closed explicitly; consider a
    # 'with' block so the output is flushed deterministically.
    for done in finallist:
        f.write(done + "\n")
print("DONE") | [
"swimmadude66@gmail.com"
] | swimmadude66@gmail.com |
233e2a5963d5a426c93405fb4107477636dc4909 | 16f8fbcfd7e67abd42f4128a11d5b6134b304101 | /chapter2/creditcard2.28.py | ad4972243fe71cb06d8f8d47a84d99889579134b | [] | no_license | xiaodong-Ren/mytest | 2b64d5d8a084e5bdd5a5dcae42440866a9a0cd0b | 472d88f7f0ab3bd2a7a7998bbe15fb28b26a4c9c | refs/heads/master | 2022-11-15T09:06:36.763718 | 2020-07-22T13:35:16 | 2020-07-22T13:35:16 | 276,074,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | class CreditCard:
"""A consumer credit card."""
    def __init__(self, customer, bank, acnt, limit):
        """Create a new credit card instance.
        The initial balance is zero.
        customer  the name of the customer (e.g., 'John Bowman')
        bank      the name of the bank (e.g., 'California Savings')
        acnt      the account identifier (e.g., '5391 0375 9387 5309')
        limit     credit limit (measured in dollars)
        """
        self._customer = customer
        self._bank = bank
        self._account = acnt
        self._limit = limit
        # Outstanding balance: grows with charge(), shrinks with make_payment().
        self._balance = 0
    def get_customer(self):
        """Return the name of the customer who owns this card."""
        return self._customer
    def get_bank(self):
        """Return the name of the issuing bank."""
        return self._bank
    def get_account(self):
        """Return the card's account identifier (typically stored as a string)."""
        return self._account
    def get_limit(self):
        """Return the current credit limit (in dollars)."""
        return self._limit
    def get_balance(self):
        """Return the current outstanding balance (in dollars)."""
        return self._balance
def charge(self, price):
"""Charge given price to the card, assuming sufficient credit limit.
Return True if charge was processed; False if charge was denied.
"""
try:
float(price)
if price + self._balance > self._limit: # if charge would exceed limit,
return False # cannot accept charge
else:
self._balance += price
return True
except ValueError:
print('charge function need a numerical input')
def make_payment(self, amount):
"""Process customer payment that reduces balance."""
try:
# float(amount)
if amount < 0.:
raise ValueError('amount must be a positive number')
self._balance -= amount
except TypeError:
print('make_payment need a numerical input')
class PredatoryCreditCard(CreditCard):
    """A credit card that assesses interest and penalty fees.

    apr is the annual percentage rate expressed as a fraction (e.g. 0.0825).
    """

    def __init__(self, customer, bank, acnt, limit, apr):
        """Create a new predatory credit card; the initial balance is zero."""
        super().__init__(customer, bank, acnt, limit)
        self._apr = apr
        self._time = 0  # number of charge attempts in the current month

    def charge(self, price):
        """Charge the given price; assess a $5 fee when the charge is denied.

        Bug fix: the original override dropped the required `price` parameter
        and called super().charge() with no argument, so every call raised
        TypeError.
        """
        self._time += 1
        success = super().charge(price)
        if not success:
            self._balance += 5  # penalty for a denied charge
        return success

    def process_month(self):
        """Apply monthly interest plus any over-use fee, then reset the counter."""
        fee = max(0, self._time - 10)  # fee kicks in beyond 10 charges/month
        if self._balance > 0:
            # Compound the APR monthly.
            monthly_factor = pow(1 + self._apr, 1 / 12)
            self._balance = (self._balance + fee) * monthly_factor
        # Bug fix: the original assigned self.time (a brand-new attribute)
        # and never reset the _time counter.
        self._time = 0
if __name__ == '__main__':
| [
"2550828616@qq.com"
] | 2550828616@qq.com |
703513f83f52c79bc6a54d94e6347e41f18c5214 | a773881379e8d71f0bd1040219be44a6661ad101 | /1_hello_world/hello_world_client.py | 8aa9519e3fbbb40559361f035c0da88e7fb0210b | [] | no_license | odellus/autolector | 94816eefb0f4932e4564c4a69fba3b95404c3afa | 3bac3618beb4c5316348ac81a3414054dfa412a8 | refs/heads/main | 2023-04-27T10:04:47.627414 | 2021-05-15T07:12:50 | 2021-05-15T07:12:50 | 361,344,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import requests
# Address of the locally running hello-world server this client targets.
url = 'http://127.0.0.1:5005/'
# Issue a GET request against the root endpoint.
res = requests.get(url)
print("If it worked it will say {'hello':'world!'}")
# Decode and print the JSON body of the response.
print(res.json())
| [
"odell.wood@gmail.com"
] | odell.wood@gmail.com |
2d6aa92846e99ddfb2b8c0021138962bb73079cb | c0898e3ca453b0053d5d9ac726080ab6d3242e67 | /app/movies/views.py | 8705567995172d8c1627a6a238c3ca8805bb0688 | [] | no_license | aftab-hussain-93/movie_api_integration | f1906d7737700ee159ba7fe876c18b6947988de8 | e6bd1f16f9ec3347bcb716fc7128b3357d735f78 | refs/heads/master | 2022-12-15T07:34:42.981491 | 2020-09-17T12:53:41 | 2020-09-17T12:53:41 | 296,215,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import requests
from requests.models import PreparedRequest
from werkzeug.security import generate_password_hash, check_password_hash
from flask import Blueprint, current_app, request, url_for
from app.general_utils import token_required
movie = Blueprint('movie',__name__)
@movie.route('/movies')
@token_required
def all_movies(current_user):
    """
    Provides the paginated list of all the movies. Use query string with param - "page" = <num> to get the next
    paginated list.
    """
    page = request.args.get('page')
    user = current_app.config['API_USERNAME']
    pwd = current_app.config['API_PWD']
    payload = {'page': page} if page else None

    # Retry the upstream API a few times; only trust the response if we
    # actually received a 200 before running out of attempts.
    data_found = False
    request_count = 4
    for attempt in range(request_count):
        current_app.logger.info(f"Attempt number {attempt} to access Movie API")
        try:
            res = requests.get('https://demo.credy.in/api/v1/maya/movies/',
                               params=payload, auth=(user, pwd), timeout=3)
        except requests.RequestException:
            # Bug fix: a timeout/connection error previously propagated and
            # produced a 500; treat it as a failed attempt and retry.
            continue
        if res.status_code == 200:
            data_found = True
            break

    # Bug fix: res.json() was previously called even when every attempt
    # failed, and could crash on a non-JSON error body.
    if not data_found:
        return {"error": "Could not fetch API data. Please try again in a while."}, 400

    data = res.json()
    if data.get('error'):
        return {"error": "Could not fetch API data. Please try again in a while."}, 400

    def _local_page_url(api_url):
        # Map the upstream API's paginated URL onto this service's own URL
        # (shared helper replaces the duplicated next/previous logic).
        if not api_url:
            return None
        return url_for('movie.all_movies', _external=True) + '?page=' + api_url.split('=')[1]

    result = {
        "count": data['count'],
        "next": _local_page_url(data.get('next')),
        "previous": _local_page_url(data.get('previous')),
        "data": data['results']
    }
    return result, 200
| [
"aftab.h.1993@gmail.com"
] | aftab.h.1993@gmail.com |
0dc19efe2d518e8be5fd9d82b203f9b1b4cb72ee | 0e08d6c1244c31c707eb7f2e6de4ff24c2bfe032 | /str.py | b6ca108bb61501254ff7ada2775db9d861898ec5 | [] | no_license | CrashNosok/del | 7a088069a844a9c212026c0305ca8fcc8beebb80 | 7c2f44b9b791e826f511b13c89cb53201790486b | refs/heads/master | 2021-06-28T08:26:56.055373 | 2021-05-27T16:03:17 | 2021-05-27T16:03:17 | 232,588,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,427 | py | # a = int(input())
# if a == 5:
# print('молодец')
# elif a == 4:
# print('хорошо')
# elif a == 3:
# print('средне')
# elif a == 2:
# print('плохо')
# else:
# print('err')
# m = int(input())
# n = int(input())
# a = int(input())
# b = int(input())
# c = int(input())
# if (a < m and b < n) or (b < m and a < n) or\
# ((a < m and c < n) or (c < m and a < n)) or\
# ((b < m and c < n) or (c < m and b < n)):
# print('да')
# else:
# print('нет')
# a = int(input())
# if a == 1 or a == 2 or a == 4 or a == 7:
# print('нельзя')
# else:
# print('можно')
'''
строки
строка - набор символов
'''
"""
a = "he's a doctor"
b = 'он сказал: "привет"'
c = 'he\'s a doctor. он сказал: "привет"'
c2 = c = '''he's a doctor. он сказал: "привет"'''
print(c2)
"""
# \ символ переноса строки и экранирования
# str1 = 'hello \
# world'
# print(str1)
# str2 = '''dsdjksfn
# dsifn
# dsfl
# dsf'''
# print(str2)
# непечатные символы:
# \n - new line переход на новую строку
# \t - tab табуляция
# print('hello\n\n\n\tworld')
# конкатенация строк (сложение строк)
# str1 = 'hello '
# str2 = 'world'
# print(str1 + str2)
# name = input('введите имя: ')
# print('hello ' + name)
# умножение строки на число
# print('la' * 3)
# len() длина строки
# a = 'qwerty'
# print(len(a))
'''
сравнение строк
каждому символу соответствует число из ASCII таблицы
'''
# print('a' > 'A')
# print(97 > 65)
# str1 = 'aa'
# str2 = 'aaa'
# print(str1 > str2)
# print('12' < '248' and 23 > 9)
# print('l', 'o', 'l')
'''
задача 1:
str1 = 'строка'
str2 = 'вторая строка'
вывод:
'строка вторая строка'
задача 2:
str1 = 'строка'
str2 = 'вторая строка'
вывод:
'строкавторая строка'
задача 3:
str1 = 'строка'
str2 = 'вторая строка'
вывод:
'строка
вторая строка'
задача 4:
Программа спрашивает логин у пользователя, если логин “Сотрудник”, программа
должна вывести “Привет”, если “Директор”, то “Здравствуйте!”, если “”,
то “нет логина”.
задача 5:
Написать простой калькулятор, который считывает с пользовательского
ввода три строки: первое число, второе число и операцию, после чего применяет
операцию к введенным числам (“первое число” “операция” “второе число”)
и выводит результат на экран.
Поддерживаемые операции: +, -, /, *, mod, pow, div, где:
mod - это взятие остатка от деления,
pow - возведение в степень,
div - целочисленное деление.
'''
| [
"noreply@github.com"
] | CrashNosok.noreply@github.com |
7f3b1aa13a0b19734edc82dd8b641d82ce0bec2f | fef7b3ab3a0f4403a22155b55f9259d3fcf69e44 | /training.py | abd744fe5e9949bf5a1ec2f23b23efa560804c99 | [] | no_license | codefluence/ventilator-pressure-prediction | 3af7bed55afd0def756d31deaa322ec92bff213b | c7c35e0a2a881844ec1f2a8e30e03f013fdffd57 | refs/heads/main | 2023-08-31T09:47:53.050190 | 2021-10-29T09:24:18 | 2021-10-29T09:24:18 | 411,368,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,315 | py | import math
from datetime import datetime
import os
import json
from tqdm import tqdm
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.metrics import MultiHorizonMetric
from pytorch_forecasting.data.encoders import NaNLabelEncoder
from data import VantilatorDataModule
from lstm import LSTM
from unet import UNet
def fit_data(model_name, CV_split):
torch.cuda.manual_seed(0)
torch.manual_seed(0)
np.random.seed(0)
pl.utilities.seed.seed_everything(0)
# torch.backends.cudnn.benchmark = False
# pl.utilities.seed.seed_everything(0)
# os.environ["CUBLAS_WORKSPACE_CONFIG"]=":4096:8"
data = VantilatorDataModule(CV_split=CV_split)
if model_name == 'LSTM':
model = LSTM(n_channels = data.series_input.shape[1])
elif model_name == 'UNET':
model = UNet(n_channels=data.series_input.shape[1])
filename = model.name + '_CV5'+str(CV_split)
dirpath='./checkpoints/'
early_stop_callback = EarlyStopping(
monitor='val_mae',
patience=11,
verbose=True,
mode='min',
min_delta=0.0001
)
checkpoint_callback = ModelCheckpoint(
dirpath=dirpath,
filename=filename,
save_top_k=1,
verbose=True,
monitor='val_mae',
mode='min'
)
trainer = pl.Trainer( logger=pl_loggers.TensorBoardLogger('./logs/'),
gpus=1,
max_epochs=1000,
checkpoint_callback=True,
callbacks=[early_stop_callback,checkpoint_callback] )
trainer.fit(model, data)
def eval_models(device='cuda'):
preds = []
model_names = ('LSTM',)#'UNET',
NUM_CV_SPLITS = 1
for i in range(NUM_CV_SPLITS):
data = VantilatorDataModule(dataset='test', CV_split=i)
for model_name in model_names:
print(model_name,i)
if model_name == 'UNET':
model = UNet.load_from_checkpoint('./checkpoints/UNET_CV5{}.ckpt'.format(i), n_channels=data.series_input.shape[1])
else:
model = LSTM.load_from_checkpoint('./checkpoints/LSTM_CV5{}.ckpt'.format(i), n_channels=data.series_input.shape[1])
model.to(device)
model.eval()
output = []
for idx, batch in enumerate(data.data_loader_test):
output.append( model(batch[0].to(device)).detach().cpu().numpy().squeeze() / (NUM_CV_SPLITS*len(model_names)) )
preds.append(np.vstack(output))
preds = np.add(preds)
return preds.flatten()[data.indices.astype(bool).flatten()]
if __name__ == '__main__':
for model_name in 'LSTM','UNET':
for i in range(5):
print('\n',model_name,i)
fit_data(model_name, i)
preds = eval_models()
submission = pd.DataFrame({ 'id': np.arange(len(preds))+1, 'pressure': preds })
submission.to_csv('submission.csv', index=False)
| [
"codefluence@gmail.com"
] | codefluence@gmail.com |
b3064ad095c702ca5a032d832b892a7c300da246 | 1ba3727a813280163b8442a8a5cd2de4686f716f | /spectra_audio1.py | f2cf52aeb4c0cc36990ff934992a76cc1c1b6bfd | [] | no_license | antonioam82/VOCODER | e503934bbff2e930ddbd06099b9310f7fff88ade | e113822e2126d78f3d34f866ed13afa491b3f25a | refs/heads/main | 2023-07-21T12:33:29.789855 | 2021-08-28T21:09:32 | 2021-08-28T21:09:32 | 400,249,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | import pyaudio
import struct
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib import style
from tkinter import TclError
style.use('ggplot')
CHUNK = 1024 * 4
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
p = pyaudio.PyAudio()
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK
)
x = np.arange(0, 2 * CHUNK, 2)
fig, ax = plt.subplots(1, figsize=(15,7))
line, = ax.plot(x, np.random.rand(CHUNK), '-', lw=2)
ax.set_ylim(0, 255)
ax.set_xlim(0, 2 * CHUNK)
plt.setp(ax, xticks=[0, CHUNK, 2 * CHUNK], yticks=[0, 128, 255])
plt.show(block=False)
while True:
data = stream.read(CHUNK)
data_int = struct.unpack(str(2*CHUNK)+'B',data)
data_np = np.array(data_int, dtype='b')[::2]+128
line.set_ydata(data_np)
try:
fig.canvas.draw()
fig.canvas.flush_events()
#frame_count += 1
except TclError:
# calculate average frame rate
#frame_rate = frame_count / (time.time() - start_time)
#print('stream stopped')
#print('average frame rate = {:.0f} FPS'.format(frame_rate))
break
| [
"noreply@github.com"
] | antonioam82.noreply@github.com |
e5695f05dca32ee4a44d4f6f0415056694fb4748 | 45de13a618813455a3ea1e65c5dd31066b311cd7 | /llluiop/0016/number.py | fe56ace640e2080b04b956cfb56d9bbe23f65c2d | [] | permissive | luhralive/python | bbee0a3e7d0ac9845db484595362bba41923c2a4 | b74bdc4c7bc8e75aee9530c27d621a773a71ac67 | refs/heads/master | 2020-05-19T12:48:17.144882 | 2019-05-05T11:53:46 | 2019-05-05T11:53:46 | 185,023,995 | 1 | 0 | MIT | 2019-05-05T11:45:42 | 2019-05-05T11:45:41 | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python
import xlwt
import json
def load_data(filepath):
f = open(filepath, "r")
return json.load(f)
def write_data_to_xls(data):
xls = xlwt.Workbook()
sheet = xls.add_sheet("number")
for i in range(len(data)):
for j in range(len(data[i])):
sheet.write(i, j, data[i][j])
xls.save('number.xls')
if __name__ == '__main__':
data = load_data("number.txt")
write_data_to_xls(data)
| [
"290522165@qq.com"
] | 290522165@qq.com |
6efba3c50f369d7a1de46821a7f56465d60273e9 | 7022a87984476aa53c52c7dccccd8acbb77565ed | /chapter4/first_numbers.py | 7005c0663de1e607f6b542d04aee88a345de9454 | [] | no_license | ChengYaoYan/python-crash-course | fb9892aaa0c496f1c08bf3589f12e87d7edec570 | 498ec1e00a45d50e9e57a036152775ddb2ff5517 | refs/heads/main | 2023-03-14T05:41:33.045307 | 2021-02-27T06:58:23 | 2021-02-27T06:58:23 | 341,420,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | for value in range(1, 5):
print(value)
numbers = list(range(1, 5))
print(numbers)
print(numbers)
| [
"1526695730@qq.com"
] | 1526695730@qq.com |
94e98751e9be3a199fbd00c203265b467571b82c | 795de2e7619a6937a8b97b95a1c117cc2aa8beac | /alien_invasion.py | 36d064f11cada2c5f44e18f6a6626c90efff58e8 | [] | no_license | zhangjialepc/Alien | b1ce39d736035e700662a4b708845615b1beb648 | 4a571a6682c20ea4f0bc5a6b91a872aa33fcbbea | refs/heads/master | 2020-08-18T14:39:39.547851 | 2019-10-17T13:55:24 | 2019-10-17T13:55:24 | 215,802,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | import pygame
from settings import Settings
from ship import Ship
import game_functions as gf
from pygame.sprite import Group
from game_stats import GameStats
from button import Button
from scoreboard import Scoreboard
def run_game():
pygame.init()
#窗口大小和名字
ai_settings=Settings()
screen=pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))
pygame.display.set_caption('Alien Invsion')
#创建PLAY按钮
play_button = Button(ai_settings,screen,'Play')
# 创建一个用于存储游戏统计信息的实例
stats = GameStats(ai_settings)
sb = Scoreboard(ai_settings,screen,stats)
#创建一个飞船
ship = Ship(ai_settings,screen)
#创建一个存储子弹的编组
bullets = Group()
aliens = Group()
#创建一个外星人
#alien = Alien(ai_settings,screen)
# 创建外星人群
gf.create_fleet(ai_settings,screen,ship,aliens)
#开始游戏主循环
while True:
#监视键盘和鼠标事件
gf.check_events(ai_settings,screen,stats,sb,play_button,ship,aliens,bullets)
if stats.game_active:
ship.update()
gf.update_bullets(ai_settings,screen,stats,sb,ship,
aliens,bullets)
gf.update_aliens(ai_settings,stats,screen,ship,aliens,bullets)
gf.update_screen(ai_settings,screen,stats,sb,ship,aliens,
bullets,play_button)
run_game()
| [
"noreply@github.com"
] | zhangjialepc.noreply@github.com |
67869e6099abddcfe5402283bfdc577b4021833d | d0a11bddb429ba385beb4c50a7c95efe8219631c | /Augmented Reality Final.py | cb60e82cfebb1e942eca6031972368984221a210 | [] | no_license | Sakshee5/Augmented-Reality-using-OpenCV | a659b9df0f18f03f1ea72a0541946ad45adf0caf | 28e72a1f62482a74ab0f60e518ac3756a195cc8c | refs/heads/main | 2023-04-21T04:49:11.664808 | 2021-05-20T13:22:16 | 2021-05-20T13:22:16 | 369,174,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,768 | py | """
Uses code from Feature Detection.py and Overlaying Image on Image.py
"""
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
imgTarget = cv2.imread('C:/Users/Sakshee/Pictures/cards1.jpg')
myVid = cv2.VideoCapture('C:/Users/Sakshee/Videos/test_video.mp4')
# Boolean to tell us whether we have the target in our image or not
detection = False
# keep a count of frames we have displayed from our video
frameCounter = 0
success, imgVideo = myVid.read()
hT, wT, cT = imgTarget.shape
imgVideo = cv2.resize(imgVideo, (wT, hT))
orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(imgTarget, None)
def stackImages(imgArray, scale, labels=[]):
"""
Function to stack images togther in the form of an array so that only one output window contains all the results
ARGUMENTS:
imgArray - example [[img1, img2, ...imgn],[img1, img2, ...imgn], ...]
scale - to scale the output window
labels - label for each image to be displayed in the same format as imgArray
RETURNS: stacked image
"""
sizeW = imgArray[0][0].shape[1]
sizeH = imgArray[0][0].shape[0]
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
imgArray[x][y] = cv2.resize(imgArray[x][y], (sizeW, sizeH), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((sizeH, sizeW, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
hor_con[x] = np.concatenate(imgArray[x])
ver = np.vstack(hor)
ver_con = np.concatenate(hor)
else:
for x in range(0, rows):
imgArray[x] = cv2.resize(imgArray[x], (sizeW, sizeH), None, scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
hor_con = np.concatenate(imgArray)
ver = hor
if len(labels) != 0:
eachImgWidth = int(ver.shape[1] / cols)
eachImgHeight = int(ver.shape[0] / rows)
print(eachImgHeight)
for d in range(0, rows):
for c in range(0, cols):
cv2.rectangle(ver, (c * eachImgWidth, eachImgHeight * d),
(c * eachImgWidth + len(labels[d]) * 13 + 27, 30 + eachImgHeight * d), (255, 255, 255),
cv2.FILLED)
cv2.putText(ver, labels[d], (eachImgWidth * c + 10, eachImgHeight * d + 20), cv2.FONT_HERSHEY_COMPLEX,
0.7, (255, 0, 255), 2)
return ver
while True:
success, imgWebcam = cap.read()
imgAug = imgWebcam.copy()
kp2, des2 = orb.detectAndCompute(imgWebcam, None)
if detection == False:
# sets the video back to frame zero
myVid.set(cv2.CAP_PROP_POS_FRAMES, 0)
frameCounter = 0
else:
# checks whether the video has reached it maximum number of frames. If yes, then it resets it so that the video can play again
if frameCounter == myVid.get(cv2.CAP_PROP_FRAME_COUNT):
myVid.set(cv2.CAP_PROP_POS_FRAMES, 0)
frameCounter = 0
success, imgVideo = myVid.read()
imgVideo = cv2.resize(imgVideo, (wT, hT))
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
if m.distance < 0.75 * n.distance:
good.append(m)
print(len(good))
imgFeatures = cv2.drawMatches(imgTarget, kp1, imgWebcam, kp2, good, None, flags=2)
if len(good) > 20:
detection = True
srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5)
print(matrix)
pts = np.float32([[0, 0], [0, hT], [wT, hT], [wT, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, matrix)
img2 = cv2.polylines(imgWebcam, [np.int32(dst)], True, (255, 0, 255), 3)
imgWarp = cv2.warpPerspective(imgVideo, matrix, (imgWebcam.shape[1], imgWebcam.shape[0]))
# entire webcam image as black
maskNew = np.zeros((imgWebcam.shape[0], imgWebcam.shape[1]), np.uint8)
# fill the space where the image has been found to white
cv2.fillPoly(maskNew, [np.int32(dst)], (255, 255, 255))
# we inverse our mask because we'll be overlaying images using bitwise_and and bitwise_or next
# we need to make sure which region (image or background) is white and which is black to overlay it accordingly
maskInv = cv2.bitwise_not(maskNew)
# as declared earlier we have imgAug as the webcam copy. By the command we are filling up all the white region with the actual
# background
imgAug = cv2.bitwise_and(imgAug, imgAug, mask=maskInv)
imgAug = cv2.bitwise_or(imgWarp, imgAug)
imgStacked = stackImages(([imgWebcam, imgTarget, imgVideo, imgFeatures], [imgAug, imgWarp, maskNew, maskInv]), 0.5)
imgStacked = cv2.resize(imgStacked, (1000, 600))
cv2.imshow('imgStacked', imgStacked)
cv2.waitKey(1)
# iterating the frame counter so that it can check when it reaches the end and replays
frameCounter += 1
else:
print('Could not find enough relevant matches therefore imgStacked not defined') | [
"noreply@github.com"
] | Sakshee5.noreply@github.com |
33bb984693a0e46d635446d8d5cb37dbc005a58d | 219588f6643881fb75b086fed64424db3b2204b7 | /src/scripts/licensecompare.py | 004352e71872a3ffb39f671e0389d3d0871705ed | [
"Apache-2.0"
] | permissive | armijnhemel/binaryanalysis | 910ded34a7c0be52dad18e0c8bf36b524b7fb94f | ea97b6b7617128ccf7cfa19244b91675d9bf66df | refs/heads/master | 2022-06-28T23:51:52.842978 | 2022-06-14T19:43:37 | 2022-06-14T19:43:37 | 68,471,787 | 81 | 54 | null | null | null | null | UTF-8 | Python | false | false | 7,935 | py | #!/usr/bin/env python
# Binary Analysis Tool
# Copyright 2014-2015 Armijn Hemel for Tjaldur Software Governance Solutions
# Licensed under Apache 2.0, see LICENSE file for details
'''
This script uses a very crude approach to compare license classifications made
with Ninka and FOSSology. It is meant to find where Ninka and FOSSology differ
and to improve both.
'''
import sys
import sqlite3
from optparse import OptionParser
from multiprocessing import Pool
# two hashes with results that are equivalent
ninka_to_fossology = { 'LesserGPLv2+': 'LGPL-2.0+'
, 'BSD3': 'BSD-3-Clause'
, 'boostV1Ref': 'BSL-1.0'
}
fossology_to_ninka = { 'No_license_found': 'NONE'
, 'GPL-1.0': 'GPLv1'
, 'GPL-1.0+': 'GPLv1+'
, 'GPL-2.0': 'GPLv2'
, 'GPL-2.0+': 'GPLv2+'
, 'GPL-3.0': 'GPLv3'
, 'GPL-3.0+': 'GPLv3+'
, 'LGPL-2.0': 'LibraryGPLv2'
, 'LGPL-2.0+': 'LibraryGPLv2+'
, 'LGPL-2.1': 'LesserGPLv2.1'
, 'LGPL-2.1+': 'LesserGPLv2.1+'
, 'LGPL-3.0': 'LesserGPLv3'
, 'LGPL-3.0+': 'LesserGPLv3+'
, 'Apache-1.0': 'Apachev1.0'
, 'Apache-1.1': 'Apachev1.1'
, 'Apache-2.0': 'Apachev2'
, 'BSL-1.0': 'boostV1'
, 'MPL-1.0': 'MPLv1_0'
, 'FTL': 'FreeType'
, 'PHP-3.01': 'phpLicV3.01'
, 'Postfix': 'Postfix'
, 'QPL-1.0': 'QTv1'
, 'MPL-1.1': 'MPLv1_1'
, 'Zend-2.0': 'zendv2'
, 'NPL-1.1': 'NPLv1_1'
, 'BSD-2-Clause': 'spdxBSD2'
, 'BSD-3-Clause': 'spdxBSD3'
, 'EPL-1.0': 'EPLv1'
, 'Artifex': 'artifex'
, 'CDDL': 'CDDLic'
, 'Public-domain': 'publicDomain'
, 'Public-domain-ref': 'publicDomain'
, 'IPL': 'IBMv1'
, 'Intel': 'IntelACPILic'
, 'MX4J-1.0': 'MX4JLicensev1'
, 'Beerware': 'BeerWareVer42'
, 'CPL-1.0': 'CPLv1'
, 'Sun': 'sunRPC'
, 'SunPro': 'SunSimpleLic'
, 'W3C-IP': 'W3CLic'
, 'Artistic-1.0': 'ArtisticLicensev1'
}
def lookup((db, sha)):
conn = sqlite3.connect(db)
cursor = conn.cursor()
licenses = cursor.execute("select distinct license, scanner from licenses where checksum=?", sha).fetchall()
cursor.close()
conn.close()
# 2 licenses were found, one from Ninka, one from FOSSology
if len(licenses) == 2:
if licenses[0][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[1][0]):
if fossology_to_ninka[licenses[1][0]] == licenses[0][0]:
status = 'agreed'
licenses = []
else:
if ninka_to_fossology.has_key(licenses[0][0]):
if ninka_to_fossology[licenses[0][0]] == licenses[1][0]:
status = 'agreed'
licenses = []
else:
status = "difference"
else:
status = "difference"
else:
status = "difference"
elif licenses[1][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[0][0]):
if fossology_to_ninka[licenses[0][0]] == licenses[1][0]:
status = 'agreed'
licenses = []
else:
if ninka_to_fossology.has_key(licenses[1][0]):
if ninka_to_fossology[licenses[0][0]] == licenses[1][0]:
status = 'agreed'
licenses = []
else:
status = "difference"
else:
status = "difference"
else:
status = "difference"
# more licenses were found. Ignore for now.
else:
status = 'unscanned'
licenses = []
return (status, sha[0], licenses)
def main(argv):
parser = OptionParser()
parser.add_option("-l", "--licensedb", action="store", dest="licenses", help="path to licensing database", metavar="FILE")
parser.add_option("-d", "--database", action="store", dest="db", help="path to master database", metavar="FILE")
(options, args) = parser.parse_args()
if options.licenses == None:
parser.error("Need path to licensing database")
try:
conn = sqlite3.connect(options.licenses)
except:
print >>sys.stderr, "Can't open licensing database"
sys.exit(1)
if options.db == None:
parser.error("Need path to master database")
try:
dbconn = sqlite3.connect(options.db)
except:
print >>sys.stderr, "Can't open master database"
sys.exit(1)
cursor = conn.cursor()
notsame = []
cursor.execute("select distinct checksum from licenses")
sha256s = cursor.fetchmany(10000)
unscannedcounter = 0
agreedcounter = 0
dbcursor = dbconn.cursor()
# create a pool of workers since all this work can be done in parallel
pool = Pool()
# two dictionaries that list per license scanner per license
# what the other license scanner thinks happens
ninkas = {}
fossologys = {}
while sha256s != []:
tmpsha256 = map(lambda x: (options.licenses, x), sha256s)
results = pool.map(lookup, tmpsha256, 1)
interesting = filter(lambda x: x[0] == 'difference', results)
agreed = filter(lambda x: x[0] == 'agreed', results)
agreedcounter += len(agreed)
unscanned = filter(lambda x: x[0] == 'unscanned', results)
unscannedcounter += len(unscanned)
for i in interesting:
interestingfile = dbconn.execute("select filename from processed_file where checksum=?", (i[1],)).fetchone()
if interestingfile == None:
# error in the database
continue
# checksum, result of Ninka and then result of FOSSology
(sha256, licenses) = i[1:]
print "%s -- %s -- %s -- %s" % (sha256, licenses[0][0], licenses[1][0], interestingfile[0])
if ninkas.has_key(licenses[0][0]):
if ninkas[licenses[0][0]].has_key(licenses[1][0]):
ninkas[licenses[0][0]][licenses[1][0]] += 1
else:
ninkas[licenses[0][0]][licenses[1][0]] = 1
else:
ninkas[licenses[0][0]] = {}
ninkas[licenses[0][0]][licenses[1][0]] = 1
if fossologys.has_key(licenses[1][0]):
if fossologys[licenses[1][0]].has_key(licenses[0][0]):
fossologys[licenses[1][0]][licenses[0][0]] += 1
else:
fossologys[licenses[1][0]][licenses[0][0]] = 1
else:
fossologys[licenses[1][0]] = {}
fossologys[licenses[1][0]][licenses[0][0]] = 1
sha256s = cursor.fetchmany(10000)
pool.close()
dbcursor.close()
dbconn.close()
cursor.close()
conn.close()
print "unscanned:", unscannedcounter
print "agreed:", agreedcounter
print
for n in ninkas:
for f in ninkas[n]:
print "NINKA", n, f, ninkas[n][f]
print
for n in fossologys:
for f in fossologys[n]:
print "FOSSOLOGY", n, f, fossologys[n][f]
if __name__ == "__main__":
main(sys.argv)
| [
"armijn@tjaldur.nl"
] | armijn@tjaldur.nl |
8b46157906b8047c028a01bf4f357b0433113dd4 | 614a14c538492e39dd0483cefc424527ee18791d | /DS/lab2/q11.py | 4995fe247294803d7c9b188e61703dcf74a3abb1 | [] | no_license | Arunima123/SEM-6 | 7db057c5df3a3c3eaca76c9218c7131245dbead8 | 3470c30883389b735e06c3420a4f58d1cf0836d8 | refs/heads/main | 2023-05-30T23:35:03.113372 | 2021-06-13T10:45:34 | 2021-06-13T10:45:34 | 339,344,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | x=[11, -21, 0, 45, 66, -93]
i=0
while i<len(x):
if x[i]<0:
print x[i]
i+=1 | [
"noreply@github.com"
] | Arunima123.noreply@github.com |
a66504ee17891adad91de64071af684dfaf621da | d8d1daed8162cc70a989d696adece741fad39632 | /amplify/agent/collectors/nginx/meta.py | e098d7e1a8372510d90ef31332dc67721c05518e | [
"BSD-2-Clause"
] | permissive | sakomws/nginx-amplify-agent | 16085a236f5c3eaf0e4b53cbb935c4fab3ce079b | e33eb8724a7fc06cc44137a23653da11c8c07e82 | refs/heads/master | 2020-04-03T08:10:14.812192 | 2018-09-26T13:49:52 | 2018-09-26T13:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,844 | py | # -*- coding: utf-8 -*-
import re
import psutil
from amplify.agent.collectors.abstract import AbstractMetaCollector
from amplify.agent.common.context import context
from amplify.agent.common.util import subp
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class NginxMetaCollector(AbstractMetaCollector):
dpkg_s_re = re.compile('([\w\-\.]+)\s*:\s*(.+)')
dpkg_l_re = re.compile('([\d\w]+)\s+([\d\w\.\-]+)\s+([\d\w\.\-\+~]+)\s')
short_name = 'nginx_meta'
def __init__(self, **kwargs):
super(NginxMetaCollector, self).__init__(**kwargs)
self.register(
self.open_ssl,
self.find_packages
)
if not self.in_container:
self.register(
self.nginx_uptime
)
@property
def default_meta(self):
meta = {
'type': 'nginx', # Hard coded since only 1 'nginx' object in backend.
'local_id': self.object.local_id,
'root_uuid': context.uuid,
'running': True,
'display_name': self.object.display_name,
'stub_status_enabled': self.object.stub_status_enabled,
'status_module_enabled': self.object.plus_status_enabled,
'stub_status_url': self.object.stub_status_url,
'plus_status_url': self.object.plus_status_external_url or self.object.plus_status_internal_url,
'version': self.object.parsed_v['version'],
'parent_hostname': context.app_config['credentials']['imagename'] or context.hostname,
'plus': self.object.parsed_v['plus'],
'configure': self.object.parsed_v['configure'],
'packages': {},
'path': {'bin': self.object.bin_path, 'conf': self.object.conf_path},
'built_from_source': False,
'ssl': self.object.parsed_v['ssl']
}
if not self.in_container:
meta['start_time'] = None
meta['pid'] = self.object.pid
return meta
def open_ssl(self):
"""
Old nginx uses standard openssl library - this method tries to find its version
"""
if self.meta['ssl']['built'] is None:
openssl_out, _ = subp.call('openssl version')
if openssl_out[0]:
version = openssl_out[0].split()[1]
self.meta['ssl'] = {
'built': ['openssl', version],
'run': ['openssl', version]
}
def find_packages(self):
"""
Tries to find a package for the running binary
"""
package = None
# find which package contains our binary
dpkg_s_out, dpkg_s_err = subp.call('dpkg -S %s' % self.object.bin_path, check=False)
for line in dpkg_s_out:
kv = re.match(self.dpkg_s_re, line)
if kv:
package = kv.group(1)
break
if 'no_path' in dpkg_s_err[0]:
self.meta['built_from_source'] = True
if package:
# get version
all_installed_packages = {}
dpkg_l_out, _ = subp.call("dpkg -l | grep nginx")
for line in dpkg_l_out:
gwe = re.match(self.dpkg_l_re, line)
if gwe:
if gwe.group(2).startswith('nginx'):
all_installed_packages[gwe.group(2)] = gwe.group(3)
if package in all_installed_packages:
self.meta['packages'] = {package: all_installed_packages[package]}
def nginx_uptime(self):
""" collect info about start time """
master_process = psutil.Process(self.object.pid)
self.meta['start_time'] = int(master_process.create_time()) * 1000
class GenericLinuxNginxMetaCollector(NginxMetaCollector):
def find_packages(self):
pass
class DebianNginxMetaCollector(NginxMetaCollector):
pass
class GentooNginxMetaCollector(NginxMetaCollector):
def find_packages(self):
""" Find a package with running binary """
equery_out, equery_err = subp.call(
'equery --no-color --no-pipe --quiet belongs --early-out %s' % self.object.bin_path,
check=False
)
if equery_out[0]:
category, package = equery_out[0].split('/', 1)
name, version = package.split('-', 1)
if name == 'nginx':
self.meta['packages'] = {category + '/nginx': version}
elif not equery_err[0]:
self.meta['built_from_source'] = True
class CentosNginxMetaCollector(NginxMetaCollector):
def find_packages(self):
""" Find a package with running binary """
package, version = None, None
rpm_out, rpm_err = subp.call(
'rpm -qf %s ' % self.object.bin_path + '--queryformat="%{NAME} %{VERSION}-%{RELEASE}.%{ARCH}\\n"',
check=False
)
if rpm_out and rpm_out[0]:
package, version = rpm_out[0].split()
if 'is not owned by' in rpm_err[0]:
self.meta['built_from_source'] = True
if package:
self.meta['packages'] = {package: version}
class FreebsdNginxMetaCollector(NginxMetaCollector):
def find_packages(self):
""" Find a package with running binary """
# find which package contains our binary
pkg_out, _ = subp.call('pkg which -p %s' % self.object.bin_path, check=False)
if 'was installed by package ' in pkg_out[0]:
# get version
package, version = pkg_out[0].split()[-1].split('-', 1)
self.meta['packages'] = {package: version}
elif 'was not found in the database' in pkg_out[0]:
self.meta['built_from_source'] = True
| [
"dedm@nginx.com"
] | dedm@nginx.com |
c6bc62ff853e6e030b8cf6fab25d29e502ba9a0b | 5bc6d2f91576ef8b3c75028951860495aafad1cb | /src/fvm/test/DIALECTRIC_CHARGING/testDielectricCharging.py | 0302f0704ea75c3e167f55832f2233202b25560e | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | jamesmloy/fvm | 7b4f1ed619de52bb8215623beb3f8b2c27f628d3 | 13ba38f73da7fd19fa9eb8e44c51b0fd9c882614 | refs/heads/master | 2021-01-13T12:54:51.994777 | 2016-12-11T21:06:57 | 2016-12-11T21:06:57 | 72,786,954 | 0 | 0 | null | 2016-11-03T21:07:33 | 2016-11-03T21:07:33 | null | UTF-8 | Python | false | false | 9,334 | py | #!/usr/bin/env python
"""
the scipt is used to test the mesh dependency on tunneling model
it uses the new set of parameter from Sambit in Sep2010
"""
### import modules ###
import pdb
import sys
import fvm
fvm.set_atype('double')
from math import *
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
import fvm.models_atyped_double as models
import fvm.exporters_atyped_double as exporters
from FluentCase import FluentCase
from fvm.fvmbaseExt import VecD3
import time
from optparse import OptionParser
from mpi4py import MPI
def usage():
print __doc__
sys.exit(1)
# map between fvm, tecplot, and xdmf types
etype = {
'tri' : 1,
'quad' : 2,
'tetra' : 3,
'hexa' : 4
}
tectype = {
'tri' : 'FETRIANGLE',
'quad' : 'FEQUADRILATERAL',
'tetra' : 'FETETRAHEDRON',
'hexa' : 'FEBRICK'
}
xtype = {
'tri' : 'Triangle',
'quad' : 'Quadrilateral',
'tetra' : 'Tetrahedron',
'hexa' : 'Hexahedron'
}
parser = OptionParser()
parser.set_defaults(type='tri')
parser.add_option("--type", help="'tri'[default], 'quad', 'hexa', or 'tetra'")
parser.add_option("--xdmf", action='store_true', help="Dump data in xdmf")
parser.add_option("--time","-t",action='store_true',help="Print timing information.")
(options, args) = parser.parse_args()
if len(args) != 1:
usage()
### mesh ID ###
topID = 4
botID = 5
sideID = 3
interiorID = 2
normal_direction = 2 # z direction is the charging direction
nLevel = 1000
nTrap = 2
### input and output ###
reader = FluentCase(args[0])
### physics parameters ###
dielectric_constant = 7.9
dielectric_thickness = 200e-9
applied_voltage = 100
dielectric_ionization = 3.0
dielectric_bandgap = 5.0
substrate_workfunction = 5.0
membrane_workfunction = 5.0
optical_dielectric_constant = 4.0
electron_trapdepth = 1.5
electron_trapdensity = 3e25
OP_temperature = 300
electron_effmass = 0.5
poole_frenkel_emission_frequency = 1e11
electron_capture_cross = 1e-22
electron_mobility = 50.0e-4
electron_saturation_velocity = 1.0e5
### run parameters ###
timeStep = 1e-9
timeScale = 1.1
numIterationsPerStep = 3
numTimeSteps = 100
globalTime = 0
globalCount = 0
saveFrequency = 10
totalChargeFile = open("totalCharges.dat", "w")
#targetChargeFile = open("targetCharges.dat","w")
#========================================================================================#
#----------------------------------------------------------#
def unsteadyAdvance(globalCount, globalTime, timeStep):
if globalCount == 0:
elecModel.calculateEquilibriumParameters()
for i in range(0, numTimeSteps):
target = 1000-5
(chargeSumT, chargeSumC) = calculateTotalCharges()
saveTotalCharges(globalTime, chargeSumT/1e6, chargeSumC/1e6)
#saveTargetCharges(globalTime, target)
"""
if (globalCount % saveFrequency == 0):
saveChargeProfile(globalCount)
savePotentialProfile(globalCount)
"""
elec_options['timeStep'] = timeStep
try:
elecModel.advance(numIterationsPerStep)
except KeyboardInterrupt:
break
globalTime += timeStep
globalCount += 1
print "advaning to time %i\t %e" % (globalCount,globalTime)
elecModel.updateTime()
timeStep *= timeScale
#-----------------------------------------------------------#
def calculateTotalCharges():
sumC = 0.0
sumT = 0.0
cells = meshes[0].getCells()
nCells = cells.getSelfCount()
charge = elecFields.charge[cells].asNumPyArray()
volume = geomFields.volume[cells].asNumPyArray()
for i in range(0, nCells):
sumT = sumT + charge[i][0] + charge[i][1]
sumC = sumC + charge[i][2]
return sumT/nCells, sumC/nCells
#-----------------------------------------------------------#
def saveTotalCharges(time, sumC, sumT):
totalChargeFile.write('%e\t%e\t%e\n' % (time, sumC, sumT))
totalChargeFile.flush()
def saveTargetCharges(time, target):
cells = meshes[0].getCells()
nCells = cells.getSelfCount()
charge = elecFields.charge[cells].asNumPyArray()
targetChargeFile.write('%e\t%e\t%e\t%e\n' % (time, charge[target][0], charge[target-1][0], charge[target+1][0]))
targetChargeFile.flush()
#-----------------------------------------------------------#
def saveChargeProfile(nstep):
print "saving charge profile"
fileName = outputDir + str(nstep) + "_charge.dat"
cells = meshes[0].getCells()
nCells = cells.getSelfCount()
charge = elecFields.charge[cells].asNumPyArray()
file = open(fileName, "w")
for i in range (nCells-1, 0-1, -1):
file.write('%i\t%e\t%e\t%e\n' % (i, charge[i][0], charge[i][1], charge[i][2]))
file.close()
def savePotentialProfile(nstep):
print "saving potential"
fileName = outputDir + str(nstep) + "_potential.dat"
cells = meshes[0].getCells()
nCells = cells.getSelfCount()
potential = elecFields.potential[cells].asNumPyArray()
file = open(fileName, "w")
for i in range (0, nCells):
file.write('%i\t%e\n' % (i, potential[i]))
file.close()
#========================================================================================#
### read in meshes ###
reader.read()
meshes = reader.getMeshList()
geomFields = models.GeomFields('geom')
metricsCalculator = models.MeshMetricsCalculatorA(geomFields,meshes)
metricsCalculator.init()
elecFields = models.ElectricFields('elec')
elecModel = models.ElectricModelA(geomFields,elecFields,meshes)
vcMap = elecModel.getVCMap()
dielectricID = interiorID
for i,vc in vcMap.iteritems():
vc.vcType = "dielectric"
vc['dielectric_constant'] = dielectric_constant
# setup boundary conditions #
#--- potential BC: top = V; bot = 0; side = symmetry
#--- charges BC: zero Dirichlet BC applied everywhere
membrane_voltage = applied_voltage
substrate_voltage = 0.0
bcMap = elecModel.getBCMap()
for i,bc in bcMap.iteritems():
if i == topID:
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = membrane_voltage
if i == botID:
bc.bcType = "SpecifiedPotential"
bc['specifiedPotential'] = substrate_voltage
if i == sideID:
bc.bcType = "Symmetry"
### setup initial condition ###
elec_options = elecModel.getOptions()
elec_options['initialPotential'] = 0
elec_options['initialTotalCharge'] = 0
elec_options['timeStep'] = timeStep
### setup enable options ###
elec_options.electrostatics_enable = True
elec_options.chargetransport_enable = True
elec_options.timeDiscretizationOrder = 1
elec_options.transient_enable = True
elec_options.injection_enable = True
elec_options.tunneling_enable = True
elec_options.emission_enable = True
elec_options.capture_enable = True
elec_options.drift_enable = True
elec_options.trapbandtunneling_enable = True
elec_options.diffusion_enable = False
### setup physics constants ###
elec_constants = elecModel.getConstants()
elec_constants['dielectric_thickness'] = dielectric_thickness
elec_constants['voltage'] = applied_voltage
elec_constants['dielectric_ionization'] = dielectric_ionization
elec_constants['dielectric_bandgap'] = dielectric_bandgap
elec_constants['substrate_workfunction'] = substrate_workfunction
elec_constants['membrane_workfunction'] = membrane_workfunction
elec_constants['substrate_voltage'] = substrate_voltage
elec_constants['membrane_voltage'] = membrane_voltage
elec_constants['optical_dielectric_constant'] = optical_dielectric_constant
elec_constants['OP_temperature'] = OP_temperature
elec_constants['electron_effmass'] = electron_effmass
elec_constants['poole_frenkel_emission_frequency'] = poole_frenkel_emission_frequency
elec_constants['electron_capture_cross'] = electron_capture_cross
elec_constants['electron_mobility'] = electron_mobility
elec_constants['electron_saturation_velocity'] = electron_saturation_velocity
elec_constants['substrate_id'] = botID
elec_constants['membrane_id'] = topID
elec_constants['nLevel'] = nLevel
elec_constants['normal_direction'] = normal_direction
elec_constants['nTrap'] = nTrap
elec_constants.electron_trapdepth.push_back(electron_trapdepth);
elec_constants.electron_trapdensity.push_back(electron_trapdensity)
elec_constants.electron_trapdepth.push_back(1.5);
elec_constants.electron_trapdensity.push_back(electron_trapdensity)
### setup linear solve options ###
pPC = fvmbaseExt.AMG()
pPC.verbosity = 0
pSolver = fvmbaseExt.BCGStab()
pSolver.preconditioner = pPC
pSolver.relativeTolerance = 1e-20
pSolver.nMaxIterations = 100
pSolver.maxCoarseLevels=20
pSolver.absoluteTolerance = 1e-50
pSolver.verbosity=0
elec_options.electrostaticsLinearSolver = pSolver
cPC = fvmbaseExt.AMG()
cPC.verbosity = 0
cSolver = fvmbaseExt.BCGStab()
cSolver.preconditioner = cPC
cSolver.relativeTolerance = 1e-20
cSolver.nMaxIterations = 100
cSolver.maxCoarseLevels=20
cSolver.absoluteTolerance = 1e-50
cSolver.verbosity=0
elec_options.chargetransportLinearSolver = cSolver
elec_options.electrostaticsTolerance = 1e-20
elec_options.chargetransportTolerance = 1e-20
elec_options.printNormalizedResiduals = False
### advance loop ###
elecModel.init()
t1 = time.time()
unsteadyAdvance (globalCount, globalTime, timeStep)
t2 = time.time()
print '\nsolution time = %f' % (t2-t1)
| [
"yildirim@8639bca7-a847-0410-9c84-802de9f82a5b"
] | yildirim@8639bca7-a847-0410-9c84-802de9f82a5b |
74f16692b8eb0af17e8869801d90123abdc33d83 | 8f56bd8e069da0029e6039c176e0a8c65dd83b10 | /assignment2/assignment2.py | 1ffb7278b91a5420bf4940abde4df8a9be1cf9c8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | benhe119/IN4334-MiningSoftwareRepositories | 04d80f196d69ff8d229e7d5046d19ad1042996b0 | 207b0c91b68851320049d1ab902d7028a5523f4e | refs/heads/master | 2020-09-05T18:52:41.555269 | 2017-02-28T18:20:40 | 2017-02-28T18:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,824 | py | import sh # to interact with the shell
import re # for regular expressions
import requests # to download JIRA Bug list
import csv # to save the results
import time
from collections import Counter, defaultdict # useful structures
git = sh.git.bake(_cwd='lucene-solr')
# regex that matches path of Java files inside the core
core_regex = r'^lucene\/core\/src\/java\/org\/apache\/lucene.*?\.java$'
#########################################
# 1ST STEP ##############################
#########################################
start_date, end_date = "2013-01-01 00:00", "2013-12-31 23:59"
log_output = str(git('--no-pager', 'log', '--name-only', after=start_date,
before=end_date, pretty="format:[%H,%ai,%ae]"))
# output of git log will be:
#
# [commit_hash_1, author_timestamp_1, author_email_1]
# path/to/A.java
# path/to/B.java
#
# [commit_hash_2, author_timestamp_2, author_email_2]
# path/to/C.java
# path/to/D.java
splitted_output = (s.split('\n') for s in log_output.split('\n\n'))
# this structure is used to store all the results of the analysis
struct = {}
for commit_group in splitted_output:
i = 0
# empty commits appear on top of each commit group and we want to
# filter them out
while commit_group[i+1].startswith('['):
i += 1
commit_info = commit_group[i][1:-1] # remove parenthesis at start and end
commit_hash, author_date, author_email = commit_info.split(',')
changed_files = commit_group[i+1:]
# filter out files that are not Java or not in the core
changed_files = filter(lambda f: re.search(core_regex, f), changed_files)
for file_path in changed_files:
struct[(commit_hash, file_path)] = {
'author_date': author_date,
'author_email': author_email,
'bugs_info': {
'counters': defaultdict(int),
'lists': defaultdict(list)
}
}
def computeMetrics(counter, commit_author):
    """Compute ownership metrics from a contributions counter.

    counter maps contributor email -> contributed lines (or commits);
    commit_author is the email of the author of the commit under study.

    Returns a dict with the total/minor/major contributor counts
    (minor = at most 5% of all contributions, major = more than 5%),
    the ownership share of the top contributor, the commit author's own
    share (0 if absent from the counter), and whether the commit author
    is the top contributor.
    """
    total_contributors = len(counter)
    total_value = sum(counter.values())
    # Minor/major partition the contributor set, so one pass suffices:
    # major is just the complement of minor.
    minor_contributors = sum(1 for value in counter.values()
                             if value / total_value <= 0.05)
    major_contributors = total_contributors - minor_contributors
    commit_author_ratio = (counter[commit_author] / total_value
                           if commit_author in counter else 0)
    # max() returns the first maximal key in iteration order, matching
    # the original tie-breaking behaviour.
    max_value_contributor = max(counter, key=counter.get)
    ownership_best_contributor = counter[max_value_contributor] / total_value
    commit_author_is_best_contributor = max_value_contributor == commit_author
    return {
        'total_contributors': total_contributors,
        'minor_contributors': minor_contributors,
        'major_contributors': major_contributors,
        'ownership_best_contributor': ownership_best_contributor,
        'commit_author_ratio': commit_author_ratio,
        'commit_author_is_best_contributor': commit_author_is_best_contributor
    }
# line contributors metrics computation
for ((commit_hash, file_path), info) in struct.items():
try:
blame_out = str(git('--no-pager', 'blame', file_path,
commit_hash + '^1', '--line-porcelain'))
except sh.ErrorReturnCode_128:
# the file was not found at that specific commit, meaning that it still
# didn't exist. in this case we leave the line metrics empty
continue
# the output of git blame contains a contributors' email as many times as
# the number of lines that he has written, so we just search
# for all the emails in the output
line_contributors = re.findall(r'author-mail <(.+?)>', blame_out,
flags=re.M)
line_contributors_counter = Counter(line_contributors)
info['line_metrics'] = computeMetrics(line_contributors_counter,
info['author_email'])
#########################################
# 2ND STEP ##############################
#########################################
# commit contributors metrics computation
start_date = "2011-01-01 00:00"
for ((commit_hash, file_path), info) in struct.items():
end_date = info['author_date']
contributors = str(git('--no-pager', 'log', '--after="%s"' % start_date,
'--before=%s' % end_date, '--follow',
'--pretty=%ae', '--', file_path))
# output of log will be the list of authors of each commit one per line
# if the file was created in the commit that we're analyzing, the output
# will be empty. in that case we leave the commit metrics empty
if not contributors:
continue
# last item is always an empty string
commit_contributors = contributors.split('\n')[:-1]
commit_contributors_counter = Counter(commit_contributors)
info['commit_metrics'] = computeMetrics(commit_contributors_counter,
info['author_email'])
#########################################
# 3RD STEP ##############################
#########################################
start_date, end_date = "2013-01-01 00:00", "2016-01-01 00:00"
raw_revlist_output = str(git('rev-list', 'master', '--timestamp',
pretty='oneline', after=start_date,
before=end_date))
# output of revlist will be:
# unix_timestamp_1 commit_hash_1 title_1
# unix_timestamp_2 commit_hash_2 title_2
# ...
commits_3rd_step = ({'title': commit[52:],
'hash': commit[11:40],
'tstamp': commit[:10]}
for commit in raw_revlist_output.split('\n')[:-1])
# we have to get from the Jira REST API the lists of issue IDs that
# correspond to a bug. the API allows the retrieval of only 100
# results at time, meaning we have to repeat the request multiple
# times in order to get all the issue IDs
jira_api_url = 'https://issues.apache.org/jira/rest/api/2/search'
payload = {
'jql': 'project = LUCENE AND issuetype = Bug',
'fields': ['key']
}
first_results = requests.post(jira_api_url, json=payload).json()
maxResults = first_results['maxResults']
total = first_results['total']
# we store in a list the issue IDs that correspond to a bug
bugs_issue_ids = []
for startAt in range(0, total, maxResults):
payload = {
'jql': 'project = LUCENE AND issuetype = Bug',
'fields': ['key'],
'startAt': startAt
}
results = requests.post(jira_api_url, json=payload).json()
for issue in results['issues']:
bugs_issue_ids.append(issue['key'])
time.sleep(5) # in order not to trigger the rate limiter
for commit in commits_3rd_step:
post_release_bugs, dev_time_bugs = 0, 0
# we look first for a Jira issue id in the commit message
jira_match = re.search(r'LUCENE-\d{1,4}', commit['title'])
if jira_match and jira_match.group() in bugs_issue_ids:
post_release_bugs = 1
else:
# if we didn't find an issue id in the message, we look for a keyword
keywords = ('error', 'bug', 'fix', 'issue', 'mistake', 'incorrect',
'fault', 'defect', 'flaw', 'typo')
if any(keyword in commit['title'] for keyword in keywords):
dev_time_bugs = 1
bugs_induced_qty = dev_time_bugs + post_release_bugs
if bugs_induced_qty > 0:
# if the commit was a bugfix, we get the list of files that it changed
changed_files = str(git('--no-pager', 'show', '--name-only',
'--pretty=', commit['hash'])).split('\n')[:-1]
# output is list of changed files one per line
# we are interested only in Java files in the core.
# theoretically, a bug introduced in one of the Java core files
# could have been "propagated" outside of the core directory, for
# example if the buggy file was moved. we assume this is not the case
# to reduce the complexity of the analysis.
changed_files = filter(lambda f: re.search(core_regex, f),
changed_files)
for file_path in changed_files:
# after getting the list of files changed by the "bugfix" commit
# we need to know for each file which lines were removed,
# since we assume that those lines contained the bug
raw_show_output = str(git('--no-pager', 'show', '--no-color',
'--format=', '--unified=0',
commit['hash'], '--', file_path))
# the output of git show contains the removed line groups
# each one in the following format:
# @@ -X,Y +Z,W @@
# ..content of removed lines..
# where X is the start line of the removed group
# and Y is the number of lines removed.
# if Y = 0 no lines were removed.
# if Y is not present it means 1 line was removed.
# this regex matches the removed lines ranges
removed_linenumbers_regex = \
r'^@@ -(\d+)(?:,(\d+))? \+\d+(?:,\d+)? @@'
removed_lines_matches = re.finditer(removed_linenumbers_regex,
raw_show_output, flags=re.M)
# for convenience we add to a list the ranges of removed lines
removed_lines_ranges = []
for match in removed_lines_matches:
_start_line, _n_lines = match.groups()
start_line = int(_start_line)
n_lines = int(_n_lines) if _n_lines is not None else 1
if n_lines != 0:
end_line_included = start_line + n_lines - 1
removed_lines_ranges.append((start_line,
end_line_included))
# if no line was removed from the file we are analyzing, we skip
# to the next file
if not removed_lines_ranges:
continue
# now that we know the ranges of removed lines, we blame the file
# to understand from where each line comes from
blame_out = str(git('--no-pager', 'blame', '--line-porcelain',
commit['hash'] + '^1', '--', file_path))
# this regex matches the (commit, filepath) pair of each line
# where 'commit' is the commit hash in which the line was
# introduced and filepath is the original path of the file
commit_and_filename_regex = \
r'^([0-9a-f]{40}) \d+ (\d+)(?: \d+)?(?:\n.*?)+?filename (.+?)$'
commit_filename_matches = re.finditer(commit_and_filename_regex,
blame_out, flags=re.M)
# we assume that a (commit, filepath) pair can be the cause of
# just one bug in a file, hence we put the pairs in a set
buggy_commit_file_pairs = set()
for match in commit_filename_matches:
commit_hash, _line_n, file_path = match.groups()
line_n = int(_line_n)
# we iterate over all the lines of the file and add to the set
# only the ones that were removed
if any(start_line <= line_n <= end_line
for start_line, end_line in removed_lines_ranges):
buggy_commit_file_pairs.add((commit_hash, file_path))
for commit_file_pair in buggy_commit_file_pairs:
if commit_file_pair in struct:
bugs_info = struct[commit_file_pair]['bugs_info']
bugs_counters = bugs_info['counters']
bugs_lists = bugs_info['lists']
bugs_counters['dev_time_bugs'] += dev_time_bugs
bugs_counters['post_release_bugs'] += post_release_bugs
bugs_counters['bugs_induced_qty'] += bugs_induced_qty
bugs_lists['fix_commits_hashes'].append(commit['hash'])
bugs_lists['fix_commits_tstamps'].append(commit['tstamp'])
#########################################
# 4TH STEP ##############################
#########################################
with open('assignment2.csv', 'w', newline='') as csvfile:
fieldnames = ['commit_hash',
'file_name',
'directory_name',
'commit_author',
'timestamp',
'line_contributors_total',
'line_contributors_minor',
'line_contributors_major',
'line_contributors_ownership',
'line_contributors_author',
'line_contributors_author_owner',
'commit_contributors_total',
'commit_contributors_minor',
'commit_contributors_major',
'commit_contributors_ownership',
'commit_contributors_author',
'commit_contributors_author_owner',
'bugs_induced_qty',
'post_release_bugs',
'dev_time_bugs',
'fix_commits_hash',
'fix_commits_timestamp']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=',')
writer.writeheader()
for ((commit_hash, file_path), info) in struct.items():
splitted_filepath = file_path.split('/')
file_name = splitted_filepath[-1]
directory_name = '/'.join(splitted_filepath[:-1])
commit_author = info['author_email']
timestamp = info['author_date']
if 'line_metrics' in info:
line_metrics = info['line_metrics']
line_contributors_total = line_metrics['total_contributors']
line_contributors_minor = line_metrics['minor_contributors']
line_contributors_major = line_metrics['major_contributors']
line_contributors_ownership = \
line_metrics['ownership_best_contributor']
line_contributors_author = line_metrics['commit_author_ratio']
line_contributors_author_owner = \
line_metrics['commit_author_is_best_contributor']
else:
line_contributors_total, line_contributors_minor = '', ''
line_contributors_major, line_contributors_ownership = '', ''
line_contributors_author, line_contributors_author_owner = '', ''
if 'commit_metrics' in info:
commit_metrics = info['commit_metrics']
commit_contributors_total = commit_metrics['total_contributors']
commit_contributors_minor = commit_metrics['minor_contributors']
commit_contributors_major = commit_metrics['major_contributors']
commit_contributors_ownership = \
commit_metrics['ownership_best_contributor']
commit_contributors_author = commit_metrics['commit_author_ratio']
commit_contributors_author_owner = \
commit_metrics['commit_author_is_best_contributor']
else:
commit_contributors_total, commit_contributors_minor = '', ''
commit_contributors_major, commit_contributors_ownership = '', ''
commit_contributors_author = ''
commit_contributors_author_owner = ''
bugs_info = info['bugs_info']
if bugs_info['counters']['bugs_induced_qty'] > 0:
bugs_counters = bugs_info['counters']
bugs_lists = bugs_info['lists']
bugs_induced_qty = bugs_counters['bugs_induced_qty']
post_release_bugs = bugs_counters['post_release_bugs']
dev_time_bugs = bugs_counters['dev_time_bugs']
fix_commits_hash = '|'.join(bugs_lists['fix_commits_hashes'])
fix_commits_timestamp = \
'|'.join(bugs_lists['fix_commits_tstamps'])
else:
bugs_induced_qty = 0
post_release_bugs, dev_time_bugs = '', ''
fix_commits_hash, fix_commits_timestamp = '', ''
writer.writerow({
'commit_hash': commit_hash,
'file_name': file_name,
'directory_name': directory_name,
'commit_author': commit_author,
'timestamp': timestamp,
'line_contributors_total': line_contributors_total,
'line_contributors_minor': line_contributors_minor,
'line_contributors_major': line_contributors_major,
'line_contributors_ownership': line_contributors_ownership,
'line_contributors_author': line_contributors_author,
'line_contributors_author_owner': line_contributors_author_owner,
'commit_contributors_total': commit_contributors_total,
'commit_contributors_minor': commit_contributors_minor,
'commit_contributors_major': commit_contributors_major,
'commit_contributors_ownership': commit_contributors_ownership,
'commit_contributors_author': commit_contributors_author,
'commit_contributors_author_owner':
commit_contributors_author_owner,
'bugs_induced_qty': bugs_induced_qty,
'post_release_bugs': post_release_bugs,
'dev_time_bugs': dev_time_bugs,
'fix_commits_hash': fix_commits_hash,
'fix_commits_timestamp': fix_commits_timestamp
})
| [
"gasparini.lorenzo@gmail.com"
] | gasparini.lorenzo@gmail.com |
6873c7ac96dc578c60e5e46b523024d40259f84a | f26cf8123320e7dace97286128bfbacdb541c891 | /ProductInventory.py | 404ec564cd47452d07749af1fc5755723bc4dceb | [] | no_license | brandonlow/Inventory | e946cfde6da5e54fc9684adb8e69f478c034e782 | 67ecb4c08b23cd1e657651daa79b1b759defcbf6 | refs/heads/master | 2021-10-09T11:59:39.321038 | 2018-12-27T14:48:52 | 2018-12-27T14:48:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | item=[]
# Interactive inventory manager: `item` (defined above) accumulates
# [name, price, amount] records across menu iterations.
number = 0
while number != 4:
    number = int(input("\nWatchmen Security Supply\n 1. Add to Inventory\n 2. View Inventory\n 3. Product Report\n 4. Exit\n"))
    if number == 1:
        count = int(input("\nEnter number of product to add:"))
        for _ in range(count):
            name = input("\nEnter product name:")
            price = int(input("Enter price of the product:"))
            amount = int(input("Enter amount of the product:"))
            item.append([name, price, amount])
    elif number == 2:
        print("Product\tPrice\tAmount\n")
        # Walk the whole inventory: the original indexed range(0, product),
        # which only covered the most recent batch size after a second
        # "Add to Inventory" round.
        for name, price, amount in item:
            print(name, "\t", price, "\t", amount, "\n")
    elif number == 3:
        # Recompute totals from scratch each time: the original accumulated
        # into running totals, double-counting on repeated reports.
        total_price = sum(price * amount for _, price, amount in item)
        total_amount = sum(amount for _, _, amount in item)
        print("Total price = ", total_price, "\nTotal Amount = ", total_amount)
"noreply@github.com"
] | brandonlow.noreply@github.com |
733f9cca0e430c4af16d6f1940b16ef86ed6c5b7 | 559fd388a1651f754bfd7613de6032eb81fcdd42 | /chapter13.py | 324402a190569283f6af4456b985561aee6cf3f1 | [] | no_license | HardBop/thinkpython | d0cf4cbdaeb1a0faa75a391ef3d7213874aa719e | ca1ae0b456f77685e399f3927e242be321028be1 | refs/heads/master | 2020-04-13T21:48:59.220141 | 2019-04-09T22:35:05 | 2019-04-09T22:35:05 | 163,464,719 | 1 | 0 | null | 2019-01-22T19:16:40 | 2018-12-29T01:43:44 | Python | UTF-8 | Python | false | false | 17,074 | py | # Excercises and notes for Chapter 13 of ThinkPythong by Allen Downy on
# Green Tea Press
#
# change for testing branch logic
# name.strip() strips leading and trailing whitespaces out of a string
#Try removing whitespaces, punctuation, and capitalization in a loop
# and returning the letters sorted
import string
def squishsort(string_in):
    """Lowercase string_in, drop all whitespace and punctuation
    characters, and return the survivors sorted into one string."""
    drop = string.whitespace + string.punctuation
    kept = [ch for ch in string_in.lower() if ch not in drop]
    return ''.join(sorted(kept))
# Exercise 13.1: Write a program that reads a file, breaks each line into
# words, strips whitespace and punctuation from the words, and converts
# them to lowercase.
""" Exercise 13.1
Write a program that reads a file, breaks each line into words, strips
whitespace and punctuation from the words, and converts them to lowercase.
"""
# 1. read file
# 2. break lines into Words
# 3. mangle by removing whitespace & punctuation and converting to lowercase
# Created file wordlines.txt to test
# Exercise 13.1 driver: read the sample file line by line and print each
# word lowercased with punctuation/whitespace characters removed.
# (Python 2 print statement; the hard-coded path is the author's machine.)
fin = open('/Users/jimbaer/python/sandbox/turtle/wordlines.txt')
for line in fin :
    for word in line.split() :
        newword = ''
        for char in word :
            # keep only characters that are neither punctuation nor space
            if char not in (string.punctuation+string.whitespace):
                newword += char
        print newword.lower()
#Function version - squishy removes punctuation, whitespace, and makes
# lower case
# Note author's method below [process_line()] using string.strip() is a
# better solution because it only strips leading & trailing characters
# but does not remove embedded like apostrophes.
def squishy(word):
    """Return word lowercased with every punctuation and whitespace
    character removed, including embedded ones (e.g. apostrophes)."""
    unwanted = string.punctuation + string.whitespace
    return ''.join(ch for ch in word.lower() if ch not in unwanted)
def better_squishy(word):
    """Lowercase word and strip punctuation/whitespace from the ends
    only, preserving embedded characters such as apostrophes."""
    edges = string.punctuation + string.whitespace
    return word.lower().strip(edges)
# For fun, re-working with the translate() method
# seems that maketrans method not available in 2.7 ... waaah!
# NOTE(review): str.maketrans with three arguments is Python 3 only; on
# the Python 2 interpreter this module targets (see the print statements
# throughout), `word.maketrans` raises AttributeError.  The author's
# comment above already flags this experiment as non-working.
fin = open('/Users/jimbaer/python/sandbox/turtle/wordlines.txt')
badchar = (string.punctuation+string.whitespace)
for line in fin :
    for word in line.split() :
        table = word.maketrans('','',badchar)
        newword = word.translate(table)
        print newword.lower()
"""Exercise 13.2
Fun with out of print books
"""
"""
I downloaded The Narrative of the Life of Frederick Douglass
An American Slave from the Gutenberg project. Determined that
the actual text starts on line 540 (after header, contents,
and supporting letters) by a combination of inspection and
grep -n "[first phrase of book]"
I did not see a clever way to skip over that front material so just going to
read starting in the text starting at line 540
Likewise, line 3733 is end of text - rest is added Gutenberg material so
stopping read at line 3733.
There are 4106 lines in the file (bash wc -l)
"""
# Input text of book and organize into a dict with freq count
# book = name fo text file, first/last = first/last line of text
def book_words(book,first,last) :
    """Count word frequencies in lines [first, last) of `book`.

    Words are split on whitespace and hyphens, cleaned by squishy(),
    and returned as a word -> count dict.  Prints a summary line.

    NOTE(review): the file handle is never closed explicitly; a `with`
    block would be safer.
    """
    fin = open('/Users/jimbaer/python/sandbox/text_files/' + book)
    lc = 0
    words_dict = dict()
    for line in fin :
        lc += 1
        if (lc >= first and lc < last) :
            for word in line.replace('-',' ').split() : # see note
                cleanword = squishy(word)
                if cleanword in words_dict :
                    words_dict[cleanword] += 1
                else :
                    words_dict[cleanword] = 1
    print book," has ",len(words_dict)," distinct words"
    return words_dict
# Note: replace().split() allows to split on "-" or " "
Douglass_words = book_words('Frederick_Douglass.txt',540,3733)
# Can now reverse dict and sort - output is count, list of words or word, count
# in descending order? Douglass book has 4777 words ...
# Function to find the highest frequency word in the book (dict)
def getmax(indict) :
    """Invert a word->frequency dict into frequency->[words].

    Returns the inverted dict and prints the highest frequency seen
    together with its word list (Python 2 print statement).
    """
    hifreq = 0
    revdict = dict()
    for word in indict :
        # track the largest frequency encountered so far
        if indict[word] > hifreq :
            hifreq = indict[word]
        # group words under their shared frequency
        if indict[word] in revdict:
            revdict[indict[word]].append(word)
        else :
            revdict[indict[word]] = [word]
    print hifreq, revdict[hifreq]
    return revdict
# Returning revdict to explore more on word freqs for the book
Douglass_revdict = getmax(Douglass_words)
def hi_lo(revdict,heads=10,tails=0) :
    """Print the `heads` most frequent and `tails` least frequent entries.

    revdict maps frequency -> list of words (getmax() output); entries
    are printed as (count, [words]) tuples in descending count order.
    """
    flist = []
    for cnt in revdict :
        flist.append((cnt,revdict[cnt]))
    # sort the (count, words) pairs so the highest counts come first
    sflist = sorted(flist, reverse=True)
    if heads > 0 :
        print 'Most Frequent Words: (count,word)'
        for item in sflist[:heads] :
            print item
    if tails > 0 :
        print sflist[-tails:]
""" Exercise 13.3: Modify the program from the previous exercise to print
the 20 most frequently-used words in the book.
"""
# Already built the basic function in hi_lo() above
hi_lo(Douglass_revdict,20,0)
""" Exercise 13.4: Modify the previous program to read a word list
(see Section 9.1) and then print all the words in the book that are
not in the word list.
"""
# Read words.txt into a dict
def read_words(path='/Users/jimbaer/python/sandbox/text_files/words.txt'):
    """Read a newline-separated word list into a dict keyed by word.

    Returns a dict mapping each word to '' (used as a poor man's set),
    or None if a duplicate word is found in the source file.  `path`
    defaults to the original hard-coded location but can be overridden.
    """
    wordlist = dict()
    # `with` guarantees the handle is closed (the original leaked it)
    with open(path) as fin:
        for line in fin:
            word = line.strip()
            if word not in wordlist:
                wordlist[word] = ''
            else:
                # parenthesized so this line also runs on Python 3
                print("error - word repeated in source file")
                return
    return wordlist
wl_dict = read_words()
# write function to read words from book (Douglass_words dict) and see if they
# are in wl_dict, putting any not in wl_dict into a missing_dict
def wordmatch(book_words, wordlist=None):
    """Return a dict of the words in book_words that are absent from a
    reference word list, and print how many were missing.

    wordlist defaults to the module-level wl_dict built by read_words();
    passing it explicitly removes the hidden global dependency.
    """
    if wordlist is None:
        wordlist = wl_dict
    missing_dict = dict()
    for word in book_words:
        if word not in wordlist:
            missing_dict[word] = ''
    # parenthesized so this line also runs on Python 3
    print(len(missing_dict))
    return missing_dict
Douglass_miss_dict = wordmatch(Douglass_words)
def dump_mismatch(miss_dict,N) :
i = 0
for word in miss_dict :
if i < N :
i += 1
print i, word
else :
return
# Answer is: mostly mismatch due to contractions, proper names, numbers, and
# ______ which are not in words.txt file
# Did another example with Tale of Two Cities - longer with more geniune
# mismatches
""" Exercise 13.5
Write a function named choose_from_hist that takes a histogram as defined
in Section 11.1 and returns a random value from the histogram, chosen with
probability in proportion to frequency.
"""
# Function to return an item for sample list with probability of any item
# corresponding to the observed frequency in the sample.
# helper fn for bootstrap() that creates histogram from an observed sample,
# where sample is a list of discrete observations over a finite set of
# objects. (E.g, heads/tails, sides of a die, etc.)
def histogram(sample):
    """Return a dict mapping each distinct item in `sample` to its count.

    Helper for bootstrap(); `sample` is any iterable of hashable
    observations (e.g. heads/tails, die faces).
    """
    freq_dict = dict()
    for item in sample:
        # dict.get with a default collapses the membership-test branches
        freq_dict[item] = freq_dict.get(item, 0) + 1
    return freq_dict
# helper function for bootstrap() to get denomenator
def get_base(histin):
    """Return the total number of observations in histogram `histin`
    (the denominator used by bootstrap())."""
    # sum over the values replaces the manual accumulation loop
    return sum(histin.values())
# function to calculated observed frequency of items in a sample
def bootstrap(sample) :
    """Return a dict mapping each item in `sample` to its observed
    relative frequency, printing each item/frequency pair."""
    histin = histogram(sample)
    denom = get_base(histin)
    distn = dict()
    for item in histin :
        # float() forces true division under Python 2
        distn[item] = histin[item] / float(denom)
        print item, distn[item]
    return distn
# Now need to create the final funcion that makes a random draw with the
# probability of an outcome corresponding to the observed frequencies'
# in the distn dict.
def chooser(distn):
    """Return a random key of `distn`, a dict mapping item -> probability
    (probabilities assumed to sum to 1); each item is drawn with
    probability equal to its value.
    """
    import random  # the module top only imports `string`, so bind locally
    cumit = 0
    draw = random.random()
    last = None
    for item in distn:
        last = item
        cumit += distn[item]
        if draw <= cumit:
            return item
    # Float round-off can leave the running sum fractionally below 1.0;
    # fall back to the final item instead of silently returning None.
    return last
# chooser() solves problem 13.5 but went further to develop another
# function gensample() that generates samples according to the
# distribuiton in the observed histogram, for validation
# histogram(), getbase(), bootstrap(), and chooser() combine to make the
# choose_from_hist() function requested
def gensample(distn, size):
    """Generate a list of `size` random draws, each distributed
    according to the frequencies encoded in `distn` (see chooser())."""
    return [chooser(distn) for _ in range(size)]
# Function to identify mismatches between two dicts and put the words
# only found in one dict into a new dict with a flag indicating
# which source dict it came from
def dict_comp(dict1, dict2):
    """Compare the key sets of two dicts.

    Returns a dict of the keys present in only one input, each mapped to
    '1' if it came from dict1 or '2' if it came from dict2.  Also prints
    the number of mismatched keys.
    """
    diff_dict = dict()
    for word1 in dict1:
        if word1 not in dict2:
            diff_dict[word1] = '1'
    for word2 in dict2:
        if word2 not in dict1:
            diff_dict[word2] = '2'
    # parenthesized so this line also runs on Python 3
    print(len(diff_dict))
    return diff_dict
Douglass_diff = dict_comp(Douglass_words,Douglass_words2)
# Douglass words created with code above; Douglass_words2 with code below
# creating "hist" with process_file()
# This dict_comp() function does the same as subtract() below but goes
# both directions and labels source when there is a discrepency
""" Exercise 13.6
Python provides a data structure called set that provides many
common set operations. Write a program that uses set subtraction to
find words in the book that are not in the word list.
"""
def set_up(dict1,dict2,show=10) :
    """Return the symmetric difference of the two dicts' key sets,
    printing a numbered sample of the differing words (Exercise 13.6)."""
    set1 = set(dict1.keys())
    set2 = set(dict2.keys())
    diff_set = set1.symmetric_difference(set2)
    i = 0
    for word in diff_set :
        i += 1
        # NOTE(review): strict `<` prints at most show-1 words; `<=` may
        # have been intended -- confirm before relying on `show`.
        if i < show :
            print i, word
    return diff_set
""" Exercise 13.7
Alternate approach to a random draw:
1. get cummulative count of words for _words dict
2. Use random package to get a random number and scale to the word count
3. Pick first word from the cummulative distn that passes scaled psuedo-random,
which will require ordering the (key,value) pairs by value - reverse dict
This approach relies on the ordering of the dict being arbitrary - suspect
that will further compromise the "ramdom"-ness
"""
# Takes dict with words as keys and frequencies as values and returns
# dict with words as keys and cummulative word count as values
def cumm_dict(indict) :
    """Return a dict mapping each word to the running (cumulative) word
    count, accumulated in the input dict's iteration order.

    NOTE(review): the local name shadows the function itself, so a
    recursive call here would hit the dict -- harmless as written.
    """
    cumm_dict = dict()
    summer = 0
    for word in indict:
        summer += indict[word]
        cumm_dict[word] = summer
    print 'total words: ',max(cumm_dict.values()),' distinct words:', len(cumm_dict)
    return cumm_dict
# Takes dict with words as keys and cummulative word count as values and
# inverts to a dict with cummulative count as keys and words as values.
# Not general for dict inversion b/c it errors our rather than concatenation
# a list in case where initial value would map to more than one key.
def invert_dict(indict) :
    """Invert indict (value -> key).

    Not a general inverter: if two keys share a value it prints an error
    and returns None instead of collecting the keys into a list.
    """
    inverted = dict()
    for key in indict :
        if indict[key] in inverted :
            print "Error: key already exists"
            return
        else :
            inverted[indict[key]] = key
    print len(inverted), " entries in new dict"
    return inverted
# Takes inverted cummulative word count dict and draws a (psuedo) random
# word where the likelihood of draw is based on observed frequencies
def draw_word(indict):
    """Return a random word weighted by observed frequency.

    indict maps cumulative word count -> word (invert_dict() applied to
    cumm_dict()'s output).  A uniform draw scaled to the total count is
    matched against the sorted cumulative thresholds.
    """
    import random  # the module top only imports `string`, so bind locally
    cval = max(indict.keys()) * random.random()
    for threshold in sorted(indict.keys()):
        if threshold >= cval:
            return indict[threshold]
if __name__ == '__main__' :
Douglass_cumm = cumm_dict(Douglass_words2)
Douglass_cumm_inv = invert_dict(Douglass_cumm)
print draw_word(Douglass_cumm_inv)
""" Exercise 13.8 Markhov analysis
1. Write a program to read a text from a file and perform Markov analysis.
The result should be a dictionary that maps from prefixes to a collection
of possible suffixes. The collection might be a list, tuple, or dictionary;
it is up to you to make an appropriate choice. You can test your program
with prefix length two, but you should write the program in a way that
makes it easy to try other lengths
"""
# How to split text into prefixes and suffixes? I.e., how many words in each?
# First cut: set prefix length to 2 and suffix to 1
# Need to read words in a sliding window of 2-word bundles to get prefixes
# Could read words into a dict that takes word and then order, then
# step through in order and create dict of 2-word tuples. Have to
# make the order the key and word the value b/c word not distinct
# read book into a dict with the order of the word as key and the word as
# value. Have to use order as key b/c the word itself won't be distinct.
def ordered_words(book, first=0, last=1000000):
    """Read `book` and return a dict mapping word position (1-based) to
    the cleaned word, considering only lines in [first, last).

    Hyphens are treated as word separators, and squishy() lowercases and
    strips punctuation/whitespace from each word.  The position must be
    the key because the words themselves are not distinct.
    """
    ord_words = dict()
    lc = 0
    wc = 0
    # `with` guarantees the file handle is closed (the original leaked it)
    with open('/Users/jimbaer/python/sandbox/text_files/' + book) as fin:
        for line in fin:
            lc += 1
            if first <= lc < last:
                for word in line.replace('-', ' ').split():
                    wc += 1
                    ord_words[wc] = squishy(word)
    return ord_words
halfb_owords = ordered_words('eric_the_half.txt')
# Create dict of ordered 2-word tuples as prefixes
def prefix_dict(indict) :
    """Map each word position to the 2-word (bigram) prefix starting
    there; the final position is skipped since it has no successor.

    indict is the position -> word dict built by ordered_words().
    """
    pref_dict = dict()
    for item in indict :
        if item < len(indict) :
            pref_dict[item] = (indict[item],indict[item+1])
    print len(pref_dict), " prefixes in dict"
    return pref_dict
# Reverse the prefix dict to make searchable by tuples with frequency of
# the prefix tuple appearing as values
def prefix_rev(prefix_dict):
    """Invert an ordered-prefix dict (position -> prefix tuple) into a
    frequency dict (prefix tuple -> occurrence count)."""
    prerev = dict()
    # only the tuples matter, and .get collapses the membership branches
    for pair in prefix_dict.values():
        prerev[pair] = prerev.get(pair, 0) + 1
    return prerev
halfb_prefreq = prefix_rev(halfb_prefix)
# dict with prefix tuple as key and suffix (list of stings) as value
# pre2suf() takes ordered prefix dict and appends suffix in values
# e.g., halfb_prefix dict as input
def pre2suf(prefix):
    """Map each prefix tuple to the list of suffix words that follow it.

    `prefix` maps position -> prefix tuple (prefix_dict() output); the
    suffix at position i is the second word of the tuple at i+1, or ''
    for the final position.  Local renamed so it no longer shadows the
    function's own name.
    """
    mapping = dict()
    final = len(prefix)
    for pos in prefix:
        nxt = prefix[pos + 1][1] if pos < final else ''
        mapping.setdefault(prefix[pos], []).append(nxt)
    return mapping
halfb_pre2suf = pre2suf(halfb_prefix)
# Utility function to dump dict, default is top 10 lines
def dict_dump(indict, lines=10) :
    """Debug helper: print up to `lines` numbered key/value pairs from
    indict, then stop (Python 2 print statement)."""
    cnt = 0
    for item in indict :
        cnt += 1
        if cnt <= lines :
            print cnt, item, indict[item]
        else :
            return
# A simple Markhov test stream - uses 2-word prefix and single word suffix
# with random choice of prefix, and suffix also if len(suffix) > 1.
# indict = dict with prefix keys and suffix values
# Arbitrarily kicks out after default 10 iterations through the dict, though
# that parameter is adjustable
def Markhov1(indict, lins=10):
    """Emit a crude Markov-chain string from a prefix -> suffixes dict.

    Each step picks a random 2-word prefix and a random suffix for it,
    appending all three words; after `lins` steps the accumulated string
    is printed.  Returns silently (no print) when the dict has fewer
    entries than `lins`.
    """
    import random  # the module top only imports `string`, so bind locally
    newstring = ''
    cnt = 0
    for item in indict:
        # random.choice needs a sequence; list() keeps this Python 3 safe
        # (dict.keys() was only indexable under Python 2)
        ranpre = random.choice(list(indict.keys()))
        if len(indict[ranpre]) <= 1:
            suffix = indict[ranpre][0]
        else:
            suffix = random.choice(indict[ranpre])
        newstring = newstring + ' ' + ranpre[0] + ' ' + ranpre[1] + ' ' + suffix
        cnt += 1
        if cnt >= lins:
            # parenthesized so this line also runs on Python 3
            print(newstring)
            return
    return
""" Exercise 13.9: Word rank
1. Calculate word frequencies from an input file
2. Plot log f v log r, where f is the frequency and r is the rankself.
"""
# book_words() function creates a dict of words with frequencies
# so use that and then order the results and print N rows
# Example below with text of Emma
emma_words = book_words('emma.txt',250,17072)
emma_revdict = getmax('emma_words')
# Write a function to create a new dict that has freq of word from x_words
# as value and the freq rank of the word as key
def freq_rank(revdict) :
    """Print rank, frequency, log(rank), log(frequency) rows for a
    frequency -> [words] dict (Zipf plot data, Exercise 13.9).

    NOTE(review): `math` is used here but this module only imports
    `string`, so calling this raises NameError.  A CSV-writing
    redefinition of the same name follows later in the file.
    """
    flist = []
    for cnt in revdict :
        flist.append((cnt,revdict[cnt]))
    sflist = sorted(flist, reverse=True)
    # freq_rank = dict()
    rank = 0
    for item in sflist :
        rank += 1
        print rank,' , ' , item[0], ' , ', math.log(rank), ' , ', math.log(item[0])
# Above ignores multiple words that have same freq & rank, e.g., the
# words with freq=1 and rank = last.
# need to expand out the words & freqs that are grouped together in revdict
# Chart I get is more concave than linear for both eric_the_half and emma
# though away from the endpoints the linear approx should be good
# Below version writes to a csv
def freq_rank(revdict, csvname,
              base_dir='/Users/jimbaer/python/sandbox/text_files/'):
    """Write word-frequency rank data to a CSV file.

    revdict maps frequency -> list of words with that frequency; each
    output row is: rank, frequency, log(rank), log(frequency) in
    descending frequency order.  `base_dir` defaults to the original
    hard-coded location but can be overridden.
    """
    import math  # the module top only imports `string`, so bind locally
    flist = [(cnt, revdict[cnt]) for cnt in revdict]
    sflist = sorted(flist, reverse=True)
    rank = 0
    comma = ' , '  # hoisted: it was reassigned on every loop iteration
    # `with` guarantees the output file is closed even if a write fails
    with open(base_dir + csvname, 'w') as fout:
        for item in sflist:
            rank += 1
            wstring = (str(rank) + comma + str(item[0]) + comma +
                       str(math.log(rank)) + comma +
                       str(math.log(item[0])) + '\n')
            fout.write(wstring)
| [
"b3b6b7@gmail.com"
] | b3b6b7@gmail.com |
8764b9ddd11364370415e648788e59ed323ce23b | bb1193a058593b596b0d1a292e1524dc0adde6ed | /temp.py | 65d5675905118fa4a5e3c9195b0a4f8ee993422b | [] | no_license | hongym7/cassava-leaf-disease-classification | f3a5af0ad6e645a5eb0d7bed0dc9aeebd87797ec | 6f3b9b8fae8d57c722c7f6382f8d55205e33ee7b | refs/heads/master | 2023-03-16T20:16:21.274211 | 2021-03-08T00:27:29 | 2021-03-08T00:27:29 | 340,275,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | list = [0.8876168131828308, 0.8805795907974243, 0.8826828598976135, 0.8805795907974243, 0.8789719343185425]
import numpy as np
print(np.mean(list)) | [
"hongym7@naver.com"
] | hongym7@naver.com |
ab6965229c22d2549d2f5174c8c4006d04b11bd7 | 38f35484f1149c60d1b925946c4dff0bbcbe5e4b | /venv/Scripts/futurize-script.py | 2c62c4e6a99bc456862954dcdfe509643035ac64 | [] | no_license | wanyingchao/python | 9d6126cbe578f0a92d9f5021d92eb32cf154a403 | 96b02dfafd3e27203d073f80d024110024658f7c | refs/heads/master | 2023-04-05T21:54:14.634881 | 2021-04-26T03:30:44 | 2021-04-26T03:30:44 | 280,092,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!E:\python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
# Auto-generated setuptools console-script wrapper: resolves and runs the
# 'futurize' entry point from the installed 'future' distribution.
__requires__ = 'future==0.18.2'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix that Windows launchers append to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
    )
| [
"123"
] | 123 |
58f3a3519d109d183bcfe18bd78ea46010b8be6c | cac10f75d97b52e05b736ee872a166a8855afc63 | /src/fm-build.py | 69578bcd1cdc77e7c6d9873d57d2739bfc5c3a8a | [] | no_license | pombredanne/fm-index-3 | fd73d7e34959c4ebaaef6835252c528d86987827 | ec012bfc9a2973536256749fb1491d632cedfb2d | refs/heads/master | 2022-01-21T05:24:31.388019 | 2018-09-28T17:30:17 | 2018-09-28T17:30:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | #!/usr/bin/python
import os
from os.path import join, abspath, isfile, isdir, exists, basename
import sys
import fmindex
def main():
    """CLI entry point: build an FM-index from a data file.

    Usage: ``fm-build.py data index`` — reads the whole input file,
    builds the index with ``fmindex.index`` and saves it with
    ``fmindex.save``. Aborts the process on bad arguments or a missing
    input file (original behavior, kept for callers/scripts).
    """
    # Exactly two positional arguments (data file, index file) required.
    if not len(sys.argv) in [3]:
        print('Usage: ')
        print(' %s data index' % sys.argv[0])
        os.abort()
    else:
        if not isfile(sys.argv[1]):
            print("Input file doesn't exist")
            os.abort()
        # 'with' closes the input file even on error
        # (the original leaked the file handle).
        with open(sys.argv[1]) as inp:
            # read input
            data = inp.read()
        # create index
        idx = fmindex.index(data)
        # save index to file
        fmindex.save(sys.argv[2], idx)
if __name__ == '__main__':
main() | [
"egonelbre@gmail.com"
] | egonelbre@gmail.com |
7cd74fe3abf4d5b922b588c90ae475e7548cb432 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_1/Python/Brryce/pancake.py | bab274304cad7aac423f291f3efc21c3fa94f122 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | import sys
N = int(sys.stdin.readline().strip())
def decide(ll):
if len(ll) == 0:
return 0
lll = []
if ll[-1] == -1:
for number in ll[:-1]:
lll.append(-number)
return 1+decide(lll)
else:
return decide(ll[:-1])
for qw in range(1,N+1):
s = sys.stdin.readline().strip()
ll = []
for char in s:
if char == '-':
ll.append(-1)
else:
ll.append(1)
print("Case #%d: %d"%(qw,decide(ll))) | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
0bdfd103c4bc14faff5933fbe4b34c8b0856ad44 | f664ddbdfcc2aec278cfe63c829b97c78e5eb863 | /examples/base.py | e9ad7f2ebf116e8e81a9db1772594203e1a10d5f | [] | no_license | yolandahq/KGE_pytorch_test | 284ed2848c98c83c805ca8d9cdd4eb17f66ceea0 | ad4a410e4a25be11164e9f9b0b71ae627b9ec035 | refs/heads/master | 2023-03-26T02:21:04.811687 | 2019-08-17T18:17:16 | 2019-08-17T18:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,921 | py | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from tensorboardX import SummaryWriter
from dataloader.dataloader import *
from torch.utils.data import DataLoader
import json
def adjust_learning_rate(optimizer, decay):
for param_group in optimizer.param_groups:
param_group['lr'] *= decay
class TrainBase():
def __init__(self, args):
self.args = args
self.sumwriter = SummaryWriter(log_dir=args.summarydir)
def get_iterator(self):
train_iterator = DataLoader(
UniformDataSet(self.args.trainpath, self.args.neg_sample_rate),
batch_size=self.args.batch_size,
shuffle=self.args.shuffle,
num_workers=self.args.numworkers,
drop_last=self.args.drop_last
)
test_iterator = DataLoader(
TestDataset(self.args.testpath),
batch_size=self.args.eval_batch_size,
shuffle=False,
num_workers=self.args.evalnumberworkers,
drop_last=False
)
valid_iterator = DataLoader(
TestDataset(self.args.validpath),
batch_size=self.args.eval_batch_size,
shuffle=False,
num_workers=self.args.evalnumberworkers,
drop_last=False
)
return train_iterator, test_iterator, valid_iterator
def load_model(self, model, optimizer):
print('Loading checkpoint %s...' % self.args.init_checkpoint)
checkpoint = torch.load(self.args.init_checkpoint)
model.load_state_dict(checkpoint['model_state_dict'])
current_learning_rate = checkpoint['lr']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
def save_model(self, model, optimizer, variable_list):
torch.save({
**variable_list,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, os.path.join(self.args.savepath, 'checkpoint'))
#【TODO】could be save with timestamp
def save_model_param(self, model):
param = model.getWeight()
filename = os.path.join(self.args.savepath, 'checkpoint')
with open(filename + '.json', 'a') as outfile:
json.dump(param, outfile, ensure_ascii=False)
outfile.write('\n')
def load_opt(self, model):
lr = self.args.learningrate
OPTIMIZER = self.args.optimizer
if OPTIMIZER == "Adam":
optimizer = torch.optim.Adam(model.parameters(), weight_decay=self.args.weight_decay, lr=lr)
elif OPTIMIZER == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), weight_decay=self.args.weight_decay, lr=lr)
elif OPTIMIZER == 'Adagrad':
optimizer = torch.optim.Adagrad(model.parameters(), weight_decay=self.args.weight_decay, lr=lr)
elif OPTIMIZER == 'Adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), weight_decay=self.args.weight_decay, lr=lr)
else:
print("ERROR : Optimizer %s is not supported." % OPTIMIZER)
print("Support optimizer:\n===>Adam\n===>SGD\n===>Adagrad\n===>Adadelta")
raise EnvironmentError
return optimizer
def fit(self, model, optimizer=None):
epochs = self.args.epochs
lr = self.args.learningrate
if not optimizer:
optimizer = self.load_opt(model)
if self.args.usegpu:
model.cuda()
globalstep = 0
globalepoch = 0
minLoss = float("inf")
train_iterator, test_iterator, valid_iterator = self.get_iterator()
for epoch in range(epochs):
globalepoch += 1
print("=" * 20 + "EPOCHS(%d/%d)" % (globalepoch, epochs) + "=" * 20)
step = 0
model.train()
for posData, negData in train_iterator:
posData = posData.reshape(-1,3)
negData = negData.reshape(-1,3)
if self.args.usegpu:
posData = posData.cuda()
negData = negData.cuda()
# Calculate the loss from the modellrdecayepoch
data_batch = torch.cat((posData, negData), 0)
loss = model(data_batch)
if self.args.usegpu:
lossVal = loss.cpu().item()
else:
lossVal = loss.item()
# Calculate the gradient and step down
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Print infomation and add to summary
if minLoss > lossVal:
minLoss = lossVal
if step % 50 == 0:
print("[TRAIN-EPOCH(%d/%d)-STEP(%d)]Loss:%f, minLoss:%f" % (
epoch + 1, epochs, step, lossVal, minLoss))
step += 1
globalstep += 1
self.sumwriter.add_scalar(self.args.model + '/train/loss', lossVal, global_step=globalstep)
self.sumwriter.add_scalar(self.args.model + '/train/lr', lr, global_step=globalstep)
if globalepoch % self.args.lrdecayepoch == 0:
adjust_learning_rate(optimizer, decay=self.args.lrdecay)
lr = lr * self.args.lrdecay
if globalepoch % self.args.evalepoch == 0:
# eval the model
print('begin eval the model')
model.eval()
hit10 = 0
mr = 0
evalstep = 0
mr_t = 0
mr_h = 0
for data in test_iterator:
evalstep += 1
if self.args.usegpu:
data = data.cuda()
rankH, rankT = model.eval_model(data)
mr_h +=rankH
mr_t +=rankT
if evalstep % 5000 == 0:
print("[TEST-EPOCH(%d/%d)-STEP(%d)]mr:%f, hit@10:%f" % (
globalepoch, epochs, evalstep, mr / evalstep, hit10 / evalstep))
mr += (rankH + rankT) / 2
if (rankH + rankT) / 2 <= 10:
hit10 += 1
mr /= evalstep
hit10 /= evalstep
mr_t /=evalstep
mr_h /=evalstep
self.sumwriter.add_scalar(self.args.model + '/eval/hit@10', hit10, global_step=epoch + 1)
self.sumwriter.add_scalar(self.args.model + '/eval/MR', mr, global_step=epoch + 1)
self.sumwriter.add_scalar(self.args.model + '/eval/RankT', mr_t, global_step=epoch + 1)
self.sumwriter.add_scalar(self.args.model + '/eval/RankH', mr_h, global_step=epoch + 1)
variable_list = {
'step': globalstep,
'lr': lr,
'MR': mr,
'hit@10': hit10
}
self.save_model(model, optimizer, variable_list)
print("=" * 20 + "FINISH TRAINING" + "=" * 20)
self.valid(model, valid_iterator)
def valid(self, model, iter):
print('begin eval the model')
model.eval()
hit10 = 0
mr = 0
evalstep = 0
for data in iter:
evalstep += 1
if self.args.usegpu:
data = data.cuda()
rankH, rankT = model.eval_model(data)
if evalstep % 1000 == 0:
print("[VALID-STEP(%d)]mr:%f, hit@10:%f" % (
evalstep, mr / evalstep, hit10 / evalstep))
mr += (rankH + rankT) / 2
if rankT <= 10:
hit10 += 1
mr /= evalstep
hit10 /= evalstep
print("=" * 20 + "VALID RESULTS" + "=" * 20)
print('Mean Rank: %f' % mr)
print('Hit@10: %d' % hit10)
| [
"811437508@qq.com"
] | 811437508@qq.com |
e4560b2e932a04bcb6d8525c9c7cc97ff7b0fdfe | 0bbf6291d0c98e8d5d84a906d24ae3098d9d90e5 | /Sites/views.py | 0fd0f0162c58c5339d08d7258951186d088e3b28 | [] | no_license | Arcrammer/28.-DWA | ad8e3b57f04189e8707f5e209ab742d6db02156e | d25eaa1e71f338da726f4cd349432b5697559d9a | refs/heads/master | 2016-09-01T10:25:01.324934 | 2016-01-13T14:19:56 | 2016-01-13T14:19:56 | 49,007,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from django.shortcuts import render
from django.http import HttpResponse
from Sites import models
def all(request):
    """List view: render every Site record with the 'all.html' template.

    NOTE: the name shadows the builtin ``all`` inside this module; it is
    kept unchanged because URL configuration may reference it by name.
    """
    context = {'sites': models.Site.objects.all()}
    return render(request, 'all.html', context)
| [
"alexander2475914@gmail.com"
] | alexander2475914@gmail.com |
5005a0f118da47b226554461ee0c39d1166964cd | 7555ea67e7c395be295dc7b81aebffeccf014dc3 | /old/esc_trial1.py | 2184565bd89a2af673ff30da8409bca536678088 | [] | no_license | RoverLiu/SelfZoomingCar | a2a3556f9d412aa7ab8ad02a220d9f78f947d693 | 736f2239c7add408f09aaf340c72cd16aedd3036 | refs/heads/master | 2022-01-12T23:31:23.182737 | 2019-07-04T02:50:46 | 2019-07-04T02:50:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BOARD)
#GPIO.setmode(GPIO.BCM)
# Use the small numbers beside the pins
GPIO.setup(37, GPIO.OUT)
p = GPIO.PWM(37, 50)
finish = 0
p.start(7.5)
# Input for speed: 500 - neutral, 1000 full forward, 0 full brake
while (finish != 1):
speed = input('Enter desired speed:')
speed = float(speed)*0.005
if (speed>1000) or (speed<0):
finish = 1
duty = 5+speed
p.ChangeDutyCycle(duty)
p.ChangeDutyCycle(7.5)
sleep(1)
p.stop()
GPIO.cleanup() | [
"Clucini07@gmail.com"
] | Clucini07@gmail.com |
a8ae95f7f29a131f572e7b0beac8398df104ddb8 | d3a6823ffca0a58373a791fd433fffd74277ba22 | /sage/rings/polynomial/polynomial_singular_interface.pyi | 4b26fc6863e1f909cfaef1fe0063bc33a30102b8 | [
"MIT"
] | permissive | demurgos/sage-typing | c63ff9e60738b71024b82ccba9ab66c1f6874f83 | d27a80237f8656df74607a59256a2c4b8b2eb89e | refs/heads/master | 2020-09-17T04:20:46.985143 | 2016-08-25T09:09:18 | 2016-08-25T09:09:18 | 66,539,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | pyi | u"""
Python interface for the `sage.rings.polynomial.polynomial_singular_interface` module.
http://doc.sagemath.org/html/en/reference/polynomial_rings/sage/rings/polynomial/polynomial_singular_interface.html
"""
from typing import Any, List, Union, Optional
import sage.structure.factory
import sage.rings.finite_rings.finite_field_base
class PolynomialRing_singular_repr(object):
    """Stub: mixin providing a Singular representation for polynomial rings."""
    pass
class Polynomial_singular_repr(object):
    """Stub: mixin providing a Singular representation for polynomials."""
    pass
# Stub signature: True if the ring R can be translated into Singular.
def can_convert_to_singular(R) -> bool: ...
| [
"demurgos.net@gmail.com"
] | demurgos.net@gmail.com |
9ad7a4d8557b5f0c2a7cb64f6ea3ffed3b18da98 | c51d4868eafb70718363c0e1e9b91c199a4850d6 | /Fedora/vehicles/views/tools.py | 6e74d7d37d940145444b4fcdada9605ae346febe | [] | no_license | digiglean/django_rest_framework_tut | 8581f29a5b6a1431889221b758d87eb0ff9301ff | f65bf96904b73aee621352d971ed9c3165fc56fb | refs/heads/main | 2023-01-19T18:19:01.697146 | 2020-12-04T21:13:16 | 2020-12-04T21:13:16 | 317,333,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # vehicle/views/tools.py
from rest_framework.decorators import api_view
from rest_framework.response import Response
from vehicles.models import Tool
from vehicles.serializers.tools import ToolSerializer
@api_view(["GET"])
def list_tools(request):
tools = [
Tool("hammer", "Mastercraft"),
Tool("wrench", "Husky"),
]
serializer = ToolSerializer(tools, many=True)
content = {
"tools": serializer.data,
}
return Response(content) | [
"chris@CGB-iMac-2TSSD.local"
] | chris@CGB-iMac-2TSSD.local |
f01dcd6d3165306974971e6e05b8652cd8de00cc | 813a6239b8d8e0012349e45db16b08eb4c285314 | /Homework/Lab2/itunes_top_songs.py | dcfac558c2de98852649bada797d72be48f4a535 | [] | no_license | linhkent/nguyenkhuonglinh-fundamental-c4e25 | f2daaf463545fc2f087fd9adbe1c970b84300bff | 2335b3eb8423119dc3efaf85dd98ceb0947378ae | refs/heads/master | 2020-04-13T08:10:48.814771 | 2019-01-19T14:25:40 | 2019-01-19T14:25:40 | 163,074,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | url = 'https://www.apple.com/itunes/charts/songs/'
from urllib.request import urlopen
from bs4 import BeautifulSoup
from collections import OrderedDict

# Download the chart page (``url`` is defined above this block).
com = urlopen(url)
rawdata = com.read()
content = rawdata.decode('utf8')
soup = BeautifulSoup(content,'html.parser')

# The first <ul> holds the chart; one <li> per song entry.
ul = soup.find('ul','')
li_list = ul.find_all('li')

songs_list = []
n = 1
for li in li_list:
    # <h3> carries the song title, <h4> the artist name.
    name = li.h3.string
    artist = li.h4.string
    song = {
        'No' : n,
        'Song' : name,
        'Artist' : artist
    }
    # OrderedDict preserves the No/Song/Artist column order in the sheet.
    songs_list.append(OrderedDict(song))
    n += 1

import pyexcel
# One row per song, written to an Excel workbook in the working directory.
pyexcel.save_as(records=songs_list, dest_file_name="itunes_top_songs.xlsx")
"linhkent@users.noreply.github.com"
] | linhkent@users.noreply.github.com |
e7eca5df3d2a0753e9a8954befba21091a5efdf2 | 341b809513d29a587fe5b3aa4306e119bad235db | /Pruebas/Tutorial_YouTube.py | cd27f7e06087f628a813f3a6edd4646d2dd1bc80 | [] | no_license | BrihayanF/Programacion_BSFR | 6bb5a45d66475da552f9064c2e3e5d6a505d16a1 | 7e76a77de00a77c42f009f38ddccf2f268ded828 | refs/heads/master | 2021-01-17T07:31:34.420794 | 2017-05-17T20:34:17 | 2017-05-17T20:34:17 | 83,743,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import tkinter
from tkinter import messagebox
def accion_de_mi_boton():
print('Mi primer boton ha sido precionado :v')
def accion_de_mi_segundo_boton():
messagebox.showinfo('Primer dialogo emergente','Mi segundo boton ha sido precionado')
def accion_advertencia():
messagebox.showwarning('Cuidado O.o','No debio precionar este boton, su ordenador se va a reiniciar')
# Crendo mi primera ventana
mi_ventana = tkinter.Tk()
mi_ventana.geometry('640x480')
mi_ventana.title('Mi primer programa :v')
# Crear mi primer boton :'v
mi_boton = tkinter.Button(text='Mi Boton', command = accion_de_mi_boton)
# Coloco mi boton en la ventana principal u.U
mi_boton.pack()
# Agregue mi segundo boton
mi_boton2 = tkinter.Button(text='Segundo boton', command = accion_de_mi_segundo_boton)
# Colocar mi segundo boton
mi_boton2.pack()
# Mostrar una advertencia
advertencia = tkinter.Button(text='No precionar >_<', command = accion_advertencia)
# Poner mi advertencia
advertencia.pack()
mi_ventana.mainloop()
| [
"brayanforero97@gmail.com"
] | brayanforero97@gmail.com |
4d75de869f66f5c115f64c370a69045b8d7daaf4 | 2d7918e30792358fa5c1b6aa010300e5060450bb | /Python Basics April 2018/Drawing Figures with Loops/12.Butterfly.py | a3cb5cea5e64277e5669517b7f37972cc88a288a | [] | no_license | skipter/Programming-Basics-Python | acf6dfaa28c341f33c1109ad35e4a01c2269bb81 | 9845bd0e41b96096a83c1d30140f6cd33e8eab67 | refs/heads/master | 2020-03-27T22:45:28.448180 | 2019-10-15T09:15:01 | 2019-10-15T09:15:01 | 147,257,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | n = int(input())
for x in range(n - 2):
if x % 2 == 0:
print(f"{'*' * (n - 2)}\ /{'*' * (n - 2)}")
else:
print(f"{'-' * (n - 2)}\ /{'-' * (n - 2)}")
print(f"{' ' * (n - 1)}@{' ' * (n - 1)}")
for x in range(n - 2):
if x % 2 == 0:
print(f"{'*' * (n - 2)}/ \{'*' * (n - 2)}")
else:
print(f"{'-' * (n - 2)}/ \{'-' * (n - 2)}") | [
"skipter@abv.bg"
] | skipter@abv.bg |
4a6723a62403cf723e2c51f41684b375d0508fe1 | cfb784c57367ed759eefc22b23f90b8538145f5c | /Pertemuan3/range.py | a7eeed21e1fdf4893194db40188b5e9fef2469fa | [] | no_license | erikoang/basic-python-b4-c | 97e37e776ce48d43805e3a90dc9fa0fa0f53f1d4 | 04a4eda2b20dc6339671748ec64d90f89034e37d | refs/heads/main | 2023-03-19T02:21:05.208412 | 2021-03-04T19:02:27 | 2021-03-04T19:02:27 | 338,513,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | x = range(6)
for a in x:
print("Perulangan scenario 1:",a)
y= range(3, 6)
for b in y:
print("Perulangan scenario 2:",b)
z = range(3, 20, 2)
for c in z:
print("Perulangan scenario 3:",c) | [
"eriko.a@agiletechnica.com"
] | eriko.a@agiletechnica.com |
6b4029e2668157ea18569e6b62bc69791286dfb2 | f2afb853d456761f824ef0159ffb1fa4125d8684 | /NetworkDetection/segment_cellpose_helpers.py | 1744206e7d8691515a67f0cb10de65a6be0ec3b5 | [] | no_license | lukasvandenheuvel/CellContactNetwork | 89fa02faf6f435da7a648af24c2c0f7162186c0c | 08454f61a2932217efd893cf5fd21243a8a94244 | refs/heads/main | 2023-06-12T04:06:08.940866 | 2021-07-02T07:36:27 | 2021-07-02T07:36:27 | 372,474,485 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,384 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 26 17:03:32 2021
@author: lukasvandenheu
"""
import numpy as np
from numba import jit
from scipy import ndimage as ndi
from joblib import Parallel, delayed
import multiprocessing
from cellpose import models
use_GPU = models.use_gpu()
#%%
@jit(nopython=True) # Function is compiled s.t. it runs in machine code
def find_network(segmented,max_num_cells=None,R=4):
    '''
    This function finds the cell-contact network corresponding to a segmented image.

    INPUTS
    ------
        segmented (MxN numpy array)
        A segmented image, where each cell is labeled by a unique integer value.

        max_num_cells (int, default is None)
        The maximum number of cells that may be expected to find in the network.
        The network matrix will be of the size max_num_cells x max_num_cells.
        This parameter is, for example, useful if you are detecting a developing
        network over time. Then, max_num_cells is the number of cells on the first
        timeframe, and the network matrix of all cells will have the same size.

        R (int, default is 4)
        Maximal distance between 2 neighbouring cells in pixels.

    OUTPUT
    ------
        network (num_cells x num_cells numpy array)
        Contact matrix. If network[i,j]=1, then the cells which are labeled with
        i+1, j+1 on the segmented image are connected.

    NOTES
    -----
        Pixels closer than R to the image border are never used as the center
        pixel (the loops run over [R, N-R) and [R, M-R)), so cells touching
        the border can miss connections within R of the edge.
        The body is written with explicit loops (no fancy indexing) because
        it must stay compatible with numba's nopython mode.
    '''
    # Find size and the number of cells
    M,N = segmented.shape # segmented image has M rows and N columns
    num_cells = np.max(segmented) # number of cells in the segmented image
    if not(max_num_cells==None): # if the user entered a maximum number of cells
        num_cells = max_num_cells

    # Initialize network matrix:
    # note this is dense, so memory grows quadratically with num_cells.
    network = np.zeros((num_cells, num_cells))

    # Loop over all pixels of interest
    for c in np.arange(R,N-R):
        for r in np.arange(R,M-R):

            # Get the neighborhood (square with radius R)
            nbh = segmented[r-R:r+R+1,c-R:c+R+1]
            poi = nbh[R,R] # Pixel of interest (poi) is the center of nbh

            if poi > 0:
                # Loop through pixels in neighborhood
                # and connect the values to poi in the network.
                # Don't worry, Numba JIT likes loops!
                for row in nbh:
                    for value in row:
                        if (value!=0 and value!=poi):
                            # Labels are 1-based, matrix indices 0-based.
                            network[poi-1, value-1] = 1 # center is connected to the object labeled by value
                            network[value-1, poi-1] = 1 # object labeled by value is connected to center
    return network
#%%
def enlarge_fused_image(fused, patch_height=512, patch_width=512, overlap=256):
    '''
    Zero-pad the fused (whole-well) image so that an uneven integer number
    of overlapping patches fits exactly along each axis.

    INPUTS
    ------
    fused (MxNxC numpy array)
        Fused (whole well) image.
    patch_height, patch_width (int)
        Size of the patches in pixels. Default is 512.
    overlap (int)
        Overlap of neighbouring patches in pixels. Default is 256.

    OUTPUTS
    -------
    new_fused (numpy array, dtype uint8)
        Zero-padded copy of the fused image.
    patch_locations (list with 2 elements)
        patch_locations[0]: y-coordinates of the patches in pixels.
        patch_locations[1]: x-coordinates of the patches in pixels.
    '''
    m, n = patch_height, patch_width
    M, N, C = get_image_dimensions(fused)

    # Number of patches needed to cover each axis (last one may overhang).
    step_m = m - overlap
    step_n = n - overlap
    num_m = int(np.ceil((M - m) / step_m + 1))
    num_n = int(np.ceil((N - n) / step_n + 1))

    # Force uneven patch counts so the final patch is NOT an overlap patch.
    if num_m % 2 == 0:
        num_m += 1
    if num_n % 2 == 0:
        num_n += 1

    # Enlarged canvas, with the original image pasted into the top-left corner.
    new_M = (num_m - 1) * step_m + m
    new_N = (num_n - 1) * step_n + n
    new_fused = np.zeros((new_M, new_N, C), dtype='uint8')
    new_fused[:M, :N, :] = fused

    patch_locations = [np.arange(0, new_M - m + 1, step_m),
                       np.arange(0, new_N - n + 1, step_n)]
    return [new_fused, patch_locations]
#%%
def fused_to_patches(fused, patch_locations, patch_height=512, patch_width=512):
    '''
    Cut the (enlarged) fused image into a list of patches, ordered
    column-by-column (all rows of the first column come first).

    INPUTS
    ------
    fused (MxNxC numpy array)
        Enlarged fused image.
    patch_locations (list of 2 numpy arrays)
        patch_locations[0]: y-coordinates (rows) of the patches in pixels.
        patch_locations[1]: x-coordinates (columns) of the patches in pixels.
    patch_height, patch_width (int)
        Patch size in pixels.

    OUTPUT
    ------
    patch_list (list of patch_height x patch_width x C numpy arrays)

    Bug fix: the original assigned ``[n,m] = [patch_height, patch_width]``
    and then sliced rows with ``m`` (= width) and columns with ``n``
    (= height) — swapped relative to enlarge_fused_image. That silently
    produced wrongly-sized patches whenever patch_height != patch_width;
    square patches (the default) were unaffected.
    '''
    patch_list = []
    for c_n in patch_locations[1]:      # x-coordinate (column) in pixels
        for c_m in patch_locations[0]:  # y-coordinate (row) in pixels
            patch = fused[c_m:c_m + patch_height, c_n:c_n + patch_width, :]
            patch_list.append(patch)
    return patch_list
#%%
def calculate_img_similarity(img1, img2):
    '''
    Return the fraction of shared foreground pixels between two boolean
    images, normalised by the area of the smaller foreground.
    '''
    overlap_area = np.sum(np.logical_and(img1, img2))
    smaller_area = min(np.sum(img1), np.sum(img2))
    return overlap_area / smaller_area
#%%
def find_cell_values_which_overlap(cell_on_overlap, combined_overlap, similarity_threshold):
    '''
    Return the labels on ``combined_overlap`` whose cells overlap the
    boolean mask ``cell_on_overlap`` with a similarity above
    ``similarity_threshold``.
    '''
    # Labels sharing at least one pixel with the mask; index [1:] drops
    # the background (0) entry, as in the original implementation.
    candidate_values = np.unique(cell_on_overlap * combined_overlap)[1:]
    matches = []
    for candidate in candidate_values:
        candidate_mask = (combined_overlap == candidate)
        if calculate_img_similarity(candidate_mask, cell_on_overlap) > similarity_threshold:
            matches.append(candidate)
    return matches
#%%
def store_overlapping_cells(values_on_edge, patch_ol, combined_ol, similarity_threshold):
    '''
    Map each edge-region cell of ``patch_ol`` to the labels on
    ``combined_ol`` that sufficiently overlap it.

    Keys of the returned dict are the labels of cells in the edge region
    of patch_ol; values are the labels on combined_ol whose cells overlap
    them above ``similarity_threshold``.
    Example: ``{23: [34, 45]}`` means edge cell 23 of patch_ol overlaps
    cells 34 and 45 on combined_ol (probably two half cells).
    '''
    return {
        value: find_cell_values_which_overlap(patch_ol == value, combined_ol, similarity_threshold)
        for value in values_on_edge
    }
#%%
def replace_overlapping_cells(overlap_dict, patch_ol, combined_ol, max_value):
    '''
    Replace wrongly-split cells on ``combined_ol`` by the (correctly
    predicted) overlapping cells of ``patch_ol``.

    For every entry in ``overlap_dict`` (edge-cell label -> overlapping
    labels) the overlapping cells are erased from a copy of combined_ol
    and the full cell from patch_ol is pasted in with a fresh label
    (> max_value). The input arrays are not modified.
    '''
    result = combined_ol.copy()
    next_value = max_value
    for patch_value, overlapping_values in overlap_dict.items():
        if not overlapping_values:
            continue  # nothing overlaps this edge cell; leave combined_ol as-is
        # Erase all partial cells that overlap the edge cell.
        for overlapping_value in overlapping_values:
            result[result == overlapping_value] = 0
        # Paste the full cell from the overlap patch under a fresh label.
        next_value += 1
        result[patch_ol == patch_value] = next_value
    return result
#%%
def align_patches(patch1, patch2, patch_ol, edge_thickness, similarity_threshold, max_value, axis):
    '''
    This function aligns patch1 and patch2 based on the overlap patch_ol.

    patch1 and patch2 are two adjacent predicted patches (vertically
    adjacent for axis=0, horizontally for axis=1) and patch_ol is the
    prediction of the half-overlapping patch that spans their seam.
    Cells on the seam that were cut in half are replaced by the full cell
    from patch_ol, labeled with a fresh value > max_value.

    Assumes patch1 and patch2 have the same shape (m, n); the seam is
    taken at the midpoint of that shape.

    Returns (patch1_new, patch2_new): the two patches with the seam
    region repaired.
    '''
    m,n = np.shape(patch1)
    edge_size = int(edge_thickness/2)

    # Get max value (values of new overlapping cells are always larger than max_value)
    # max_value = np.max(patch2)

    # Get a list of cells that lie on the edge region of the overlapping patch (values_on_edge)
    # Create combined overlap by pasting the 2 patches together (combined_ol)
    if (axis==0):
        # Vertical seam: bottom half of patch1 on top of top half of patch2.
        center = int(m / 2)
        values_on_edge = np.unique( patch_ol[center-edge_size:center+edge_size+1,:] )
        combined_ol = np.concatenate([patch1[center:m,:],patch2[0:center,:]],axis=0)
    elif (axis==1):
        # Horizontal seam: right half of patch1 next to left half of patch2.
        center = int(n / 2)
        values_on_edge = np.unique( patch_ol[:,center-edge_size:center+edge_size+1] )
        combined_ol = np.concatenate([patch1[:,center:n],patch2[:,0:center]],axis=1)
    else:
        raise ValueError('Invalid choice for axis. Please choose either 0 or 1.')

    # Remove 0 (=background) from the list
    values_on_edge = np.delete(values_on_edge, np.where(values_on_edge==0))

    # Find overlapping cells
    overlap = store_overlapping_cells(values_on_edge, patch_ol, combined_ol, similarity_threshold)
    combined_ol_new = replace_overlapping_cells(overlap, patch_ol, combined_ol, max_value)

    # Update patches: split the repaired seam region back onto the two patches.
    if (axis==0):
        patch1_new = np.concatenate([patch1[0:center,:],combined_ol_new[0:center,:]],axis=0)
        patch2_new = np.concatenate([combined_ol_new[center:m,:],patch2[center:m,:]],axis=0)
    elif (axis==1):
        patch1_new = np.concatenate([patch1[:,0:center],combined_ol_new[:,0:center]],axis=1)
        patch2_new = np.concatenate([combined_ol_new[:,center:n],patch2[:,center:n]],axis=1)

    return patch1_new, patch2_new
#%%
def create_overlapping_columns_parallel(nn, mask_list, patch_locations, edge_thickness, similarity_threshold):
    '''
    This function aligns overlapping patches in the y-direction to create columns.

    nn is the index of the column of patches to assemble; mask_list holds
    the predicted patches in column-major order (as produced by
    fused_to_patches), so patch number nn*num_patches_mm + mm is the patch
    at row mm of column nn. Odd rows are the half-overlapping patches used
    to repair the seams between the even-row patches.

    Returns the vertically concatenated, seam-repaired column.
    '''
    patches_in_column = []
    num_patches_mm = np.size(patch_locations[0])
    # loop over rows in steps of 2 (avoid the last patch in column)
    for mm in range(0,num_patches_mm-1,2):

        # Get patch1, patch2 and patch_ol
        patch_nr = nn * num_patches_mm + mm
        if mm==0: # if we are at the top of the column
            patch1 = np.copy(mask_list[patch_nr]) # patch1 (upper patch) is new patch from patch_list
            patches_in_column.append(patch1)
        else:
            patch1 = np.copy(patches_in_column[-1]) # patch1 (upper patch) is previously processed patch
        patch2 = np.copy(mask_list[patch_nr + 2]) # patch2 is lower patch
        patch_ol = np.copy(mask_list[patch_nr + 1]) # patch_ol is overlapping patch

        # Increase cell values on patch2 with the max value of patch1
        # so labels stay unique within the growing column.
        patch2[np.where(patch2>0)] = patch2[np.where(patch2>0)] + np.max(patch1)

        # Align patch1 and patch2 using overlap
        max_value = np.max(patch2)
        patch1_new,patch2_new = align_patches(patch1, patch2, patch_ol, edge_thickness, similarity_threshold, max_value, axis=0)

        patches_in_column[-1] = patch1_new # overwrite first patch
        patches_in_column.append(patch2_new) # append new patch

    # Combine patches into a column
    aligned_patches = np.concatenate(patches_in_column,axis=0)
    return aligned_patches
#%%
def make_all_cell_values_unique(overlapping_columns):
    '''
    Offset the labels of every even-indexed (non-overlap) column by the
    maximum label of the previous even column, making labels unique
    across columns.

    Columns are modified in place (the same list is returned). Also
    returns the largest label observed among the shifted columns.
    '''
    max_value = 0
    for index in range(2, len(overlapping_columns), 2):
        column = overlapping_columns[index]
        # The previous even column has already been shifted, so its max
        # is a safe offset for this column's foreground labels.
        offset = np.max(overlapping_columns[index - 2])
        foreground = column > 0
        column[foreground] += offset
        overlapping_columns[index] = column
        max_value = max(max_value, np.max(column))
    return overlapping_columns, max_value
#%%
def align_overlapping_columns_parallel(nn, overlapping_columns, edge_thickness, similarity_threshold, max_value):
    '''
    Merge column ``nn`` with column ``nn+2`` into one combined column,
    using column ``nn+1`` (the overlap column) to repair cells cut by
    the vertical seam between them.
    '''
    left = overlapping_columns[nn].copy()
    right = overlapping_columns[nn + 2].copy()
    overlap_column = overlapping_columns[nn + 1].copy()
    # Repair the seam, then paste the two halves side by side.
    left_aligned, right_aligned = align_patches(
        left, right, overlap_column, edge_thickness, similarity_threshold, max_value, axis=1)
    return np.concatenate([left_aligned, right_aligned], axis=1)
#%%
def concatenate_overlapping_columns(aligned_overlapping_columns, new_M, new_N, patch_width):
    '''
    Paste a list of aligned, overlapping columns into one fused mask of
    shape (new_M, new_N).

    The first column contributes its left 1.5 patch widths, the last its
    right 1.5 patch widths, and every column in between exactly one
    central patch width.
    '''
    half = int(patch_width / 2)
    last_index = len(aligned_overlapping_columns) - 1
    fused_mask = np.zeros((new_M, new_N))
    cursor = 0
    for index, column in enumerate(aligned_overlapping_columns):
        if index == 0:
            width = patch_width + half
            fused_mask[:, cursor:cursor + width] = column[:, :width]
        elif index == last_index:
            width = patch_width + half
            fused_mask[:, cursor:cursor + width] = column[:, half:]
        else:
            width = patch_width
            fused_mask[:, cursor:cursor + width] = column[:, half:half + patch_width]
        cursor += width
    return fused_mask
#%%
def find_edges(mask):
    '''
    Return a boolean image marking pixels whose label differs from the
    pixel above, above-left, or to the left (with edge padding), i.e.
    the boundaries of the labeled objects in ``mask``.
    '''
    padded = np.pad(mask, 1, mode='edge')
    center = padded[1:-1, 1:-1]
    # Compare each pixel with its three already-visited neighbours.
    differs_up = center != padded[:-2, 1:-1]
    differs_up_left = center != padded[:-2, :-2]
    differs_left = center != padded[1:-1, :-2]
    return differs_up | differs_up_left | differs_left
#%%
def split_cells_on_mask(mask):
    '''
    Separate touching labeled objects on ``mask``: keep only foreground
    pixels that do not lie on an object boundary, so adjacent cells
    become disconnected components.
    '''
    boundaries = find_edges(mask)
    return (mask > 0) & ~boundaries
#%%
def remove_small_cells(mask, cell_size_threshold):
    '''
    Drop cells whose pixel area is not larger than ``cell_size_threshold``
    and return a freshly labeled mask.
    '''
    # Disconnect touching cells, then label each connected component.
    separated = split_cells_on_mask(mask)
    labelled, _ = ndi.label(separated)
    # Per-label pixel counts; index 0 is the background and is always dropped.
    areas = np.bincount(labelled.ravel())
    keep = areas > cell_size_threshold
    keep[0] = False
    surviving = keep[labelled]
    # Relabel the surviving cells with consecutive integers.
    relabelled, _ = ndi.label(surviving)
    return relabelled
#%%
def cellpose_segment(model, patch_list, diameter, channels):
    '''
    This function segments a list of RGB images with the appropriate cellpose model.
    It outputs a list of segmentations.

    INPUTS
    ------
    model (str)
        Can be 'cyto' to use Cellpose pre-trained cyto model, 'nuclei' to use
        Cellpose pre-trained nucleus model, or a path to a file containing the
        weights of a custom-trained model. An example of such a file is
        "cellpose_residual_on_style_on_concatenation_off_Cellpose_2021_05_04.236206"

    patch_list (list of numpy arrays)
        List of RGB images to segment. Can also be a single image.

    diameter (float)
        Cellpose cell diameter. If the model is custom-built, then diameter=None.

    channels (list of 2 integers)
        First channel is cyto, second is nucleus. Example:
        channels=[2,3] if you have G=cytoplasm and B=nucleuss

    OUTPUT
    ------
    mask_list (list of numpy arrays)
        One labeled mask per input patch, as returned by cellpose eval.
    '''
    mask_list = []
    # use_GPU is a module-level flag set once at import time.
    if model == 'cyto': # pre-trained cytoplasm model
        cellpose_model = models.Cellpose(gpu=use_GPU, model_type='cyto')
        mask_list, flows, styles, diams = cellpose_model.eval(patch_list, diameter=diameter, flow_threshold=None, channels=channels)
    elif model == 'nuclei': # pre-trained nucleus model
        cellpose_model = models.Cellpose(gpu=use_GPU, model_type='nuclei')
        mask_list, flows, styles, diams = cellpose_model.eval(patch_list, diameter=diameter, flow_threshold=None, channels=channels)
    else: # custom-trained model
        # CellposeModel (not Cellpose) loads weights from a file path;
        # note the fixed flow_threshold=0.4 used for custom models.
        cellpose_model = models.CellposeModel(gpu=use_GPU, pretrained_model=model)
        mask_list, flow_list, styles = cellpose_model.eval(patch_list, channels=channels, flow_threshold=0.4)
    return mask_list
#%%
def get_image_dimensions(img):
    '''
    Return the height (M), width (N) and number of channels (C) of a 2D
    grayscale or multi-channel image. Note that the image must be 2D
    (it cannot be a stack)!

    INPUTS
    ------
    img (numpy array)
        Either an MxN (grayscale) or MxNxC (multi-channel) image.

    OUTPUT
    ------
    M, N, C (int)
        Image height, width and channel count (C=1 for grayscale).

    RAISES
    ------
    ValueError
        If img is not 2- or 3-dimensional. (The original fell through and
        raised a confusing UnboundLocalError in that case.)
    '''
    shape = np.shape(img)
    if len(shape) == 3:    # multi-channel, e.g. RGB
        M, N, C = shape
    elif len(shape) == 2:  # grayscale
        M, N = shape
        C = 1
    else:
        raise ValueError('Expected a 2D grayscale or 3D multi-channel image, '
                         'got an array of dimension %d' % len(shape))
    return M, N, C
#%%
def segment_fused_image_with_cellpose(model, fused, diameter, channels,
                    edge_thickness=60, similarity_threshold=0.7,
                    cell_size_threshold=100, patch_height=512, patch_width=512,
                    num_cpu_cores=None):
    '''
    This is a master function, which takes as input an RGB fused image, and outputs
    the segmented image where each cell is labelled with a unique integer value.
    Pipeline: tile the image into overlapping patches -> segment each patch with
    Cellpose -> stitch patches back (merging cells split across patch edges) ->
    crop to the original size -> drop cells below cell_size_threshold.
    INPUTS
    ------
    model (str)
        Can be 'cyto' to use Cellpose pre-trained cyto model, 'nuclei' to use
        Cellpose pre-trained nucleus model, or a path to a file containing the
        weights of a custom-trained model. An example of such a file is
        "cellpose_residual_on_style_on_concatenation_off_Cellpose_2021_05_04.236206"
    fused (MxNxC numpy array)
        Image to segment.
    diameter (float)
        Cellpose cell diameter. If the model is custom-built, then diameter=None.
    channels (list of 2 integers)
        First channel is cyto, second is nucleus. Example:
        channels=[2,3] if you have G=cytoplasm and B=nucleus
    edge_thickness (int)
        Size of the edge region.
    similarity_threshold (int)
        Overlapping cells which are more similar than similarity_threshold are merged.
    cell_size_threshold (int)
        Minimal area of a cell in pixels.
    patch_width, patch_height (int)
        Width and height of a single patch in pixels.
    num_cpu_cores (int)
        Number of CPU cores used to calculate the overlap between patches.
        Defaults to all available cores minus two.
    OUTPUT
    ------
    filtered_fused_mask (MxN numpy array of int)
        Labelled segmentation mask, cropped to the input size.
    '''
    # Adjacent patches overlap by half a patch so edge cells appear whole
    # in at least one patch.
    overlap = int(patch_height/2)
    print('>>>> CREATING PATCHES.')
    # Make fused image larger s.t. an integer number of patches fit inside it.
    M,N,C = get_image_dimensions(fused)
    if (C==1): # if fused is grayscale, unsqueeze it
        fused = np.reshape(fused, (M,N,1))
    [new_fused, patch_locations] = enlarge_fused_image(fused, patch_height=patch_height, patch_width=patch_width, overlap=overlap)
    num_patches_nn = np.size(patch_locations[1]) # number of patches on horizontal axis
    # Make a list of patches
    patch_list = fused_to_patches(new_fused, patch_locations, patch_height=patch_height, patch_width=patch_width)
    print('>>>> Number of patches to predict: %d'%len(patch_list))
    # Predict patches with cellpose
    print('>>>> STARTING CELLPOSE.')
    mask_list = cellpose_segment(model, patch_list, diameter, channels)
    # Align vertical patches into overlapping columns
    if num_cpu_cores is None:  # identity comparison (was `== None`)
        num_cpu_cores = multiprocessing.cpu_count() - 2
    print('>>>> CREATING OVERLAPPING COLUMNS...')
    print('>>>> Using a parallel pool with ', num_cpu_cores, ' CPU workers.')
    overlapping_columns = Parallel(n_jobs=num_cpu_cores)(delayed(create_overlapping_columns_parallel)(nn, mask_list, patch_locations, edge_thickness, similarity_threshold) for nn in range(0,num_patches_nn))
    overlapping_columns,max_value = make_all_cell_values_unique(overlapping_columns)
    # Align overlapping columns
    print('>>>> ALIGNING OVERLAPPING COLUMNS...')
    print('>>>> Using a parallel pool with ', num_cpu_cores, ' CPU workers.')
    aligned_overlapping_columns = Parallel(n_jobs=num_cpu_cores)(delayed(align_overlapping_columns_parallel)(nn, overlapping_columns, edge_thickness, similarity_threshold, max_value) for nn in range(0,num_patches_nn-1,2))
    print('>>>> CREATING FUSED MASK...')
    # Combine columns into the fused mask
    [new_M,new_N,new_C] = np.shape(new_fused)
    fused_mask = concatenate_overlapping_columns(aligned_overlapping_columns, new_M, new_N, patch_width)
    # Crop back to the original (pre-enlargement) image size.
    fused_mask = fused_mask[0:M,0:N]
    # Remove cells smaller than cell_size_threshold
    filtered_fused_mask = remove_small_cells(fused_mask, cell_size_threshold)
    return filtered_fused_mask
| [
"C.L.M.vandenHeuvel@tudelft.nl"
] | C.L.M.vandenHeuvel@tudelft.nl |
4d924781fb2e425d87633db2e3383355f071c98c | 875415543924eba9506c0bd3fd92739edb2e5717 | /static/MLModels/faceid/extract_embeddings.py | e1db7f7f0c9815a9de7885554e5d83a17c08590d | [] | no_license | leematthewshome/robotAI_version4 | 0845cc325e9c1887e78398d0253df343f7fc0002 | a62dbe795c67d8f5f8c152f2cb9827f5f44c6f75 | refs/heads/master | 2023-08-26T16:50:52.520028 | 2021-11-07T09:10:03 | 2021-11-07T09:10:03 | 279,801,918 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,452 | py | # USAGE
# USAGE: python3 extract_embeddings.py
# Walks the face-image dataset, detects the single strongest face per image
# with an OpenCV DNN detector, embeds it with OpenFace, and pickles the
# (embedding, name) pairs for later classifier training.
# import the necessary packages
from imutils import paths
import numpy as np
import imutils
import pickle
import cv2
import os
# setup paths to the various files and parameters required
thisdir = os.path.dirname(os.path.realpath(__file__))
dataset = os.path.join(thisdir, 'dataset')
embeddings = os.path.join(thisdir, 'output/embeddings.pickle')
embedding_model = os.path.join(thisdir, 'openface_nn4.small2.v1.t7')
# minimum detection confidence required to keep a face
conf_cutoff = .5
# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.join(thisdir, "deploy.prototxt")
modelPath = os.path.join(thisdir, "res10_300x300_ssd_iter_140000.caffemodel")
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(dataset))
# initialize our lists of extracted facial embeddings and corresponding people names
knownEmbeddings = []
knownNames = []
# initialize the total number of faces processed
total = 0
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
    # extract the person name from the image path (its parent directory name)
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]
    # load the image, resize to 600 pixels wide (maintain aspect ratio), and then get dimensions
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=600)
    (h, w) = image.shape[:2]
    # construct a blob from the image
    imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)
    # apply OpenCV's deep learning-based face detector to localize faces in the input image
    detector.setInput(imageBlob)
    detections = detector.forward()
    # ensure at least one face was found
    # NOTE(review): detections is a 4-D blob, so len(detections) is its first
    # dimension; the confidence test below is the effective filter.
    if len(detections) > 0:
        # assume each image has only ONE face, so find the bounding box with
        # the largest probability.  BUGFIX: use a dedicated index `j` instead
        # of shadowing the outer loop counter `i`.
        j = np.argmax(detections[0, 0, :, 2])
        confidence = detections[0, 0, j, 2]
        # ensure detection meets our minimum probability test (thus helping filter out weak detections)
        if confidence > conf_cutoff:
            # compute the (x, y)-coordinates of the bounding box for the face
            box = detections[0, 0, j, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # extract the face ROI and grab the ROI dimensions
            face = image[startY:endY, startX:endX]
            (fH, fW) = face.shape[:2]
            # ensure the face width and height are sufficiently large
            if fW < 20 or fH < 20:
                continue
            # construct a blob for the face ROI, then pass the blob
            # through our face embedding model to obtain the 128-d
            # quantification of the face
            faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
            embedder.setInput(faceBlob)
            vec = embedder.forward()
            # add the name of the person + corresponding face embedding to their respective lists
            knownNames.append(name)
            knownEmbeddings.append(vec.flatten())
            total += 1
# dump the facial embeddings + names to disk
print("[INFO] serializing {} encodings...".format(total))
data = {"embeddings": knownEmbeddings, "names": knownNames}
# use a context manager so the file is closed even if pickling fails
with open(embeddings, "wb") as f:
    f.write(pickle.dumps(data))
| [
"noreply@github.com"
] | leematthewshome.noreply@github.com |
569e0a217f503efe80f95513dc0f87bf862b56d3 | a0fea99a5f72f0e12d6907b0228d5687d91e4cac | /Chapter02_PythonBasics/05_pr_add2numbers.py | d12cbeba55db217b10e571615a4ec650b63d8206 | [] | no_license | AshwineeKSharma/PythonLearning | 3bf405ee81b0cb0f77f0549d33660a95b9e6ac97 | 8cf731e98e26ae17bffd3e439b60698e67b62693 | refs/heads/main | 2023-05-09T10:46:58.183411 | 2021-06-01T16:22:34 | 2021-06-01T16:22:34 | 372,888,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | a=input("Enter Your First Number : ")
# convert the first operand (read by input() above) from str to int
a=int(a)
# read and convert the second operand; input() always returns a string
b=input("Enter Your Second Number : ")
b=int(b)
# both casts are required so `+` performs numeric addition, not concatenation
print("The Sum of given two numbers is : ",a+b)
| [
"noreply@github.com"
] | AshwineeKSharma.noreply@github.com |
b31fc159bed009e0da9ef527edc234ed909b9940 | 82fef9bd3ee9d0d1c8208061a4deb0a1ffe4343a | /init_generator.py | f6c54814aef426df3b5b5782b91d3497fe1ccbfa | [] | no_license | simonkurek/init-py-files-generator | 1140a801b27cc7ecad843024decd090214dafa4a | a9be45f95ff8f0c1f374eb0c18bc0e46e156352e | refs/heads/main | 2023-07-30T17:58:34.360462 | 2021-10-05T10:05:05 | 2021-10-05T10:05:05 | 413,763,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | import os
class InitBuilder:
    """Recursively create an empty ``__init__.py`` in every sub-directory.

    Directory names containing a '.' are skipped (crude filter for hidden
    folders, preserved from the original behaviour).
    """

    def __init__(self):
        # number of __init__.py files created so far
        self.counter = 0

    def run(self):
        """Generate __init__.py files below the current working directory."""
        print('\n === generating __init__ files === \n')
        self.generate()
        print('\n === done === ')
        print(f': Created {self.counter} new files :')

    def generate(self, cur_dir=None):
        """Walk ``cur_dir`` (default: CWD) and create __init__.py files.

        One file is created per sub-directory, recursing depth-first.
        """
        if cur_dir is None:
            cur_dir = os.getcwd()
        for entry in os.listdir(cur_dir):
            entry_path = os.path.join(cur_dir, entry)
            # BUGFIX: the original called os.path.isfile(entry) with the bare
            # name, which resolves against the *current working directory*,
            # not cur_dir, so nested entries were misclassified. Check the
            # full path (and use isdir, which is the real intent).
            if os.path.isdir(entry_path) and "." not in entry:
                self.counter += 1
                init_file = os.path.join(entry_path, "__init__.py")
                print(f'{init_file} created')
                # BUGFIX: the original leaked the handle from open(..., "w+");
                # the context manager closes it deterministically.
                with open(init_file, "w+"):
                    pass
                self.generate(entry_path)
if __name__ == "__main__":
    # Script entry point: build the runner and generate the __init__ files.
    InitBuilder().run()
| [
"noreply@github.com"
] | simonkurek.noreply@github.com |
52dd006fb14a08d3b2ea787352a7f9a93f7ba1bf | 3db59f640dca238c666961a54f67d8f1177b8424 | /marina.py | 7a4fb94af83aa50eafc197e73a5cb38ed900c339 | [] | no_license | Prodject/marina-testing | 36eec697d3b697e408d5c9e21c0122314d11cef7 | cd872c2b5233e9dcfc3624d98138286f63401dd6 | refs/heads/master | 2022-03-21T08:38:50.674991 | 2019-12-06T15:46:13 | 2019-12-06T15:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,486 | py | #!/data/data/com.thecrackertechnology.andrax/ANDRAX/bin/python3
import os
# Refuse to run without root privileges (the scan modules need them).
if(os.getuid() >0):
    print("Be root, motherfucker!")
    exit()
from marina_modules.info import *
from marina_modules.scan import *
from ctypes import *
import readline
import socket
###
#
# ANSI colour escape codes used throughout the CLI output.
NRM = "\x1B[0m"
RED = "\x1B[31m"
GRN = "\x1B[32m"
YEL = "\x1B[33m"
BLU = "\x1B[34m"
MAG = "\x1B[35m"
CYN = "\x1B[36m"
WHT = "\x1B[37m"
#
###
# xterm OSC escape: set the terminal window title to M.A.R.I.N.A.
print("\033];M.A.R.I.N.A\007")
# Load a native shared library and call its banner-printing entry point.
banner_lib = "marina-libs/banner.so"
banner_func = CDLL(banner_lib)
banner_func.print_banner()
# Valid values for the `set workmode` command, ordered by aggressiveness.
workmodelist = ("passive", "low", "high", "intrusive")
has_sec_system = 0
# global variables
# Session state mutated by the `set` / `info` / `scan` commands below and
# read by show().
host = ""
domain = 0
ip = ""
host_ports = []
datas = []
products = []
scan_services = []
web_tech = {}
isp = ""
asn = ""
device_type = ""
isrouter = 0
workmode = "passive"
# end global variables
def show(show_full):
    '''Pretty-print the current session state (host, ISP, ports, products).

    show_full: when 1, additionally dump the raw banner data captured for
    each open port.

    Reads the module-level globals populated by the `set`/`info`/`scan`
    commands. May raise IndexError when host_ports is empty; the caller's
    try/except in the command loop catches it.
    '''
    print("\n=========================\x1B[1m\x1B[31mSTART\x1B[0m=========================")
    print("\nWorkmode: {}{}{}".format(RED, workmode, NRM))
    print("Host: {}{}{}".format(RED, host, NRM))
    if(domain):
        print("Domain IP Addres: {}{}{}".format(RED, ip, NRM))
    if(asn != ""):
        print("ASN: {}{}{}".format(RED, asn, NRM))
    if(isp == ""):
        print("ISP: {}NULL{}".format(RED, NRM))
    else:
        # Known CDN/WAF providers are flagged as a "Security system".
        if("sucuri" in isp.lower() or "akamai" in isp.lower() or "incapsula" in isp.lower() or "cloudfront" in isp.lower() or "stackpath" in isp.lower() or "fastly" in isp.lower() or "cloudflare" in isp.lower()):
            # NOTE(review): this assigns a *local* has_sec_system (no
            # `global` declaration), so the module-level flag is untouched;
            # kept as-is to preserve behaviour — confirm intent.
            has_sec_system = 1
            print("ISP: {}{}{} {}[{} {}Security system{} {}]{}".format(RED, isp, NRM, WHT, NRM, YEL, NRM, WHT, NRM))
        else:
            print("ISP: {}{}{}".format(RED, isp, NRM))
    if(host_ports[0]):
        print("Ports: {}{}{}".format(RED, host_ports, NRM))
    if(device_type != ""):
        print("Possible device type: {}{}{}".format(RED, device_type, NRM))
    if(host_ports[0]):
        num=0
        num2=0
        num3=0
        # Resolve one product name per port: prefer the passive-info data,
        # then the locally scanned service name, else "Unknown".
        for z in host_ports:
            try:
                products.insert(num2, datas[num2]['product'])
            except:
                try:
                    products.insert(num2, scan_services[num2])
                except:
                    products.insert(num2, "Unknown")
            num2=num2+1
        print("Products: {}[{}\n".format(WHT, NRM))
        for y in host_ports:
            print("       {}{}{} == {}{}{}".format(RED, host_ports[num3], NRM, YEL, products[num3], NRM))
            num3=num3+1
        print("\n{}]{}".format(WHT, NRM))
        if(show_full == 1):
            # Full mode: print the captured banner data for every port.
            for x in host_ports:
                try:
                    if(datas[num]['data'] == ""):
                        print("\nPort {}{}{} has no data".format(RED, host_ports[num], NRM))
                    else:
                        print("\nPort {}{}{} has data:\n{}{}{}".format(RED, host_ports[num], NRM, YEL, datas[num]['data'], NRM))
                except:
                    print("\nPort {}{}{} has no data".format(RED, host_ports[num], NRM))
                num=num+1
    # BUGFIX: the original passed the template and the dict as two separate
    # print() arguments, printing a literal "{}"; format the value instead.
    print("Web technologies: {}".format(web_tech))
    print("\n==========================\x1B[1m\x1B[31mEND\x1B[0m==========================")
# Interactive REPL: read a command line, dispatch on the first token, and
# mutate the session globals. Any uncaught error in a handler is reported
# by the outer except without killing the loop.
while True:
    line = input('\n\001\x1B[37m\002M\001\x1B[0m\002\001\x1B[31m\002.\001\x1B[37m\002A\001\x1B[0m\002\001\x1B[31m\002.\001\x1B[37m\002R\001\x1B[0m\002\001\x1B[31m\002.\001\x1B[37m\002I\001\x1B[0m\002\001\x1B[31m\002.\001\x1B[37m\002N\001\x1B[0m\002\001\x1B[31m\002.\001\x1B[37m\002A\001\x1B[0m\002 \001\x1B[31m\002:\001\x1B[37m\002>\001\x1B[0m\002 ')
    line = line.split()
    try:
        # exit commands
        if line[0] == 'quit' or line[0] == 'q' or line[0] == 'exit':
            break
        # `set host <target>` / `set workmode <mode>`
        if line[0] == 'set':
            try:
                if line[1] == "host":
                    # Reset all per-target state before switching hosts.
                    host_ports[:] = []
                    datas[:] = []
                    products[:] = []
                    scan_services[:] = []
                    web_tech.clear()
                    isp = ""
                    asn = ""
                    has_sec_system = 0
                    domain = 0
                    device_type = ""
                    try:
                        # gethostbyname_ex returns (hostname, aliases, ips);
                        # if the input equals the first resolved IP it was an
                        # IP literal, otherwise treat it as a domain name.
                        isdomain = socket.gethostbyname_ex(line[2])
                        if isdomain[0] == isdomain[2][0]:
                            print("\n[ \x1B[1m+\x1B[0m ] Setting host to: {}{}{}".format(YEL,line[2],NRM))
                            ip = isdomain[0]
                            host = line[2]
                        else:
                            print("\n[ \x1B[1m+\x1B[0m ] Setting host to: {}{}{} as a domain name".format(YEL,line[2],NRM))
                            ip = isdomain[2][0]
                            host = line[2]
                            domain = 1
                    except:
                        print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] \"{}{}{}\" not appear be a valid domain or ip address".format(RED,line[2],NRM))
                elif line[1] == "workmode":
                    try:
                        if line[2] in workmodelist:
                            print("\n[ \x1B[1m+\x1B[0m ] Using: {}{}{} as workmode".format(YEL, line[2], NRM))
                            workmode = line[2]
                        else:
                            print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] \"{}{}{}\" not found in workmode\n\n{}[{}{}{}]{}".format(RED,line[2],NRM, RED, MAG, workmodelist, RED, NRM))
                    except:
                        print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] \"{}{}{}\" not appear be a valid workmode".format(RED, line[2],NRM))
                else:
                    print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] \"{}{}{}\" not found in set options".format(RED,line[1],NRM))
            except:
                print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] Syntax {}ERROR{} in set".format(RED, NRM))
        # `show [full]` — print current session state
        elif line[0] == 'show':
            try:
                if(line[1] == 'full'):
                    show(1)
            except:
                # No (or unrecognised) argument: show the summary view.
                show(0)
        # `info` — passive information gathering for the current ip
        elif line[0] == 'info':
            print("\n[ \x1B[1m+\x1B[0m ] Running information module")
            print("\n\t[ \x1B[1minfo\x1B[0m ] Starting passive information")
            jsonresult = get_info(ip)
            host_ports[:] = jsonresult['ports']
            datas[:] = jsonresult['data']
            isp = jsonresult['isp']
            asn = jsonresult['asn']
            print("\t[ \x1B[1minfo\x1B[0m ] Checking device type by passive analysis")
            num=0
            # A 'wap' devicetype entry on any port marks the host as a router.
            for x in host_ports:
                try:
                    if("wap" in jsonresult['data'][num]['devicetype'].lower()):
                        device_type="router"
                except:
                    None
                num=num+1
            num2=0
            print("\t[ \x1B[1minfo\x1B[0m ] Try find Web technologies")
            #print(jsonresult['data'][3]['http']['components'])
            # Collect HTTP component fingerprints per port into web_tech.
            for x in host_ports:
                try:
                    if(len(jsonresult['data'][num2]['http']['components']) != 0):
                        web_tech[num2] = jsonresult['data'][num2]['http']['components']
                        print("\t\t[ \x1B[1m\x1B[31mFound\x1B[0m ] {}{}{} as Web technology in {}{}{}".format(RED, web_tech[num2], NRM, RED, host_ports[num2], NRM))
                except:
                    None
                num2=num2+1
            print("\n[ \x1B[1mDONE\x1B[0m ]")
        # `scan` — active scan, aggressiveness chosen by workmode
        elif line[0] == 'scan':
            datas[:] = []
            scan_services[:] = []
            print("\n[ \x1B[1m+\x1B[0m ] Running Scan module")
            print("\n\t[ \x1B[1minfo\x1B[0m ] Scan with mode: {}{}{}".format(RED, workmode, NRM))
            if(workmode == 'passive'):
                print("\t\t[ Scan ] this can take a while...")
                host_ports[:], scan_services = scan_passive(ip)
            if(workmode == 'low'):
                print("\t\t[ Scan ] this can take a while...")
                host_ports[:], device_type, scan_services = scan_low(ip)
            if(workmode == 'high'):
                print("\t\t[ Scan ] this realy can take a while...")
                host_ports[:], device_type, scan_services = scan_high(ip)
            # NOTE(review): 'intrusive' currently reuses scan_high — confirm
            # whether a dedicated intrusive scan was intended.
            if(workmode == 'intrusive'):
                print("\t\t[ Scan ] Holy... this realy, realy will take a while...")
                host_ports[:], device_type, scan_services = scan_high(ip)
            print("\n[ \x1B[1mDONE\x1B[0m ]")
        # `anal` — analysis of gathered data (router detection)
        elif line[0] == 'anal':
            print("\n[ \x1B[1m+\x1B[0m ] Running analysis module")
            print("\n\t[ \x1B[1minfo\x1B[0m ] Analysis with mode: {}{}{}".format(RED, workmode, NRM))
            if("wap" in device_type.lower() or "router" in device_type.lower() or "switch" in device_type.lower() or "embedded" in device_type.lower() or "Cisco" in device_type.lower()):
                isrouter = 1
                print("\n\t\t[ \x1B[1mROUTER ] The device is a router type")
                print("\n\t\t[ \x1B[1mROUTER ] Checking the exploitability")
        elif line[0] == 'help' or line[0] == '?':
            print("\nReal Hackers don't need help, get out!")
        else:
            print("\n[ \x1B[1m\x1B[31mERROR\x1B[0m ] Command \"{}{}{}\" not found".format(RED, line[0], NRM))
    except Exception as e:
        # Catch-all so a bad command (e.g. empty input -> IndexError) never
        # terminates the REPL.
        print("\n[ Error ] %s" % e)
| [
"noreply@github.com"
] | Prodject.noreply@github.com |
bc9425286a08128bbb6915d4e98ee4325111ca80 | d01319983ddaa533a16dbba0d486934788666590 | /tests/integration/test_database/test_model/test_user.py | 9ca1a91156f3edc58da76c241a97ad7dca404d2a | [
"Apache-2.0"
] | permissive | Feliconut/refitt | 790899d46a7dc84cf099cceba609e68ff52754c1 | 3f226912ec8d303a067b7c2b794afbb15af00cf9 | refs/heads/master | 2023-02-28T13:51:46.050580 | 2021-01-27T20:58:28 | 2021-01-27T20:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,151 | py | # Copyright REFITT Team 2019. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the Apache License (v2.0) as published by the Apache Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Apache License for more details.
#
# You should have received a copy of the Apache License along with this program.
# If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
"""Database user model integration tests."""
# external libs
import pytest
from sqlalchemy.exc import IntegrityError
# internal libs
from refitt.database.model import User, NotFound, Facility, FacilityMap
from tests.integration.test_database.test_model.conftest import TestData
from tests.integration.test_database.test_model import json_roundtrip
class TestUser:
    """Tests for `User` database model.

    These tests run against a seeded test database (see the `testdata`
    fixture) and several of them assert absolute row counts and then
    restore state manually, so they are sensitive to seed contents and
    to side effects from other tests.
    """
    def test_init(self, testdata: TestData) -> None:
        """Create user instance and validate accessors."""
        for data in testdata['user']:
            user = User(**data)
            for key, value in data.items():
                assert getattr(user, key) == value
    def test_dict(self, testdata: TestData) -> None:
        """Test round-trip of dict translations."""
        for data in testdata['user']:
            user = User.from_dict(data)
            assert data == user.to_dict()
    def test_tuple(self, testdata: TestData) -> None:
        """Test tuple-conversion."""
        for data in testdata['user']:
            user = User.from_dict(data)
            assert tuple(data.values()) == user.to_tuple()
    def test_embedded(self, testdata: TestData) -> None:
        """Tests embedded method to check JSON-serialization."""
        for data in testdata['user']:
            assert data == json_roundtrip(User(**data).to_json())
    def test_from_id(self, testdata: TestData) -> None:
        """Test loading user profile from `id`."""
        # NOTE: `id` not set until after insert
        for i, user in enumerate(testdata['user']):
            assert User.from_id(i + 1).alias == user['alias']
    def test_from_id_missing(self) -> None:
        """Test exception on missing user `id`."""
        with pytest.raises(NotFound):
            User.from_id(-1)
    def test_id_already_exists(self) -> None:
        """Test exception on user `id` already exists."""
        with pytest.raises(IntegrityError):
            User.add({'id': 1, 'first_name': 'Bruce', 'last_name': 'Wayne', 'email': 'bruce@waynecorp.com',
                      'alias': 'batman', 'data': {'user_type': 'amateur'}})
    def test_from_email(self, testdata: TestData) -> None:
        """Test loading user profile from `email`."""
        for user in testdata['user']:
            assert User.from_email(user['email']).email == user['email']
    def test_from_email_missing(self) -> None:
        """Test exception on missing user `email`."""
        with pytest.raises(NotFound):
            User.from_email('batman@justiceleague.org')
    def test_email_already_exists(self) -> None:
        """Test exception on user `email` already exists."""
        with pytest.raises(IntegrityError):
            User.add({'first_name': 'Bruce', 'last_name': 'Wayne', 'email': 'bourne@cia.gov',
                      'alias': 'batman', 'data': {'user_type': 'amateur'}})
    def test_from_alias(self, testdata: TestData) -> None:
        """Test loading user profile from `alias`."""
        for user in testdata['user']:
            assert User.from_alias(user['alias']).alias == user['alias']
    def test_from_alias_missing(self) -> None:
        """Test exception on missing user `alias`."""
        with pytest.raises(NotFound):
            User.from_alias('batman')
    def test_alias_already_exists(self) -> None:
        """Test exception on user `alias` already exists."""
        with pytest.raises(IntegrityError):
            User.add({'first_name': 'Bryce', 'last_name': 'Wayne', 'email': 'bruce@waynecorp.com',
                      'alias': 'tomb_raider', 'data': {'user_type': 'amateur'}})
    def test_update_email(self) -> None:
        """Update email address of user profile."""
        # Change the address, verify, then restore the seeded value.
        old_email, new_email = 'bourne@cia.gov', 'jason.bourne@cia.gov'
        user = User.from_alias('delta_one')
        assert user.email == old_email
        User.update(user.id, email=new_email)
        assert User.from_alias('delta_one').email == User.from_email(new_email).email
        User.update(user.id, email=old_email)
        assert User.from_alias('delta_one').email == User.from_email(old_email).email
    def test_update_data(self) -> None:
        """Update custom data of user profile."""
        # A bare keyword (special_field=42) merges into `data`; passing
        # data=... replaces the whole mapping.
        old_data = {'user_type': 'amateur'}
        new_data = {'user_type': 'amateur', 'special_field': 42}
        user_id = User.from_alias('tomb_raider').id
        assert User.from_id(user_id).data == old_data
        User.update(user_id, special_field=42)
        assert User.from_id(user_id).data == new_data
        User.update(user_id, data=old_data)
        assert User.from_id(user_id).data == old_data
    def test_facilities(self) -> None:
        """Access associated user facilities."""
        facilities = User.from_alias('tomb_raider').facilities()
        assert all(isinstance(facility, Facility) for facility in facilities)
        assert len(facilities) == 2
    def test_add_facility(self) -> None:
        """Test adding a facility and then removing it."""
        user = User.from_alias('tomb_raider')
        facilities = user.facilities()
        assert len(facilities) == 2 and set(f.name for f in facilities) == {'Croft_1m', 'Croft_4m'}
        Facility.add({'name': 'Croft_10m', 'latitude': -25.5, 'longitude': -69.25, 'elevation': 5050,
                      'limiting_magnitude': 20.5})
        new_facility = Facility.from_name('Croft_10m')
        user.add_facility(new_facility.id)
        facilities = user.facilities()
        assert len(facilities) == 3 and set(f.name for f in facilities) == {'Croft_1m', 'Croft_4m', 'Croft_10m'}
        # Clean up: detach the mapping first, then delete the facility row.
        user.delete_facility(new_facility.id)
        Facility.delete(new_facility.id)
        facilities = user.facilities()
        assert len(facilities) == 2 and set(f.name for f in facilities) == {'Croft_1m', 'Croft_4m'}
    def test_delete(self) -> None:
        """Add a new user record and then remove it."""
        assert User.count() == 4
        User.add({'first_name': 'James', 'last_name': 'Bond', 'email': 'bond@secret.gov.uk',
                  'alias': '007', 'data': {'user_type': 'amateur', 'drink_of_choice': 'martini'}})
        assert User.count() == 5
        assert User.from_alias('007').last_name == 'Bond'
        User.delete(User.from_alias('007').id)
        assert User.count() == 4
    def test_delete_missing(self) -> None:
        """Test exception on attempt to delete non-existent user."""
        with pytest.raises(NotFound):
            User.delete(-1)
    def test_delete_facility_map_cascade(self) -> None:
        """Create a new user, with facility, then remove."""
        # Deleting the user must cascade to FacilityMap rows but leave the
        # Facility row itself intact (verified by the count asserts below).
        assert User.count() == 4 and Facility.count() == 4 and FacilityMap.count() == 4
        User.add({'first_name': 'James', 'last_name': 'Bond', 'email': 'bond@secret.gov.uk',
                  'alias': '007', 'data': {'user_type': 'amateur', 'drink_of_choice': 'martini'}})
        user = User.from_alias('007')
        Facility.add({'name': 'Bond_4m', 'latitude': -25.5, 'longitude': -69.25, 'elevation': 5050,
                      'limiting_magnitude': 17.5, 'data': {'telescope_design': 'reflector'}})
        facility = Facility.from_name('Bond_4m')
        user.add_facility(facility.id)
        assert user.facilities()[0].to_dict() == facility.to_dict()
        assert User.count() == 5 and Facility.count() == 5 and FacilityMap.count() == 5
        User.delete(user.id)
        assert User.count() == 4 and Facility.count() == 5 and FacilityMap.count() == 4
        Facility.delete(facility.id)
        assert Facility.count() == 4
| [
"glentner@purdue.edu"
] | glentner@purdue.edu |
4be7bcbe50f4766ad9eab4a0c255e7daccaa85e3 | eec0c7536df052bd4db0be7baf81800fd8f8b8b1 | /user/models.py | 8a31192d3a7f926082a4e9d88750efa1104a09b2 | [] | no_license | ProstoyVadila/stud_fastapi | 10d1359217958488fab2885fab4cb2aefa0e01fd | f477ca777cc3fe9db8d65defe43f6ea57e072550 | refs/heads/main | 2023-03-25T00:33:47.527305 | 2021-03-18T17:17:27 | 2021-03-18T17:17:27 | 349,032,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from sqlalchemy import Column, String, Integer, DateTime, Boolean
from sqlalchemy.orm import relationship
from core.db import Base
class User(Base):
    # SQLAlchemy ORM model for application accounts (table "user").
    __tablename__ = 'user'
    # surrogate primary key
    id = Column(Integer, primary_key=True, index=True, unique=True)
    # unique display/login name
    name = Column(String, unique=True)
    # unique contact address
    email = Column(String, unique=True)
    # NOTE(review): plain String column — presumably the caller stores a
    # hash here, never a raw password; confirm at the write site
    password = Column(String)
    created_at = Column(DateTime)
    # accounts start inactive until explicitly activated
    is_active = Column(Boolean, default=False)
    # one-to-many: pairs with Post.user via back_populates
    posts = relationship('Post', back_populates='user')
"vadim.gorbachev.main@yandex.ru"
] | vadim.gorbachev.main@yandex.ru |
bf86f7c4a309efc2fa55aea60cc521355ebcc6bd | cd1616ec531e9afbd8fab83b23c00425e6cc31fb | /accounts/views.py | fc5152871bb721b19ecc4eb2294de822cad7963b | [] | no_license | muhtutorials/marketplace | 5a7497211687633afdb99289af72e76856d3410a | 3c9305d1c04f7ef1978678158320f331ddc35eac | refs/heads/master | 2022-04-30T22:50:28.943067 | 2019-10-17T18:55:07 | 2019-10-17T18:55:07 | 215,484,246 | 0 | 0 | null | 2022-04-22T22:34:47 | 2019-10-16T07:25:30 | Python | UTF-8 | Python | false | false | 1,018 | py | from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from django.contrib.auth import authenticate, login
from .forms import UserSignupForm
def signup(request):
    """Register a new account and log the user in immediately on success."""
    if request.method != 'POST':
        # Plain GET: render an empty registration form.
        return render(request, 'registration/signup.html', {'form': UserSignupForm()})
    form = UserSignupForm(request.POST)
    if form.is_valid():
        form.save()
        # save username and password to authenticate and login user automatically after registration
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password1')
        messages.success(request, f'Account with username "{username}" has been created')
        user = authenticate(username=username, password=password)
        if user is None:
            return HttpResponse('User does not exist')
        login(request, user)
        return redirect('/')
    # Invalid submission: re-render with the bound form so errors display.
    return render(request, 'registration/signup.html', {'form': form})
| [
"igorwho@yandex.ru"
] | igorwho@yandex.ru |
8a1ef89217200299ffd45a3c2847028343e7e4b7 | 1188692057ea5ec79600f72a94dd5477d1e499dc | /myprotfolio/settings.py | 714b7d1f3ba501e05bf18407fdcbed14f7309168 | [] | no_license | MuizZer07/myportfolio | 87a37604e889d505bd52f19357d3f8696831dae1 | 5ce48f3346632b5f9771f70b827df71c00613656 | refs/heads/master | 2021-09-22T17:16:35.969843 | 2021-09-10T19:25:15 | 2021-09-10T19:25:15 | 165,716,334 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | """
Django settings for myprotfolio project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8$%7$@!%^rb$i2lxyi)vixpf(rnl$af7utx7c&90lo$m5(u^d$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myprotfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myprotfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'myprotfolio/static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"muizxzer07@gmail.com"
] | muizxzer07@gmail.com |
7d53521e051587f422a5351a91ca2fbc95c0fa26 | 5eb165f282956ba646c1b11bf10856f9bb9c0196 | /core_business.py | ac3da46601fca1791e5f46c4211586f269aecd7b | [] | no_license | rakamoviz/crdjst-py | 915e0aa55f9acb7f1394dd460ecdecbc0737d400 | 213e4053e26b572f4f7469d34a060009c0fd20e5 | refs/heads/master | 2023-01-04T02:45:25.140576 | 2020-10-27T21:38:06 | 2020-10-27T21:38:06 | 307,812,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | import glob
import functools
import re
import importlib
class CoreBusiness:
def __init__ (self, logger):
self.logger = logger
self.rate_fetchers = functools.reduce(
lambda rate_fetchers, filename: CoreBusiness._filename_to_rate_fetchers(
logger, rate_fetchers, filename
), glob.glob('rate_fetchers/*.py'), {}
)
@staticmethod
def _filename_to_rate_fetchers (logger, rate_fetchers, filename):
key = re.match("^rate_fetchers/(.+)\.py$", filename).groups(0)[0]
rate_fetcher = importlib.import_module(f"rate_fetchers.{key}")
rate_fetchers[key] = rate_fetcher.RateFetcher(logger)
return rate_fetchers
def obtain_rates (self, date_str):
return [{item[0]: item[1].obtain_rate(date_str)} for item in self.rate_fetchers.items()] | [
"raka.moviz@gmail.com"
] | raka.moviz@gmail.com |
410b063e5d57e9ed360e356f1551b8542fa392ad | af7043a61ccd0264c076388e8e6536ba12f6f8ee | /icp9/Source/icp_9.3.py | fb940929d0db6a3187a0a1ab405d63176a60377e | [] | no_license | BhavanaDeepthi/python-deep-learning | 36606ae85d4b783dfddcb5f37ed74f1ae64bbd9a | fd36c3a5f4ae77306acb26453ccc6a69700e5173 | refs/heads/master | 2020-04-18T16:18:35.483377 | 2019-05-03T22:16:52 | 2019-05-03T22:16:52 | 167,630,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | from keras import Sequential
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Dense
from keras.utils import to_categorical
(train_images,train_labels),(test_images, test_labels) = mnist.load_data()
#display the first image in the training data
plt.imshow(train_images[0,:,:],cmap='gray')
plt.title('Ground Truth : {}'.format(train_labels[0]))
# plt.show()
#process the data
#1. convert each image of shape 28*28 to 784 dimensional which will be fed to the network as a single feature
dimData = np.prod(train_images.shape[1:])
train_data = train_images.reshape(train_images.shape[0],dimData)
test_data = test_images.reshape(test_images.shape[0],dimData)
#convert data to float and scale values between 0 and 1
train_data = train_data.astype('float')
test_data = test_data.astype('float')
#scale data
train_data /=255.0
test_data /=255.0
#change the labels frominteger to one-hot encoding
train_labels_one_hot = to_categorical(train_labels)
test_labels_one_hot = to_categorical(test_labels)
#creating network
model = Sequential()
model.add(Dense(512, activation='tanh', input_shape=(dimData,)))
model.add(Dense(512, activation='tanh'))
model.add(Dense(512, activation='tanh'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=20, verbose=1,
validation_data=(test_data, test_labels_one_hot))
[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss, test_acc))
| [
"47044736+BhavanaDeepthi@users.noreply.github.com"
] | 47044736+BhavanaDeepthi@users.noreply.github.com |
98e9d6f2db08bf6cf86941a5f918b71b15225973 | 5b10b6efa049014f0d00f81b148d94c429286b66 | /Normal Serialization/withRestApi_1/test.py | c48f766bf72faa08a15eec0ca7534391eefe6d65 | [] | no_license | Ruchika-Munde/Rest_Task | dff657aed041ac6925590f423301f0cae7599f6c | ea741889927ed0fa2a1ba9c2311304671680c6bf | refs/heads/master | 2022-12-21T05:21:00.623464 | 2020-09-09T08:06:24 | 2020-09-09T08:06:24 | 294,044,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import requests
import json
BASE_URL='http://127.0.0.1:8000/'
ENDPOINT='api/'
# def get_resources(id=None):
# data={}
# if id is not None:
# data={
# 'id':id
# }
# resp=requests.get(BASE_URL+ENDPOINT,data=json.dumps(data))
# print(resp.status_code)
# print(resp.json())
# get_resources()
# def create_resources():
# new_emp={
# 'eno':400,
# 'ename':'shiva',
# 'esal':4000,
# 'eaddr':'pune'
# }
# r=requests.post(BASE_URL+ENDPOINT,data=json.dumps(new_emp))
# print(r.status_code)
# print(r.json())
# create_resources()
def update_resource(id):
new_data={
'id':id,
# 'eno':600,
'ename':'RUCHA ',
'esal':80000,
#'eaddr':'Nagpur',
}
r=requests.put(BASE_URL+ENDPOINT,data=json.dumps(new_data))
print(r.status_code)
print(r.json())
update_resource(3)
# def delete_resource(id):
# data={
# 'id':id,
# }
# r=requests.delete(BASE_URL+ENDPOINT,data=json.dumps(data))
# print(r.status_code)
# print(r.json())
# delete_resource(4) | [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
cf2892f4fa4b409121851f316950b77e8e641749 | afbc18c477f414db1c6f402a520943fce36db427 | /measures/DLcoloc/models.py | adcba1282dfce3894663ca79073f3c86b4febf73 | [
"Apache-2.0"
] | permissive | metaspace2020/coloc | 465b68806bbff66607b7b6c64776fea36dc19aa7 | 3941b285a73b9685fd44856e59149acff273f705 | refs/heads/master | 2021-07-09T13:28:02.599807 | 2020-10-07T15:27:04 | 2020-10-07T15:27:04 | 199,397,256 | 6 | 4 | Apache-2.0 | 2019-12-23T21:16:47 | 2019-07-29T07:02:24 | Vue | UTF-8 | Python | false | false | 10,621 | py | from keras.models import Model
from keras.layers import Dense, Input
from keras.layers import Convolution2D, GlobalAveragePooling2D
from keras.layers import Average, Subtract, Concatenate, Lambda
from keras import backend as K
from keras.losses import mean_squared_error
from keras.optimizers import Adam, SGD
from keras.applications.xception import Xception
try:
import h5py
except ImportError:
h5py = None
import numpy as np
from keras.callbacks import Callback
import warnings
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self, filepath, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1, layer_to_save='xception'):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
self.layer_to_save = layer_to_save
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch + 1, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s'
% (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.get_layer(self.layer_to_save).save_weights(filepath, overwrite=True)
else:
self.model.get_layer(self.layer_to_save).save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.get_layer(self.layer_to_save).save_weights(filepath, overwrite=True)
else:
self.model.get_layer(self.layer_to_save).save(filepath, overwrite=True)
def correlation(a):
x, y = a
mx = K.mean(x, axis=1, keepdims=True)
my = K.mean(y, axis=1, keepdims=True)
xm, ym = x - mx, y - my
r_num = K.sum(xm * ym, axis=1, keepdims=True)
r_den = K.sqrt(K.epsilon() + K.sum(K.square(xm), axis=1, keepdims=True) * K.sum(K.square(ym), axis=1, keepdims=True))
r = r_num / r_den
r = K.clip(r, -1.0, 1.0)
return r
def correlation_loss(a):
return 1 - correlation(a)
def xception(input_channels=2, lr=1e-4, weights=None, optimizer='adam'):
if K.image_data_format() == 'channels_last':
input_shape = (None, None, input_channels)
input_shape_xception = (None, None, 3)
else:
input_shape = (input_channels, None, None)
input_shape_xception = (3, None, None)
xception_model = Xception(input_shape=input_shape_xception, include_top=False, weights='imagenet')
main_input = Input(input_shape)
x = Convolution2D(3, (1, 1), kernel_initializer='he_normal')(main_input)
x = xception_model(x)
x = GlobalAveragePooling2D(name='pool1')(x)
output_activation = 'linear'
main_output = Dense(1, activation=output_activation, name='predictions')(x)
model = Model(main_input, main_output, name='xception')
if weights is not None:
print('Load weights from', weights)
model.load_weights(weights)
if optimizer.lower() == 'adam':
optimizer = Adam(lr, decay=0.0005)
print('Optimizer is Adam')
elif optimizer.lower() == 'sgd':
optimizer = SGD(lr, momentum=0.95, decay=0.0005, nesterov=True)
print('Optimizer is SGD')
else:
raise ValueError('Unknown optimizer')
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
return model
def pi_model(input_channels=2, lr=1e-4, weights=None, optimizer='adam', loss_weights=(0.5, 0.5)):
""" Pi-model. https://arxiv.org/pdf/1610.02242.pdf """
if K.image_data_format() == 'channels_last':
input_shape = (None, None, input_channels)
else:
input_shape = (input_channels, None, None)
core_model = xception(input_channels=input_channels, lr=lr, optimizer=optimizer,
weights=weights)
input1 = Input(input_shape)
input2 = Input(input_shape)
x1 = core_model(input1)
x2 = core_model(input2)
out1 = Average(name='out1')([x1, x2])
out2 = Subtract(name='out2')([x1, x2])
model = Model(inputs=[input1, input2], outputs=[out1, out2], name='pi_model')
model.compile(optimizer=optimizer,
loss={'out1': 'mse', 'out2': 'mse'},
loss_weights={'out1': loss_weights[0], 'out2': loss_weights[1]})
return model
def mu_model(lr=1e-4, weights=None, optimizer='adam', embd_dim=128, return_core_model=False):
def euclidean_distance(vects):
x, y = vects
sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
return K.sqrt(K.maximum(sum_square, K.epsilon()))
def correlation_distance(vects):
# d = -K.elu(1.4*(correlation(vects) - 1))
d = 1 - correlation(vects)
return d
def dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
if K.image_data_format() == 'channels_last':
input_shape = (None, None, 2)
input_core_shape = (None, None, 1)
input_shape_xception = (None, None, 3)
else:
input_shape = (2, None, None)
input_core_shape = (1, None, None)
input_shape_xception = (3, None, None)
def create_core_model(input_shape, weights):
xception_model = Xception(input_shape=input_shape_xception, include_top=False, weights='imagenet')
input = Input(shape=input_shape)
x = Convolution2D(3, (1, 1), kernel_initializer='he_normal')(input)
x = xception_model(x)
x = GlobalAveragePooling2D(name='pool')(x)
output = Dense(embd_dim, activation='elu', kernel_initializer='he_normal', name='embd')(x)
model = Model(input, output, name='core_model')
if weights is not None:
print('Load weights from', weights)
model.load_weights(weights)
return model
core_model = create_core_model(input_core_shape, weights)
if return_core_model:
return core_model
input = Input(input_shape)
if K.image_data_format() == 'channels_last':
input1 = Lambda(lambda x: x[..., 0: 1])(input)
input2 = Lambda(lambda x: x[..., 1: 2])(input)
else:
input1 = Lambda(lambda x: x[0: 1])(input)
input2 = Lambda(lambda x: x[0: 1])(input)
x1 = core_model(input1)
x2 = core_model(input2)
output = Lambda(correlation_distance, output_shape=dist_output_shape, name='correlation')([x1, x2])
model = Model(input, output, name='mu_model')
if optimizer.lower() == 'adam':
optimizer = Adam(lr, decay=0.0005)
print('Optimizer is Adam')
elif optimizer.lower() == 'sgd':
optimizer = SGD(lr, momentum=0.95, decay=0.0005, nesterov=True)
print('Optimizer is SGD')
else:
raise ValueError('Unknown optimizer')
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
return model
if __name__ == '__main__':
model = xception()
| [
"e.ovchinnikova@gmail.com"
] | e.ovchinnikova@gmail.com |
8eef5fee2874938c330cee1a9bbdf1b6b2d5d338 | 38c883b1938950dd57c76d038b2b7eba6e459aef | /date_parser.py | fdef24ac87d375afe8a67f36146e5c27d1a05797 | [] | no_license | lizstarin/anachronometer | 8905ca66ea1edb59f240e24dbe234de480f04916 | f8397ee209d2ed20c4cc0a2294b0e7e2aa10e60e | refs/heads/master | 2021-01-10T08:15:51.289954 | 2016-03-24T20:39:48 | 2016-03-24T20:39:48 | 54,151,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | import re
class WordDate(object):
def __init__(self, date_string):
self.date_string = date_string
self.earliest = self.__find_earliest()
self.latest = self.__find_latest()
def __is_year(self):
p = re.compile('^\d+$')
return True if p.match(self.date_string) else False
def __find_earliest(self):
if self.date_string == None:
return None
elif self.__is_year():
return int(self.date_string)
else:
return self.__parse()[0]
def __find_latest(self):
if self.date_string == None:
return None
elif self.__is_year():
return int(self.date_string)
else:
return self.__parse()[1]
def __parse(self):
p = re.compile('[\d]*[a-z]{2} [Cc]entury')
q = re.compile('[Bb]efore')
if p.search(self.date_string):
century = int(re.search(r'\d+', self.date_string).group(0))
if q.search(self.date_string):
max_date = (century - 1) * 100
return (None, max_date)
else:
max_date = century * 100
min_date = max_date - 99
return (min_date, max_date)
else:
return (None, None)
def is_earlier_than(self, word_date):
if self.latest < word_date.earliest:
return True
else:
return False
def is_later_than(self, word_date):
if self.earliest > word_date.latest:
return True
else:
return False
def is_concurrent_with(self, word_date):
if not self.is_earlier_than(word_date) and not word_date.is_earlier_than(self):
return True
else:
return False
| [
"liz.starin@gmail.com"
] | liz.starin@gmail.com |
cb13521055d2bf2be99a141738706b8157641aa7 | 0e50f1d6c43e7f5124a30a08494837d3bb6963da | /api/is_exit.py | a3e3ea24a1fdb85fa0d7b40875fb2cf86c3d69b9 | [] | no_license | SemenovAV/MyDoc | 68c2b2aaccbd8cec197de49e212607915d7ddadd | c0145d06329ff4ff1a4633ab5e04d9dca801fa00 | refs/heads/master | 2020-12-10T10:52:06.255944 | 2020-01-14T10:32:13 | 2020-01-14T10:32:13 | 233,572,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | def is_exit(command, elem):
if elem == -1:
command['message'] = 'Отмена команды'
return True
| [
"7.on.off@bk.ru"
] | 7.on.off@bk.ru |
ce00165da005d73dafaa9570b1a00169c3bbe5c4 | c698018fc3c769ce56371405be8e698538c82971 | /Reactive/setup.py | 2b0a99abb2b7c033ecedbfdb69aee9093d861eaa | [] | no_license | Abhi-1725/ROS | 10fe29192a8867596a060e12bebc347a35dcd7b1 | ef3576d811430845b810ba95c8d398eb30f8a245 | refs/heads/master | 2022-12-17T14:06:07.698813 | 2020-09-21T08:01:07 | 2020-09-21T08:01:07 | 297,264,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['ibx0020_control'],
package_dir={'': 'include'}
)
setup(**setup_args)
| [
"noreply@github.com"
] | Abhi-1725.noreply@github.com |
bf62f7461e96c2e0e3d70d379744fefe5953e5ff | b42850bc3e36bbd1683070393582617f2b3cd8e6 | /Encapsulation/restaurant/food/salmon.py | 52b4959c4da8031b664b37ae1b1e31f7c6944eac | [] | no_license | marianidchenko/Python_OOP | aecca18be6df3850c0efbf2fa6d25bf3ff53ae96 | 547c12cbdad5b8c16fa55bba6c03b71db181ad2b | refs/heads/main | 2023-07-09T05:42:43.863681 | 2021-08-14T14:55:51 | 2021-08-14T14:55:51 | 381,572,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from Encapsulation.restaurant.food import MainDish
class Salmon(MainDish):
GRAMS = 22
def __init__(self, name, price):
super().__init__(name, price, Salmon.GRAMS)
| [
"marianidchenko@gmail.com"
] | marianidchenko@gmail.com |
0e9f9cdf7df5800aae94c2577cb1c3bf85e88d22 | ba8541f29c0ea3903b8dc9361b2192ab20ff099c | /src/bl_bert_biencoder/train_bert_crossencoder.py | 7d6d64c87ccaa7b29c72a17bbbb2e09f3713f371 | [] | no_license | ujiuji1259/wiki_en | 1ff3613ef855fd4105198bdf1e5b0f172517803a | 217d30029f15f3f38ed7e5e92d575537e66e41da | refs/heads/master | 2023-04-20T19:17:16.860518 | 2021-05-11T09:17:09 | 2021-05-11T09:17:09 | 357,447,137 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,386 | py | #import cProfile
import sys
sys.path.append('../')
from line_profiler import LineProfiler
import argparse
from logging import getLogger, StreamHandler, DEBUG, Formatter, FileHandler
import numpy as np
import mlflow
import torch
from transformers import AutoTokenizer, AutoModel
import apex
from apex import amp
from dataloader import MentionDataset, CandidateDataset
from bert_ranking import BertCrossEncoder, BertCandidateRanker
from utils.util import to_parallel, save_model
device = "cuda" if torch.cuda.is_available() else "cpu"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="bert-name used for biencoder")
parser.add_argument("--mention_dataset", type=str, help="mention dataset path")
parser.add_argument("--mention_index", type=str, help="mention dataset path")
parser.add_argument("--candidate_dataset", type=str, help="candidate dataset path")
parser.add_argument("--model_path", type=str, help="model save path")
parser.add_argument("--mention_preprocessed", action="store_true", help="whether mention_dataset is preprocessed")
parser.add_argument("--candidate_preprocessed", action="store_true", help="whether candidate_dataset is preprocessed")
parser.add_argument("--epochs", type=int, help="epochs")
parser.add_argument("--lr", type=float, help="learning rate")
parser.add_argument("--warmup_propotion", type=float, help="learning rate")
parser.add_argument("--gradient_accumulation_steps", type=int, help="learning rate")
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument("--model_save_interval", default=None, type=int, help="batch size")
parser.add_argument("--max_ctxt_len", type=int, help="maximum context length")
parser.add_argument("--max_title_len", type=int, help="maximum title length")
parser.add_argument("--max_desc_len", type=int, help="maximum description length")
parser.add_argument("--traindata_size", type=int, help="training datasize (for progress bar)")
parser.add_argument("--mlflow", action="store_true", help="whether using inbatch negative")
parser.add_argument("--parallel", action="store_true", help="whether using inbatch negative")
parser.add_argument("--fp16", action="store_true", help="whether using inbatch negative")
parser.add_argument('--fp16_opt_level', type=str, default="O1")
parser.add_argument("--logging", action="store_true", help="whether using inbatch negative")
parser.add_argument("--log_file", type=str, help="whether using inbatch negative")
args = parser.parse_args()
if args.mlflow:
mlflow.start_run()
arg_dict = vars(args)
for key, value in arg_dict.items():
mlflow.log_param(key, value)
logger = None
if args.logging:
logger = getLogger(__name__)
#handler = StreamHandler()
logger.setLevel(DEBUG)
#handler.setLevel(DEBUG)
formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#handler.setFormatter(formatter)
#logger.addHandler(handler)
if args.log_file:
fh = FileHandler(filename=args.log_file)
fh.setLevel(DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return args, logger
def main():
args, logger = parse_args()
mention_tokenizer = AutoTokenizer.from_pretrained(args.model_name)
mention_tokenizer.add_special_tokens({"additional_special_tokens": ["[M]", "[/M]"]})
index = np.load(args.mention_index)
mention_dataset = MentionDataset(args.mention_dataset, index, mention_tokenizer, preprocessed=args.mention_preprocessed, return_json=True)
#mention_dataset = MentionDataset2(args.mention_dataset, mention_tokenizer, preprocessed=args.mention_preprocessed)
candidate_dataset = CandidateDataset(args.candidate_dataset, mention_tokenizer, preprocessed=args.candidate_preprocessed)
bert = AutoModel.from_pretrained(args.model_name)
bert.resize_token_embeddings(len(mention_tokenizer))
cross = BertCrossEncoder(bert)
model = BertCandidateRanker(
cross,
device=device,
model_path=args.model_path,
use_mlflow=args.mlflow,
logger=logger,
)
try:
model.train(
mention_dataset,
candidate_dataset,
lr=args.lr,
max_ctxt_len=args.max_ctxt_len,
max_title_len=args.max_title_len,
max_desc_len=args.max_desc_len,
traindata_size=args.traindata_size,
model_save_interval=args.model_save_interval,
grad_acc_step=args.gradient_accumulation_steps,
max_grad_norm=args.max_grad_norm,
epochs=args.epochs,
warmup_propotion=args.warmup_propotion,
fp16=args.fp16,
fp16_opt_level=args.fp16_opt_level,
parallel=args.parallel,
)
except KeyboardInterrupt:
pass
save_model(model.model, args.model_path)
#torch.save(model.model.state_dict(), args.model_path)
if args.mlflow:
mlflow.end_run()
if __name__ == "__main__":
"""
prf = LineProfiler()
prf.add_function(BertCandidateGenerator.train)
prf.runcall(main)
prf.print_stats()
#cProfile.run('main()', filename="main.prof")
"""
main()
| [
"suzzz428@gmail.com"
] | suzzz428@gmail.com |
570eb3fab94f2b5a22a4c3db27b78477f5f78f25 | 9cb7670c64c13f09abee315f85f1f6b67b8eb1ad | /ptp/components/language/__init__.py | 751b15e51e39d08a530a5e22dfdab74a2fca8e1a | [
"Apache-2.0"
] | permissive | ConnectionMaster/pytorchpipe | 057325a5d4e8e6ce2198a953a705721388531add | 9cb17271666061cb19fe24197ecd5e4c8d32c5da | refs/heads/develop | 2023-04-07T17:46:26.451692 | 2019-11-05T23:36:13 | 2019-11-05T23:36:13 | 183,084,219 | 1 | 0 | Apache-2.0 | 2023-04-03T23:18:43 | 2019-04-23T19:38:29 | Python | UTF-8 | Python | false | false | 422 | py | from .bow_encoder import BOWEncoder
from .label_indexer import LabelIndexer
from .sentence_indexer import SentenceIndexer
from .sentence_one_hot_encoder import SentenceOneHotEncoder
from .sentence_tokenizer import SentenceTokenizer
from .word_decoder import WordDecoder
__all__ = [
'BOWEncoder',
'LabelIndexer',
'SentenceIndexer',
'SentenceOneHotEncoder',
'SentenceTokenizer',
'WordDecoder'
]
| [
"tkornut@us.ibm.com"
] | tkornut@us.ibm.com |
4be8775aa66d28cead0828a35fd300d2b5860e9b | 81cabe1833537c23a200981d2d4e3ad97cf16699 | /apps/search/tests/tests_views.py | 6ea75af44cc1a855568d76ea83c0a1ee4d10384d | [
"BSD-3-Clause"
] | permissive | Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | ec186ac82670f5020083a543edd4f2d99d7de7dd | 4e374b4d52dfb9039ebe543e7f27682189022307 | refs/heads/master | 2020-05-02T18:12:47.394246 | 2019-03-29T04:45:24 | 2019-03-29T04:45:24 | 178,122,067 | 0 | 0 | BSD-3-Clause | 2019-03-28T03:49:18 | 2019-03-28T03:49:17 | null | UTF-8 | Python | false | false | 3,120 | py | from datetime import datetime, timedelta
from challenges.models import Submission, SubmissionParent
from challenges.tests.fixtures.ignite_fixtures import (setup_ignite_challenge,
teardown_ignite_challenge,
setup_ideation_phase,
setup_development_phase,
create_submission,
create_user)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from search.forms import CustomFacetedSearchForm
from test_utils import TestCase
class SearchTest(TestCase):
def setUp(self):
self.profile = create_user('bob')
self.url = reverse('create_entry', args=['ideas'])
self.initial_data = setup_development_phase(**setup_ignite_challenge())
self.ideation = self.initial_data['ideation_phase']
self.development = self.initial_data['dev_phase']
self.submission_kwargs = {
'created_by': self.profile,
'phase': self.ideation,
'is_winner': True
}
self.submission_a = create_submission(title='ping',
**self.submission_kwargs)
self.submission_b = create_submission(title='pong',
**self.submission_kwargs)
self.search_url = reverse('search:search')
def tearDown(self):
teardown_ignite_challenge()
for model in [SubmissionParent, Submission, User]:
model.objects.all().delete()
def test_search_get(self):
response = self.client.get(self.search_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response.context['form'],
CustomFacetedSearchForm))
self.assertEqual(response.context['page'].paginator.count, 0)
def test_search_title(self):
response = self.client.get(self.search_url+ '?q=ping')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['page'].paginator.count, 1)
def test_search_capitalization(self):
response = self.client.get(self.search_url+ '?q=PING')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['page'].paginator.count, 1)
def test_different_phase_search(self):
"""Archive a submission and search again"""
submission_a_fork = create_submission(title='Brand new',
with_parent=False,
**self.submission_kwargs)
self.submission_a.parent.update_version(submission_a_fork)
response = self.client.get(self.search_url+ '?q=ping')
self.assertEqual(response.context['page'].paginator.count, 0)
response = self.client.get(self.search_url+ '?q=brand')
self.assertEqual(response.context['page'].paginator.count, 1)
| [
"alfredo@madewithbyt.es"
] | alfredo@madewithbyt.es |
a3f7f4e0d3bb4a64c3a7f8c50018c5876765a56d | 98b3f736bf4565e8853bebe0b8554ad82d991fe7 | /python/alp/lectures/stack_examples.py | a387e1110b8b525064b0802336590734520a4a14 | [] | no_license | TomasBahnik/pslib | 0fcf3fdd54059384498cabd0c5a26e7b87321484 | 9c47ea66524676b5f24b7daadc2ecead66ae70d6 | refs/heads/master | 2023-02-20T19:52:46.509321 | 2023-02-12T20:31:02 | 2023-02-12T20:31:02 | 150,961,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,913 | py | # -*- coding: utf-8 -*-
# příklad na použití zásobníku
from alp.lectures.stack import Stack
s = Stack()
s.push(1)
s.push(2)
s.push(3)
print(s.pop())
print(s.pop())
s.push(10)
print(s.pop())
print(s.is_empty())
print(s.pop())
print(s.is_empty())
def to_str(n, base):
""" Převede číslo 'n' na řetězec pro soustavu se základem 'base' """
cislice = "0123456789ABCDEF"
assert (n >= 0)
stack = Stack()
while True:
stack.push(n % base)
n //= base
if n == 0:
break
result = ""
while not stack.is_empty():
result += cislice[stack.pop()]
return result
print(to_str(67, 2))
import random
def quick_sort(a):
""" Setřídí pole a na místě """
stack = Stack()
stack.push((0, len(a) - 1))
while not stack.is_empty():
(first, last) = stack.pop()
m = partition(a, first, last)
if first < m - 1:
stack.push((first, m - 1))
if m + 1 < last:
stack.push((m + 1, last))
def partition(a, first, right):
""" Vrátí index 'i' a změní 'a' tak, že všechny prvky před 'i' jsou menší než a[i] a všechny prvky po 'i' jsou větší než a[i]"""
pivot = a[first]
left = first + 1
# print("Calling partition with a=",a," first=",first," right=",right," pivot=",pivot)
while True:
# najdi první zleva větší než pivot
while left <= right and a[left] <= pivot:
left += 1
while left <= right and a[right] >= pivot:
right -= 1
if right < left:
a[first], a[right] = a[right], a[first] # pivot na místo
return right
a[left], a[right] = a[right], a[left] # výměna
a = [random.randrange(100) for i in range(10)]
quick_sort(a)
print(a)
###############################################################
def par_checker(s):
""" returns True if the string 's' is correctly parenthesized """
lparens = "([{" # otevírací závorky
rparens = ")]}" # uzavírací závorky (ve stejném pořadí)
stack = Stack()
for c in s:
if c in lparens:
stack.push(c)
for i in range(len(rparens)):
if c == rparens[i]:
if stack.is_empty() or stack.pop() != lparens[i]: # líné vyhodnocení
return False
return stack.is_empty()
print(par_checker("(4+(3*[a+b]))"))
print(par_checker("(x+([21*c]-5}*6)"))
print(par_checker("[(3+4)*7-{}*(((0)+(1))%7)]"))
print(par_checker("{ { ( [ ] [ ] ) } ( ) }"))
print(par_checker("ahoj+(svete-(X+Y{}})"))
###############################################################
def eval_postfix(s):
""" returns the value of a postfix expression given by a string 's'"""
stack = Stack()
for x in s.split(): # rozděl 's' dle mezer
if x == '+':
stack.push(stack.pop() + stack.pop())
elif x == '-':
stack.push(-stack.pop() + stack.pop())
elif x == '*':
stack.push(stack.pop() * stack.pop())
elif x == '/':
second = stack.pop()
stack.push(stack.pop() / second)
else:
stack.push(float(x))
return stack.pop()
# python evaluates expressions left to right
# there is no error checking
print(eval_postfix("3 4 *"))
print(eval_postfix("10 6 -"))
print(eval_postfix("20 4 /"))
print(eval_postfix("3 4 * 2 -")) # 3 * 4 - 2
print(eval_postfix("3 4 2 - *")) # 3 * (4 - 2)
###############################################################
def infix_to_postfix(s):
""" converts an infix to postfix expression """
result = "" # output string
op = Stack() # operator stack
i = 0 # index to 's'
while i < len(s):
if s[i] in "0123456789":
while i < len(s) and s[i] in "0123456789":
result += s[i]
i += 1
result += " "
continue
if s[i] == '(':
op.push(s[i])
elif s[i] == ')':
top = op.pop()
while top != '(':
result += top + " "
top = op.pop()
else: # s[i] is +,-,*,/
while not op.is_empty() and not higher_prec(s[i], op.peek()):
result += op.pop() + " "
op.push(s[i])
i += 1
while not op.is_empty():
result += op.pop() + " "
return result
def higher_prec(a, b):
""" does operator 'a' have a higher precedence than the stack top element 'b' """
return ((a in "*/") and (b in "+-")) or (b == "(")
print(infix_to_postfix("32+4"))
print(infix_to_postfix("3*4-2"))
print(infix_to_postfix("3*(4-2)"))
print(infix_to_postfix("(62-32)*5/9"))
def eval_infix(s):
return eval_postfix(infix_to_postfix(s))
print(eval_infix("32+4"))
print(eval_infix("3*4-2"))
print(eval_infix("3*(4-2)"))
print(eval_infix("(62-32)*5/9"))
# nekontroluje, nesmi tam byt mezery
| [
"tomas.bahnik@ataccama.com"
] | tomas.bahnik@ataccama.com |
5d688b376b314301c2bfbbe1ea88c567b4009b51 | f7806230f48f30b2ad468218663eb91d9a1311f8 | /tests/dataset_readers/jsonl_dataset_reader_test.py | aa979f3f0ffc3d167ad51fe2cfdda3965fac053d | [
"MIT"
] | permissive | ChristophAlt/StAn | 246c1350716af50604341f4c217825c72827d92a | abb115b9f245c66edbb3bb89ee45cdc654a48709 | refs/heads/master | 2020-04-27T21:07:38.934768 | 2020-04-08T20:04:51 | 2020-04-08T20:04:51 | 174,685,088 | 3 | 2 | MIT | 2019-04-23T20:21:08 | 2019-03-09T11:21:56 | Python | UTF-8 | Python | false | false | 507 | py | import os
from tests import FIXTURES_ROOT
from stan.dataset_readers.jsonl import JsonlDatasetReader
def test_read():
path = os.path.join(FIXTURES_ROOT, "test_data.jsonl")
reader = JsonlDatasetReader()
instances = reader.read(path)
assert len(instances) == 2
instance = instances[0]
metadata = instance.metadata
assert instance.text == "This is the first test text."
assert list(metadata.keys()) == ["label", "another_field"]
assert metadata["label"] == "label1"
| [
"ChristophAlt@users.noreply.github.com"
] | ChristophAlt@users.noreply.github.com |
7a75e3aa8a8a12b0de49233f63ee22c0fb296524 | 864602310fc298cfb85a81fcfc8f14e89681ecde | /fooof/tests/test_core_funcs.py | c737a0f7877ad6127d18fd9f0d8a4c353c0ff5bd | [
"Apache-2.0"
] | permissive | TheCheeseToast/fooof | 3c426fd204bcaccb87c567b9f3f6822eb237e13f | f3f8422af7d87fa73772e083deaf8439ca59908d | refs/heads/master | 2020-03-10T19:51:07.813027 | 2018-04-14T22:03:04 | 2018-04-14T22:03:04 | 129,557,004 | 0 | 0 | Apache-2.0 | 2018-04-14T21:49:45 | 2018-04-14T21:49:45 | null | UTF-8 | Python | false | false | 2,736 | py | """Tests for FOOOF core.funcs."""
from py.test import raises
import numpy as np
from scipy.stats import norm, linregress
from fooof.core.funcs import *
###################################################################################################
###################################################################################################
def test_gaussian_function():
ctr, amp, wid = 50, 5, 10
xs = np.arange(1, 100)
ys = gaussian_function(xs, ctr, amp, wid)
assert np.all(ys)
# Check distribution matches generated gaussian from scipy
# Generated gaussian is normalized for this comparison, amp tested separately
assert max(ys) == amp
assert np.allclose([i/sum(ys) for i in ys], norm.pdf(xs, ctr, wid))
def test_expo_function():
off, knee, exp = 10, 5, 2
xs = np.arange(1, 100)
ys = expo_function(xs, off, knee, exp)
assert np.all(ys)
# Note: no obvious way to test the knee specifically
# Here - test that past the knee, has expected slope & offset value
exp_meas, off_meas, _, _, _ = linregress(np.log10(xs[knee**2:]), ys[knee**2:])
assert np.isclose(off_meas, off, 0.1)
assert np.isclose(np.abs(exp_meas), exp, 0.1)
def test_expo_nk_function():
off, exp = 10, 2
xs = np.arange(1, 100)
ys = expo_nk_function(xs, off, exp)
assert np.all(ys)
# By design, this expo function assumes log-space ys, linear xs
# Where the log-log should be a straight line. Use that to test.
sl_meas, off_meas, _, _, _ = linregress(np.log10(xs), ys)
assert np.isclose(off, off_meas)
assert np.isclose(exp, np.abs(sl_meas))
def test_linear_function():
off, sl = 10, 2
xs = np.arange(1, 100)
ys = linear_function(xs, off, sl)
assert np.all(ys)
sl_meas, off_meas, _, _, _ = linregress(xs, ys)
assert np.isclose(off_meas, off)
assert np.isclose(sl_meas, sl)
def test_quadratic_function():
off, sl, curve = 10, 3, 2
xs = np.arange(1, 100)
ys = quadratic_function(xs, off, sl, curve)
assert np.all(ys)
curve_meas, sl_meas, off_meas = np.polyfit(xs, ys, 2)
assert np.isclose(off_meas, off)
assert np.isclose(sl_meas, sl)
assert np.isclose(curve_meas, curve)
def test_get_bg_func():
bgf_nk = get_bg_func('fixed')
assert bgf_nk
bgf_kn = get_bg_func('knee')
assert bgf_kn
# Check error
with raises(ValueError):
get_bg_func('bad')
def test_infer_bg_func():
bgp_nk = [50, 1]
bgm_nk = infer_bg_func(bgp_nk)
assert bgm_nk == 'fixed'
bgp_kn = [50, 2, 1]
bgm_kn = infer_bg_func(bgp_kn)
assert bgm_kn == 'knee'
# Check error
with raises(ValueError):
infer_bg_func([1, 2, 3, 4])
| [
"tdonoghue@ucsd.edu"
] | tdonoghue@ucsd.edu |
b632d54426edc5038274807bd1a451eeb284ab5d | ac67fa39118e57d08ddcff4abaf027c96a079dc3 | /0x0F-python-object_relational_mapping/8-model_state_fetch_first.py | 28d8431c8e5da807166404e03f08d2fad0401c47 | [] | no_license | tomasgvgt/holbertonschool-higher_level_programming | 7a444b5807fcd04a2c79347f69027662b181578c | 4b819977d294432f458054e45ed16061454ff43d | refs/heads/master | 2023-04-25T11:47:38.260585 | 2021-05-14T02:28:32 | 2021-05-14T02:28:32 | 291,826,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #!/usr/bin/python3
"""
script that prints the first State object from the database hbtn_0e_6_usa
"""
from sys import argv
from model_state import State, Base
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
# Create an engine
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(
argv[1], argv[2], argv[3]), pool_pre_ping=True)
# create a configured "Session" class
Session = sessionmaker(bind=engine)
# create a Session
session = Session()
# Query (Returns an array of objects)
state_1 = session.query(State).first()
# Loop trough the array and print id and name for each object
print("{}: {}".format(state_1.id, state_1.name))
session.close()
| [
"tomasgvg.t@gmail.com"
] | tomasgvg.t@gmail.com |
560b201e879016b123882a37fe847e1619168c06 | fa938567a8a7eb183a972709b695d8932330e3be | /Exercicio_Lista/exercicio_8.py | a89f0ba1fa83c0c86bd4a39c05f456448634f51d | [] | no_license | murilonerdx/exercicios-python | 7d76a8eb9f9a158f2f7c304a3ecff806c7603281 | 1eb79db5c80105ff462423a98d19207134d6414c | refs/heads/master | 2022-12-19T15:23:32.844996 | 2020-09-22T05:28:55 | 2020-09-22T05:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | num1 = float(input("Digite um numero: "))
num2 = float(input("Digite um numero: "))
soma = num1 + num2
if soma>10:
print("Numero maior que 10")
elif soma <=10:
print("Numero menor ou igual a 10")
| [
"noreply@github.com"
] | murilonerdx.noreply@github.com |
02dcbfba14ea8f005a32b4a9641908224a807ecd | 9bb31d98ea1066d6f5538638e3398aba203cf815 | /astrojc/spectra/plot_idents.py | 161058bec055a1a6fdaa2db6ead92d480e0f502c | [] | no_license | juliotux/astrojc | 2e78445fb55ae26030a06110af0dbc264f9e8a47 | 0581cd5fd456a5d4d0d6d529a66dc1f20b09e80f | refs/heads/master | 2022-02-20T00:40:51.174167 | 2022-02-17T10:52:16 | 2022-02-17T10:52:16 | 75,884,051 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #plot bokeh line ids
from line_ident import specID
import lineid_plot
import numpy as np
import pandas as pd
def plot_line_ids(ax, spec, spec_ids, min_weq=None, **kwargs):
'''
Plots the line IDs in an graph if the equivalent width of the line is bigger
then min_weq.
'''
#TODO: implantar sistema de min_weq
lineid_plot.plot_line_ids(spec.dispersion, spec.flux, spec_ids.wavelength(), spec_ids.line_id(), ax=ax)
| [
"juliocampagnolo@gmail.com"
] | juliocampagnolo@gmail.com |
a34f57852df5161e484925536923600bcfdead3e | 466912406272829982f75854cf0104c6ce8c9814 | /data/spider2/aggregator/company/company_info_expand_mongo.py | c9f58d10f7dd355bbb12504f304988f6b8b4e548 | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,334 | py | # -*- coding: utf-8 -*-
import os, sys, time
import datetime
import json
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../util'))
import config
import db
import loghelper
import url_helper
import name_helper
import util
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../crawler/website'))
import website
#logger
loghelper.init_logger("company_expand", stream=True)
logger = loghelper.get_logger("company_expand")
#mongo
mongo = db.connect_mongo()
#create index?
#collection = mongo.crawler_v3.projectdata
# Collection handles used as module-level globals throughout this script.
collection_beian = mongo.info.beian  # ICP beian (website registration) records, keyed by domain
collection_main_beianhao = mongo.info.main_beianhao  # main beianhao numbers already crawled
collection_itunes = mongo.market.itunes  # cached iTunes app records (trackId, seller info)
collection_android = mongo.market.android  # Android app cache (unused in this section of the file)
collection_android_market = mongo.market.android_market  # per-market Android keys -> apkname
collection_goshang = mongo.info.gongshang  # business-registry cache (unused in this section)
collection_website = mongo.info.website  # crawled website documents (http status, screenshots)
collection_source_company = mongo.source.company  # the documents this script expands
COMPANIES=[]
#exception
# Domains never treated as an app seller's own website during expansion.
itunesDomainEx = ["baidu.com","hao123.com","appzg.org"]
# Templates for embedded sub-documents written by the save_mongo_* helpers.
# Values are defaults for missing keys; the sentinel "now" is replaced with
# the current timestamp by populate_column().
source_artifact_columns = {
    "name": None,
    "description": None,
    "link": None,
    "domain": None,
    "type": None,
    "verify": None,
    "active": None,
    "createTime": "now",
    "modifyTime": "now",
    "rank": None,
    "rankDate": None,
    "extended": None,
    "expanded": None
}
source_company_name_columns = {
    "type": None,
    "name": None,
    "verify": None,
    "chinese": None,
    "createTime": "now",
    "modifyTime": "now",
    "extended": None,
    "expanded": None
}
source_mainbeianhao_columns = {
    "mainBeianhao": None,
    "verify": None,
    "createTime": "now",
    "modifyTime": "now",
    "expanded": None
}
def populate_column(item, columns):
    """Project *item* onto the schema described by *columns*.

    Every key of *columns* appears in the result: taken from *item* when
    present, otherwise from the template default.  The sentinel value
    "now" (from either source) is replaced by the current timestamp.

    Fix: replaces the Python-2-only ``dict.has_key`` with the ``in``
    operator (identical behavior on Python 2, forward-compatible).
    """
    item_new = {}
    for column in columns:
        if column in item:
            item_new[column] = item[column]
        else:
            item_new[column] = columns[column]
        if item_new[column] == "now":
            item_new[column] = datetime.datetime.now()
    return item_new
def save_mongo_source_artifact(source, sourceId, sadata):
    """Normalize *sadata* against the artifact template and $addToSet it
    onto the matching source company; no-op when the company is absent."""
    entry = populate_column(sadata, source_artifact_columns)
    company = collection_source_company.find_one({"source": source, "sourceId": sourceId})
    if company is None:
        return
    collection_source_company.update_one(
        {"_id": company["_id"]},
        {'$addToSet': {"source_artifact": entry}})
def save_mongo_source_company_name(source, sourceId, scndata):
    """Normalize *scndata* against the company-name template and
    $addToSet it onto the matching source company; no-op when absent."""
    entry = populate_column(scndata, source_company_name_columns)
    company = collection_source_company.find_one({"source": source, "sourceId": sourceId})
    if company is None:
        return
    collection_source_company.update_one(
        {"_id": company["_id"]},
        {'$addToSet': {"source_company_name": entry}})
def save_mongo_source_mainbeianhao(source, sourceId, smdata):
    """Normalize *smdata* against the main-beianhao template and
    $addToSet it onto the matching source company; no-op when absent."""
    entry = populate_column(smdata, source_mainbeianhao_columns)
    company = collection_source_company.find_one({"source": source, "sourceId": sourceId})
    if company is None:
        return
    collection_source_company.update_one(
        {"_id": company["_id"]},
        {'$addToSet': {"source_mainbeianhao": entry}})
def save_collection_beian(collection_name, items):
    """Replace-insert beian records keyed by domain.

    Any existing record for the same domain is dropped first, then the
    incoming record is stamped with create/modify times and inserted.
    """
    for record in items:
        if collection_name.find_one({"domain": record["domain"]}) is not None:
            collection_name.delete_one({"domain": record["domain"]})
        now = datetime.datetime.now()
        record["createTime"] = now
        record["modifyTime"] = now
        collection_name.insert_one(record)
def save_collection_mainBeianhao(collection_name, items):
    """Insert each main-beianhao record that is not stored yet; existing
    numbers are left untouched."""
    for record in items:
        existing = collection_name.find_one({"mainBeianhao": record["mainBeianhao"]})
        if existing is not None:
            continue
        now = datetime.datetime.now()
        record["createTime"] = now
        record["modifyTime"] = now
        collection_name.insert_one(record)
def screenshot_wesbite(collection_name, websiteId, screenshot_crawler):
    """Take and store a screenshot for one website document.

    Only acts when the document exists, has never been screenshotted
    (no "screenshotTime") and its last crawl returned HTTP 200.  The
    image is written to a local "jpeg/" directory, uploaded via
    screenshot_crawler.save(), then removed; the document is stamped
    with screenshotTime regardless of success (presumably so failed
    sites are not retried forever) and screenshotId (None on failure).

    NOTE(review): "wesbite" typo is kept -- renaming would break callers.
    """
    dest = "jpeg/"
    website = collection_name.find_one({"_id": websiteId})
    if website is not None and not website.has_key("screenshotTime") and website.has_key("httpcode") and website["httpcode"] ==200:
        logger.info("%s need to do screenshot", website["redirect_url"])
        url = website["redirect_url"]
        id = str(website["_id"])
        # crawler writes dest + id + '.jpg'; timeout presumably seconds -- confirm
        screenshot_crawler.run(url, id, dest, timeout=30)
        screenshotId = None
        jpgfile = dest + id + '.jpg'
        if os.path.isfile(jpgfile):
            size = os.path.getsize(jpgfile)
            if size > 0:
                # non-empty capture: upload and keep the storage id
                screenshotId = screenshot_crawler.save(jpgfile, id)
            # remove the local temp file whether or not it was uploaded
            os.remove(jpgfile)
        screenshotTime = datetime.datetime.now()
        collection_name.update_one({"_id": website["_id"]},{"$set": {"screenshotTime": screenshotTime, "screenshotId": screenshotId}})
        logger.info("%s screenshot finished", website["redirect_url"])
def save_collection_website(collection_name, item):
    """Insert or refresh a website document keyed by its URL.

    Returns the document id, or None when a brand-new insert fails.
    Related websites may have been saved before, hence the lookup first.

    Fix: the bare ``except:`` is narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    record = collection_name.find_one({"url": item["url"]})
    if record is None:
        item["createTime"] = datetime.datetime.now()
        item["modifyTime"] = item["createTime"]
        try:
            # legacy pymongo insert(); kept for compatibility with the
            # rest of this codebase
            id = collection_name.insert(item)
        except Exception:
            return None
    else:
        id = record["_id"]
        item["modifyTime"] = datetime.datetime.now()
        collection_name.update_one({"_id": id}, {'$set': item})
    return id
def merge_beian(items1, items2):
    """Merge two beian record lists, keeping all of *items1* and then
    appending only those *items2* entries whose "domain" is not present
    yet (first occurrence wins).

    Fix: tracks seen domains in a set instead of rescanning the
    accumulated list for every candidate (was accidentally O(n^2)).
    """
    items = list(items1)
    seen_domains = set(item["domain"] for item in items)
    for item2 in items2:
        if item2["domain"] not in seen_domains:
            items.append(item2)
            seen_domains.add(item2["domain"])
    return items
def set_artifact_active(artifact, active_value, source, sourceId):
    """Translate the legacy *active* flag into the newer artifactStatus
    code (11 = inactive, 12 = offline/redirect, 13 = active) and stamp
    it onto the matching embedded artifact."""
    if active_value == 'N':
        status = 11
    elif active_value in ("Offline", "Redirect"):
        status = 12
    else:
        status = 13
    match = {"$elemMatch": {"type": artifact["type"], "domain": artifact["domain"],
                            "name": artifact["name"], "link": artifact["link"]}}
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_artifact": match},
        {'$set': {"source_artifact.$.artifactStatus": status}})
def set_artifact_expand(artifact, source, sourceId):
    """Mark the matching embedded artifact as already expanded."""
    match = {"$elemMatch": {"type": artifact["type"], "domain": artifact["domain"],
                            "name": artifact["name"], "link": artifact["link"]}}
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_artifact": match},
        {'$set': {"source_artifact.$.expanded": "Y"}})
def set_scname_expand(scname, source, sourceId):
    """Mark the matching embedded company name as already expanded."""
    match = {"$elemMatch": {"type": scname["type"], "name": scname["name"]}}
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_company_name": match},
        {'$set': {"source_company_name.$.expanded": "Y"}})
def set_scbeianhao_expand(scbeianhao, source, sourceId):
    """Mark the matching embedded main-beianhao entry as already expanded."""
    match = {"$elemMatch": {"mainBeianhao": scbeianhao["mainBeianhao"]}}
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_mainbeianhao": match},
        {'$set': {"source_mainbeianhao.$.expanded": "Y"}})
def save_beian_artifacts(items, source, sourceId):
    """Record a website artifact (type 4010) for every live beian entry
    whose homepage is not already attached to the source company, by
    link first and by bare domain second."""
    for entry in items:
        # skip domains whose whois registration has expired
        if entry.get("whoisExpire") == 'Y':
            continue
        homepage = "http://www." + entry["domain"]
        base = {"source": source, "sourceId": sourceId}
        by_link = dict(base)
        by_link["source_artifact"] = {"$elemMatch": {"type": 4010, "link": homepage}}
        found = collection_source_company.find_one(by_link)
        if found is None:
            by_domain = dict(base)
            by_domain["source_artifact"] = {"$elemMatch": {"type": 4010, "domain": entry["domain"]}}
            found = collection_source_company.find_one(by_domain)
        if found is None:
            save_mongo_source_artifact(source, sourceId, {
                "name": entry["websiteName"],
                "link": homepage,
                "type": 4010,
                "domain": entry["domain"],
                "extended": 'Y',
            })
def save_website_artifact(website, source, sourceId):
    """Attach *website* as a website artifact (type 4010) unless its URL
    or domain is already linked to the source company.

    Saving is deliberately best-effort (missing keys are tolerated), but
    the bare ``except:`` is narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt still propagate.
    """
    if not find_link(website["url"], source, sourceId):
        try:
            websadata = {
                "name": website["title"],
                "description": None,
                "link": website["url"],
                "type": 4010,
                "domain": website["domain"],
                "extended": 'Y',
            }
            save_mongo_source_artifact(source, sourceId, websadata)
        except Exception:
            pass
def save_beian_mainbeianhaos(items, source, sourceId):
    """Register the main beianhao of every live beian entry that is not
    yet attached to the source company."""
    for entry in items:
        # skip domains whose whois registration has expired
        if entry.get("whoisExpire") == 'Y':
            continue
        existing = collection_source_company.find_one({
            "source": source, "sourceId": sourceId,
            "source_mainbeianhao.mainBeianhao": entry["mainBeianhao"]})
        if existing is None:
            save_mongo_source_mainbeianhao(source, sourceId, {
                "mainBeianhao": entry["mainBeianhao"],
                "extended": 'Y',
            })
def save_beian_company_names(items, source, sourceId):
    """Register the organizer of every live, company-type beian entry as
    a Chinese company name (type 12010) of the source company."""
    for entry in items:
        # skip domains whose whois registration has expired
        if entry.get("whoisExpire") == 'Y':
            continue
        # only enterprise registrants, not personal/government ones
        if entry["organizerType"] != "企业":
            continue
        company_name = name_helper.company_name_normalize(entry["organizer"])
        existing = collection_source_company.find_one({
            "source": source, "sourceId": sourceId,
            "source_company_name.name": company_name})
        if existing is None:
            save_mongo_source_company_name(source, sourceId, {
                "name": company_name,
                "chinese": 'Y',
                "type": 12010,
                "extended": 'Y',
            })
def copy_from_itunes(app, artifact, source, sourceId):
    """Refresh the matching embedded artifact from the cached iTunes
    app record: name, link, domain (trackId) and, when iTunes provides
    one, the description.

    Fix: the two near-identical update branches (with/without a
    description) are collapsed into one conditionally-built $set, and
    the Python-2-only ``has_key`` is replaced with ``in``.
    """
    fields = {
        "source_artifact.$.name": app["trackName"],
        "source_artifact.$.link": app["trackViewUrl"],
        "source_artifact.$.domain": app["trackId"],
        "source_artifact.$.modifyTime": datetime.datetime.now(),
    }
    if "description" in app:
        fields["source_artifact.$.description"] = app["description"]
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_artifact": {
            "$elemMatch": {"type": artifact["type"], "domain": artifact["domain"],
                           "name": artifact["name"], "link": artifact["link"]}}},
        {'$set': fields})
def save_itunes_artifact(app, source, sourceId):
    """Store an iTunes app (type 4040) as a source-company artifact.

    Best-effort: apps missing an expected key (e.g. "description") are
    silently skipped.  Fix: narrowed from a bare ``except:`` so
    SystemExit/KeyboardInterrupt still propagate.
    """
    try:
        itunessadata = {
            "name": app["trackName"],
            "description": app["description"],
            "link": app["trackViewUrl"],
            "type": 4040,
            "domain": app["trackId"],
            "extended": 'Y'
        }
        save_mongo_source_artifact(source, sourceId, itunessadata)
    except Exception:
        pass
#here should check all artifacts under sourceCompanyId
def find_itunesId(itunesId, source, sourceId):
    """Return True when *itunesId* (an iTunes trackId) already appears
    among the company's iTunes artifacts; checks ALL artifacts,
    expanded or not.

    The trackId is taken from artifact["domain"] when set, otherwise it
    is parsed out of the artifact link.  Fix: the bare ``except:``
    around ``int()`` is narrowed to the only exceptions ``int()`` can
    raise (TypeError/ValueError).
    """
    artifacts = find_mongo_data(collection_source_company, "source_artifact",
                                source, sourceId, nonexpand=False)
    for artifact in artifacts:
        if artifact["type"] != 4040:
            continue
        trackid = None
        if artifact["domain"] is None:
            (apptype, appmarket, trackid) = url_helper.get_market(artifact["link"])
            if apptype != 4040:
                continue
        else:
            try:
                trackid = int(artifact["domain"])
            except (TypeError, ValueError):
                pass
        if trackid == itunesId:
            return True
    return False
def copy_from_android(app, artifact, source, sourceId):
    """Refresh the matching embedded artifact from the cached Android
    app record (name, description, link, apkname)."""
    match = {"$elemMatch": {"type": artifact["type"], "domain": artifact["domain"],
                            "name": artifact["name"], "link": artifact["link"]}}
    update = {'$set': {
        "source_artifact.$.name": app["name"],
        "source_artifact.$.description": app["description"],
        "source_artifact.$.link": app["link"],
        "source_artifact.$.domain": app["apkname"],
        "source_artifact.$.modifyTime": datetime.datetime.now(),
    }}
    collection_source_company.update_one(
        {"source": source, "sourceId": sourceId, "source_artifact": match}, update)
def save_android_artifact(app, source, sourceId):
    """Store an Android app (type 4050) as a source-company artifact."""
    payload = {
        "name": app["name"],
        "description": app["description"],
        "link": app["link"],
        "type": 4050,
        "domain": app["apkname"],
        "extended": 'Y',
    }
    save_mongo_source_artifact(source, sourceId, payload)
def save_company_name(app, item_of_name, source, sourceId):
    """Register app[item_of_name] as a company name (type 12010) unless
    it is blank or already attached to the source company."""
    raw_name = app[item_of_name]
    if raw_name is None or raw_name.strip() == "":
        return
    company_name = name_helper.company_name_normalize(raw_name)
    existing = collection_source_company.find_one({
        "source": source, "sourceId": sourceId,
        "source_company_name.name": company_name})
    if existing is not None:
        return
    # the language check runs on the raw (un-normalized) name, as before
    (chinese, company) = name_helper.name_check(raw_name)
    save_mongo_source_company_name(source, sourceId, {
        "name": company_name,
        "chinese": "Y" if chinese is True else "N",
        "type": 12010,
        "extended": 'Y',
    })
#here should check all artifacts under sourceCompanyId
def find_androidAppname(androidApk, source, sourceId):
    """Return True when *androidApk* (an apk package name) already
    appears among the company's Android artifacts; checks ALL
    artifacts, expanded or not.  Blank/None input counts as "found" so
    callers can skip it.

    The apk name is taken from artifact["domain"] when set; otherwise
    it is derived from the artifact link, consulting the android_market
    cache for Baidu (16010) and 360 (16020), whose link ids are market
    keys rather than package names.
    """
    if androidApk is None or androidApk.strip() == "":
        return True
    artifacts = find_mongo_data(collection_source_company, "source_artifact", source, sourceId, nonexpand=False)
    #Check if apkname is already existed in artifacts
    for artifact in artifacts:
        if artifact["type"] != 4050:  # 4050 = Android app artifact
            continue
        apkname = None
        if artifact["domain"] is None:
            (apptype, appmarket, appid) = url_helper.get_market(artifact["link"])
            if apptype != 4050:
                continue
            # Get apkname of baidu and 360 from android market
            if appmarket == 16010 or appmarket == 16020:
                android_app = collection_android_market.find_one({"appmarket": appmarket, "key_int": appid})
                if android_app:
                    apkname = android_app["apkname"]
            else:
                # other markets embed the package name directly in the link
                apkname = appid
        else:
            apkname = artifact["domain"]
        #logger.info(apkname)
        if apkname == androidApk:
            return True
    return False
def find_link(link, source, sourceId):
    """Return True when *link* (or its domain) is already attached to
    the source company as a website artifact; blank/None links count as
    already found."""
    if link is None or link.strip() == "":
        return True
    base = {"source": source, "sourceId": sourceId}
    query = dict(base)
    query["source_artifact"] = {"$elemMatch": {"type": 4010, "link": link}}
    artifact = collection_source_company.find_one(query)
    if artifact is None:
        flag, domain = url_helper.get_domain(link)
        if domain is not None:
            query = dict(base)
            query["source_artifact"] = {"$elemMatch": {"type": 4010, "domain": domain}}
            artifact = collection_source_company.find_one(query)
    return artifact is not None
def save_itunesSellerUrl_artifact(app, source, sourceId):
    """Attach the iTunes seller's website (type 4010) to the source
    company.

    Returns 1 on success; None when the URL is invalid, already linked,
    or the record cannot be built (missing keys are tolerated).
    Fix: narrowed from a bare ``except:`` so SystemExit and
    KeyboardInterrupt still propagate.
    """
    url = app["sellerUrl"]
    flag, domain = url_helper.get_domain(url)
    if flag is not True:
        return None
    if find_link(app["sellerUrl"], source, sourceId):
        return None
    try:
        itunessellersadata = {
            "name": app["sellerName"],
            "description": app["description"],
            "link": app["sellerUrl"],
            "type": 4010,
            "domain": app["sellerDomain"],
            "extended": 'Y',
        }
        save_mongo_source_artifact(source, sourceId, itunessellersadata)
        return 1
    except Exception:
        return None
def save_androidWebsite_artifact(app, source, sourceId):
    """Attach an Android app's website (type 4010) to the source
    company.

    Returns 1 on success; None when the URL is invalid, already linked,
    or the record cannot be built (missing keys are tolerated).
    Fix: narrowed from a bare ``except:`` so SystemExit and
    KeyboardInterrupt still propagate.
    """
    url = app["website"]
    flag, domain = url_helper.get_domain(url)
    if flag is not True:
        return None
    if find_link(url, source, sourceId):
        return None
    try:
        andwebsadata = {
            "name": app["name"],
            "description": app["description"],
            "link": app["website"],
            "type": 4010,
            "domain": app["website_domain"],
            "extended": 'Y',
        }
        save_mongo_source_artifact(source, sourceId, andwebsadata)
        return 1
    except Exception:
        return None
def filter_domain(items, domain):
    """Return only the items whose "domain" field equals *domain*.

    Fix: manual append loop replaced by the idiomatic list
    comprehension (same order, same results).
    """
    return [item for item in items if item["domain"] == domain]
def check_source_artifact(source, sourceId):
    """True when the source company has at least one website artifact
    (type 4010) that is unreviewed (active None) or active ('Y')."""
    query = {
        "source": source, "sourceId": sourceId,
        "source_artifact": {"$elemMatch": {
            "type": 4010, "$or": [{"active": None}, {"active": 'Y'}]}},
    }
    return collection_source_company.find_one(query) is not None
def check_source_company_name(source, sourceId):
    """True when the source company has at least one company name
    (type 12010) that is unreviewed (verify None) or verified ('Y')."""
    query = {
        "source": source, "sourceId": sourceId,
        "source_company_name": {"$elemMatch": {
            "type": 12010, "$or": [{"verify": None}, {"verify": 'Y'}]}},
    }
    return collection_source_company.find_one(query) is not None
def count_company_names(apps, item_of_name):
    """Count distinct app[item_of_name] values that pass the
    company-name check."""
    names = set()
    for app in apps:
        candidate = app.get(item_of_name)
        if candidate is None:
            continue
        ischinese, iscompany = name_helper.name_check(candidate)
        if iscompany == True:
            names.add(candidate)
    return len(names)
def count_domains(apps, item_of_url):
    """Count distinct domains among the app[item_of_url] values that
    parse into a valid domain."""
    domains = set()
    for app in apps:
        flag, domain = url_helper.get_domain(app.get(item_of_url))
        if flag is not None and domain is not None:
            domains.add(domain)
    return len(domains)
def find_mongo_data(collection_name, table_name, source, sourceId, nonexpand=True):
    """Return the entries of the embedded array *table_name* on the
    (source, sourceId) company document.

    With nonexpand=True (default) entries already marked
    expanded == 'Y' are filtered out; otherwise all entries are
    returned.  Returns [] when the document does not exist.

    Fix: Python-2-only ``dict.has_key`` replaced with ``dict.get``;
    the nested if/pass/else collapsed into one guard.  ``nonexpand``
    is still compared with ``is True`` to preserve the original
    behavior for non-bool truthy values.
    """
    new_data = []
    sourcecompany = collection_name.find_one({"source": source, "sourceId": sourceId})
    if sourcecompany is not None:
        for data in sourcecompany[table_name]:
            if nonexpand is True and data.get("expanded") == 'Y':
                continue
            new_data.append(data)
    return new_data
def expand_clean(source, sourceId):
    """Reset the expansion state of a source company document.

    Rewrites the three embedded arrays, keeping only entries that carry
    BOTH flags with extended None (manually entered, not derived by an
    earlier expansion round) and verify 'Y' or None (verified or
    unreviewed); everything else -- including entries missing either
    flag -- is dropped.  Kept entries get their "expanded" marker
    cleared so they will be re-expanded.
    """
    sourcecompany = collection_source_company.find_one({"source": source, "sourceId": sourceId})
    new_data = {}
    for table in ["source_artifact", "source_company_name", "source_mainbeianhao"]:
        new_data[table] = []
        if sourcecompany.has_key(table):
            table_data = sourcecompany[table]
            for data in table_data:
                # keep: not machine-extended AND not rejected by review
                if data.has_key("extended") and data.has_key("verify") and data["extended"] is None and (data["verify"] == 'Y' or data["verify"] is None):
                    data["expanded"] = None
                    new_data[table].append(data)
    collection_source_company.update_one({"_id": sourcecompany["_id"]},
                                         {'$set': {"source_company_name": new_data["source_company_name"],
                                                   "source_artifact": new_data["source_artifact"],
                                                   "source_mainbeianhao": new_data["source_mainbeianhao"]}})
def expand_source_company(source, sourceId, beian_links_crawler, icp_chinaz_crawler, screenshot_crawler, test=False):
logger.info("source: %s, sourceId: %s Start expand!!!", source, sourceId)
logger.info("clean old expanded data")
expand_clean(source, sourceId)
sourcecompany = collection_source_company.find_one({"source": source, "sourceId": sourceId})
# exit()
company_fullname = sourcecompany["source_company"]["fullName"]
if company_fullname is not None and company_fullname.strip() != "":
company_fullname = name_helper.company_name_normalize(company_fullname)
scnames = sourcecompany["source_company_name"]
check_fullname = False
for scname in scnames:
if scname["name"] == company_fullname:
check_fullname = True
break
if check_fullname is False:
(chinese, company) = name_helper.name_check(company_fullname)
if chinese is True:
chinese_type = "Y"
else:
chinese_type = "N"
scname_data ={
"name": company_fullname,
"chinese": chinese_type,
"type": 12010,
}
save_mongo_source_company_name(source, sourceId, scname_data)
round = 1
while True:
if round >= 6:
collection_source_company.update_one({"_id": sourcecompany["_id"]},{'$set': {"scexpanded": True, "modifyTime": datetime.datetime.now()}})
break
source_company_names = find_mongo_data(collection_source_company, "source_company_name", source, sourceId)
main_beianhaos = find_mongo_data(collection_source_company, "source_mainbeianhao", source, sourceId)
artifacts = find_mongo_data(collection_source_company, "source_artifact", source, sourceId)
logger.info(json.dumps(source_company_names, ensure_ascii=False, cls=util.CJsonEncoder))
logger.info(json.dumps(main_beianhaos, ensure_ascii=False, cls=util.CJsonEncoder))
logger.info(json.dumps(artifacts, ensure_ascii=False, cls=util.CJsonEncoder))
# Check if there are new stuff which need to do expansion
if len(source_company_names) == 0 and len(artifacts) == 0 and len(main_beianhaos) == 0:
collection_source_company.update_one({"_id": sourcecompany["_id"]}, {'$set': {"scexpanded": True, "modifyTime": datetime.datetime.now()}})
break
logger.info("source: %s, sourceId: %s expand for round %d", source, sourceId, round)
# Step A/1:按公司名,备案查询
logger.info("source: %s, sourceId: %s 按公司名备案查询", source, sourceId)
for source_company_name in source_company_names:
# Only check chinese company name
if source_company_name["name"] is None or source_company_name["name"].strip() == "":
continue
if source_company_name["chinese"] is None:
(chinese, companyName) = name_helper.name_check(source_company_name["name"])
else:
chinese = source_company_name["chinese"]
if chinese != "Y":
continue
check_name = list(collection_beian.find({"organizer": source_company_name["name"]}))
# Case that one company_name has multiple beian# : 上海汇翼->(today.ai/teambition.com)#If only one found in Mongo.beian(organizer) it is fine
if len(check_name) == 0:
if test:
items_beianlinks = []
else:
items_beianlinks = beian_links_crawler.query_by_company_name(source_company_name["name"])
save_collection_beian(collection_beian, items_beianlinks) # insert infos into Mongo.beian
else:
items_beianlinks = check_name
save_beian_artifacts(items_beianlinks, source, sourceId) # insert website/homepage into Mysql.source_artifact
save_beian_company_names(items_beianlinks, source, sourceId) # insert organizer into Mysql.source_company_names
save_beian_mainbeianhaos(items_beianlinks, source, sourceId) # insert mainBeianhao into Mysql.source_mainbeiahao
# beian
# 发现更多的artifact(website)和公司名,主备案号
# Step A/2:按domian,备案查询
logger.info("source: %s, sourceId: %s 按domian备案查询", source, sourceId)
for artifact in artifacts:
# Only check is artifact is a website
if artifact["type"] != 4010:
continue
if artifact["domain"] is None:
link = url_helper.url_normalize(artifact["link"])
(flag, domain) = url_helper.get_domain(link)
if flag is None:
continue
if flag is False:
continue
else:
domain = artifact["domain"]
if domain is None or domain.strip() == "":
continue
check_domain = list(collection_beian.find({"domain": domain}))
if len(check_domain) == 0:
if test:
items_merge =[]
else:
items_beianlinks = beian_links_crawler.query_by_domain(domain)
items_icpchinaz = icp_chinaz_crawler.query_by_domain(domain)
items_merge = merge_beian(items_beianlinks, items_icpchinaz)
save_collection_beian(collection_beian, items_merge) # insert infos into Mongo.beian
else:
items_merge = check_domain
# filer by check domain to avoid sinaapp.cn case
items_merge = filter_domain(items_merge, domain)
save_beian_artifacts(items_merge, source, sourceId) # insert website/homepage into Mysql.source_artifact
save_beian_company_names(items_merge, source, sourceId) # insert organizer into Mysql.source_company_names
save_beian_mainbeianhaos(items_merge, source, sourceId) # insert mainBeianhao into Mysql.source_mainbeiahao
# beian
# 发现更多的artifact(website)和公司名,主备案号
# Step A/3 #按主备案号查询
logger.info("source: %s, sourceId: %s 按主备案号查询", source, sourceId)
for main_beianhao in main_beianhaos:
mainBeianhao = main_beianhao["mainBeianhao"]
check_mainBeianhao = collection_main_beianhao.find_one({"mainBeianhao": mainBeianhao})
if check_mainBeianhao is None:
if test:
items_merge =[]
else:
items_beianlinks = beian_links_crawler.query_by_main_beianhao(mainBeianhao)
items_icpchinaz = icp_chinaz_crawler.query_by_main_beianhao(mainBeianhao)
items_merge = merge_beian(items_beianlinks, items_icpchinaz)
save_collection_beian(collection_beian, items_merge) # insert infos into Mongo.beian
# if mainBeianhao could be found in two links
if len(items_merge) > 0:
items_main_beianhao = [{"mainBeianhao": mainBeianhao}]
save_collection_mainBeianhao(collection_main_beianhao, items_main_beianhao) # insert mainBeianhao into Mongo.main_beianhao
else:
items_merge = list(collection_beian.find({"mainBeianhao": mainBeianhao}))
save_beian_artifacts(items_merge, source, sourceId) # insert website/homepage into Mysql.source_artifact
save_beian_company_names(items_merge, source, sourceId) # insert organizer into Mysql.source_company_names
save_beian_mainbeianhaos(items_merge, source, sourceId) # insert mainBeianhao into Mysql.source_mainbeiahao
# 发现更多的artifact(website)和公司名
# itunes扩展
# Step B/1 #查询itunes artifact
logger.info("source: %s, sourceId: %s 查询itunes artifact", source, sourceId)
itunes_company_enames = {}
app_by_name = {}
for artifact in artifacts:
if artifact["type"] != 4040:
continue
# Get trackid
trackid = None
if artifact["domain"] is None:
(apptype, appmarket, trackid) = url_helper.get_market(artifact["link"])
if apptype != 4040:
continue
else:
try:
trackid = int(artifact["domain"])
except:
pass
if trackid is not None:
app = collection_itunes.find_one({"trackId": trackid})
if app is None:
# mark it as Noactive
set_artifact_active(artifact, "N", source, sourceId)
else:
copy_from_itunes(app, artifact, source, sourceId) # 存在: copy from mongo.itunes
if app.has_key("offline") and app["offline"] is True:
set_artifact_active(artifact, "Offline", source, sourceId)
else:
set_artifact_active(artifact, "Y", source, sourceId)
english, is_company = name_helper.english_name_check(app["sellerName"])
if english and is_company:
itunes_company_enames["sellerName"] = 1
app_by_name = app
else:
set_artifact_active(artifact, "N", source, sourceId)
# save the only english name
if len(itunes_company_enames) == 1:
company_name = collection_source_company.find_one({"source": source, "sourceId": sourceId, "source_company_name": {"$elemMatch": {"type": 12010, "chinese":"N"}}})
if company_name is None:
save_company_name(app_by_name, "sellerName", source, sourceId)
# Step B/2根据公司名查询更多的itunes artifact
logger.info("source: %s, sourceId: %s 根据公司名查询更多的itunes artifact", source, sourceId)
for source_company_name in source_company_names:
# producer name
'''
check_itunes_producers = list(collection_itunes.find({"developer": source_company_name["name"]}))
if len(check_itunes_producers) > 0:
for app in check_itunes_producers:
# Check if itunesId is already existed in artifacts
if find_itunesId(app["trackId"], source_company_id):
pass
else:
source_artifact_id = save_itunes_artifact(app, source_company_id)
#save_artifact_itunes_rel(app["_id"], source_artifact_id)
save_company_name(app, "developer", source_company_id)
'''
if source_company_name["name"] is None or source_company_name["name"].strip() == "":
continue
check_itunes_sellers = list(collection_itunes.find({"sellerName": source_company_name["name"]}))
if len(check_itunes_sellers) > 0:
'''
domains = {}
for app in check_itunes_sellers:
sellerUrl = app.get("sellerUrl")
flag ,domain = url_helper.get_domain(sellerUrl)
if flag is not None and domain is not None:
domains[domain] = 1
'''
lens_domain = count_domains(check_itunes_sellers, "sellerUrl")
artifact_status = check_source_artifact(source, sourceId)
for app in check_itunes_sellers:
# Check if itunesId is already existed in all artifacts in 1 sourceCompanyId
if find_itunesId(app["trackId"], source, sourceId):
pass
else:
save_itunes_artifact(app, source, sourceId)
if app.has_key("sellerUrl"):
# if find_link(app["sellerUrl"], source_company_id) or check_source_artifact(source_company_id):
if artifact_status:
pass
elif lens_domain == 1:
artifact_id = save_itunesSellerUrl_artifact(app, source, sourceId)
if artifact_id is not None:
artifact_status = True
# comment due to incorrect expand
'''
if app.has_key("supportUrl"):
if find_link(app["supportUrl"], source_company_id):
pass
else:
save_itunesSupportUrl_artifact(app, source_company_id)
'''
# save_artifact_itunes_rel(app["_id"], source_artifact_id)
# save_company_name(app, "sellerName", source_company_id)
# Step B/3根据域名查询更多的itunes artifact
logger.info("source: %s, sourceId: %s 根据域名查询更多的itunes artifact", source, sourceId)
for artifact in artifacts:
if artifact["type"] != 4010:
continue
if artifact["domain"] is None:
(flag, domain) = url_helper.get_domain(artifact["link"])
if flag is None:
continue
if flag is False:
continue
else:
domain = artifact["domain"]
if domain is None or domain.strip() == "":
continue
if domain in itunesDomainEx:
continue
check_itunes_sellerDomains = list(collection_itunes.find({"sellerDomain": domain}))
if len(check_itunes_sellerDomains) > 0:
lens_company_names = count_company_names(check_itunes_sellerDomains, "sellerName")
company_name_status = check_source_company_name(source, sourceId)
for app in check_itunes_sellerDomains:
# Check if itunesId is already existed in all artifacts in 1 sourceCompanyId
if find_itunesId(app["trackId"], source, sourceId):
pass
else:
save_itunes_artifact(app, source, sourceId)
if company_name_status:
pass
elif lens_company_names == 1:
# save_artifact_itunes_rel(app["_id"], source_artifact_id)
chinese, is_company = name_helper.name_check(app["sellerName"])
if chinese and is_company:
save_company_name(app, "sellerName", source, sourceId)
company_name_status = True
english, is_company = name_helper.english_name_check(app["sellerName"])
if english and is_company:
save_company_name(app, "sellerName", source, sourceId)
company_name_status = True
check_itunes_supportDomains = list(collection_itunes.find({"supportDomain": domain}))
if len(check_itunes_supportDomains) > 0 and len(check_itunes_supportDomains) < 100:
lens_company_names = count_company_names(check_itunes_supportDomains, "sellerName")
company_name_status = check_source_company_name(source, sourceId)
for app in check_itunes_supportDomains:
# Check if itunesId is already existed in all artifacts in 1 sourceCompanyId
if find_itunesId(app["trackId"], source, sourceId):
pass
else:
save_itunes_artifact(app, source, sourceId)
# save_artifact_itunes_rel(app["_id"], source_artifact_id)
if company_name_status:
pass
elif lens_company_names == 1:
chinese, is_company = name_helper.name_check(app["sellerName"])
if chinese and is_company:
save_company_name(app, "sellerName", source, sourceId)
company_name_status = True
english, is_company = name_helper.english_name_check(app["sellerName"])
if english and is_company:
save_company_name(app, "sellerName", source, sourceId)
company_name_status = True
# 发现更多的artifact(website)和公司名,check if existed in source_art..and company_name
# android扩展
# Step C/1#查询android artifact
logger.info("source: %s, sourceId: %s 查询android artifact", source, sourceId)
for artifact in artifacts:
if artifact["type"] != 4050:
continue
# Get apkname
apkname = None
if artifact["domain"] is None:
(apptype, appmarket, appid) = url_helper.get_market(artifact["link"])
# Get apkname of baidu and 360 from android market
if apptype != 4050:
continue
if appmarket == 16010 or appmarket == 16020:
android_app = collection_android_market.find_one({"appmarket": appmarket, "key_int": appid})
if android_app:
apkname = android_app["apkname"]
else:
apkname = appid
else:
apkname = artifact["domain"]
if apkname is not None:
app = collection_android.find_one({"apkname": apkname})
if app is None:
# mark it as Noactive
set_artifact_active(artifact, "N", source, sourceId)
else:
copy_from_android(app, artifact, source, sourceId) # 存在: copy from mongo.android
set_artifact_active(artifact, "Y", source, sourceId)
# chinese, is_company = name_helper.name_check(app["author"])
# if is_company:
# save_company_name(app, "author", source_company_id)
else:
set_artifact_active(artifact, "N", source, sourceId)
# Step C/2根据公司名查询更多的android artifact
logger.info("source: %s, sourceId: %s 根据公司名查询更多的android artifact", source, sourceId)
for source_company_name in source_company_names:
# producer name
if source_company_name["name"] is None or source_company_name["name"].strip() == "":
continue
check_android_authors = list(collection_android.find({"author": source_company_name["name"]}))
if len(check_android_authors) > 0 and len(check_android_authors) < 200:
lens_domain = count_domains(check_android_authors, "website")
artifact_status = check_source_artifact(source, sourceId)
# check if author is consistent
for app in check_android_authors:
# Check if AnId have one 4010
if find_androidAppname(app["apkname"], source, sourceId):
pass
else:
save_android_artifact(app, source, sourceId)
if artifact_status:
pass
elif lens_domain == 1:
artifact_id = save_androidWebsite_artifact(app, source, sourceId)
if artifact_id is not None:
artifact_status = True
# save_artifact_android_rel(app["_id"], source_artifact_id)
# save_company_name(app, "author", source_company_id)
# Step C/3根据域名查询更多的android artifact
logger.info("source: %s, sourceId: %s 根据域名查询更多的android artifact", source, sourceId)
for artifact in artifacts:
if artifact["type"] != 4010:
continue
if artifact["domain"] is None:
(flag, domain) = url_helper.get_domain(artifact["link"])
if flag is None:
continue
if flag is False:
continue
else:
domain = artifact["domain"]
if domain is None or domain.strip() == "":
continue
check_android_websiteDomains = list(collection_android.find({"website_domain": domain}))
if len(check_android_websiteDomains) > 0:
lens_company_names = count_company_names(check_android_websiteDomains, "author")
company_name_status = check_source_company_name(source, sourceId)
for app in check_android_websiteDomains:
# Check if AndroidId is already existed in artifacts
if find_androidAppname(app["apkname"], source, sourceId):
pass
else:
save_android_artifact(app, source, sourceId)
# save_artifact_android_rel(app["_id"], source_artifact_id)
if company_name_status:
pass
elif lens_company_names == 1:
chinese, is_company = name_helper.name_check(app["author"])
if is_company:
save_company_name(app, "author", source, sourceId)
company_name_status = True
check_android_apknameDomains = list(collection_android.find({"apkname_domain": domain}))
# add threshold to avoid case: domain: com.wowotuan
if len(check_android_apknameDomains) > 0 and len(check_android_apknameDomains) < 100:
lens_company_names = count_company_names(check_android_apknameDomains, "author")
company_name_status = check_source_company_name(source, sourceId)
for app in check_android_apknameDomains:
# Check if AndroidId is already existed in artifacts
if find_androidAppname(app["apkname"], source, sourceId):
pass
else:
save_android_artifact(app, source, sourceId)
# save_artifact_android_rel(app["_id"], source_artifact_id)
if company_name_status:
pass
elif lens_company_names == 1:
chinese, is_company = name_helper.name_check(app["author"])
if is_company:
save_company_name(app, "author", source, sourceId)
company_name_status = True
# 发现更多的artifact(website)和公司名
# 曾用名 TODO
# 清洗website artfiact
# 查询meta信息, 标记不能访问的?website?, 处理转跳的website
logger.info("source: %s, sourceId: %s website meta", source, sourceId)
for artifact in artifacts:
if artifact["type"] != 4010:
continue
if artifact["link"] is None or artifact["link"].strip() == "":
# set_active("source_artifact", "N", artifact["id"])
set_artifact_active(artifact, "N", source, sourceId)
continue
url = artifact["link"].strip()
meta = collection_website.find_one({"url": url})
if meta is None or meta["httpcode"]==404:
meta = website.get_meta_info(url)
if meta:
websiteId = save_collection_website(collection_website, meta)
if websiteId is not None and not test:
#screenshot_wesbite(collection_website, websiteId, screenshot_crawler)
pass
else:
meta = {
"url": artifact["link"],
"httpcode": 404
}
websiteId = save_collection_website(collection_website, meta)
set_artifact_active(artifact, "N", source, sourceId)
if meta:
# 发生转跳
# logger.info(meta)
if meta["httpcode"] == 200:
redirect_url = meta.get("redirect_url")
if artifact["link"] != redirect_url:
url = url_helper.url_normalize(meta["redirect_url"])
(flag_new, domain_new) = url_helper.get_domain(url)
meta_new = {
"url": url,
"domain": domain_new if flag_new is True else None,
"redirect_url": url,
"title": meta["title"],
"tags": meta["tags"],
"description": meta["description"],
"httpcode": 200
}
websiteId_new = save_collection_website(collection_website, meta_new)
if websiteId_new is not None and not test:
#screenshot_wesbite(collection_website, websiteId_new, screenshot_crawler)
pass
flag, domain = url_helper.get_domain(artifact["link"])
if domain_new != domain: # 跳出原域名
set_artifact_active(artifact, "Redirect", source, sourceId)
else:
if flag is True: # 这是个'好'地址
set_artifact_active(artifact, "Y", source, sourceId)
else:
if flag_new is True: # 转跳后是个 '好'地址
set_artifact_active(artifact, "Redirect", source, sourceId)
save_website_artifact(meta_new, source, sourceId)
else:
set_artifact_active(artifact, "Y", source, sourceId)
else:
set_artifact_active(artifact, "Y", source, sourceId)
elif meta["httpcode"] == 404:
set_artifact_active(artifact, "N", source, sourceId)
# verify -> source_artifacts/source_company_name set verify
logger.info("source: %s, sourceId: %s set verify", source, sourceId)
for artifact in artifacts:
set_artifact_expand(artifact, source, sourceId)
for source_company_name in source_company_names:
set_scname_expand(source_company_name, source, sourceId)
for main_beianhao in main_beianhaos:
set_scbeianhao_expand(main_beianhao, source, sourceId)
round += 1
if __name__ == "__main__":
pass
| [
"hush_guo@163.com"
] | hush_guo@163.com |
e265117edf1e18840b0f614b32faeba6cead8cc0 | 8d77b272cea0122bcaccfba66028408f01d3a9a5 | /models/option.py | 961dc3a0c0a19d77b38ef503de9dd32759f39bd0 | [] | no_license | cho0op/polls_psycopg2_curse | 0ca0a2cfeff0948180dfdc6a41495bfa4fde2095 | 93a84ad458472b9b3d381996a3448e390ca2365e | refs/heads/master | 2022-12-01T04:14:06.963430 | 2020-08-13T10:53:25 | 2020-08-13T10:53:25 | 284,676,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | import datetime
import pytz
from typing import List
from connections import create_connection, connection_pool, get_cursor
import database
class Option:
    """One selectable answer attached to a poll.

    Wraps rows managed by the ``database`` module; ``id`` stays ``None``
    until the option has been persisted with :meth:`save`.
    """

    def __init__(self, option_text: str, poll_id: int, _id: int = None):
        self.text = option_text
        self.poll_id = poll_id
        self.id = _id

    def __str__(self):
        return f"{self.id}: {self.text}"

    def __repr__(self):
        return f"Option({self.id}, {self.text}, {self.poll_id})"

    def save(self):
        """Insert this option and remember the id the database assigned."""
        with get_cursor() as conn:
            inserted_id = database.add_option(conn, self.text, self.poll_id)
        self.id = inserted_id

    def vote(self, username: str):
        """Record a vote by *username* for this option, stamped with UTC time."""
        cast_at = datetime.datetime.now(tz=pytz.utc).timestamp()
        with get_cursor() as conn:
            database.add_poll_vote(conn, username, cast_at, self.id)

    @property
    def votes(self) -> List[database.Vote]:
        """All votes cast for this option."""
        with get_cursor() as conn:
            rows = database.get_votes_for_option(conn, self.id)
        return rows

    @classmethod
    def get(cls, option_id: int) -> "Option":
        """Fetch one option row by id and wrap it in an Option instance."""
        with get_cursor() as conn:
            row = database.get_option(conn, option_id)
            return cls(row[1], row[2], row[0])
| [
"sorintema@gmail.com"
] | sorintema@gmail.com |
3fbd022c9b737e0b105895511171b4ecff43caeb | 6e8c00162af72a10c0e8da52bc2543a9821ab72b | /Contador_mayusculas.py | adfa421cb563f164090d0019c82502918e10fc58 | [] | no_license | TomasNiessner/Mi_primer_programa | 79485bf574190b47ed701d0cbeb63c0b6243cd86 | ddf82a8aaef47f624cbd190019472c03c71ab532 | refs/heads/master | 2020-05-03T12:17:08.932208 | 2019-10-05T15:08:58 | 2019-10-05T15:08:58 | 178,621,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | frase_usuario = input("Ingrese una frase")
n_mayusculas = 0
for letra in frase_usuario:
if (letra.isupper()) == True:
n_mayusculas +=1
print("Hay {} mayusculas".format(n_mayusculas))
| [
"tomasniessner@outlook.com"
] | tomasniessner@outlook.com |
820f2256a3f8d5d229f565d0358fb1ded5c31812 | 30bb84a7545a11c6d9f788933a0907de2fbfd59b | /spider/libs/database/database.py | adefd2e54c4975d4fcf56b4f5c168202caa53138 | [] | no_license | sumex630/xiyou | db4db3d42e53768ae2459587520e260362134803 | a4d31567a7ae8f7b308e9ff97b28c81bc4047b54 | refs/heads/master | 2022-04-09T01:51:43.801145 | 2020-03-25T07:43:33 | 2020-03-25T07:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/10 16:07
# @Author : sunny
# @File : database.py
# @Software: PyCharm
from pymysql import connect
from setting import DATABASES
class DB(object):
    """Context manager yielding a (cursor, connection) pair for MySQL."""
    def __init__(self, db_name='default'):
        """Open a pymysql connection configured by setting.DATABASES[db_name]."""
        self.db_name = db_name
        self.host = DATABASES[self.db_name]['HOST']
        self.port = DATABASES[self.db_name]['PORT']
        self.user = DATABASES[self.db_name]['USER']
        self.password = DATABASES[self.db_name]['PASSWORD']
        self.database = DATABASES[self.db_name]['DATABASE']
        self.charset = DATABASES[self.db_name]['CHARSET']
        self.conn = connect(host=self.host, port=self.port, user=self.user, password=self.password,
                            database=self.database, charset=self.charset)
        self.cs = self.conn.cursor()
    def __enter__(self):
        """Return the (cursor, connection) tuple, in that order.

        Note: callers receive a 2-tuple, not a bare cursor.
        """
        return self.cs, self.conn
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the cursor and the connection.

        NOTE(review): nothing is committed or rolled back here; unless
        autocommit is enabled, pending writes are discarded on close --
        confirm callers commit explicitly.
        """
        self.cs.close()
        self.conn.close()
if __name__ == '__main__':
    # Manual smoke test: dump the customer_info table.
    with DB() as cs_conn:
        # Execute an SQL statement (cs_conn is a (cursor, connection) tuple).
        print(cs_conn)
        cs = cs_conn[0]
        sql = "select * from customer_info;"
        cs.execute(sql)
        data = cs.fetchall()
        print(data)
| [
"sunny@163.com"
] | sunny@163.com |
6d2f817bbb31eb6fbe71f8daec2472c5937a3f76 | bd6b7f461716a406a57fea21ff999a2ab249b086 | /app.py | 3963e636959d929dedd0c404c7eaac2cede47d84 | [
"MIT"
] | permissive | jonatan5524/doodle-classifier | ddc6660e8b9d809a433cb8a9c4082c7faf43fd9b | f531502d2a70779518062705850b1afb2bf41259 | refs/heads/main | 2023-07-27T11:53:01.749437 | 2021-09-11T17:14:48 | 2021-09-11T17:14:48 | 334,482,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from flask import Flask
from flask import render_template
from flask_cors import CORS, cross_origin
from flask import request
from model import Model
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
model = Model()
@app.route('/')
def index():
    """Serve the drawing page.

    Returns:
        str: The rendered ``index.html`` template.
    """
    return render_template('index.html')
@app.route('/load')
@cross_origin()
def load():
    """Load the model's weights from the last saved checkpoint.

    Returns:
        str: The literal acknowledgement ``"loaded"``.
    """
    model.load()
    return "loaded"
@app.route('/capture', methods=['POST'])
@cross_origin()
def capture():
    """Classify the drawing posted in the request body.

    The raw body is decoded as UTF-8 and split on commas before being
    handed to the model -- presumably a flat list of pixel values;
    confirm against the front-end's capture code.

    Returns:
        str: The model's prediction for the drawing.
    """
    data = request.stream.read()
    data = data.decode("utf-8").split(',')
    return model.predict(data)
if __name__ == "__main__":
app.run() | [
"jonatan5524@gmail.com"
] | jonatan5524@gmail.com |
05b2af316903dbb16da40ad952f4ecf0ba4d8489 | 1ee4a6aea7c0f12be92b51c45284796ddd3b0df4 | /music_controller/urls.py | fd75bde2224a86a7f1ab219531636324afc331f3 | [] | no_license | sharma-kshitij/spotify-room | dd8b9eca09407564d682c2a717ab3fb796e8a571 | c02a231ba0d3cc89bbf2c5eab289e594a5ae4a52 | refs/heads/main | 2023-06-27T12:59:17.086376 | 2021-07-23T09:04:21 | 2021-07-23T09:04:21 | 388,739,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | """music_controller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/',include('api.urls'))
]
| [
"sharmakshitij250@gmail.com"
] | sharmakshitij250@gmail.com |
71b1f81c6ad4e5e50160513309e242c535862526 | 025109af645ffed4def9158aea8ff60439100e7e | /ukko/monitor.py | 4d5c83fb3b9cfff74a30eb72518a8b3b867bf314 | [
"MIT"
] | permissive | ryanrhymes/personal | 0cb1101df5658cae5fbf6d5994d7ba17616311d0 | ace3e82cf0c48547d461d3343690506276232fd1 | refs/heads/master | 2021-04-24T17:44:39.824391 | 2018-04-18T08:08:44 | 2018-04-18T08:08:44 | 28,007,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,120 | py | #!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# This script is modified based on center.py, and supports IPV4 UDP multicast.
# The script keeps listening the broadcasts from the nodes.
#
# Liang Wang @ Dept. of Computer Science, University of Helsinki, Finland
# Email: liang.wang [at] helsinki.fi
# 2011.03.07
#
import time
import os,sys
import struct
import pickle
import socket
import threading
import subprocess
import SocketServer
from multiprocessing import *
INCQUE = Queue(2**20)
class MyUDPServer(SocketServer.UDPServer):
    # Allow rebinding the UDP port immediately on restart instead of
    # waiting for the OS to release the previous socket.
    allow_reuse_address = True
    pass
class MyRequestHandler(SocketServer.BaseRequestHandler):
    """Handles one status datagram from a cluster node.

    SECURITY NOTE: pickle.loads() runs on raw UDP payloads -- any host
    that can reach this port can execute arbitrary code via a crafted
    pickle. Safe only on a trusted, firewalled network.
    """
    def handle(self):
        # For a UDP server, self.request is (data, socket).
        try:
            data = pickle.loads(self.request[0].strip())
            #print "%s wrote:%s" % (self.client_address[0], data) # Remember to comment it out!!!
            # Non-blocking put: a full INCQUE raises queue.Full, which is
            # swallowed by the except below rather than stalling the server.
            INCQUE.put(data, False)
            # NOTE(review): local name shadows the module-level `socket`;
            # the value is unused while the ack sendto stays commented out.
            socket = self.request[1]
            #socket.sendto("OK", self.client_address)
        except Exception, err:
            print "Exception:CentralServer.handle():", err
    def handle_error(self, request, client_address):
        # Keep serving after a bad datagram; just log it.
        print "Error:CentralServer.handle_error():", request
class MyListener(object):
    """UDP listener receiving node status reports on self.addr.

    With register=True it also announces its own (ip, port) to every
    ukko001..ukko255 host every 5 minutes so nodes know where to report.
    """
    def __init__(self, mgrp=None, mport=None, register=False):
        #self.addr = (subprocess.Popen(["hostname","-I"], stdout=subprocess.PIPE).communicate()[0].split()[0], 1212)
        self.addr = (mgrp if mgrp else get_myip(), mport if mport else 1212)
        self.server = MyUDPServer(self.addr, MyRequestHandler)
        self.server.allow_reuse_address = True
        self.sock = socket.fromfd(self.server.fileno(), socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.regs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        if register:
            # Daemon thread: dies with the process, no join needed.
            t = threading.Thread(target=self.register_me, args=())
            t.daemon = True
            t.start()
        pass
    def register_me(self):
        # Announce our address to every candidate cluster node, forever.
        while True:
            for i in range(1, 256):
                try:
                    self.regs.sendto(pickle.dumps(self.addr, pickle.HIGHEST_PROTOCOL),
                        ("ukko%03i.hpc.cs.helsinki.fi" % i, self.addr[1]))
                except Exception, err:
                    print "Exception:centermc.py:MyListener.register_me():", err
            time.sleep(300)
        pass
    def listen_forever(self):
        # Blocks forever serving incoming datagrams.
        self.server.serve_forever()
        pass
#
# This script provides some helper functions for all the classes in cluster
# subject. The helper functions included here should only be restricted within
# the scope of this sub-project.
#
# Liang Wang @ Dept. of Computer Science, University of Helsinki, Finland
# 2011.03.08
#
import re
import os,sys
import struct
import pickle
import urllib2
import subprocess
def are_rects_overlapped(rect1, rect2):
    """Return True if two axis-aligned rects (x, y, w, h) overlap.

    Uses the standard separating-interval test. The previous chained
    strict comparisons missed overlaps whenever the rects shared an
    interval boundary on an axis (e.g. identical rects, or rects with
    the same x-range shifted vertically) -- those are now detected.
    Rects that merely touch at an edge still do not count as overlapping.
    """
    x1, y1, w1, h1 = rect1
    x2, y2, w2, h2 = rect2
    # Overlap iff neither axis separates the two intervals.
    return x1 < x2 + w2 and x2 < x1 + w1 and y1 < y2 + h2 and y2 < y1 + h1
def get_myip():
    # First address reported by `hostname -I` (Linux-only); raises
    # IndexError if the host reports no configured address.
    return subprocess.Popen(["hostname","-I"], stdout=subprocess.PIPE).communicate()[0].split()[0]
def calc_rate(r):
    """Format a byte rate as a human-readable string with binary units.

    r is a rate in bytes/second. Output matches the old if-ladder for
    every rate below 2**50; rates of 1 PB/S and above are now rendered
    in PB/S instead of silently returning an empty string.
    """
    units = ("B/S", "KB/S", "MB/S", "GB/S", "TB/S", "PB/S")
    value = r
    i = 0
    # Repeated floor-division by 1024 equals one division by 1024**i,
    # so this reproduces the old int(r / 2**(10*i)) values exactly.
    while value >= 2 ** 10 and i < len(units) - 1:
        value //= 2 ** 10
        i += 1
    return "%i %s" % (value, units[i])
def get_power_consumption():
    # Scrape the cluster power report page and sum every "<number> W"
    # figure found in it. Returns 0 on any network/parse failure.
    power = 0
    try:
        s = urllib2.urlopen("http://www.cs.helsinki.fi/u/jjaakkol/hpc-report.txt", timeout=5).read()
        for x in re.findall(r"(\d+)\W*?W", s, re.I):
            x = int(x)
            power += x
    except Exception, err:
        print "Exception:myutil.py:get_power_consumption():", err
    return power
def get_pc_mikko():
    # Read the last-minute wattage from a shared report file whose first
    # line looks like "a;b;<watts>". Returns None on any failure
    # (missing file, unexpected format).
    power = None
    try:
        f = open("/group/home/greenict/public_html/exactum-kwhcalc/last-minute-watts.txt", "r")
        power = int(re.search(r".*?;.*?;(\d+)", f.readlines()[0]).group(1))
    except Exception, err:
        print "Exception:myutil.py:get_pc_mikko():", err
    return power
#
# This file is the main UI of cluster monitor.
#
# Liang Wang @ Dept. of Computer Science, University of Helsinki, Finland
# 2011.03.07
#
import re
import wx
import time
import random
import threading
import subprocess
import multiprocessing
class Node(object):
def __init__(self, id=None, parent=None):
self.id = id
self.parent = parent
self.name = "n%03i" % (id+1)
self.highlight = False
self.fontsize = 8
self.x, self.y = 0, 0
self.w, self.h = 100, 100
self.plx, self.ply = 9, 9
self.plw, self.plh = 9, 9
self.pmx, self.pmy = 9, 9
self.pmw, self.pmh = 9, 9
self.r, self.rn = 3, 30.0 # Radius and Max num of histories
self.rr_history = [1]
self.tr_history = [1]
# The states a node maintains
self.ts = 0 # Timestamp for the last message
self.load = 0.0 # 1 min average load
self.cpu_count = 1.0 # Num of CPU cores
self.mem_used = 0.0 # Used mem
self.mem_total = 1.0 # Total physic mem
self.user_count = 0 # Num of login users
self.user_uniq = 0 # Num of uniq users
self.disk = "" # Disk usage
self.rx = "" # Total data recv by eth
self.tx = "" # Total data send by eth
self.rr = 0 # The eth interface recv rate
self.tr = 0 # The eth interface send rate
pass
def draw(self, dc):
self.draw_text_info(dc)
self.draw_node_loadbar(dc, self.load/self.cpu_count, self.mem_used/self.mem_total)
self.draw_speed_curve(dc)
self.draw_frame(dc)
self.parent.rr_total += self.rr
self.parent.tr_total += self.tr
pass
def draw_frame(self, dc):
x, y, w, h = self.x, self.y, self.w, self.h
if self.highlight:
dc.SetPen(wx.Pen('red', 2))
else:
dc.SetPen(wx.Pen(wx.Colour(64,64,64), 1))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(x, y, w, h)
pass
def draw_text_info(self, dc):
x, y, w, h, fz = self.x, self.y, self.w, self.h, self.fz
dc.SetFont(wx.Font(fz, wx.FONTFAMILY_SWISS,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_NORMAL))
if time.time() - self.ts < 60:
dc.SetTextForeground('green')
else:
dc.SetTextForeground('grey')
if w < 100:
dc.DrawText("%s" % (self.name), x+1, y)
else:
dc.DrawText("%s D:%s U:%i" % (self.name, self.disk, self.user_count), x+2, y)
dc.DrawText("R:%s T:%s" % (self.rx, self.tx), x+2, y+fz+3)
pass
def draw_node_loadbar(self, dc, load, mem):
load = load if load <= 1 else 1.0
mem = mem if mem <= 1 else 1.0
x, y, w, h = self.x, self.y, self.w, self.h
plx, ply, plw, plh = self.plx, self.ply, self.plw, self.plh
pmx, pmy, pmw, pmh = self.pmx, self.pmy, self.pmw, self.pmh
dc.SetPen(wx.Pen('black', 0, wx.TRANSPARENT))
dc.SetBrush(wx.BLACK_BRUSH)
dc.GradientFillLinear((plx+1,ply+1,plw-2,plh-2), 'green', 'red')
dc.GradientFillLinear((pmx+1,pmy+1,pmw-2,pmh-2), 'green', 'red')
dc.DrawRectangle(plx+plw*load+1,ply+1,plw*(1-load)-1,plh-2)
dc.DrawRectangle(pmx+pmw*mem+1,pmy+1,pmw*(1-mem)-1,pmh-2)
pass
def draw_speed_curve(self, dc):
x, y, w, h, r = self.x, self.y, self.w, self.h, self.r
rn = int(w/r)
self.rr_history.append(self.rr)
self.tr_history.append(self.tr)
norm = max(max(self.rr_history), max(self.tr_history))
self.parent.norm = max(norm, self.parent.norm)
norm = 3.5*self.parent.norm
self.rr_history = self.rr_history[-rn:]
self.tr_history = self.tr_history[-rn:]
dc.SetPen(wx.Pen("cyan", 0, wx.TRANSPARENT))
dc.SetBrush(wx.GREEN_BRUSH)
for i in range(1, len(self.rr_history)):
rr = self.rr_history[-i]
rh = int(h*rr/(norm))
ry = y + h - rh
rx = x + w - i*r
rd = int(r/2)
dc.DrawRectangle(rx-rd, ry, r-1, rh)
dc.SetPen(wx.Pen("cyan", 0, wx.TRANSPARENT))
dc.SetBrush(wx.RED_BRUSH)
for i in range(1, len(self.tr_history)):
tr = self.tr_history[-i]
th = int(h*tr/(norm))
ty = y + h - th
tx = x + w - i*r
dc.DrawRectangle(tx, ty, r-1, th)
pass
class MyFrame(wx.Frame):
def __init__(self, parent, title, size):
self.matrix_x, self.matrix_y = 16, 15
self.nodes = [ Node(i, self) for i in range(self.matrix_x*self.matrix_y) ]
self.norm = 10
self.nodes_lock = threading.Lock()
self.rr_total = 0
self.tr_total = 0
self.power_consumption = get_pc_mikko()
wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=size)
self.anchor0 = None
self.anchor1 = None
self.last_refresh = time.time()
self.event = threading.Event()
self.SetBackgroundColour('black')
wx.EVT_SIZE(self, self.on_size)
wx.EVT_PAINT(self, self.on_paint)
wx.EVT_LEFT_DOWN(self, self.on_left_down)
wx.EVT_LEFT_UP(self, self.on_left_up)
wx.EVT_MOTION(self, self.on_motion)
wx.EVT_RIGHT_DCLICK(self, self.btexp)
wx.EVT_CLOSE(self, self.on_close)
# Start the timer to refresh the frame periodically
self.timer = wx.Timer(self, id=-1)
self.Bind(wx.EVT_TIMER, self.update, self.timer)
self.timer.Start(1000)
# Start the timer to refresh power consumption periodically
self.power_consumption_timer = wx.Timer(self, id=-1)
self.Bind(wx.EVT_TIMER, self.update_power_consumption, self.power_consumption_timer)
self.power_consumption_timer.Start(5*1000)
pass
def Show(self):
wx.Frame.Show(self)
self.on_size()
def on_size(self, event=None):
mx, my = self.matrix_x, self.matrix_y
scrW, scrH = wx.PaintDC(self).GetSize()
nw, nh = scrW/mx - 2, scrH/my - 2
fz = 7 if int(min(nw,nh)/9.5)<7 else int(min(nw,nh)/9.5)
r, rn = self.nodes[0].r, self.nodes[0].rn
r = 3 if int(r/rn)<3 else int(r/rn)
for i in range(my):
for j in range(mx):
id = i*mx+j
node = self.nodes[id]
node.w, node.h = nw, nh
node.x, node.y = (nw+2)*j+2, (nh+2)*i+2
node.plx = node.x + 0.02*nw
node.ply = node.y + 0.35*nh
node.plw = nw*0.95
node.plh = nh*0.15
node.pmx = node.plx
node.pmy = node.ply + node.plh + 0.04*nh
node.pmw = node.plw
node.pmh = node.plh
node.fz = fz
node.r = r
self.Refresh(False)
pass
def on_paint(self, event=None):
dc = wx.PaintDC(self)
dc.SetPen(wx.Pen('black', 0))
self.nodes_lock.acquire()
try:
self.draw_nodes(dc)
except Exception, err:
print "Exception:MyFrame.on_paint():", err
self.nodes_lock.release()
self.draw_select_rect(dc)
self.set_frame_title()
self.last_refresh = time.time()
pass
def update(self, event=None):
self.norm = 10 if self.norm*0.95<10 else self.norm*0.95
self.rr_total, self.tr_total = 0, 0
self.Refresh(False)
def update_power_consumption(self, event=None):
self.power_consumption = get_pc_mikko()
pass
def btexp(self, event=None):
args = []
for node in self.nodes:
if node.highlight:
args += [str(node.id+1)]
subprocess.Popen(["./btexp.py"] + args)
pass
def draw_nodes(self, dc):
for node in self.nodes:
node.draw(dc)
pass
def on_left_down(self, event=None):
self.anchor0 = (event.m_x, event.m_y)
pass
def on_left_up(self, event=None):
self.highlight_nodes()
self.anchor0 = None
self.Refresh(False)
pass
def on_motion(self, event=None):
self.anchor1 = (event.m_x, event.m_y)
if self.anchor0:
self.Refresh(False)
pass
def on_close(self, event=None):
self.event.set()
event.Skip()
pass
def draw_select_rect(self, dc):
if self.anchor0:
x1, y1 = self.anchor0
x2, y2 = self.anchor1
x, y = min(x1,x2), min(y1,y2)
w, h = abs(x1-x2), abs(y1-y2)
dc.SetPen(wx.Pen('red', 3, wx.SHORT_DASH))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(x, y, w, h)
pass
def highlight_nodes(self):
if self.anchor0 and self.anchor1:
x1,y1,x2,y2 = self.anchor0[0],self.anchor0[1],self.anchor1[0],self.anchor1[1]
rect = (min(x1,x2),min(y1,y2),abs(x1-x2),abs(y1-y2))
for node in self.nodes:
if are_rects_overlapped(rect, (node.x,node.y,node.w,node.h)):
node.highlight = not node.highlight
pass
def set_frame_title(self):
rr = calc_rate(self.rr_total)
tr = calc_rate(self.tr_total)
self.SetTitle("UKKO CLUSTER PC: %s W RX: %s TX: %s" % (str(self.power_consumption), rr, tr))
pass
def process_multicast(self):
while not self.event.isSet():
try:
data = INCQUE.get()
self.nodes_lock.acquire()
id = int(re.search(r"(\d+)", data["nodename"]).group(1)) - 1
n = self.nodes[id]
n.ts = time.time()
n.load = float(data["load"])
n.cpu_count = float(data["cpu_count"])
n.mem_used = float(data["mem_used"])
n.mem_total = float(data["mem_total"])
n.user_count = int(data["user_count"])
#n.user_uniq = int(data["user_uniq"])
n.disk = data["disk"]
n.rx = data["rx"]
n.tx = data["tx"]
n.rr = data["rr"]
n.tr = data["tr"]
self.nodes_lock.release()
except Exception, err:
self.nodes_lock.release()
print "Exception:process_multicast():", err
pass
if __name__=="__main__":
app = wx.App()
frame = MyFrame(None, "UKKO Cluster", (800,600))
frame.Show()
# Start the multicast listener as daemon
listener = Process(target=MyListener(None, 1212, True).listen_forever, args=())
listener.daemon = True
listener.start()
# Start the worker thread for processing update multicasts
t = threading.Thread(target=frame.process_multicast, args=())
t.daemon = True
t.start()
# Start the app's mainloop
app.MainLoop()
| [
"ryanrhymes@gmail.com"
] | ryanrhymes@gmail.com |
1ef5dd4a594547acfcde8263b5d9eceae207d423 | c74c6e49660c5a7d4c25f5ae6316ac5c88acd285 | /XRD_Tool/XRD_Tool/bin/Debug/CalcResult.py | 7e4988874a8244fdb9d8909f66b4e1fcdfe2b088 | [] | no_license | weiqizhang001/xrd | 3376f54d42fb4255bf9aee4bfb379e09c3cab0d4 | 2245030eb8b9a05732928f1e2401d13eb551faae | refs/heads/master | 2020-03-21T00:23:49.163710 | 2018-06-23T14:26:14 | 2018-06-23T14:26:14 | 137,891,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | 154.9517099587937,730.4206675142866,7.339115606065661,0.19782585965270286,7.3391156060656595
| [
"weiqizhang_002@163.com"
] | weiqizhang_002@163.com |
0db7de4bffc15fa525f2edaa13ba5f6288ae1253 | 208c32c1943085ad8f2c01a15bd924c6725e72b3 | /venv/Scripts/easy_install-2.7-script.py | 490b29f63b6e41e0837988e623f1c03a9472c851 | [] | no_license | abraaobarros/urbbox-python | a055bcd2d39fb4250e836007d7bcdbefddf63472 | ef167a0e1b2ace5ecf4854520ff12e482467d56c | refs/heads/master | 2021-01-01T05:32:22.873256 | 2012-12-07T06:36:17 | 2012-12-07T06:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | #!C:\Users\Abraao\python_workspace\urbbox-python\venv\Scripts\python2.7.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.15','console_scripts','easy_install-2.7'
# Auto-generated console-script shim: resolves the 'easy_install-2.7' entry
# point from the pinned distribute distribution and exits with its status.
# Do not edit by hand -- it is regenerated on (re)installation.
__requires__ = 'distribute==0.6.15'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.exit(
        load_entry_point('distribute==0.6.15', 'console_scripts', 'easy_install-2.7')()
    )
| [
"abraaobarros3@gmail.com"
] | abraaobarros3@gmail.com |
76c72f6b180c96b7300689c850bcd0b6258a31c9 | 3ca76b01ff8fd067ad5913bc1585de6bdc19de20 | /server.py | 67a0be01af8aa6cc41dc544fe588c97855db675f | [] | no_license | Rookieokky/robottop | 3aaca0bbcbfab0f0885c954b8daea6095af05f82 | 8665598c5f59c8ce1798ce659e455191c4f9a1b4 | refs/heads/master | 2021-01-20T09:40:57.066876 | 2014-08-09T22:36:31 | 2014-08-09T22:36:31 | 22,587,228 | 1 | 0 | null | 2014-08-09T22:36:31 | 2014-08-04T00:37:53 | Python | UTF-8 | Python | false | false | 1,597 | py | #!/usr/bin/python
import web
import smbus
import math
# web.py URL routing: the root path is served by the `index` class below.
urls = (
    '/', 'index'
)
# Power management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
bus = smbus.SMBus(1) # use smbus.SMBus(0) on Revision 1 Raspberry Pi boards
address = 0x68       # This is the address value read via the i2cdetect command
def read_byte(adr):
    """Read one byte from sensor register ``adr`` over I2C."""
    return bus.read_byte_data(address, adr)
def read_word(adr):
    """Read a big-endian 16-bit unsigned word starting at register ``adr``."""
    msb = bus.read_byte_data(address, adr)
    lsb = bus.read_byte_data(address, adr + 1)
    return (msb << 8) + lsb
def read_word_2c(adr):
    """Read a 16-bit register pair as a signed (two's-complement) value."""
    raw = read_word(adr)
    # Values with the sign bit set map to [-32768, -1].
    return raw - 0x10000 if raw >= 0x8000 else raw
def dist(a, b):
    """Return the Euclidean length of the 2-D vector (a, b).

    Uses ``math.hypot``, which is more robust than ``sqrt(a*a + b*b)``
    (no intermediate overflow/underflow for extreme magnitudes).
    """
    return math.hypot(a, b)
def get_y_rotation(x, y, z):
    """Tilt about the Y axis, in degrees, from accelerometer components.

    Equivalent to ``-degrees(atan2(x, sqrt(y*y + z*z)))`` but uses
    ``math.hypot`` for the magnitude, matching :func:`dist`.
    """
    radians = math.atan2(x, math.hypot(y, z))
    return -math.degrees(radians)
def get_x_rotation(x, y, z):
    """Tilt about the X axis, in degrees, from accelerometer components.

    Equivalent to ``degrees(atan2(y, sqrt(x*x + z*z)))`` but uses
    ``math.hypot`` for the magnitude, matching :func:`dist`.
    """
    radians = math.atan2(y, math.hypot(x, z))
    return math.degrees(radians)
class index:
    """web.py handler for GET /: returns the current X/Y tilt as text."""
    def GET(self):
        """Sample the accelerometer and return "x_rotation y_rotation"."""
        # Raw signed 16-bit accelerometer readings
        # (0x3b/0x3d/0x3f are presumably the accel X/Y/Z registers --
        #  TODO confirm against the sensor datasheet).
        accel_xout = read_word_2c(0x3b)
        accel_yout = read_word_2c(0x3d)
        accel_zout = read_word_2c(0x3f)
        # Scale the raw counts by 1/16384 before computing the tilt angles.
        accel_xout_scaled = accel_xout / 16384.0
        accel_yout_scaled = accel_yout / 16384.0
        accel_zout_scaled = accel_zout / 16384.0
        # Space-separated "x y" rotation string consumed by the client.
        return str(get_x_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled))+" "+str(get_y_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled))
if __name__ == "__main__":
    # Now wake the 6050 up as it starts in sleep mode
    bus.write_byte_data(address, power_mgmt_1, 0)
    # Start the web.py application serving the `urls` mapping above.
    app = web.application(urls, globals())
    app.run()
| [
"okkificrada10@gmail.com"
] | okkificrada10@gmail.com |
267d596ed47e833a2df4db36a097b38ad3bcd3d1 | 477442bf4ef317b8e7024c229e7de5b3a11ddbc9 | /Design-Patterns/Behaviour-Design-Patterns/chain-of-responsibility.py | b95dedfcea2a3208714a4989afaebbf6c28cad95 | [] | no_license | counterjack/Python | d856375ab49d6b09eb26938c61eef8d3dd4b6c29 | 0e3cd2364001f8db06714cbd68ffbb23063f2039 | refs/heads/master | 2022-12-10T07:47:36.359514 | 2020-09-04T05:05:36 | 2020-09-04T05:05:36 | 262,550,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional
class Handler(ABC):
    """
    Interface of the Chain of Responsibility: one operation wires handlers
    into a chain, the other processes (or forwards) a request.
    """

    @abstractmethod
    def set_next(self, handler: Handler) -> Handler:
        ...

    @abstractmethod
    def handle(self, request) -> Optional[str]:
        ...
class AbstractHandler(Handler):
    """
    Base class providing the default linking-and-forwarding plumbing shared
    by every concrete handler.
    """

    _next_handler: Handler = None

    def set_next(self, handler: Handler) -> Handler:
        self._next_handler = handler
        # Hand back the handler that was just linked so chains read fluently:
        # monkey.set_next(squirrel).set_next(dog)
        return handler

    @abstractmethod
    def handle(self, request: Any) -> str:
        # Forward to the successor if one is linked; otherwise the request
        # falls off the end of the chain and yields None.
        return self._next_handler.handle(request) if self._next_handler else None
"""
All Concrete Handlers either handle a request or pass it to the next handler in
the chain.
"""
class MonkeyHandler(AbstractHandler):
    """Concrete handler: consumes banana requests, forwards everything else."""

    def handle(self, request: Any) -> str:
        if request != "Banana":
            return super().handle(request)
        return f"Monkey: I'll eat the {request}"
class SquirrelHandler(AbstractHandler):
    """Concrete handler: consumes nut requests, forwards everything else."""

    def handle(self, request: Any) -> str:
        if request != "Nut":
            return super().handle(request)
        return f"Squirrel: I'll eat the {request}"
class DogHandler(AbstractHandler):
    """Concrete handler: consumes meatball requests, forwards everything else."""

    def handle(self, request: Any) -> str:
        if request != "MeatBall":
            return super().handle(request)
        return f"Dog: I'll eat the {request}"
def client_code(handler: Handler) -> None:
    """
    Drive a single handler with a fixed menu of requests. The handler may or
    may not be the head of a longer chain; the client neither knows nor cares.
    """
    for food in ["Nut", "Banana", "Cup of coffee"]:
        print(f"\nClient: Who wants a {food}?")
        outcome = handler.handle(food)
        if not outcome:
            print(f" {food} was left untouched.", end="")
        else:
            print(f" {outcome}", end="")
# Demo: assemble the three-handler chain and exercise it from two entry points.
monkey = MonkeyHandler()
squirrel = SquirrelHandler()
dog = DogHandler()
monkey.set_next(squirrel).set_next(dog)
# The client should be able to send a request to any handler, not just the
# first one in the chain.
print("Chain: Monkey > Squirrel > Dog")
client_code(monkey)
print("\n")
print("Subchain: Squirrel > Dog")
client_code(squirrel)
"ankur@spotdraft.com"
] | ankur@spotdraft.com |
bf172275a43ccaa79fefcb145c3931c849f36341 | 0850be21f17c487880f2b2297205c4cce1bdf7c1 | /lib/modeling/roi_xfrom/roi_align/functions/roi_align.py | a0cb497f1eb434ae3146f9c10ea3adcae5af7a56 | [
"MIT"
] | permissive | aniket-agarwal1999/LTVRR | 68f29758d2e176e4f10fbc196e1b10c8278bf93d | 1ea3a6fba92e6c40990565587e9096cdd7b0049d | refs/heads/master | 2022-12-28T22:24:39.087180 | 2020-09-25T12:01:59 | 2020-09-25T12:01:59 | 282,016,022 | 0 | 0 | MIT | 2020-07-23T17:33:47 | 2020-07-23T17:33:46 | null | UTF-8 | Python | false | false | 1,869 | py | import torch
from torch.autograd import Function
from .._ext import roi_align
# TODO use save_for_backward instead
class RoIAlignFunction(Function):
    """Legacy (instance-style) autograd Function wrapping custom RoIAlign
    CUDA kernels.

    Pools each RoI from ``features`` into a fixed
    ``aligned_height x aligned_width`` output grid via the compiled
    ``roi_align`` extension. CUDA only: the CPU forward path raises
    ``NotImplementedError``.
    """

    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        # Output grid size per RoI.
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        # Scale from input-image coordinates to feature-map coordinates.
        self.spatial_scale = float(spatial_scale)
        # Sampling points per output bin (semantics defined by the CUDA kernel).
        self.sampling_ratio = int(sampling_ratio)
        # Stashed by forward() for reuse in backward() (see module-level TODO:
        # save_for_backward would be the modern mechanism).
        self.rois = None
        self.feature_size = None

    def forward(self, features, rois):
        """Pool ``rois`` from ``features``; returns
        (num_rois, C, aligned_height, aligned_width)."""
        self.rois = rois
        self.feature_size = features.size()
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size(0)
        # Allocate the output on the same device/dtype as the input features.
        output = features.new(num_rois, num_channels, self.aligned_height, self.aligned_width).zero_()
        if features.is_cuda:
            # Argument order is fixed by the C extension's signature.
            roi_align.roi_align_forward_cuda(self.aligned_height,
                                             self.aligned_width,
                                             self.spatial_scale, self.sampling_ratio, features,
                                             rois, output)
        else:
            raise NotImplementedError
        return output

    def backward(self, grad_output):
        """Scatter ``grad_output`` back to a feature-sized gradient tensor.

        Requires that forward() ran first (feature_size recorded) and that
        the gradient lives on the GPU.
        """
        assert(self.feature_size is not None and grad_output.is_cuda)
        batch_size, num_channels, data_height, data_width = self.feature_size
        grad_input = self.rois.new(batch_size, num_channels, data_height,
                                   data_width).zero_()
        roi_align.roi_align_backward_cuda(self.aligned_height,
                                          self.aligned_width,
                                          self.spatial_scale, self.sampling_ratio, grad_output,
                                          self.rois, grad_input)
        # print grad_input
        # No gradient flows to the rois argument.
        return grad_input, None
| [
"zhangjiapply@gmail.com"
] | zhangjiapply@gmail.com |
12cc25a2bf1e524c6958fbd245bbaab2e8f0c23f | 29fa8a1c845c9ccd2149d1f62d0375360dcc1c53 | /Capturing/trail2.py | 496e2a50641ba4c052e2bf0ea8ce337cc5e3df95 | [] | no_license | bsreenath-r7/FaceDetection | dd506368de578f2d78d2165f8b0465d28fa19571 | de35afac833121edfd38d9a940ceb62f0e75ad6d | refs/heads/master | 2020-03-23T04:42:20.093788 | 2018-07-17T10:02:36 | 2018-07-17T10:02:36 | 141,100,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # This script will detect faces via your webcam.
# Tested with OpenCV3
import cv2

# Open the default webcam (device 0).
cap = cv2.VideoCapture(0)

# Create the haar cascade
# NOTE(review): absolute Windows path -- consider making this configurable.
faceCascade = cv2.CascadeClassifier("C:\\Users\\AL2041\\FaceDetecion\\cascades\\haarcascade_frontalface_default.xml")

while True:
    # Capture frame-by-frame; `ret` is False when no frame could be read.
    ret, frame = cap.read()
    if not ret:
        print('cannot read frame')
        break

    # The cascade classifier operates on grayscale images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )

    print("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame; 'q' quits.
    cv2.imshow('frame', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
"sreenath.bandi888@gmail.com"
] | sreenath.bandi888@gmail.com |
8185d326029d7d4ea30f6b04b8b1f60344fbe5f3 | 7e0a8eb74ea03b8da41b79b21227a916beed1e1b | /handlers/topics.py | e76f7698401ac92037d5b781b6c6758c985d5128 | [] | no_license | rok-povsic/17-2-predavanje12 | 2416de15ec0d5b1493008b10ea37742ff1b1dcf5 | eebbfb0538f8d4b80aca29a6876aaaec1ab0f6be | refs/heads/master | 2021-01-23T14:55:52.949308 | 2017-06-03T17:17:44 | 2017-06-03T17:17:44 | 93,262,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | import cgi
from google.appengine.api import users
from google.appengine.api import memcache
from handlers.base import BaseHandler
from models.topic import Topic
from models.comment import Comment
class TopicAddHandler(BaseHandler):
    """Renders the topic-creation form and persists submitted topics."""

    def get(self):
        """Show the creation form with a fresh CSRF token."""
        return self.render_template_with_csrf("topic_add.html")

    def post(self):
        """Validate the CSRF token and the user, then store the new topic."""
        csrf_token_from_form = self.request.get("csrf_token")
        csrf_memcache_result = memcache.get(csrf_token_from_form)
        if not csrf_memcache_result:
            return self.write("You are an attacker.")
        # Invalidate the token so the same form submission cannot be replayed.
        memcache.delete(csrf_token_from_form)

        user = users.get_current_user()
        if not user:
            return self.write("You are not logged in.")

        # Escape user-supplied text before storing to prevent stored XSS.
        title = cgi.escape(self.request.get("title"))
        text = cgi.escape(self.request.get("text"))

        new_topic = Topic(
            title=title,
            content=text,
            author_email=user.email()
        )
        new_topic.put()

        return self.write("Topic created successfully.")
class TopicShowHandler(BaseHandler):
    """Displays one topic together with its non-deleted comments."""

    def get(self, topic_id):
        topic_key = int(topic_id)
        topic = Topic.get_by_id(topic_key)
        # `== False` (not `is False`) is required by the NDB query syntax.
        comments = Comment.query(Comment.topic_id == topic_key, Comment.deleted == False).fetch()
        params = {
            "topic": topic,
            "comments": comments,
            "is_user_admin": users.is_current_user_admin(),
        }
        return self.render_template_with_csrf("topic_show.html", params)
class TopicDeleteHandler(BaseHandler):
    """Soft-deletes a topic: flags the entity instead of removing it."""

    def post(self, topic_id):
        doomed = Topic.get_by_id(int(topic_id))
        doomed.deleted = True
        doomed.put()
        return self.write("Topic deleted.")
| [
"rok.povsic@gmail.com"
] | rok.povsic@gmail.com |
874ba6f9a9e6da14fd395ec5d1c6e026c1aca7f0 | b75b1b85a2529c3bfdeb6e6a84a056e5835f4a9c | /Init.py | 97446e6e760d337178a530fa0f2497ec504ca31c | [] | no_license | drmorale/WIPpsychophysics | c8de5ce710accb2ea359632ea4fafeab8c9acc2a | 18ec40ddc04f9cc4bb7e40defba12883ed90fb40 | refs/heads/master | 2020-04-27T08:07:15.751927 | 2014-11-20T06:49:30 | 2014-11-20T06:49:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,016 | py | from bge import logic
from random import randrange
import random
def init():
    """Seed the global game state, build the randomized trial order, and
    kick off the first trial.

    Side effects only: populates ``logic.globalDict`` and chains into
    :func:`trial`.
    """
    # Per-session random id used in the output filename (drawn BEFORE the
    # reseed below so it differs between sessions).
    logic.globalDict["randNum"] = randrange(0, 1000)
    # Deterministic seed so the trial order is identical across sessions.
    random.seed(0)
    logic.globalDict["saveString"] = '{"trials":['

    # Two obstacle layouts of (x, y) positions. The original source repeated
    # these two layouts verbatim as array1..array16; build the same 16-entry
    # list (alternating copies) with a loop instead.
    layout_a = [(1, 50), (-5, 47), (.5, 15), (-3, 60), (-7, 20), (6, 35), (1, 55)]
    layout_b = [(-1, 20), (5, 23), (-0.5, 55), (3, 10), (7, 50), (-6, 35), (-1, 15)]
    arrayList = [list(layout_a) if i % 2 == 0 else list(layout_b) for i in range(16)]

    # Four repetitions of each of the 16 layout indices (1-based).
    randList = [i + 1 for i in range(16) for _ in range(4)]

    # Draw the 64 trials in random order. The draw-and-pop loop is kept
    # as-is (rather than random.shuffle) to preserve the exact seeded
    # sequence of randrange() calls, and hence the original trial order.
    finalRandList = []
    while len(randList) > 0:
        num = randrange(0, len(randList))
        finalRandList.append(arrayList[randList[num] - 1])
        randList.pop(num)

    logic.globalDict["positions"] = finalRandList
    logic.globalDict["game_start"] = True
    logic.globalDict["trial_number"] = 0
    logic.globalDict["prev_time"] = 0
    logic.globalDict["prev_pos"] = (0, 0)
    logic.globalDict["hacky_bool"] = True
    logic.globalDict["freeze"] = True
    logic.globalDict["trial_start"] = False
    print("hello")
    trial()
def trial():
    """Advance to the next trial: flush the previous trial's JSON record,
    reposition the obstacle cylinders, and reset the camera/timer state.

    After 48 trials the JSON document is closed and the game ends.
    """
    logic.globalDict["trial_start"] = False
    logic.globalDict["freeze"] = True
    s = logic.globalDict["saveString"]
    if "game_start" not in logic.globalDict:
        return
    # The engine appears to invoke this callback twice per trial;
    # hacky_bool swallows every second invocation.
    if not logic.globalDict["hacky_bool"]:
        logic.globalDict["hacky_bool"] = True
        return
    logic.globalDict["hacky_bool"] = False
    if logic.globalDict["trial_number"] == 0:
        logic.globalDict["hacky_bool"] = True
    else:
        # Close the previous trial's "output" array: drop the trailing comma.
        s = s[:-1]
        s += ']'
    logic.globalDict["trial_number"] += 1
    if logic.globalDict["trial_number"] > 48:  # number of trials
        s += '}]}'
        with open('output' + str(logic.globalDict["randNum"]) + '.json', 'a') as f:
            f.write(s)
        logic.endGame()
        return
    print("here! trial " + str(logic.globalDict["trial_number"]))
    if logic.globalDict["trial_number"] != 1:
        s += '},'
    # Build this trial's JSON record locally, then publish it in one go.
    record = '{"number":' + str(logic.globalDict["trial_number"]) + ','
    record += '"obstacles": ['
    # Get the current scene and its object list.
    scene = logic.getCurrentScene()
    objList = scene.objects
    # NOTE(review): indexing starts at positions[1] for trial 1, so
    # positions[0] is never used -- confirm this is intentional.
    currPositions = logic.globalDict["positions"][logic.globalDict["trial_number"]]
    for i in range(7):
        curr = objList["Cylinder" + str(i + 1)]
        (x, y) = currPositions[i]
        curr.position = [x, y, 0]
        curr.visible = False
        record += '{"number": ' + str(i + 1) + ',"location": [' + str(x) + ',' + str(y) + ']}'
        if i != 6:
            record += ','
    record += '],'
    record += '"output":['
    logic.globalDict["saveString"] = record
    # Reset the play area and camera timer for the new trial.
    objList["Bounds"].position = [0, 0, 3]
    camera = objList["Camera"]
    logic.globalDict["prev_time"] = 0
    logic.globalDict["prev_pos"] = (0, 0)
    camera["timer"] = 0
    # Append the completed previous-trial record to the session's output file.
    with open('output' + str(logic.globalDict["randNum"]) + '.json', 'a') as f:
        f.write(s)
| [
"timothy_parsons@brown.edu"
] | timothy_parsons@brown.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.